diff --git a/[refs] b/[refs] index 7071fbe09dd7..f738693080d8 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 93912fe69d67597ee4c0b451d24539ead550bdc3 +refs/heads/master: 54de1f1c44c3522dbdc2201be1f65e733526de51 diff --git a/trunk/Documentation/ABI/testing/sysfs-power b/trunk/Documentation/ABI/testing/sysfs-power index 217772615d02..31725ffeeb3a 100644 --- a/trunk/Documentation/ABI/testing/sysfs-power +++ b/trunk/Documentation/ABI/testing/sysfs-power @@ -231,16 +231,3 @@ Description: Reads from this file return a string consisting of the names of wakeup sources created with the help of /sys/power/wake_lock that are inactive at the moment, separated with spaces. - -What: /sys/power/pm_print_times -Date: May 2012 -Contact: Sameer Nanda -Description: - The /sys/power/pm_print_times file allows user space to - control whether the time taken by devices to suspend and - resume is printed. These prints are useful for hunting down - devices that take too long to suspend or resume. - - Writing a "1" enables this printing while writing a "0" - disables it. The default value is "0". Reading from this file - will display the current value. diff --git a/trunk/Documentation/RCU/checklist.txt b/trunk/Documentation/RCU/checklist.txt index fc103d7a0474..5c8d74968090 100644 --- a/trunk/Documentation/RCU/checklist.txt +++ b/trunk/Documentation/RCU/checklist.txt @@ -162,9 +162,9 @@ over a rather long period of time, but improvements are always welcome! when publicizing a pointer to a structure that can be traversed by an RCU read-side critical section. -5. If call_rcu(), or a related primitive such as call_rcu_bh(), - call_rcu_sched(), or call_srcu() is used, the callback function - must be written to be called from softirq context. In particular, +5. If call_rcu(), or a related primitive such as call_rcu_bh() or + call_rcu_sched(), is used, the callback function must be + written to be called from softirq context. In particular, it cannot block. 6. Since synchronize_rcu() can block, it cannot be called from @@ -202,12 +202,11 @@ over a rather long period of time, but improvements are always welcome! updater uses call_rcu_sched() or synchronize_sched(), then the corresponding readers must disable preemption, possibly by calling rcu_read_lock_sched() and rcu_read_unlock_sched(). - If the updater uses synchronize_srcu() or call_srcu(), - the the corresponding readers must use srcu_read_lock() and - srcu_read_unlock(), and with the same srcu_struct. The rules for - the expedited primitives are the same as for their non-expedited - counterparts. Mixing things up will result in confusion and - broken kernels. + If the updater uses synchronize_srcu(), the the corresponding + readers must use srcu_read_lock() and srcu_read_unlock(), + and with the same srcu_struct. The rules for the expedited + primitives are the same as for their non-expedited counterparts. + Mixing things up will result in confusion and broken kernels. One exception to this rule: rcu_read_lock() and rcu_read_unlock() may be substituted for rcu_read_lock_bh() and rcu_read_unlock_bh() @@ -334,14 +333,14 @@ over a rather long period of time, but improvements are always welcome! victim CPU from ever going offline.) 14. SRCU (srcu_read_lock(), srcu_read_unlock(), srcu_dereference(), - synchronize_srcu(), synchronize_srcu_expedited(), and call_srcu()) - may only be invoked from process context. 
Unlike other forms of - RCU, it -is- permissible to block in an SRCU read-side critical - section (demarked by srcu_read_lock() and srcu_read_unlock()), - hence the "SRCU": "sleepable RCU". Please note that if you - don't need to sleep in read-side critical sections, you should be - using RCU rather than SRCU, because RCU is almost always faster - and easier to use than is SRCU. + synchronize_srcu(), and synchronize_srcu_expedited()) may only + be invoked from process context. Unlike other forms of RCU, it + -is- permissible to block in an SRCU read-side critical section + (demarked by srcu_read_lock() and srcu_read_unlock()), hence the + "SRCU": "sleepable RCU". Please note that if you don't need + to sleep in read-side critical sections, you should be using + RCU rather than SRCU, because RCU is almost always faster and + easier to use than is SRCU. If you need to enter your read-side critical section in a hardirq or exception handler, and then exit that same read-side @@ -354,8 +353,8 @@ over a rather long period of time, but improvements are always welcome! cleanup_srcu_struct(). These are passed a "struct srcu_struct" that defines the scope of a given SRCU domain. Once initialized, the srcu_struct is passed to srcu_read_lock(), srcu_read_unlock() - synchronize_srcu(), synchronize_srcu_expedited(), and call_srcu(). - A given synchronize_srcu() waits only for SRCU read-side critical + synchronize_srcu(), and synchronize_srcu_expedited(). A given + synchronize_srcu() waits only for SRCU read-side critical sections governed by srcu_read_lock() and srcu_read_unlock() calls that have been passed the same srcu_struct. This property is what makes sleeping read-side critical sections tolerable -- @@ -375,7 +374,7 @@ over a rather long period of time, but improvements are always welcome! requiring SRCU's read-side deadlock immunity or low read-side realtime latency. - Note that, rcu_assign_pointer() relates to SRCU just as it does + Note that, rcu_assign_pointer() relates to SRCU just as they do to other forms of RCU. 15. The whole point of call_rcu(), synchronize_rcu(), and friends diff --git a/trunk/Documentation/RCU/rcubarrier.txt b/trunk/Documentation/RCU/rcubarrier.txt index 38428c125135..e439a0edee22 100644 --- a/trunk/Documentation/RCU/rcubarrier.txt +++ b/trunk/Documentation/RCU/rcubarrier.txt @@ -79,6 +79,8 @@ complete. Pseudo-code using rcu_barrier() is as follows: 2. Execute rcu_barrier(). 3. Allow the module to be unloaded. +Quick Quiz #1: Why is there no srcu_barrier()? + The rcutorture module makes use of rcu_barrier in its exit function as follows: @@ -160,7 +162,7 @@ for any pre-existing callbacks to complete. Then lines 55-62 print status and do operation-specific cleanup, and then return, permitting the module-unload operation to be completed. -Quick Quiz #1: Is there any other situation where rcu_barrier() might +Quick Quiz #2: Is there any other situation where rcu_barrier() might be required? Your module might have additional complications. For example, if your @@ -240,7 +242,7 @@ reaches zero, as follows: 4 complete(&rcu_barrier_completion); 5 } -Quick Quiz #2: What happens if CPU 0's rcu_barrier_func() executes +Quick Quiz #3: What happens if CPU 0's rcu_barrier_func() executes immediately (thus incrementing rcu_barrier_cpu_count to the value one), but the other CPU's rcu_barrier_func() invocations are delayed for a full grace period? Couldn't this result in @@ -257,7 +259,12 @@ so that your module may be safely unloaded. 
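As a rough illustration of the three-step unload pattern described above (and of what rcutorture's cleanup does), a module exit path might be sketched as follows. The stop_posting_callbacks() helper is hypothetical and stands in for whatever mechanism the module uses to stop queueing new callbacks; rcu_barrier() and module_exit() are the real kernel primitives:

	static void __exit mymod_exit(void)
	{
		/* 1. Prevent any new RCU callbacks from being posted. */
		stop_posting_callbacks();	/* hypothetical helper */

		/* 2. Wait for all previously posted callbacks to complete. */
		rcu_barrier();

		/* 3. It is now safe for the module text and data to go away. */
	}
	module_exit(mymod_exit);

Without the rcu_barrier() call in step 2, a callback posted just before unload could still be pending and run after the module's code has been freed.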
Answers to Quick Quizzes -Quick Quiz #1: Is there any other situation where rcu_barrier() might +Quick Quiz #1: Why is there no srcu_barrier()? + +Answer: Since there is no call_srcu(), there can be no outstanding SRCU + callbacks. Therefore, there is no need to wait for them. + +Quick Quiz #2: Is there any other situation where rcu_barrier() might be required? Answer: Interestingly enough, rcu_barrier() was not originally @@ -271,7 +278,7 @@ Answer: Interestingly enough, rcu_barrier() was not originally implementing rcutorture, and found that rcu_barrier() solves this problem as well. -Quick Quiz #2: What happens if CPU 0's rcu_barrier_func() executes +Quick Quiz #3: What happens if CPU 0's rcu_barrier_func() executes immediately (thus incrementing rcu_barrier_cpu_count to the value one), but the other CPU's rcu_barrier_func() invocations are delayed for a full grace period? Couldn't this result in diff --git a/trunk/Documentation/RCU/torture.txt b/trunk/Documentation/RCU/torture.txt index 7dce8a17eac2..4ddf3913fd8c 100644 --- a/trunk/Documentation/RCU/torture.txt +++ b/trunk/Documentation/RCU/torture.txt @@ -174,20 +174,11 @@ torture_type The type of RCU to test, with string values as follows: and synchronize_rcu_bh_expedited(). "srcu": srcu_read_lock(), srcu_read_unlock() and - call_srcu(). - - "srcu_sync": srcu_read_lock(), srcu_read_unlock() and synchronize_srcu(). "srcu_expedited": srcu_read_lock(), srcu_read_unlock() and synchronize_srcu_expedited(). - "srcu_raw": srcu_read_lock_raw(), srcu_read_unlock_raw(), - and call_srcu(). - - "srcu_raw_sync": srcu_read_lock_raw(), srcu_read_unlock_raw(), - and synchronize_srcu(). - "sched": preempt_disable(), preempt_enable(), and call_rcu_sched(). diff --git a/trunk/Documentation/RCU/whatisRCU.txt b/trunk/Documentation/RCU/whatisRCU.txt index 69ee188515e7..6bbe8dcdc3da 100644 --- a/trunk/Documentation/RCU/whatisRCU.txt +++ b/trunk/Documentation/RCU/whatisRCU.txt @@ -833,9 +833,9 @@ sched: Critical sections Grace period Barrier SRCU: Critical sections Grace period Barrier - srcu_read_lock synchronize_srcu srcu_barrier - srcu_read_unlock call_srcu - srcu_read_lock_raw synchronize_srcu_expedited + srcu_read_lock synchronize_srcu N/A + srcu_read_unlock synchronize_srcu_expedited + srcu_read_lock_raw srcu_read_unlock_raw srcu_dereference diff --git a/trunk/Documentation/devicetree/bindings/mfd/tps65910.txt b/trunk/Documentation/devicetree/bindings/mfd/tps65910.txt index d2802d4717bc..645f5eaadb3f 100644 --- a/trunk/Documentation/devicetree/bindings/mfd/tps65910.txt +++ b/trunk/Documentation/devicetree/bindings/mfd/tps65910.txt @@ -17,46 +17,18 @@ Required properties: device need to be present. The definition for each of these nodes is defined using the standard binding for regulators found at Documentation/devicetree/bindings/regulator/regulator.txt. - The regulator is matched with the regulator-compatible. - The valid regulator-compatible values are: + The valid names for regulators are: tps65910: vrtc, vio, vdd1, vdd2, vdd3, vdig1, vdig2, vpll, vdac, vaux1, vaux2, vaux33, vmmc tps65911: vrtc, vio, vdd1, vdd3, vddctrl, ldo1, ldo2, ldo3, ldo4, ldo5, ldo6, ldo7, ldo8 -- xxx-supply: Input voltage supply regulator. - These entries are require if regulators are enabled for a device. Missing of these - properties can cause the regulator registration fails. - If some of input supply is powered through battery or always-on supply then - also it is require to have these parameters with proper node handle of always - on power supply. 
- tps65910: - vcc1-supply: VDD1 input. - vcc2-supply: VDD2 input. - vcc3-supply: VAUX33 and VMMC input. - vcc4-supply: VAUX1 and VAUX2 input. - vcc5-supply: VPLL and VDAC input. - vcc6-supply: VDIG1 and VDIG2 input. - vcc7-supply: VRTC input. - vccio-supply: VIO input. - tps65911: - vcc1-supply: VDD1 input. - vcc2-supply: VDD2 input. - vcc3-supply: LDO6, LDO7 and LDO8 input. - vcc4-supply: LDO5 input. - vcc5-supply: LDO3 and LDO4 input. - vcc6-supply: LDO1 and LDO2 input. - vcc7-supply: VRTC input. - vccio-supply: VIO input. - Optional properties: - ti,vmbch-threshold: (tps65911) main battery charged threshold comparator. (see VMBCH_VSEL in TPS65910 datasheet) - ti,vmbch2-threshold: (tps65911) main battery discharged threshold comparator. (see VMBCH_VSEL in TPS65910 datasheet) -- ti,en-ck32k-xtal: enable external 32-kHz crystal oscillator (see CK32K_CTRL - in TPS6591X datasheet) - ti,en-gpio-sleep: enable sleep control for gpios There should be 9 entries here, one for each gpio. @@ -84,110 +56,74 @@ Example: ti,en-gpio-sleep = <0 0 1 0 0 0 0 0 0>; - vcc1-supply = <®_parent>; - vcc2-supply = <&some_reg>; - vcc3-supply = <...>; - vcc4-supply = <...>; - vcc5-supply = <...>; - vcc6-supply = <...>; - vcc7-supply = <...>; - vccio-supply = <...>; - regulators { - #address-cells = <1>; - #size-cells = <0>; - - vdd1_reg: regulator@0 { - regulator-compatible = "vdd1"; - reg = <0>; + vdd1_reg: vdd1 { regulator-min-microvolt = < 600000>; regulator-max-microvolt = <1500000>; regulator-always-on; regulator-boot-on; ti,regulator-ext-sleep-control = <0>; }; - vdd2_reg: regulator@1 { - regulator-compatible = "vdd2"; - reg = <1>; + vdd2_reg: vdd2 { regulator-min-microvolt = < 600000>; regulator-max-microvolt = <1500000>; regulator-always-on; regulator-boot-on; ti,regulator-ext-sleep-control = <4>; }; - vddctrl_reg: regulator@2 { - regulator-compatible = "vddctrl"; - reg = <2>; + vddctrl_reg: vddctrl { regulator-min-microvolt = < 600000>; regulator-max-microvolt = <1400000>; regulator-always-on; regulator-boot-on; ti,regulator-ext-sleep-control = <0>; }; - vio_reg: regulator@3 { - regulator-compatible = "vio"; - reg = <3>; + vio_reg: vio { regulator-min-microvolt = <1500000>; regulator-max-microvolt = <1800000>; regulator-always-on; regulator-boot-on; ti,regulator-ext-sleep-control = <1>; }; - ldo1_reg: regulator@4 { - regulator-compatible = "ldo1"; - reg = <4>; + ldo1_reg: ldo1 { regulator-min-microvolt = <1000000>; regulator-max-microvolt = <3300000>; ti,regulator-ext-sleep-control = <0>; }; - ldo2_reg: regulator@5 { - regulator-compatible = "ldo2"; - reg = <5>; + ldo2_reg: ldo2 { regulator-min-microvolt = <1050000>; regulator-max-microvolt = <1050000>; ti,regulator-ext-sleep-control = <0>; }; - ldo3_reg: regulator@6 { - regulator-compatible = "ldo3"; - reg = <6>; + ldo3_reg: ldo3 { regulator-min-microvolt = <1000000>; regulator-max-microvolt = <3300000>; ti,regulator-ext-sleep-control = <0>; }; - ldo4_reg: regulator@7 { - regulator-compatible = "ldo4"; - reg = <7>; + ldo4_reg: ldo4 { regulator-min-microvolt = <1000000>; regulator-max-microvolt = <3300000>; regulator-always-on; ti,regulator-ext-sleep-control = <0>; }; - ldo5_reg: regulator@8 { - regulator-compatible = "ldo5"; - reg = <8>; + ldo5_reg: ldo5 { regulator-min-microvolt = <1000000>; regulator-max-microvolt = <3300000>; ti,regulator-ext-sleep-control = <0>; }; - ldo6_reg: regulator@9 { - regulator-compatible = "ldo6"; - reg = <9>; + ldo6_reg: ldo6 { regulator-min-microvolt = <1200000>; regulator-max-microvolt = <1200000>; 
ti,regulator-ext-sleep-control = <0>; }; - ldo7_reg: regulator@10 { - regulator-compatible = "ldo7"; - reg = <10>; + ldo7_reg: ldo7 { regulator-min-microvolt = <1200000>; regulator-max-microvolt = <1200000>; regulator-always-on; regulator-boot-on; ti,regulator-ext-sleep-control = <1>; }; - ldo8_reg: regulator@11 { - regulator-compatible = "ldo8"; - reg = <11>; + ldo8_reg: ldo8 { regulator-min-microvolt = <1000000>; regulator-max-microvolt = <3300000>; regulator-always-on; diff --git a/trunk/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt b/trunk/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt index bd9be0b5bc20..0d93b4b0e0e3 100644 --- a/trunk/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt +++ b/trunk/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt @@ -3,22 +3,21 @@ The Enhanced Secure Digital Host Controller provides an interface for MMC, SD, and SDIO types of memory cards. -This file documents differences between the core properties described -by mmc.txt and the properties used by the sdhci-esdhc driver. - Required properties: + - compatible : should be + "fsl,-esdhc", "fsl,esdhc" + - reg : should contain eSDHC registers location and length. + - interrupts : should contain eSDHC interrupt. - interrupt-parent : interrupt source phandle. - clock-frequency : specifies eSDHC base clock frequency. - -Optional properties: - - sdhci,wp-inverted : specifies that eSDHC controller reports - inverted write-protect state; New devices should use the generic - "wp-inverted" property. - - sdhci,1-bit-only : specifies that a controller can only handle - 1-bit data transfers. New devices should use the generic - "bus-width = <1>" property. - - sdhci,auto-cmd12: specifies that a controller can only handle auto - CMD12. + - sdhci,wp-inverted : (optional) specifies that eSDHC controller + reports inverted write-protect state; New devices should use + the generic "wp-inverted" property. + - sdhci,1-bit-only : (optional) specifies that a controller can + only handle 1-bit data transfers. New devices should use the + generic "bus-width = <1>" property. + - sdhci,auto-cmd12: (optional) specifies that a controller can + only handle auto CMD12. Example: diff --git a/trunk/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/trunk/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt index 70cd49b1caa8..fea541ee8b34 100644 --- a/trunk/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt +++ b/trunk/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt @@ -3,15 +3,17 @@ The Enhanced Secure Digital Host Controller on Freescale i.MX family provides an interface for MMC, SD, and SDIO types of memory cards. -This file documents differences between the core properties described -by mmc.txt and the properties used by the sdhci-esdhc-imx driver. 
- Required properties: - compatible : Should be "fsl,-esdhc" +- reg : Should contain eSDHC registers location and length +- interrupts : Should contain eSDHC interrupt Optional properties: +- non-removable : Indicate the card is wired to host permanently - fsl,cd-internal : Indicate to use controller internal card detection - fsl,wp-internal : Indicate to use controller internal write protection +- cd-gpios : Specify GPIOs for card detection +- wp-gpios : Specify GPIOs for write protection Examples: diff --git a/trunk/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt b/trunk/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt index 0e5e2ec4001d..d64aea5a4203 100644 --- a/trunk/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt +++ b/trunk/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt @@ -1,9 +1,8 @@ MMC/SD/SDIO slot directly connected to a SPI bus -This file documents differences between the core properties described -by mmc.txt and the properties used by the mmc_spi driver. - Required properties: +- compatible : should be "mmc-spi-slot". +- reg : should specify SPI address (chip-select number). - spi-max-frequency : maximum frequency for this device (Hz). - voltage-ranges : two cells are required, first cell specifies minimum slot voltage (mV), second cell specifies maximum slot voltage (mV). @@ -12,7 +11,8 @@ Required properties: Optional properties: - gpios : may specify GPIOs in this order: Card-Detect GPIO, Write-Protect GPIO. Note that this does not follow the - binding from mmc.txt, for historical reasons. + binding from mmc.txt, for historic reasons. +- interrupts : the interrupt of a card detect interrupt. - interrupt-parent : the phandle for the interrupt controller that services interrupts for this device. diff --git a/trunk/Documentation/devicetree/bindings/mmc/mmc.txt b/trunk/Documentation/devicetree/bindings/mmc/mmc.txt index 8a6811f4a02f..6e70dcde0a71 100644 --- a/trunk/Documentation/devicetree/bindings/mmc/mmc.txt +++ b/trunk/Documentation/devicetree/bindings/mmc/mmc.txt @@ -2,17 +2,13 @@ These properties are common to multiple MMC host controllers. Any host that requires the respective functionality should implement them using these definitions. -Interpreted by the OF core: -- reg: Registers location and length. -- interrupts: Interrupts used by the MMC controller. 
- Required properties: - bus-width: Number of data lines, can be <1>, <4>, or <8> Optional properties: -- cd-gpios: Specify GPIOs for card detection, see gpio binding -- wp-gpios: Specify GPIOs for write protection, see gpio binding -- cd-inverted: when present, polarity on the cd gpio line is inverted +- cd-gpios : Specify GPIOs for card detection, see gpio binding +- wp-gpios : Specify GPIOs for write protection, see gpio binding +- cd-inverted: when present, polarity on the wp gpio line is inverted - wp-inverted: when present, polarity on the wp gpio line is inverted - non-removable: non-removable slot (like eMMC) - max-frequency: maximum operating clock frequency diff --git a/trunk/Documentation/devicetree/bindings/mmc/mmci.txt b/trunk/Documentation/devicetree/bindings/mmc/mmci.txt index 2b584cae352a..14a81d526118 100644 --- a/trunk/Documentation/devicetree/bindings/mmc/mmci.txt +++ b/trunk/Documentation/devicetree/bindings/mmc/mmci.txt @@ -1,15 +1,19 @@ * ARM PrimeCell MultiMedia Card Interface (MMCI) PL180/1 -The ARM PrimeCell MMCI PL180 and PL181 provides an interface for +The ARM PrimeCell MMCI PL180 and PL181 provides and interface for reading and writing to MultiMedia and SD cards alike. -This file documents differences between the core properties described -by mmc.txt and the properties used by the mmci driver. - Required properties: - compatible : contains "arm,pl18x", "arm,primecell". +- reg : contains pl18x registers and length. +- interrupts : contains the device IRQ(s). - arm,primecell-periphid : contains the PrimeCell Peripheral ID. Optional properties: +- wp-gpios : contains any write protect (ro) gpios +- cd-gpios : contains any card detection gpios +- cd-inverted : indicates whether the cd gpio is inverted +- max-frequency : contains the maximum operating frequency +- bus-width : number of data lines, can be <1>, <4>, or <8> - mmc-cap-mmc-highspeed : indicates whether MMC is high speed capable - mmc-cap-sd-highspeed : indicates whether SD is high speed capable diff --git a/trunk/Documentation/devicetree/bindings/mmc/mxs-mmc.txt b/trunk/Documentation/devicetree/bindings/mmc/mxs-mmc.txt index 54949f6faede..14d870a9e3db 100644 --- a/trunk/Documentation/devicetree/bindings/mmc/mxs-mmc.txt +++ b/trunk/Documentation/devicetree/bindings/mmc/mxs-mmc.txt @@ -3,14 +3,16 @@ The Freescale MXS Synchronous Serial Ports (SSP) can act as a MMC controller to support MMC, SD, and SDIO types of memory cards. -This file documents differences between the core properties in mmc.txt -and the properties used by the mxsmmc driver. - Required properties: - compatible: Should be "fsl,-mmc". The supported chips include imx23 and imx28. +- reg: Should contain registers location and length - interrupts: Should contain ERROR and DMA interrupts - fsl,ssp-dma-channel: APBH DMA channel for the SSP +- bus-width: Number of data lines, can be <1>, <4>, or <8> + +Optional properties: +- wp-gpios: Specify GPIOs for write protection Examples: diff --git a/trunk/Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt b/trunk/Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt index c6d7b11db9eb..f77c3031607f 100644 --- a/trunk/Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt +++ b/trunk/Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt @@ -3,13 +3,15 @@ This controller on Tegra family SoCs provides an interface for MMC, SD, and SDIO types of memory cards. -This file documents differences between the core properties described -by mmc.txt and the properties used by the sdhci-tegra driver. 
- Required properties: - compatible : Should be "nvidia,-sdhci" +- reg : Should contain SD/MMC registers location and length +- interrupts : Should contain SD/MMC interrupt +- bus-width : Number of data lines, can be <1>, <4>, or <8> Optional properties: +- cd-gpios : Specify GPIOs for card detection +- wp-gpios : Specify GPIOs for write protection - power-gpios : Specify GPIOs for power control Example: diff --git a/trunk/Documentation/devicetree/bindings/mmc/sdhci-pxa.txt b/trunk/Documentation/devicetree/bindings/mmc/sdhci-pxa.txt deleted file mode 100644 index dbe98a3c183a..000000000000 --- a/trunk/Documentation/devicetree/bindings/mmc/sdhci-pxa.txt +++ /dev/null @@ -1,21 +0,0 @@ -* Marvell sdhci-pxa v2/v3 controller - -This file documents differences between the core properties in mmc.txt -and the properties used by the sdhci-pxav2 and sdhci-pxav3 drivers. - -Required properties: -- compatible: Should be "mrvl,pxav2-mmc" or "mrvl,pxav3-mmc". - -Optional properties: -- mrvl,clk-delay-cycles: Specify a number of cycles to delay for tuning. - -Example: - -sdhci@d4280800 { - compatible = "mrvl,pxav3-mmc"; - reg = <0xd4280800 0x800>; - bus-width = <8>; - interrupts = <27>; - non-removable; - mrvl,clk-delay-cycles = <31>; -}; diff --git a/trunk/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt b/trunk/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt index be76a23b34c4..8a53958c9a9f 100644 --- a/trunk/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt +++ b/trunk/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt @@ -3,20 +3,21 @@ The Highspeed MMC Host Controller on TI OMAP family provides an interface for MMC, SD, and SDIO types of memory cards. -This file documents differences between the core properties described -by mmc.txt and the properties used by the omap_hsmmc driver. - Required properties: - compatible: Should be "ti,omap2-hsmmc", for OMAP2 controllers Should be "ti,omap3-hsmmc", for OMAP3 controllers Should be "ti,omap4-hsmmc", for OMAP4 controllers - ti,hwmods: Must be "mmc", n is controller instance starting 1 +- reg : should contain hsmmc registers location and length Optional properties: ti,dual-volt: boolean, supports dual voltage cards -supply: phandle to the regulator device tree node "supply-name" examples are "vmmc", "vmmc_aux" etc +bus-width: Number of data lines, default assumed is 1 if the property is missing. 
+cd-gpios: GPIOs for card detection +wp-gpios: GPIOs for write protection ti,non-removable: non-removable slot (like eMMC) ti,needs-special-reset: Requires a special softreset sequence diff --git a/trunk/Documentation/devicetree/bindings/pinctrl/fsl,imx6q-pinctrl.txt b/trunk/Documentation/devicetree/bindings/pinctrl/fsl,imx6q-pinctrl.txt index a4119f6422d9..82b43f915857 100644 --- a/trunk/Documentation/devicetree/bindings/pinctrl/fsl,imx6q-pinctrl.txt +++ b/trunk/Documentation/devicetree/bindings/pinctrl/fsl,imx6q-pinctrl.txt @@ -1626,5 +1626,3 @@ MX6Q_PAD_SD2_DAT3__PCIE_CTRL_MUX_11 1587 MX6Q_PAD_SD2_DAT3__GPIO_1_12 1588 MX6Q_PAD_SD2_DAT3__SJC_DONE 1589 MX6Q_PAD_SD2_DAT3__ANATOP_TESTO_3 1590 -MX6Q_PAD_ENET_RX_ER__ANATOP_USBOTG_ID 1591 -MX6Q_PAD_GPIO_1__ANATOP_USBOTG_ID 1592 diff --git a/trunk/Documentation/devicetree/bindings/regulator/fixed-regulator.txt b/trunk/Documentation/devicetree/bindings/regulator/fixed-regulator.txt index 4fae41d54798..2f5b6b1ba15f 100644 --- a/trunk/Documentation/devicetree/bindings/regulator/fixed-regulator.txt +++ b/trunk/Documentation/devicetree/bindings/regulator/fixed-regulator.txt @@ -10,7 +10,6 @@ Optional properties: If this property is missing, the default assumed is Active low. - gpio-open-drain: GPIO is open drain type. If this property is missing then default assumption is false. --vin-supply: Input supply name. Any property defined as part of the core regulator binding, defined in regulator.txt, can also be used. @@ -30,5 +29,4 @@ Example: enable-active-high; regulator-boot-on; gpio-open-drain; - vin-supply = <&parent_reg>; }; diff --git a/trunk/Documentation/devicetree/bindings/regulator/regulator.txt b/trunk/Documentation/devicetree/bindings/regulator/regulator.txt index 66ece3f87bbc..5b7a408acdaa 100644 --- a/trunk/Documentation/devicetree/bindings/regulator/regulator.txt +++ b/trunk/Documentation/devicetree/bindings/regulator/regulator.txt @@ -10,11 +10,6 @@ Optional properties: - regulator-always-on: boolean, regulator should never be disabled - regulator-boot-on: bootloader/firmware enabled regulator - -supply: phandle to the parent supply/regulator node -- regulator-ramp-delay: ramp delay for regulator(in uV/uS) -- regulator-compatible: If a regulator chip contains multiple - regulators, and if the chip's binding contains a child node that - describes each regulator, then this property indicates which regulator - this child node is intended to configure. Example: diff --git a/trunk/Documentation/devicetree/bindings/regulator/tps65217.txt b/trunk/Documentation/devicetree/bindings/regulator/tps65217.txt deleted file mode 100644 index 0487e9675ba0..000000000000 --- a/trunk/Documentation/devicetree/bindings/regulator/tps65217.txt +++ /dev/null @@ -1,91 +0,0 @@ -TPS65217 family of regulators - -Required properties: -- compatible: "ti,tps65217" -- reg: I2C slave address -- regulators: list of regulators provided by this controller, must be named - after their hardware counterparts: dcdc[1-3] and ldo[1-4] -- regulators: This is the list of child nodes that specify the regulator - initialization data for defined regulators. Not all regulators for the given - device need to be present. The definition for each of these nodes is defined - using the standard binding for regulators found at - Documentation/devicetree/bindings/regulator/regulator.txt. - - The valid names for regulators are: - tps65217: dcdc1, dcdc2, dcdc3, ldo1, ldo2, ldo3 and ldo4 - -Each regulator is defined using the standard binding for regulators. 
- -Example: - - tps: tps@24 { - compatible = "ti,tps65217"; - - regulators { - #address-cells = <1>; - #size-cells = <0>; - - dcdc1_reg: regulator@0 { - reg = <0>; - regulator-compatible = "dcdc1"; - regulator-min-microvolt = <900000>; - regulator-max-microvolt = <1800000>; - regulator-boot-on; - regulator-always-on; - }; - - dcdc2_reg: regulator@1 { - reg = <1>; - regulator-compatible = "dcdc2"; - regulator-min-microvolt = <900000>; - regulator-max-microvolt = <3300000>; - regulator-boot-on; - regulator-always-on; - }; - - dcdc3_reg: regulator@2 { - reg = <2>; - regulator-compatible = "dcdc3"; - regulator-min-microvolt = <900000>; - regulator-max-microvolt = <1500000>; - regulator-boot-on; - regulator-always-on; - }; - - ldo1_reg: regulator@3 { - reg = <3>; - regulator-compatible = "ldo1"; - regulator-min-microvolt = <1000000>; - regulator-max-microvolt = <3300000>; - regulator-boot-on; - regulator-always-on; - }; - - ldo2_reg: regulator@4 { - reg = <4>; - regulator-compatible = "ldo2"; - regulator-min-microvolt = <900000>; - regulator-max-microvolt = <3300000>; - regulator-boot-on; - regulator-always-on; - }; - - ldo3_reg: regulator@5 { - reg = <5>; - regulator-compatible = "ldo3"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <3300000>; - regulator-boot-on; - regulator-always-on; - }; - - ldo4_reg: regulator@6 { - reg = <6>; - regulator-compatible = "ldo4"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <3300000>; - regulator-boot-on; - regulator-always-on; - }; - }; - }; diff --git a/trunk/Documentation/devicetree/bindings/regulator/tps6586x.txt b/trunk/Documentation/devicetree/bindings/regulator/tps6586x.txt index d156e1b5db12..0fcabaa3baa3 100644 --- a/trunk/Documentation/devicetree/bindings/regulator/tps6586x.txt +++ b/trunk/Documentation/devicetree/bindings/regulator/tps6586x.txt @@ -6,17 +6,8 @@ Required properties: - interrupts: the interrupt outputs of the controller - #gpio-cells: number of cells to describe a GPIO - gpio-controller: mark the device as a GPIO controller -- regulators: list of regulators provided by this controller, must have - property "regulator-compatible" to match their hardware counterparts: - sm[0-2], ldo[0-9] and ldo_rtc -- sm0-supply: The input supply for the SM0. -- sm1-supply: The input supply for the SM1. -- sm2-supply: The input supply for the SM2. -- vinldo01-supply: The input supply for the LDO1 and LDO2 -- vinldo23-supply: The input supply for the LDO2 and LDO3 -- vinldo4-supply: The input supply for the LDO4 -- vinldo678-supply: The input supply for the LDO6, LDO7 and LDO8 -- vinldo9-supply: The input supply for the LDO9 +- regulators: list of regulators provided by this controller, must be named + after their hardware counterparts: sm[0-2], ldo[0-9] and ldo_rtc Each regulator is defined using the standard binding for regulators. 
@@ -30,113 +21,75 @@ Example: #gpio-cells = <2>; gpio-controller; - sm0-supply = <&some_reg>; - sm1-supply = <&some_reg>; - sm2-supply = <&some_reg>; - vinldo01-supply = <...>; - vinldo23-supply = <...>; - vinldo4-supply = <...>; - vinldo678-supply = <...>; - vinldo9-supply = <...>; - regulators { - #address-cells = <1>; - #size-cells = <0>; - - sm0_reg: regulator@0 { - reg = <0>; - regulator-compatible = "sm0"; + sm0_reg: sm0 { regulator-min-microvolt = < 725000>; regulator-max-microvolt = <1500000>; regulator-boot-on; regulator-always-on; }; - sm1_reg: regulator@1 { - reg = <1>; - regulator-compatible = "sm1"; + sm1_reg: sm1 { regulator-min-microvolt = < 725000>; regulator-max-microvolt = <1500000>; regulator-boot-on; regulator-always-on; }; - sm2_reg: regulator@2 { - reg = <2>; - regulator-compatible = "sm2"; + sm2_reg: sm2 { regulator-min-microvolt = <3000000>; regulator-max-microvolt = <4550000>; regulator-boot-on; regulator-always-on; }; - ldo0_reg: regulator@3 { - reg = <3>; - regulator-compatible = "ldo0"; + ldo0_reg: ldo0 { regulator-name = "PCIE CLK"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; }; - ldo1_reg: regulator@4 { - reg = <4>; - regulator-compatible = "ldo1"; + ldo1_reg: ldo1 { regulator-min-microvolt = < 725000>; regulator-max-microvolt = <1500000>; }; - ldo2_reg: regulator@5 { - reg = <5>; - regulator-compatible = "ldo2"; + ldo2_reg: ldo2 { regulator-min-microvolt = < 725000>; regulator-max-microvolt = <1500000>; }; - ldo3_reg: regulator@6 { - reg = <6>; - regulator-compatible = "ldo3"; + ldo3_reg: ldo3 { regulator-min-microvolt = <1250000>; regulator-max-microvolt = <3300000>; }; - ldo4_reg: regulator@7 { - reg = <7>; - regulator-compatible = "ldo4"; + ldo4_reg: ldo4 { regulator-min-microvolt = <1700000>; regulator-max-microvolt = <2475000>; }; - ldo5_reg: regulator@8 { - reg = <8>; - regulator-compatible = "ldo5"; + ldo5_reg: ldo5 { regulator-min-microvolt = <1250000>; regulator-max-microvolt = <3300000>; }; - ldo6_reg: regulator@9 { - reg = <9>; - regulator-compatible = "ldo6"; + ldo6_reg: ldo6 { regulator-min-microvolt = <1250000>; regulator-max-microvolt = <3300000>; }; - ldo7_reg: regulator@10 { - reg = <10>; - regulator-compatible = "ldo7"; + ldo7_reg: ldo7 { regulator-min-microvolt = <1250000>; regulator-max-microvolt = <3300000>; }; - ldo8_reg: regulator@11 { - reg = <11>; - regulator-compatible = "ldo8"; + ldo8_reg: ldo8 { regulator-min-microvolt = <1250000>; regulator-max-microvolt = <3300000>; }; - ldo9_reg: regulator@12 { - reg = <12>; - regulator-compatible = "ldo9"; + ldo9_reg: ldo9 { regulator-min-microvolt = <1250000>; regulator-max-microvolt = <3300000>; }; diff --git a/trunk/Documentation/devicetree/bindings/regulator/twl-regulator.txt b/trunk/Documentation/devicetree/bindings/regulator/twl-regulator.txt index 658749b90b97..0c3395d55ac1 100644 --- a/trunk/Documentation/devicetree/bindings/regulator/twl-regulator.txt +++ b/trunk/Documentation/devicetree/bindings/regulator/twl-regulator.txt @@ -15,6 +15,7 @@ For twl6030 regulators/LDOs - "ti,twl6030-vusb" for VUSB LDO - "ti,twl6030-v1v8" for V1V8 LDO - "ti,twl6030-v2v1" for V2V1 LDO + - "ti,twl6030-clk32kg" for CLK32KG RESOURCE - "ti,twl6030-vdd1" for VDD1 SMPS - "ti,twl6030-vdd2" for VDD2 SMPS - "ti,twl6030-vdd3" for VDD3 SMPS diff --git a/trunk/Documentation/filesystems/Locking b/trunk/Documentation/filesystems/Locking index e0cce2a5f820..8e2da1e06e3b 100644 --- a/trunk/Documentation/filesystems/Locking +++ b/trunk/Documentation/filesystems/Locking @@ -9,7 +9,7 @@ be 
able to use diff(1). --------------------------- dentry_operations -------------------------- prototypes: - int (*d_revalidate)(struct dentry *, unsigned int); + int (*d_revalidate)(struct dentry *, struct nameidata *); int (*d_hash)(const struct dentry *, const struct inode *, struct qstr *); int (*d_compare)(const struct dentry *, const struct inode *, @@ -37,8 +37,9 @@ d_manage: no no yes (ref-walk) maybe --------------------------- inode_operations --------------------------- prototypes: - int (*create) (struct inode *,struct dentry *,umode_t, bool); - struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int); + int (*create) (struct inode *,struct dentry *,umode_t, struct nameidata *); + struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameid +ata *); int (*link) (struct dentry *,struct inode *,struct dentry *); int (*unlink) (struct inode *,struct dentry *); int (*symlink) (struct inode *,struct dentry *,const char *); @@ -61,9 +62,6 @@ prototypes: int (*removexattr) (struct dentry *, const char *); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len); void (*update_time)(struct inode *, struct timespec *, int); - int (*atomic_open)(struct inode *, struct dentry *, - struct file *, unsigned open_flag, - umode_t create_mode, int *opened); locking rules: all may block @@ -91,7 +89,6 @@ listxattr: no removexattr: yes fiemap: no update_time: no -atomic_open: yes Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on victim. diff --git a/trunk/Documentation/filesystems/porting b/trunk/Documentation/filesystems/porting index 2bef2b3843d1..8c91d1057d9a 100644 --- a/trunk/Documentation/filesystems/porting +++ b/trunk/Documentation/filesystems/porting @@ -355,10 +355,12 @@ protects *all* the dcache state of a given dentry. via rcu-walk path walk (basically, if the file can have had a path name in the vfs namespace). - Even though i_dentry and i_rcu share storage in a union, we will -initialize the former in inode_init_always(), so just leave it alone in -the callback. It used to be necessary to clean it there, but not anymore -(starting at 3.2). + i_dentry and i_rcu share storage in a union, and the vfs expects +i_dentry to be reinitialized before it is freed, so an: + + INIT_LIST_HEAD(&inode->i_dentry); + +must be done in the RCU callback. -- [recommended] @@ -431,14 +433,3 @@ release it yourself. d_alloc_root() is gone, along with a lot of bugs caused by code misusing it. Replacement: d_make_root(inode). The difference is, d_make_root() drops the reference to inode if dentry allocation fails. - --- -[mandatory] - The witch is dead! Well, 2/3 of it, anyway. ->d_revalidate() and -->lookup() do *not* take struct nameidata anymore; just the flags. --- -[mandatory] - ->create() doesn't take struct nameidata *; unlike the previous -two, it gets "is it an O_EXCL or equivalent?" boolean argument. Note that -local filesystems can ignore tha argument - they are guaranteed that the -object doesn't exist. It's remote/distributed ones that might care... diff --git a/trunk/Documentation/filesystems/vfs.txt b/trunk/Documentation/filesystems/vfs.txt index aa754e01464e..efd23f481704 100644 --- a/trunk/Documentation/filesystems/vfs.txt +++ b/trunk/Documentation/filesystems/vfs.txt @@ -341,8 +341,8 @@ This describes how the VFS can manipulate an inode in your filesystem. 
As of kernel 2.6.22, the following members are defined: struct inode_operations { - int (*create) (struct inode *,struct dentry *, umode_t, bool); - struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int); + int (*create) (struct inode *,struct dentry *, umode_t, struct nameidata *); + struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *); int (*link) (struct dentry *,struct inode *,struct dentry *); int (*unlink) (struct inode *,struct dentry *); int (*symlink) (struct inode *,struct dentry *,const char *); @@ -364,9 +364,6 @@ struct inode_operations { ssize_t (*listxattr) (struct dentry *, char *, size_t); int (*removexattr) (struct dentry *, const char *); void (*update_time)(struct inode *, struct timespec *, int); - int (*atomic_open)(struct inode *, struct dentry *, - struct file *, unsigned open_flag, - umode_t create_mode, int *opened); }; Again, all methods are called without any locks being held, unless @@ -479,14 +476,6 @@ otherwise noted. an inode. If this is not defined the VFS will update the inode itself and call mark_inode_dirty_sync. - atomic_open: called on the last component of an open. Using this optional - method the filesystem can look up, possibly create and open the file in - one atomic operation. If it cannot perform this (e.g. the file type - turned out to be wrong) it may signal this by returning 1 instead of - usual 0 or -ve . This method is only called if the last - component is negative or needs lookup. Cached positive dentries are - still handled by f_op->open(). - The Address Space Object ======================== @@ -902,7 +891,7 @@ the VFS uses a default. As of kernel 2.6.22, the following members are defined: struct dentry_operations { - int (*d_revalidate)(struct dentry *, unsigned int); + int (*d_revalidate)(struct dentry *, struct nameidata *); int (*d_hash)(const struct dentry *, const struct inode *, struct qstr *); int (*d_compare)(const struct dentry *, const struct inode *, @@ -921,11 +910,11 @@ struct dentry_operations { dcache. Most filesystems leave this as NULL, because all their dentries in the dcache are valid - d_revalidate may be called in rcu-walk mode (flags & LOOKUP_RCU). + d_revalidate may be called in rcu-walk mode (nd->flags & LOOKUP_RCU). If in rcu-walk mode, the filesystem must revalidate the dentry without blocking or storing to the dentry, d_parent and d_inode should not be - used without care (because they can change and, in d_inode case, even - become NULL under us). + used without care (because they can go NULL), instead nd->inode should + be used. If a situation is encountered that rcu-walk cannot handle, return -ECHILD and it will be called again in ref-walk mode. diff --git a/trunk/Documentation/kdump/kdump.txt b/trunk/Documentation/kdump/kdump.txt index 13f1aa09b938..506c7390c2b9 100644 --- a/trunk/Documentation/kdump/kdump.txt +++ b/trunk/Documentation/kdump/kdump.txt @@ -86,7 +86,7 @@ There is also a gitweb interface available at http://www.kernel.org/git/?p=utils/kernel/kexec/kexec-tools.git More information about kexec-tools can be found at -http://horms.net/projects/kexec/ +http://www.kernel.org/pub/linux/utils/kernel/kexec/README.html 3) Unpack the tarball with the tar command, as follows: diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt index 12783fa833c3..a92c5ebf373e 100644 --- a/trunk/Documentation/kernel-parameters.txt +++ b/trunk/Documentation/kernel-parameters.txt @@ -2367,11 +2367,6 @@ bytes respectively. 
Such letter suffixes can also be entirely omitted. Set maximum number of finished RCU callbacks to process in one batch. - rcutree.fanout_leaf= [KNL,BOOT] - Increase the number of CPUs assigned to each - leaf rcu_node structure. Useful for very large - systems. - rcutree.qhimark= [KNL,BOOT] Set threshold of queued RCU callbacks over which batch limiting is disabled. diff --git a/trunk/Documentation/power/devices.txt b/trunk/Documentation/power/devices.txt index 504dfe4d52eb..872815cd41d3 100644 --- a/trunk/Documentation/power/devices.txt +++ b/trunk/Documentation/power/devices.txt @@ -583,10 +583,9 @@ for the given device during all power transitions, instead of the respective subsystem-level callbacks. Specifically, if a device's pm_domain pointer is not NULL, the ->suspend() callback from the object pointed to by it will be executed instead of its subsystem's (e.g. bus type's) ->suspend() callback and -analogously for all of the remaining callbacks. In other words, power -management domain callbacks, if defined for the given device, always take -precedence over the callbacks provided by the device's subsystem (e.g. bus -type). +anlogously for all of the remaining callbacks. In other words, power management +domain callbacks, if defined for the given device, always take precedence over +the callbacks provided by the device's subsystem (e.g. bus type). The support for device power management domains is only relevant to platforms needing to use the same device driver power management callbacks in many @@ -599,7 +598,7 @@ it into account in any way. Device Low Power (suspend) States --------------------------------- Device low-power states aren't standard. One device might only handle -"on" and "off", while another might support a dozen different versions of +"on" and "off, while another might support a dozen different versions of "on" (how many engines are active?), plus a state that gets back to "on" faster than from a full "off". diff --git a/trunk/Documentation/power/swsusp.txt b/trunk/Documentation/power/swsusp.txt index 92341b84250d..ac190cf1963e 100644 --- a/trunk/Documentation/power/swsusp.txt +++ b/trunk/Documentation/power/swsusp.txt @@ -33,11 +33,6 @@ echo shutdown > /sys/power/disk; echo disk > /sys/power/state echo platform > /sys/power/disk; echo disk > /sys/power/state -. If you would like to write hibernation image to swap and then suspend -to RAM (provided your platform supports it), you can try - -echo suspend > /sys/power/disk; echo disk > /sys/power/state - . If you have SATA disks, you'll need recent kernels with SATA suspend support. For suspend and resume to work, make sure your disk drivers are built into kernel -- not modules. 
[There's way to make diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index fe643e7b9df6..1b71f6ceae0a 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -3433,14 +3433,13 @@ S: Supported F: drivers/idle/i7300_idle.c IEEE 802.15.4 SUBSYSTEM -M: Alexander Smirnov M: Dmitry Eremin-Solenikov +M: Sergey Lapin L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers) W: http://apps.sourceforge.net/trac/linux-zigbee T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git S: Maintained F: net/ieee802154/ -F: net/mac802154/ F: drivers/ieee802154/ IIO SUBSYSTEM AND DRIVERS @@ -5565,7 +5564,7 @@ F: Documentation/networking/LICENSE.qla3xxx F: drivers/net/ethernet/qlogic/qla3xxx.* QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER -M: Jitendra Kalsaria +M: Anirban Chakraborty M: Sony Chacko M: linux-driver@qlogic.com L: netdev@vger.kernel.org @@ -5573,6 +5572,7 @@ S: Supported F: drivers/net/ethernet/qlogic/qlcnic/ QLOGIC QLGE 10Gb ETHERNET DRIVER +M: Anirban Chakraborty M: Jitendra Kalsaria M: Ron Mercer M: linux-driver@qlogic.com diff --git a/trunk/Makefile b/trunk/Makefile index 4bb09e1b1230..aa8e315f26ef 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 5 SUBLEVEL = 0 -EXTRAVERSION = +EXTRAVERSION = -rc7 NAME = Saber-toothed Squirrel # *DOCUMENTATION* diff --git a/trunk/arch/arm/boot/dts/db8500.dtsi b/trunk/arch/arm/boot/dts/db8500.dtsi index ec2be92b270d..4ad5160018cb 100644 --- a/trunk/arch/arm/boot/dts/db8500.dtsi +++ b/trunk/arch/arm/boot/dts/db8500.dtsi @@ -206,74 +206,62 @@ // DB8500_REGULATOR_VAPE db8500_vape_reg: db8500_vape { - regulator-compatible = "db8500_vape"; regulator-name = "db8500-vape"; regulator-always-on; }; // DB8500_REGULATOR_VARM db8500_varm_reg: db8500_varm { - regulator-compatible = "db8500_varm"; regulator-name = "db8500-varm"; }; // DB8500_REGULATOR_VMODEM db8500_vmodem_reg: db8500_vmodem { - regulator-compatible = "db8500_vmodem"; regulator-name = "db8500-vmodem"; }; // DB8500_REGULATOR_VPLL db8500_vpll_reg: db8500_vpll { - regulator-compatible = "db8500_vpll"; regulator-name = "db8500-vpll"; }; // DB8500_REGULATOR_VSMPS1 db8500_vsmps1_reg: db8500_vsmps1 { - regulator-compatible = "db8500_vsmps1"; regulator-name = "db8500-vsmps1"; }; // DB8500_REGULATOR_VSMPS2 db8500_vsmps2_reg: db8500_vsmps2 { - regulator-compatible = "db8500_vsmps2"; regulator-name = "db8500-vsmps2"; }; // DB8500_REGULATOR_VSMPS3 db8500_vsmps3_reg: db8500_vsmps3 { - regulator-compatible = "db8500_vsmps3"; regulator-name = "db8500-vsmps3"; }; // DB8500_REGULATOR_VRF1 db8500_vrf1_reg: db8500_vrf1 { - regulator-compatible = "db8500_vrf1"; regulator-name = "db8500-vrf1"; }; // DB8500_REGULATOR_SWITCH_SVAMMDSP db8500_sva_mmdsp_reg: db8500_sva_mmdsp { - regulator-compatible = "db8500_sva_mmdsp"; regulator-name = "db8500-sva-mmdsp"; }; // DB8500_REGULATOR_SWITCH_SVAMMDSPRET db8500_sva_mmdsp_ret_reg: db8500_sva_mmdsp_ret { - regulator-compatible = "db8500_sva_mmdsp_ret"; regulator-name = "db8500-sva-mmdsp-ret"; }; // DB8500_REGULATOR_SWITCH_SVAPIPE db8500_sva_pipe_reg: db8500_sva_pipe { - regulator-compatible = "db8500_sva_pipe"; regulator-name = "db8500_sva_pipe"; }; // DB8500_REGULATOR_SWITCH_SIAMMDSP db8500_sia_mmdsp_reg: db8500_sia_mmdsp { - regulator-compatible = "db8500_sia_mmdsp"; regulator-name = "db8500_sia_mmdsp"; }; @@ -284,45 +272,38 @@ // DB8500_REGULATOR_SWITCH_SIAPIPE db8500_sia_pipe_reg: db8500_sia_pipe { - regulator-compatible = "db8500_sia_pipe"; regulator-name = "db8500-sia-pipe"; }; // DB8500_REGULATOR_SWITCH_SGA 
db8500_sga_reg: db8500_sga { - regulator-compatible = "db8500_sga"; regulator-name = "db8500-sga"; vin-supply = <&db8500_vape_reg>; }; // DB8500_REGULATOR_SWITCH_B2R2_MCDE db8500_b2r2_mcde_reg: db8500_b2r2_mcde { - regulator-compatible = "db8500_b2r2_mcde"; regulator-name = "db8500-b2r2-mcde"; vin-supply = <&db8500_vape_reg>; }; // DB8500_REGULATOR_SWITCH_ESRAM12 db8500_esram12_reg: db8500_esram12 { - regulator-compatible = "db8500_esram12"; regulator-name = "db8500-esram12"; }; // DB8500_REGULATOR_SWITCH_ESRAM12RET db8500_esram12_ret_reg: db8500_esram12_ret { - regulator-compatible = "db8500_esram12_ret"; regulator-name = "db8500-esram12-ret"; }; // DB8500_REGULATOR_SWITCH_ESRAM34 db8500_esram34_reg: db8500_esram34 { - regulator-compatible = "db8500_esram34"; regulator-name = "db8500-esram34"; }; // DB8500_REGULATOR_SWITCH_ESRAM34RET db8500_esram34_ret_reg: db8500_esram34_ret { - regulator-compatible = "db8500_esram34_ret"; regulator-name = "db8500-esram34-ret"; }; }; @@ -337,7 +318,6 @@ // supplies to the display/camera ab8500_ldo_aux1_reg: ab8500_ldo_aux1 { - regulator-compatible = "ab8500_ldo_aux1"; regulator-name = "V-DISPLAY"; regulator-min-microvolt = <2500000>; regulator-max-microvolt = <2900000>; @@ -348,7 +328,6 @@ // supplies to the on-board eMMC ab8500_ldo_aux2_reg: ab8500_ldo_aux2 { - regulator-compatible = "ab8500_ldo_aux2"; regulator-name = "V-eMMC1"; regulator-min-microvolt = <1100000>; regulator-max-microvolt = <3300000>; @@ -356,7 +335,6 @@ // supply for VAUX3; SDcard slots ab8500_ldo_aux3_reg: ab8500_ldo_aux3 { - regulator-compatible = "ab8500_ldo_aux3"; regulator-name = "V-MMC-SD"; regulator-min-microvolt = <1100000>; regulator-max-microvolt = <3300000>; @@ -364,49 +342,41 @@ // supply for v-intcore12; VINTCORE12 LDO ab8500_ldo_initcore_reg: ab8500_ldo_initcore { - regulator-compatible = "ab8500_ldo_initcore"; regulator-name = "V-INTCORE"; }; // supply for tvout; gpadc; TVOUT LDO ab8500_ldo_tvout_reg: ab8500_ldo_tvout { - regulator-compatible = "ab8500_ldo_tvout"; regulator-name = "V-TVOUT"; }; // supply for ab8500-usb; USB LDO ab8500_ldo_usb_reg: ab8500_ldo_usb { - regulator-compatible = "ab8500_ldo_usb"; regulator-name = "dummy"; }; // supply for ab8500-vaudio; VAUDIO LDO ab8500_ldo_audio_reg: ab8500_ldo_audio { - regulator-compatible = "ab8500_ldo_audio"; regulator-name = "V-AUD"; }; // supply for v-anamic1 VAMic1-LDO ab8500_ldo_anamic1_reg: ab8500_ldo_anamic1 { - regulator-compatible = "ab8500_ldo_anamic1"; regulator-name = "V-AMIC1"; }; // supply for v-amic2; VAMIC2 LDO; reuse constants for AMIC1 ab8500_ldo_amamic2_reg: ab8500_ldo_amamic2 { - regulator-compatible = "ab8500_ldo_amamic2"; regulator-name = "V-AMIC2"; }; // supply for v-dmic; VDMIC LDO ab8500_ldo_dmic_reg: ab8500_ldo_dmic { - regulator-compatible = "ab8500_ldo_dmic"; regulator-name = "V-DMIC"; }; // supply for U8500 CSI/DSI; VANA LDO ab8500_ldo_ana_reg: ab8500_ldo_ana { - regulator-compatible = "ab8500_ldo_ana"; regulator-name = "V-CSI/DSI"; }; }; diff --git a/trunk/arch/arm/boot/dts/spear13xx.dtsi b/trunk/arch/arm/boot/dts/spear13xx.dtsi index f7b84aced654..10dcec7e7321 100644 --- a/trunk/arch/arm/boot/dts/spear13xx.dtsi +++ b/trunk/arch/arm/boot/dts/spear13xx.dtsi @@ -43,8 +43,8 @@ pmu { compatible = "arm,cortex-a9-pmu"; - interrupts = <0 6 0x04 - 0 7 0x04>; + interrupts = <0 8 0x04 + 0 9 0x04>; }; L2: l2-cache { @@ -119,8 +119,8 @@ gmac0: eth@e2000000 { compatible = "st,spear600-gmac"; reg = <0xe2000000 0x8000>; - interrupts = <0 33 0x4 - 0 34 0x4>; + interrupts = <0 23 0x4 + 0 24 0x4>; 
interrupt-names = "macirq", "eth_wake_irq"; status = "disabled"; }; @@ -202,7 +202,6 @@ kbd@e0300000 { compatible = "st,spear300-kbd"; reg = <0xe0300000 0x1000>; - interrupts = <0 52 0x4>; status = "disabled"; }; @@ -225,7 +224,7 @@ serial@e0000000 { compatible = "arm,pl011", "arm,primecell"; reg = <0xe0000000 0x1000>; - interrupts = <0 35 0x4>; + interrupts = <0 36 0x4>; status = "disabled"; }; diff --git a/trunk/arch/arm/boot/dts/spear320-evb.dts b/trunk/arch/arm/boot/dts/spear320-evb.dts index e4e912f95024..c13fd1f3b09f 100644 --- a/trunk/arch/arm/boot/dts/spear320-evb.dts +++ b/trunk/arch/arm/boot/dts/spear320-evb.dts @@ -15,8 +15,8 @@ /include/ "spear320.dtsi" / { - model = "ST SPEAr320 Evaluation Board"; - compatible = "st,spear320-evb", "st,spear320"; + model = "ST SPEAr300 Evaluation Board"; + compatible = "st,spear300-evb", "st,spear300"; #address-cells = <1>; #size-cells = <1>; @@ -26,7 +26,7 @@ ahb { pinmux@b3000000 { - st,pinmux-mode = <4>; + st,pinmux-mode = <3>; pinctrl-names = "default"; pinctrl-0 = <&state_default>; diff --git a/trunk/arch/arm/boot/dts/spear600.dtsi b/trunk/arch/arm/boot/dts/spear600.dtsi index a3c36e47d7ef..089f0a42c50e 100644 --- a/trunk/arch/arm/boot/dts/spear600.dtsi +++ b/trunk/arch/arm/boot/dts/spear600.dtsi @@ -181,7 +181,6 @@ timer@f0000000 { compatible = "st,spear-timer"; reg = <0xf0000000 0x400>; - interrupt-parent = <&vic0>; interrupts = <16>; }; }; diff --git a/trunk/arch/arm/mach-spear3xx/spear3xx.c b/trunk/arch/arm/mach-spear3xx/spear3xx.c index 66db5f13af84..0f41bd1c47c3 100644 --- a/trunk/arch/arm/mach-spear3xx/spear3xx.c +++ b/trunk/arch/arm/mach-spear3xx/spear3xx.c @@ -87,7 +87,7 @@ void __init spear3xx_map_io(void) static void __init spear3xx_timer_init(void) { - char pclk_name[] = "pll3_clk"; + char pclk_name[] = "pll3_48m_clk"; struct clk *gpt_clk, *pclk; spear3xx_clk_init(); diff --git a/trunk/arch/arm/mach-spear6xx/spear6xx.c b/trunk/arch/arm/mach-spear6xx/spear6xx.c index 9af67d003c62..2e2e3596583e 100644 --- a/trunk/arch/arm/mach-spear6xx/spear6xx.c +++ b/trunk/arch/arm/mach-spear6xx/spear6xx.c @@ -423,7 +423,7 @@ void __init spear6xx_map_io(void) static void __init spear6xx_timer_init(void) { - char pclk_name[] = "pll3_clk"; + char pclk_name[] = "pll3_48m_clk"; struct clk *gpt_clk, *pclk; spear6xx_clk_init(); diff --git a/trunk/arch/arm/mm/dma-mapping.c b/trunk/arch/arm/mm/dma-mapping.c index 655878bcc96d..4044abcf6f9d 100644 --- a/trunk/arch/arm/mm/dma-mapping.c +++ b/trunk/arch/arm/mm/dma-mapping.c @@ -1091,7 +1091,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t while (--i) if (pages[i]) __free_pages(pages[i], 0); - if (array_size <= PAGE_SIZE) + if (array_size < PAGE_SIZE) kfree(pages); else vfree(pages); @@ -1106,7 +1106,7 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t s for (i = 0; i < count; i++) if (pages[i]) __free_pages(pages[i], 0); - if (array_size <= PAGE_SIZE) + if (array_size < PAGE_SIZE) kfree(pages); else vfree(pages); diff --git a/trunk/arch/hexagon/kernel/smp.c b/trunk/arch/hexagon/kernel/smp.c index 149fbefc1a4d..f7264621e58d 100644 --- a/trunk/arch/hexagon/kernel/smp.c +++ b/trunk/arch/hexagon/kernel/smp.c @@ -180,7 +180,9 @@ void __cpuinit start_secondary(void) notify_cpu_starting(cpu); + ipi_call_lock(); set_cpu_online(cpu, true); + ipi_call_unlock(); local_irq_enable(); diff --git a/trunk/arch/ia64/kernel/smpboot.c b/trunk/arch/ia64/kernel/smpboot.c index 963d2db53bfa..1113b8aba07f 100644 --- a/trunk/arch/ia64/kernel/smpboot.c +++ 
b/trunk/arch/ia64/kernel/smpboot.c @@ -382,6 +382,7 @@ smp_callin (void) set_numa_node(cpu_to_node_map[cpuid]); set_numa_mem(local_memory_node(cpu_to_node_map[cpuid])); + ipi_call_lock_irq(); spin_lock(&vector_lock); /* Setup the per cpu irq handling data structures */ __setup_vector_irq(cpuid); @@ -389,6 +390,7 @@ smp_callin (void) set_cpu_online(cpuid, true); per_cpu(cpu_state, cpuid) = CPU_ONLINE; spin_unlock(&vector_lock); + ipi_call_unlock_irq(); smp_setup_percpu_timer(); diff --git a/trunk/arch/m32r/boot/compressed/Makefile b/trunk/arch/m32r/boot/compressed/Makefile index 01729c2979ba..177716b1d613 100644 --- a/trunk/arch/m32r/boot/compressed/Makefile +++ b/trunk/arch/m32r/boot/compressed/Makefile @@ -43,9 +43,9 @@ endif OBJCOPYFLAGS += -R .empty_zero_page -suffix-$(CONFIG_KERNEL_GZIP) = gz -suffix-$(CONFIG_KERNEL_BZIP2) = bz2 -suffix-$(CONFIG_KERNEL_LZMA) = lzma +suffix_$(CONFIG_KERNEL_GZIP) = gz +suffix_$(CONFIG_KERNEL_BZIP2) = bz2 +suffix_$(CONFIG_KERNEL_LZMA) = lzma $(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y) FORCE $(call if_changed,ld) diff --git a/trunk/arch/m32r/boot/compressed/misc.c b/trunk/arch/m32r/boot/compressed/misc.c index 28a09529f206..370d60881977 100644 --- a/trunk/arch/m32r/boot/compressed/misc.c +++ b/trunk/arch/m32r/boot/compressed/misc.c @@ -28,7 +28,7 @@ static unsigned long free_mem_ptr; static unsigned long free_mem_end_ptr; #ifdef CONFIG_KERNEL_BZIP2 -void *memset(void *s, int c, size_t n) +static void *memset(void *s, int c, size_t n) { char *ss = s; @@ -39,16 +39,6 @@ void *memset(void *s, int c, size_t n) #endif #ifdef CONFIG_KERNEL_GZIP -void *memcpy(void *dest, const void *src, size_t n) -{ - char *d = dest; - const char *s = src; - while (n--) - *d++ = *s++; - - return dest; -} - #define BOOT_HEAP_SIZE 0x10000 #include "../../../../lib/decompress_inflate.c" #endif diff --git a/trunk/arch/m32r/include/asm/ptrace.h b/trunk/arch/m32r/include/asm/ptrace.h index 4313aa62b51b..527527584dd0 100644 --- a/trunk/arch/m32r/include/asm/ptrace.h +++ b/trunk/arch/m32r/include/asm/ptrace.h @@ -113,6 +113,9 @@ struct pt_regs { #define PTRACE_OLDSETOPTIONS 21 +/* options set using PTRACE_SETOPTIONS */ +#define PTRACE_O_TRACESYSGOOD 0x00000001 + #ifdef __KERNEL__ #include /* M32R_PSW_BSM, M32R_PSW_BPM */ diff --git a/trunk/arch/m32r/include/asm/smp.h b/trunk/arch/m32r/include/asm/smp.h index c689b828dfe2..cf7829a61551 100644 --- a/trunk/arch/m32r/include/asm/smp.h +++ b/trunk/arch/m32r/include/asm/smp.h @@ -79,6 +79,11 @@ static __inline__ int cpu_number_map(int cpu) return cpu; } +static __inline__ unsigned int num_booting_cpus(void) +{ + return cpumask_weight(&cpu_callout_map); +} + extern void smp_send_timer(void); extern unsigned long send_IPI_mask_phys(const cpumask_t*, int, int); diff --git a/trunk/arch/m32r/kernel/ptrace.c b/trunk/arch/m32r/kernel/ptrace.c index 51f5e9aa4901..4c03361537aa 100644 --- a/trunk/arch/m32r/kernel/ptrace.c +++ b/trunk/arch/m32r/kernel/ptrace.c @@ -591,16 +591,17 @@ void user_enable_single_step(struct task_struct *child) if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0) != sizeof(insn)) - return; + return -EIO; compute_next_pc(insn, pc, &next_pc, child); if (next_pc & 0x80000000) - return; + return -EIO; if (embed_debug_trap(child, next_pc)) - return; + return -EIO; invalidate_cache(); + return 0; } void user_disable_single_step(struct task_struct *child) diff --git a/trunk/arch/m32r/kernel/signal.c b/trunk/arch/m32r/kernel/signal.c index d0f60b97bbc5..f3fb2c029cfc 100644 --- 
a/trunk/arch/m32r/kernel/signal.c +++ b/trunk/arch/m32r/kernel/signal.c @@ -286,7 +286,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, case -ERESTARTNOINTR: regs->r0 = regs->orig_r0; if (prev_insn(regs) < 0) - return; + return -EFAULT; } } diff --git a/trunk/arch/mips/Kconfig b/trunk/arch/mips/Kconfig index b3e10fdd3898..09ab87ee6fef 100644 --- a/trunk/arch/mips/Kconfig +++ b/trunk/arch/mips/Kconfig @@ -288,7 +288,6 @@ config MIPS_MALTA select SYS_HAS_CPU_MIPS32_R1 select SYS_HAS_CPU_MIPS32_R2 select SYS_HAS_CPU_MIPS64_R1 - select SYS_HAS_CPU_MIPS64_R2 select SYS_HAS_CPU_NEVADA select SYS_HAS_CPU_RM7000 select SYS_HAS_EARLY_PRINTK @@ -1424,7 +1423,6 @@ config CPU_SB1 config CPU_CAVIUM_OCTEON bool "Cavium Octeon processor" depends on SYS_HAS_CPU_CAVIUM_OCTEON - select ARCH_SPARSEMEM_ENABLE select CPU_HAS_PREFETCH select CPU_SUPPORTS_64BIT_KERNEL select SYS_SUPPORTS_SMP diff --git a/trunk/arch/mips/bcm47xx/Kconfig b/trunk/arch/mips/bcm47xx/Kconfig index b311be45a720..6210b8d84109 100644 --- a/trunk/arch/mips/bcm47xx/Kconfig +++ b/trunk/arch/mips/bcm47xx/Kconfig @@ -21,7 +21,6 @@ config BCM47XX_BCMA select BCMA select BCMA_HOST_SOC select BCMA_DRIVER_MIPS - select BCMA_HOST_PCI if PCI select BCMA_DRIVER_PCI_HOSTMODE if PCI default y help diff --git a/trunk/arch/mips/bcm63xx/dev-pcmcia.c b/trunk/arch/mips/bcm63xx/dev-pcmcia.c index a551bab5ecb9..de4d917fd54d 100644 --- a/trunk/arch/mips/bcm63xx/dev-pcmcia.c +++ b/trunk/arch/mips/bcm63xx/dev-pcmcia.c @@ -79,11 +79,11 @@ static int __init config_pcmcia_cs(unsigned int cs, return ret; } -static const struct { +static const __initdata struct { unsigned int cs; unsigned int base; unsigned int size; -} pcmcia_cs[3] __initconst = { +} pcmcia_cs[3] = { { .cs = MPI_CS_PCMCIA_COMMON, .base = BCM_PCMCIA_COMMON_BASE_PA, diff --git a/trunk/arch/mips/cavium-octeon/Kconfig b/trunk/arch/mips/cavium-octeon/Kconfig index 2f4f6d5e05b6..f9e275a50d98 100644 --- a/trunk/arch/mips/cavium-octeon/Kconfig +++ b/trunk/arch/mips/cavium-octeon/Kconfig @@ -82,6 +82,10 @@ config CAVIUM_OCTEON_LOCK_L2_MEMCPY help Lock the kernel's implementation of memcpy() into L2. +config ARCH_SPARSEMEM_ENABLE + def_bool y + select SPARSEMEM_STATIC + config IOMMU_HELPER bool diff --git a/trunk/arch/mips/cavium-octeon/smp.c b/trunk/arch/mips/cavium-octeon/smp.c index ee1fb9f7f517..4b93048044eb 100644 --- a/trunk/arch/mips/cavium-octeon/smp.c +++ b/trunk/arch/mips/cavium-octeon/smp.c @@ -185,6 +185,7 @@ static void __cpuinit octeon_init_secondary(void) octeon_init_cvmcount(); octeon_irq_setup_secondary(); + raw_local_irq_enable(); } /** @@ -232,7 +233,6 @@ static void octeon_smp_finish(void) /* to generate the first CPU timer interrupt */ write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ); - local_irq_enable(); } /** diff --git a/trunk/arch/mips/include/asm/bitops.h b/trunk/arch/mips/include/asm/bitops.h index 82ad35ce2b45..2e1ad4c652b7 100644 --- a/trunk/arch/mips/include/asm/bitops.h +++ b/trunk/arch/mips/include/asm/bitops.h @@ -17,6 +17,7 @@ #include #include #include +#include #include /* sigh ... 
*/ #include #include diff --git a/trunk/arch/mips/include/asm/cmpxchg.h b/trunk/arch/mips/include/asm/cmpxchg.h index eee10dc07ac1..285a41fa0b18 100644 --- a/trunk/arch/mips/include/asm/cmpxchg.h +++ b/trunk/arch/mips/include/asm/cmpxchg.h @@ -8,7 +8,6 @@ #ifndef __ASM_CMPXCHG_H #define __ASM_CMPXCHG_H -#include #include #include diff --git a/trunk/arch/mips/include/asm/cpu.h b/trunk/arch/mips/include/asm/cpu.h index 95e40c1e8ed1..f9fa2a479dd0 100644 --- a/trunk/arch/mips/include/asm/cpu.h +++ b/trunk/arch/mips/include/asm/cpu.h @@ -94,7 +94,6 @@ #define PRID_IMP_24KE 0x9600 #define PRID_IMP_74K 0x9700 #define PRID_IMP_1004K 0x9900 -#define PRID_IMP_M14KC 0x9c00 /* * These are the PRID's for when 23:16 == PRID_COMP_SIBYTE @@ -261,12 +260,12 @@ enum cpu_type_enum { */ CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K, CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350, - CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_M14KC, + CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, /* * MIPS64 class processors */ - CPU_5KC, CPU_5KE, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2, + CPU_5KC, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2, CPU_XLR, CPU_XLP, @@ -289,7 +288,7 @@ enum cpu_type_enum { #define MIPS_CPU_ISA_M64R2 0x00000100 #define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_I | MIPS_CPU_ISA_II | \ - MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2) + MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 ) #define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \ MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2) diff --git a/trunk/arch/mips/include/asm/gic.h b/trunk/arch/mips/include/asm/gic.h index 991b659e2548..86548da650e7 100644 --- a/trunk/arch/mips/include/asm/gic.h +++ b/trunk/arch/mips/include/asm/gic.h @@ -206,7 +206,7 @@ #define GIC_VPE_EIC_SHADOW_SET_BASE 0x0100 #define GIC_VPE_EIC_SS(intr) \ - (GIC_VPE_EIC_SHADOW_SET_BASE + (4 * intr)) + (GIC_EIC_SHADOW_SET_BASE + (4 * intr)) #define GIC_VPE_EIC_VEC_BASE 0x0800 #define GIC_VPE_EIC_VEC(intr) \ @@ -330,17 +330,6 @@ struct gic_intr_map { #define GIC_FLAG_TRANSPARENT 0x02 }; -/* - * This is only used in EIC mode. This helps to figure out which - * shared interrupts we need to process when we get a vector interrupt. 
- */ -#define GIC_MAX_SHARED_INTR 0x5 -struct gic_shared_intr_map { - unsigned int num_shared_intr; - unsigned int intr_list[GIC_MAX_SHARED_INTR]; - unsigned int local_intr_mask; -}; - extern void gic_init(unsigned long gic_base_addr, unsigned long gic_addrspace_size, struct gic_intr_map *intrmap, unsigned int intrmap_size, unsigned int irqbase); @@ -349,7 +338,5 @@ extern unsigned int gic_get_int(void); extern void gic_send_ipi(unsigned int intr); extern unsigned int plat_ipi_call_int_xlate(unsigned int); extern unsigned int plat_ipi_resched_int_xlate(unsigned int); -extern void gic_bind_eic_interrupt(int irq, int set); -extern unsigned int gic_get_timer_pending(void); #endif /* _ASM_GICREGS_H */ diff --git a/trunk/arch/mips/include/asm/inst.h b/trunk/arch/mips/include/asm/inst.h index ab84064283db..7ebfc392e58d 100644 --- a/trunk/arch/mips/include/asm/inst.h +++ b/trunk/arch/mips/include/asm/inst.h @@ -251,7 +251,7 @@ struct f_format { /* FPU register format */ unsigned int func : 6; }; -struct ma_format { /* FPU multiply and add format (MIPS IV) */ +struct ma_format { /* FPU multipy and add format (MIPS IV) */ unsigned int opcode : 6; unsigned int fr : 5; unsigned int ft : 5; @@ -324,7 +324,7 @@ struct f_format { /* FPU register format */ unsigned int opcode : 6; }; -struct ma_format { /* FPU multiply and add format (MIPS IV) */ +struct ma_format { /* FPU multipy and add format (MIPS IV) */ unsigned int fmt : 2; unsigned int func : 4; unsigned int fd : 5; diff --git a/trunk/arch/mips/include/asm/io.h b/trunk/arch/mips/include/asm/io.h index 29d9c23c20c7..a58f22998a86 100644 --- a/trunk/arch/mips/include/asm/io.h +++ b/trunk/arch/mips/include/asm/io.h @@ -17,7 +17,6 @@ #include #include -#include #include #include #include diff --git a/trunk/arch/mips/include/asm/irq.h b/trunk/arch/mips/include/asm/irq.h index 78dbb8a86da2..fb698dc09bc9 100644 --- a/trunk/arch/mips/include/asm/irq.h +++ b/trunk/arch/mips/include/asm/irq.h @@ -136,7 +136,6 @@ extern void free_irqno(unsigned int irq); * IE7. Since R2 their number has to be read from the c0_intctl register. */ #define CP0_LEGACY_COMPARE_IRQ 7 -#define CP0_LEGACY_PERFCNT_IRQ 7 extern int cp0_compare_irq; extern int cp0_compare_irq_shift; diff --git a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h index fdcd78ca1b03..94d4faad29a1 100644 --- a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h +++ b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h @@ -99,7 +99,7 @@ #define CKCTL_6368_USBH_CLK_EN (1 << 15) #define CKCTL_6368_DISABLE_GLESS_EN (1 << 16) #define CKCTL_6368_NAND_CLK_EN (1 << 17) -#define CKCTL_6368_IPSEC_CLK_EN (1 << 18) +#define CKCTL_6368_IPSEC_CLK_EN (1 << 17) #define CKCTL_6368_ALL_SAFE_EN (CKCTL_6368_SWPKT_USB_EN | \ CKCTL_6368_SWPKT_SAR_EN | \ diff --git a/trunk/arch/mips/include/asm/mips-boards/maltaint.h b/trunk/arch/mips/include/asm/mips-boards/maltaint.h index 5447d9fc4219..d11aa02a956a 100644 --- a/trunk/arch/mips/include/asm/mips-boards/maltaint.h +++ b/trunk/arch/mips/include/asm/mips-boards/maltaint.h @@ -86,16 +86,6 @@ #define GIC_CPU_INT4 4 /* . 
*/ #define GIC_CPU_INT5 5 /* Core Interrupt 5 */ -/* MALTA GIC local interrupts */ -#define GIC_INT_TMR (GIC_CPU_INT5) -#define GIC_INT_PERFCTR (GIC_CPU_INT5) - -/* GIC constants */ -/* Add 2 to convert non-eic hw int # to eic vector # */ -#define GIC_CPU_TO_VEC_OFFSET (2) -/* If we map an intr to pin X, GIC will actually generate vector X+1 */ -#define GIC_PIN_TO_VEC_OFFSET (1) - #define GIC_EXT_INTR(x) x /* External Interrupts used for IPI */ diff --git a/trunk/arch/mips/include/asm/mipsmtregs.h b/trunk/arch/mips/include/asm/mipsmtregs.h index e71ff4c317f2..c9420aa97e32 100644 --- a/trunk/arch/mips/include/asm/mipsmtregs.h +++ b/trunk/arch/mips/include/asm/mipsmtregs.h @@ -48,7 +48,7 @@ #define CP0_VPECONF0 $1, 2 #define CP0_VPECONF1 $1, 3 #define CP0_YQMASK $1, 4 -#define CP0_VPESCHEDULE $1, 5 +#define CP0_VPESCHEDULE $1, 5 #define CP0_VPESCHEFBK $1, 6 #define CP0_TCSTATUS $2, 1 #define CP0_TCBIND $2, 2 diff --git a/trunk/arch/mips/include/asm/switch_to.h b/trunk/arch/mips/include/asm/switch_to.h index 4f8ddba8c360..5d33621b5658 100644 --- a/trunk/arch/mips/include/asm/switch_to.h +++ b/trunk/arch/mips/include/asm/switch_to.h @@ -22,7 +22,7 @@ struct task_struct; * switch_to(n) should switch tasks to task nr n, first * checking that n isn't the current task, in which case it does nothing. */ -extern asmlinkage void *resume(void *last, void *next, void *next_ti, u32 __usedfpu); +extern asmlinkage void *resume(void *last, void *next, void *next_ti); extern unsigned int ll_bit; extern struct task_struct *ll_task; @@ -66,13 +66,11 @@ do { \ #define switch_to(prev, next, last) \ do { \ - u32 __usedfpu; \ __mips_mt_fpaff_switch_to(prev); \ if (cpu_has_dsp) \ __save_dsp(prev); \ __clear_software_ll_bit(); \ - __usedfpu = test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU); \ - (last) = resume(prev, next, task_thread_info(next), __usedfpu); \ + (last) = resume(prev, next, task_thread_info(next)); \ } while (0) #define finish_arch_switch(prev) \ diff --git a/trunk/arch/mips/include/asm/thread_info.h b/trunk/arch/mips/include/asm/thread_info.h index ca97e0ecb64b..e2eca7d10598 100644 --- a/trunk/arch/mips/include/asm/thread_info.h +++ b/trunk/arch/mips/include/asm/thread_info.h @@ -60,8 +60,6 @@ struct thread_info { register struct thread_info *__current_thread_info __asm__("$28"); #define current_thread_info() __current_thread_info -#endif /* !__ASSEMBLY__ */ - /* thread information allocation */ #if defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_32BIT) #define THREAD_SIZE_ORDER (1) @@ -87,6 +85,8 @@ register struct thread_info *__current_thread_info __asm__("$28"); #define STACK_WARN (THREAD_SIZE / 8) +#endif /* !__ASSEMBLY__ */ + #define PREEMPT_ACTIVE 0x10000000 /* diff --git a/trunk/arch/mips/kernel/cpu-probe.c b/trunk/arch/mips/kernel/cpu-probe.c index f4630e1082ab..6ae7ce4ac63e 100644 --- a/trunk/arch/mips/kernel/cpu-probe.c +++ b/trunk/arch/mips/kernel/cpu-probe.c @@ -4,7 +4,7 @@ * Copyright (C) xxxx the Anonymous * Copyright (C) 1994 - 2006 Ralf Baechle * Copyright (C) 2003, 2004 Maciej W. Rozycki - * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc. + * Copyright (C) 2001, 2004 MIPS Inc. 
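/*
 * Editor's illustration, not part of the patch: one side of the
 * switch_to() hunk above samples TIF_USEDFPU in C and hands the result
 * to resume(), instead of having the low-level assembler walk TI_FLAGS
 * itself.  A minimal stand-alone sketch of that idea; demo_resume() and
 * demo_switch() are invented names used only for this example.
 */
#include <linux/sched.h>
#include <linux/thread_info.h>

extern asmlinkage void *demo_resume(void *prev, void *next,
				    void *next_ti, u32 usedfpu);

static inline void *demo_switch(struct task_struct *prev,
				struct task_struct *next)
{
	/* Sample and clear the flag while still in C ... */
	u32 usedfpu = test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU);

	/* ... so the assembler resume() no longer has to touch TI_FLAGS. */
	return demo_resume(prev, next, task_thread_info(next), usedfpu);
}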
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -199,7 +199,6 @@ void __init check_wait(void) cpu_wait = rm7k_wait_irqoff; break; - case CPU_M14KC: case CPU_24K: case CPU_34K: case CPU_1004K: @@ -811,10 +810,6 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_5KC; __cpu_name[cpu] = "MIPS 5Kc"; break; - case PRID_IMP_5KE: - c->cputype = CPU_5KE; - __cpu_name[cpu] = "MIPS 5KE"; - break; case PRID_IMP_20KC: c->cputype = CPU_20KC; __cpu_name[cpu] = "MIPS 20Kc"; @@ -836,10 +831,6 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_74K; __cpu_name[cpu] = "MIPS 74Kc"; break; - case PRID_IMP_M14KC: - c->cputype = CPU_M14KC; - __cpu_name[cpu] = "MIPS M14Kc"; - break; case PRID_IMP_1004K: c->cputype = CPU_1004K; __cpu_name[cpu] = "MIPS 1004Kc"; diff --git a/trunk/arch/mips/kernel/mips_ksyms.c b/trunk/arch/mips/kernel/mips_ksyms.c index 3fc1691110dc..57ba13edb03a 100644 --- a/trunk/arch/mips/kernel/mips_ksyms.c +++ b/trunk/arch/mips/kernel/mips_ksyms.c @@ -5,7 +5,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05, 12 by Ralf Baechle + * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05 by Ralf Baechle * Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc. */ #include @@ -34,12 +34,6 @@ EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(kernel_thread); -/* - * Functions that operate on entire pages. Mostly used by memory management. - */ -EXPORT_SYMBOL(clear_page); -EXPORT_SYMBOL(copy_page); - /* * Userspace access stuff. */ diff --git a/trunk/arch/mips/kernel/octeon_switch.S b/trunk/arch/mips/kernel/octeon_switch.S index 0441f54b2a6a..ce89c8061708 100644 --- a/trunk/arch/mips/kernel/octeon_switch.S +++ b/trunk/arch/mips/kernel/octeon_switch.S @@ -31,7 +31,7 @@ /* * task_struct *resume(task_struct *prev, task_struct *next, - * struct thread_info *next_ti, int usedfpu) + * struct thread_info *next_ti) */ .align 7 LEAF(resume) diff --git a/trunk/arch/mips/kernel/perf_event_mipsxx.c b/trunk/arch/mips/kernel/perf_event_mipsxx.c index eb5e394a4650..f29099b104c4 100644 --- a/trunk/arch/mips/kernel/perf_event_mipsxx.c +++ b/trunk/arch/mips/kernel/perf_event_mipsxx.c @@ -162,6 +162,11 @@ static unsigned int counters_total_to_per_cpu(unsigned int counters) return counters >> vpe_shift(); } +static unsigned int counters_per_cpu_to_total(unsigned int counters) +{ + return counters << vpe_shift(); +} + #else /* !CONFIG_MIPS_MT_SMP */ #define vpe_id() 0 diff --git a/trunk/arch/mips/kernel/r2300_switch.S b/trunk/arch/mips/kernel/r2300_switch.S index 9c51be5a163a..293898391e67 100644 --- a/trunk/arch/mips/kernel/r2300_switch.S +++ b/trunk/arch/mips/kernel/r2300_switch.S @@ -43,7 +43,7 @@ /* * task_struct *resume(task_struct *prev, task_struct *next, - * struct thread_info *next_ti, int usedfpu) + * struct thread_info *next_ti) ) */ LEAF(resume) mfc0 t1, CP0_STATUS @@ -51,9 +51,18 @@ LEAF(resume) cpu_save_nonscratch a0 sw ra, THREAD_REG31(a0) - beqz a3, 1f + /* + * check if we need to save FPU registers + */ + lw t3, TASK_THREAD_INFO(a0) + lw t0, TI_FLAGS(t3) + li t1, _TIF_USEDFPU + and t2, t0, t1 + beqz t2, 1f + nor t1, zero, t1 - PTR_L t3, TASK_THREAD_INFO(a0) + and t0, t0, t1 + sw t0, TI_FLAGS(t3) /* * clear saved user stack CU1 bit diff --git a/trunk/arch/mips/kernel/r4k_switch.S b/trunk/arch/mips/kernel/r4k_switch.S index 42d2a3938420..9414f9354469 
100644 --- a/trunk/arch/mips/kernel/r4k_switch.S +++ b/trunk/arch/mips/kernel/r4k_switch.S @@ -41,7 +41,7 @@ /* * task_struct *resume(task_struct *prev, task_struct *next, - * struct thread_info *next_ti, int usedfpu) + * struct thread_info *next_ti) */ .align 5 LEAF(resume) @@ -53,10 +53,16 @@ /* * check if we need to save FPU registers */ + PTR_L t3, TASK_THREAD_INFO(a0) + LONG_L t0, TI_FLAGS(t3) + li t1, _TIF_USEDFPU + and t2, t0, t1 + beqz t2, 1f + nor t1, zero, t1 - beqz a3, 1f + and t0, t0, t1 + LONG_S t0, TI_FLAGS(t3) - PTR_L t3, TASK_THREAD_INFO(a0) /* * clear saved user stack CU1 bit */ diff --git a/trunk/arch/mips/kernel/smp-bmips.c b/trunk/arch/mips/kernel/smp-bmips.c index 8e393b8443f7..3046e2986006 100644 --- a/trunk/arch/mips/kernel/smp-bmips.c +++ b/trunk/arch/mips/kernel/smp-bmips.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -196,6 +197,13 @@ static void bmips_init_secondary(void) write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0)); #endif + + /* make sure there won't be a timer interrupt for a little while */ + write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ); + + irq_enable_hazard(); + set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE); + irq_enable_hazard(); } /* @@ -204,13 +212,6 @@ static void bmips_init_secondary(void) static void bmips_smp_finish(void) { pr_info("SMP: CPU%d is running\n", smp_processor_id()); - - /* make sure there won't be a timer interrupt for a little while */ - write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ); - - irq_enable_hazard(); - set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE); - irq_enable_hazard(); } /* diff --git a/trunk/arch/mips/kernel/smp.c b/trunk/arch/mips/kernel/smp.c index 1268392f1d27..48650c818040 100644 --- a/trunk/arch/mips/kernel/smp.c +++ b/trunk/arch/mips/kernel/smp.c @@ -122,21 +122,13 @@ asmlinkage __cpuinit void start_secondary(void) notify_cpu_starting(cpu); - set_cpu_online(cpu, true); - + mp_ops->smp_finish(); set_cpu_sibling_map(cpu); cpu_set(cpu, cpu_callin_map); synchronise_count_slave(); - /* - * irq will be enabled in ->smp_finish(), enabling it too early - * is dangerous. - */ - WARN_ON_ONCE(!irqs_disabled()); - mp_ops->smp_finish(); - cpu_idle(); } @@ -204,6 +196,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) while (!cpu_isset(cpu, cpu_callin_map)) udelay(100); + set_cpu_online(cpu, true); + return 0; } diff --git a/trunk/arch/mips/kernel/smtc.c b/trunk/arch/mips/kernel/smtc.c index 15b5f3cfd20c..f5dd38f1d015 100644 --- a/trunk/arch/mips/kernel/smtc.c +++ b/trunk/arch/mips/kernel/smtc.c @@ -322,7 +322,7 @@ int __init smtc_build_cpu_map(int start_cpu_slot) /* * Common setup before any secondaries are started - * Make sure all CPUs are in a sensible state before we boot any of the + * Make sure all CPU's are in a sensible state before we boot any of the * secondaries. * * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly @@ -340,12 +340,12 @@ static void smtc_tc_setup(int vpe, int tc, int cpu) /* * TCContext gets an offset from the base of the IPIQ array * to be used in low-level code to detect the presence of - * an active IPI queue. + * an active IPI queue */ write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16); /* Bind tc to vpe */ write_tc_c0_tcbind(vpe); - /* In general, all TCs should have the same cpu_data indications. 
*/ + /* In general, all TCs should have the same cpu_data indications */ memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips)); /* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */ if (cpu_data[0].cputype == CPU_34K || @@ -358,8 +358,8 @@ static void smtc_tc_setup(int vpe, int tc, int cpu) } /* - * Tweak to get Count registes in as close a sync as possible. The - * value seems good for 34K-class cores. + * Tweak to get Count registes in as close a sync as possible. + * Value seems good for 34K-class cores. */ #define CP0_SKEW 8 @@ -615,6 +615,7 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) void smtc_init_secondary(void) { + local_irq_enable(); } void smtc_smp_finish(void) @@ -630,8 +631,6 @@ void smtc_smp_finish(void) if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id)) write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); - local_irq_enable(); - printk("TC %d going on-line as CPU %d\n", cpu_data[smp_processor_id()].tc_id, smp_processor_id()); } diff --git a/trunk/arch/mips/kernel/sync-r4k.c b/trunk/arch/mips/kernel/sync-r4k.c index 842d55e411fd..99f913c8d7a6 100644 --- a/trunk/arch/mips/kernel/sync-r4k.c +++ b/trunk/arch/mips/kernel/sync-r4k.c @@ -111,6 +111,7 @@ void __cpuinit synchronise_count_master(void) void __cpuinit synchronise_count_slave(void) { int i; + unsigned long flags; unsigned int initcount; int ncpus; @@ -122,6 +123,8 @@ void __cpuinit synchronise_count_slave(void) return; #endif + local_irq_save(flags); + /* * Not every cpu is online at the time this gets called, * so we first wait for the master to say everyone is ready @@ -151,5 +154,7 @@ void __cpuinit synchronise_count_slave(void) } /* Arrange for an interrupt in a short while */ write_c0_compare(read_c0_count() + COUNTON); + + local_irq_restore(flags); } #undef NR_LOOPS diff --git a/trunk/arch/mips/kernel/traps.c b/trunk/arch/mips/kernel/traps.c index c3c293543703..2d0c2a277f52 100644 --- a/trunk/arch/mips/kernel/traps.c +++ b/trunk/arch/mips/kernel/traps.c @@ -132,9 +132,6 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs) unsigned long ra = regs->regs[31]; unsigned long pc = regs->cp0_epc; - if (!task) - task = current; - if (raw_show_trace || !__kernel_text_address(pc)) { show_raw_backtrace(sp); return; @@ -1252,7 +1249,6 @@ static inline void parity_protection_init(void) break; case CPU_5KC: - case CPU_5KE: write_c0_ecc(0x80000000); back_to_back_c0_hazard(); /* Set the PE bit (bit 31) in the c0_errctl register. */ @@ -1502,7 +1498,6 @@ extern void flush_tlb_handlers(void); * Timer interrupt */ int cp0_compare_irq; -EXPORT_SYMBOL_GPL(cp0_compare_irq); int cp0_compare_irq_shift; /* @@ -1602,7 +1597,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) cp0_perfcount_irq = -1; } else { cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ; - cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ; + cp0_compare_irq_shift = cp0_compare_irq; cp0_perfcount_irq = -1; } diff --git a/trunk/arch/mips/kernel/vmlinux.lds.S b/trunk/arch/mips/kernel/vmlinux.lds.S index df243a64f430..924da5eb7031 100644 --- a/trunk/arch/mips/kernel/vmlinux.lds.S +++ b/trunk/arch/mips/kernel/vmlinux.lds.S @@ -1,6 +1,5 @@ #include #include -#include #include #undef mips @@ -73,7 +72,7 @@ SECTIONS .data : { /* Data */ . = . 
+ DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */ - INIT_TASK_DATA(THREAD_SIZE) + INIT_TASK_DATA(PAGE_SIZE) NOSAVE_DATA CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) diff --git a/trunk/arch/mips/mm/Makefile b/trunk/arch/mips/mm/Makefile index fd6203f14f1f..4aa20280613e 100644 --- a/trunk/arch/mips/mm/Makefile +++ b/trunk/arch/mips/mm/Makefile @@ -3,8 +3,8 @@ # obj-y += cache.o dma-default.o extable.o fault.o \ - gup.o init.o mmap.o page.o page-funcs.o \ - tlbex.o tlbex-fault.o uasm.o + gup.o init.o mmap.o page.o tlbex.o \ + tlbex-fault.o uasm.o obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o obj-$(CONFIG_64BIT) += pgtable-64.o diff --git a/trunk/arch/mips/mm/c-r4k.c b/trunk/arch/mips/mm/c-r4k.c index f092c265dc63..5109be96d98d 100644 --- a/trunk/arch/mips/mm/c-r4k.c +++ b/trunk/arch/mips/mm/c-r4k.c @@ -977,7 +977,7 @@ static void __cpuinit probe_pcache(void) c->icache.linesz = 2 << lsize; else c->icache.linesz = lsize; - c->icache.sets = 32 << (((config1 >> 22) + 1) & 7); + c->icache.sets = 64 << ((config1 >> 22) & 7); c->icache.ways = 1 + ((config1 >> 16) & 7); icache_size = c->icache.sets * @@ -997,7 +997,7 @@ static void __cpuinit probe_pcache(void) c->dcache.linesz = 2 << lsize; else c->dcache.linesz= lsize; - c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7); + c->dcache.sets = 64 << ((config1 >> 13) & 7); c->dcache.ways = 1 + ((config1 >> 7) & 7); dcache_size = c->dcache.sets * @@ -1051,7 +1051,6 @@ static void __cpuinit probe_pcache(void) case CPU_R14000: break; - case CPU_M14KC: case CPU_24K: case CPU_34K: case CPU_74K: diff --git a/trunk/arch/mips/mm/page-funcs.S b/trunk/arch/mips/mm/page-funcs.S deleted file mode 100644 index 48a6b38ff13e..000000000000 --- a/trunk/arch/mips/mm/page-funcs.S +++ /dev/null @@ -1,50 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Micro-assembler generated clear_page/copy_page functions. - * - * Copyright (C) 2012 MIPS Technologies, Inc. - * Copyright (C) 2012 Ralf Baechle - */ -#include -#include - -#ifdef CONFIG_SIBYTE_DMA_PAGEOPS -#define cpu_clear_page_function_name clear_page_cpu -#define cpu_copy_page_function_name copy_page_cpu -#else -#define cpu_clear_page_function_name clear_page -#define cpu_copy_page_function_name copy_page -#endif - -/* - * Maximum sizes: - * - * R4000 128 bytes S-cache: 0x058 bytes - * R4600 v1.7: 0x05c bytes - * R4600 v2.0: 0x060 bytes - * With prefetching, 16 word strides 0x120 bytes - */ -EXPORT(__clear_page_start) -LEAF(cpu_clear_page_function_name) -1: j 1b /* Dummy, will be replaced. */ - .space 288 -END(cpu_clear_page_function_name) -EXPORT(__clear_page_end) - -/* - * Maximum sizes: - * - * R4000 128 bytes S-cache: 0x11c bytes - * R4600 v1.7: 0x080 bytes - * R4600 v2.0: 0x07c bytes - * With prefetching, 16 word strides 0x540 bytes - */ -EXPORT(__copy_page_start) -LEAF(cpu_copy_page_function_name) -1: j 1b /* Dummy, will be replaced. */ - .space 1344 -END(cpu_copy_page_function_name) -EXPORT(__copy_page_end) diff --git a/trunk/arch/mips/mm/page.c b/trunk/arch/mips/mm/page.c index 98f530e18216..cc0b626858b3 100644 --- a/trunk/arch/mips/mm/page.c +++ b/trunk/arch/mips/mm/page.c @@ -6,7 +6,6 @@ * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 2007 Maciej W. Rozycki * Copyright (C) 2008 Thiemo Seufer - * Copyright (C) 2012 MIPS Technologies, Inc. 
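/*
 * Editor's note (illustrative, not from the patch): the page-funcs.S
 * stubs shown in the hunk above reserve fixed-size regions (.space 288
 * and .space 1344) behind the clear_page/copy_page entry points so that
 * boot-time code can overwrite the dummy "j 1b" with instructions
 * generated at run time.  A hedged C sketch of filling such a region;
 * the demo_ symbols are assumptions made for this example only.
 */
#include <linux/bug.h>
#include <linux/types.h>

extern u32 demo_page_func_start[];	/* start of the reserved region */
extern u32 demo_page_func_end[];	/* one past the last word */

static u32 *demo_buf;

static void demo_emit(u32 insn)
{
	/* Never run past the space reserved in the assembler stub. */
	BUG_ON(demo_buf >= demo_page_func_end);
	*demo_buf++ = insn;
}

static void demo_build_page_func(void)
{
	demo_buf = demo_page_func_start;
	demo_emit(0x00000000);		/* nop, stands in for generated code */
	/* ...a real builder would emit loads/stores/prefetches here... */
	demo_emit(0x03e00008);		/* jr ra */
	demo_emit(0x00000000);		/* delay-slot nop */
}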
*/ #include #include @@ -72,6 +71,45 @@ static struct uasm_reloc __cpuinitdata relocs[5]; #define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010) #define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020) +/* + * Maximum sizes: + * + * R4000 128 bytes S-cache: 0x058 bytes + * R4600 v1.7: 0x05c bytes + * R4600 v2.0: 0x060 bytes + * With prefetching, 16 word strides 0x120 bytes + */ + +static u32 clear_page_array[0x120 / 4]; + +#ifdef CONFIG_SIBYTE_DMA_PAGEOPS +void clear_page_cpu(void *page) __attribute__((alias("clear_page_array"))); +#else +void clear_page(void *page) __attribute__((alias("clear_page_array"))); +#endif + +EXPORT_SYMBOL(clear_page); + +/* + * Maximum sizes: + * + * R4000 128 bytes S-cache: 0x11c bytes + * R4600 v1.7: 0x080 bytes + * R4600 v2.0: 0x07c bytes + * With prefetching, 16 word strides 0x540 bytes + */ +static u32 copy_page_array[0x540 / 4]; + +#ifdef CONFIG_SIBYTE_DMA_PAGEOPS +void +copy_page_cpu(void *to, void *from) __attribute__((alias("copy_page_array"))); +#else +void copy_page(void *to, void *from) __attribute__((alias("copy_page_array"))); +#endif + +EXPORT_SYMBOL(copy_page); + + static int pref_bias_clear_store __cpuinitdata; static int pref_bias_copy_load __cpuinitdata; static int pref_bias_copy_store __cpuinitdata; @@ -244,15 +282,10 @@ static inline void __cpuinit build_clear_pref(u32 **buf, int off) } } -extern u32 __clear_page_start; -extern u32 __clear_page_end; -extern u32 __copy_page_start; -extern u32 __copy_page_end; - void __cpuinit build_clear_page(void) { int off; - u32 *buf = &__clear_page_start; + u32 *buf = (u32 *)&clear_page_array; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; int i; @@ -323,17 +356,17 @@ void __cpuinit build_clear_page(void) uasm_i_jr(&buf, RA); uasm_i_nop(&buf); - BUG_ON(buf > &__clear_page_end); + BUG_ON(buf > clear_page_array + ARRAY_SIZE(clear_page_array)); uasm_resolve_relocs(relocs, labels); pr_debug("Synthesized clear page handler (%u instructions).\n", - (u32)(buf - &__clear_page_start)); + (u32)(buf - clear_page_array)); pr_debug("\t.set push\n"); pr_debug("\t.set noreorder\n"); - for (i = 0; i < (buf - &__clear_page_start); i++) - pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]); + for (i = 0; i < (buf - clear_page_array); i++) + pr_debug("\t.word 0x%08x\n", clear_page_array[i]); pr_debug("\t.set pop\n"); } @@ -394,7 +427,7 @@ static inline void build_copy_store_pref(u32 **buf, int off) void __cpuinit build_copy_page(void) { int off; - u32 *buf = &__copy_page_start; + u32 *buf = (u32 *)©_page_array; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; int i; @@ -562,23 +595,21 @@ void __cpuinit build_copy_page(void) uasm_i_jr(&buf, RA); uasm_i_nop(&buf); - BUG_ON(buf > &__copy_page_end); + BUG_ON(buf > copy_page_array + ARRAY_SIZE(copy_page_array)); uasm_resolve_relocs(relocs, labels); pr_debug("Synthesized copy page handler (%u instructions).\n", - (u32)(buf - &__copy_page_start)); + (u32)(buf - copy_page_array)); pr_debug("\t.set push\n"); pr_debug("\t.set noreorder\n"); - for (i = 0; i < (buf - &__copy_page_start); i++) - pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]); + for (i = 0; i < (buf - copy_page_array); i++) + pr_debug("\t.word 0x%08x\n", copy_page_array[i]); pr_debug("\t.set pop\n"); } #ifdef CONFIG_SIBYTE_DMA_PAGEOPS -extern void clear_page_cpu(void *page); -extern void copy_page_cpu(void *to, void *from); /* * Pad descriptors to cacheline, since each is exclusively owned by a diff --git a/trunk/arch/mips/mm/tlbex.c 
b/trunk/arch/mips/mm/tlbex.c index 03eb0ef91580..0bc485b3cd60 100644 --- a/trunk/arch/mips/mm/tlbex.c +++ b/trunk/arch/mips/mm/tlbex.c @@ -9,7 +9,6 @@ * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 2008, 2009 Cavium Networks, Inc. - * Copyright (C) 2011 MIPS Technologies, Inc. * * ... and the days got worse and worse and now you see * I've gone completly out of my mind. @@ -495,7 +494,6 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l, case CPU_R14000: case CPU_4KC: case CPU_4KEC: - case CPU_M14KC: case CPU_SB1: case CPU_SB1A: case CPU_4KSC: diff --git a/trunk/arch/mips/mti-malta/malta-pci.c b/trunk/arch/mips/mti-malta/malta-pci.c index 284dea54faf5..bf80921f2f56 100644 --- a/trunk/arch/mips/mti-malta/malta-pci.c +++ b/trunk/arch/mips/mti-malta/malta-pci.c @@ -241,9 +241,8 @@ void __init mips_pcibios_init(void) return; } - /* Change start address to avoid conflicts with ACPI and SMB devices */ - if (controller->io_resource->start < 0x00002000UL) - controller->io_resource->start = 0x00002000UL; + if (controller->io_resource->start < 0x00001000UL) /* FIXME */ + controller->io_resource->start = 0x00001000UL; iomem_resource.end &= 0xfffffffffULL; /* 64 GB */ ioport_resource.end = controller->io_resource->end; @@ -254,7 +253,7 @@ void __init mips_pcibios_init(void) } /* Enable PCI 2.1 compatibility in PIIX4 */ -static void __devinit quirk_dlcsetup(struct pci_dev *dev) +static void __init quirk_dlcsetup(struct pci_dev *dev) { u8 odlc, ndlc; (void) pci_read_config_byte(dev, 0x82, &odlc); diff --git a/trunk/arch/mips/mti-malta/malta-setup.c b/trunk/arch/mips/mti-malta/malta-setup.c index 2e28f653f66d..b7f37d4982fa 100644 --- a/trunk/arch/mips/mti-malta/malta-setup.c +++ b/trunk/arch/mips/mti-malta/malta-setup.c @@ -111,7 +111,7 @@ static void __init pci_clock_check(void) unsigned int __iomem *jmpr_p = (unsigned int *) ioremap(MALTA_JMPRS_REG, sizeof(unsigned int)); int jmpr = (__raw_readl(jmpr_p) >> 2) & 0x07; - static const int pciclocks[] __initconst = { + static const int pciclocks[] __initdata = { 33, 20, 25, 30, 12, 16, 37, 10 }; int pciclock = pciclocks[jmpr]; diff --git a/trunk/arch/mips/netlogic/xlp/setup.c b/trunk/arch/mips/netlogic/xlp/setup.c index b3df7c2aad1e..acb677a1227c 100644 --- a/trunk/arch/mips/netlogic/xlp/setup.c +++ b/trunk/arch/mips/netlogic/xlp/setup.c @@ -82,10 +82,8 @@ void __init prom_free_prom_memory(void) void xlp_mmu_init(void) { - /* enable extended TLB and Large Fixed TLB */ write_c0_config6(read_c0_config6() | 0x24); - - /* set page mask of Fixed TLB in config7 */ + current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1; write_c0_config7(PM_DEFAULT_MASK >> (13 + (ffz(PM_DEFAULT_MASK >> 13) / 2))); } @@ -102,10 +100,6 @@ void __init prom_init(void) nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1)); #ifdef CONFIG_SMP nlm_wakeup_secondary_cpus(0xffffffff); - - /* update TLB size after waking up threads */ - current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1; - register_smp_ops(&nlm_smp_ops); #endif } diff --git a/trunk/arch/mips/oprofile/common.c b/trunk/arch/mips/oprofile/common.c index b6e378211a2c..d1f2d4c52d42 100644 --- a/trunk/arch/mips/oprofile/common.c +++ b/trunk/arch/mips/oprofile/common.c @@ -78,7 +78,6 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) switch (current_cpu_type()) { case CPU_5KC: - case CPU_M14KC: case CPU_20KC: case CPU_24K: case CPU_25KF: diff --git 
a/trunk/arch/mips/oprofile/op_model_mipsxx.c b/trunk/arch/mips/oprofile/op_model_mipsxx.c index 4d80a856048d..baba3bcaa3c2 100644 --- a/trunk/arch/mips/oprofile/op_model_mipsxx.c +++ b/trunk/arch/mips/oprofile/op_model_mipsxx.c @@ -322,10 +322,6 @@ static int __init mipsxx_init(void) op_model_mipsxx_ops.num_counters = counters; switch (current_cpu_type()) { - case CPU_M14KC: - op_model_mipsxx_ops.cpu_type = "mips/M14Kc"; - break; - case CPU_20KC: op_model_mipsxx_ops.cpu_type = "mips/20K"; break; diff --git a/trunk/arch/mips/pci/fixup-fuloong2e.c b/trunk/arch/mips/pci/fixup-fuloong2e.c index 0857ab8c3919..d5d4c018fb04 100644 --- a/trunk/arch/mips/pci/fixup-fuloong2e.c +++ b/trunk/arch/mips/pci/fixup-fuloong2e.c @@ -48,7 +48,7 @@ int pcibios_plat_dev_init(struct pci_dev *dev) return 0; } -static void __devinit loongson2e_nec_fixup(struct pci_dev *pdev) +static void __init loongson2e_nec_fixup(struct pci_dev *pdev) { unsigned int val; @@ -60,7 +60,7 @@ static void __devinit loongson2e_nec_fixup(struct pci_dev *pdev) pci_write_config_dword(pdev, 0xe4, 1 << 5); } -static void __devinit loongson2e_686b_func0_fixup(struct pci_dev *pdev) +static void __init loongson2e_686b_func0_fixup(struct pci_dev *pdev) { unsigned char c; @@ -135,7 +135,7 @@ static void __devinit loongson2e_686b_func0_fixup(struct pci_dev *pdev) printk(KERN_INFO"via686b fix: ISA bridge done\n"); } -static void __devinit loongson2e_686b_func1_fixup(struct pci_dev *pdev) +static void __init loongson2e_686b_func1_fixup(struct pci_dev *pdev) { printk(KERN_INFO"via686b fix: IDE\n"); @@ -168,19 +168,19 @@ static void __devinit loongson2e_686b_func1_fixup(struct pci_dev *pdev) printk(KERN_INFO"via686b fix: IDE done\n"); } -static void __devinit loongson2e_686b_func2_fixup(struct pci_dev *pdev) +static void __init loongson2e_686b_func2_fixup(struct pci_dev *pdev) { /* irq routing */ pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, 10); } -static void __devinit loongson2e_686b_func3_fixup(struct pci_dev *pdev) +static void __init loongson2e_686b_func3_fixup(struct pci_dev *pdev) { /* irq routing */ pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, 11); } -static void __devinit loongson2e_686b_func5_fixup(struct pci_dev *pdev) +static void __init loongson2e_686b_func5_fixup(struct pci_dev *pdev) { unsigned int val; unsigned char c; diff --git a/trunk/arch/mips/pci/fixup-lemote2f.c b/trunk/arch/mips/pci/fixup-lemote2f.c index a7b917dcf604..4b9768d5d729 100644 --- a/trunk/arch/mips/pci/fixup-lemote2f.c +++ b/trunk/arch/mips/pci/fixup-lemote2f.c @@ -96,21 +96,21 @@ int pcibios_plat_dev_init(struct pci_dev *dev) } /* CS5536 SPEC. 
fixup */ -static void __devinit loongson_cs5536_isa_fixup(struct pci_dev *pdev) +static void __init loongson_cs5536_isa_fixup(struct pci_dev *pdev) { /* the uart1 and uart2 interrupt in PIC is enabled as default */ pci_write_config_dword(pdev, PCI_UART1_INT_REG, 1); pci_write_config_dword(pdev, PCI_UART2_INT_REG, 1); } -static void __devinit loongson_cs5536_ide_fixup(struct pci_dev *pdev) +static void __init loongson_cs5536_ide_fixup(struct pci_dev *pdev) { /* setting the mutex pin as IDE function */ pci_write_config_dword(pdev, PCI_IDE_CFG_REG, CS5536_IDE_FLASH_SIGNATURE); } -static void __devinit loongson_cs5536_acc_fixup(struct pci_dev *pdev) +static void __init loongson_cs5536_acc_fixup(struct pci_dev *pdev) { /* enable the AUDIO interrupt in PIC */ pci_write_config_dword(pdev, PCI_ACC_INT_REG, 1); @@ -118,14 +118,14 @@ static void __devinit loongson_cs5536_acc_fixup(struct pci_dev *pdev) pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xc0); } -static void __devinit loongson_cs5536_ohci_fixup(struct pci_dev *pdev) +static void __init loongson_cs5536_ohci_fixup(struct pci_dev *pdev) { /* enable the OHCI interrupt in PIC */ /* THE OHCI, EHCI, UDC, OTG are shared with interrupt in PIC */ pci_write_config_dword(pdev, PCI_OHCI_INT_REG, 1); } -static void __devinit loongson_cs5536_ehci_fixup(struct pci_dev *pdev) +static void __init loongson_cs5536_ehci_fixup(struct pci_dev *pdev) { u32 hi, lo; @@ -137,7 +137,7 @@ static void __devinit loongson_cs5536_ehci_fixup(struct pci_dev *pdev) pci_write_config_dword(pdev, PCI_EHCI_FLADJ_REG, 0x2000); } -static void __devinit loongson_nec_fixup(struct pci_dev *pdev) +static void __init loongson_nec_fixup(struct pci_dev *pdev) { unsigned int val; diff --git a/trunk/arch/mips/pci/fixup-malta.c b/trunk/arch/mips/pci/fixup-malta.c index 70073c98ed32..0f48498bc231 100644 --- a/trunk/arch/mips/pci/fixup-malta.c +++ b/trunk/arch/mips/pci/fixup-malta.c @@ -49,10 +49,10 @@ int pcibios_plat_dev_init(struct pci_dev *dev) return 0; } -static void __devinit malta_piix_func0_fixup(struct pci_dev *pdev) +static void __init malta_piix_func0_fixup(struct pci_dev *pdev) { unsigned char reg_val; - static int piixirqmap[16] __devinitdata = { /* PIIX PIRQC[A:D] irq mappings */ + static int piixirqmap[16] __initdata = { /* PIIX PIRQC[A:D] irq mappings */ 0, 0, 0, 3, 4, 5, 6, 7, 0, 9, 10, 11, @@ -83,7 +83,7 @@ static void __devinit malta_piix_func0_fixup(struct pci_dev *pdev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0, malta_piix_func0_fixup); -static void __devinit malta_piix_func1_fixup(struct pci_dev *pdev) +static void __init malta_piix_func1_fixup(struct pci_dev *pdev) { unsigned char reg_val; diff --git a/trunk/arch/mips/pci/fixup-mpc30x.c b/trunk/arch/mips/pci/fixup-mpc30x.c index 8e4f8288eca2..e08f49cb6875 100644 --- a/trunk/arch/mips/pci/fixup-mpc30x.c +++ b/trunk/arch/mips/pci/fixup-mpc30x.c @@ -22,13 +22,13 @@ #include -static const int internal_func_irqs[] __initconst = { +static const int internal_func_irqs[] __initdata = { VRC4173_CASCADE_IRQ, VRC4173_AC97_IRQ, VRC4173_USB_IRQ, }; -static const int irq_tab_mpc30x[] __initconst = { +static const int irq_tab_mpc30x[] __initdata = { [12] = VRC4173_PCMCIA1_IRQ, [13] = VRC4173_PCMCIA2_IRQ, [29] = MQ200_IRQ, diff --git a/trunk/arch/mips/pci/fixup-sb1250.c b/trunk/arch/mips/pci/fixup-sb1250.c index d02900a72916..f0bb9146e6c0 100644 --- a/trunk/arch/mips/pci/fixup-sb1250.c +++ b/trunk/arch/mips/pci/fixup-sb1250.c @@ -15,7 +15,7 @@ * Set the BCM1250, etc. 
PCI host bridge's TRDY timeout * to the finite max. */ -static void __devinit quirk_sb1250_pci(struct pci_dev *dev) +static void __init quirk_sb1250_pci(struct pci_dev *dev) { pci_write_config_byte(dev, 0x40, 0xff); } @@ -25,7 +25,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_PCI, /* * The BCM1250, etc. PCI/HT bridge reports as a host bridge. */ -static void __devinit quirk_sb1250_ht(struct pci_dev *dev) +static void __init quirk_sb1250_ht(struct pci_dev *dev) { dev->class = PCI_CLASS_BRIDGE_PCI << 8; } @@ -35,7 +35,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_HT, /* * Set the SP1011 HT/PCI bridge's TRDY timeout to the finite max. */ -static void __devinit quirk_sp1011(struct pci_dev *dev) +static void __init quirk_sp1011(struct pci_dev *dev) { pci_write_config_byte(dev, 0x64, 0xff); } diff --git a/trunk/arch/mips/pci/ops-tx4927.c b/trunk/arch/mips/pci/ops-tx4927.c index bc13e29d2bb3..a1e7e6d80c8c 100644 --- a/trunk/arch/mips/pci/ops-tx4927.c +++ b/trunk/arch/mips/pci/ops-tx4927.c @@ -495,7 +495,7 @@ irqreturn_t tx4927_pcierr_interrupt(int irq, void *dev_id) } #ifdef CONFIG_TOSHIBA_FPCIB0 -static void __devinit tx4927_quirk_slc90e66_bridge(struct pci_dev *dev) +static void __init tx4927_quirk_slc90e66_bridge(struct pci_dev *dev) { struct tx4927_pcic_reg __iomem *pcicptr = pci_bus_to_pcicptr(dev->bus); diff --git a/trunk/arch/mips/pci/pci-ip27.c b/trunk/arch/mips/pci/pci-ip27.c index fdc24440294c..0fbe4c0c170a 100644 --- a/trunk/arch/mips/pci/pci-ip27.c +++ b/trunk/arch/mips/pci/pci-ip27.c @@ -212,7 +212,7 @@ static inline void pci_enable_swapping(struct pci_dev *dev) bridge->b_widget.w_tflush; /* Flush */ } -static void __devinit pci_fixup_ioc3(struct pci_dev *d) +static void __init pci_fixup_ioc3(struct pci_dev *d) { pci_disable_swapping(d); } diff --git a/trunk/arch/mips/pci/pci-xlr.c b/trunk/arch/mips/pci/pci-xlr.c index 172af1cd5867..1644805a6730 100644 --- a/trunk/arch/mips/pci/pci-xlr.c +++ b/trunk/arch/mips/pci/pci-xlr.c @@ -41,7 +41,6 @@ #include #include #include -#include #include @@ -157,55 +156,35 @@ struct pci_controller nlm_pci_controller = { .io_offset = 0x00000000UL, }; -/* - * The top level PCIe links on the XLS PCIe controller appear as - * bridges. Given a device, this function finds which link it is - * on. - */ -static struct pci_dev *xls_get_pcie_link(const struct pci_dev *dev) -{ - struct pci_bus *bus, *p; - - /* Find the bridge on bus 0 */ - bus = dev->bus; - for (p = bus->parent; p && p->number != 0; p = p->parent) - bus = p; - - return p ? 
bus->self : NULL; -} - static int get_irq_vector(const struct pci_dev *dev) { - struct pci_dev *lnk; - if (!nlm_chip_is_xls()) - return PIC_PCIX_IRQ; /* for XLR just one IRQ */ + return PIC_PCIX_IRQ; /* for XLR just one IRQ*/ /* * For XLS PCIe, there is an IRQ per Link, find out which * link the device is on to assign interrupts - */ - lnk = xls_get_pcie_link(dev); - if (lnk == NULL) + */ + if (dev->bus->self == NULL) return 0; - switch (PCI_SLOT(lnk->devfn)) { - case 0: + switch (dev->bus->self->devfn) { + case 0x0: return PIC_PCIE_LINK0_IRQ; - case 1: + case 0x8: return PIC_PCIE_LINK1_IRQ; - case 2: + case 0x10: if (nlm_chip_is_xls_b()) return PIC_PCIE_XLSB0_LINK2_IRQ; else return PIC_PCIE_LINK2_IRQ; - case 3: + case 0x18: if (nlm_chip_is_xls_b()) return PIC_PCIE_XLSB0_LINK3_IRQ; else return PIC_PCIE_LINK3_IRQ; } - WARN(1, "Unexpected devfn %d\n", lnk->devfn); + WARN(1, "Unexpected devfn %d\n", dev->bus->self->devfn); return 0; } @@ -223,27 +202,7 @@ void arch_teardown_msi_irq(unsigned int irq) int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) { struct msi_msg msg; - struct pci_dev *lnk; int irq, ret; - u16 val; - - /* MSI not supported on XLR */ - if (!nlm_chip_is_xls()) - return 1; - - /* - * Enable MSI on the XLS PCIe controller bridge which was disabled - * at enumeration, the bridge MSI capability is at 0x50 - */ - lnk = xls_get_pcie_link(dev); - if (lnk == NULL) - return 1; - - pci_read_config_word(lnk, 0x50 + PCI_MSI_FLAGS, &val); - if ((val & PCI_MSI_FLAGS_ENABLE) == 0) { - val |= PCI_MSI_FLAGS_ENABLE; - pci_write_config_word(lnk, 0x50 + PCI_MSI_FLAGS, val); - } irq = get_irq_vector(dev); if (irq <= 0) @@ -368,7 +327,7 @@ static int __init pcibios_init(void) } } else { /* XLR PCI controller ACK */ - irq_set_handler_data(PIC_PCIX_IRQ, xlr_pci_ack); + irq_set_handler_data(PIC_PCIE_XLSB0_LINK3_IRQ, xlr_pci_ack); } return 0; diff --git a/trunk/arch/mips/pmc-sierra/yosemite/smp.c b/trunk/arch/mips/pmc-sierra/yosemite/smp.c index 5edab2bc6fc0..b71fae231049 100644 --- a/trunk/arch/mips/pmc-sierra/yosemite/smp.c +++ b/trunk/arch/mips/pmc-sierra/yosemite/smp.c @@ -115,11 +115,11 @@ static void yos_send_ipi_mask(const struct cpumask *mask, unsigned int action) */ static void __cpuinit yos_init_secondary(void) { + set_c0_status(ST0_CO | ST0_IE | ST0_IM); } static void __cpuinit yos_smp_finish(void) { - set_c0_status(ST0_CO | ST0_IM | ST0_IE); } /* Hook for after all CPUs are online */ diff --git a/trunk/arch/mips/powertv/asic/asic-calliope.c b/trunk/arch/mips/powertv/asic/asic-calliope.c index 7773f3d956b0..0a170e0ffeaa 100644 --- a/trunk/arch/mips/powertv/asic/asic-calliope.c +++ b/trunk/arch/mips/powertv/asic/asic-calliope.c @@ -28,7 +28,7 @@ #define CALLIOPE_ADDR(x) (CALLIOPE_IO_BASE + (x)) -const struct register_map calliope_register_map __initconst = { +const struct register_map calliope_register_map __initdata = { .eic_slow0_strt_add = {.phys = CALLIOPE_ADDR(0x800000)}, .eic_cfg_bits = {.phys = CALLIOPE_ADDR(0x800038)}, .eic_ready_status = {.phys = CALLIOPE_ADDR(0x80004c)}, diff --git a/trunk/arch/mips/powertv/asic/asic-cronus.c b/trunk/arch/mips/powertv/asic/asic-cronus.c index da076db7b7ed..bbc0c122be5e 100644 --- a/trunk/arch/mips/powertv/asic/asic-cronus.c +++ b/trunk/arch/mips/powertv/asic/asic-cronus.c @@ -28,7 +28,7 @@ #define CRONUS_ADDR(x) (CRONUS_IO_BASE + (x)) -const struct register_map cronus_register_map __initconst = { +const struct register_map cronus_register_map __initdata = { .eic_slow0_strt_add = {.phys = CRONUS_ADDR(0x000000)}, .eic_cfg_bits = 
{.phys = CRONUS_ADDR(0x000038)}, .eic_ready_status = {.phys = CRONUS_ADDR(0x00004C)}, diff --git a/trunk/arch/mips/powertv/asic/asic-gaia.c b/trunk/arch/mips/powertv/asic/asic-gaia.c index 47683b370e74..91dda682752c 100644 --- a/trunk/arch/mips/powertv/asic/asic-gaia.c +++ b/trunk/arch/mips/powertv/asic/asic-gaia.c @@ -23,7 +23,7 @@ #include #include -const struct register_map gaia_register_map __initconst = { +const struct register_map gaia_register_map __initdata = { .eic_slow0_strt_add = {.phys = GAIA_IO_BASE + 0x000000}, .eic_cfg_bits = {.phys = GAIA_IO_BASE + 0x000038}, .eic_ready_status = {.phys = GAIA_IO_BASE + 0x00004C}, diff --git a/trunk/arch/mips/powertv/asic/asic-zeus.c b/trunk/arch/mips/powertv/asic/asic-zeus.c index 6ff4b10f09da..4a05bb096476 100644 --- a/trunk/arch/mips/powertv/asic/asic-zeus.c +++ b/trunk/arch/mips/powertv/asic/asic-zeus.c @@ -28,7 +28,7 @@ #define ZEUS_ADDR(x) (ZEUS_IO_BASE + (x)) -const struct register_map zeus_register_map __initconst = { +const struct register_map zeus_register_map __initdata = { .eic_slow0_strt_add = {.phys = ZEUS_ADDR(0x000000)}, .eic_cfg_bits = {.phys = ZEUS_ADDR(0x000038)}, .eic_ready_status = {.phys = ZEUS_ADDR(0x00004c)}, diff --git a/trunk/arch/mips/txx9/generic/pci.c b/trunk/arch/mips/txx9/generic/pci.c index 64eb71b15280..682efb0c108d 100644 --- a/trunk/arch/mips/txx9/generic/pci.c +++ b/trunk/arch/mips/txx9/generic/pci.c @@ -269,7 +269,7 @@ txx9_i8259_irq_setup(int irq) return err; } -static void __devinit quirk_slc90e66_bridge(struct pci_dev *dev) +static void __init quirk_slc90e66_bridge(struct pci_dev *dev) { int irq; /* PCI/ISA Bridge interrupt */ u8 reg_64; diff --git a/trunk/arch/mn10300/kernel/signal.c b/trunk/arch/mn10300/kernel/signal.c index 4d584ae29ae1..6ab0bee2a54f 100644 --- a/trunk/arch/mn10300/kernel/signal.c +++ b/trunk/arch/mn10300/kernel/signal.c @@ -459,11 +459,10 @@ static int handle_signal(int sig, else ret = setup_frame(sig, ka, oldset, regs); if (ret) - return ret; + return; signal_delivered(sig, info, ka, regs, - test_thread_flag(TIF_SINGLESTEP)); - return 0; + test_thread_flag(TIF_SINGLESTEP)); } /* diff --git a/trunk/arch/mn10300/kernel/smp.c b/trunk/arch/mn10300/kernel/smp.c index e62c223e4c45..090d35d36973 100644 --- a/trunk/arch/mn10300/kernel/smp.c +++ b/trunk/arch/mn10300/kernel/smp.c @@ -876,7 +876,9 @@ static void __init smp_online(void) notify_cpu_starting(cpu); + ipi_call_lock(); set_cpu_online(cpu, true); + ipi_call_unlock(); local_irq_enable(); } diff --git a/trunk/arch/parisc/kernel/smp.c b/trunk/arch/parisc/kernel/smp.c index 6266730efd61..a47828d31fe6 100644 --- a/trunk/arch/parisc/kernel/smp.c +++ b/trunk/arch/parisc/kernel/smp.c @@ -300,7 +300,9 @@ smp_cpu_init(int cpunum) notify_cpu_starting(cpunum); + ipi_call_lock(); set_cpu_online(cpunum, true); + ipi_call_unlock(); /* Initialise the idle task for this CPU */ atomic_inc(&init_mm.mm_count); diff --git a/trunk/arch/powerpc/kernel/smp.c b/trunk/arch/powerpc/kernel/smp.c index e1417c42155c..e4cb34322de4 100644 --- a/trunk/arch/powerpc/kernel/smp.c +++ b/trunk/arch/powerpc/kernel/smp.c @@ -571,6 +571,7 @@ void __devinit start_secondary(void *unused) if (system_state == SYSTEM_RUNNING) vdso_data->processorCount++; #endif + ipi_call_lock(); notify_cpu_starting(cpu); set_cpu_online(cpu, true); /* Update sibling maps */ @@ -600,6 +601,7 @@ void __devinit start_secondary(void *unused) of_node_put(np); } of_node_put(l2_cache); + ipi_call_unlock(); local_irq_enable(); diff --git a/trunk/arch/powerpc/platforms/cell/spufs/inode.c 
b/trunk/arch/powerpc/platforms/cell/spufs/inode.c index d544d7816df3..66519d263da7 100644 --- a/trunk/arch/powerpc/platforms/cell/spufs/inode.c +++ b/trunk/arch/powerpc/platforms/cell/spufs/inode.c @@ -317,23 +317,28 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags, return ret; } -static int spufs_context_open(struct path *path) +static int spufs_context_open(struct dentry *dentry, struct vfsmount *mnt) { int ret; struct file *filp; ret = get_unused_fd(); - if (ret < 0) - return ret; + if (ret < 0) { + dput(dentry); + mntput(mnt); + goto out; + } - filp = dentry_open(path, O_RDONLY, current_cred()); + filp = dentry_open(dentry, mnt, O_RDONLY, current_cred()); if (IS_ERR(filp)) { put_unused_fd(ret); - return PTR_ERR(filp); + ret = PTR_ERR(filp); + goto out; } filp->f_op = &spufs_context_fops; fd_install(ret, filp); +out: return ret; } @@ -448,7 +453,6 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, int affinity; struct spu_gang *gang; struct spu_context *neighbor; - struct path path = {.mnt = mnt, .dentry = dentry}; ret = -EPERM; if ((flags & SPU_CREATE_NOSCHED) && @@ -491,7 +495,11 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, put_spu_context(neighbor); } - ret = spufs_context_open(&path); + /* + * get references for dget and mntget, will be released + * in error path of *_open(). + */ + ret = spufs_context_open(dget(dentry), mntget(mnt)); if (ret < 0) { WARN_ON(spufs_rmdir(inode, dentry)); if (affinity) @@ -548,27 +556,28 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode) return ret; } -static int spufs_gang_open(struct path *path) +static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt) { int ret; struct file *filp; ret = get_unused_fd(); - if (ret < 0) - return ret; + if (ret < 0) { + dput(dentry); + mntput(mnt); + goto out; + } - /* - * get references for dget and mntget, will be released - * in error path of *_open(). - */ - filp = dentry_open(path, O_RDONLY, current_cred()); + filp = dentry_open(dentry, mnt, O_RDONLY, current_cred()); if (IS_ERR(filp)) { put_unused_fd(ret); - return PTR_ERR(filp); + ret = PTR_ERR(filp); + goto out; } filp->f_op = &simple_dir_operations; fd_install(ret, filp); +out: return ret; } @@ -576,14 +585,17 @@ static int spufs_create_gang(struct inode *inode, struct dentry *dentry, struct vfsmount *mnt, umode_t mode) { - struct path path = {.mnt = mnt, .dentry = dentry}; int ret; ret = spufs_mkgang(inode, dentry, mode & S_IRWXUGO); if (ret) goto out; - ret = spufs_gang_open(&path); + /* + * get references for dget and mntget, will be released + * in error path of *_open(). 
+ */ + ret = spufs_gang_open(dget(dentry), mntget(mnt)); if (ret < 0) { int err = simple_rmdir(inode, dentry); WARN_ON(err); diff --git a/trunk/arch/s390/kernel/smp.c b/trunk/arch/s390/kernel/smp.c index 8dca9c248ac7..15cca26ccb6c 100644 --- a/trunk/arch/s390/kernel/smp.c +++ b/trunk/arch/s390/kernel/smp.c @@ -717,7 +717,9 @@ static void __cpuinit smp_start_secondary(void *cpuvoid) init_cpu_vtimer(); pfault_init(); notify_cpu_starting(smp_processor_id()); + ipi_call_lock(); set_cpu_online(smp_processor_id(), true); + ipi_call_unlock(); local_irq_enable(); /* cpu_idle will call schedule for us */ cpu_idle(); diff --git a/trunk/arch/sparc/kernel/smp_64.c b/trunk/arch/sparc/kernel/smp_64.c index 781bcb10b8bd..f591598d92f6 100644 --- a/trunk/arch/sparc/kernel/smp_64.c +++ b/trunk/arch/sparc/kernel/smp_64.c @@ -103,6 +103,8 @@ void __cpuinit smp_callin(void) if (cheetah_pcache_forced_on) cheetah_enable_pcache(); + local_irq_enable(); + callin_flag = 1; __asm__ __volatile__("membar #Sync\n\t" "flush %%g6" : : : "memory"); @@ -122,8 +124,9 @@ void __cpuinit smp_callin(void) while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) rmb(); + ipi_call_lock_irq(); set_cpu_online(cpuid, true); - local_irq_enable(); + ipi_call_unlock_irq(); /* idle thread is expected to have preempt disabled */ preempt_disable(); @@ -1305,7 +1308,9 @@ int __cpu_disable(void) mdelay(1); local_irq_disable(); + ipi_call_lock(); set_cpu_online(cpu, false); + ipi_call_unlock(); cpu_map_rebuild(); diff --git a/trunk/arch/tile/kernel/smpboot.c b/trunk/arch/tile/kernel/smpboot.c index e686c5ac90be..84873fbe8f27 100644 --- a/trunk/arch/tile/kernel/smpboot.c +++ b/trunk/arch/tile/kernel/smpboot.c @@ -198,7 +198,17 @@ void __cpuinit online_secondary(void) notify_cpu_starting(smp_processor_id()); + /* + * We need to hold call_lock, so there is no inconsistency + * between the time smp_call_function() determines number of + * IPI recipients, and the time when the determination is made + * for which cpus receive the IPI. Holding this + * lock helps us to not include this cpu in a currently in progress + * smp_call_function(). + */ + ipi_call_lock(); set_cpu_online(smp_processor_id(), 1); + ipi_call_unlock(); __get_cpu_var(cpu_state) = CPU_ONLINE; /* Set up tile-specific state for this cpu. */ diff --git a/trunk/arch/x86/Makefile b/trunk/arch/x86/Makefile index b0c5276861ec..1f2521434554 100644 --- a/trunk/arch/x86/Makefile +++ b/trunk/arch/x86/Makefile @@ -49,9 +49,6 @@ else KBUILD_AFLAGS += -m64 KBUILD_CFLAGS += -m64 - # Use -mpreferred-stack-boundary=3 if supported. 
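/*
 * Editor's sketch, not part of the patch: the SMP hunks above (hexagon,
 * ia64, mn10300, parisc, powerpc, s390, sparc, tile) all add the same
 * bracket around marking a secondary CPU online, so that a concurrent
 * smp_call_function() cannot count the CPU as online without also
 * sending it the IPI (see the tile comment above).  The function name
 * and cpu argument below are illustrative only.
 */
#include <linux/cpu.h>
#include <linux/irqflags.h>
#include <linux/smp.h>

static void demo_secondary_start(unsigned int cpu)
{
	notify_cpu_starting(cpu);

	ipi_call_lock();		/* exclude in-flight smp_call_function() */
	set_cpu_online(cpu, true);	/* now visible to the IPI path */
	ipi_call_unlock();

	local_irq_enable();		/* interrupts only after the bracket */
}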
- KBUILD_CFLAGS += $(call cc-option,-mno-sse -mpreferred-stack-boundary=3) - # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu) cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8) cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona) diff --git a/trunk/arch/x86/include/asm/alternative.h b/trunk/arch/x86/include/asm/alternative.h index 70780689599a..49331bedc158 100644 --- a/trunk/arch/x86/include/asm/alternative.h +++ b/trunk/arch/x86/include/asm/alternative.h @@ -75,54 +75,23 @@ static inline int alternatives_text_reserved(void *start, void *end) } #endif /* CONFIG_SMP */ -#define OLDINSTR(oldinstr) "661:\n\t" oldinstr "\n662:\n" - -#define b_replacement(number) "663"#number -#define e_replacement(number) "664"#number - -#define alt_slen "662b-661b" -#define alt_rlen(number) e_replacement(number)"f-"b_replacement(number)"f" - -#define ALTINSTR_ENTRY(feature, number) \ - " .long 661b - .\n" /* label */ \ - " .long " b_replacement(number)"f - .\n" /* new instruction */ \ - " .word " __stringify(feature) "\n" /* feature bit */ \ - " .byte " alt_slen "\n" /* source len */ \ - " .byte " alt_rlen(number) "\n" /* replacement len */ - -#define DISCARD_ENTRY(number) /* rlen <= slen */ \ - " .byte 0xff + (" alt_rlen(number) ") - (" alt_slen ")\n" - -#define ALTINSTR_REPLACEMENT(newinstr, feature, number) /* replacement */ \ - b_replacement(number)":\n\t" newinstr "\n" e_replacement(number) ":\n\t" - /* alternative assembly primitive: */ #define ALTERNATIVE(oldinstr, newinstr, feature) \ - OLDINSTR(oldinstr) \ - ".section .altinstructions,\"a\"\n" \ - ALTINSTR_ENTRY(feature, 1) \ - ".previous\n" \ - ".section .discard,\"aw\",@progbits\n" \ - DISCARD_ENTRY(1) \ - ".previous\n" \ - ".section .altinstr_replacement, \"ax\"\n" \ - ALTINSTR_REPLACEMENT(newinstr, feature, 1) \ - ".previous" - -#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\ - OLDINSTR(oldinstr) \ - ".section .altinstructions,\"a\"\n" \ - ALTINSTR_ENTRY(feature1, 1) \ - ALTINSTR_ENTRY(feature2, 2) \ - ".previous\n" \ - ".section .discard,\"aw\",@progbits\n" \ - DISCARD_ENTRY(1) \ - DISCARD_ENTRY(2) \ - ".previous\n" \ - ".section .altinstr_replacement, \"ax\"\n" \ - ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \ - ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ - ".previous" + \ + "661:\n\t" oldinstr "\n662:\n" \ + ".section .altinstructions,\"a\"\n" \ + " .long 661b - .\n" /* label */ \ + " .long 663f - .\n" /* new instruction */ \ + " .word " __stringify(feature) "\n" /* feature bit */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .discard,\"aw\",@progbits\n" \ + " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \ + ".previous\n" \ + ".section .altinstr_replacement, \"ax\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */ \ + ".previous" /* * This must be included *after* the definition of ALTERNATIVE due to @@ -170,19 +139,6 @@ static inline int alternatives_text_reserved(void *start, void *end) asm volatile (ALTERNATIVE("call %P[old]", "call %P[new]", feature) \ : output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input) -/* - * Like alternative_call, but there are two features and respective functions. - * If CPU has feature2, function2 is used. - * Otherwise, if CPU has feature1, function1 is used. - * Otherwise, old function is used. - */ -#define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2, \ - output, input...) 
\ - asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\ - "call %P[new2]", feature2) \ - : output : [old] "i" (oldfunc), [new1] "i" (newfunc1), \ - [new2] "i" (newfunc2), ## input) - /* * use this macro(s) if you need more than one output parameter * in alternative_io diff --git a/trunk/arch/x86/include/asm/amd_nb.h b/trunk/arch/x86/include/asm/amd_nb.h index b3341e9cd8fd..49ad773f4b9f 100644 --- a/trunk/arch/x86/include/asm/amd_nb.h +++ b/trunk/arch/x86/include/asm/amd_nb.h @@ -26,31 +26,10 @@ struct amd_l3_cache { u8 subcaches[4]; }; -struct threshold_block { - unsigned int block; - unsigned int bank; - unsigned int cpu; - u32 address; - u16 interrupt_enable; - bool interrupt_capable; - u16 threshold_limit; - struct kobject kobj; - struct list_head miscj; -}; - -struct threshold_bank { - struct kobject *kobj; - struct threshold_block *blocks; - - /* initialized to the number of CPUs on the node sharing this bank */ - atomic_t cpus; -}; - struct amd_northbridge { struct pci_dev *misc; struct pci_dev *link; struct amd_l3_cache l3_cache; - struct threshold_bank *bank4; }; struct amd_northbridge_info { diff --git a/trunk/arch/x86/include/asm/apic.h b/trunk/arch/x86/include/asm/apic.h index 88093c1d44fd..eaff4790ed96 100644 --- a/trunk/arch/x86/include/asm/apic.h +++ b/trunk/arch/x86/include/asm/apic.h @@ -306,8 +306,7 @@ struct apic { unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid); unsigned long (*check_apicid_present)(int apicid); - void (*vector_allocation_domain)(int cpu, struct cpumask *retmask, - const struct cpumask *mask); + void (*vector_allocation_domain)(int cpu, struct cpumask *retmask); void (*init_apic_ldr)(void); void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap); @@ -332,9 +331,9 @@ struct apic { unsigned long (*set_apic_id)(unsigned int id); unsigned long apic_id_mask; - int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, - const struct cpumask *andmask, - unsigned int *apicid); + unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask); + unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, + const struct cpumask *andmask); /* ipi */ void (*send_IPI_mask)(const struct cpumask *mask, int vector); @@ -538,11 +537,6 @@ static inline const struct cpumask *default_target_cpus(void) #endif } -static inline const struct cpumask *online_target_cpus(void) -{ - return cpu_online_mask; -} - DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); @@ -592,50 +586,21 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb) #endif -static inline int -flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask, - unsigned int *apicid) +static inline unsigned int +default_cpu_mask_to_apicid(const struct cpumask *cpumask) { - unsigned long cpu_mask = cpumask_bits(cpumask)[0] & - cpumask_bits(andmask)[0] & - cpumask_bits(cpu_online_mask)[0] & - APIC_ALL_CPUS; - - if (likely(cpu_mask)) { - *apicid = (unsigned int)cpu_mask; - return 0; - } else { - return -EINVAL; - } + return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS; } -extern int +static inline unsigned int default_cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask, - unsigned int *apicid); - -static inline void -flat_vector_allocation_domain(int cpu, struct cpumask *retmask, - const struct cpumask *mask) + const struct cpumask *andmask) { - /* Careful. 
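Whichever form of ALTERNATIVE() is in place, the factored OLDINSTR()/ALTINSTR_ENTRY()/DISCARD_ENTRY() macros on the removed side or the single asm string on the added side, every patch site drops one fixed-size record into .altinstructions. Written out as a C structure (field names follow struct alt_instr of this kernel generation; the layout below is a sketch, not a copy of the header), the five directives map to:

#include <stdint.h>

struct alt_instr_sketch {
	int32_t  instr_offset;		/* .long 661b - .  : offset of original insns */
	int32_t  repl_offset;		/* .long 663f - .  : offset of replacement    */
	uint16_t cpuid;			/* .word feature   : X86_FEATURE_* bit        */
	uint8_t  instrlen;		/* .byte 662b-661b : length of original       */
	uint8_t  replacementlen;	/* .byte 664f-663f : length of replacement    */
};

The .discard entry's ".byte 0xff + (replacement len) - (source len)" is a build-time assertion: if the replacement is longer than the code it overwrites, the byte value exceeds 0xff and the assembler refuses to emit it.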
Some cpus do not strictly honor the set of cpus - * specified in the interrupt destination when using lowest - * priority interrupt delivery mode. - * - * In particular there was a hyperthreading cpu observed to - * deliver interrupts to the wrong hyperthread when only one - * hyperthread was specified in the interrupt desitination. - */ - cpumask_clear(retmask); - cpumask_bits(retmask)[0] = APIC_ALL_CPUS; -} + unsigned long mask1 = cpumask_bits(cpumask)[0]; + unsigned long mask2 = cpumask_bits(andmask)[0]; + unsigned long mask3 = cpumask_bits(cpu_online_mask)[0]; -static inline void -default_vector_allocation_domain(int cpu, struct cpumask *retmask, - const struct cpumask *mask) -{ - cpumask_copy(retmask, cpumask_of(cpu)); + return (unsigned int)(mask1 & mask2 & mask3); } static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid) diff --git a/trunk/arch/x86/include/asm/emergency-restart.h b/trunk/arch/x86/include/asm/emergency-restart.h index 75ce3f47d204..cc70c1c78ca4 100644 --- a/trunk/arch/x86/include/asm/emergency-restart.h +++ b/trunk/arch/x86/include/asm/emergency-restart.h @@ -4,7 +4,9 @@ enum reboot_type { BOOT_TRIPLE = 't', BOOT_KBD = 'k', +#ifdef CONFIG_X86_32 BOOT_BIOS = 'b', +#endif BOOT_ACPI = 'a', BOOT_EFI = 'e', BOOT_CF9 = 'p', diff --git a/trunk/arch/x86/include/asm/floppy.h b/trunk/arch/x86/include/asm/floppy.h index d3d74698dce9..dbe82a5c5eac 100644 --- a/trunk/arch/x86/include/asm/floppy.h +++ b/trunk/arch/x86/include/asm/floppy.h @@ -99,7 +99,7 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id) virtual_dma_residue += virtual_dma_count; virtual_dma_count = 0; #ifdef TRACE_FLPY_INT - printk(KERN_DEBUG "count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n", + printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n", virtual_dma_count, virtual_dma_residue, calls, bytes, dma_wait); calls = 0; diff --git a/trunk/arch/x86/include/asm/kvm_host.h b/trunk/arch/x86/include/asm/kvm_host.h index 2da88c0cda14..db7c1f2709a2 100644 --- a/trunk/arch/x86/include/asm/kvm_host.h +++ b/trunk/arch/x86/include/asm/kvm_host.h @@ -313,8 +313,8 @@ struct kvm_pmu { u64 counter_bitmask[2]; u64 global_ctrl_mask; u8 version; - struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC]; - struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED]; + struct kvm_pmc gp_counters[X86_PMC_MAX_GENERIC]; + struct kvm_pmc fixed_counters[X86_PMC_MAX_FIXED]; struct irq_work irq_work; u64 reprogram_pmi; }; diff --git a/trunk/arch/x86/include/asm/msr.h b/trunk/arch/x86/include/asm/msr.h index 813ed103f45e..084ef95274cd 100644 --- a/trunk/arch/x86/include/asm/msr.h +++ b/trunk/arch/x86/include/asm/msr.h @@ -115,8 +115,8 @@ notrace static inline int native_write_msr_safe(unsigned int msr, extern unsigned long long native_read_tsc(void); -extern int rdmsr_safe_regs(u32 regs[8]); -extern int wrmsr_safe_regs(u32 regs[8]); +extern int native_rdmsr_safe_regs(u32 regs[8]); +extern int native_wrmsr_safe_regs(u32 regs[8]); static __always_inline unsigned long long __native_read_tsc(void) { @@ -187,6 +187,43 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p) return err; } +static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) +{ + u32 gprs[8] = { 0 }; + int err; + + gprs[1] = msr; + gprs[7] = 0x9c5a203a; + + err = native_rdmsr_safe_regs(gprs); + + *p = gprs[0] | ((u64)gprs[2] << 32); + + return err; +} + +static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val) +{ + u32 gprs[8] = { 0 }; + + gprs[0] = (u32)val; + gprs[1] = msr; + gprs[2] = val >> 
32; + gprs[7] = 0x9c5a203a; + + return native_wrmsr_safe_regs(gprs); +} + +static inline int rdmsr_safe_regs(u32 regs[8]) +{ + return native_rdmsr_safe_regs(regs); +} + +static inline int wrmsr_safe_regs(u32 regs[8]) +{ + return native_wrmsr_safe_regs(regs); +} + #define rdtscl(low) \ ((low) = (u32)__native_read_tsc()) @@ -200,8 +237,6 @@ do { \ (high) = (u32)(_l >> 32); \ } while (0) -#define rdpmcl(counter, val) ((val) = native_read_pmc(counter)) - #define rdtscp(low, high, aux) \ do { \ unsigned long long _val = native_read_tscp(&(aux)); \ @@ -213,7 +248,8 @@ do { \ #endif /* !CONFIG_PARAVIRT */ -#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \ + +#define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val), \ (u32)((val) >> 32)) #define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2)) diff --git a/trunk/arch/x86/include/asm/nmi.h b/trunk/arch/x86/include/asm/nmi.h index c0fa356e90de..dc580c42851c 100644 --- a/trunk/arch/x86/include/asm/nmi.h +++ b/trunk/arch/x86/include/asm/nmi.h @@ -44,14 +44,28 @@ struct nmiaction { const char *name; }; -#define register_nmi_handler(t, fn, fg, n, init...) \ +#define register_nmi_handler(t, fn, fg, n) \ ({ \ - static struct nmiaction init fn##_na = { \ + static struct nmiaction fn##_na = { \ .handler = (fn), \ .name = (n), \ .flags = (fg), \ }; \ - __register_nmi_handler((t), &fn##_na); \ + __register_nmi_handler((t), &fn##_na); \ +}) + +/* + * For special handlers that register/unregister in the + * init section only. This should be considered rare. + */ +#define register_nmi_handler_initonly(t, fn, fg, n) \ +({ \ + static struct nmiaction fn##_na __initdata = { \ + .handler = (fn), \ + .name = (n), \ + .flags = (fg), \ + }; \ + __register_nmi_handler((t), &fn##_na); \ }) int __register_nmi_handler(unsigned int, struct nmiaction *); diff --git a/trunk/arch/x86/include/asm/paravirt.h b/trunk/arch/x86/include/asm/paravirt.h index 0b47ddb6f00b..6cbbabf52707 100644 --- a/trunk/arch/x86/include/asm/paravirt.h +++ b/trunk/arch/x86/include/asm/paravirt.h @@ -128,11 +128,21 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err) return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err); } +static inline int paravirt_rdmsr_regs(u32 *regs) +{ + return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs); +} + static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high) { return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high); } +static inline int paravirt_wrmsr_regs(u32 *regs) +{ + return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs); +} + /* These should all do BUG_ON(_err), but our headers are too tangled. 
*/ #define rdmsr(msr, val1, val2) \ do { \ @@ -166,6 +176,9 @@ do { \ _err; \ }) +#define rdmsr_safe_regs(regs) paravirt_rdmsr_regs(regs) +#define wrmsr_safe_regs(regs) paravirt_wrmsr_regs(regs) + static inline int rdmsrl_safe(unsigned msr, unsigned long long *p) { int err; @@ -173,6 +186,32 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p) *p = paravirt_read_msr(msr, &err); return err; } +static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) +{ + u32 gprs[8] = { 0 }; + int err; + + gprs[1] = msr; + gprs[7] = 0x9c5a203a; + + err = paravirt_rdmsr_regs(gprs); + + *p = gprs[0] | ((u64)gprs[2] << 32); + + return err; +} + +static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val) +{ + u32 gprs[8] = { 0 }; + + gprs[0] = (u32)val; + gprs[1] = msr; + gprs[2] = val >> 32; + gprs[7] = 0x9c5a203a; + + return paravirt_wrmsr_regs(gprs); +} static inline u64 paravirt_read_tsc(void) { @@ -213,8 +252,6 @@ do { \ high = _l >> 32; \ } while (0) -#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter)) - static inline unsigned long long paravirt_rdtscp(unsigned int *aux) { return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux); diff --git a/trunk/arch/x86/include/asm/paravirt_types.h b/trunk/arch/x86/include/asm/paravirt_types.h index 8613cbb7ba41..8e8b9a4987ee 100644 --- a/trunk/arch/x86/include/asm/paravirt_types.h +++ b/trunk/arch/x86/include/asm/paravirt_types.h @@ -153,7 +153,9 @@ struct pv_cpu_ops { /* MSR, PMC and TSR operations. err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */ u64 (*read_msr)(unsigned int msr, int *err); + int (*rdmsr_regs)(u32 *regs); int (*write_msr)(unsigned int msr, unsigned low, unsigned high); + int (*wrmsr_regs)(u32 *regs); u64 (*read_tsc)(void); u64 (*read_pmc)(int counter); diff --git a/trunk/arch/x86/include/asm/pci_x86.h b/trunk/arch/x86/include/asm/pci_x86.h index 5ad24a89b19b..b3a531746026 100644 --- a/trunk/arch/x86/include/asm/pci_x86.h +++ b/trunk/arch/x86/include/asm/pci_x86.h @@ -7,13 +7,9 @@ #undef DEBUG #ifdef DEBUG -#define DBG(fmt, ...) printk(fmt, ##__VA_ARGS__) +#define DBG(x...) printk(x) #else -#define DBG(fmt, ...) \ -do { \ - if (0) \ - printk(fmt, ##__VA_ARGS__); \ -} while (0) +#define DBG(x...) 
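Both the native msr.h helpers and the paravirt wrappers above funnel the AMD-specific MSR access through an eight-entry register array: gprs[1] carries the MSR number (ECX), gprs[0] and gprs[2] the low and high halves of the value (EAX/EDX), and gprs[7] the 0x9c5a203a pass-through key (EDI). A user-space model of just the packing and unpacking; no real MSR is touched, and the MSR number is only an example value:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint64_t val = 0x1122334455667788ULL;
	uint32_t gprs[8] = { 0 };
	uint64_t back;

	gprs[0] = (uint32_t)val;		/* EAX: low 32 bits of the value  */
	gprs[1] = 0xc0010015;			/* ECX: MSR number (example only) */
	gprs[2] = (uint32_t)(val >> 32);	/* EDX: high 32 bits              */
	gprs[7] = 0x9c5a203a;			/* EDI: AMD pass-through key      */

	back = (uint64_t)gprs[0] | ((uint64_t)gprs[2] << 32);
	printf("round-trip %s: %#" PRIx64 "\n", back == val ? "ok" : "BAD", back);
	return 0;
}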
#endif #define PCI_PROBE_BIOS 0x0001 diff --git a/trunk/arch/x86/include/asm/perf_event.h b/trunk/arch/x86/include/asm/perf_event.h index c78f14a0df00..588f52ea810e 100644 --- a/trunk/arch/x86/include/asm/perf_event.h +++ b/trunk/arch/x86/include/asm/perf_event.h @@ -5,10 +5,11 @@ * Performance event hw details: */ -#define INTEL_PMC_MAX_GENERIC 32 -#define INTEL_PMC_MAX_FIXED 3 -#define INTEL_PMC_IDX_FIXED 32 +#define X86_PMC_MAX_GENERIC 32 +#define X86_PMC_MAX_FIXED 3 +#define X86_PMC_IDX_GENERIC 0 +#define X86_PMC_IDX_FIXED 32 #define X86_PMC_IDX_MAX 64 #define MSR_ARCH_PERFMON_PERFCTR0 0xc1 @@ -47,7 +48,8 @@ (X86_RAW_EVENT_MASK | \ AMD64_EVENTSEL_EVENT) #define AMD64_NUM_COUNTERS 4 -#define AMD64_NUM_COUNTERS_CORE 6 +#define AMD64_NUM_COUNTERS_F15H 6 +#define AMD64_NUM_COUNTERS_MAX AMD64_NUM_COUNTERS_F15H #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) @@ -119,16 +121,16 @@ struct x86_pmu_capability { /* Instr_Retired.Any: */ #define MSR_ARCH_PERFMON_FIXED_CTR0 0x309 -#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS (INTEL_PMC_IDX_FIXED + 0) +#define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0) /* CPU_CLK_Unhalted.Core: */ #define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a -#define INTEL_PMC_IDX_FIXED_CPU_CYCLES (INTEL_PMC_IDX_FIXED + 1) +#define X86_PMC_IDX_FIXED_CPU_CYCLES (X86_PMC_IDX_FIXED + 1) /* CPU_CLK_Unhalted.Ref: */ #define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b -#define INTEL_PMC_IDX_FIXED_REF_CYCLES (INTEL_PMC_IDX_FIXED + 2) -#define INTEL_PMC_MSK_FIXED_REF_CYCLES (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES) +#define X86_PMC_IDX_FIXED_REF_CYCLES (X86_PMC_IDX_FIXED + 2) +#define X86_PMC_MSK_FIXED_REF_CYCLES (1ULL << X86_PMC_IDX_FIXED_REF_CYCLES) /* * We model BTS tracing as another fixed-mode PMC. @@ -137,7 +139,7 @@ struct x86_pmu_capability { * values are used by actual fixed events and higher values are used * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr. 
*/ -#define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 16) +#define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16) /* * IBS cpuid feature detection @@ -232,7 +234,6 @@ struct perf_guest_switch_msr { extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr); extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap); -extern void perf_check_microcode(void); #else static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr) { @@ -246,7 +247,6 @@ static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap) } static inline void perf_events_lapic_init(void) { } -static inline void perf_check_microcode(void) { } #endif #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) diff --git a/trunk/arch/x86/include/asm/pgtable-2level.h b/trunk/arch/x86/include/asm/pgtable-2level.h index f2b489cf1602..98391db840c6 100644 --- a/trunk/arch/x86/include/asm/pgtable-2level.h +++ b/trunk/arch/x86/include/asm/pgtable-2level.h @@ -2,9 +2,9 @@ #define _ASM_X86_PGTABLE_2LEVEL_H #define pte_ERROR(e) \ - pr_err("%s:%d: bad pte %08lx\n", __FILE__, __LINE__, (e).pte_low) + printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low) #define pgd_ERROR(e) \ - pr_err("%s:%d: bad pgd %08lx\n", __FILE__, __LINE__, pgd_val(e)) + printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) /* * Certain architectures need to do special things when PTEs diff --git a/trunk/arch/x86/include/asm/pgtable-3level.h b/trunk/arch/x86/include/asm/pgtable-3level.h index 4cc9f2b7cdc3..cb00ccc7d571 100644 --- a/trunk/arch/x86/include/asm/pgtable-3level.h +++ b/trunk/arch/x86/include/asm/pgtable-3level.h @@ -9,13 +9,13 @@ */ #define pte_ERROR(e) \ - pr_err("%s:%d: bad pte %p(%08lx%08lx)\n", \ + printk("%s:%d: bad pte %p(%08lx%08lx).\n", \ __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low) #define pmd_ERROR(e) \ - pr_err("%s:%d: bad pmd %p(%016Lx)\n", \ + printk("%s:%d: bad pmd %p(%016Lx).\n", \ __FILE__, __LINE__, &(e), pmd_val(e)) #define pgd_ERROR(e) \ - pr_err("%s:%d: bad pgd %p(%016Lx)\n", \ + printk("%s:%d: bad pgd %p(%016Lx).\n", \ __FILE__, __LINE__, &(e), pgd_val(e)) /* Rules for using set_pte: the pte being assigned *must* be diff --git a/trunk/arch/x86/include/asm/pgtable_64.h b/trunk/arch/x86/include/asm/pgtable_64.h index 8251be02301e..975f709e09ae 100644 --- a/trunk/arch/x86/include/asm/pgtable_64.h +++ b/trunk/arch/x86/include/asm/pgtable_64.h @@ -26,16 +26,16 @@ extern pgd_t init_level4_pgt[]; extern void paging_init(void); #define pte_ERROR(e) \ - pr_err("%s:%d: bad pte %p(%016lx)\n", \ + printk("%s:%d: bad pte %p(%016lx).\n", \ __FILE__, __LINE__, &(e), pte_val(e)) #define pmd_ERROR(e) \ - pr_err("%s:%d: bad pmd %p(%016lx)\n", \ + printk("%s:%d: bad pmd %p(%016lx).\n", \ __FILE__, __LINE__, &(e), pmd_val(e)) #define pud_ERROR(e) \ - pr_err("%s:%d: bad pud %p(%016lx)\n", \ + printk("%s:%d: bad pud %p(%016lx).\n", \ __FILE__, __LINE__, &(e), pud_val(e)) #define pgd_ERROR(e) \ - pr_err("%s:%d: bad pgd %p(%016lx)\n", \ + printk("%s:%d: bad pgd %p(%016lx).\n", \ __FILE__, __LINE__, &(e), pgd_val(e)) struct mm_struct; diff --git a/trunk/arch/x86/include/asm/realmode.h b/trunk/arch/x86/include/asm/realmode.h index fe1ec5bcd846..fce3f4ae5bd6 100644 --- a/trunk/arch/x86/include/asm/realmode.h +++ b/trunk/arch/x86/include/asm/realmode.h @@ -21,9 +21,8 @@ struct real_mode_header { u32 wakeup_header; #endif /* APM/BIOS reboot */ +#ifdef CONFIG_X86_32 u32 machine_real_restart_asm; -#ifdef CONFIG_X86_64 - u32 machine_real_restart_seg; #endif }; diff --git 
a/trunk/arch/x86/include/asm/reboot.h b/trunk/arch/x86/include/asm/reboot.h index a82c4f1b4d83..92f297069e87 100644 --- a/trunk/arch/x86/include/asm/reboot.h +++ b/trunk/arch/x86/include/asm/reboot.h @@ -18,8 +18,8 @@ extern struct machine_ops machine_ops; void native_machine_crash_shutdown(struct pt_regs *regs); void native_machine_shutdown(void); -void __noreturn machine_real_restart(unsigned int type); -/* These must match dispatch in arch/x86/realmore/rm/reboot.S */ +void machine_real_restart(unsigned int type); +/* These must match dispatch_table in reboot_32.S */ #define MRR_BIOS 0 #define MRR_APM 1 diff --git a/trunk/arch/x86/include/asm/smp.h b/trunk/arch/x86/include/asm/smp.h index 2ffa95dc2333..f48394513c37 100644 --- a/trunk/arch/x86/include/asm/smp.h +++ b/trunk/arch/x86/include/asm/smp.h @@ -169,6 +169,11 @@ void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle); void smp_store_cpu_info(int id); #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) +/* We don't mark CPUs online until __cpu_up(), so we need another measure */ +static inline int num_booting_cpus(void) +{ + return cpumask_weight(cpu_callout_mask); +} #else /* !CONFIG_SMP */ #define wbinvd_on_cpu(cpu) wbinvd() static inline int wbinvd_on_all_cpus(void) diff --git a/trunk/arch/x86/include/asm/uaccess_64.h b/trunk/arch/x86/include/asm/uaccess_64.h index d8def8b3dba0..8e796fbbf9c6 100644 --- a/trunk/arch/x86/include/asm/uaccess_64.h +++ b/trunk/arch/x86/include/asm/uaccess_64.h @@ -17,8 +17,6 @@ /* Handles exceptions in both to and from, but doesn't do access_ok */ __must_check unsigned long -copy_user_enhanced_fast_string(void *to, const void *from, unsigned len); -__must_check unsigned long copy_user_generic_string(void *to, const void *from, unsigned len); __must_check unsigned long copy_user_generic_unrolled(void *to, const void *from, unsigned len); @@ -28,16 +26,9 @@ copy_user_generic(void *to, const void *from, unsigned len) { unsigned ret; - /* - * If CPU has ERMS feature, use copy_user_enhanced_fast_string. - * Otherwise, if CPU has rep_good feature, use copy_user_generic_string. - * Otherwise, use copy_user_generic_unrolled. 
- */ - alternative_call_2(copy_user_generic_unrolled, + alternative_call(copy_user_generic_unrolled, copy_user_generic_string, X86_FEATURE_REP_GOOD, - copy_user_enhanced_fast_string, - X86_FEATURE_ERMS, ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from), "=d" (len)), "1" (to), "2" (from), "3" (len) diff --git a/trunk/arch/x86/include/asm/uprobes.h b/trunk/arch/x86/include/asm/uprobes.h index f3971bbcd1de..1e9bed14f7ae 100644 --- a/trunk/arch/x86/include/asm/uprobes.h +++ b/trunk/arch/x86/include/asm/uprobes.h @@ -48,7 +48,7 @@ struct arch_uprobe_task { #endif }; -extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr); +extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm); extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs); extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs); extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk); diff --git a/trunk/arch/x86/include/asm/uv/uv_bau.h b/trunk/arch/x86/include/asm/uv/uv_bau.h index a06983cdc125..6149b476d9df 100644 --- a/trunk/arch/x86/include/asm/uv/uv_bau.h +++ b/trunk/arch/x86/include/asm/uv/uv_bau.h @@ -140,9 +140,6 @@ #define IPI_RESET_LIMIT 1 /* after this # consecutive successes, bump up the throttle if it was lowered */ #define COMPLETE_THRESHOLD 5 -/* after this # of giveups (fall back to kernel IPI's) disable the use of - the BAU for a period of time */ -#define GIVEUP_LIMIT 100 #define UV_LB_SUBNODEID 0x10 @@ -169,6 +166,7 @@ #define FLUSH_RETRY_TIMEOUT 2 #define FLUSH_GIVEUP 3 #define FLUSH_COMPLETE 4 +#define FLUSH_RETRY_BUSYBUG 5 /* * tuning the action when the numalink network is extremely delayed @@ -177,7 +175,7 @@ microseconds */ #define CONGESTED_REPS 10 /* long delays averaged over this many broadcasts */ -#define DISABLED_PERIOD 10 /* time for the bau to be +#define CONGESTED_PERIOD 30 /* time for the bau to be disabled, in seconds */ /* see msg_type: */ #define MSG_NOOP 0 @@ -522,12 +520,6 @@ struct ptc_stats { unsigned long s_uv2_wars; /* uv2 workaround, perm. 
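The comment removed from copy_user_generic() above documents a three-way pick: enhanced REP MOVSB/STOSB if the CPU has ERMS, the REP-string copy if it reports rep_good, and the unrolled copy otherwise. The hunk collapses this back to a two-way alternative_call(). The selection is nothing more than a priority order over CPU features; roughly, with stand-in flags and memcpy() in place of the real copy routines:

#include <stdio.h>
#include <string.h>

typedef unsigned long (*copy_fn)(void *to, const void *from, unsigned len);

static unsigned long copy_unrolled(void *to, const void *from, unsigned len)
{ memcpy(to, from, len); return 0; }
static unsigned long copy_rep_string(void *to, const void *from, unsigned len)
{ memcpy(to, from, len); return 0; }
static unsigned long copy_erms(void *to, const void *from, unsigned len)
{ memcpy(to, from, len); return 0; }

static copy_fn pick_copy_user(int has_rep_good, int has_erms)
{
	if (has_erms)
		return copy_erms;	/* X86_FEATURE_ERMS path     */
	if (has_rep_good)
		return copy_rep_string;	/* X86_FEATURE_REP_GOOD path */
	return copy_unrolled;		/* fallback                  */
}

int main(void)
{
	char src[] = "hello", dst[sizeof(src)];
	copy_fn f = pick_copy_user(1, 0);

	f(dst, src, sizeof(src));
	printf("copied \"%s\" via %s\n", dst,
	       f == copy_rep_string ? "rep-string path" : "another path");
	return 0;
}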
busy */ unsigned long s_uv2_wars_hw; /* uv2 workaround, hiwater */ unsigned long s_uv2_war_waits; /* uv2 workaround, long waits */ - unsigned long s_overipilimit; /* over the ipi reset limit */ - unsigned long s_giveuplimit; /* disables, over giveup limit*/ - unsigned long s_enters; /* entries to the driver */ - unsigned long s_ipifordisabled; /* fall back to IPI; disabled */ - unsigned long s_plugged; /* plugged by h/w bug*/ - unsigned long s_congested; /* giveup on long wait */ /* destination statistics */ unsigned long d_alltlb; /* times all tlb's on this cpu were flushed */ @@ -594,8 +586,8 @@ struct bau_control { int timeout_tries; int ipi_attempts; int conseccompletes; - short nobau; - short baudisabled; + int baudisabled; + int set_bau_off; short cpu; short osnode; short uvhub_cpu; @@ -604,16 +596,14 @@ struct bau_control { short cpus_in_socket; short cpus_in_uvhub; short partition_base_pnode; - short busy; /* all were busy (war) */ + short using_desc; /* an index, like uvhub_cpu */ + unsigned int inuse_map; unsigned short message_number; unsigned short uvhub_quiesce; short socket_acknowledge_count[DEST_Q_SIZE]; cycles_t send_message; - cycles_t period_end; - cycles_t period_time; spinlock_t uvhub_lock; spinlock_t queue_lock; - spinlock_t disable_lock; /* tunables */ int max_concurr; int max_concurr_const; @@ -624,9 +614,9 @@ struct bau_control { int complete_threshold; int cong_response_us; int cong_reps; - cycles_t disabled_period; - int period_giveups; - int giveup_limit; + int cong_period; + unsigned long clocks_per_100_usec; + cycles_t period_time; long period_requests; struct hub_and_pnode *thp; }; diff --git a/trunk/arch/x86/include/asm/x2apic.h b/trunk/arch/x86/include/asm/x2apic.h index f90f0a587c66..92e54abf89e0 100644 --- a/trunk/arch/x86/include/asm/x2apic.h +++ b/trunk/arch/x86/include/asm/x2apic.h @@ -9,6 +9,15 @@ #include #include +/* + * Need to use more than cpu 0, because we need more vectors + * when MSI-X are used. + */ +static const struct cpumask *x2apic_target_cpus(void) +{ + return cpu_online_mask; +} + static int x2apic_apic_id_valid(int apicid) { return 1; @@ -19,6 +28,15 @@ static int x2apic_apic_id_registered(void) return 1; } +/* + * For now each logical cpu is in its own vector allocation domain. + */ +static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask) +{ + cpumask_clear(retmask); + cpumask_set_cpu(cpu, retmask); +} + static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest) { diff --git a/trunk/arch/x86/include/asm/x86_init.h b/trunk/arch/x86/include/asm/x86_init.h index 38155f667144..c090af10ac7d 100644 --- a/trunk/arch/x86/include/asm/x86_init.h +++ b/trunk/arch/x86/include/asm/x86_init.h @@ -156,6 +156,7 @@ struct x86_cpuinit_ops { /** * struct x86_platform_ops - platform specific runtime functions * @calibrate_tsc: calibrate TSC + * @wallclock_init: init the wallclock device * @get_wallclock: get time from HW clock like RTC etc. 
* @set_wallclock: set time back to HW clock * @is_untracked_pat_range exclude from PAT logic @@ -163,10 +164,10 @@ struct x86_cpuinit_ops { * @i8042_detect pre-detect if i8042 controller exists * @save_sched_clock_state: save state for sched_clock() on suspend * @restore_sched_clock_state: restore state for sched_clock() on resume - * @apic_post_init: adjust apic if neeeded */ struct x86_platform_ops { unsigned long (*calibrate_tsc)(void); + void (*wallclock_init)(void); unsigned long (*get_wallclock)(void); int (*set_wallclock)(unsigned long nowtime); void (*iommu_shutdown)(void); @@ -176,7 +177,6 @@ struct x86_platform_ops { int (*i8042_detect)(void); void (*save_sched_clock_state)(void); void (*restore_sched_clock_state)(void); - void (*apic_post_init)(void); }; struct pci_dev; diff --git a/trunk/arch/x86/kernel/alternative.c b/trunk/arch/x86/kernel/alternative.c index 931280ff8299..1f84794f0759 100644 --- a/trunk/arch/x86/kernel/alternative.c +++ b/trunk/arch/x86/kernel/alternative.c @@ -1,5 +1,3 @@ -#define pr_fmt(fmt) "SMP alternatives: " fmt - #include #include #include @@ -65,11 +63,8 @@ static int __init setup_noreplace_paravirt(char *str) __setup("noreplace-paravirt", setup_noreplace_paravirt); #endif -#define DPRINTK(fmt, ...) \ -do { \ - if (debug_alternative) \ - printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ -} while (0) +#define DPRINTK(fmt, args...) if (debug_alternative) \ + printk(KERN_DEBUG fmt, args) /* * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes @@ -433,7 +428,7 @@ void alternatives_smp_switch(int smp) * If this still occurs then you should see a hang * or crash shortly after this line: */ - pr_info("lockdep: fixing up alternatives\n"); + printk("lockdep: fixing up alternatives.\n"); #endif if (noreplace_smp || smp_alt_once || skip_smp_alternatives) @@ -449,14 +444,14 @@ void alternatives_smp_switch(int smp) if (smp == smp_mode) { /* nothing */ } else if (smp) { - pr_info("switching to SMP code\n"); + printk(KERN_INFO "SMP alternatives: switching to SMP code\n"); clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP); clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP); list_for_each_entry(mod, &smp_alt_modules, next) alternatives_smp_lock(mod->locks, mod->locks_end, mod->text, mod->text_end); } else { - pr_info("switching to UP code\n"); + printk(KERN_INFO "SMP alternatives: switching to UP code\n"); set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP); set_cpu_cap(&cpu_data(0), X86_FEATURE_UP); list_for_each_entry(mod, &smp_alt_modules, next) @@ -551,7 +546,7 @@ void __init alternative_instructions(void) #ifdef CONFIG_SMP if (smp_alt_once) { if (1 == num_possible_cpus()) { - pr_info("switching to UP code\n"); + printk(KERN_INFO "SMP alternatives: switching to UP code\n"); set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP); set_cpu_cap(&cpu_data(0), X86_FEATURE_UP); @@ -669,7 +664,7 @@ static int __kprobes stop_machine_text_poke(void *data) struct text_poke_param *p; int i; - if (atomic_xchg(&stop_machine_first, 0)) { + if (atomic_dec_and_test(&stop_machine_first)) { for (i = 0; i < tpp->nparams; i++) { p = &tpp->params[i]; text_poke(p->addr, p->opcode, p->len); diff --git a/trunk/arch/x86/kernel/amd_nb.c b/trunk/arch/x86/kernel/amd_nb.c index aadf3359e2a7..be16854591cc 100644 --- a/trunk/arch/x86/kernel/amd_nb.c +++ b/trunk/arch/x86/kernel/amd_nb.c @@ -2,9 +2,6 @@ * Shared support code for AMD K8 northbridges and derivates. * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2. 
*/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include #include #include @@ -19,7 +16,6 @@ const struct pci_device_id amd_nb_misc_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) }, - { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) }, {} }; EXPORT_SYMBOL(amd_nb_misc_ids); @@ -262,7 +258,7 @@ void amd_flush_garts(void) } spin_unlock_irqrestore(&gart_lock, flags); if (!flushed) - pr_notice("nothing to flush?\n"); + printk("nothing to flush?\n"); } EXPORT_SYMBOL_GPL(amd_flush_garts); @@ -273,10 +269,11 @@ static __init int init_amd_nbs(void) err = amd_cache_northbridges(); if (err < 0) - pr_notice("Cannot enumerate AMD northbridges\n"); + printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n"); if (amd_cache_gart() < 0) - pr_notice("Cannot initialize GART flush words, GART support disabled\n"); + printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, " + "GART support disabled.\n"); return err; } diff --git a/trunk/arch/x86/kernel/apic/apic.c b/trunk/arch/x86/kernel/apic/apic.c index c421512ca5eb..39a222e094af 100644 --- a/trunk/arch/x86/kernel/apic/apic.c +++ b/trunk/arch/x86/kernel/apic/apic.c @@ -2123,25 +2123,6 @@ void default_init_apic_ldr(void) apic_write(APIC_LDR, val); } -int default_cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask, - unsigned int *apicid) -{ - unsigned int cpu; - - for_each_cpu_and(cpu, cpumask, andmask) { - if (cpumask_test_cpu(cpu, cpu_online_mask)) - break; - } - - if (likely(cpu < nr_cpu_ids)) { - *apicid = per_cpu(x86_cpu_to_apicid, cpu); - return 0; - } - - return -EINVAL; -} - /* * Power management */ diff --git a/trunk/arch/x86/kernel/apic/apic_flat_64.c b/trunk/arch/x86/kernel/apic/apic_flat_64.c index 00c77cf78e9e..0e881c46e8c8 100644 --- a/trunk/arch/x86/kernel/apic/apic_flat_64.c +++ b/trunk/arch/x86/kernel/apic/apic_flat_64.c @@ -36,6 +36,25 @@ static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) return 1; } +static const struct cpumask *flat_target_cpus(void) +{ + return cpu_online_mask; +} + +static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask) +{ + /* Careful. Some cpus do not strictly honor the set of cpus + * specified in the interrupt destination when using lowest + * priority interrupt delivery mode. + * + * In particular there was a hyperthreading cpu observed to + * deliver interrupts to the wrong hyperthread when only one + * hyperthread was specified in the interrupt desitination. + */ + cpumask_clear(retmask); + cpumask_bits(retmask)[0] = APIC_ALL_CPUS; +} + /* * Set up the logical destination ID. 
* @@ -73,7 +92,7 @@ static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector) } static void -flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) + flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) { unsigned long mask = cpumask_bits(cpumask)[0]; int cpu = smp_processor_id(); @@ -167,7 +186,7 @@ static struct apic apic_flat = { .irq_delivery_mode = dest_LowestPrio, .irq_dest_mode = 1, /* logical */ - .target_cpus = online_target_cpus, + .target_cpus = flat_target_cpus, .disable_esr = 0, .dest_logical = APIC_DEST_LOGICAL, .check_apicid_used = NULL, @@ -191,7 +210,8 @@ static struct apic apic_flat = { .set_apic_id = set_apic_id, .apic_id_mask = 0xFFu << 24, - .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and, + .cpu_mask_to_apicid = default_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and, .send_IPI_mask = flat_send_IPI_mask, .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself, @@ -242,6 +262,17 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) return 0; } +static const struct cpumask *physflat_target_cpus(void) +{ + return cpu_online_mask; +} + +static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask) +{ + cpumask_clear(retmask); + cpumask_set_cpu(cpu, retmask); +} + static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector) { default_send_IPI_mask_sequence_phys(cpumask, vector); @@ -263,6 +294,38 @@ static void physflat_send_IPI_all(int vector) physflat_send_IPI_mask(cpu_online_mask, vector); } +static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask) +{ + int cpu; + + /* + * We're using fixed IRQ delivery, can only return one phys APIC ID. + * May as well be the first. + */ + cpu = cpumask_first(cpumask); + if ((unsigned)cpu < nr_cpu_ids) + return per_cpu(x86_cpu_to_apicid, cpu); + else + return BAD_APICID; +} + +static unsigned int +physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) +{ + int cpu; + + /* + * We're using fixed IRQ delivery, can only return one phys APIC ID. + * May as well be the first. 
+ */ + for_each_cpu_and(cpu, cpumask, andmask) { + if (cpumask_test_cpu(cpu, cpu_online_mask)) + break; + } + return per_cpu(x86_cpu_to_apicid, cpu); +} + static int physflat_probe(void) { if (apic == &apic_physflat || num_possible_cpus() > 8) @@ -282,13 +345,13 @@ static struct apic apic_physflat = { .irq_delivery_mode = dest_Fixed, .irq_dest_mode = 0, /* physical */ - .target_cpus = online_target_cpus, + .target_cpus = physflat_target_cpus, .disable_esr = 0, .dest_logical = 0, .check_apicid_used = NULL, .check_apicid_present = NULL, - .vector_allocation_domain = default_vector_allocation_domain, + .vector_allocation_domain = physflat_vector_allocation_domain, /* not needed, but shouldn't hurt: */ .init_apic_ldr = flat_init_apic_ldr, @@ -307,7 +370,8 @@ static struct apic apic_physflat = { .set_apic_id = set_apic_id, .apic_id_mask = 0xFFu << 24, - .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and, + .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and, .send_IPI_mask = physflat_send_IPI_mask, .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself, diff --git a/trunk/arch/x86/kernel/apic/apic_noop.c b/trunk/arch/x86/kernel/apic/apic_noop.c index e145f28b4099..a6e4c6e06c08 100644 --- a/trunk/arch/x86/kernel/apic/apic_noop.c +++ b/trunk/arch/x86/kernel/apic/apic_noop.c @@ -100,12 +100,12 @@ static unsigned long noop_check_apicid_present(int bit) return physid_isset(bit, phys_cpu_present_map); } -static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask, - const struct cpumask *mask) +static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask) { if (cpu != 0) pr_warning("APIC: Vector allocated for non-BSP cpu\n"); - cpumask_copy(retmask, cpumask_of(cpu)); + cpumask_clear(retmask); + cpumask_set_cpu(cpu, retmask); } static u32 noop_apic_read(u32 reg) @@ -159,7 +159,8 @@ struct apic apic_noop = { .set_apic_id = NULL, .apic_id_mask = 0x0F << 24, - .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and, + .cpu_mask_to_apicid = default_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and, .send_IPI_mask = noop_send_IPI_mask, .send_IPI_mask_allbutself = noop_send_IPI_mask_allbutself, diff --git a/trunk/arch/x86/kernel/apic/apic_numachip.c b/trunk/arch/x86/kernel/apic/apic_numachip.c index bc552cff2578..6ec6d5d297c3 100644 --- a/trunk/arch/x86/kernel/apic/apic_numachip.c +++ b/trunk/arch/x86/kernel/apic/apic_numachip.c @@ -72,6 +72,17 @@ static int numachip_phys_pkg_id(int initial_apic_id, int index_msb) return initial_apic_id >> index_msb; } +static const struct cpumask *numachip_target_cpus(void) +{ + return cpu_online_mask; +} + +static void numachip_vector_allocation_domain(int cpu, struct cpumask *retmask) +{ + cpumask_clear(retmask); + cpumask_set_cpu(cpu, retmask); +} + static int __cpuinit numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip) { union numachip_csr_g3_ext_irq_gen int_gen; @@ -146,6 +157,38 @@ static void numachip_send_IPI_self(int vector) __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); } +static unsigned int numachip_cpu_mask_to_apicid(const struct cpumask *cpumask) +{ + int cpu; + + /* + * We're using fixed IRQ delivery, can only return one phys APIC ID. + * May as well be the first. 
+ */ + cpu = cpumask_first(cpumask); + if (likely((unsigned)cpu < nr_cpu_ids)) + return per_cpu(x86_cpu_to_apicid, cpu); + + return BAD_APICID; +} + +static unsigned int +numachip_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) +{ + int cpu; + + /* + * We're using fixed IRQ delivery, can only return one phys APIC ID. + * May as well be the first. + */ + for_each_cpu_and(cpu, cpumask, andmask) { + if (cpumask_test_cpu(cpu, cpu_online_mask)) + break; + } + return per_cpu(x86_cpu_to_apicid, cpu); +} + static int __init numachip_probe(void) { return apic == &apic_numachip; @@ -210,13 +253,13 @@ static struct apic apic_numachip __refconst = { .irq_delivery_mode = dest_Fixed, .irq_dest_mode = 0, /* physical */ - .target_cpus = online_target_cpus, + .target_cpus = numachip_target_cpus, .disable_esr = 0, .dest_logical = 0, .check_apicid_used = NULL, .check_apicid_present = NULL, - .vector_allocation_domain = default_vector_allocation_domain, + .vector_allocation_domain = numachip_vector_allocation_domain, .init_apic_ldr = flat_init_apic_ldr, .ioapic_phys_id_map = NULL, @@ -234,7 +277,8 @@ static struct apic apic_numachip __refconst = { .set_apic_id = set_apic_id, .apic_id_mask = 0xffU << 24, - .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and, + .cpu_mask_to_apicid = numachip_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = numachip_cpu_mask_to_apicid_and, .send_IPI_mask = numachip_send_IPI_mask, .send_IPI_mask_allbutself = numachip_send_IPI_mask_allbutself, diff --git a/trunk/arch/x86/kernel/apic/bigsmp_32.c b/trunk/arch/x86/kernel/apic/bigsmp_32.c index d50e3640d5ae..31fbdbfbf960 100644 --- a/trunk/arch/x86/kernel/apic/bigsmp_32.c +++ b/trunk/arch/x86/kernel/apic/bigsmp_32.c @@ -26,6 +26,15 @@ static int bigsmp_apic_id_registered(void) return 1; } +static const struct cpumask *bigsmp_target_cpus(void) +{ +#ifdef CONFIG_SMP + return cpu_online_mask; +#else + return cpumask_of(0); +#endif +} + static unsigned long bigsmp_check_apicid_used(physid_mask_t *map, int apicid) { return 0; @@ -96,6 +105,32 @@ static int bigsmp_check_phys_apicid_present(int phys_apicid) return 1; } +/* As we are using single CPU as destination, pick only one CPU here */ +static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask) +{ + int cpu = cpumask_first(cpumask); + + if (cpu < nr_cpu_ids) + return cpu_physical_id(cpu); + return BAD_APICID; +} + +static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) +{ + int cpu; + + /* + * We're using fixed IRQ delivery, can only return one phys APIC ID. + * May as well be the first. 
+ */ + for_each_cpu_and(cpu, cpumask, andmask) { + if (cpumask_test_cpu(cpu, cpu_online_mask)) + return cpu_physical_id(cpu); + } + return BAD_APICID; +} + static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; @@ -142,6 +177,12 @@ static const struct dmi_system_id bigsmp_dmi_table[] = { { } /* NULL entry stops DMI scanning */ }; +static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask) +{ + cpumask_clear(retmask); + cpumask_set_cpu(cpu, retmask); +} + static int probe_bigsmp(void) { if (def_to_bigsmp) @@ -164,13 +205,13 @@ static struct apic apic_bigsmp = { /* phys delivery to target CPU: */ .irq_dest_mode = 0, - .target_cpus = default_target_cpus, + .target_cpus = bigsmp_target_cpus, .disable_esr = 1, .dest_logical = 0, .check_apicid_used = bigsmp_check_apicid_used, .check_apicid_present = bigsmp_check_apicid_present, - .vector_allocation_domain = default_vector_allocation_domain, + .vector_allocation_domain = bigsmp_vector_allocation_domain, .init_apic_ldr = bigsmp_init_apic_ldr, .ioapic_phys_id_map = bigsmp_ioapic_phys_id_map, @@ -188,7 +229,8 @@ static struct apic apic_bigsmp = { .set_apic_id = NULL, .apic_id_mask = 0xFF << 24, - .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and, + .cpu_mask_to_apicid = bigsmp_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = bigsmp_cpu_mask_to_apicid_and, .send_IPI_mask = bigsmp_send_IPI_mask, .send_IPI_mask_allbutself = NULL, diff --git a/trunk/arch/x86/kernel/apic/es7000_32.c b/trunk/arch/x86/kernel/apic/es7000_32.c index 0874799a98c6..db4ab1be3c79 100644 --- a/trunk/arch/x86/kernel/apic/es7000_32.c +++ b/trunk/arch/x86/kernel/apic/es7000_32.c @@ -394,6 +394,21 @@ static void es7000_enable_apic_mode(void) WARN(1, "Command failed, status = %x\n", mip_status); } +static void es7000_vector_allocation_domain(int cpu, struct cpumask *retmask) +{ + /* Careful. Some cpus do not strictly honor the set of cpus + * specified in the interrupt destination when using lowest + * priority interrupt delivery mode. + * + * In particular there was a hyperthreading cpu observed to + * deliver interrupts to the wrong hyperthread when only one + * hyperthread was specified in the interrupt desitination. + */ + cpumask_clear(retmask); + cpumask_bits(retmask)[0] = APIC_ALL_CPUS; +} + + static void es7000_wait_for_init_deassert(atomic_t *deassert) { while (!atomic_read(deassert)) @@ -525,49 +540,45 @@ static int es7000_check_phys_apicid_present(int cpu_physical_apicid) return 1; } -static inline int -es7000_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *dest_id) +static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask) { unsigned int round = 0; - unsigned int cpu, uninitialized_var(apicid); + int cpu, uninitialized_var(apicid); /* * The cpus in the mask must all be on the apic cluster. 
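physflat, numachip and bigsmp above all answer cpu_mask_to_apicid_and() the same way: with fixed physical IRQ delivery only one destination APIC ID can be programmed, so the first CPU that sits in both masks and is online is chosen; the single-mask variants return BAD_APICID when nothing qualifies. The same logic in plain C, with 64-bit words standing in for cpumasks and an invented CPU-to-APIC-ID table:

#include <stdint.h>
#include <stdio.h>

#define BAD_APICID 0xFFFFu

static unsigned int mask_to_apicid_and(uint64_t cpumask, uint64_t andmask,
					uint64_t online,
					const unsigned int *apicid_of)
{
	uint64_t m = cpumask & andmask & online;
	int cpu;

	for (cpu = 0; cpu < 64; cpu++)		/* cpumask_first()-style scan */
		if (m & (1ULL << cpu))
			return apicid_of[cpu];
	return BAD_APICID;
}

int main(void)
{
	unsigned int apicid_of[64] = { 0x00, 0x02, 0x04, 0x06 };	/* fake table */
	unsigned int dest = mask_to_apicid_and(0xcULL,	/* cpus 2,3     */
					       0xeULL,	/* cpus 1,2,3   */
					       0xbULL,	/* 0,1,3 online */
					       apicid_of);

	printf("destination APIC ID: %#x\n", dest);	/* cpu 3 -> 0x6 */
	return 0;
}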
*/ - for_each_cpu_and(cpu, cpumask, cpu_online_mask) { + for_each_cpu(cpu, cpumask) { int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { WARN(1, "Not a valid mask!"); - return -EINVAL; + return BAD_APICID; } - apicid |= new_apicid; + apicid = new_apicid; round++; } - if (!round) - return -EINVAL; - *dest_id = apicid; - return 0; + return apicid; } -static int +static unsigned int es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask, - const struct cpumask *andmask, - unsigned int *apicid) + const struct cpumask *andmask) { + int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0); cpumask_var_t cpumask; - *apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0); if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) - return 0; + return apicid; cpumask_and(cpumask, inmask, andmask); - es7000_cpu_mask_to_apicid(cpumask, apicid); + cpumask_and(cpumask, cpumask, cpu_online_mask); + apicid = es7000_cpu_mask_to_apicid(cpumask); free_cpumask_var(cpumask); - return 0; + return apicid; } static int es7000_phys_pkg_id(int cpuid_apic, int index_msb) @@ -627,7 +638,7 @@ static struct apic __refdata apic_es7000_cluster = { .check_apicid_used = es7000_check_apicid_used, .check_apicid_present = es7000_check_apicid_present, - .vector_allocation_domain = flat_vector_allocation_domain, + .vector_allocation_domain = es7000_vector_allocation_domain, .init_apic_ldr = es7000_init_apic_ldr_cluster, .ioapic_phys_id_map = es7000_ioapic_phys_id_map, @@ -645,6 +656,7 @@ static struct apic __refdata apic_es7000_cluster = { .set_apic_id = NULL, .apic_id_mask = 0xFF << 24, + .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid, .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and, .send_IPI_mask = es7000_send_IPI_mask, @@ -693,7 +705,7 @@ static struct apic __refdata apic_es7000 = { .check_apicid_used = es7000_check_apicid_used, .check_apicid_present = es7000_check_apicid_present, - .vector_allocation_domain = flat_vector_allocation_domain, + .vector_allocation_domain = es7000_vector_allocation_domain, .init_apic_ldr = es7000_init_apic_ldr, .ioapic_phys_id_map = es7000_ioapic_phys_id_map, @@ -711,6 +723,7 @@ static struct apic __refdata apic_es7000 = { .set_apic_id = NULL, .apic_id_mask = 0xFF << 24, + .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid, .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and, .send_IPI_mask = es7000_send_IPI_mask, diff --git a/trunk/arch/x86/kernel/apic/io_apic.c b/trunk/arch/x86/kernel/apic/io_apic.c index 406eee784684..5f0ff597437c 100644 --- a/trunk/arch/x86/kernel/apic/io_apic.c +++ b/trunk/arch/x86/kernel/apic/io_apic.c @@ -448,8 +448,8 @@ static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pi entry = alloc_irq_pin_list(node); if (!entry) { - pr_err("can not alloc irq_pin_list (%d,%d,%d)\n", - node, apic, pin); + printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n", + node, apic, pin); return -ENOMEM; } entry->apic = apic; @@ -661,7 +661,7 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) ioapic_mask_entry(apic, pin); entry = ioapic_read_entry(apic, pin); if (entry.irr) - pr_err("Unable to reset IRR for apic: %d, pin :%d\n", + printk(KERN_ERR "Unable to reset IRR for apic: %d, pin :%d\n", mpc_ioapic_id(apic), pin); } @@ -895,7 +895,7 @@ static int irq_polarity(int idx) } case 2: /* reserved */ { - pr_warn("broken BIOS!!\n"); + printk(KERN_WARNING "broken BIOS!!\n"); polarity = 1; break; } @@ -906,7 +906,7 @@ static int irq_polarity(int idx) } default: /* invalid 
*/ { - pr_warn("broken BIOS!!\n"); + printk(KERN_WARNING "broken BIOS!!\n"); polarity = 1; break; } @@ -948,7 +948,7 @@ static int irq_trigger(int idx) } default: { - pr_warn("broken BIOS!!\n"); + printk(KERN_WARNING "broken BIOS!!\n"); trigger = 1; break; } @@ -962,7 +962,7 @@ static int irq_trigger(int idx) } case 2: /* reserved */ { - pr_warn("broken BIOS!!\n"); + printk(KERN_WARNING "broken BIOS!!\n"); trigger = 1; break; } @@ -973,7 +973,7 @@ static int irq_trigger(int idx) } default: /* invalid */ { - pr_warn("broken BIOS!!\n"); + printk(KERN_WARNING "broken BIOS!!\n"); trigger = 0; break; } @@ -991,7 +991,7 @@ static int pin_2_irq(int idx, int apic, int pin) * Debugging check, we are in big trouble if this message pops up! */ if (mp_irqs[idx].dstirq != pin) - pr_err("broken BIOS or MPTABLE parser, ayiee!!\n"); + printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n"); if (test_bit(bus, mp_bus_not_pci)) { irq = mp_irqs[idx].srcbusirq; @@ -1112,7 +1112,8 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) * 0x80, because int 0x80 is hm, kind of importantish. ;) */ static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START; - static int current_offset = VECTOR_OFFSET_START % 16; + static int current_offset = VECTOR_OFFSET_START % 8; + unsigned int old_vector; int cpu, err; cpumask_var_t tmp_mask; @@ -1122,45 +1123,35 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC)) return -ENOMEM; + old_vector = cfg->vector; + if (old_vector) { + cpumask_and(tmp_mask, mask, cpu_online_mask); + cpumask_and(tmp_mask, cfg->domain, tmp_mask); + if (!cpumask_empty(tmp_mask)) { + free_cpumask_var(tmp_mask); + return 0; + } + } + /* Only try and allocate irqs on cpus that are present */ err = -ENOSPC; - cpumask_clear(cfg->old_domain); - cpu = cpumask_first_and(mask, cpu_online_mask); - while (cpu < nr_cpu_ids) { - int new_cpu, vector, offset; + for_each_cpu_and(cpu, mask, cpu_online_mask) { + int new_cpu; + int vector, offset; - apic->vector_allocation_domain(cpu, tmp_mask, mask); - - if (cpumask_subset(tmp_mask, cfg->domain)) { - err = 0; - if (cpumask_equal(tmp_mask, cfg->domain)) - break; - /* - * New cpumask using the vector is a proper subset of - * the current in use mask. So cleanup the vector - * allocation for the members that are not used anymore. - */ - cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask); - cfg->move_in_progress = 1; - cpumask_and(cfg->domain, cfg->domain, tmp_mask); - break; - } + apic->vector_allocation_domain(cpu, tmp_mask); vector = current_vector; offset = current_offset; next: - vector += 16; + vector += 8; if (vector >= first_system_vector) { - offset = (offset + 1) % 16; + /* If out of vectors on large boxen, must share them. */ + offset = (offset + 1) % 8; vector = FIRST_EXTERNAL_VECTOR + offset; } - - if (unlikely(current_vector == vector)) { - cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask); - cpumask_andnot(tmp_mask, mask, cfg->old_domain); - cpu = cpumask_first_and(tmp_mask, cpu_online_mask); + if (unlikely(current_vector == vector)) continue; - } if (test_bit(vector, used_vectors)) goto next; @@ -1171,7 +1162,7 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) /* Found one! 
*/ current_vector = vector; current_offset = offset; - if (cfg->vector) { + if (old_vector) { cfg->move_in_progress = 1; cpumask_copy(cfg->old_domain, cfg->domain); } @@ -1355,18 +1346,18 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg, if (!IO_APIC_IRQ(irq)) return; + /* + * For legacy irqs, cfg->domain starts with cpu 0 for legacy + * controllers like 8259. Now that IO-APIC can handle this irq, update + * the cfg->domain. + */ + if (irq < legacy_pic->nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain)) + apic->vector_allocation_domain(0, cfg->domain); if (assign_irq_vector(irq, cfg, apic->target_cpus())) return; - if (apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus(), - &dest)) { - pr_warn("Failed to obtain apicid for ioapic %d, pin %d\n", - mpc_ioapic_id(attr->ioapic), attr->ioapic_pin); - __clear_irq_vector(irq, cfg); - - return; - } + dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " @@ -1375,7 +1366,7 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg, cfg->vector, irq, attr->trigger, attr->polarity, dest); if (setup_ioapic_entry(irq, &entry, dest, cfg->vector, attr)) { - pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n", + pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n", mpc_ioapic_id(attr->ioapic), attr->ioapic_pin); __clear_irq_vector(irq, cfg); @@ -1478,10 +1469,9 @@ void setup_IO_APIC_irq_extra(u32 gsi) * Set up the timer pin, possibly with the 8259A-master behind. */ static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx, - unsigned int pin, int vector) + unsigned int pin, int vector) { struct IO_APIC_route_entry entry; - unsigned int dest; if (irq_remapping_enabled) return; @@ -1492,13 +1482,9 @@ static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx, * We use logical delivery to get the timer IRQ * to the first CPU. */ - if (unlikely(apic->cpu_mask_to_apicid_and(apic->target_cpus(), - apic->target_cpus(), &dest))) - dest = BAD_APICID; - entry.dest_mode = apic->irq_dest_mode; entry.mask = 0; /* don't mask IRQ for edge */ - entry.dest = dest; + entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus()); entry.delivery_mode = apic->irq_delivery_mode; entry.polarity = 0; entry.trigger = 0; @@ -1535,6 +1521,7 @@ __apicdebuginit(void) print_IO_APIC(int ioapic_idx) reg_03.raw = io_apic_read(ioapic_idx, 3); raw_spin_unlock_irqrestore(&ioapic_lock, flags); + printk("\n"); printk(KERN_DEBUG "IO APIC #%d......\n", mpc_ioapic_id(ioapic_idx)); printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw); printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID); @@ -1591,7 +1578,7 @@ __apicdebuginit(void) print_IO_APIC(int ioapic_idx) i, ir_entry->index ); - pr_cont("%1d %1d %1d %1d %1d " + printk("%1d %1d %1d %1d %1d " "%1d %1d %X %02X\n", ir_entry->format, ir_entry->mask, @@ -1611,7 +1598,7 @@ __apicdebuginit(void) print_IO_APIC(int ioapic_idx) i, entry.dest ); - pr_cont("%1d %1d %1d %1d %1d " + printk("%1d %1d %1d %1d %1d " "%1d %1d %02X\n", entry.mask, entry.trigger, @@ -1664,8 +1651,8 @@ __apicdebuginit(void) print_IO_APICs(void) continue; printk(KERN_DEBUG "IRQ%d ", irq); for_each_irq_pin(entry, cfg->irq_2_pin) - pr_cont("-> %d:%d", entry->apic, entry->pin); - pr_cont("\n"); + printk("-> %d:%d", entry->apic, entry->pin); + printk("\n"); } printk(KERN_INFO ".................................... 
done.\n"); @@ -1678,9 +1665,9 @@ __apicdebuginit(void) print_APIC_field(int base) printk(KERN_DEBUG); for (i = 0; i < 8; i++) - pr_cont("%08x", apic_read(base + i*0x10)); + printk(KERN_CONT "%08x", apic_read(base + i*0x10)); - pr_cont("\n"); + printk(KERN_CONT "\n"); } __apicdebuginit(void) print_local_APIC(void *dummy) @@ -1782,7 +1769,7 @@ __apicdebuginit(void) print_local_APIC(void *dummy) printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v); } } - pr_cont("\n"); + printk("\n"); } __apicdebuginit(void) print_local_APICs(int maxcpu) @@ -2078,7 +2065,7 @@ void __init setup_ioapic_ids_from_mpc_nocheck(void) reg_00.raw = io_apic_read(ioapic_idx, 0); raw_spin_unlock_irqrestore(&ioapic_lock, flags); if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx)) - pr_cont("could not set ID!\n"); + printk("could not set ID!\n"); else apic_printk(APIC_VERBOSE, " ok.\n"); } @@ -2223,6 +2210,71 @@ void send_cleanup_vector(struct irq_cfg *cfg) cfg->move_in_progress = 0; } +static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) +{ + int apic, pin; + struct irq_pin_list *entry; + u8 vector = cfg->vector; + + for_each_irq_pin(entry, cfg->irq_2_pin) { + unsigned int reg; + + apic = entry->apic; + pin = entry->pin; + /* + * With interrupt-remapping, destination information comes + * from interrupt-remapping table entry. + */ + if (!irq_remapped(cfg)) + io_apic_write(apic, 0x11 + pin*2, dest); + reg = io_apic_read(apic, 0x10 + pin*2); + reg &= ~IO_APIC_REDIR_VECTOR_MASK; + reg |= vector; + io_apic_modify(apic, 0x10 + pin*2, reg); + } +} + +/* + * Either sets data->affinity to a valid value, and returns + * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and + * leaves data->affinity untouched. + */ +int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, + unsigned int *dest_id) +{ + struct irq_cfg *cfg = data->chip_data; + + if (!cpumask_intersects(mask, cpu_online_mask)) + return -1; + + if (assign_irq_vector(data->irq, data->chip_data, mask)) + return -1; + + cpumask_copy(data->affinity, mask); + + *dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain); + return 0; +} + +static int +ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, + bool force) +{ + unsigned int dest, irq = data->irq; + unsigned long flags; + int ret; + + raw_spin_lock_irqsave(&ioapic_lock, flags); + ret = __ioapic_set_affinity(data, mask, &dest); + if (!ret) { + /* Only the high 8 bits are valid. */ + dest = SET_APIC_LOGICAL_ID(dest); + __target_IO_APIC_irq(irq, dest, data->chip_data); + } + raw_spin_unlock_irqrestore(&ioapic_lock, flags); + return ret; +} + asmlinkage void smp_irq_move_cleanup_interrupt(void) { unsigned vector, me; @@ -2310,87 +2362,6 @@ void irq_force_complete_move(int irq) static inline void irq_complete_move(struct irq_cfg *cfg) { } #endif -static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) -{ - int apic, pin; - struct irq_pin_list *entry; - u8 vector = cfg->vector; - - for_each_irq_pin(entry, cfg->irq_2_pin) { - unsigned int reg; - - apic = entry->apic; - pin = entry->pin; - /* - * With interrupt-remapping, destination information comes - * from interrupt-remapping table entry. 
- */ - if (!irq_remapped(cfg)) - io_apic_write(apic, 0x11 + pin*2, dest); - reg = io_apic_read(apic, 0x10 + pin*2); - reg &= ~IO_APIC_REDIR_VECTOR_MASK; - reg |= vector; - io_apic_modify(apic, 0x10 + pin*2, reg); - } -} - -/* - * Either sets data->affinity to a valid value, and returns - * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and - * leaves data->affinity untouched. - */ -int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, - unsigned int *dest_id) -{ - struct irq_cfg *cfg = data->chip_data; - unsigned int irq = data->irq; - int err; - - if (!config_enabled(CONFIG_SMP)) - return -1; - - if (!cpumask_intersects(mask, cpu_online_mask)) - return -EINVAL; - - err = assign_irq_vector(irq, cfg, mask); - if (err) - return err; - - err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id); - if (err) { - if (assign_irq_vector(irq, cfg, data->affinity)) - pr_err("Failed to recover vector for irq %d\n", irq); - return err; - } - - cpumask_copy(data->affinity, mask); - - return 0; -} - -static int -ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, - bool force) -{ - unsigned int dest, irq = data->irq; - unsigned long flags; - int ret; - - if (!config_enabled(CONFIG_SMP)) - return -1; - - raw_spin_lock_irqsave(&ioapic_lock, flags); - ret = __ioapic_set_affinity(data, mask, &dest); - if (!ret) { - /* Only the high 8 bits are valid. */ - dest = SET_APIC_LOGICAL_ID(dest); - __target_IO_APIC_irq(irq, dest, data->chip_data); - ret = IRQ_SET_MASK_OK_NOCOPY; - } - raw_spin_unlock_irqrestore(&ioapic_lock, flags); - return ret; -} - static void ack_apic_edge(struct irq_data *data) { irq_complete_move(data->chip_data); @@ -2570,7 +2541,9 @@ static void irq_remap_modify_chip_defaults(struct irq_chip *chip) chip->irq_ack = ir_ack_apic_edge; chip->irq_eoi = ir_ack_apic_level; +#ifdef CONFIG_SMP chip->irq_set_affinity = set_remapped_irq_affinity; +#endif } #endif /* CONFIG_IRQ_REMAP */ @@ -2581,7 +2554,9 @@ static struct irq_chip ioapic_chip __read_mostly = { .irq_unmask = unmask_ioapic_irq, .irq_ack = ack_apic_edge, .irq_eoi = ack_apic_level, +#ifdef CONFIG_SMP .irq_set_affinity = ioapic_set_affinity, +#endif .irq_retrigger = ioapic_retrigger_irq, }; @@ -3063,10 +3038,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, if (err) return err; - err = apic->cpu_mask_to_apicid_and(cfg->domain, - apic->target_cpus(), &dest); - if (err) - return err; + dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); if (irq_remapped(cfg)) { compose_remapped_msi_msg(pdev, irq, dest, msg, hpet_id); @@ -3100,6 +3072,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, return err; } +#ifdef CONFIG_SMP static int msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { @@ -3119,8 +3092,9 @@ msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) __write_msi_msg(data->msi_desc, &msg); - return IRQ_SET_MASK_OK_NOCOPY; + return 0; } +#endif /* CONFIG_SMP */ /* * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices, @@ -3131,7 +3105,9 @@ static struct irq_chip msi_chip = { .irq_unmask = unmask_msi_irq, .irq_mask = mask_msi_irq, .irq_ack = ack_apic_edge, +#ifdef CONFIG_SMP .irq_set_affinity = msi_set_affinity, +#endif .irq_retrigger = ioapic_retrigger_irq, }; @@ -3216,6 +3192,7 @@ void native_teardown_msi_irq(unsigned int irq) } #ifdef CONFIG_DMAR_TABLE +#ifdef CONFIG_SMP static int dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) @@ -3237,15 
+3214,19 @@ dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, dmar_msi_write(irq, &msg); - return IRQ_SET_MASK_OK_NOCOPY; + return 0; } +#endif /* CONFIG_SMP */ + static struct irq_chip dmar_msi_type = { .name = "DMAR_MSI", .irq_unmask = dmar_msi_unmask, .irq_mask = dmar_msi_mask, .irq_ack = ack_apic_edge, +#ifdef CONFIG_SMP .irq_set_affinity = dmar_msi_set_affinity, +#endif .irq_retrigger = ioapic_retrigger_irq, }; @@ -3266,6 +3247,7 @@ int arch_setup_dmar_msi(unsigned int irq) #ifdef CONFIG_HPET_TIMER +#ifdef CONFIG_SMP static int hpet_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { @@ -3285,15 +3267,19 @@ static int hpet_msi_set_affinity(struct irq_data *data, hpet_msi_write(data->handler_data, &msg); - return IRQ_SET_MASK_OK_NOCOPY; + return 0; } +#endif /* CONFIG_SMP */ + static struct irq_chip hpet_msi_type = { .name = "HPET_MSI", .irq_unmask = hpet_msi_unmask, .irq_mask = hpet_msi_mask, .irq_ack = ack_apic_edge, +#ifdef CONFIG_SMP .irq_set_affinity = hpet_msi_set_affinity, +#endif .irq_retrigger = ioapic_retrigger_irq, }; @@ -3328,6 +3314,8 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id) */ #ifdef CONFIG_HT_IRQ +#ifdef CONFIG_SMP + static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) { struct ht_irq_msg msg; @@ -3352,23 +3340,25 @@ ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) return -1; target_ht_irq(data->irq, dest, cfg->vector); - return IRQ_SET_MASK_OK_NOCOPY; + return 0; } +#endif + static struct irq_chip ht_irq_chip = { .name = "PCI-HT", .irq_mask = mask_ht_irq, .irq_unmask = unmask_ht_irq, .irq_ack = ack_apic_edge, +#ifdef CONFIG_SMP .irq_set_affinity = ht_set_affinity, +#endif .irq_retrigger = ioapic_retrigger_irq, }; int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) { struct irq_cfg *cfg; - struct ht_irq_msg msg; - unsigned dest; int err; if (disable_apic) @@ -3376,37 +3366,36 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) cfg = irq_cfg(irq); err = assign_irq_vector(irq, cfg, apic->target_cpus()); - if (err) - return err; - - err = apic->cpu_mask_to_apicid_and(cfg->domain, - apic->target_cpus(), &dest); - if (err) - return err; + if (!err) { + struct ht_irq_msg msg; + unsigned dest; - msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); + dest = apic->cpu_mask_to_apicid_and(cfg->domain, + apic->target_cpus()); - msg.address_lo = - HT_IRQ_LOW_BASE | - HT_IRQ_LOW_DEST_ID(dest) | - HT_IRQ_LOW_VECTOR(cfg->vector) | - ((apic->irq_dest_mode == 0) ? - HT_IRQ_LOW_DM_PHYSICAL : - HT_IRQ_LOW_DM_LOGICAL) | - HT_IRQ_LOW_RQEOI_EDGE | - ((apic->irq_delivery_mode != dest_LowestPrio) ? - HT_IRQ_LOW_MT_FIXED : - HT_IRQ_LOW_MT_ARBITRATED) | - HT_IRQ_LOW_IRQ_MASKED; + msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); - write_ht_irq_msg(irq, &msg); + msg.address_lo = + HT_IRQ_LOW_BASE | + HT_IRQ_LOW_DEST_ID(dest) | + HT_IRQ_LOW_VECTOR(cfg->vector) | + ((apic->irq_dest_mode == 0) ? + HT_IRQ_LOW_DM_PHYSICAL : + HT_IRQ_LOW_DM_LOGICAL) | + HT_IRQ_LOW_RQEOI_EDGE | + ((apic->irq_delivery_mode != dest_LowestPrio) ? 
+ HT_IRQ_LOW_MT_FIXED : + HT_IRQ_LOW_MT_ARBITRATED) | + HT_IRQ_LOW_IRQ_MASKED; - irq_set_chip_and_handler_name(irq, &ht_irq_chip, - handle_edge_irq, "edge"); + write_ht_irq_msg(irq, &msg); - dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq); + irq_set_chip_and_handler_name(irq, &ht_irq_chip, + handle_edge_irq, "edge"); - return 0; + dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq); + } + return err; } #endif /* CONFIG_HT_IRQ */ @@ -3574,8 +3563,7 @@ static int __init io_apic_get_unique_id(int ioapic, int apic_id) /* Sanity check */ if (reg_00.bits.ID != apic_id) { - pr_err("IOAPIC[%d]: Unable to change apic_id!\n", - ioapic); + printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic); return -1; } } diff --git a/trunk/arch/x86/kernel/apic/numaq_32.c b/trunk/arch/x86/kernel/apic/numaq_32.c index d661ee95cabf..f00a68cca37a 100644 --- a/trunk/arch/x86/kernel/apic/numaq_32.c +++ b/trunk/arch/x86/kernel/apic/numaq_32.c @@ -406,13 +406,16 @@ static inline int numaq_check_phys_apicid_present(int phys_apicid) * We use physical apicids here, not logical, so just return the default * physical broadcast to stop people from breaking us */ -static int +static unsigned int numaq_cpu_mask_to_apicid(const struct cpumask *cpumask) +{ + return 0x0F; +} + +static inline unsigned int numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask, - unsigned int *apicid) + const struct cpumask *andmask) { - *apicid = 0x0F; - return 0; + return 0x0F; } /* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */ @@ -438,6 +441,20 @@ static int probe_numaq(void) return found_numaq; } +static void numaq_vector_allocation_domain(int cpu, struct cpumask *retmask) +{ + /* Careful. Some cpus do not strictly honor the set of cpus + * specified in the interrupt destination when using lowest + * priority interrupt delivery mode. + * + * In particular there was a hyperthreading cpu observed to + * deliver interrupts to the wrong hyperthread when only one + * hyperthread was specified in the interrupt desitination. + */ + cpumask_clear(retmask); + cpumask_bits(retmask)[0] = APIC_ALL_CPUS; +} + static void numaq_setup_portio_remap(void) { int num_quads = num_online_nodes(); @@ -474,7 +491,7 @@ static struct apic __refdata apic_numaq = { .check_apicid_used = numaq_check_apicid_used, .check_apicid_present = numaq_check_apicid_present, - .vector_allocation_domain = flat_vector_allocation_domain, + .vector_allocation_domain = numaq_vector_allocation_domain, .init_apic_ldr = numaq_init_apic_ldr, .ioapic_phys_id_map = numaq_ioapic_phys_id_map, @@ -492,6 +509,7 @@ static struct apic __refdata apic_numaq = { .set_apic_id = NULL, .apic_id_mask = 0x0F << 24, + .cpu_mask_to_apicid = numaq_cpu_mask_to_apicid, .cpu_mask_to_apicid_and = numaq_cpu_mask_to_apicid_and, .send_IPI_mask = numaq_send_IPI_mask, diff --git a/trunk/arch/x86/kernel/apic/probe_32.c b/trunk/arch/x86/kernel/apic/probe_32.c index eb35ef9ee63f..1b291da09e60 100644 --- a/trunk/arch/x86/kernel/apic/probe_32.c +++ b/trunk/arch/x86/kernel/apic/probe_32.c @@ -66,6 +66,21 @@ static void setup_apic_flat_routing(void) #endif } +static void default_vector_allocation_domain(int cpu, struct cpumask *retmask) +{ + /* + * Careful. Some cpus do not strictly honor the set of cpus + * specified in the interrupt destination when using lowest + * priority interrupt delivery mode. 
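The vector_allocation_domain helpers reintroduced above (and the per-CPU UV variant further down in this patch) only build a cpumask, so the difference between the "flat" and the per-CPU domain is easy to model outside the kernel. The sketch below is a standalone illustration, not kernel code: the struct cpumask layout, NR_CPUS, and the assumption that APIC_ALL_CPUS is the 0xFF destination byte of flat logical mode are all local to the example.

#include <stdio.h>
#include <string.h>

#define NR_CPUS        64
#define BITS_PER_LONG  (8 * sizeof(unsigned long))
#define MASK_LONGS     ((NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define APIC_ALL_CPUS  0xFFu   /* assumed: all 8 destinations of flat logical mode */

struct cpumask { unsigned long bits[MASK_LONGS]; };

/* Model of the "flat" allocation domain restored by this patch: every vector
 * is programmed with all CPUs as possible destinations, because some CPUs
 * honor lowest-priority delivery only for the full set. */
static void flat_domain(int cpu, struct cpumask *retmask)
{
	memset(retmask->bits, 0, sizeof(retmask->bits));
	retmask->bits[0] = APIC_ALL_CPUS;
	(void)cpu;                      /* the domain ignores the requesting CPU */
}

/* Model of a per-CPU domain (the shape used by the UV variant below): only
 * the requesting CPU is a valid destination for the vector. */
static void percpu_domain(int cpu, struct cpumask *retmask)
{
	memset(retmask->bits, 0, sizeof(retmask->bits));
	retmask->bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
}

int main(void)
{
	struct cpumask m;

	flat_domain(3, &m);
	printf("flat   domain for cpu 3: %#lx\n", m.bits[0]);  /* 0xff */

	percpu_domain(3, &m);
	printf("percpu domain for cpu 3: %#lx\n", m.bits[0]);  /* 0x8 */
	return 0;
}

The flat shape is what this revert restores for the NUMAQ, default 32-bit and summit APIC drivers; the per-CPU shape matches uv_vector_allocation_domain() later in the patch.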
+ * + * In particular there was a hyperthreading cpu observed to + * deliver interrupts to the wrong hyperthread when only one + * hyperthread was specified in the interrupt desitination. + */ + cpumask_clear(retmask); + cpumask_bits(retmask)[0] = APIC_ALL_CPUS; +} + /* should be called last. */ static int probe_default(void) { @@ -90,7 +105,7 @@ static struct apic apic_default = { .check_apicid_used = default_check_apicid_used, .check_apicid_present = default_check_apicid_present, - .vector_allocation_domain = flat_vector_allocation_domain, + .vector_allocation_domain = default_vector_allocation_domain, .init_apic_ldr = default_init_apic_ldr, .ioapic_phys_id_map = default_ioapic_phys_id_map, @@ -108,7 +123,8 @@ static struct apic apic_default = { .set_apic_id = NULL, .apic_id_mask = 0x0F << 24, - .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and, + .cpu_mask_to_apicid = default_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and, .send_IPI_mask = default_send_IPI_mask_logical, .send_IPI_mask_allbutself = default_send_IPI_mask_allbutself_logical, @@ -192,9 +208,6 @@ void __init default_setup_apic_routing(void) if (apic->setup_apic_routing) apic->setup_apic_routing(); - - if (x86_platform.apic_post_init) - x86_platform.apic_post_init(); } void __init generic_apic_probe(void) diff --git a/trunk/arch/x86/kernel/apic/probe_64.c b/trunk/arch/x86/kernel/apic/probe_64.c index 1793dba7a741..3fe986698929 100644 --- a/trunk/arch/x86/kernel/apic/probe_64.c +++ b/trunk/arch/x86/kernel/apic/probe_64.c @@ -23,6 +23,11 @@ #include #include +static int apicid_phys_pkg_id(int initial_apic_id, int index_msb) +{ + return hard_smp_processor_id() >> index_msb; +} + /* * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode. */ @@ -43,8 +48,10 @@ void __init default_setup_apic_routing(void) } } - if (x86_platform.apic_post_init) - x86_platform.apic_post_init(); + if (is_vsmp_box()) { + /* need to update phys_pkg_id */ + apic->phys_pkg_id = apicid_phys_pkg_id; + } } /* Same for both flat and physical. */ diff --git a/trunk/arch/x86/kernel/apic/summit_32.c b/trunk/arch/x86/kernel/apic/summit_32.c index 77c95c0e1bf7..659897c00755 100644 --- a/trunk/arch/x86/kernel/apic/summit_32.c +++ b/trunk/arch/x86/kernel/apic/summit_32.c @@ -26,8 +26,6 @@ * */ -#define pr_fmt(fmt) "summit: %s: " fmt, __func__ - #include #include #include @@ -237,8 +235,8 @@ static int summit_apic_id_registered(void) static void summit_setup_apic_routing(void) { - pr_info("Enabling APIC mode: Summit. Using %d I/O APICs\n", - nr_ioapics); + printk("Enabling APIC mode: Summit. Using %d I/O APICs\n", + nr_ioapics); } static int summit_cpu_present_to_apicid(int mps_cpu) @@ -265,48 +263,43 @@ static int summit_check_phys_apicid_present(int physical_apicid) return 1; } -static inline int -summit_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *dest_id) +static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask) { unsigned int round = 0; - unsigned int cpu, apicid = 0; + int cpu, apicid = 0; /* * The cpus in the mask must all be on the apic cluster. 
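summit_cpu_mask_to_apicid(), restored just above, folds the logical APIC IDs of every CPU in the mask into a single destination and rejects masks that span APIC clusters. A minimal user-space model of that check, assuming the cluster is the high nibble of the 8-bit logical ID (what APIC_CLUSTER() extracts in the real code) and using made-up IDs:

#include <stdio.h>

#define BAD_APICID       0xFFu
#define APIC_CLUSTER(x)  ((x) & 0xF0u)   /* assumed: cluster = high nibble */

/* OR the per-CPU logical APIC IDs together, but only if every CPU sits in
 * the same cluster; otherwise the combined value would be meaningless. */
static unsigned int combine_logical_ids(const unsigned int *ids, int n)
{
	unsigned int round = 0, apicid = 0;
	int i;

	for (i = 0; i < n; i++) {
		unsigned int new_apicid = ids[i];

		if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
			fprintf(stderr, "Not a valid mask!\n");
			return BAD_APICID;
		}
		apicid |= new_apicid;
		round++;
	}
	return apicid;
}

int main(void)
{
	unsigned int same_cluster[] = { 0x31, 0x32, 0x34 };  /* all in cluster 0x30 */
	unsigned int mixed[]        = { 0x31, 0x41 };        /* clusters differ */

	printf("%#x\n", combine_logical_ids(same_cluster, 3));  /* 0x37 */
	printf("%#x\n", combine_logical_ids(mixed, 2));          /* 0xff */
	return 0;
}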
*/ - for_each_cpu_and(cpu, cpumask, cpu_online_mask) { + for_each_cpu(cpu, cpumask) { int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { - pr_err("Not a valid mask!\n"); - return -EINVAL; + printk("%s: Not a valid mask!\n", __func__); + return BAD_APICID; } apicid |= new_apicid; round++; } - if (!round) - return -EINVAL; - *dest_id = apicid; - return 0; + return apicid; } -static int -summit_cpu_mask_to_apicid_and(const struct cpumask *inmask, - const struct cpumask *andmask, - unsigned int *apicid) +static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask, + const struct cpumask *andmask) { + int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0); cpumask_var_t cpumask; - *apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0); if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) - return 0; + return apicid; cpumask_and(cpumask, inmask, andmask); - summit_cpu_mask_to_apicid(cpumask, apicid); + cpumask_and(cpumask, cpumask, cpu_online_mask); + apicid = summit_cpu_mask_to_apicid(cpumask); free_cpumask_var(cpumask); - return 0; + return apicid; } /* @@ -327,6 +320,20 @@ static int probe_summit(void) return 0; } +static void summit_vector_allocation_domain(int cpu, struct cpumask *retmask) +{ + /* Careful. Some cpus do not strictly honor the set of cpus + * specified in the interrupt destination when using lowest + * priority interrupt delivery mode. + * + * In particular there was a hyperthreading cpu observed to + * deliver interrupts to the wrong hyperthread when only one + * hyperthread was specified in the interrupt desitination. + */ + cpumask_clear(retmask); + cpumask_bits(retmask)[0] = APIC_ALL_CPUS; +} + #ifdef CONFIG_X86_SUMMIT_NUMA static struct rio_table_hdr *rio_table_hdr; static struct scal_detail *scal_devs[MAX_NUMNODES]; @@ -348,7 +355,7 @@ static int setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) } } if (i == rio_table_hdr->num_rio_dev) { - pr_err("Couldn't find owner Cyclone for Winnipeg!\n"); + printk(KERN_ERR "%s: Couldn't find owner Cyclone for Winnipeg!\n", __func__); return last_bus; } @@ -359,7 +366,7 @@ static int setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) } } if (i == rio_table_hdr->num_scal_dev) { - pr_err("Couldn't find owner Twister for Cyclone!\n"); + printk(KERN_ERR "%s: Couldn't find owner Twister for Cyclone!\n", __func__); return last_bus; } @@ -389,7 +396,7 @@ static int setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) num_buses = 9; break; default: - pr_info("Unsupported Winnipeg type!\n"); + printk(KERN_INFO "%s: Unsupported Winnipeg type!\n", __func__); return last_bus; } @@ -404,15 +411,13 @@ static int build_detail_arrays(void) int i, scal_detail_size, rio_detail_size; if (rio_table_hdr->num_scal_dev > MAX_NUMNODES) { - pr_warn("MAX_NUMNODES too low! Defined as %d, but system has %d nodes\n", - MAX_NUMNODES, rio_table_hdr->num_scal_dev); + printk(KERN_WARNING "%s: MAX_NUMNODES too low! 
Defined as %d, but system has %d nodes.\n", __func__, MAX_NUMNODES, rio_table_hdr->num_scal_dev); return 0; } switch (rio_table_hdr->version) { default: - pr_warn("Invalid Rio Grande Table Version: %d\n", - rio_table_hdr->version); + printk(KERN_WARNING "%s: Invalid Rio Grande Table Version: %d\n", __func__, rio_table_hdr->version); return 0; case 2: scal_detail_size = 11; @@ -457,7 +462,7 @@ void setup_summit(void) offset = *((unsigned short *)(ptr + offset)); } if (!rio_table_hdr) { - pr_err("Unable to locate Rio Grande Table in EBDA - bailing!\n"); + printk(KERN_ERR "%s: Unable to locate Rio Grande Table in EBDA - bailing!\n", __func__); return; } @@ -504,7 +509,7 @@ static struct apic apic_summit = { .check_apicid_used = summit_check_apicid_used, .check_apicid_present = summit_check_apicid_present, - .vector_allocation_domain = flat_vector_allocation_domain, + .vector_allocation_domain = summit_vector_allocation_domain, .init_apic_ldr = summit_init_apic_ldr, .ioapic_phys_id_map = summit_ioapic_phys_id_map, @@ -522,6 +527,7 @@ static struct apic apic_summit = { .set_apic_id = NULL, .apic_id_mask = 0xFF << 24, + .cpu_mask_to_apicid = summit_cpu_mask_to_apicid, .cpu_mask_to_apicid_and = summit_cpu_mask_to_apicid_and, .send_IPI_mask = summit_send_IPI_mask, diff --git a/trunk/arch/x86/kernel/apic/x2apic_cluster.c b/trunk/arch/x86/kernel/apic/x2apic_cluster.c index c88baa4ff0e5..ff35cff0e1a7 100644 --- a/trunk/arch/x86/kernel/apic/x2apic_cluster.c +++ b/trunk/arch/x86/kernel/apic/x2apic_cluster.c @@ -81,7 +81,7 @@ static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) } static void -x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) + x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) { __x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT); } @@ -96,37 +96,36 @@ static void x2apic_send_IPI_all(int vector) __x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC); } -static int -x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask, - unsigned int *apicid) +static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) { - u32 dest = 0; - u16 cluster; - int i; + /* + * We're using fixed IRQ delivery, can only return one logical APIC ID. + * May as well be the first. + */ + int cpu = cpumask_first(cpumask); - for_each_cpu_and(i, cpumask, andmask) { - if (!cpumask_test_cpu(i, cpu_online_mask)) - continue; - dest = per_cpu(x86_cpu_to_logical_apicid, i); - cluster = x2apic_cluster(i); - break; - } + if ((unsigned)cpu < nr_cpu_ids) + return per_cpu(x86_cpu_to_logical_apicid, cpu); + else + return BAD_APICID; +} - if (!dest) - return -EINVAL; +static unsigned int +x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) +{ + int cpu; - for_each_cpu_and(i, cpumask, andmask) { - if (!cpumask_test_cpu(i, cpu_online_mask)) - continue; - if (cluster != x2apic_cluster(i)) - continue; - dest |= per_cpu(x86_cpu_to_logical_apicid, i); + /* + * We're using fixed IRQ delivery, can only return one logical APIC ID. + * May as well be the first. 
+ */ + for_each_cpu_and(cpu, cpumask, andmask) { + if (cpumask_test_cpu(cpu, cpu_online_mask)) + break; } - *apicid = dest; - - return 0; + return per_cpu(x86_cpu_to_logical_apicid, cpu); } static void init_x2apic_ldr(void) @@ -209,32 +208,6 @@ static int x2apic_cluster_probe(void) return 0; } -static const struct cpumask *x2apic_cluster_target_cpus(void) -{ - return cpu_all_mask; -} - -/* - * Each x2apic cluster is an allocation domain. - */ -static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask, - const struct cpumask *mask) -{ - /* - * To minimize vector pressure, default case of boot, device bringup - * etc will use a single cpu for the interrupt destination. - * - * On explicit migration requests coming from irqbalance etc, - * interrupts will be routed to the x2apic cluster (cluster-id - * derived from the first cpu in the mask) members specified - * in the mask. - */ - if (mask == x2apic_cluster_target_cpus()) - cpumask_copy(retmask, cpumask_of(cpu)); - else - cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu)); -} - static struct apic apic_x2apic_cluster = { .name = "cluster x2apic", @@ -246,13 +219,13 @@ static struct apic apic_x2apic_cluster = { .irq_delivery_mode = dest_LowestPrio, .irq_dest_mode = 1, /* logical */ - .target_cpus = x2apic_cluster_target_cpus, + .target_cpus = x2apic_target_cpus, .disable_esr = 0, .dest_logical = APIC_DEST_LOGICAL, .check_apicid_used = NULL, .check_apicid_present = NULL, - .vector_allocation_domain = cluster_vector_allocation_domain, + .vector_allocation_domain = x2apic_vector_allocation_domain, .init_apic_ldr = init_x2apic_ldr, .ioapic_phys_id_map = NULL, @@ -270,6 +243,7 @@ static struct apic apic_x2apic_cluster = { .set_apic_id = x2apic_set_apic_id, .apic_id_mask = 0xFFFFFFFFu, + .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and, .send_IPI_mask = x2apic_send_IPI_mask, diff --git a/trunk/arch/x86/kernel/apic/x2apic_phys.c b/trunk/arch/x86/kernel/apic/x2apic_phys.c index e03a1e180e81..c17e982db275 100644 --- a/trunk/arch/x86/kernel/apic/x2apic_phys.c +++ b/trunk/arch/x86/kernel/apic/x2apic_phys.c @@ -76,6 +76,38 @@ static void x2apic_send_IPI_all(int vector) __x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC); } +static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) +{ + /* + * We're using fixed IRQ delivery, can only return one phys APIC ID. + * May as well be the first. + */ + int cpu = cpumask_first(cpumask); + + if ((unsigned)cpu < nr_cpu_ids) + return per_cpu(x86_cpu_to_apicid, cpu); + else + return BAD_APICID; +} + +static unsigned int +x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) +{ + int cpu; + + /* + * We're using fixed IRQ delivery, can only return one phys APIC ID. + * May as well be the first. 
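With fixed IRQ delivery the restored x2apic helpers can name only a single destination, so they take the first CPU of the mask and look up its per-CPU APIC ID, falling back to BAD_APICID for an empty mask. A self-contained sketch of that lookup; the logical_apicid table, NR_CPU_IDS and the BAD_APICID value are placeholders for this example only:

#include <stdio.h>
#include <strings.h>

#define NR_CPU_IDS  8
#define BAD_APICID  0xFFFFu

/* Placeholder per-CPU logical APIC IDs (x86_cpu_to_logical_apicid in the
 * kernel); the values are invented for the example. */
static const unsigned int logical_apicid[NR_CPU_IDS] = {
	0x00001, 0x00002, 0x00004, 0x00008, 0x10001, 0x10002, 0x10004, 0x10008
};

/* "May as well be the first": pick the lowest set bit of the mask and
 * return that CPU's logical APIC ID. */
static unsigned int mask_to_apicid(unsigned int mask)
{
	int cpu = mask ? ffs((int)mask) - 1 : NR_CPU_IDS;   /* ~ cpumask_first() */

	if ((unsigned int)cpu < NR_CPU_IDS)
		return logical_apicid[cpu];
	return BAD_APICID;
}

int main(void)
{
	printf("%#x\n", mask_to_apicid(0xF0));  /* first CPU is 4 -> 0x10001 */
	printf("%#x\n", mask_to_apicid(0x00));  /* empty mask -> BAD_APICID  */
	return 0;
}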
+ */ + for_each_cpu_and(cpu, cpumask, andmask) { + if (cpumask_test_cpu(cpu, cpu_online_mask)) + break; + } + + return per_cpu(x86_cpu_to_apicid, cpu); +} + static void init_x2apic_ldr(void) { } @@ -99,13 +131,13 @@ static struct apic apic_x2apic_phys = { .irq_delivery_mode = dest_Fixed, .irq_dest_mode = 0, /* physical */ - .target_cpus = online_target_cpus, + .target_cpus = x2apic_target_cpus, .disable_esr = 0, .dest_logical = 0, .check_apicid_used = NULL, .check_apicid_present = NULL, - .vector_allocation_domain = default_vector_allocation_domain, + .vector_allocation_domain = x2apic_vector_allocation_domain, .init_apic_ldr = init_x2apic_ldr, .ioapic_phys_id_map = NULL, @@ -123,7 +155,8 @@ static struct apic apic_x2apic_phys = { .set_apic_id = x2apic_set_apic_id, .apic_id_mask = 0xFFFFFFFFu, - .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and, + .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and, .send_IPI_mask = x2apic_send_IPI_mask, .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, diff --git a/trunk/arch/x86/kernel/apic/x2apic_uv_x.c b/trunk/arch/x86/kernel/apic/x2apic_uv_x.c index 8cfade9510a4..c6d03f7a4401 100644 --- a/trunk/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/trunk/arch/x86/kernel/apic/x2apic_uv_x.c @@ -185,6 +185,17 @@ EXPORT_SYMBOL_GPL(uv_possible_blades); unsigned long sn_rtc_cycles_per_second; EXPORT_SYMBOL(sn_rtc_cycles_per_second); +static const struct cpumask *uv_target_cpus(void) +{ + return cpu_online_mask; +} + +static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask) +{ + cpumask_clear(retmask); + cpumask_set_cpu(cpu, retmask); +} + static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip) { #ifdef CONFIG_SMP @@ -269,12 +280,25 @@ static void uv_init_apic_ldr(void) { } -static int +static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask) +{ + /* + * We're using fixed IRQ delivery, can only return one phys APIC ID. + * May as well be the first. + */ + int cpu = cpumask_first(cpumask); + + if ((unsigned)cpu < nr_cpu_ids) + return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits; + else + return BAD_APICID; +} + +static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask, - const struct cpumask *andmask, - unsigned int *apicid) + const struct cpumask *andmask) { - int unsigned cpu; + int cpu; /* * We're using fixed IRQ delivery, can only return one phys APIC ID. 
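The _and variants above additionally intersect the requested mask with a second mask and skip offline CPUs before choosing the first candidate. Below is a stand-alone model of that triple intersection; the plain bitmask representation and the phys_apicid table are invented for the example, while the kernel walks real cpumasks and per-CPU data:

#include <stdio.h>

#define NR_CPU_IDS  8

/* Placeholder per-CPU physical APIC IDs (x86_cpu_to_apicid in the kernel). */
static const unsigned int phys_apicid[NR_CPU_IDS] = { 0, 2, 4, 6, 8, 10, 12, 14 };

/* Walk the intersection of the two requested masks and return the first CPU
 * that is also online; fixed delivery means a single destination anyway. */
static int first_online_and(unsigned long a, unsigned long b, unsigned long online)
{
	unsigned long both = a & b;
	int cpu;

	for (cpu = 0; cpu < NR_CPU_IDS; cpu++)
		if ((both & (1UL << cpu)) && (online & (1UL << cpu)))
			return cpu;
	return NR_CPU_IDS;               /* no candidate found */
}

int main(void)
{
	/* CPUs 0-3 requested, affinity hint 2-5, but CPU 2 is offline. */
	int cpu = first_online_and(0x0F, 0x3C, 0xFB);

	if (cpu < NR_CPU_IDS)
		printf("cpu %d -> apicid %u\n", cpu, phys_apicid[cpu]);  /* cpu 3 -> 6 */
	else
		printf("no online CPU in both masks\n");
	return 0;
}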
@@ -284,13 +308,7 @@ uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask, if (cpumask_test_cpu(cpu, cpu_online_mask)) break; } - - if (likely(cpu < nr_cpu_ids)) { - *apicid = per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits; - return 0; - } - - return -EINVAL; + return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits; } static unsigned int x2apic_get_apic_id(unsigned long x) @@ -344,13 +362,13 @@ static struct apic __refdata apic_x2apic_uv_x = { .irq_delivery_mode = dest_Fixed, .irq_dest_mode = 0, /* physical */ - .target_cpus = online_target_cpus, + .target_cpus = uv_target_cpus, .disable_esr = 0, .dest_logical = APIC_DEST_LOGICAL, .check_apicid_used = NULL, .check_apicid_present = NULL, - .vector_allocation_domain = default_vector_allocation_domain, + .vector_allocation_domain = uv_vector_allocation_domain, .init_apic_ldr = uv_init_apic_ldr, .ioapic_phys_id_map = NULL, @@ -368,6 +386,7 @@ static struct apic __refdata apic_x2apic_uv_x = { .set_apic_id = set_apic_id, .apic_id_mask = 0xFFFFFFFFu, + .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and, .send_IPI_mask = uv_send_IPI_mask, diff --git a/trunk/arch/x86/kernel/apm_32.c b/trunk/arch/x86/kernel/apm_32.c index d65464e43503..07b0c0db466c 100644 --- a/trunk/arch/x86/kernel/apm_32.c +++ b/trunk/arch/x86/kernel/apm_32.c @@ -201,8 +201,6 @@ * http://www.microsoft.com/whdc/archive/amp_12.mspx] */ -#define pr_fmt(fmt) "apm: " fmt - #include #include @@ -487,11 +485,11 @@ static void apm_error(char *str, int err) if (error_table[i].key == err) break; if (i < ERROR_COUNT) - pr_notice("%s: %s\n", str, error_table[i].msg); + printk(KERN_NOTICE "apm: %s: %s\n", str, error_table[i].msg); else if (err < 0) - pr_notice("%s: linux error code %i\n", str, err); + printk(KERN_NOTICE "apm: %s: linux error code %i\n", str, err); else - pr_notice("%s: unknown error code %#2.2x\n", + printk(KERN_NOTICE "apm: %s: unknown error code %#2.2x\n", str, err); } @@ -1186,7 +1184,7 @@ static void queue_event(apm_event_t event, struct apm_user *sender) static int notified; if (notified++ == 0) - pr_err("an event queue overflowed\n"); + printk(KERN_ERR "apm: an event queue overflowed\n"); if (++as->event_tail >= APM_MAX_EVENTS) as->event_tail = 0; } @@ -1449,7 +1447,7 @@ static void apm_mainloop(void) static int check_apm_user(struct apm_user *as, const char *func) { if (as == NULL || as->magic != APM_BIOS_MAGIC) { - pr_err("%s passed bad filp\n", func); + printk(KERN_ERR "apm: %s passed bad filp\n", func); return 1; } return 0; @@ -1588,7 +1586,7 @@ static int do_release(struct inode *inode, struct file *filp) as1 = as1->next) ; if (as1 == NULL) - pr_err("filp not in user list\n"); + printk(KERN_ERR "apm: filp not in user list\n"); else as1->next = as->next; } @@ -1602,9 +1600,11 @@ static int do_open(struct inode *inode, struct file *filp) struct apm_user *as; as = kmalloc(sizeof(*as), GFP_KERNEL); - if (as == NULL) + if (as == NULL) { + printk(KERN_ERR "apm: cannot allocate struct of size %d bytes\n", + sizeof(*as)); return -ENOMEM; - + } as->magic = APM_BIOS_MAGIC; as->event_tail = as->event_head = 0; as->suspends_pending = as->standbys_pending = 0; @@ -2313,16 +2313,16 @@ static int __init apm_init(void) } if (apm_info.disabled) { - pr_notice("disabled on user request.\n"); + printk(KERN_NOTICE "apm: disabled on user request.\n"); return -ENODEV; } if ((num_online_cpus() > 1) && !power_off && !smp) { - pr_notice("disabled - APM is not SMP safe.\n"); + printk(KERN_NOTICE "apm: disabled - APM is not SMP 
safe.\n"); apm_info.disabled = 1; return -ENODEV; } if (!acpi_disabled) { - pr_notice("overridden by ACPI.\n"); + printk(KERN_NOTICE "apm: overridden by ACPI.\n"); apm_info.disabled = 1; return -ENODEV; } @@ -2356,7 +2356,8 @@ static int __init apm_init(void) kapmd_task = kthread_create(apm, NULL, "kapmd"); if (IS_ERR(kapmd_task)) { - pr_err("disabled - Unable to start kernel thread\n"); + printk(KERN_ERR "apm: disabled - Unable to start kernel " + "thread.\n"); err = PTR_ERR(kapmd_task); kapmd_task = NULL; remove_proc_entry("apm", NULL); diff --git a/trunk/arch/x86/kernel/cpu/Makefile b/trunk/arch/x86/kernel/cpu/Makefile index bac4c3804cc7..6ab6aa2fdfdd 100644 --- a/trunk/arch/x86/kernel/cpu/Makefile +++ b/trunk/arch/x86/kernel/cpu/Makefile @@ -32,9 +32,7 @@ obj-$(CONFIG_PERF_EVENTS) += perf_event.o ifdef CONFIG_PERF_EVENTS obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd.o -obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_p4.o -obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o -obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o +obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_p4.o perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o endif obj-$(CONFIG_X86_MCE) += mcheck/ diff --git a/trunk/arch/x86/kernel/cpu/amd.c b/trunk/arch/x86/kernel/cpu/amd.c index 9d92e19039f0..146bb6218eec 100644 --- a/trunk/arch/x86/kernel/cpu/amd.c +++ b/trunk/arch/x86/kernel/cpu/amd.c @@ -19,39 +19,6 @@ #include "cpu.h" -static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) -{ - struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); - u32 gprs[8] = { 0 }; - int err; - - WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__); - - gprs[1] = msr; - gprs[7] = 0x9c5a203a; - - err = rdmsr_safe_regs(gprs); - - *p = gprs[0] | ((u64)gprs[2] << 32); - - return err; -} - -static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val) -{ - struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); - u32 gprs[8] = { 0 }; - - WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__); - - gprs[0] = (u32)val; - gprs[1] = msr; - gprs[2] = val >> 32; - gprs[7] = 0x9c5a203a; - - return wrmsr_safe_regs(gprs); -} - #ifdef CONFIG_X86_32 /* * B step AMD K6 before B 9730xxxx have hardware bugs that can cause @@ -619,9 +586,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) !cpu_has(c, X86_FEATURE_TOPOEXT)) { u64 val; - if (!rdmsrl_safe(0xc0011005, &val)) { + if (!rdmsrl_amd_safe(0xc0011005, &val)) { val |= 1ULL << 54; - wrmsrl_safe(0xc0011005, val); + wrmsrl_amd_safe(0xc0011005, val); rdmsrl(0xc0011005, val); if (val & (1ULL << 54)) { set_cpu_cap(c, X86_FEATURE_TOPOEXT); @@ -712,7 +679,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask); if (err == 0) { mask |= (1 << 10); - wrmsrl_safe(MSR_AMD64_MCx_MASK(4), mask); + checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask); } } diff --git a/trunk/arch/x86/kernel/cpu/bugs.c b/trunk/arch/x86/kernel/cpu/bugs.c index c97bb7b5a9f8..46674fbb62ba 100644 --- a/trunk/arch/x86/kernel/cpu/bugs.c +++ b/trunk/arch/x86/kernel/cpu/bugs.c @@ -55,8 +55,8 @@ static void __init check_fpu(void) if (!boot_cpu_data.hard_math) { #ifndef CONFIG_MATH_EMULATION - pr_emerg("No coprocessor found and no math emulation present\n"); - pr_emerg("Giving up\n"); + printk(KERN_EMERG "No coprocessor found and no math emulation present.\n"); + printk(KERN_EMERG "Giving up.\n"); for (;;) ; #endif return; @@ -86,7 +86,7 @@ static void __init 
check_fpu(void) boot_cpu_data.fdiv_bug = fdiv_bug; if (boot_cpu_data.fdiv_bug) - pr_warn("Hmm, FPU with FDIV bug\n"); + printk(KERN_WARNING "Hmm, FPU with FDIV bug.\n"); } static void __init check_hlt(void) @@ -94,16 +94,16 @@ static void __init check_hlt(void) if (boot_cpu_data.x86 >= 5 || paravirt_enabled()) return; - pr_info("Checking 'hlt' instruction... "); + printk(KERN_INFO "Checking 'hlt' instruction... "); if (!boot_cpu_data.hlt_works_ok) { - pr_cont("disabled\n"); + printk("disabled\n"); return; } halt(); halt(); halt(); halt(); - pr_cont("OK\n"); + printk(KERN_CONT "OK.\n"); } /* @@ -116,7 +116,7 @@ static void __init check_popad(void) #ifndef CONFIG_X86_POPAD_OK int res, inp = (int) &res; - pr_info("Checking for popad bug... "); + printk(KERN_INFO "Checking for popad bug... "); __asm__ __volatile__( "movl $12345678,%%eax; movl $0,%%edi; pusha; popa; movl (%%edx,%%edi),%%ecx " : "=&a" (res) @@ -127,9 +127,9 @@ static void __init check_popad(void) * CPU hard. Too bad. */ if (res != 12345678) - pr_cont("Buggy\n"); + printk(KERN_CONT "Buggy.\n"); else - pr_cont("OK\n"); + printk(KERN_CONT "OK.\n"); #endif } @@ -161,7 +161,7 @@ void __init check_bugs(void) { identify_boot_cpu(); #ifndef CONFIG_SMP - pr_info("CPU: "); + printk(KERN_INFO "CPU: "); print_cpu_info(&boot_cpu_data); #endif check_config(); diff --git a/trunk/arch/x86/kernel/cpu/common.c b/trunk/arch/x86/kernel/cpu/common.c index 5bbc082c47ad..6b9333b429ba 100644 --- a/trunk/arch/x86/kernel/cpu/common.c +++ b/trunk/arch/x86/kernel/cpu/common.c @@ -947,7 +947,7 @@ static void __cpuinit __print_cpu_msr(void) index_max = msr_range_array[i].max; for (index = index_min; index < index_max; index++) { - if (rdmsrl_safe(index, &val)) + if (rdmsrl_amd_safe(index, &val)) continue; printk(KERN_INFO " MSR%08x: %016llx\n", index, val); } diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce.c b/trunk/arch/x86/kernel/cpu/mcheck/mce.c index 9473e8772fd1..da27c5d2168a 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/mce.c +++ b/trunk/arch/x86/kernel/cpu/mcheck/mce.c @@ -7,9 +7,6 @@ * Copyright 2008 Intel Corporation * Author: Andi Kleen */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include #include #include @@ -213,7 +210,7 @@ static void drain_mcelog_buffer(void) cpu_relax(); if (!m->finished && retries >= 4) { - pr_err("skipping error being logged currently!\n"); + pr_err("MCE: skipping error being logged currently!\n"); break; } } @@ -1170,9 +1167,8 @@ int memory_failure(unsigned long pfn, int vector, int flags) { /* mce_severity() should not hand us an ACTION_REQUIRED error */ BUG_ON(flags & MF_ACTION_REQUIRED); - pr_err("Uncorrected memory error in page 0x%lx ignored\n" - "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n", - pfn); + printk(KERN_ERR "Uncorrected memory error in page 0x%lx ignored\n" + "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n", pfn); return 0; } @@ -1190,7 +1186,6 @@ void mce_notify_process(void) { unsigned long pfn; struct mce_info *mi = mce_find_info(); - int flags = MF_ACTION_REQUIRED; if (!mi) mce_panic("Lost physical address for unconsumed uncorrectable error", NULL, NULL); @@ -1205,9 +1200,8 @@ void mce_notify_process(void) * doomed. We still need to mark the page as poisoned and alert any * other users of the page. 
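The mce_notify_process() hunk above changes how a non-restartable context is handled: the newer code passes MF_MUST_KILL to memory_failure() and forces SIGBUS only if recovery fails, while the restored code forces SIGBUS itself whenever recovery fails or the interrupted context cannot be restarted. The sketch below compares only when this function would send the signal; memory_failure() is stubbed out and the flag values are illustrative, not the kernel's:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative flag values; the kernel defines the real ones. */
#define MF_ACTION_REQUIRED  0x1
#define MF_MUST_KILL        0x2

/* 'fails' models a negative return from memory_failure(). */
static bool new_code_sends_sigbus(bool restartable, bool fails)
{
	int flags = MF_ACTION_REQUIRED;

	if (!restartable)
		flags |= MF_MUST_KILL;   /* the kill is delegated to memory_failure() */
	(void)flags;
	return fails;                    /* force_sig() only when recovery failed */
}

static bool old_code_sends_sigbus(bool restartable, bool fails)
{
	/* Restored behaviour: signal on failure or whenever the interrupted
	 * context cannot simply be restarted. */
	return fails || !restartable;
}

int main(void)
{
	printf("restartable, recovery ok : new=%d old=%d\n",
	       new_code_sends_sigbus(true, false), old_code_sends_sigbus(true, false));
	printf("not restartable, ok      : new=%d old=%d\n",
	       new_code_sends_sigbus(false, false), old_code_sends_sigbus(false, false));
	printf("not restartable, failed  : new=%d old=%d\n",
	       new_code_sends_sigbus(false, true), old_code_sends_sigbus(false, true));
	return 0;
}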
*/ - if (!mi->restartable) - flags |= MF_MUST_KILL; - if (memory_failure(pfn, MCE_VECTOR, flags) < 0) { + if (memory_failure(pfn, MCE_VECTOR, MF_ACTION_REQUIRED) < 0 || + mi->restartable == 0) { pr_err("Memory error not recovered"); force_sig(SIGBUS, current); } @@ -1364,10 +1358,11 @@ static int __cpuinit __mcheck_cpu_cap_init(void) b = cap & MCG_BANKCNT_MASK; if (!banks) - pr_info("CPU supports %d MCE banks\n", b); + printk(KERN_INFO "mce: CPU supports %d MCE banks\n", b); if (b > MAX_NR_BANKS) { - pr_warn("Using only %u machine check banks out of %u\n", + printk(KERN_WARNING + "MCE: Using only %u machine check banks out of %u\n", MAX_NR_BANKS, b); b = MAX_NR_BANKS; } @@ -1424,7 +1419,7 @@ static void __mcheck_cpu_init_generic(void) static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) { if (c->x86_vendor == X86_VENDOR_UNKNOWN) { - pr_info("unknown CPU type - not enabling MCE support\n"); + pr_info("MCE: unknown CPU type - not enabling MCE support.\n"); return -EOPNOTSUPP; } @@ -1579,7 +1574,7 @@ static void __mcheck_cpu_init_timer(void) /* Handle unconfigured int18 (should never happen) */ static void unexpected_machine_check(struct pt_regs *regs, long error_code) { - pr_err("CPU#%d: Unexpected int18 (Machine Check)\n", + printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n", smp_processor_id()); } @@ -1898,7 +1893,8 @@ static int __init mcheck_enable(char *str) get_option(&str, &monarch_timeout); } } else { - pr_info("mce argument %s ignored. Please use /sys\n", str); + printk(KERN_INFO "mce argument %s ignored. Please use /sys\n", + str); return 0; } return 1; diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c b/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c index 671b95a2ffb5..f4873a64f46d 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -1,17 +1,15 @@ /* - * (c) 2005-2012 Advanced Micro Devices, Inc. + * (c) 2005, 2006 Advanced Micro Devices, Inc. * Your use of this code is subject to the terms and conditions of the * GNU general public license version 2. See "COPYING" or * http://www.gnu.org/licenses/gpl.html * * Written by Jacob Shin - AMD, Inc. 
* - * Support: borislav.petkov@amd.com + * Support : jacob.shin@amd.com * * April 2006 * - added support for AMD Family 0x10 processors - * May 2012 - * - major scrubbing * * All MC4_MISCi registers are shared between multi-cores */ @@ -27,7 +25,6 @@ #include #include -#include #include #include #include @@ -48,15 +45,23 @@ #define MASK_BLKPTR_LO 0xFF000000 #define MCG_XBLK_ADDR 0xC0000400 -static const char * const th_names[] = { - "load_store", - "insn_fetch", - "combined_unit", - "", - "northbridge", - "execution_unit", +struct threshold_block { + unsigned int block; + unsigned int bank; + unsigned int cpu; + u32 address; + u16 interrupt_enable; + bool interrupt_capable; + u16 threshold_limit; + struct kobject kobj; + struct list_head miscj; }; +struct threshold_bank { + struct kobject *kobj; + struct threshold_block *blocks; + cpumask_var_t cpus; +}; static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks); static unsigned char shared_bank[NR_BANKS] = { @@ -79,26 +84,6 @@ struct thresh_restart { u16 old_limit; }; -static const char * const bank4_names(struct threshold_block *b) -{ - switch (b->address) { - /* MSR4_MISC0 */ - case 0x00000413: - return "dram"; - - case 0xc0000408: - return "ht_links"; - - case 0xc0000409: - return "l3_cache"; - - default: - WARN(1, "Funny MSR: 0x%08x\n", b->address); - return ""; - } -}; - - static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits) { /* @@ -239,6 +224,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) if (!block) per_cpu(bank_map, cpu) |= (1 << bank); + if (shared_bank[bank] && c->cpu_core_id) + break; memset(&b, 0, sizeof(b)); b.cpu = cpu; @@ -339,7 +326,7 @@ struct threshold_attr { #define SHOW_FIELDS(name) \ static ssize_t show_ ## name(struct threshold_block *b, char *buf) \ { \ - return sprintf(buf, "%lu\n", (unsigned long) b->name); \ + return sprintf(buf, "%lx\n", (unsigned long) b->name); \ } SHOW_FIELDS(interrupt_enable) SHOW_FIELDS(threshold_limit) @@ -390,20 +377,37 @@ store_threshold_limit(struct threshold_block *b, const char *buf, size_t size) return size; } -static ssize_t show_error_count(struct threshold_block *b, char *buf) +struct threshold_block_cross_cpu { + struct threshold_block *tb; + long retval; +}; + +static void local_error_count_handler(void *_tbcc) { - u32 lo, hi; + struct threshold_block_cross_cpu *tbcc = _tbcc; + struct threshold_block *b = tbcc->tb; + u32 low, high; - rdmsr_on_cpu(b->cpu, b->address, &lo, &hi); + rdmsr(b->address, low, high); + tbcc->retval = (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit); +} - return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) - - (THRESHOLD_MAX - b->threshold_limit))); +static ssize_t show_error_count(struct threshold_block *b, char *buf) +{ + struct threshold_block_cross_cpu tbcc = { .tb = b, }; + + smp_call_function_single(b->cpu, local_error_count_handler, &tbcc, 1); + return sprintf(buf, "%lx\n", tbcc.retval); } -static struct threshold_attr error_count = { - .attr = {.name = __stringify(error_count), .mode = 0444 }, - .show = show_error_count, -}; +static ssize_t store_error_count(struct threshold_block *b, + const char *buf, size_t count) +{ + struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 }; + + smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1); + return 1; +} #define RW_ATTR(val) \ static struct threshold_attr val = { \ @@ -414,6 +418,7 @@ static struct threshold_attr val = { \ RW_ATTR(interrupt_enable); RW_ATTR(threshold_limit); +RW_ATTR(error_count); static struct attribute 
*default_attrs[] = { &threshold_limit.attr, @@ -512,7 +517,7 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu, err = kobject_init_and_add(&b->kobj, &threshold_ktype, per_cpu(threshold_banks, cpu)[bank]->kobj, - (bank == 4 ? bank4_names(b) : th_names[bank])); + "misc%i", block); if (err) goto out_free; recurse: @@ -543,91 +548,98 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu, return err; } -static __cpuinit int __threshold_add_blocks(struct threshold_bank *b) +static __cpuinit long +local_allocate_threshold_blocks(int cpu, unsigned int bank) { - struct list_head *head = &b->blocks->miscj; - struct threshold_block *pos = NULL; - struct threshold_block *tmp = NULL; - int err = 0; - - err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name); - if (err) - return err; - - list_for_each_entry_safe(pos, tmp, head, miscj) { - - err = kobject_add(&pos->kobj, b->kobj, pos->kobj.name); - if (err) { - list_for_each_entry_safe_reverse(pos, tmp, head, miscj) - kobject_del(&pos->kobj); - - return err; - } - } - return err; + return allocate_threshold_blocks(cpu, bank, 0, + MSR_IA32_MC0_MISC + bank * 4); } +/* symlinks sibling shared banks to first core. first core owns dir/files. */ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) { - struct device *dev = per_cpu(mce_device, cpu); - struct amd_northbridge *nb = NULL; + int i, err = 0; struct threshold_bank *b = NULL; - const char *name = th_names[bank]; - int err = 0; + struct device *dev = per_cpu(mce_device, cpu); + char name[32]; - if (shared_bank[bank]) { + sprintf(name, "threshold_bank%i", bank); - nb = node_to_amd_nb(amd_get_nb_id(cpu)); - WARN_ON(!nb); +#ifdef CONFIG_SMP + if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ + i = cpumask_first(cpu_llc_shared_mask(cpu)); - /* threshold descriptor already initialized on this node? 
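show_error_count() and local_error_count_handler() in the hunk above turn the raw 12-bit counter field of MC4_MISC into a user-visible error count. This works because the bank counter is preloaded with THRESHOLD_MAX minus the configured limit (the value threshold_restart_bank() writes), so the visible count is simply the difference; treat that preload convention as an assumption of this small worked example:

#include <stdio.h>

#define THRESHOLD_MAX  0xFFFu   /* 12-bit error counter field in MC4_MISC */

/* Raw counter minus its preload, matching the arithmetic in the hunk above. */
static unsigned int error_count(unsigned int counter_field, unsigned int limit)
{
	return (counter_field & THRESHOLD_MAX) - (THRESHOLD_MAX - limit);
}

int main(void)
{
	unsigned int limit = 20;                        /* threshold_limit */
	unsigned int preload = THRESHOLD_MAX - limit;   /* value written at setup */

	printf("just programmed: %u errors\n", error_count(preload, limit));      /* 0 */
	printf("after 5 errors : %u errors\n", error_count(preload + 5, limit));  /* 5 */
	return 0;
}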
*/ - if (nb->bank4) { - /* yes, use it */ - b = nb->bank4; - err = kobject_add(b->kobj, &dev->kobj, name); - if (err) - goto out; + /* first core not up yet */ + if (cpu_data(i).cpu_core_id) + goto out; - per_cpu(threshold_banks, cpu)[bank] = b; - atomic_inc(&b->cpus); + /* already linked */ + if (per_cpu(threshold_banks, cpu)[bank]) + goto out; - err = __threshold_add_blocks(b); + b = per_cpu(threshold_banks, i)[bank]; + if (!b) goto out; - } + + err = sysfs_create_link(&dev->kobj, b->kobj, name); + if (err) + goto out; + + cpumask_copy(b->cpus, cpu_llc_shared_mask(cpu)); + per_cpu(threshold_banks, cpu)[bank] = b; + + goto out; } +#endif b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL); if (!b) { err = -ENOMEM; goto out; } + if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) { + kfree(b); + err = -ENOMEM; + goto out; + } b->kobj = kobject_create_and_add(name, &dev->kobj); - if (!b->kobj) { - err = -EINVAL; + if (!b->kobj) goto out_free; - } + +#ifndef CONFIG_SMP + cpumask_setall(b->cpus); +#else + cpumask_set_cpu(cpu, b->cpus); +#endif per_cpu(threshold_banks, cpu)[bank] = b; - if (shared_bank[bank]) { - atomic_set(&b->cpus, 1); + err = local_allocate_threshold_blocks(cpu, bank); + if (err) + goto out_free; + + for_each_cpu(i, b->cpus) { + if (i == cpu) + continue; - /* nb is already initialized, see above */ - WARN_ON(nb->bank4); - nb->bank4 = b; + dev = per_cpu(mce_device, i); + if (dev) + err = sysfs_create_link(&dev->kobj,b->kobj, name); + if (err) + goto out; + + per_cpu(threshold_banks, i)[bank] = b; } - err = allocate_threshold_blocks(cpu, bank, 0, - MSR_IA32_MC0_MISC + bank * 4); - if (!err) - goto out; + goto out; - out_free: +out_free: + per_cpu(threshold_banks, cpu)[bank] = NULL; + free_cpumask_var(b->cpus); kfree(b); - - out: +out: return err; } @@ -648,6 +660,12 @@ static __cpuinit int threshold_create_device(unsigned int cpu) return err; } +/* + * let's be hotplug friendly. + * in case of multiple core processors, the first core always takes ownership + * of shared sysfs dir/files, and rest of the cores will be symlinked to it. + */ + static void deallocate_threshold_block(unsigned int cpu, unsigned int bank) { @@ -668,42 +686,41 @@ static void deallocate_threshold_block(unsigned int cpu, per_cpu(threshold_banks, cpu)[bank]->blocks = NULL; } -static void __threshold_remove_blocks(struct threshold_bank *b) -{ - struct threshold_block *pos = NULL; - struct threshold_block *tmp = NULL; - - kobject_del(b->kobj); - - list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj) - kobject_del(&pos->kobj); -} - static void threshold_remove_bank(unsigned int cpu, int bank) { - struct amd_northbridge *nb; struct threshold_bank *b; + struct device *dev; + char name[32]; + int i = 0; b = per_cpu(threshold_banks, cpu)[bank]; if (!b) return; - if (!b->blocks) goto free_out; - if (shared_bank[bank]) { - if (!atomic_dec_and_test(&b->cpus)) { - __threshold_remove_blocks(b); - per_cpu(threshold_banks, cpu)[bank] = NULL; - return; - } else { - /* - * the last CPU on this node using the shared bank is - * going away, remove that bank now. 
- */ - nb = node_to_amd_nb(amd_get_nb_id(cpu)); - nb->bank4 = NULL; - } + sprintf(name, "threshold_bank%i", bank); + +#ifdef CONFIG_SMP + /* sibling symlink */ + if (shared_bank[bank] && b->blocks->cpu != cpu) { + dev = per_cpu(mce_device, cpu); + sysfs_remove_link(&dev->kobj, name); + per_cpu(threshold_banks, cpu)[bank] = NULL; + + return; + } +#endif + + /* remove all sibling symlinks before unregistering */ + for_each_cpu(i, b->cpus) { + if (i == cpu) + continue; + + dev = per_cpu(mce_device, i); + if (dev) + sysfs_remove_link(&dev->kobj, name); + per_cpu(threshold_banks, i)[bank] = NULL; } deallocate_threshold_block(cpu, bank); @@ -711,6 +728,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank) free_out: kobject_del(b->kobj); kobject_put(b->kobj); + free_cpumask_var(b->cpus); kfree(b); per_cpu(threshold_banks, cpu)[bank] = NULL; } diff --git a/trunk/arch/x86/kernel/cpu/mtrr/cleanup.c b/trunk/arch/x86/kernel/cpu/mtrr/cleanup.c index 35ffda5d0727..bdda2e6c673b 100644 --- a/trunk/arch/x86/kernel/cpu/mtrr/cleanup.c +++ b/trunk/arch/x86/kernel/cpu/mtrr/cleanup.c @@ -258,11 +258,11 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk, /* Compute the maximum size with which we can make a range: */ if (range_startk) - max_align = __ffs(range_startk); + max_align = ffs(range_startk) - 1; else - max_align = BITS_PER_LONG - 1; + max_align = 32; - align = __fls(range_sizek); + align = fls(range_sizek) - 1; if (align > max_align) align = max_align; diff --git a/trunk/arch/x86/kernel/cpu/mtrr/generic.c b/trunk/arch/x86/kernel/cpu/mtrr/generic.c index e9fe907cd249..75772ae6c65f 100644 --- a/trunk/arch/x86/kernel/cpu/mtrr/generic.c +++ b/trunk/arch/x86/kernel/cpu/mtrr/generic.c @@ -361,7 +361,11 @@ static void __init print_mtrr_state(void) } pr_debug("MTRR variable ranges %sabled:\n", mtrr_state.enabled & 2 ? 
"en" : "dis"); - high_width = (__ffs64(size_or_mask) - (32 - PAGE_SHIFT) + 3) / 4; + if (size_or_mask & 0xffffffffUL) + high_width = ffs(size_or_mask & 0xffffffffUL) - 1; + else + high_width = ffs(size_or_mask>>32) + 32 - 1; + high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4; for (i = 0; i < num_var_ranges; ++i) { if (mtrr_state.var_ranges[i].mask_lo & (1 << 11)) diff --git a/trunk/arch/x86/kernel/cpu/perf_event.c b/trunk/arch/x86/kernel/cpu/perf_event.c index 29557aa06dda..c4706cf9c011 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event.c +++ b/trunk/arch/x86/kernel/cpu/perf_event.c @@ -35,6 +35,17 @@ #include "perf_event.h" +#if 0 +#undef wrmsrl +#define wrmsrl(msr, val) \ +do { \ + trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\ + (unsigned long)(val)); \ + native_write_msr((msr), (u32)((u64)(val)), \ + (u32)((u64)(val) >> 32)); \ +} while (0) +#endif + struct x86_pmu x86_pmu __read_mostly; DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { @@ -63,7 +74,7 @@ u64 x86_perf_event_update(struct perf_event *event) int idx = hwc->idx; s64 delta; - if (idx == INTEL_PMC_IDX_FIXED_BTS) + if (idx == X86_PMC_IDX_FIXED_BTS) return 0; /* @@ -75,7 +86,7 @@ u64 x86_perf_event_update(struct perf_event *event) */ again: prev_raw_count = local64_read(&hwc->prev_count); - rdpmcl(hwc->event_base_rdpmc, new_raw_count); + rdmsrl(hwc->event_base, new_raw_count); if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, new_raw_count) != prev_raw_count) @@ -178,7 +189,7 @@ static void release_pmc_hardware(void) {} static bool check_hw_exists(void) { - u64 val, val_new = ~0; + u64 val, val_new = 0; int i, reg, ret = 0; /* @@ -211,9 +222,8 @@ static bool check_hw_exists(void) * that don't trap on the MSR access and always return 0s. */ val = 0xabcdUL; - reg = x86_pmu_event_addr(0); - ret = wrmsrl_safe(reg, val); - ret |= rdmsrl_safe(reg, &val_new); + ret = checking_wrmsrl(x86_pmu_event_addr(0), val); + ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new); if (ret || val != val_new) goto msr_fail; @@ -230,7 +240,6 @@ static bool check_hw_exists(void) msr_fail: printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n"); - printk(KERN_ERR "Failed to access perfctr msr (MSR %x is %Lx)\n", reg, val_new); return false; } @@ -379,7 +388,7 @@ int x86_pmu_hw_config(struct perf_event *event) int precise = 0; /* Support for constant skid */ - if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) { + if (x86_pmu.pebs_active) { precise++; /* Support for IP fixup */ @@ -628,8 +637,8 @@ static bool __perf_sched_find_counter(struct perf_sched *sched) c = sched->constraints[sched->state.event]; /* Prefer fixed purpose counters */ - if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) { - idx = INTEL_PMC_IDX_FIXED; + if (x86_pmu.num_counters_fixed) { + idx = X86_PMC_IDX_FIXED; for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) { if (!__test_and_set_bit(idx, sched->state.used)) goto done; @@ -637,7 +646,7 @@ static bool __perf_sched_find_counter(struct perf_sched *sched) } /* Grab the first unused counter starting with idx */ idx = sched->state.counter; - for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) { + for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_FIXED) { if (!__test_and_set_bit(idx, sched->state.used)) goto done; } @@ -695,8 +704,8 @@ static bool perf_sched_next_event(struct perf_sched *sched) /* * Assign a counter for each event. 
*/ -int perf_assign_events(struct event_constraint **constraints, int n, - int wmin, int wmax, int *assign) +static int perf_assign_events(struct event_constraint **constraints, int n, + int wmin, int wmax, int *assign) { struct perf_sched sched; @@ -815,17 +824,15 @@ static inline void x86_assign_hw_event(struct perf_event *event, hwc->last_cpu = smp_processor_id(); hwc->last_tag = ++cpuc->tags[i]; - if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) { + if (hwc->idx == X86_PMC_IDX_FIXED_BTS) { hwc->config_base = 0; hwc->event_base = 0; - } else if (hwc->idx >= INTEL_PMC_IDX_FIXED) { + } else if (hwc->idx >= X86_PMC_IDX_FIXED) { hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; - hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED); - hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30; + hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED); } else { hwc->config_base = x86_pmu_config_addr(hwc->idx); hwc->event_base = x86_pmu_event_addr(hwc->idx); - hwc->event_base_rdpmc = hwc->idx; } } @@ -923,7 +930,7 @@ int x86_perf_event_set_period(struct perf_event *event) s64 period = hwc->sample_period; int ret = 0, idx = hwc->idx; - if (idx == INTEL_PMC_IDX_FIXED_BTS) + if (idx == X86_PMC_IDX_FIXED_BTS) return 0; /* @@ -1309,6 +1316,7 @@ static struct attribute_group x86_pmu_format_group = { static int __init init_hw_perf_events(void) { struct x86_pmu_quirk *quirk; + struct event_constraint *c; int err; pr_info("Performance Events: "); @@ -1339,8 +1347,21 @@ static int __init init_hw_perf_events(void) for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next) quirk->func(); - if (!x86_pmu.intel_ctrl) - x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1; + if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) { + WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!", + x86_pmu.num_counters, X86_PMC_MAX_GENERIC); + x86_pmu.num_counters = X86_PMC_MAX_GENERIC; + } + x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1; + + if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) { + WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!", + x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED); + x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED; + } + + x86_pmu.intel_ctrl |= + ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED; perf_events_lapic_init(); register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI"); @@ -1349,6 +1370,22 @@ static int __init init_hw_perf_events(void) __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, 0, x86_pmu.num_counters, 0); + if (x86_pmu.event_constraints) { + /* + * event on fixed counter2 (REF_CYCLES) only works on this + * counter, so do not extend mask to generic counters + */ + for_each_event_constraint(c, x86_pmu.event_constraints) { + if (c->cmask != X86_RAW_EVENT_MASK + || c->idxmsk64 == X86_PMC_MSK_FIXED_REF_CYCLES) { + continue; + } + + c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1; + c->weight += x86_pmu.num_counters; + } + } + x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */ x86_pmu_format_group.attrs = x86_pmu.format_attrs; @@ -1583,8 +1620,8 @@ static int x86_pmu_event_idx(struct perf_event *event) if (!x86_pmu.attr_rdpmc) return 0; - if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) { - idx -= INTEL_PMC_IDX_FIXED; + if (x86_pmu.num_counters_fixed && idx >= X86_PMC_IDX_FIXED) { + idx -= X86_PMC_IDX_FIXED; idx |= 1 << 30; } @@ -1612,12 +1649,7 @@ static ssize_t set_attr_rdpmc(struct device *cdev, struct device_attribute *attr, const char *buf, 
size_t count) { - unsigned long val; - ssize_t ret; - - ret = kstrtoul(buf, 0, &val); - if (ret) - return ret; + unsigned long val = simple_strtoul(buf, NULL, 0); if (!!val != !!x86_pmu.attr_rdpmc) { x86_pmu.attr_rdpmc = !!val; @@ -1650,20 +1682,13 @@ static void x86_pmu_flush_branch_stack(void) x86_pmu.flush_branch_stack(); } -void perf_check_microcode(void) -{ - if (x86_pmu.check_microcode) - x86_pmu.check_microcode(); -} -EXPORT_SYMBOL_GPL(perf_check_microcode); - static struct pmu pmu = { .pmu_enable = x86_pmu_enable, .pmu_disable = x86_pmu_disable, - .attr_groups = x86_pmu_attr_groups, + .attr_groups = x86_pmu_attr_groups, - .event_init = x86_pmu_event_init, + .event_init = x86_pmu_event_init, .add = x86_pmu_add, .del = x86_pmu_del, @@ -1671,11 +1696,11 @@ static struct pmu pmu = { .stop = x86_pmu_stop, .read = x86_pmu_read, - .start_txn = x86_pmu_start_txn, - .cancel_txn = x86_pmu_cancel_txn, - .commit_txn = x86_pmu_commit_txn, + .start_txn = x86_pmu_start_txn, + .cancel_txn = x86_pmu_cancel_txn, + .commit_txn = x86_pmu_commit_txn, - .event_idx = x86_pmu_event_idx, + .event_idx = x86_pmu_event_idx, .flush_branch_stack = x86_pmu_flush_branch_stack, }; @@ -1838,7 +1863,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs) else misc |= PERF_RECORD_MISC_GUEST_KERNEL; } else { - if (!kernel_ip(regs->ip)) + if (user_mode(regs)) misc |= PERF_RECORD_MISC_USER; else misc |= PERF_RECORD_MISC_KERNEL; diff --git a/trunk/arch/x86/kernel/cpu/perf_event.h b/trunk/arch/x86/kernel/cpu/perf_event.h index a15df4be151f..7241e2fc3c17 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event.h +++ b/trunk/arch/x86/kernel/cpu/perf_event.h @@ -14,18 +14,6 @@ #include -#if 0 -#undef wrmsrl -#define wrmsrl(msr, val) \ -do { \ - unsigned int _msr = (msr); \ - u64 _val = (val); \ - trace_printk("wrmsrl(%x, %Lx)\n", (unsigned int)(_msr), \ - (unsigned long long)(_val)); \ - native_write_msr((_msr), (u32)(_val), (u32)(_val >> 32)); \ -} while (0) -#endif - /* * | NHM/WSM | SNB | * register ------------------------------- @@ -69,7 +57,7 @@ struct amd_nb { }; /* The maximal number of PEBS events: */ -#define MAX_PEBS_EVENTS 8 +#define MAX_PEBS_EVENTS 4 /* * A debug store configuration. 
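The x86_pmu_event_idx() hunk above rebases fixed-function counters for userspace RDPMC: indices at or above INTEL_PMC_IDX_FIXED (32 in the kernel) are shifted down and tagged with bit 30, which is how the RDPMC instruction selects the fixed counter bank. A standalone model of just that rebasing; the surrounding kernel function does more than this:

#include <stdio.h>

#define INTEL_PMC_IDX_FIXED  32   /* first fixed-function counter index */

/* Rebase a kernel counter index to the ECX value userspace passes to RDPMC:
 * general counters keep their index, fixed counters get bit 30 set. */
static unsigned int rdpmc_index(int idx, int num_fixed)
{
	if (num_fixed && idx >= INTEL_PMC_IDX_FIXED) {
		idx -= INTEL_PMC_IDX_FIXED;
		idx |= 1 << 30;
	}
	return (unsigned int)idx;
}

int main(void)
{
	printf("general counter 1 -> ecx %#x\n", rdpmc_index(1, 3));   /* 0x1 */
	printf("fixed counter 0   -> ecx %#x\n", rdpmc_index(32, 3));  /* 0x40000000 */
	printf("fixed counter 2   -> ecx %#x\n", rdpmc_index(34, 3));  /* 0x40000002 */
	return 0;
}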
@@ -361,8 +349,6 @@ struct x86_pmu { void (*cpu_starting)(int cpu); void (*cpu_dying)(int cpu); void (*cpu_dead)(int cpu); - - void (*check_microcode)(void); void (*flush_branch_stack)(void); /* @@ -374,16 +360,12 @@ struct x86_pmu { /* * Intel DebugStore bits */ - int bts :1, - bts_active :1, - pebs :1, - pebs_active :1, - pebs_broken :1; + int bts, pebs; + int bts_active, pebs_active; int pebs_record_size; void (*drain_pebs)(struct pt_regs *regs); struct event_constraint *pebs_constraints; void (*pebs_aliases)(struct perf_event *event); - int max_pebs_events; /* * Intel LBR @@ -486,8 +468,6 @@ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, void x86_pmu_enable_all(int added); -int perf_assign_events(struct event_constraint **constraints, int n, - int wmin, int wmax, int *assign); int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign); void x86_pmu_stop(struct perf_event *event, int flags); diff --git a/trunk/arch/x86/kernel/cpu/perf_event_amd.c b/trunk/arch/x86/kernel/cpu/perf_event_amd.c index 4528ae7b6ec4..11a4eb9131d5 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_amd.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_amd.c @@ -366,7 +366,7 @@ static void amd_pmu_cpu_starting(int cpu) cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY; - if (boot_cpu_data.x86_max_cores < 2) + if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15) return; nb_id = amd_get_nb_id(cpu); @@ -422,6 +422,35 @@ static struct attribute *amd_format_attr[] = { NULL, }; +static __initconst const struct x86_pmu amd_pmu = { + .name = "AMD", + .handle_irq = x86_pmu_handle_irq, + .disable_all = x86_pmu_disable_all, + .enable_all = x86_pmu_enable_all, + .enable = x86_pmu_enable_event, + .disable = x86_pmu_disable_event, + .hw_config = amd_pmu_hw_config, + .schedule_events = x86_schedule_events, + .eventsel = MSR_K7_EVNTSEL0, + .perfctr = MSR_K7_PERFCTR0, + .event_map = amd_pmu_event_map, + .max_events = ARRAY_SIZE(amd_perfmon_event_map), + .num_counters = AMD64_NUM_COUNTERS, + .cntval_bits = 48, + .cntval_mask = (1ULL << 48) - 1, + .apic = 1, + /* use highest bit to detect overflow */ + .max_period = (1ULL << 47) - 1, + .get_event_constraints = amd_get_event_constraints, + .put_event_constraints = amd_put_event_constraints, + + .format_attrs = amd_format_attr, + + .cpu_prepare = amd_pmu_cpu_prepare, + .cpu_starting = amd_pmu_cpu_starting, + .cpu_dead = amd_pmu_cpu_dead, +}; + /* AMD Family 15h */ #define AMD_EVENT_TYPE_MASK 0x000000F0ULL @@ -568,8 +597,8 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev } } -static __initconst const struct x86_pmu amd_pmu = { - .name = "AMD", +static __initconst const struct x86_pmu amd_pmu_f15h = { + .name = "AMD Family 15h", .handle_irq = x86_pmu_handle_irq, .disable_all = x86_pmu_disable_all, .enable_all = x86_pmu_enable_all, @@ -577,68 +606,50 @@ static __initconst const struct x86_pmu amd_pmu = { .disable = x86_pmu_disable_event, .hw_config = amd_pmu_hw_config, .schedule_events = x86_schedule_events, - .eventsel = MSR_K7_EVNTSEL0, - .perfctr = MSR_K7_PERFCTR0, + .eventsel = MSR_F15H_PERF_CTL, + .perfctr = MSR_F15H_PERF_CTR, .event_map = amd_pmu_event_map, .max_events = ARRAY_SIZE(amd_perfmon_event_map), - .num_counters = AMD64_NUM_COUNTERS, + .num_counters = AMD64_NUM_COUNTERS_F15H, .cntval_bits = 48, .cntval_mask = (1ULL << 48) - 1, .apic = 1, /* use highest bit to detect overflow */ .max_period = (1ULL << 47) - 1, - .get_event_constraints = amd_get_event_constraints, + .get_event_constraints = 
amd_get_event_constraints_f15h, + /* nortbridge counters not yet implemented: */ +#if 0 .put_event_constraints = amd_put_event_constraints, - .format_attrs = amd_format_attr, - .cpu_prepare = amd_pmu_cpu_prepare, - .cpu_starting = amd_pmu_cpu_starting, .cpu_dead = amd_pmu_cpu_dead, +#endif + .cpu_starting = amd_pmu_cpu_starting, + .format_attrs = amd_format_attr, }; -static int setup_event_constraints(void) -{ - if (boot_cpu_data.x86 >= 0x15) - x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; - return 0; -} - -static int setup_perfctr_core(void) -{ - if (!cpu_has_perfctr_core) { - WARN(x86_pmu.get_event_constraints == amd_get_event_constraints_f15h, - KERN_ERR "Odd, counter constraints enabled but no core perfctrs detected!"); - return -ENODEV; - } - - WARN(x86_pmu.get_event_constraints == amd_get_event_constraints, - KERN_ERR "hw perf events core counters need constraints handler!"); - - /* - * If core performance counter extensions exists, we must use - * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also - * x86_pmu_addr_offset(). - */ - x86_pmu.eventsel = MSR_F15H_PERF_CTL; - x86_pmu.perfctr = MSR_F15H_PERF_CTR; - x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE; - - printk(KERN_INFO "perf: AMD core performance counters detected\n"); - - return 0; -} - __init int amd_pmu_init(void) { /* Performance-monitoring supported from K7 and later: */ if (boot_cpu_data.x86 < 6) return -ENODEV; - x86_pmu = amd_pmu; - - setup_event_constraints(); - setup_perfctr_core(); + /* + * If core performance counter extensions exists, it must be + * family 15h, otherwise fail. See x86_pmu_addr_offset(). + */ + switch (boot_cpu_data.x86) { + case 0x15: + if (!cpu_has_perfctr_core) + return -ENODEV; + x86_pmu = amd_pmu_f15h; + break; + default: + if (cpu_has_perfctr_core) + return -ENODEV; + x86_pmu = amd_pmu; + break; + } /* Events are common for all AMDs */ memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel.c b/trunk/arch/x86/kernel/cpu/perf_event_intel.c index 7a8b9d0abcaa..187c294bc658 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel.c @@ -5,8 +5,6 @@ * among events on a single PMU. 
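The intel_perfmon_event_map[] values reformatted above pack an architectural event select into bits 0-7 and the unit mask into bits 8-15 of the event-select encoding. Decoding them makes the table easier to read; the sketch below only splits the constants copied from that hunk:

#include <stdio.h>

/* Values copied from the intel_perfmon_event_map[] hunk above. */
static const struct { const char *name; unsigned int raw; } events[] = {
	{ "cpu-cycles",       0x003c },
	{ "instructions",     0x00c0 },
	{ "cache-references", 0x4f2e },
	{ "cache-misses",     0x412e },
	{ "branches",         0x00c4 },
	{ "branch-misses",    0x00c5 },
	{ "bus-cycles",       0x013c },
};

int main(void)
{
	/* Architectural perfmon encoding: event select in bits 0-7,
	 * unit mask in bits 8-15. */
	for (unsigned int i = 0; i < sizeof(events) / sizeof(events[0]); i++)
		printf("%-16s event=%#04x umask=%#04x\n", events[i].name,
		       events[i].raw & 0xff, (events[i].raw >> 8) & 0xff);
	return 0;
}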
*/ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include #include #include @@ -23,14 +21,14 @@ */ static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly = { - [PERF_COUNT_HW_CPU_CYCLES] = 0x003c, - [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, - [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e, - [PERF_COUNT_HW_CACHE_MISSES] = 0x412e, - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, - [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, - [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, - [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */ + [PERF_COUNT_HW_CPU_CYCLES] = 0x003c, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e, + [PERF_COUNT_HW_CACHE_MISSES] = 0x412e, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, + [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, + [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, + [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */ }; static struct event_constraint intel_core_event_constraints[] __read_mostly = @@ -749,7 +747,7 @@ static void intel_pmu_disable_all(void) wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); - if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) + if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) intel_pmu_disable_bts(); intel_pmu_pebs_disable_all(); @@ -765,9 +763,9 @@ static void intel_pmu_enable_all(int added) wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask); - if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { + if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { struct perf_event *event = - cpuc->events[INTEL_PMC_IDX_FIXED_BTS]; + cpuc->events[X86_PMC_IDX_FIXED_BTS]; if (WARN_ON_ONCE(!event)) return; @@ -873,7 +871,7 @@ static inline void intel_pmu_ack_status(u64 ack) static void intel_pmu_disable_fixed(struct hw_perf_event *hwc) { - int idx = hwc->idx - INTEL_PMC_IDX_FIXED; + int idx = hwc->idx - X86_PMC_IDX_FIXED; u64 ctrl_val, mask; mask = 0xfULL << (idx * 4); @@ -888,7 +886,7 @@ static void intel_pmu_disable_event(struct perf_event *event) struct hw_perf_event *hwc = &event->hw; struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) { + if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) { intel_pmu_disable_bts(); intel_pmu_drain_bts_buffer(); return; @@ -917,7 +915,7 @@ static void intel_pmu_disable_event(struct perf_event *event) static void intel_pmu_enable_fixed(struct hw_perf_event *hwc) { - int idx = hwc->idx - INTEL_PMC_IDX_FIXED; + int idx = hwc->idx - X86_PMC_IDX_FIXED; u64 ctrl_val, bits, mask; /* @@ -951,7 +949,7 @@ static void intel_pmu_enable_event(struct perf_event *event) struct hw_perf_event *hwc = &event->hw; struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) { + if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) { if (!__this_cpu_read(cpu_hw_events.enabled)) return; @@ -1002,14 +1000,14 @@ static void intel_pmu_reset(void) local_irq_save(flags); - pr_info("clearing PMU state on CPU#%d\n", smp_processor_id()); + printk("clearing PMU state on CPU#%d\n", smp_processor_id()); for (idx = 0; idx < x86_pmu.num_counters; idx++) { - wrmsrl_safe(x86_pmu_config_addr(idx), 0ull); - wrmsrl_safe(x86_pmu_event_addr(idx), 0ull); + checking_wrmsrl(x86_pmu_config_addr(idx), 0ull); + checking_wrmsrl(x86_pmu_event_addr(idx), 0ull); } for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) - wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); + checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); if (ds) ds->bts_index = ds->bts_buffer_base; 
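An aside on the intel_pmu_disable_fixed()/intel_pmu_enable_fixed() hunks above: each fixed counter owns a 4-bit control field in the fixed-counter control MSR, which is why both paths build their masks as 0xf shifted left by idx * 4. A minimal sketch of that field manipulation follows; the helper names are hypothetical and not part of this patch.

#include <stdint.h>

/*
 * Illustration only: clear or replace the 4-bit control field that
 * fixed counter 'idx' owns inside the fixed-counter control value.
 */
static inline uint64_t fixed_ctrl_clear(uint64_t ctrl_val, int idx)
{
	/* drop the old 4-bit field for this counter */
	return ctrl_val & ~(0xfULL << (idx * 4));
}

static inline uint64_t fixed_ctrl_set(uint64_t ctrl_val, int idx, uint64_t bits)
{
	/* 'bits' carries the new enable/ring-level flags for this counter */
	return fixed_ctrl_clear(ctrl_val, idx) | (bits << (idx * 4));
}
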
@@ -1709,61 +1707,16 @@ static __init void intel_clovertown_quirk(void) * But taken together it might just make sense to not enable PEBS on * these chips. */ - pr_warn("PEBS disabled due to CPU errata\n"); + printk(KERN_WARNING "PEBS disabled due to CPU errata.\n"); x86_pmu.pebs = 0; x86_pmu.pebs_constraints = NULL; } -static int intel_snb_pebs_broken(int cpu) -{ - u32 rev = UINT_MAX; /* default to broken for unknown models */ - - switch (cpu_data(cpu).x86_model) { - case 42: /* SNB */ - rev = 0x28; - break; - - case 45: /* SNB-EP */ - switch (cpu_data(cpu).x86_mask) { - case 6: rev = 0x618; break; - case 7: rev = 0x70c; break; - } - } - - return (cpu_data(cpu).microcode < rev); -} - -static void intel_snb_check_microcode(void) -{ - int pebs_broken = 0; - int cpu; - - get_online_cpus(); - for_each_online_cpu(cpu) { - if ((pebs_broken = intel_snb_pebs_broken(cpu))) - break; - } - put_online_cpus(); - - if (pebs_broken == x86_pmu.pebs_broken) - return; - - /* - * Serialized by the microcode lock.. - */ - if (x86_pmu.pebs_broken) { - pr_info("PEBS enabled due to microcode update\n"); - x86_pmu.pebs_broken = 0; - } else { - pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n"); - x86_pmu.pebs_broken = 1; - } -} - static __init void intel_sandybridge_quirk(void) { - x86_pmu.check_microcode = intel_snb_check_microcode; - intel_snb_check_microcode(); + printk(KERN_WARNING "PEBS disabled due to CPU errata.\n"); + x86_pmu.pebs = 0; + x86_pmu.pebs_constraints = NULL; } static const struct { int id; char *name; } intel_arch_events_map[] __initconst = { @@ -1783,8 +1736,8 @@ static __init void intel_arch_events_quirk(void) /* disable event that reported as not presend by cpuid */ for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) { intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0; - pr_warn("CPUID marked event: \'%s\' unavailable\n", - intel_arch_events_map[bit].name); + printk(KERN_WARNING "CPUID marked event: \'%s\' unavailable\n", + intel_arch_events_map[bit].name); } } @@ -1803,7 +1756,7 @@ static __init void intel_nehalem_quirk(void) intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89; ebx.split.no_branch_misses_retired = 0; x86_pmu.events_maskl = ebx.full; - pr_info("CPU erratum AAJ80 worked around\n"); + printk(KERN_INFO "CPU erratum AAJ80 worked around\n"); } } @@ -1812,7 +1765,6 @@ __init int intel_pmu_init(void) union cpuid10_edx edx; union cpuid10_eax eax; union cpuid10_ebx ebx; - struct event_constraint *c; unsigned int unused; int version; @@ -1848,8 +1800,6 @@ __init int intel_pmu_init(void) x86_pmu.events_maskl = ebx.full; x86_pmu.events_mask_len = eax.split.mask_length; - x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters); - /* * Quirk: v2 perfmon does not report fixed-purpose events, so * assume at least 3 events: @@ -2001,37 +1951,5 @@ __init int intel_pmu_init(void) } } - if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) { - WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!", - x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC); - x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC; - } - x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1; - - if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) { - WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!", - x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED); - x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED; - } - - x86_pmu.intel_ctrl |= - ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED; - - if (x86_pmu.event_constraints) 
{ - /* - * event on fixed counter2 (REF_CYCLES) only works on this - * counter, so do not extend mask to generic counters - */ - for_each_event_constraint(c, x86_pmu.event_constraints) { - if (c->cmask != X86_RAW_EVENT_MASK - || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) { - continue; - } - - c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1; - c->weight += x86_pmu.num_counters; - } - } - return 0; } diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c b/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c index 629ae0b7ad90..35e2192df9f4 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -248,7 +248,7 @@ void reserve_ds_buffers(void) */ struct event_constraint bts_constraint = - EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0); + EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0); void intel_pmu_enable_bts(u64 config) { @@ -295,7 +295,7 @@ int intel_pmu_drain_bts_buffer(void) u64 to; u64 flags; }; - struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS]; + struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS]; struct bts_record *at, *top; struct perf_output_handle handle; struct perf_event_header header; @@ -620,7 +620,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) * Should not happen, we program the threshold at 1 and do not * set a reset value. */ - WARN_ONCE(n > 1, "bad leftover pebs %d\n", n); + WARN_ON_ONCE(n > 1); at += n - 1; __intel_pmu_pebs_event(event, iregs, at); @@ -651,10 +651,10 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) * Should not happen, we program the threshold at 1 and do not * set a reset value. */ - WARN_ONCE(n > x86_pmu.max_pebs_events, "Unexpected number of pebs records %d\n", n); + WARN_ON_ONCE(n > MAX_PEBS_EVENTS); for ( ; at < top; at++) { - for_each_set_bit(bit, (unsigned long *)&at->status, x86_pmu.max_pebs_events) { + for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) { event = cpuc->events[bit]; if (!test_bit(bit, cpuc->active_mask)) continue; @@ -670,7 +670,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) break; } - if (!event || bit >= x86_pmu.max_pebs_events) + if (!event || bit >= MAX_PEBS_EVENTS) continue; __intel_pmu_pebs_event(event, iregs, at); diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.c deleted file mode 100644 index 19faffc60886..000000000000 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ /dev/null @@ -1,1850 +0,0 @@ -#include "perf_event_intel_uncore.h" - -static struct intel_uncore_type *empty_uncore[] = { NULL, }; -static struct intel_uncore_type **msr_uncores = empty_uncore; -static struct intel_uncore_type **pci_uncores = empty_uncore; -/* pci bus to socket mapping */ -static int pcibus_to_physid[256] = { [0 ... 
255] = -1, }; - -static DEFINE_RAW_SPINLOCK(uncore_box_lock); - -/* mask of cpus that collect uncore events */ -static cpumask_t uncore_cpu_mask; - -/* constraint for the fixed counter */ -static struct event_constraint constraint_fixed = - EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL); -static struct event_constraint constraint_empty = - EVENT_CONSTRAINT(0, 0, 0); - -DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); -DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); -DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); -DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19"); -DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); -DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28"); -DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31"); -DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31"); -DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28"); -DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15"); -DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30"); -DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51"); -DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4"); -DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17"); -DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22"); -DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31"); -DEFINE_UNCORE_FORMAT_ATTR(filter_brand0, filter_brand0, "config1:0-7"); -DEFINE_UNCORE_FORMAT_ATTR(filter_brand1, filter_brand1, "config1:8-15"); -DEFINE_UNCORE_FORMAT_ATTR(filter_brand2, filter_brand2, "config1:16-23"); -DEFINE_UNCORE_FORMAT_ATTR(filter_brand3, filter_brand3, "config1:24-31"); - -/* Sandy Bridge-EP uncore support */ -static struct intel_uncore_type snbep_uncore_cbox; -static struct intel_uncore_type snbep_uncore_pcu; - -static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box) -{ - struct pci_dev *pdev = box->pci_dev; - int box_ctl = uncore_pci_box_ctl(box); - u32 config; - - pci_read_config_dword(pdev, box_ctl, &config); - config |= SNBEP_PMON_BOX_CTL_FRZ; - pci_write_config_dword(pdev, box_ctl, config); -} - -static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box) -{ - struct pci_dev *pdev = box->pci_dev; - int box_ctl = uncore_pci_box_ctl(box); - u32 config; - - pci_read_config_dword(pdev, box_ctl, &config); - config &= ~SNBEP_PMON_BOX_CTL_FRZ; - pci_write_config_dword(pdev, box_ctl, config); -} - -static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, - struct perf_event *event) -{ - struct pci_dev *pdev = box->pci_dev; - struct hw_perf_event *hwc = &event->hw; - - pci_write_config_dword(pdev, hwc->config_base, hwc->config | - SNBEP_PMON_CTL_EN); -} - -static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, - struct perf_event *event) -{ - struct pci_dev *pdev = box->pci_dev; - struct hw_perf_event *hwc = &event->hw; - - pci_write_config_dword(pdev, hwc->config_base, hwc->config); -} - -static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, - struct perf_event *event) -{ - struct pci_dev *pdev = box->pci_dev; - struct hw_perf_event *hwc = &event->hw; - u64 count; - - pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count); - pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1); - return count; -} - -static void snbep_uncore_pci_init_box(struct intel_uncore_box *box) -{ - struct pci_dev *pdev = box->pci_dev; - pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, - SNBEP_PMON_BOX_CTL_INT); -} - -static void 
snbep_uncore_msr_disable_box(struct intel_uncore_box *box) -{ - u64 config; - unsigned msr; - - msr = uncore_msr_box_ctl(box); - if (msr) { - rdmsrl(msr, config); - config |= SNBEP_PMON_BOX_CTL_FRZ; - wrmsrl(msr, config); - return; - } -} - -static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box) -{ - u64 config; - unsigned msr; - - msr = uncore_msr_box_ctl(box); - if (msr) { - rdmsrl(msr, config); - config &= ~SNBEP_PMON_BOX_CTL_FRZ; - wrmsrl(msr, config); - return; - } -} - -static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, - struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - struct hw_perf_event_extra *reg1 = &hwc->extra_reg; - - if (reg1->idx != EXTRA_REG_NONE) - wrmsrl(reg1->reg, reg1->config); - - wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); -} - -static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box, - struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - - wrmsrl(hwc->config_base, hwc->config); -} - -static u64 snbep_uncore_msr_read_counter(struct intel_uncore_box *box, - struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - u64 count; - - rdmsrl(hwc->event_base, count); - return count; -} - -static void snbep_uncore_msr_init_box(struct intel_uncore_box *box) -{ - unsigned msr = uncore_msr_box_ctl(box); - if (msr) - wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT); -} - -static struct event_constraint * -snbep_uncore_get_constraint(struct intel_uncore_box *box, - struct perf_event *event) -{ - struct intel_uncore_extra_reg *er; - struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; - unsigned long flags; - bool ok = false; - - if (reg1->idx == EXTRA_REG_NONE || (box->phys_id >= 0 && reg1->alloc)) - return NULL; - - er = &box->shared_regs[reg1->idx]; - raw_spin_lock_irqsave(&er->lock, flags); - if (!atomic_read(&er->ref) || er->config1 == reg1->config) { - atomic_inc(&er->ref); - er->config1 = reg1->config; - ok = true; - } - raw_spin_unlock_irqrestore(&er->lock, flags); - - if (ok) { - if (box->phys_id >= 0) - reg1->alloc = 1; - return NULL; - } - return &constraint_empty; -} - -static void snbep_uncore_put_constraint(struct intel_uncore_box *box, - struct perf_event *event) -{ - struct intel_uncore_extra_reg *er; - struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; - - if (box->phys_id < 0 || !reg1->alloc) - return; - - er = &box->shared_regs[reg1->idx]; - atomic_dec(&er->ref); - reg1->alloc = 0; -} - -static int snbep_uncore_hw_config(struct intel_uncore_box *box, - struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - struct hw_perf_event_extra *reg1 = &hwc->extra_reg; - - if (box->pmu->type == &snbep_uncore_cbox) { - reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER + - SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; - reg1->config = event->attr.config1 & - SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK; - } else if (box->pmu->type == &snbep_uncore_pcu) { - reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER; - reg1->config = event->attr.config1 & - SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK; - } else { - return 0; - } - reg1->idx = 0; - return 0; -} - -static struct attribute *snbep_uncore_formats_attr[] = { - &format_attr_event.attr, - &format_attr_umask.attr, - &format_attr_edge.attr, - &format_attr_inv.attr, - &format_attr_thresh8.attr, - NULL, -}; - -static struct attribute *snbep_uncore_ubox_formats_attr[] = { - &format_attr_event.attr, - &format_attr_umask.attr, - &format_attr_edge.attr, - &format_attr_inv.attr, - &format_attr_thresh5.attr, - NULL, -}; - -static struct 
attribute *snbep_uncore_cbox_formats_attr[] = { - &format_attr_event.attr, - &format_attr_umask.attr, - &format_attr_edge.attr, - &format_attr_tid_en.attr, - &format_attr_inv.attr, - &format_attr_thresh8.attr, - &format_attr_filter_tid.attr, - &format_attr_filter_nid.attr, - &format_attr_filter_state.attr, - &format_attr_filter_opc.attr, - NULL, -}; - -static struct attribute *snbep_uncore_pcu_formats_attr[] = { - &format_attr_event.attr, - &format_attr_occ_sel.attr, - &format_attr_edge.attr, - &format_attr_inv.attr, - &format_attr_thresh5.attr, - &format_attr_occ_invert.attr, - &format_attr_occ_edge.attr, - &format_attr_filter_brand0.attr, - &format_attr_filter_brand1.attr, - &format_attr_filter_brand2.attr, - &format_attr_filter_brand3.attr, - NULL, -}; - -static struct uncore_event_desc snbep_uncore_imc_events[] = { - INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"), - INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"), - INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"), - { /* end: all zeroes */ }, -}; - -static struct uncore_event_desc snbep_uncore_qpi_events[] = { - INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"), - INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"), - INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"), - INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"), - { /* end: all zeroes */ }, -}; - -static struct attribute_group snbep_uncore_format_group = { - .name = "format", - .attrs = snbep_uncore_formats_attr, -}; - -static struct attribute_group snbep_uncore_ubox_format_group = { - .name = "format", - .attrs = snbep_uncore_ubox_formats_attr, -}; - -static struct attribute_group snbep_uncore_cbox_format_group = { - .name = "format", - .attrs = snbep_uncore_cbox_formats_attr, -}; - -static struct attribute_group snbep_uncore_pcu_format_group = { - .name = "format", - .attrs = snbep_uncore_pcu_formats_attr, -}; - -static struct intel_uncore_ops snbep_uncore_msr_ops = { - .init_box = snbep_uncore_msr_init_box, - .disable_box = snbep_uncore_msr_disable_box, - .enable_box = snbep_uncore_msr_enable_box, - .disable_event = snbep_uncore_msr_disable_event, - .enable_event = snbep_uncore_msr_enable_event, - .read_counter = snbep_uncore_msr_read_counter, - .get_constraint = snbep_uncore_get_constraint, - .put_constraint = snbep_uncore_put_constraint, - .hw_config = snbep_uncore_hw_config, -}; - -static struct intel_uncore_ops snbep_uncore_pci_ops = { - .init_box = snbep_uncore_pci_init_box, - .disable_box = snbep_uncore_pci_disable_box, - .enable_box = snbep_uncore_pci_enable_box, - .disable_event = snbep_uncore_pci_disable_event, - .enable_event = snbep_uncore_pci_enable_event, - .read_counter = snbep_uncore_pci_read_counter, -}; - -static struct event_constraint snbep_uncore_cbox_constraints[] = { - UNCORE_EVENT_CONSTRAINT(0x01, 0x1), - UNCORE_EVENT_CONSTRAINT(0x02, 0x3), - UNCORE_EVENT_CONSTRAINT(0x04, 0x3), - UNCORE_EVENT_CONSTRAINT(0x05, 0x3), - UNCORE_EVENT_CONSTRAINT(0x07, 0x3), - UNCORE_EVENT_CONSTRAINT(0x11, 0x1), - UNCORE_EVENT_CONSTRAINT(0x12, 0x3), - UNCORE_EVENT_CONSTRAINT(0x13, 0x3), - UNCORE_EVENT_CONSTRAINT(0x1b, 0xc), - UNCORE_EVENT_CONSTRAINT(0x1c, 0xc), - UNCORE_EVENT_CONSTRAINT(0x1d, 0xc), - UNCORE_EVENT_CONSTRAINT(0x1e, 0xc), - EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff), - UNCORE_EVENT_CONSTRAINT(0x21, 0x3), - UNCORE_EVENT_CONSTRAINT(0x23, 0x3), - UNCORE_EVENT_CONSTRAINT(0x31, 0x3), - UNCORE_EVENT_CONSTRAINT(0x32, 0x3), - UNCORE_EVENT_CONSTRAINT(0x33, 0x3), - 
UNCORE_EVENT_CONSTRAINT(0x34, 0x3), - UNCORE_EVENT_CONSTRAINT(0x35, 0x3), - UNCORE_EVENT_CONSTRAINT(0x36, 0x1), - UNCORE_EVENT_CONSTRAINT(0x37, 0x3), - UNCORE_EVENT_CONSTRAINT(0x38, 0x3), - UNCORE_EVENT_CONSTRAINT(0x39, 0x3), - UNCORE_EVENT_CONSTRAINT(0x3b, 0x1), - EVENT_CONSTRAINT_END -}; - -static struct event_constraint snbep_uncore_r2pcie_constraints[] = { - UNCORE_EVENT_CONSTRAINT(0x10, 0x3), - UNCORE_EVENT_CONSTRAINT(0x11, 0x3), - UNCORE_EVENT_CONSTRAINT(0x12, 0x1), - UNCORE_EVENT_CONSTRAINT(0x23, 0x3), - UNCORE_EVENT_CONSTRAINT(0x24, 0x3), - UNCORE_EVENT_CONSTRAINT(0x25, 0x3), - UNCORE_EVENT_CONSTRAINT(0x26, 0x3), - UNCORE_EVENT_CONSTRAINT(0x32, 0x3), - UNCORE_EVENT_CONSTRAINT(0x33, 0x3), - UNCORE_EVENT_CONSTRAINT(0x34, 0x3), - EVENT_CONSTRAINT_END -}; - -static struct event_constraint snbep_uncore_r3qpi_constraints[] = { - UNCORE_EVENT_CONSTRAINT(0x10, 0x3), - UNCORE_EVENT_CONSTRAINT(0x11, 0x3), - UNCORE_EVENT_CONSTRAINT(0x12, 0x3), - UNCORE_EVENT_CONSTRAINT(0x13, 0x1), - UNCORE_EVENT_CONSTRAINT(0x20, 0x3), - UNCORE_EVENT_CONSTRAINT(0x21, 0x3), - UNCORE_EVENT_CONSTRAINT(0x22, 0x3), - UNCORE_EVENT_CONSTRAINT(0x23, 0x3), - UNCORE_EVENT_CONSTRAINT(0x24, 0x3), - UNCORE_EVENT_CONSTRAINT(0x25, 0x3), - UNCORE_EVENT_CONSTRAINT(0x26, 0x3), - UNCORE_EVENT_CONSTRAINT(0x30, 0x3), - UNCORE_EVENT_CONSTRAINT(0x31, 0x3), - UNCORE_EVENT_CONSTRAINT(0x32, 0x3), - UNCORE_EVENT_CONSTRAINT(0x33, 0x3), - UNCORE_EVENT_CONSTRAINT(0x34, 0x3), - UNCORE_EVENT_CONSTRAINT(0x36, 0x3), - UNCORE_EVENT_CONSTRAINT(0x37, 0x3), - EVENT_CONSTRAINT_END -}; - -static struct intel_uncore_type snbep_uncore_ubox = { - .name = "ubox", - .num_counters = 2, - .num_boxes = 1, - .perf_ctr_bits = 44, - .fixed_ctr_bits = 48, - .perf_ctr = SNBEP_U_MSR_PMON_CTR0, - .event_ctl = SNBEP_U_MSR_PMON_CTL0, - .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK, - .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR, - .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL, - .ops = &snbep_uncore_msr_ops, - .format_group = &snbep_uncore_ubox_format_group, -}; - -static struct intel_uncore_type snbep_uncore_cbox = { - .name = "cbox", - .num_counters = 4, - .num_boxes = 8, - .perf_ctr_bits = 44, - .event_ctl = SNBEP_C0_MSR_PMON_CTL0, - .perf_ctr = SNBEP_C0_MSR_PMON_CTR0, - .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK, - .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL, - .msr_offset = SNBEP_CBO_MSR_OFFSET, - .num_shared_regs = 1, - .constraints = snbep_uncore_cbox_constraints, - .ops = &snbep_uncore_msr_ops, - .format_group = &snbep_uncore_cbox_format_group, -}; - -static struct intel_uncore_type snbep_uncore_pcu = { - .name = "pcu", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0, - .event_ctl = SNBEP_PCU_MSR_PMON_CTL0, - .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK, - .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL, - .num_shared_regs = 1, - .ops = &snbep_uncore_msr_ops, - .format_group = &snbep_uncore_pcu_format_group, -}; - -static struct intel_uncore_type *snbep_msr_uncores[] = { - &snbep_uncore_ubox, - &snbep_uncore_cbox, - &snbep_uncore_pcu, - NULL, -}; - -#define SNBEP_UNCORE_PCI_COMMON_INIT() \ - .perf_ctr = SNBEP_PCI_PMON_CTR0, \ - .event_ctl = SNBEP_PCI_PMON_CTL0, \ - .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \ - .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \ - .ops = &snbep_uncore_pci_ops, \ - .format_group = &snbep_uncore_format_group - -static struct intel_uncore_type snbep_uncore_ha = { - .name = "ha", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - SNBEP_UNCORE_PCI_COMMON_INIT(), -}; - -static struct 
intel_uncore_type snbep_uncore_imc = { - .name = "imc", - .num_counters = 4, - .num_boxes = 4, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, - .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, - .event_descs = snbep_uncore_imc_events, - SNBEP_UNCORE_PCI_COMMON_INIT(), -}; - -static struct intel_uncore_type snbep_uncore_qpi = { - .name = "qpi", - .num_counters = 4, - .num_boxes = 2, - .perf_ctr_bits = 48, - .event_descs = snbep_uncore_qpi_events, - SNBEP_UNCORE_PCI_COMMON_INIT(), -}; - - -static struct intel_uncore_type snbep_uncore_r2pcie = { - .name = "r2pcie", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 44, - .constraints = snbep_uncore_r2pcie_constraints, - SNBEP_UNCORE_PCI_COMMON_INIT(), -}; - -static struct intel_uncore_type snbep_uncore_r3qpi = { - .name = "r3qpi", - .num_counters = 3, - .num_boxes = 2, - .perf_ctr_bits = 44, - .constraints = snbep_uncore_r3qpi_constraints, - SNBEP_UNCORE_PCI_COMMON_INIT(), -}; - -static struct intel_uncore_type *snbep_pci_uncores[] = { - &snbep_uncore_ha, - &snbep_uncore_imc, - &snbep_uncore_qpi, - &snbep_uncore_r2pcie, - &snbep_uncore_r3qpi, - NULL, -}; - -static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = { - { /* Home Agent */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA), - .driver_data = (unsigned long)&snbep_uncore_ha, - }, - { /* MC Channel 0 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0), - .driver_data = (unsigned long)&snbep_uncore_imc, - }, - { /* MC Channel 1 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1), - .driver_data = (unsigned long)&snbep_uncore_imc, - }, - { /* MC Channel 2 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2), - .driver_data = (unsigned long)&snbep_uncore_imc, - }, - { /* MC Channel 3 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3), - .driver_data = (unsigned long)&snbep_uncore_imc, - }, - { /* QPI Port 0 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0), - .driver_data = (unsigned long)&snbep_uncore_qpi, - }, - { /* QPI Port 1 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1), - .driver_data = (unsigned long)&snbep_uncore_qpi, - }, - { /* P2PCIe */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE), - .driver_data = (unsigned long)&snbep_uncore_r2pcie, - }, - { /* R3QPI Link 0 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0), - .driver_data = (unsigned long)&snbep_uncore_r3qpi, - }, - { /* R3QPI Link 1 */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1), - .driver_data = (unsigned long)&snbep_uncore_r3qpi, - }, - { /* end: all zeroes */ } -}; - -static struct pci_driver snbep_uncore_pci_driver = { - .name = "snbep_uncore", - .id_table = snbep_uncore_pci_ids, -}; - -/* - * build pci bus to socket mapping - */ -static void snbep_pci2phy_map_init(void) -{ - struct pci_dev *ubox_dev = NULL; - int i, bus, nodeid; - u32 config; - - while (1) { - /* find the UBOX device */ - ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX, - ubox_dev); - if (!ubox_dev) - break; - bus = ubox_dev->bus->number; - /* get the Node ID of the local register */ - pci_read_config_dword(ubox_dev, 0x40, &config); - nodeid = config; - /* get the Node ID mapping */ - pci_read_config_dword(ubox_dev, 0x54, &config); - /* - * every three bits in the Node ID mapping register maps - * to a particular node. 
- */ - for (i = 0; i < 8; i++) { - if (nodeid == ((config >> (3 * i)) & 0x7)) { - pcibus_to_physid[bus] = i; - break; - } - } - }; - return; -} -/* end of Sandy Bridge-EP uncore support */ - - -/* Sandy Bridge uncore support */ -static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, - struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - - if (hwc->idx < UNCORE_PMC_IDX_FIXED) - wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); - else - wrmsrl(hwc->config_base, SNB_UNC_CTL_EN); -} - -static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, - struct perf_event *event) -{ - wrmsrl(event->hw.config_base, 0); -} - -static u64 snb_uncore_msr_read_counter(struct intel_uncore_box *box, - struct perf_event *event) -{ - u64 count; - rdmsrl(event->hw.event_base, count); - return count; -} - -static void snb_uncore_msr_init_box(struct intel_uncore_box *box) -{ - if (box->pmu->pmu_idx == 0) { - wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, - SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL); - } -} - -static struct attribute *snb_uncore_formats_attr[] = { - &format_attr_event.attr, - &format_attr_umask.attr, - &format_attr_edge.attr, - &format_attr_inv.attr, - &format_attr_cmask5.attr, - NULL, -}; - -static struct attribute_group snb_uncore_format_group = { - .name = "format", - .attrs = snb_uncore_formats_attr, -}; - -static struct intel_uncore_ops snb_uncore_msr_ops = { - .init_box = snb_uncore_msr_init_box, - .disable_event = snb_uncore_msr_disable_event, - .enable_event = snb_uncore_msr_enable_event, - .read_counter = snb_uncore_msr_read_counter, -}; - -static struct event_constraint snb_uncore_cbox_constraints[] = { - UNCORE_EVENT_CONSTRAINT(0x80, 0x1), - UNCORE_EVENT_CONSTRAINT(0x83, 0x1), - EVENT_CONSTRAINT_END -}; - -static struct intel_uncore_type snb_uncore_cbox = { - .name = "cbox", - .num_counters = 2, - .num_boxes = 4, - .perf_ctr_bits = 44, - .fixed_ctr_bits = 48, - .perf_ctr = SNB_UNC_CBO_0_PER_CTR0, - .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0, - .fixed_ctr = SNB_UNC_FIXED_CTR, - .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL, - .single_fixed = 1, - .event_mask = SNB_UNC_RAW_EVENT_MASK, - .msr_offset = SNB_UNC_CBO_MSR_OFFSET, - .constraints = snb_uncore_cbox_constraints, - .ops = &snb_uncore_msr_ops, - .format_group = &snb_uncore_format_group, -}; - -static struct intel_uncore_type *snb_msr_uncores[] = { - &snb_uncore_cbox, - NULL, -}; -/* end of Sandy Bridge uncore support */ - -/* Nehalem uncore support */ -static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box) -{ - wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0); -} - -static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box) -{ - wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, - NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC); -} - -static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, - struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - - if (hwc->idx < UNCORE_PMC_IDX_FIXED) - wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); - else - wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN); -} - -static struct attribute *nhm_uncore_formats_attr[] = { - &format_attr_event.attr, - &format_attr_umask.attr, - &format_attr_edge.attr, - &format_attr_inv.attr, - &format_attr_cmask8.attr, - NULL, -}; - -static struct attribute_group nhm_uncore_format_group = { - .name = "format", - .attrs = nhm_uncore_formats_attr, -}; - -static struct uncore_event_desc nhm_uncore_events[] = { - INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"), - 
INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"), - INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"), - INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"), - INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"), - INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"), - INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"), - INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"), - INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"), - { /* end: all zeroes */ }, -}; - -static struct intel_uncore_ops nhm_uncore_msr_ops = { - .disable_box = nhm_uncore_msr_disable_box, - .enable_box = nhm_uncore_msr_enable_box, - .disable_event = snb_uncore_msr_disable_event, - .enable_event = nhm_uncore_msr_enable_event, - .read_counter = snb_uncore_msr_read_counter, -}; - -static struct intel_uncore_type nhm_uncore = { - .name = "", - .num_counters = 8, - .num_boxes = 1, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .event_ctl = NHM_UNC_PERFEVTSEL0, - .perf_ctr = NHM_UNC_UNCORE_PMC0, - .fixed_ctr = NHM_UNC_FIXED_CTR, - .fixed_ctl = NHM_UNC_FIXED_CTR_CTRL, - .event_mask = NHM_UNC_RAW_EVENT_MASK, - .event_descs = nhm_uncore_events, - .ops = &nhm_uncore_msr_ops, - .format_group = &nhm_uncore_format_group, -}; - -static struct intel_uncore_type *nhm_msr_uncores[] = { - &nhm_uncore, - NULL, -}; -/* end of Nehalem uncore support */ - -static void uncore_assign_hw_event(struct intel_uncore_box *box, - struct perf_event *event, int idx) -{ - struct hw_perf_event *hwc = &event->hw; - - hwc->idx = idx; - hwc->last_tag = ++box->tags[idx]; - - if (hwc->idx == UNCORE_PMC_IDX_FIXED) { - hwc->event_base = uncore_fixed_ctr(box); - hwc->config_base = uncore_fixed_ctl(box); - return; - } - - hwc->config_base = uncore_event_ctl(box, hwc->idx); - hwc->event_base = uncore_perf_ctr(box, hwc->idx); -} - -static void uncore_perf_event_update(struct intel_uncore_box *box, - struct perf_event *event) -{ - u64 prev_count, new_count, delta; - int shift; - - if (event->hw.idx >= UNCORE_PMC_IDX_FIXED) - shift = 64 - uncore_fixed_ctr_bits(box); - else - shift = 64 - uncore_perf_ctr_bits(box); - - /* the hrtimer might modify the previous event value */ -again: - prev_count = local64_read(&event->hw.prev_count); - new_count = uncore_read_counter(box, event); - if (local64_xchg(&event->hw.prev_count, new_count) != prev_count) - goto again; - - delta = (new_count << shift) - (prev_count << shift); - delta >>= shift; - - local64_add(delta, &event->count); -} - -/* - * The overflow interrupt is unavailable for SandyBridge-EP, is broken - * for SandyBridge. So we use hrtimer to periodically poll the counter - * to avoid overflow. 
- */ -static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer) -{ - struct intel_uncore_box *box; - unsigned long flags; - int bit; - - box = container_of(hrtimer, struct intel_uncore_box, hrtimer); - if (!box->n_active || box->cpu != smp_processor_id()) - return HRTIMER_NORESTART; - /* - * disable local interrupt to prevent uncore_pmu_event_start/stop - * to interrupt the update process - */ - local_irq_save(flags); - - for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX) - uncore_perf_event_update(box, box->events[bit]); - - local_irq_restore(flags); - - hrtimer_forward_now(hrtimer, ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL)); - return HRTIMER_RESTART; -} - -static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box) -{ - __hrtimer_start_range_ns(&box->hrtimer, - ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL), 0, - HRTIMER_MODE_REL_PINNED, 0); -} - -static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box) -{ - hrtimer_cancel(&box->hrtimer); -} - -static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box) -{ - hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - box->hrtimer.function = uncore_pmu_hrtimer; -} - -struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, - int cpu) -{ - struct intel_uncore_box *box; - int i, size; - - size = sizeof(*box) + type->num_shared_regs * - sizeof(struct intel_uncore_extra_reg); - - box = kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu)); - if (!box) - return NULL; - - for (i = 0; i < type->num_shared_regs; i++) - raw_spin_lock_init(&box->shared_regs[i].lock); - - uncore_pmu_init_hrtimer(box); - atomic_set(&box->refcnt, 1); - box->cpu = -1; - box->phys_id = -1; - - return box; -} - -static struct intel_uncore_box * -uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu) -{ - static struct intel_uncore_box *box; - - box = *per_cpu_ptr(pmu->box, cpu); - if (box) - return box; - - raw_spin_lock(&uncore_box_lock); - list_for_each_entry(box, &pmu->box_list, list) { - if (box->phys_id == topology_physical_package_id(cpu)) { - atomic_inc(&box->refcnt); - *per_cpu_ptr(pmu->box, cpu) = box; - break; - } - } - raw_spin_unlock(&uncore_box_lock); - - return *per_cpu_ptr(pmu->box, cpu); -} - -static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event) -{ - return container_of(event->pmu, struct intel_uncore_pmu, pmu); -} - -static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event) -{ - /* - * perf core schedules event on the basis of cpu, uncore events are - * collected by one of the cpus inside a physical package. 
- */ - return uncore_pmu_to_box(uncore_event_to_pmu(event), - smp_processor_id()); -} - -static int uncore_collect_events(struct intel_uncore_box *box, - struct perf_event *leader, bool dogrp) -{ - struct perf_event *event; - int n, max_count; - - max_count = box->pmu->type->num_counters; - if (box->pmu->type->fixed_ctl) - max_count++; - - if (box->n_events >= max_count) - return -EINVAL; - - n = box->n_events; - box->event_list[n] = leader; - n++; - if (!dogrp) - return n; - - list_for_each_entry(event, &leader->sibling_list, group_entry) { - if (event->state <= PERF_EVENT_STATE_OFF) - continue; - - if (n >= max_count) - return -EINVAL; - - box->event_list[n] = event; - n++; - } - return n; -} - -static struct event_constraint * -uncore_get_event_constraint(struct intel_uncore_box *box, - struct perf_event *event) -{ - struct intel_uncore_type *type = box->pmu->type; - struct event_constraint *c; - - if (type->ops->get_constraint) { - c = type->ops->get_constraint(box, event); - if (c) - return c; - } - - if (event->hw.config == ~0ULL) - return &constraint_fixed; - - if (type->constraints) { - for_each_event_constraint(c, type->constraints) { - if ((event->hw.config & c->cmask) == c->code) - return c; - } - } - - return &type->unconstrainted; -} - -static void uncore_put_event_constraint(struct intel_uncore_box *box, - struct perf_event *event) -{ - if (box->pmu->type->ops->put_constraint) - box->pmu->type->ops->put_constraint(box, event); -} - -static int uncore_assign_events(struct intel_uncore_box *box, - int assign[], int n) -{ - unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; - struct event_constraint *c, *constraints[UNCORE_PMC_IDX_MAX]; - int i, wmin, wmax, ret = 0; - struct hw_perf_event *hwc; - - bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX); - - for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) { - c = uncore_get_event_constraint(box, box->event_list[i]); - constraints[i] = c; - wmin = min(wmin, c->weight); - wmax = max(wmax, c->weight); - } - - /* fastpath, try to reuse previous register */ - for (i = 0; i < n; i++) { - hwc = &box->event_list[i]->hw; - c = constraints[i]; - - /* never assigned */ - if (hwc->idx == -1) - break; - - /* constraint still honored */ - if (!test_bit(hwc->idx, c->idxmsk)) - break; - - /* not already used */ - if (test_bit(hwc->idx, used_mask)) - break; - - __set_bit(hwc->idx, used_mask); - if (assign) - assign[i] = hwc->idx; - } - /* slow path */ - if (i != n) - ret = perf_assign_events(constraints, n, wmin, wmax, assign); - - if (!assign || ret) { - for (i = 0; i < n; i++) - uncore_put_event_constraint(box, box->event_list[i]); - } - return ret ? 
-EINVAL : 0; -} - -static void uncore_pmu_event_start(struct perf_event *event, int flags) -{ - struct intel_uncore_box *box = uncore_event_to_box(event); - int idx = event->hw.idx; - - if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) - return; - - if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX)) - return; - - event->hw.state = 0; - box->events[idx] = event; - box->n_active++; - __set_bit(idx, box->active_mask); - - local64_set(&event->hw.prev_count, uncore_read_counter(box, event)); - uncore_enable_event(box, event); - - if (box->n_active == 1) { - uncore_enable_box(box); - uncore_pmu_start_hrtimer(box); - } -} - -static void uncore_pmu_event_stop(struct perf_event *event, int flags) -{ - struct intel_uncore_box *box = uncore_event_to_box(event); - struct hw_perf_event *hwc = &event->hw; - - if (__test_and_clear_bit(hwc->idx, box->active_mask)) { - uncore_disable_event(box, event); - box->n_active--; - box->events[hwc->idx] = NULL; - WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); - hwc->state |= PERF_HES_STOPPED; - - if (box->n_active == 0) { - uncore_disable_box(box); - uncore_pmu_cancel_hrtimer(box); - } - } - - if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { - /* - * Drain the remaining delta count out of a event - * that we are disabling: - */ - uncore_perf_event_update(box, event); - hwc->state |= PERF_HES_UPTODATE; - } -} - -static int uncore_pmu_event_add(struct perf_event *event, int flags) -{ - struct intel_uncore_box *box = uncore_event_to_box(event); - struct hw_perf_event *hwc = &event->hw; - int assign[UNCORE_PMC_IDX_MAX]; - int i, n, ret; - - if (!box) - return -ENODEV; - - ret = n = uncore_collect_events(box, event, false); - if (ret < 0) - return ret; - - hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; - if (!(flags & PERF_EF_START)) - hwc->state |= PERF_HES_ARCH; - - ret = uncore_assign_events(box, assign, n); - if (ret) - return ret; - - /* save events moving to new counters */ - for (i = 0; i < box->n_events; i++) { - event = box->event_list[i]; - hwc = &event->hw; - - if (hwc->idx == assign[i] && - hwc->last_tag == box->tags[assign[i]]) - continue; - /* - * Ensure we don't accidentally enable a stopped - * counter simply because we rescheduled. 
- */ - if (hwc->state & PERF_HES_STOPPED) - hwc->state |= PERF_HES_ARCH; - - uncore_pmu_event_stop(event, PERF_EF_UPDATE); - } - - /* reprogram moved events into new counters */ - for (i = 0; i < n; i++) { - event = box->event_list[i]; - hwc = &event->hw; - - if (hwc->idx != assign[i] || - hwc->last_tag != box->tags[assign[i]]) - uncore_assign_hw_event(box, event, assign[i]); - else if (i < box->n_events) - continue; - - if (hwc->state & PERF_HES_ARCH) - continue; - - uncore_pmu_event_start(event, 0); - } - box->n_events = n; - - return 0; -} - -static void uncore_pmu_event_del(struct perf_event *event, int flags) -{ - struct intel_uncore_box *box = uncore_event_to_box(event); - int i; - - uncore_pmu_event_stop(event, PERF_EF_UPDATE); - - for (i = 0; i < box->n_events; i++) { - if (event == box->event_list[i]) { - uncore_put_event_constraint(box, event); - - while (++i < box->n_events) - box->event_list[i - 1] = box->event_list[i]; - - --box->n_events; - break; - } - } - - event->hw.idx = -1; - event->hw.last_tag = ~0ULL; -} - -static void uncore_pmu_event_read(struct perf_event *event) -{ - struct intel_uncore_box *box = uncore_event_to_box(event); - uncore_perf_event_update(box, event); -} - -/* - * validation ensures the group can be loaded onto the - * PMU if it was the only group available. - */ -static int uncore_validate_group(struct intel_uncore_pmu *pmu, - struct perf_event *event) -{ - struct perf_event *leader = event->group_leader; - struct intel_uncore_box *fake_box; - int ret = -EINVAL, n; - - fake_box = uncore_alloc_box(pmu->type, smp_processor_id()); - if (!fake_box) - return -ENOMEM; - - fake_box->pmu = pmu; - /* - * the event is not yet connected with its - * siblings therefore we must first collect - * existing siblings, then add the new event - * before we can simulate the scheduling - */ - n = uncore_collect_events(fake_box, leader, true); - if (n < 0) - goto out; - - fake_box->n_events = n; - n = uncore_collect_events(fake_box, event, false); - if (n < 0) - goto out; - - fake_box->n_events = n; - - ret = uncore_assign_events(fake_box, NULL, n); -out: - kfree(fake_box); - return ret; -} - -int uncore_pmu_event_init(struct perf_event *event) -{ - struct intel_uncore_pmu *pmu; - struct intel_uncore_box *box; - struct hw_perf_event *hwc = &event->hw; - int ret; - - if (event->attr.type != event->pmu->type) - return -ENOENT; - - pmu = uncore_event_to_pmu(event); - /* no device found for this pmu */ - if (pmu->func_id < 0) - return -ENOENT; - - /* - * Uncore PMU does measure at all privilege level all the time. - * So it doesn't make sense to specify any exclude bits. 
- */ - if (event->attr.exclude_user || event->attr.exclude_kernel || - event->attr.exclude_hv || event->attr.exclude_idle) - return -EINVAL; - - /* Sampling not supported yet */ - if (hwc->sample_period) - return -EINVAL; - - /* - * Place all uncore events for a particular physical package - * onto a single cpu - */ - if (event->cpu < 0) - return -EINVAL; - box = uncore_pmu_to_box(pmu, event->cpu); - if (!box || box->cpu < 0) - return -EINVAL; - event->cpu = box->cpu; - - event->hw.idx = -1; - event->hw.last_tag = ~0ULL; - event->hw.extra_reg.idx = EXTRA_REG_NONE; - - if (event->attr.config == UNCORE_FIXED_EVENT) { - /* no fixed counter */ - if (!pmu->type->fixed_ctl) - return -EINVAL; - /* - * if there is only one fixed counter, only the first pmu - * can access the fixed counter - */ - if (pmu->type->single_fixed && pmu->pmu_idx > 0) - return -EINVAL; - hwc->config = ~0ULL; - } else { - hwc->config = event->attr.config & pmu->type->event_mask; - if (pmu->type->ops->hw_config) { - ret = pmu->type->ops->hw_config(box, event); - if (ret) - return ret; - } - } - - if (event->group_leader != event) - ret = uncore_validate_group(pmu, event); - else - ret = 0; - - return ret; -} - -static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu) -{ - int ret; - - pmu->pmu = (struct pmu) { - .attr_groups = pmu->type->attr_groups, - .task_ctx_nr = perf_invalid_context, - .event_init = uncore_pmu_event_init, - .add = uncore_pmu_event_add, - .del = uncore_pmu_event_del, - .start = uncore_pmu_event_start, - .stop = uncore_pmu_event_stop, - .read = uncore_pmu_event_read, - }; - - if (pmu->type->num_boxes == 1) { - if (strlen(pmu->type->name) > 0) - sprintf(pmu->name, "uncore_%s", pmu->type->name); - else - sprintf(pmu->name, "uncore"); - } else { - sprintf(pmu->name, "uncore_%s_%d", pmu->type->name, - pmu->pmu_idx); - } - - ret = perf_pmu_register(&pmu->pmu, pmu->name, -1); - return ret; -} - -static void __init uncore_type_exit(struct intel_uncore_type *type) -{ - int i; - - for (i = 0; i < type->num_boxes; i++) - free_percpu(type->pmus[i].box); - kfree(type->pmus); - type->pmus = NULL; - kfree(type->attr_groups[1]); - type->attr_groups[1] = NULL; -} - -static void uncore_types_exit(struct intel_uncore_type **types) -{ - int i; - for (i = 0; types[i]; i++) - uncore_type_exit(types[i]); -} - -static int __init uncore_type_init(struct intel_uncore_type *type) -{ - struct intel_uncore_pmu *pmus; - struct attribute_group *events_group; - struct attribute **attrs; - int i, j; - - pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL); - if (!pmus) - return -ENOMEM; - - type->unconstrainted = (struct event_constraint) - __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1, - 0, type->num_counters, 0); - - for (i = 0; i < type->num_boxes; i++) { - pmus[i].func_id = -1; - pmus[i].pmu_idx = i; - pmus[i].type = type; - INIT_LIST_HEAD(&pmus[i].box_list); - pmus[i].box = alloc_percpu(struct intel_uncore_box *); - if (!pmus[i].box) - goto fail; - } - - if (type->event_descs) { - i = 0; - while (type->event_descs[i].attr.attr.name) - i++; - - events_group = kzalloc(sizeof(struct attribute *) * (i + 1) + - sizeof(*events_group), GFP_KERNEL); - if (!events_group) - goto fail; - - attrs = (struct attribute **)(events_group + 1); - events_group->name = "events"; - events_group->attrs = attrs; - - for (j = 0; j < i; j++) - attrs[j] = &type->event_descs[j].attr.attr; - - type->attr_groups[1] = events_group; - } - - type->pmus = pmus; - return 0; -fail: - uncore_type_exit(type); - return -ENOMEM; -} - -static 
int __init uncore_types_init(struct intel_uncore_type **types) -{ - int i, ret; - - for (i = 0; types[i]; i++) { - ret = uncore_type_init(types[i]); - if (ret) - goto fail; - } - return 0; -fail: - while (--i >= 0) - uncore_type_exit(types[i]); - return ret; -} - -static struct pci_driver *uncore_pci_driver; -static bool pcidrv_registered; - -/* - * add a pci uncore device - */ -static int __devinit uncore_pci_add(struct intel_uncore_type *type, - struct pci_dev *pdev) -{ - struct intel_uncore_pmu *pmu; - struct intel_uncore_box *box; - int i, phys_id; - - phys_id = pcibus_to_physid[pdev->bus->number]; - if (phys_id < 0) - return -ENODEV; - - box = uncore_alloc_box(type, 0); - if (!box) - return -ENOMEM; - - /* - * for performance monitoring unit with multiple boxes, - * each box has a different function id. - */ - for (i = 0; i < type->num_boxes; i++) { - pmu = &type->pmus[i]; - if (pmu->func_id == pdev->devfn) - break; - if (pmu->func_id < 0) { - pmu->func_id = pdev->devfn; - break; - } - pmu = NULL; - } - - if (!pmu) { - kfree(box); - return -EINVAL; - } - - box->phys_id = phys_id; - box->pci_dev = pdev; - box->pmu = pmu; - uncore_box_init(box); - pci_set_drvdata(pdev, box); - - raw_spin_lock(&uncore_box_lock); - list_add_tail(&box->list, &pmu->box_list); - raw_spin_unlock(&uncore_box_lock); - - return 0; -} - -static void uncore_pci_remove(struct pci_dev *pdev) -{ - struct intel_uncore_box *box = pci_get_drvdata(pdev); - struct intel_uncore_pmu *pmu = box->pmu; - int cpu, phys_id = pcibus_to_physid[pdev->bus->number]; - - if (WARN_ON_ONCE(phys_id != box->phys_id)) - return; - - raw_spin_lock(&uncore_box_lock); - list_del(&box->list); - raw_spin_unlock(&uncore_box_lock); - - for_each_possible_cpu(cpu) { - if (*per_cpu_ptr(pmu->box, cpu) == box) { - *per_cpu_ptr(pmu->box, cpu) = NULL; - atomic_dec(&box->refcnt); - } - } - - WARN_ON_ONCE(atomic_read(&box->refcnt) != 1); - kfree(box); -} - -static int __devinit uncore_pci_probe(struct pci_dev *pdev, - const struct pci_device_id *id) -{ - struct intel_uncore_type *type; - - type = (struct intel_uncore_type *)id->driver_data; - return uncore_pci_add(type, pdev); -} - -static int __init uncore_pci_init(void) -{ - int ret; - - switch (boot_cpu_data.x86_model) { - case 45: /* Sandy Bridge-EP */ - pci_uncores = snbep_pci_uncores; - uncore_pci_driver = &snbep_uncore_pci_driver; - snbep_pci2phy_map_init(); - break; - default: - return 0; - } - - ret = uncore_types_init(pci_uncores); - if (ret) - return ret; - - uncore_pci_driver->probe = uncore_pci_probe; - uncore_pci_driver->remove = uncore_pci_remove; - - ret = pci_register_driver(uncore_pci_driver); - if (ret == 0) - pcidrv_registered = true; - else - uncore_types_exit(pci_uncores); - - return ret; -} - -static void __init uncore_pci_exit(void) -{ - if (pcidrv_registered) { - pcidrv_registered = false; - pci_unregister_driver(uncore_pci_driver); - uncore_types_exit(pci_uncores); - } -} - -static void __cpuinit uncore_cpu_dying(int cpu) -{ - struct intel_uncore_type *type; - struct intel_uncore_pmu *pmu; - struct intel_uncore_box *box; - int i, j; - - for (i = 0; msr_uncores[i]; i++) { - type = msr_uncores[i]; - for (j = 0; j < type->num_boxes; j++) { - pmu = &type->pmus[j]; - box = *per_cpu_ptr(pmu->box, cpu); - *per_cpu_ptr(pmu->box, cpu) = NULL; - if (box && atomic_dec_and_test(&box->refcnt)) - kfree(box); - } - } -} - -static int __cpuinit uncore_cpu_starting(int cpu) -{ - struct intel_uncore_type *type; - struct intel_uncore_pmu *pmu; - struct intel_uncore_box *box, *exist; - int i, j, k, 
phys_id; - - phys_id = topology_physical_package_id(cpu); - - for (i = 0; msr_uncores[i]; i++) { - type = msr_uncores[i]; - for (j = 0; j < type->num_boxes; j++) { - pmu = &type->pmus[j]; - box = *per_cpu_ptr(pmu->box, cpu); - /* called by uncore_cpu_init? */ - if (box && box->phys_id >= 0) { - uncore_box_init(box); - continue; - } - - for_each_online_cpu(k) { - exist = *per_cpu_ptr(pmu->box, k); - if (exist && exist->phys_id == phys_id) { - atomic_inc(&exist->refcnt); - *per_cpu_ptr(pmu->box, cpu) = exist; - kfree(box); - box = NULL; - break; - } - } - - if (box) { - box->phys_id = phys_id; - uncore_box_init(box); - } - } - } - return 0; -} - -static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id) -{ - struct intel_uncore_type *type; - struct intel_uncore_pmu *pmu; - struct intel_uncore_box *box; - int i, j; - - for (i = 0; msr_uncores[i]; i++) { - type = msr_uncores[i]; - for (j = 0; j < type->num_boxes; j++) { - pmu = &type->pmus[j]; - if (pmu->func_id < 0) - pmu->func_id = j; - - box = uncore_alloc_box(type, cpu); - if (!box) - return -ENOMEM; - - box->pmu = pmu; - box->phys_id = phys_id; - *per_cpu_ptr(pmu->box, cpu) = box; - } - } - return 0; -} - -static void __cpuinit uncore_change_context(struct intel_uncore_type **uncores, - int old_cpu, int new_cpu) -{ - struct intel_uncore_type *type; - struct intel_uncore_pmu *pmu; - struct intel_uncore_box *box; - int i, j; - - for (i = 0; uncores[i]; i++) { - type = uncores[i]; - for (j = 0; j < type->num_boxes; j++) { - pmu = &type->pmus[j]; - if (old_cpu < 0) - box = uncore_pmu_to_box(pmu, new_cpu); - else - box = uncore_pmu_to_box(pmu, old_cpu); - if (!box) - continue; - - if (old_cpu < 0) { - WARN_ON_ONCE(box->cpu != -1); - box->cpu = new_cpu; - continue; - } - - WARN_ON_ONCE(box->cpu != old_cpu); - if (new_cpu >= 0) { - uncore_pmu_cancel_hrtimer(box); - perf_pmu_migrate_context(&pmu->pmu, - old_cpu, new_cpu); - box->cpu = new_cpu; - } else { - box->cpu = -1; - } - } - } -} - -static void __cpuinit uncore_event_exit_cpu(int cpu) -{ - int i, phys_id, target; - - /* if exiting cpu is used for collecting uncore events */ - if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask)) - return; - - /* find a new cpu to collect uncore events */ - phys_id = topology_physical_package_id(cpu); - target = -1; - for_each_online_cpu(i) { - if (i == cpu) - continue; - if (phys_id == topology_physical_package_id(i)) { - target = i; - break; - } - } - - /* migrate uncore events to the new cpu */ - if (target >= 0) - cpumask_set_cpu(target, &uncore_cpu_mask); - - uncore_change_context(msr_uncores, cpu, target); - uncore_change_context(pci_uncores, cpu, target); -} - -static void __cpuinit uncore_event_init_cpu(int cpu) -{ - int i, phys_id; - - phys_id = topology_physical_package_id(cpu); - for_each_cpu(i, &uncore_cpu_mask) { - if (phys_id == topology_physical_package_id(i)) - return; - } - - cpumask_set_cpu(cpu, &uncore_cpu_mask); - - uncore_change_context(msr_uncores, -1, cpu); - uncore_change_context(pci_uncores, -1, cpu); -} - -static int __cpuinit uncore_cpu_notifier(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - unsigned int cpu = (long)hcpu; - - /* allocate/free data structure for uncore box */ - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_UP_PREPARE: - uncore_cpu_prepare(cpu, -1); - break; - case CPU_STARTING: - uncore_cpu_starting(cpu); - break; - case CPU_UP_CANCELED: - case CPU_DYING: - uncore_cpu_dying(cpu); - break; - default: - break; - } - - /* select the cpu that collects uncore events */ - switch (action & 
~CPU_TASKS_FROZEN) { - case CPU_DOWN_FAILED: - case CPU_STARTING: - uncore_event_init_cpu(cpu); - break; - case CPU_DOWN_PREPARE: - uncore_event_exit_cpu(cpu); - break; - default: - break; - } - - return NOTIFY_OK; -} - -static struct notifier_block uncore_cpu_nb __cpuinitdata = { - .notifier_call = uncore_cpu_notifier, - /* - * to migrate uncore events, our notifier should be executed - * before perf core's notifier. - */ - .priority = CPU_PRI_PERF + 1, -}; - -static void __init uncore_cpu_setup(void *dummy) -{ - uncore_cpu_starting(smp_processor_id()); -} - -static int __init uncore_cpu_init(void) -{ - int ret, cpu, max_cores; - - max_cores = boot_cpu_data.x86_max_cores; - switch (boot_cpu_data.x86_model) { - case 26: /* Nehalem */ - case 30: - case 37: /* Westmere */ - case 44: - msr_uncores = nhm_msr_uncores; - break; - case 42: /* Sandy Bridge */ - if (snb_uncore_cbox.num_boxes > max_cores) - snb_uncore_cbox.num_boxes = max_cores; - msr_uncores = snb_msr_uncores; - break; - case 45: /* Sandy Birdge-EP */ - if (snbep_uncore_cbox.num_boxes > max_cores) - snbep_uncore_cbox.num_boxes = max_cores; - msr_uncores = snbep_msr_uncores; - break; - default: - return 0; - } - - ret = uncore_types_init(msr_uncores); - if (ret) - return ret; - - get_online_cpus(); - - for_each_online_cpu(cpu) { - int i, phys_id = topology_physical_package_id(cpu); - - for_each_cpu(i, &uncore_cpu_mask) { - if (phys_id == topology_physical_package_id(i)) { - phys_id = -1; - break; - } - } - if (phys_id < 0) - continue; - - uncore_cpu_prepare(cpu, phys_id); - uncore_event_init_cpu(cpu); - } - on_each_cpu(uncore_cpu_setup, NULL, 1); - - register_cpu_notifier(&uncore_cpu_nb); - - put_online_cpus(); - - return 0; -} - -static int __init uncore_pmus_register(void) -{ - struct intel_uncore_pmu *pmu; - struct intel_uncore_type *type; - int i, j; - - for (i = 0; msr_uncores[i]; i++) { - type = msr_uncores[i]; - for (j = 0; j < type->num_boxes; j++) { - pmu = &type->pmus[j]; - uncore_pmu_register(pmu); - } - } - - for (i = 0; pci_uncores[i]; i++) { - type = pci_uncores[i]; - for (j = 0; j < type->num_boxes; j++) { - pmu = &type->pmus[j]; - uncore_pmu_register(pmu); - } - } - - return 0; -} - -static int __init intel_uncore_init(void) -{ - int ret; - - if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) - return -ENODEV; - - ret = uncore_pci_init(); - if (ret) - goto fail; - ret = uncore_cpu_init(); - if (ret) { - uncore_pci_exit(); - goto fail; - } - - uncore_pmus_register(); - return 0; -fail: - return ret; -} -device_initcall(intel_uncore_init); diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.h deleted file mode 100644 index b13e9ea81def..000000000000 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ /dev/null @@ -1,424 +0,0 @@ -#include -#include -#include -#include -#include "perf_event.h" - -#define UNCORE_PMU_NAME_LEN 32 -#define UNCORE_BOX_HASH_SIZE 8 - -#define UNCORE_PMU_HRTIMER_INTERVAL (60 * NSEC_PER_SEC) - -#define UNCORE_FIXED_EVENT 0xff -#define UNCORE_PMC_IDX_MAX_GENERIC 8 -#define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC -#define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FIXED + 1) - -#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff) - -/* SNB event control */ -#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff -#define SNB_UNC_CTL_UMASK_MASK 0x0000ff00 -#define SNB_UNC_CTL_EDGE_DET (1 << 18) -#define SNB_UNC_CTL_EN (1 << 22) -#define SNB_UNC_CTL_INVERT (1 << 23) -#define SNB_UNC_CTL_CMASK_MASK 0x1f000000 -#define 
NHM_UNC_CTL_CMASK_MASK 0xff000000 -#define NHM_UNC_FIXED_CTR_CTL_EN (1 << 0) - -#define SNB_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \ - SNB_UNC_CTL_UMASK_MASK | \ - SNB_UNC_CTL_EDGE_DET | \ - SNB_UNC_CTL_INVERT | \ - SNB_UNC_CTL_CMASK_MASK) - -#define NHM_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \ - SNB_UNC_CTL_UMASK_MASK | \ - SNB_UNC_CTL_EDGE_DET | \ - SNB_UNC_CTL_INVERT | \ - NHM_UNC_CTL_CMASK_MASK) - -/* SNB global control register */ -#define SNB_UNC_PERF_GLOBAL_CTL 0x391 -#define SNB_UNC_FIXED_CTR_CTRL 0x394 -#define SNB_UNC_FIXED_CTR 0x395 - -/* SNB uncore global control */ -#define SNB_UNC_GLOBAL_CTL_CORE_ALL ((1 << 4) - 1) -#define SNB_UNC_GLOBAL_CTL_EN (1 << 29) - -/* SNB Cbo register */ -#define SNB_UNC_CBO_0_PERFEVTSEL0 0x700 -#define SNB_UNC_CBO_0_PER_CTR0 0x706 -#define SNB_UNC_CBO_MSR_OFFSET 0x10 - -/* NHM global control register */ -#define NHM_UNC_PERF_GLOBAL_CTL 0x391 -#define NHM_UNC_FIXED_CTR 0x394 -#define NHM_UNC_FIXED_CTR_CTRL 0x395 - -/* NHM uncore global control */ -#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 8) - 1) -#define NHM_UNC_GLOBAL_CTL_EN_FC (1ULL << 32) - -/* NHM uncore register */ -#define NHM_UNC_PERFEVTSEL0 0x3c0 -#define NHM_UNC_UNCORE_PMC0 0x3b0 - -/* SNB-EP Box level control */ -#define SNBEP_PMON_BOX_CTL_RST_CTRL (1 << 0) -#define SNBEP_PMON_BOX_CTL_RST_CTRS (1 << 1) -#define SNBEP_PMON_BOX_CTL_FRZ (1 << 8) -#define SNBEP_PMON_BOX_CTL_FRZ_EN (1 << 16) -#define SNBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \ - SNBEP_PMON_BOX_CTL_RST_CTRS | \ - SNBEP_PMON_BOX_CTL_FRZ_EN) -/* SNB-EP event control */ -#define SNBEP_PMON_CTL_EV_SEL_MASK 0x000000ff -#define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00 -#define SNBEP_PMON_CTL_RST (1 << 17) -#define SNBEP_PMON_CTL_EDGE_DET (1 << 18) -#define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21) /* only for QPI */ -#define SNBEP_PMON_CTL_EN (1 << 22) -#define SNBEP_PMON_CTL_INVERT (1 << 23) -#define SNBEP_PMON_CTL_TRESH_MASK 0xff000000 -#define SNBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \ - SNBEP_PMON_CTL_UMASK_MASK | \ - SNBEP_PMON_CTL_EDGE_DET | \ - SNBEP_PMON_CTL_INVERT | \ - SNBEP_PMON_CTL_TRESH_MASK) - -/* SNB-EP Ubox event control */ -#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK 0x1f000000 -#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK \ - (SNBEP_PMON_CTL_EV_SEL_MASK | \ - SNBEP_PMON_CTL_UMASK_MASK | \ - SNBEP_PMON_CTL_EDGE_DET | \ - SNBEP_PMON_CTL_INVERT | \ - SNBEP_U_MSR_PMON_CTL_TRESH_MASK) - -#define SNBEP_CBO_PMON_CTL_TID_EN (1 << 19) -#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \ - SNBEP_CBO_PMON_CTL_TID_EN) - -/* SNB-EP PCU event control */ -#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK 0x0000c000 -#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK 0x1f000000 -#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT (1 << 30) -#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET (1 << 31) -#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK \ - (SNBEP_PMON_CTL_EV_SEL_MASK | \ - SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \ - SNBEP_PMON_CTL_EDGE_DET | \ - SNBEP_PMON_CTL_INVERT | \ - SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \ - SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \ - SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET) - -/* SNB-EP pci control register */ -#define SNBEP_PCI_PMON_BOX_CTL 0xf4 -#define SNBEP_PCI_PMON_CTL0 0xd8 -/* SNB-EP pci counter register */ -#define SNBEP_PCI_PMON_CTR0 0xa0 - -/* SNB-EP home agent register */ -#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0 0x40 -#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1 0x44 -#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH 0x48 -/* SNB-EP memory controller register */ -#define 
SNBEP_MC_CHy_PCI_PMON_FIXED_CTL 0xf0 -#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR 0xd0 -/* SNB-EP QPI register */ -#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0 0x228 -#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1 0x22c -#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0 0x238 -#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1 0x23c - -/* SNB-EP Ubox register */ -#define SNBEP_U_MSR_PMON_CTR0 0xc16 -#define SNBEP_U_MSR_PMON_CTL0 0xc10 - -#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL 0xc08 -#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR 0xc09 - -/* SNB-EP Cbo register */ -#define SNBEP_C0_MSR_PMON_CTR0 0xd16 -#define SNBEP_C0_MSR_PMON_CTL0 0xd10 -#define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04 -#define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14 -#define SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK 0xfffffc1f -#define SNBEP_CBO_MSR_OFFSET 0x20 - -/* SNB-EP PCU register */ -#define SNBEP_PCU_MSR_PMON_CTR0 0xc36 -#define SNBEP_PCU_MSR_PMON_CTL0 0xc30 -#define SNBEP_PCU_MSR_PMON_BOX_CTL 0xc24 -#define SNBEP_PCU_MSR_PMON_BOX_FILTER 0xc34 -#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK 0xffffffff -#define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc -#define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd - -struct intel_uncore_ops; -struct intel_uncore_pmu; -struct intel_uncore_box; -struct uncore_event_desc; - -struct intel_uncore_type { - const char *name; - int num_counters; - int num_boxes; - int perf_ctr_bits; - int fixed_ctr_bits; - unsigned perf_ctr; - unsigned event_ctl; - unsigned event_mask; - unsigned fixed_ctr; - unsigned fixed_ctl; - unsigned box_ctl; - unsigned msr_offset; - unsigned num_shared_regs:8; - unsigned single_fixed:1; - struct event_constraint unconstrainted; - struct event_constraint *constraints; - struct intel_uncore_pmu *pmus; - struct intel_uncore_ops *ops; - struct uncore_event_desc *event_descs; - const struct attribute_group *attr_groups[3]; -}; - -#define format_group attr_groups[0] - -struct intel_uncore_ops { - void (*init_box)(struct intel_uncore_box *); - void (*disable_box)(struct intel_uncore_box *); - void (*enable_box)(struct intel_uncore_box *); - void (*disable_event)(struct intel_uncore_box *, struct perf_event *); - void (*enable_event)(struct intel_uncore_box *, struct perf_event *); - u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *); - int (*hw_config)(struct intel_uncore_box *, struct perf_event *); - struct event_constraint *(*get_constraint)(struct intel_uncore_box *, - struct perf_event *); - void (*put_constraint)(struct intel_uncore_box *, struct perf_event *); -}; - -struct intel_uncore_pmu { - struct pmu pmu; - char name[UNCORE_PMU_NAME_LEN]; - int pmu_idx; - int func_id; - struct intel_uncore_type *type; - struct intel_uncore_box ** __percpu box; - struct list_head box_list; -}; - -struct intel_uncore_extra_reg { - raw_spinlock_t lock; - u64 config1; - atomic_t ref; -}; - -struct intel_uncore_box { - int phys_id; - int n_active; /* number of active events */ - int n_events; - int cpu; /* cpu to collect events */ - unsigned long flags; - atomic_t refcnt; - struct perf_event *events[UNCORE_PMC_IDX_MAX]; - struct perf_event *event_list[UNCORE_PMC_IDX_MAX]; - unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; - u64 tags[UNCORE_PMC_IDX_MAX]; - struct pci_dev *pci_dev; - struct intel_uncore_pmu *pmu; - struct hrtimer hrtimer; - struct list_head list; - struct intel_uncore_extra_reg shared_regs[0]; -}; - -#define UNCORE_BOX_FLAG_INITIATED 0 - -struct uncore_event_desc { - struct kobj_attribute attr; - const char *config; -}; - -#define INTEL_UNCORE_EVENT_DESC(_name, _config) \ -{ \ - .attr = __ATTR(_name, 0444, uncore_event_show, 
NULL), \ - .config = _config, \ -} - -#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \ -static ssize_t __uncore_##_var##_show(struct kobject *kobj, \ - struct kobj_attribute *attr, \ - char *page) \ -{ \ - BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ - return sprintf(page, _format "\n"); \ -} \ -static struct kobj_attribute format_attr_##_var = \ - __ATTR(_name, 0444, __uncore_##_var##_show, NULL) - - -static ssize_t uncore_event_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) -{ - struct uncore_event_desc *event = - container_of(attr, struct uncore_event_desc, attr); - return sprintf(buf, "%s", event->config); -} - -static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box) -{ - return box->pmu->type->box_ctl; -} - -static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box) -{ - return box->pmu->type->fixed_ctl; -} - -static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box) -{ - return box->pmu->type->fixed_ctr; -} - -static inline -unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx) -{ - return idx * 4 + box->pmu->type->event_ctl; -} - -static inline -unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx) -{ - return idx * 8 + box->pmu->type->perf_ctr; -} - -static inline -unsigned uncore_msr_box_ctl(struct intel_uncore_box *box) -{ - if (!box->pmu->type->box_ctl) - return 0; - return box->pmu->type->box_ctl + - box->pmu->type->msr_offset * box->pmu->pmu_idx; -} - -static inline -unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box) -{ - if (!box->pmu->type->fixed_ctl) - return 0; - return box->pmu->type->fixed_ctl + - box->pmu->type->msr_offset * box->pmu->pmu_idx; -} - -static inline -unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box) -{ - return box->pmu->type->fixed_ctr + - box->pmu->type->msr_offset * box->pmu->pmu_idx; -} - -static inline -unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx) -{ - return idx + box->pmu->type->event_ctl + - box->pmu->type->msr_offset * box->pmu->pmu_idx; -} - -static inline -unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx) -{ - return idx + box->pmu->type->perf_ctr + - box->pmu->type->msr_offset * box->pmu->pmu_idx; -} - -static inline -unsigned uncore_fixed_ctl(struct intel_uncore_box *box) -{ - if (box->pci_dev) - return uncore_pci_fixed_ctl(box); - else - return uncore_msr_fixed_ctl(box); -} - -static inline -unsigned uncore_fixed_ctr(struct intel_uncore_box *box) -{ - if (box->pci_dev) - return uncore_pci_fixed_ctr(box); - else - return uncore_msr_fixed_ctr(box); -} - -static inline -unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx) -{ - if (box->pci_dev) - return uncore_pci_event_ctl(box, idx); - else - return uncore_msr_event_ctl(box, idx); -} - -static inline -unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx) -{ - if (box->pci_dev) - return uncore_pci_perf_ctr(box, idx); - else - return uncore_msr_perf_ctr(box, idx); -} - -static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box) -{ - return box->pmu->type->perf_ctr_bits; -} - -static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box) -{ - return box->pmu->type->fixed_ctr_bits; -} - -static inline int uncore_num_counters(struct intel_uncore_box *box) -{ - return box->pmu->type->num_counters; -} - -static inline void uncore_disable_box(struct intel_uncore_box *box) -{ - if (box->pmu->type->ops->disable_box) - box->pmu->type->ops->disable_box(box); -} - -static inline void 
uncore_enable_box(struct intel_uncore_box *box) -{ - if (box->pmu->type->ops->enable_box) - box->pmu->type->ops->enable_box(box); -} - -static inline void uncore_disable_event(struct intel_uncore_box *box, - struct perf_event *event) -{ - box->pmu->type->ops->disable_event(box, event); -} - -static inline void uncore_enable_event(struct intel_uncore_box *box, - struct perf_event *event) -{ - box->pmu->type->ops->enable_event(box, event); -} - -static inline u64 uncore_read_counter(struct intel_uncore_box *box, - struct perf_event *event) -{ - return box->pmu->type->ops->read_counter(box, event); -} - -static inline void uncore_box_init(struct intel_uncore_box *box) -{ - if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { - if (box->pmu->type->ops->init_box) - box->pmu->type->ops->init_box(box); - } -} diff --git a/trunk/arch/x86/kernel/cpu/perf_event_p4.c b/trunk/arch/x86/kernel/cpu/perf_event_p4.c index 92c7e39a079f..47124a73dd73 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_p4.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_p4.c @@ -895,8 +895,8 @@ static void p4_pmu_disable_pebs(void) * So at moment let leave metrics turned on forever -- it's * ok for now but need to be revisited! * - * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)0); - * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)0); + * (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)0); + * (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)0); */ } @@ -909,7 +909,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event) * state we need to clear P4_CCCR_OVF, otherwise interrupt get * asserted again and again */ - (void)wrmsrl_safe(hwc->config_base, + (void)checking_wrmsrl(hwc->config_base, (u64)(p4_config_unpack_cccr(hwc->config)) & ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED); } @@ -943,8 +943,8 @@ static void p4_pmu_enable_pebs(u64 config) bind = &p4_pebs_bind_map[idx]; - (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs); - (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert); + (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs); + (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert); } static void p4_pmu_enable_event(struct perf_event *event) @@ -978,8 +978,8 @@ static void p4_pmu_enable_event(struct perf_event *event) */ p4_pmu_enable_pebs(hwc->config); - (void)wrmsrl_safe(escr_addr, escr_conf); - (void)wrmsrl_safe(hwc->config_base, + (void)checking_wrmsrl(escr_addr, escr_conf); + (void)checking_wrmsrl(hwc->config_base, (cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE); } @@ -1325,7 +1325,7 @@ __init int p4_pmu_init(void) unsigned int low, high; /* If we get stripped -- indexing fails */ - BUILD_BUG_ON(ARCH_P4_MAX_CCCR > INTEL_PMC_MAX_GENERIC); + BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC); rdmsr(MSR_IA32_MISC_ENABLE, low, high); if (!(low & (1 << 7))) { diff --git a/trunk/arch/x86/kernel/cpu/perf_event_p6.c b/trunk/arch/x86/kernel/cpu/perf_event_p6.c index e4dd0f7a0453..32bcfc7dd230 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_p6.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_p6.c @@ -71,7 +71,7 @@ p6_pmu_disable_event(struct perf_event *event) if (cpuc->enabled) val |= ARCH_PERFMON_EVENTSEL_ENABLE; - (void)wrmsrl_safe(hwc->config_base, val); + (void)checking_wrmsrl(hwc->config_base, val); } static void p6_pmu_enable_event(struct perf_event *event) @@ -84,7 +84,7 @@ static void p6_pmu_enable_event(struct perf_event *event) if (cpuc->enabled) val |= ARCH_PERFMON_EVENTSEL_ENABLE; - (void)wrmsrl_safe(hwc->config_base, 
val); + (void)checking_wrmsrl(hwc->config_base, val); } PMU_FORMAT_ATTR(event, "config:0-7" ); diff --git a/trunk/arch/x86/kernel/dumpstack.c b/trunk/arch/x86/kernel/dumpstack.c index ae42418bc50f..571246d81edf 100644 --- a/trunk/arch/x86/kernel/dumpstack.c +++ b/trunk/arch/x86/kernel/dumpstack.c @@ -27,8 +27,8 @@ static int die_counter; void printk_address(unsigned long address, int reliable) { - pr_cont(" [<%p>] %s%pB\n", - (void *)address, reliable ? "" : "? ", (void *)address); + printk(" [<%p>] %s%pB\n", (void *) address, + reliable ? "" : "? ", (void *) address); } #ifdef CONFIG_FUNCTION_GRAPH_TRACER @@ -271,7 +271,6 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err) current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP) return 1; - print_modules(); show_regs(regs); #ifdef CONFIG_X86_32 if (user_mode_vm(regs)) { diff --git a/trunk/arch/x86/kernel/dumpstack_32.c b/trunk/arch/x86/kernel/dumpstack_32.c index 1038a417ea53..e0b1d783daab 100644 --- a/trunk/arch/x86/kernel/dumpstack_32.c +++ b/trunk/arch/x86/kernel/dumpstack_32.c @@ -73,11 +73,11 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, if (kstack_end(stack)) break; if (i && ((i % STACKSLOTS_PER_LINE) == 0)) - pr_cont("\n"); - pr_cont(" %08lx", *stack++); + printk(KERN_CONT "\n"); + printk(KERN_CONT " %08lx", *stack++); touch_nmi_watchdog(); } - pr_cont("\n"); + printk(KERN_CONT "\n"); show_trace_log_lvl(task, regs, sp, bp, log_lvl); } @@ -86,11 +86,12 @@ void show_regs(struct pt_regs *regs) { int i; + print_modules(); __show_regs(regs, !user_mode_vm(regs)); - pr_emerg("Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n", - TASK_COMM_LEN, current->comm, task_pid_nr(current), - current_thread_info(), current, task_thread_info(current)); + printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n", + TASK_COMM_LEN, current->comm, task_pid_nr(current), + current_thread_info(), current, task_thread_info(current)); /* * When in-kernel, we also print out the stack and code at the * time of the fault.. 
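The show_stack_log_lvl() hunk above swaps pr_cont()/pr_emerg() back to explicit printk(KERN_CONT ...)/printk(KERN_EMERG ...) calls; on either side of the hunk, one logged line is assembled from several calls, with a line break forced every STACKSLOTS_PER_LINE stack words. A minimal userspace sketch of that accumulation pattern follows — the slot count of 8 and the made-up stack contents are assumptions for illustration, not the kernel's values.

#include <stdio.h>

#define STACKSLOTS_PER_LINE 8   /* stands in for the kernel constant; value assumed */

/*
 * Emit stack words as continuation fragments, starting a new output
 * line after every STACKSLOTS_PER_LINE words -- the same shape as the
 * printk(KERN_CONT " %08lx", *stack++) loop in the hunk above.
 */
static void show_stack_words(const unsigned long *stack, int nwords)
{
        int i;

        for (i = 0; i < nwords; i++) {
                if (i && (i % STACKSLOTS_PER_LINE) == 0)
                        printf("\n");
                printf(" %08lx", stack[i]);
        }
        printf("\n");
}

int main(void)
{
        unsigned long fake_stack[20];
        int i;

        for (i = 0; i < 20; i++)
                fake_stack[i] = 0xc1000000ul + i * 4;   /* invented addresses */

        show_stack_words(fake_stack, 20);
        return 0;
}

Only the spelling of the continuation call changes across the hunk; the grouping logic is identical on both sides.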
@@ -101,10 +102,10 @@ void show_regs(struct pt_regs *regs) unsigned char c; u8 *ip; - pr_emerg("Stack:\n"); + printk(KERN_EMERG "Stack:\n"); show_stack_log_lvl(NULL, regs, ®s->sp, 0, KERN_EMERG); - pr_emerg("Code:"); + printk(KERN_EMERG "Code: "); ip = (u8 *)regs->ip - code_prologue; if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { @@ -115,16 +116,16 @@ void show_regs(struct pt_regs *regs) for (i = 0; i < code_len; i++, ip++) { if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { - pr_cont(" Bad EIP value."); + printk(KERN_CONT " Bad EIP value."); break; } if (ip == (u8 *)regs->ip) - pr_cont(" <%02x>", c); + printk(KERN_CONT "<%02x> ", c); else - pr_cont(" %02x", c); + printk(KERN_CONT "%02x ", c); } } - pr_cont("\n"); + printk(KERN_CONT "\n"); } int is_valid_bugaddr(unsigned long ip) diff --git a/trunk/arch/x86/kernel/dumpstack_64.c b/trunk/arch/x86/kernel/dumpstack_64.c index b653675d5288..791b76122aa8 100644 --- a/trunk/arch/x86/kernel/dumpstack_64.c +++ b/trunk/arch/x86/kernel/dumpstack_64.c @@ -228,20 +228,20 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, if (stack >= irq_stack && stack <= irq_stack_end) { if (stack == irq_stack_end) { stack = (unsigned long *) (irq_stack_end[-1]); - pr_cont(" "); + printk(KERN_CONT " "); } } else { if (((long) stack & (THREAD_SIZE-1)) == 0) break; } if (i && ((i % STACKSLOTS_PER_LINE) == 0)) - pr_cont("\n"); - pr_cont(" %016lx", *stack++); + printk(KERN_CONT "\n"); + printk(KERN_CONT " %016lx", *stack++); touch_nmi_watchdog(); } preempt_enable(); - pr_cont("\n"); + printk(KERN_CONT "\n"); show_trace_log_lvl(task, regs, sp, bp, log_lvl); } @@ -254,9 +254,10 @@ void show_regs(struct pt_regs *regs) sp = regs->sp; printk("CPU %d ", cpu); + print_modules(); __show_regs(regs, 1); - printk(KERN_DEFAULT "Process %s (pid: %d, threadinfo %p, task %p)\n", - cur->comm, cur->pid, task_thread_info(cur), cur); + printk("Process %s (pid: %d, threadinfo %p, task %p)\n", + cur->comm, cur->pid, task_thread_info(cur), cur); /* * When in-kernel, we also print out the stack and code at the @@ -283,16 +284,16 @@ void show_regs(struct pt_regs *regs) for (i = 0; i < code_len; i++, ip++) { if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { - pr_cont(" Bad RIP value."); + printk(KERN_CONT " Bad RIP value."); break; } if (ip == (u8 *)regs->ip) - pr_cont("<%02x> ", c); + printk(KERN_CONT "<%02x> ", c); else - pr_cont("%02x ", c); + printk(KERN_CONT "%02x ", c); } } - pr_cont("\n"); + printk(KERN_CONT "\n"); } int is_valid_bugaddr(unsigned long ip) diff --git a/trunk/arch/x86/kernel/entry_64.S b/trunk/arch/x86/kernel/entry_64.S index 111f6bbd8b38..7d65133b51be 100644 --- a/trunk/arch/x86/kernel/entry_64.S +++ b/trunk/arch/x86/kernel/entry_64.S @@ -1758,30 +1758,10 @@ end_repeat_nmi: */ call save_paranoid DEFAULT_FRAME 0 - - /* - * Save off the CR2 register. If we take a page fault in the NMI then - * it could corrupt the CR2 value. If the NMI preempts a page fault - * handler before it was able to read the CR2 register, and then the - * NMI itself takes a page fault, the page fault that was preempted - * will read the information from the NMI page fault and not the - * origin fault. Save it off and restore it if it changes. - * Use the r12 callee-saved register. - */ - movq %cr2, %r12 - /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ movq %rsp,%rdi movq $-1,%rsi call do_nmi - - /* Did the NMI take a page fault? 
Restore cr2 if it did */ - movq %cr2, %rcx - cmpq %rcx, %r12 - je 1f - movq %r12, %cr2 -1: - testl %ebx,%ebx /* swapgs needed? */ jnz nmi_restore nmi_swapgs: diff --git a/trunk/arch/x86/kernel/irq.c b/trunk/arch/x86/kernel/irq.c index 1f5f1d5d2a02..3dafc6003b7c 100644 --- a/trunk/arch/x86/kernel/irq.c +++ b/trunk/arch/x86/kernel/irq.c @@ -294,9 +294,9 @@ void fixup_irqs(void) raw_spin_unlock(&desc->lock); if (break_affinity && set_affinity) - pr_notice("Broke affinity for irq %i\n", irq); + printk("Broke affinity for irq %i\n", irq); else if (!set_affinity) - pr_notice("Cannot set affinity for irq %i\n", irq); + printk("Cannot set affinity for irq %i\n", irq); } /* diff --git a/trunk/arch/x86/kernel/microcode_core.c b/trunk/arch/x86/kernel/microcode_core.c index 4873e62db6a1..fbdfc6917180 100644 --- a/trunk/arch/x86/kernel/microcode_core.c +++ b/trunk/arch/x86/kernel/microcode_core.c @@ -87,7 +87,6 @@ #include #include #include -#include MODULE_DESCRIPTION("Microcode Update Driver"); MODULE_AUTHOR("Tigran Aivazian "); @@ -278,6 +277,7 @@ static int reload_for_cpu(int cpu) struct ucode_cpu_info *uci = ucode_cpu_info + cpu; int err = 0; + mutex_lock(µcode_mutex); if (uci->valid) { enum ucode_state ustate; @@ -288,6 +288,7 @@ static int reload_for_cpu(int cpu) if (ustate == UCODE_ERROR) err = -EINVAL; } + mutex_unlock(µcode_mutex); return err; } @@ -297,31 +298,19 @@ static ssize_t reload_store(struct device *dev, const char *buf, size_t size) { unsigned long val; - int cpu; - ssize_t ret = 0, tmp_ret; + int cpu = dev->id; + ssize_t ret = 0; ret = kstrtoul(buf, 0, &val); if (ret) return ret; - if (val != 1) - return size; - - get_online_cpus(); - mutex_lock(µcode_mutex); - for_each_online_cpu(cpu) { - tmp_ret = reload_for_cpu(cpu); - if (tmp_ret != 0) - pr_warn("Error reloading microcode on CPU %d\n", cpu); - - /* save retval of the first encountered reload error */ - if (!ret) - ret = tmp_ret; + if (val == 1) { + get_online_cpus(); + if (cpu_online(cpu)) + ret = reload_for_cpu(cpu); + put_online_cpus(); } - if (!ret) - perf_check_microcode(); - mutex_unlock(µcode_mutex); - put_online_cpus(); if (!ret) ret = size; @@ -350,6 +339,7 @@ static DEVICE_ATTR(version, 0400, version_show, NULL); static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL); static struct attribute *mc_default_attrs[] = { + &dev_attr_reload.attr, &dev_attr_version.attr, &dev_attr_processor_flags.attr, NULL @@ -514,7 +504,7 @@ static struct notifier_block __refdata mc_cpu_notifier = { #ifdef MODULE /* Autoload on Intel and AMD systems */ -static const struct x86_cpu_id __initconst microcode_id[] = { +static const struct x86_cpu_id microcode_id[] = { #ifdef CONFIG_MICROCODE_INTEL { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, }, #endif @@ -526,16 +516,6 @@ static const struct x86_cpu_id __initconst microcode_id[] = { MODULE_DEVICE_TABLE(x86cpu, microcode_id); #endif -static struct attribute *cpu_root_microcode_attrs[] = { - &dev_attr_reload.attr, - NULL -}; - -static struct attribute_group cpu_root_microcode_group = { - .name = "microcode", - .attrs = cpu_root_microcode_attrs, -}; - static int __init microcode_init(void) { struct cpuinfo_x86 *c = &cpu_data(0); @@ -560,25 +540,16 @@ static int __init microcode_init(void) mutex_lock(µcode_mutex); error = subsys_interface_register(&mc_cpu_interface); - if (!error) - perf_check_microcode(); + mutex_unlock(µcode_mutex); put_online_cpus(); if (error) goto out_pdev; - error = sysfs_create_group(&cpu_subsys.dev_root->kobj, - &cpu_root_microcode_group); - - if (error) { - 
pr_err("Error creating microcode group!\n"); - goto out_driver; - } - error = microcode_dev_init(); if (error) - goto out_ucode_group; + goto out_driver; register_syscore_ops(&mc_syscore_ops); register_hotcpu_notifier(&mc_cpu_notifier); @@ -588,11 +559,7 @@ static int __init microcode_init(void) return 0; - out_ucode_group: - sysfs_remove_group(&cpu_subsys.dev_root->kobj, - &cpu_root_microcode_group); - - out_driver: +out_driver: get_online_cpus(); mutex_lock(µcode_mutex); @@ -601,7 +568,7 @@ static int __init microcode_init(void) mutex_unlock(µcode_mutex); put_online_cpus(); - out_pdev: +out_pdev: platform_device_unregister(microcode_pdev); return error; @@ -617,9 +584,6 @@ static void __exit microcode_exit(void) unregister_hotcpu_notifier(&mc_cpu_notifier); unregister_syscore_ops(&mc_syscore_ops); - sysfs_remove_group(&cpu_subsys.dev_root->kobj, - &cpu_root_microcode_group); - get_online_cpus(); mutex_lock(µcode_mutex); diff --git a/trunk/arch/x86/kernel/module.c b/trunk/arch/x86/kernel/module.c index 202494d2ec6e..f21fd94ac897 100644 --- a/trunk/arch/x86/kernel/module.c +++ b/trunk/arch/x86/kernel/module.c @@ -15,9 +15,6 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include #include #include @@ -33,14 +30,9 @@ #include #if 0 -#define DEBUGP(fmt, ...) \ - printk(KERN_DEBUG fmt, ##__VA_ARGS__) +#define DEBUGP printk #else -#define DEBUGP(fmt, ...) \ -do { \ - if (0) \ - printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ -} while (0) +#define DEBUGP(fmt...) #endif void *module_alloc(unsigned long size) @@ -64,8 +56,8 @@ int apply_relocate(Elf32_Shdr *sechdrs, Elf32_Sym *sym; uint32_t *location; - DEBUGP("Applying relocate section %u to %u\n", - relsec, sechdrs[relsec].sh_info); + DEBUGP("Applying relocate section %u to %u\n", relsec, + sechdrs[relsec].sh_info); for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { /* This is where to make the change */ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr @@ -85,7 +77,7 @@ int apply_relocate(Elf32_Shdr *sechdrs, *location += sym->st_value - (uint32_t)location; break; default: - pr_err("%s: Unknown relocation: %u\n", + printk(KERN_ERR "module %s: Unknown relocation: %u\n", me->name, ELF32_R_TYPE(rel[i].r_info)); return -ENOEXEC; } @@ -105,8 +97,8 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, void *loc; u64 val; - DEBUGP("Applying relocate section %u to %u\n", - relsec, sechdrs[relsec].sh_info); + DEBUGP("Applying relocate section %u to %u\n", relsec, + sechdrs[relsec].sh_info); for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { /* This is where to make the change */ loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr @@ -118,8 +110,8 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, + ELF64_R_SYM(rel[i].r_info); DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n", - (int)ELF64_R_TYPE(rel[i].r_info), - sym->st_value, rel[i].r_addend, (u64)loc); + (int)ELF64_R_TYPE(rel[i].r_info), + sym->st_value, rel[i].r_addend, (u64)loc); val = sym->st_value + rel[i].r_addend; @@ -148,7 +140,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, #endif break; default: - pr_err("%s: Unknown rela relocation: %llu\n", + printk(KERN_ERR "module %s: Unknown rela relocation: %llu\n", me->name, ELF64_R_TYPE(rel[i].r_info)); return -ENOEXEC; } @@ -156,9 +148,9 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, return 0; overflow: - pr_err("overflow in relocation type %d val %Lx\n", + printk(KERN_ERR "overflow in 
relocation type %d val %Lx\n", (int)ELF64_R_TYPE(rel[i].r_info), val); - pr_err("`%s' likely not compiled with -mcmodel=kernel\n", + printk(KERN_ERR "`%s' likely not compiled with -mcmodel=kernel\n", me->name); return -ENOEXEC; } diff --git a/trunk/arch/x86/kernel/nmi.c b/trunk/arch/x86/kernel/nmi.c index f84f5c57de35..a0b2f84457be 100644 --- a/trunk/arch/x86/kernel/nmi.c +++ b/trunk/arch/x86/kernel/nmi.c @@ -365,9 +365,8 @@ static __kprobes void default_do_nmi(struct pt_regs *regs) #ifdef CONFIG_X86_32 /* * For i386, NMIs use the same stack as the kernel, and we can - * add a workaround to the iret problem in C (preventing nested - * NMIs if an NMI takes a trap). Simply have 3 states the NMI - * can be in: + * add a workaround to the iret problem in C. Simply have 3 states + * the NMI can be in. * * 1) not running * 2) executing @@ -384,50 +383,32 @@ static __kprobes void default_do_nmi(struct pt_regs *regs) * If an NMI hits a breakpoint that executes an iret, another * NMI can preempt it. We do not want to allow this new NMI * to run, but we want to execute it when the first one finishes. - * We set the state to "latched", and the exit of the first NMI will - * perform a dec_return, if the result is zero (NOT_RUNNING), then - * it will simply exit the NMI handler. If not, the dec_return - * would have set the state to NMI_EXECUTING (what we want it to - * be when we are running). In this case, we simply jump back - * to rerun the NMI handler again, and restart the 'latched' NMI. - * - * No trap (breakpoint or page fault) should be hit before nmi_restart, - * thus there is no race between the first check of state for NOT_RUNNING - * and setting it to NMI_EXECUTING. The HW will prevent nested NMIs - * at this point. - * - * In case the NMI takes a page fault, we need to save off the CR2 - * because the NMI could have preempted another page fault and corrupt - * the CR2 that is about to be read. As nested NMIs must be restarted - * and they can not take breakpoints or page faults, the update of the - * CR2 must be done before converting the nmi state back to NOT_RUNNING. - * Otherwise, there would be a race of another nested NMI coming in - * after setting state to NOT_RUNNING but before updating the nmi_cr2. + * We set the state to "latched", and the first NMI will perform + * an cmpxchg on the state, and if it doesn't successfully + * reset the state to "not running" it will restart the next + * NMI. 
*/ enum nmi_states { - NMI_NOT_RUNNING = 0, + NMI_NOT_RUNNING, NMI_EXECUTING, NMI_LATCHED, }; static DEFINE_PER_CPU(enum nmi_states, nmi_state); -static DEFINE_PER_CPU(unsigned long, nmi_cr2); #define nmi_nesting_preprocess(regs) \ do { \ - if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { \ - this_cpu_write(nmi_state, NMI_LATCHED); \ + if (__get_cpu_var(nmi_state) != NMI_NOT_RUNNING) { \ + __get_cpu_var(nmi_state) = NMI_LATCHED; \ return; \ } \ - this_cpu_write(nmi_state, NMI_EXECUTING); \ - this_cpu_write(nmi_cr2, read_cr2()); \ - } while (0); \ - nmi_restart: + nmi_restart: \ + __get_cpu_var(nmi_state) = NMI_EXECUTING; \ + } while (0) #define nmi_nesting_postprocess() \ do { \ - if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) \ - write_cr2(this_cpu_read(nmi_cr2)); \ - if (this_cpu_dec_return(nmi_state)) \ + if (cmpxchg(&__get_cpu_var(nmi_state), \ + NMI_EXECUTING, NMI_NOT_RUNNING) != NMI_EXECUTING) \ goto nmi_restart; \ } while (0) #else /* x86_64 */ diff --git a/trunk/arch/x86/kernel/nmi_selftest.c b/trunk/arch/x86/kernel/nmi_selftest.c index 6d9582ec0324..149b8d9c6ad4 100644 --- a/trunk/arch/x86/kernel/nmi_selftest.c +++ b/trunk/arch/x86/kernel/nmi_selftest.c @@ -42,8 +42,7 @@ static int __init nmi_unk_cb(unsigned int val, struct pt_regs *regs) static void __init init_nmi_testsuite(void) { /* trap all the unknown NMIs we may generate */ - register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk", - __initdata); + register_nmi_handler_initonly(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk"); } static void __init cleanup_nmi_testsuite(void) @@ -65,8 +64,8 @@ static void __init test_nmi_ipi(struct cpumask *mask) { unsigned long timeout; - if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback, - NMI_FLAG_FIRST, "nmi_selftest", __initdata)) { + if (register_nmi_handler_initonly(NMI_LOCAL, test_nmi_ipi_callback, + NMI_FLAG_FIRST, "nmi_selftest")) { nmi_fail = FAILURE; return; } diff --git a/trunk/arch/x86/kernel/paravirt.c b/trunk/arch/x86/kernel/paravirt.c index 17fff18a1031..9ce885996fd7 100644 --- a/trunk/arch/x86/kernel/paravirt.c +++ b/trunk/arch/x86/kernel/paravirt.c @@ -352,7 +352,9 @@ struct pv_cpu_ops pv_cpu_ops = { #endif .wbinvd = native_wbinvd, .read_msr = native_read_msr_safe, + .rdmsr_regs = native_rdmsr_safe_regs, .write_msr = native_write_msr_safe, + .wrmsr_regs = native_wrmsr_safe_regs, .read_tsc = native_read_tsc, .read_pmc = native_read_pmc, .read_tscp = native_read_tscp, diff --git a/trunk/arch/x86/kernel/pci-calgary_64.c b/trunk/arch/x86/kernel/pci-calgary_64.c index 299d49302e7d..b72838bae64a 100644 --- a/trunk/arch/x86/kernel/pci-calgary_64.c +++ b/trunk/arch/x86/kernel/pci-calgary_64.c @@ -22,8 +22,6 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#define pr_fmt(fmt) "Calgary: " fmt - #include #include #include @@ -247,7 +245,7 @@ static unsigned long iommu_range_alloc(struct device *dev, offset = iommu_area_alloc(tbl->it_map, tbl->it_size, 0, npages, 0, boundary_size, 0); if (offset == ~0UL) { - pr_warn("IOMMU full\n"); + printk(KERN_WARNING "Calgary: IOMMU full.\n"); spin_unlock_irqrestore(&tbl->it_lock, flags); if (panic_on_overflow) panic("Calgary: fix the allocator.\n"); @@ -273,8 +271,8 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, entry = iommu_range_alloc(dev, tbl, npages); if (unlikely(entry == DMA_ERROR_CODE)) { - pr_warn("failed to allocate %u pages in iommu %p\n", - npages, tbl); + printk(KERN_WARNING "Calgary: failed to allocate %u pages in " + "iommu %p\n", npages, tbl); 
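The arch/x86/kernel/nmi.c hunk above (just before the nmi_selftest, paravirt and Calgary changes) restores the cmpxchg-based latch for nested NMIs that its comment describes: a second NMI arriving while one is executing only moves the state to NMI_LATCHED, and the running handler restarts itself when it fails to flip NMI_EXECUTING back to NMI_NOT_RUNNING. Below is a compilable userspace sketch of that state machine; GCC's __sync_val_compare_and_swap stands in for the kernel's per-CPU cmpxchg, and the inject_nested flag is invented purely to simulate a nested NMI.

#include <stdio.h>

enum nmi_states { NMI_NOT_RUNNING, NMI_EXECUTING, NMI_LATCHED };

static int nmi_state = NMI_NOT_RUNNING; /* per-CPU in the kernel */
static int inject_nested;               /* test knob, not in the kernel */

static void handle_one_nmi(void)
{
        printf("handling NMI\n");
        if (inject_nested) {
                inject_nested = 0;
                /* A second NMI arriving now may only latch itself. */
                if (nmi_state != NMI_NOT_RUNNING)
                        nmi_state = NMI_LATCHED;
        }
}

static void do_nmi(void)
{
        /* nmi_nesting_preprocess(): refuse to nest, just latch. */
        if (nmi_state != NMI_NOT_RUNNING) {
                nmi_state = NMI_LATCHED;
                return;
        }
nmi_restart:
        nmi_state = NMI_EXECUTING;

        handle_one_nmi();

        /*
         * nmi_nesting_postprocess(): if another NMI latched while we
         * ran, the cmpxchg fails and we loop to service it.
         */
        if (__sync_val_compare_and_swap(&nmi_state, NMI_EXECUTING,
                                        NMI_NOT_RUNNING) != NMI_EXECUTING)
                goto nmi_restart;
}

int main(void)
{
        inject_nested = 1;
        do_nmi();               /* prints "handling NMI" twice */
        return 0;
}

The '-' side of the same hunk shows the scheme being backed out here: this_cpu_dec_return() on the state plus a saved and restored CR2 value.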
return DMA_ERROR_CODE; } @@ -563,7 +561,8 @@ static void calgary_tce_cache_blast(struct iommu_table *tbl) i++; } while ((val & 0xff) != 0xff && i < 100); if (i == 100) - pr_warn("PCI bus not quiesced, continuing anyway\n"); + printk(KERN_WARNING "Calgary: PCI bus not quiesced, " + "continuing anyway\n"); /* invalidate TCE cache */ target = calgary_reg(bbar, tar_offset(tbl->it_busno)); @@ -605,7 +604,8 @@ static void calioc2_tce_cache_blast(struct iommu_table *tbl) i++; } while ((val64 & 0xff) != 0xff && i < 100); if (i == 100) - pr_warn("CalIOC2: PCI bus not quiesced, continuing anyway\n"); + printk(KERN_WARNING "CalIOC2: PCI bus not quiesced, " + "continuing anyway\n"); /* 3. poll Page Migration DEBUG for SoftStopFault */ target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_DEBUG); @@ -617,7 +617,8 @@ static void calioc2_tce_cache_blast(struct iommu_table *tbl) if (++count < 100) goto begin; else { - pr_warn("CalIOC2: too many SoftStopFaults, aborting TCE cache flush sequence!\n"); + printk(KERN_WARNING "CalIOC2: too many SoftStopFaults, " + "aborting TCE cache flush sequence!\n"); return; /* pray for the best */ } } @@ -839,8 +840,8 @@ static void calgary_dump_error_regs(struct iommu_table *tbl) plssr = be32_to_cpu(readl(target)); /* If no error, the agent ID in the CSR is not valid */ - pr_emerg("DMA error on Calgary PHB 0x%x, 0x%08x@CSR 0x%08x@PLSSR\n", - tbl->it_busno, csr, plssr); + printk(KERN_EMERG "Calgary: DMA error on Calgary PHB 0x%x, " + "0x%08x@CSR 0x%08x@PLSSR\n", tbl->it_busno, csr, plssr); } static void calioc2_dump_error_regs(struct iommu_table *tbl) @@ -866,21 +867,22 @@ static void calioc2_dump_error_regs(struct iommu_table *tbl) target = calgary_reg(bbar, phboff | 0x800); mck = be32_to_cpu(readl(target)); - pr_emerg("DMA error on CalIOC2 PHB 0x%x\n", tbl->it_busno); + printk(KERN_EMERG "Calgary: DMA error on CalIOC2 PHB 0x%x\n", + tbl->it_busno); - pr_emerg("0x%08x@CSR 0x%08x@PLSSR 0x%08x@CSMR 0x%08x@MCK\n", - csr, plssr, csmr, mck); + printk(KERN_EMERG "Calgary: 0x%08x@CSR 0x%08x@PLSSR 0x%08x@CSMR 0x%08x@MCK\n", + csr, plssr, csmr, mck); /* dump rest of error regs */ - pr_emerg(""); + printk(KERN_EMERG "Calgary: "); for (i = 0; i < ARRAY_SIZE(errregs); i++) { /* err regs are at 0x810 - 0x870 */ erroff = (0x810 + (i * 0x10)); target = calgary_reg(bbar, phboff | erroff); errregs[i] = be32_to_cpu(readl(target)); - pr_cont("0x%08x@0x%lx ", errregs[i], erroff); + printk("0x%08x@0x%lx ", errregs[i], erroff); } - pr_cont("\n"); + printk("\n"); /* root complex status */ target = calgary_reg(bbar, phboff | PHB_ROOT_COMPLEX_STATUS); diff --git a/trunk/arch/x86/kernel/process.c b/trunk/arch/x86/kernel/process.c index ef6a8456f719..735279e54e59 100644 --- a/trunk/arch/x86/kernel/process.c +++ b/trunk/arch/x86/kernel/process.c @@ -1,5 +1,3 @@ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include #include #include @@ -147,14 +145,16 @@ void show_regs_common(void) /* Board Name is optional */ board = dmi_get_system_info(DMI_BOARD_NAME); - printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s %s%s%s\n", - current->pid, current->comm, print_tainted(), - init_utsname()->release, - (int)strcspn(init_utsname()->version, " "), - init_utsname()->version, - vendor, product, - board ? "/" : "", - board ? 
board : ""); + printk(KERN_CONT "\n"); + printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s", + current->pid, current->comm, print_tainted(), + init_utsname()->release, + (int)strcspn(init_utsname()->version, " "), + init_utsname()->version); + printk(KERN_CONT " %s %s", vendor, product); + if (board) + printk(KERN_CONT "/%s", board); + printk(KERN_CONT "\n"); } void flush_thread(void) @@ -645,7 +645,7 @@ static void amd_e400_idle(void) amd_e400_c1e_detected = true; if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) mark_tsc_unstable("TSC halt in AMD C1E"); - pr_info("System has AMD C1E enabled\n"); + printk(KERN_INFO "System has AMD C1E enabled\n"); } } @@ -659,7 +659,8 @@ static void amd_e400_idle(void) */ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, &cpu); - pr_info("Switch to broadcast mode on CPU%d\n", cpu); + printk(KERN_INFO "Switch to broadcast mode on CPU%d\n", + cpu); } clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); @@ -680,7 +681,8 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP if (pm_idle == poll_idle && smp_num_siblings > 1) { - pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n"); + printk_once(KERN_WARNING "WARNING: polling idle and HT enabled," + " performance may degrade.\n"); } #endif if (pm_idle) @@ -690,11 +692,11 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) /* * One CPU supports mwait => All CPUs supports mwait */ - pr_info("using mwait in idle threads\n"); + printk(KERN_INFO "using mwait in idle threads.\n"); pm_idle = mwait_idle; } else if (cpu_has_amd_erratum(amd_erratum_400)) { /* E400: APIC timer interrupt does not wake up CPU from C1e */ - pr_info("using AMD E400 aware idle routine\n"); + printk(KERN_INFO "using AMD E400 aware idle routine\n"); pm_idle = amd_e400_idle; } else pm_idle = default_idle; @@ -713,7 +715,7 @@ static int __init idle_setup(char *str) return -EINVAL; if (!strcmp(str, "poll")) { - pr_info("using polling idle threads\n"); + printk("using polling idle threads.\n"); pm_idle = poll_idle; boot_option_idle_override = IDLE_POLL; } else if (!strcmp(str, "mwait")) { diff --git a/trunk/arch/x86/kernel/process_64.c b/trunk/arch/x86/kernel/process_64.c index 0a980c9d7cb8..61cdf7fdf099 100644 --- a/trunk/arch/x86/kernel/process_64.c +++ b/trunk/arch/x86/kernel/process_64.c @@ -117,10 +117,10 @@ void release_thread(struct task_struct *dead_task) { if (dead_task->mm) { if (dead_task->mm->context.size) { - pr_warn("WARNING: dead process %8s still has LDT? <%p/%d>\n", - dead_task->comm, - dead_task->mm->context.ldt, - dead_task->mm->context.size); + printk("WARNING: dead process %8s still has LDT? 
<%p/%d>\n", + dead_task->comm, + dead_task->mm->context.ldt, + dead_task->mm->context.size); BUG(); } } @@ -466,7 +466,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) task->thread.gs = addr; if (doit) { load_gs_index(0); - ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr); + ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr); } } put_cpu(); @@ -494,7 +494,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) /* set the selector to 0 to not confuse __switch_to */ loadsegment(fs, 0); - ret = wrmsrl_safe(MSR_FS_BASE, addr); + ret = checking_wrmsrl(MSR_FS_BASE, addr); } } put_cpu(); diff --git a/trunk/arch/x86/kernel/reboot.c b/trunk/arch/x86/kernel/reboot.c index 52190a938b4a..5de92f1abd76 100644 --- a/trunk/arch/x86/kernel/reboot.c +++ b/trunk/arch/x86/kernel/reboot.c @@ -1,5 +1,3 @@ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include #include #include @@ -22,12 +20,14 @@ #include #include #include -#include -#include -#include -#include -#include +#ifdef CONFIG_X86_32 +# include +# include +# include +#else +# include +#endif /* * Power off function, if any @@ -49,7 +49,7 @@ int reboot_force; */ static int reboot_default = 1; -#ifdef CONFIG_SMP +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) static int reboot_cpu = -1; #endif @@ -67,8 +67,8 @@ bool port_cf9_safe = false; * reboot=b[ios] | s[mp] | t[riple] | k[bd] | e[fi] [, [w]arm | [c]old] | p[ci] * warm Don't set the cold reboot flag * cold Set the cold reboot flag - * bios Reboot by jumping through the BIOS - * smp Reboot by executing reset on BSP or other CPU + * bios Reboot by jumping through the BIOS (only for X86_32) + * smp Reboot by executing reset on BSP or other CPU (only for X86_32) * triple Force a triple fault (init) * kbd Use the keyboard controller. cold reset (default) * acpi Use the RESET_REG in the FADT @@ -95,6 +95,7 @@ static int __init reboot_setup(char *str) reboot_mode = 0; break; +#ifdef CONFIG_X86_32 #ifdef CONFIG_SMP case 's': if (isdigit(*(str+1))) { @@ -111,6 +112,7 @@ static int __init reboot_setup(char *str) #endif /* CONFIG_SMP */ case 'b': +#endif case 'a': case 'k': case 't': @@ -136,6 +138,7 @@ static int __init reboot_setup(char *str) __setup("reboot=", reboot_setup); +#ifdef CONFIG_X86_32 /* * Reboot options and system auto-detection code provided by * Dell Inc. so their systems "just work". :-) @@ -149,14 +152,16 @@ static int __init set_bios_reboot(const struct dmi_system_id *d) { if (reboot_type != BOOT_BIOS) { reboot_type = BOOT_BIOS; - pr_info("%s series board detected. Selecting %s-method for reboots.\n", - "BIOS", d->ident); + printk(KERN_INFO "%s series board detected. Selecting BIOS-method for reboots.\n", d->ident); } return 0; } -void __noreturn machine_real_restart(unsigned int type) +void machine_real_restart(unsigned int type) { + void (*restart_lowmem)(unsigned int) = (void (*)(unsigned int)) + real_mode_header->machine_real_restart_asm; + local_irq_disable(); /* @@ -176,28 +181,25 @@ void __noreturn machine_real_restart(unsigned int type) /* * Switch back to the initial page table. */ -#ifdef CONFIG_X86_32 load_cr3(initial_page_table); -#else - write_cr3(real_mode_header->trampoline_pgd); -#endif + + /* + * Write 0x1234 to absolute memory location 0x472. The BIOS reads + * this on booting to tell it to "Bypass memory test (also warm + * boot)". This seems like a fairly standard thing that gets set by + * REBOOT.COM programs, and the previous reset routine did this + * too. 
*/ + *((unsigned short *)0x472) = reboot_mode; /* Jump to the identity-mapped low memory code */ -#ifdef CONFIG_X86_32 - asm volatile("jmpl *%0" : : - "rm" (real_mode_header->machine_real_restart_asm), - "a" (type)); -#else - asm volatile("ljmpl *%0" : : - "m" (real_mode_header->machine_real_restart_asm), - "D" (type)); -#endif - unreachable(); + restart_lowmem(type); } #ifdef CONFIG_APM_MODULE EXPORT_SYMBOL(machine_real_restart); #endif +#endif /* CONFIG_X86_32 */ + /* * Some Apple MacBook and MacBookPro's needs reboot=p to be able to reboot */ @@ -205,8 +207,8 @@ static int __init set_pci_reboot(const struct dmi_system_id *d) { if (reboot_type != BOOT_CF9) { reboot_type = BOOT_CF9; - pr_info("%s series board detected. Selecting %s-method for reboots.\n", - "PCI", d->ident); + printk(KERN_INFO "%s series board detected. " + "Selecting PCI-method for reboots.\n", d->ident); } return 0; } @@ -215,16 +217,17 @@ static int __init set_kbd_reboot(const struct dmi_system_id *d) { if (reboot_type != BOOT_KBD) { reboot_type = BOOT_KBD; - pr_info("%s series board detected. Selecting %s-method for reboot.\n", - "KBD", d->ident); + printk(KERN_INFO "%s series board detected. Selecting KBD-method for reboot.\n", d->ident); } return 0; } /* - * This is a single dmi_table handling all reboot quirks. + * This is a single dmi_table handling all reboot quirks. Note that + * REBOOT_BIOS is only available for 32bit */ static struct dmi_system_id __initdata reboot_dmi_table[] = { +#ifdef CONFIG_X86_32 { /* Handle problems with rebooting on Dell E520's */ .callback = set_bios_reboot, .ident = "Dell E520", @@ -374,6 +377,7 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { DMI_MATCH(DMI_BOARD_NAME, "P4S800"), }, }, +#endif /* CONFIG_X86_32 */ { /* Handle reboot issue on Acer Aspire one */ .callback = set_kbd_reboot, @@ -580,11 +584,13 @@ static void native_machine_emergency_restart(void) reboot_type = BOOT_KBD; break; +#ifdef CONFIG_X86_32 case BOOT_BIOS: machine_real_restart(MRR_BIOS); reboot_type = BOOT_KBD; break; +#endif case BOOT_ACPI: acpi_reboot(); @@ -626,10 +632,12 @@ void native_machine_shutdown(void) /* The boot cpu is always logical cpu 0 */ int reboot_cpu_id = 0; +#ifdef CONFIG_X86_32 /* See if there has been given a command line override */ if ((reboot_cpu != -1) && (reboot_cpu < nr_cpu_ids) && cpu_online(reboot_cpu)) reboot_cpu_id = reboot_cpu; +#endif /* Make certain the cpu I'm about to reboot on is online */ if (!cpu_online(reboot_cpu_id)) @@ -670,7 +678,7 @@ static void __machine_emergency_restart(int emergency) static void native_machine_restart(char *__unused) { - pr_notice("machine restart\n"); + printk("machine restart\n"); if (!reboot_force) machine_shutdown(); diff --git a/trunk/arch/x86/kernel/setup.c b/trunk/arch/x86/kernel/setup.c index f4b9b80e1b95..16be6dc14db1 100644 --- a/trunk/arch/x86/kernel/setup.c +++ b/trunk/arch/x86/kernel/setup.c @@ -1031,6 +1031,8 @@ void __init setup_arch(char **cmdline_p) x86_init.timers.wallclock_init(); + x86_platform.wallclock_init(); + mcheck_init(); arch_init_ideal_nops(); diff --git a/trunk/arch/x86/kernel/signal.c b/trunk/arch/x86/kernel/signal.c index b280908a376e..21af737053aa 100644 --- a/trunk/arch/x86/kernel/signal.c +++ b/trunk/arch/x86/kernel/signal.c @@ -6,9 +6,6 @@ * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes * 2000-2002 x86-64 support by Andi Kleen */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include #include #include @@ -817,7 +814,7 @@ void signal_fault(struct pt_regs *regs, void __user *frame, 
char *where) me->comm, me->pid, where, frame, regs->ip, regs->sp, regs->orig_ax); print_vma_addr(" in ", regs->ip); - pr_cont("\n"); + printk(KERN_CONT "\n"); } force_sig(SIGSEGV, me); diff --git a/trunk/arch/x86/kernel/smpboot.c b/trunk/arch/x86/kernel/smpboot.c index c1a310fb8309..7bd8a0823654 100644 --- a/trunk/arch/x86/kernel/smpboot.c +++ b/trunk/arch/x86/kernel/smpboot.c @@ -1,4 +1,4 @@ - /* +/* * x86 SMP booting functions * * (c) 1995 Alan Cox, Building #3 @@ -39,8 +39,6 @@ * Glauber Costa : i386 and x86_64 integration */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include #include #include @@ -186,7 +184,7 @@ static void __cpuinit smp_callin(void) * boards) */ - pr_debug("CALLIN, before setup_local_APIC()\n"); + pr_debug("CALLIN, before setup_local_APIC().\n"); if (apic->smp_callin_clear_local_apic) apic->smp_callin_clear_local_apic(); setup_local_APIC(); @@ -257,13 +255,22 @@ notrace static void __cpuinit start_secondary(void *unused) check_tsc_sync_target(); /* + * We need to hold call_lock, so there is no inconsistency + * between the time smp_call_function() determines number of + * IPI recipients, and the time when the determination is made + * for which cpus receive the IPI. Holding this + * lock helps us to not include this cpu in a currently in progress + * smp_call_function(). + * * We need to hold vector_lock so there the set of online cpus * does not change while we are assigning vectors to cpus. Holding * this lock ensures we don't half assign or remove an irq from a cpu. */ + ipi_call_lock(); lock_vector_lock(); set_cpu_online(smp_processor_id(), true); unlock_vector_lock(); + ipi_call_unlock(); per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; x86_platform.nmi_init(); @@ -425,16 +432,17 @@ static void impress_friends(void) /* * Allow the user to impress friends. */ - pr_debug("Before bogomips\n"); + pr_debug("Before bogomips.\n"); for_each_possible_cpu(cpu) if (cpumask_test_cpu(cpu, cpu_callout_mask)) bogosum += cpu_data(cpu).loops_per_jiffy; - pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n", + printk(KERN_INFO + "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100); - pr_debug("Before bogocount - setting activated=1\n"); + pr_debug("Before bogocount - setting activated=1.\n"); } void __inquire_remote_apic(int apicid) @@ -444,17 +452,18 @@ void __inquire_remote_apic(int apicid) int timeout; u32 status; - pr_info("Inquiring remote APIC 0x%x...\n", apicid); + printk(KERN_INFO "Inquiring remote APIC 0x%x...\n", apicid); for (i = 0; i < ARRAY_SIZE(regs); i++) { - pr_info("... APIC 0x%x %s: ", apicid, names[i]); + printk(KERN_INFO "... APIC 0x%x %s: ", apicid, names[i]); /* * Wait for idle. 
*/ status = safe_apic_wait_icr_idle(); if (status) - pr_cont("a previous APIC delivery may have failed\n"); + printk(KERN_CONT + "a previous APIC delivery may have failed\n"); apic_icr_write(APIC_DM_REMRD | regs[i], apicid); @@ -467,10 +476,10 @@ void __inquire_remote_apic(int apicid) switch (status) { case APIC_ICR_RR_VALID: status = apic_read(APIC_RRR); - pr_cont("%08x\n", status); + printk(KERN_CONT "%08x\n", status); break; default: - pr_cont("failed\n"); + printk(KERN_CONT "failed\n"); } } } @@ -504,12 +513,12 @@ wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip) apic_write(APIC_ESR, 0); accept_status = (apic_read(APIC_ESR) & 0xEF); } - pr_debug("NMI sent\n"); + pr_debug("NMI sent.\n"); if (send_status) - pr_err("APIC never delivered???\n"); + printk(KERN_ERR "APIC never delivered???\n"); if (accept_status) - pr_err("APIC delivery error (%lx)\n", accept_status); + printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status); return (send_status | accept_status); } @@ -531,7 +540,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) apic_read(APIC_ESR); } - pr_debug("Asserting INIT\n"); + pr_debug("Asserting INIT.\n"); /* * Turn INIT on target chip @@ -547,7 +556,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) mdelay(10); - pr_debug("Deasserting INIT\n"); + pr_debug("Deasserting INIT.\n"); /* Target chip */ /* Send IPI */ @@ -580,14 +589,14 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) /* * Run STARTUP IPI loop. */ - pr_debug("#startup loops: %d\n", num_starts); + pr_debug("#startup loops: %d.\n", num_starts); for (j = 1; j <= num_starts; j++) { - pr_debug("Sending STARTUP #%d\n", j); + pr_debug("Sending STARTUP #%d.\n", j); if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ apic_write(APIC_ESR, 0); apic_read(APIC_ESR); - pr_debug("After apic_write\n"); + pr_debug("After apic_write.\n"); /* * STARTUP IPI @@ -604,7 +613,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) */ udelay(300); - pr_debug("Startup point 1\n"); + pr_debug("Startup point 1.\n"); pr_debug("Waiting for send to finish...\n"); send_status = safe_apic_wait_icr_idle(); @@ -619,12 +628,12 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) if (send_status || accept_status) break; } - pr_debug("After Startup\n"); + pr_debug("After Startup.\n"); if (send_status) - pr_err("APIC never delivered???\n"); + printk(KERN_ERR "APIC never delivered???\n"); if (accept_status) - pr_err("APIC delivery error (%lx)\n", accept_status); + printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status); return (send_status | accept_status); } @@ -638,11 +647,11 @@ static void __cpuinit announce_cpu(int cpu, int apicid) if (system_state == SYSTEM_BOOTING) { if (node != current_node) { if (current_node > (-1)) - pr_cont(" OK\n"); + pr_cont(" Ok.\n"); current_node = node; pr_info("Booting Node %3d, Processors ", node); } - pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " OK\n" : ""); + pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " Ok.\n" : ""); return; } else pr_info("Booting Node %d Processor %d APIC 0x%x\n", @@ -722,9 +731,9 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle) /* * allow APs to start initializing. 
*/ - pr_debug("Before Callout %d\n", cpu); + pr_debug("Before Callout %d.\n", cpu); cpumask_set_cpu(cpu, cpu_callout_mask); - pr_debug("After Callout %d\n", cpu); + pr_debug("After Callout %d.\n", cpu); /* * Wait 5s total for a response @@ -752,7 +761,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle) pr_err("CPU%d: Stuck ??\n", cpu); else /* trampoline code not run */ - pr_err("CPU%d: Not responding\n", cpu); + pr_err("CPU%d: Not responding.\n", cpu); if (apic->inquire_remote_apic) apic->inquire_remote_apic(apicid); } @@ -797,7 +806,7 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle) if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid || !physid_isset(apicid, phys_cpu_present_map) || !apic->apic_id_valid(apicid)) { - pr_err("%s: bad cpu %d\n", __func__, cpu); + printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu); return -EINVAL; } @@ -878,8 +887,9 @@ static int __init smp_sanity_check(unsigned max_cpus) unsigned int cpu; unsigned nr; - pr_warn("More than 8 CPUs detected - skipping them\n" - "Use CONFIG_X86_BIGSMP\n"); + printk(KERN_WARNING + "More than 8 CPUs detected - skipping them.\n" + "Use CONFIG_X86_BIGSMP.\n"); nr = 0; for_each_present_cpu(cpu) { @@ -900,7 +910,8 @@ static int __init smp_sanity_check(unsigned max_cpus) #endif if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) { - pr_warn("weird, boot CPU (#%d) not listed by the BIOS\n", + printk(KERN_WARNING + "weird, boot CPU (#%d) not listed by the BIOS.\n", hard_smp_processor_id()); physid_set(hard_smp_processor_id(), phys_cpu_present_map); @@ -912,10 +923,11 @@ static int __init smp_sanity_check(unsigned max_cpus) */ if (!smp_found_config && !acpi_lapic) { preempt_enable(); - pr_notice("SMP motherboard not detected\n"); + printk(KERN_NOTICE "SMP motherboard not detected.\n"); disable_smp(); if (APIC_init_uniprocessor()) - pr_notice("Local APIC not detected. Using dummy APIC emulation.\n"); + printk(KERN_NOTICE "Local APIC not detected." + " Using dummy APIC emulation.\n"); return -1; } @@ -924,8 +936,9 @@ static int __init smp_sanity_check(unsigned max_cpus) * CPU too, but we do it for the sake of robustness anyway. */ if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) { - pr_notice("weird, boot CPU (#%d) not listed by the BIOS\n", - boot_cpu_physical_apicid); + printk(KERN_NOTICE + "weird, boot CPU (#%d) not listed by the BIOS.\n", + boot_cpu_physical_apicid); physid_set(hard_smp_processor_id(), phys_cpu_present_map); } preempt_enable(); @@ -938,7 +951,8 @@ static int __init smp_sanity_check(unsigned max_cpus) if (!disable_apic) { pr_err("BIOS bug, local APIC #%d not detected!...\n", boot_cpu_physical_apicid); - pr_err("... forcing use of dummy APIC emulation (tell your hw vendor)\n"); + pr_err("... forcing use of dummy APIC emulation." + "(tell your hw vendor)\n"); } smpboot_clear_io_apic(); disable_ioapic_support(); @@ -951,7 +965,7 @@ static int __init smp_sanity_check(unsigned max_cpus) * If SMP should be disabled, then really disable it! */ if (!max_cpus) { - pr_info("SMP mode deactivated\n"); + printk(KERN_INFO "SMP mode deactivated.\n"); smpboot_clear_io_apic(); connect_bsp_APIC(); @@ -1003,7 +1017,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) if (smp_sanity_check(max_cpus) < 0) { - pr_info("SMP disabled\n"); + printk(KERN_INFO "SMP disabled\n"); disable_smp(); goto out; } @@ -1041,7 +1055,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) * Set up local APIC timer on boot CPU. 
*/ - pr_info("CPU%d: ", 0); + printk(KERN_INFO "CPU%d: ", 0); print_cpu_info(&cpu_data(0)); x86_init.timers.setup_percpu_clockev(); @@ -1091,7 +1105,7 @@ void __init native_smp_prepare_boot_cpu(void) void __init native_smp_cpus_done(unsigned int max_cpus) { - pr_debug("Boot done\n"); + pr_debug("Boot done.\n"); nmi_selftest(); impress_friends(); @@ -1152,7 +1166,8 @@ __init void prefill_possible_map(void) /* nr_cpu_ids could be reduced via nr_cpus= */ if (possible > nr_cpu_ids) { - pr_warn("%d Processors exceeds NR_CPUS limit of %d\n", + printk(KERN_WARNING + "%d Processors exceeds NR_CPUS limit of %d\n", possible, nr_cpu_ids); possible = nr_cpu_ids; } @@ -1161,12 +1176,13 @@ __init void prefill_possible_map(void) if (!setup_max_cpus) #endif if (possible > i) { - pr_warn("%d Processors exceeds max_cpus limit of %u\n", + printk(KERN_WARNING + "%d Processors exceeds max_cpus limit of %u\n", possible, setup_max_cpus); possible = i; } - pr_info("Allowing %d CPUs, %d hotplug CPUs\n", + printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", possible, max_t(int, possible - num_processors, 0)); for (i = 0; i < possible; i++) diff --git a/trunk/arch/x86/kernel/traps.c b/trunk/arch/x86/kernel/traps.c index b481341c9369..05b31d92f69c 100644 --- a/trunk/arch/x86/kernel/traps.c +++ b/trunk/arch/x86/kernel/traps.c @@ -9,9 +9,6 @@ /* * Handle hardware traps and faults. */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include #include #include @@ -146,11 +143,12 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, #ifdef CONFIG_X86_64 if (show_unhandled_signals && unhandled_signal(tsk, signr) && printk_ratelimit()) { - pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx", - tsk->comm, tsk->pid, str, - regs->ip, regs->sp, error_code); + printk(KERN_INFO + "%s[%d] trap %s ip:%lx sp:%lx error:%lx", + tsk->comm, tsk->pid, str, + regs->ip, regs->sp, error_code); print_vma_addr(" in ", regs->ip); - pr_cont("\n"); + printk("\n"); } #endif @@ -271,11 +269,12 @@ do_general_protection(struct pt_regs *regs, long error_code) if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && printk_ratelimit()) { - pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx", + printk(KERN_INFO + "%s[%d] general protection ip:%lx sp:%lx error:%lx", tsk->comm, task_pid_nr(tsk), regs->ip, regs->sp, error_code); print_vma_addr(" in ", regs->ip); - pr_cont("\n"); + printk("\n"); } force_sig(SIGSEGV, tsk); @@ -571,7 +570,7 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code) conditional_sti(regs); #if 0 /* No need to warn about this any longer. 
*/ - pr_info("Ignoring P6 Local APIC Spurious Interrupt Bug...\n"); + printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n"); #endif } diff --git a/trunk/arch/x86/kernel/tsc.c b/trunk/arch/x86/kernel/tsc.c index cfa5d4f7ca56..fc0a147e3727 100644 --- a/trunk/arch/x86/kernel/tsc.c +++ b/trunk/arch/x86/kernel/tsc.c @@ -1,5 +1,3 @@ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include #include #include @@ -86,7 +84,8 @@ EXPORT_SYMBOL_GPL(check_tsc_unstable); #ifdef CONFIG_X86_TSC int __init notsc_setup(char *str) { - pr_warn("Kernel compiled with CONFIG_X86_TSC, cannot disable TSC completely\n"); + printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, " + "cannot disable TSC completely.\n"); tsc_disabled = 1; return 1; } @@ -374,7 +373,7 @@ static unsigned long quick_pit_calibrate(void) goto success; } } - pr_err("Fast TSC calibration failed\n"); + printk("Fast TSC calibration failed\n"); return 0; success: @@ -393,7 +392,7 @@ static unsigned long quick_pit_calibrate(void) */ delta *= PIT_TICK_RATE; do_div(delta, i*256*1000); - pr_info("Fast TSC calibration using PIT\n"); + printk("Fast TSC calibration using PIT\n"); return delta; } @@ -488,8 +487,9 @@ unsigned long native_calibrate_tsc(void) * use the reference value, as it is more precise. */ if (delta >= 90 && delta <= 110) { - pr_info("PIT calibration matches %s. %d loops\n", - hpet ? "HPET" : "PMTIMER", i + 1); + printk(KERN_INFO + "TSC: PIT calibration matches %s. %d loops\n", + hpet ? "HPET" : "PMTIMER", i + 1); return tsc_ref_min; } @@ -511,36 +511,38 @@ unsigned long native_calibrate_tsc(void) */ if (tsc_pit_min == ULONG_MAX) { /* PIT gave no useful value */ - pr_warn("Unable to calibrate against PIT\n"); + printk(KERN_WARNING "TSC: Unable to calibrate against PIT\n"); /* We don't have an alternative source, disable TSC */ if (!hpet && !ref1 && !ref2) { - pr_notice("No reference (HPET/PMTIMER) available\n"); + printk("TSC: No reference (HPET/PMTIMER) available\n"); return 0; } /* The alternative source failed as well, disable TSC */ if (tsc_ref_min == ULONG_MAX) { - pr_warn("HPET/PMTIMER calibration failed\n"); + printk(KERN_WARNING "TSC: HPET/PMTIMER calibration " + "failed.\n"); return 0; } /* Use the alternative source */ - pr_info("using %s reference calibration\n", - hpet ? "HPET" : "PMTIMER"); + printk(KERN_INFO "TSC: using %s reference calibration\n", + hpet ? "HPET" : "PMTIMER"); return tsc_ref_min; } /* We don't have an alternative source, use the PIT calibration value */ if (!hpet && !ref1 && !ref2) { - pr_info("Using PIT calibration value\n"); + printk(KERN_INFO "TSC: Using PIT calibration value\n"); return tsc_pit_min; } /* The alternative source failed, use the PIT calibration value */ if (tsc_ref_min == ULONG_MAX) { - pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n"); + printk(KERN_WARNING "TSC: HPET/PMTIMER calibration failed. " + "Using PIT calibration\n"); return tsc_pit_min; } @@ -549,9 +551,9 @@ unsigned long native_calibrate_tsc(void) * the PIT value as we know that there are PMTIMERs around * running at double speed. At least we let the user know: */ - pr_warn("PIT calibration deviates from %s: %lu %lu\n", - hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min); - pr_info("Using PIT calibration value\n"); + printk(KERN_WARNING "TSC: PIT calibration deviates from %s: %lu %lu.\n", + hpet ? 
"HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min); + printk(KERN_INFO "TSC: Using PIT calibration value\n"); return tsc_pit_min; } @@ -783,7 +785,7 @@ void mark_tsc_unstable(char *reason) tsc_unstable = 1; sched_clock_stable = 0; disable_sched_clock_irqtime(); - pr_info("Marking TSC unstable due to %s\n", reason); + printk(KERN_INFO "Marking TSC unstable due to %s\n", reason); /* Change only the rating, when not registered */ if (clocksource_tsc.mult) clocksource_mark_unstable(&clocksource_tsc); @@ -910,9 +912,9 @@ static void tsc_refine_calibration_work(struct work_struct *work) goto out; tsc_khz = freq; - pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n", - (unsigned long)tsc_khz / 1000, - (unsigned long)tsc_khz % 1000); + printk(KERN_INFO "Refined TSC clocksource calibration: " + "%lu.%03lu MHz.\n", (unsigned long)tsc_khz / 1000, + (unsigned long)tsc_khz % 1000); out: clocksource_register_khz(&clocksource_tsc, tsc_khz); @@ -968,9 +970,9 @@ void __init tsc_init(void) return; } - pr_info("Detected %lu.%03lu MHz processor\n", - (unsigned long)cpu_khz / 1000, - (unsigned long)cpu_khz % 1000); + printk("Detected %lu.%03lu MHz processor.\n", + (unsigned long)cpu_khz / 1000, + (unsigned long)cpu_khz % 1000); /* * Secondary CPUs do not run through tsc_init(), so set up diff --git a/trunk/arch/x86/kernel/uprobes.c b/trunk/arch/x86/kernel/uprobes.c index 36fd42091fa7..dc4e910a7d96 100644 --- a/trunk/arch/x86/kernel/uprobes.c +++ b/trunk/arch/x86/kernel/uprobes.c @@ -409,10 +409,9 @@ static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, * arch_uprobe_analyze_insn - instruction analysis including validity and fixups. * @mm: the probed address space. * @arch_uprobe: the probepoint information. - * @addr: virtual address at which to install the probepoint * Return 0 on success or a -ve number on error. 
*/ -int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr) +int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm) { int ret; struct insn insn; diff --git a/trunk/arch/x86/kernel/vm86_32.c b/trunk/arch/x86/kernel/vm86_32.c index 54abcc0baf23..255f58ae71e8 100644 --- a/trunk/arch/x86/kernel/vm86_32.c +++ b/trunk/arch/x86/kernel/vm86_32.c @@ -28,8 +28,6 @@ * */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include #include #include @@ -139,14 +137,14 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs) local_irq_enable(); if (!current->thread.vm86_info) { - pr_alert("no vm86_info: BAD\n"); + printk("no vm86_info: BAD\n"); do_exit(SIGSEGV); } set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | current->thread.v86mask); tmp = copy_vm86_regs_to_user(¤t->thread.vm86_info->regs, regs); tmp += put_user(current->thread.screen_bitmap, ¤t->thread.vm86_info->screen_bitmap); if (tmp) { - pr_alert("could not access userspace vm86_info\n"); + printk("vm86: could not access userspace vm86_info\n"); do_exit(SIGSEGV); } diff --git a/trunk/arch/x86/kernel/vsmp_64.c b/trunk/arch/x86/kernel/vsmp_64.c index 992f890283e9..8eeb55a551b4 100644 --- a/trunk/arch/x86/kernel/vsmp_64.c +++ b/trunk/arch/x86/kernel/vsmp_64.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include @@ -96,18 +95,6 @@ static void __init set_vsmp_pv_ops(void) ctl = readl(address + 4); printk(KERN_INFO "vSMP CTL: capabilities:0x%08x control:0x%08x\n", cap, ctl); - - /* If possible, let the vSMP foundation route the interrupt optimally */ -#ifdef CONFIG_SMP - if (cap & ctl & BIT(8)) { - ctl &= ~BIT(8); -#ifdef CONFIG_PROC_FS - /* Don't let users change irq affinity via procfs */ - no_irq_affinity = 1; -#endif - } -#endif - if (cap & ctl & (1 << 4)) { /* Setup irq ops and turn on vSMP IRQ fastpath handling */ pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable); @@ -115,11 +102,12 @@ static void __init set_vsmp_pv_ops(void) pv_irq_ops.save_fl = PV_CALLEE_SAVE(vsmp_save_fl); pv_irq_ops.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl); pv_init_ops.patch = vsmp_patch; + ctl &= ~(1 << 4); + writel(ctl, address + 4); + ctl = readl(address + 4); + printk(KERN_INFO "vSMP CTL: control set to:0x%08x\n", ctl); } - writel(ctl, address + 4); - ctl = readl(address + 4); - pr_info("vSMP CTL: control set to:0x%08x\n", ctl); early_iounmap(address, 8); } @@ -199,36 +187,12 @@ static void __init vsmp_cap_cpus(void) #endif } -static int apicid_phys_pkg_id(int initial_apic_id, int index_msb) -{ - return hard_smp_processor_id() >> index_msb; -} - -/* - * In vSMP, all cpus should be capable of handling interrupts, regardless of - * the APIC used. - */ -static void fill_vector_allocation_domain(int cpu, struct cpumask *retmask, - const struct cpumask *mask) -{ - cpumask_setall(retmask); -} - -static void vsmp_apic_post_init(void) -{ - /* need to update phys_pkg_id */ - apic->phys_pkg_id = apicid_phys_pkg_id; - apic->vector_allocation_domain = fill_vector_allocation_domain; -} - void __init vsmp_init(void) { detect_vsmp_box(); if (!is_vsmp_box()) return; - x86_platform.apic_post_init = vsmp_apic_post_init; - vsmp_cap_cpus(); set_vsmp_pv_ops(); diff --git a/trunk/arch/x86/kernel/vsyscall_64.c b/trunk/arch/x86/kernel/vsyscall_64.c index 8d141b309046..5db36caf4289 100644 --- a/trunk/arch/x86/kernel/vsyscall_64.c +++ b/trunk/arch/x86/kernel/vsyscall_64.c @@ -18,8 +18,6 @@ * use the vDSO. 
*/ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include #include #include @@ -113,13 +111,18 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, static void warn_bad_vsyscall(const char *level, struct pt_regs *regs, const char *message) { - if (!show_unhandled_signals) + static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); + struct task_struct *tsk; + + if (!show_unhandled_signals || !__ratelimit(&rs)) return; - pr_notice_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n", - level, current->comm, task_pid_nr(current), - message, regs->ip, regs->cs, - regs->sp, regs->ax, regs->si, regs->di); + tsk = current; + + printk("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n", + level, tsk->comm, task_pid_nr(tsk), + message, regs->ip, regs->cs, + regs->sp, regs->ax, regs->si, regs->di); } static int addr_to_vsyscall_nr(unsigned long addr) diff --git a/trunk/arch/x86/kernel/x8664_ksyms_64.c b/trunk/arch/x86/kernel/x8664_ksyms_64.c index 6020f6f5927c..9796c2f3d074 100644 --- a/trunk/arch/x86/kernel/x8664_ksyms_64.c +++ b/trunk/arch/x86/kernel/x8664_ksyms_64.c @@ -28,7 +28,6 @@ EXPORT_SYMBOL(__put_user_8); EXPORT_SYMBOL(copy_user_generic_string); EXPORT_SYMBOL(copy_user_generic_unrolled); -EXPORT_SYMBOL(copy_user_enhanced_fast_string); EXPORT_SYMBOL(__copy_user_nocache); EXPORT_SYMBOL(_copy_from_user); EXPORT_SYMBOL(_copy_to_user); diff --git a/trunk/arch/x86/kernel/x86_init.c b/trunk/arch/x86/kernel/x86_init.c index 9f3167e891ef..35c5e543f550 100644 --- a/trunk/arch/x86/kernel/x86_init.c +++ b/trunk/arch/x86/kernel/x86_init.c @@ -29,6 +29,7 @@ void __init x86_init_uint_noop(unsigned int unused) { } void __init x86_init_pgd_noop(pgd_t *unused) { } int __init iommu_init_noop(void) { return 0; } void iommu_shutdown_noop(void) { } +void wallclock_init_noop(void) { } /* * The platform setup functions are preset with the default functions @@ -100,6 +101,7 @@ static int default_i8042_detect(void) { return 1; }; struct x86_platform_ops x86_platform = { .calibrate_tsc = native_calibrate_tsc, + .wallclock_init = wallclock_init_noop, .get_wallclock = mach_get_cmos_time, .set_wallclock = mach_set_rtc_mmss, .iommu_shutdown = iommu_shutdown_noop, diff --git a/trunk/arch/x86/kernel/xsave.c b/trunk/arch/x86/kernel/xsave.c index 3d3e20709119..bd18149b2b0f 100644 --- a/trunk/arch/x86/kernel/xsave.c +++ b/trunk/arch/x86/kernel/xsave.c @@ -3,9 +3,6 @@ * * Author: Suresh Siddha */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include #include #include @@ -165,7 +162,7 @@ int save_i387_xstate(void __user *buf) BUG_ON(sig_xstate_size < xstate_size); if ((unsigned long)buf % 64) - pr_err("%s: bad fpstate %p\n", __func__, buf); + printk("save_i387_xstate: bad fpstate %p\n", buf); if (!used_math()) return 0; @@ -425,7 +422,7 @@ static void __init xstate_enable_boot_cpu(void) pcntxt_mask = eax + ((u64)edx << 32); if ((pcntxt_mask & XSTATE_FPSSE) != XSTATE_FPSSE) { - pr_err("FP/SSE not shown under xsave features 0x%llx\n", + printk(KERN_ERR "FP/SSE not shown under xsave features 0x%llx\n", pcntxt_mask); BUG(); } @@ -448,8 +445,9 @@ static void __init xstate_enable_boot_cpu(void) setup_xstate_init(); - pr_info("enabled xstate_bv 0x%llx, cntxt size 0x%x\n", - pcntxt_mask, xstate_size); + printk(KERN_INFO "xsave/xrstor: enabled xstate_bv 0x%llx, " + "cntxt size 0x%x\n", + pcntxt_mask, xstate_size); } /* diff --git a/trunk/arch/x86/kvm/pmu.c b/trunk/arch/x86/kvm/pmu.c index 9b7ec1150ab0..2e88438ffd83 100644 --- a/trunk/arch/x86/kvm/pmu.c +++ 
b/trunk/arch/x86/kvm/pmu.c @@ -80,10 +80,10 @@ static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx) static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx) { - if (idx < INTEL_PMC_IDX_FIXED) + if (idx < X86_PMC_IDX_FIXED) return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0); else - return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED); + return get_fixed_pmc_idx(pmu, idx - X86_PMC_IDX_FIXED); } void kvm_deliver_pmi(struct kvm_vcpu *vcpu) @@ -291,7 +291,7 @@ static void reprogram_idx(struct kvm_pmu *pmu, int idx) if (pmc_is_gp(pmc)) reprogram_gp_counter(pmc, pmc->eventsel); else { - int fidx = idx - INTEL_PMC_IDX_FIXED; + int fidx = idx - X86_PMC_IDX_FIXED; reprogram_fixed_counter(pmc, fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx); } @@ -452,7 +452,7 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu) return; pmu->nr_arch_gp_counters = min((int)(entry->eax >> 8) & 0xff, - INTEL_PMC_MAX_GENERIC); + X86_PMC_MAX_GENERIC); pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << ((entry->eax >> 16) & 0xff)) - 1; bitmap_len = (entry->eax >> 24) & 0xff; @@ -462,13 +462,13 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu) pmu->nr_arch_fixed_counters = 0; } else { pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f), - INTEL_PMC_MAX_FIXED); + X86_PMC_MAX_FIXED); pmu->counter_bitmask[KVM_PMC_FIXED] = ((u64)1 << ((entry->edx >> 5) & 0xff)) - 1; } pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) | - (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED); + (((1ull << pmu->nr_arch_fixed_counters) - 1) << X86_PMC_IDX_FIXED); pmu->global_ctrl_mask = ~pmu->global_ctrl; } @@ -478,15 +478,15 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu) struct kvm_pmu *pmu = &vcpu->arch.pmu; memset(pmu, 0, sizeof(*pmu)); - for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) { + for (i = 0; i < X86_PMC_MAX_GENERIC; i++) { pmu->gp_counters[i].type = KVM_PMC_GP; pmu->gp_counters[i].vcpu = vcpu; pmu->gp_counters[i].idx = i; } - for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) { + for (i = 0; i < X86_PMC_MAX_FIXED; i++) { pmu->fixed_counters[i].type = KVM_PMC_FIXED; pmu->fixed_counters[i].vcpu = vcpu; - pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED; + pmu->fixed_counters[i].idx = i + X86_PMC_IDX_FIXED; } init_irq_work(&pmu->irq_work, trigger_pmi); kvm_pmu_cpuid_update(vcpu); @@ -498,13 +498,13 @@ void kvm_pmu_reset(struct kvm_vcpu *vcpu) int i; irq_work_sync(&pmu->irq_work); - for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) { + for (i = 0; i < X86_PMC_MAX_GENERIC; i++) { struct kvm_pmc *pmc = &pmu->gp_counters[i]; stop_counter(pmc); pmc->counter = pmc->eventsel = 0; } - for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) + for (i = 0; i < X86_PMC_MAX_FIXED; i++) stop_counter(&pmu->fixed_counters[i]); pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = diff --git a/trunk/arch/x86/kvm/trace.h b/trunk/arch/x86/kvm/trace.h index 62d02e3c3ed6..911d2641f14c 100644 --- a/trunk/arch/x86/kvm/trace.h +++ b/trunk/arch/x86/kvm/trace.h @@ -710,6 +710,16 @@ TRACE_EVENT(kvm_skinit, __entry->rip, __entry->slb) ); +#define __print_insn(insn, ilen) ({ \ + int i; \ + const char *ret = p->buffer + p->len; \ + \ + for (i = 0; i < ilen; ++i) \ + trace_seq_printf(p, " %02x", insn[i]); \ + trace_seq_printf(p, "%c", 0); \ + ret; \ + }) + #define KVM_EMUL_INSN_F_CR0_PE (1 << 0) #define KVM_EMUL_INSN_F_EFL_VM (1 << 1) #define KVM_EMUL_INSN_F_CS_D (1 << 2) @@ -776,7 +786,7 @@ TRACE_EVENT(kvm_emulate_insn, TP_printk("%x:%llx:%s (%s)%s", __entry->csbase, __entry->rip, - __print_hex(__entry->insn, 
__entry->len), + __print_insn(__entry->insn, __entry->len), __print_symbolic(__entry->flags, kvm_trace_symbol_emul_flags), __entry->failed ? " failed" : "" diff --git a/trunk/arch/x86/lib/msr-reg-export.c b/trunk/arch/x86/lib/msr-reg-export.c index 8d6ef78b5d01..a311cc59b65d 100644 --- a/trunk/arch/x86/lib/msr-reg-export.c +++ b/trunk/arch/x86/lib/msr-reg-export.c @@ -1,5 +1,5 @@ #include #include -EXPORT_SYMBOL(rdmsr_safe_regs); -EXPORT_SYMBOL(wrmsr_safe_regs); +EXPORT_SYMBOL(native_rdmsr_safe_regs); +EXPORT_SYMBOL(native_wrmsr_safe_regs); diff --git a/trunk/arch/x86/lib/msr-reg.S b/trunk/arch/x86/lib/msr-reg.S index f6d13eefad10..69fa10623f21 100644 --- a/trunk/arch/x86/lib/msr-reg.S +++ b/trunk/arch/x86/lib/msr-reg.S @@ -6,13 +6,13 @@ #ifdef CONFIG_X86_64 /* - * int {rdmsr,wrmsr}_safe_regs(u32 gprs[8]); + * int native_{rdmsr,wrmsr}_safe_regs(u32 gprs[8]); * * reg layout: u32 gprs[eax, ecx, edx, ebx, esp, ebp, esi, edi] * */ .macro op_safe_regs op -ENTRY(\op\()_safe_regs) +ENTRY(native_\op\()_safe_regs) CFI_STARTPROC pushq_cfi %rbx pushq_cfi %rbp @@ -45,13 +45,13 @@ ENTRY(\op\()_safe_regs) _ASM_EXTABLE(1b, 3b) CFI_ENDPROC -ENDPROC(\op\()_safe_regs) +ENDPROC(native_\op\()_safe_regs) .endm #else /* X86_32 */ .macro op_safe_regs op -ENTRY(\op\()_safe_regs) +ENTRY(native_\op\()_safe_regs) CFI_STARTPROC pushl_cfi %ebx pushl_cfi %ebp @@ -92,7 +92,7 @@ ENTRY(\op\()_safe_regs) _ASM_EXTABLE(1b, 3b) CFI_ENDPROC -ENDPROC(\op\()_safe_regs) +ENDPROC(native_\op\()_safe_regs) .endm #endif diff --git a/trunk/arch/x86/mm/init.c b/trunk/arch/x86/mm/init.c index e0e6990723e9..bc4e9d84157f 100644 --- a/trunk/arch/x86/mm/init.c +++ b/trunk/arch/x86/mm/init.c @@ -385,7 +385,7 @@ void free_initmem(void) } #ifdef CONFIG_BLK_DEV_INITRD -void __init free_initrd_mem(unsigned long start, unsigned long end) +void free_initrd_mem(unsigned long start, unsigned long end) { /* * end could be not aligned, and We can not align that, diff --git a/trunk/arch/x86/oprofile/op_model_amd.c b/trunk/arch/x86/oprofile/op_model_amd.c index b2b94438ff05..303f08637826 100644 --- a/trunk/arch/x86/oprofile/op_model_amd.c +++ b/trunk/arch/x86/oprofile/op_model_amd.c @@ -312,7 +312,7 @@ static int op_amd_fill_in_addresses(struct op_msrs * const msrs) goto fail; } /* both registers must be reserved */ - if (num_counters == AMD64_NUM_COUNTERS_CORE) { + if (num_counters == AMD64_NUM_COUNTERS_F15H) { msrs->counters[i].addr = MSR_F15H_PERF_CTR + (i << 1); msrs->controls[i].addr = MSR_F15H_PERF_CTL + (i << 1); } else { @@ -514,7 +514,7 @@ static int op_amd_init(struct oprofile_operations *ops) ops->create_files = setup_ibs_files; if (boot_cpu_data.x86 == 0x15) { - num_counters = AMD64_NUM_COUNTERS_CORE; + num_counters = AMD64_NUM_COUNTERS_F15H; } else { num_counters = AMD64_NUM_COUNTERS; } diff --git a/trunk/arch/x86/platform/olpc/olpc-xo15-sci.c b/trunk/arch/x86/platform/olpc/olpc-xo15-sci.c index 599be499fdf7..23e5b9d7977b 100644 --- a/trunk/arch/x86/platform/olpc/olpc-xo15-sci.c +++ b/trunk/arch/x86/platform/olpc/olpc-xo15-sci.c @@ -203,7 +203,7 @@ static int xo15_sci_remove(struct acpi_device *device, int type) return 0; } -static int xo15_sci_resume(struct device *dev) +static int xo15_sci_resume(struct acpi_device *device) { /* Enable all EC events */ olpc_ec_mask_write(EC_SCI_SRC_ALL); @@ -215,8 +215,6 @@ static int xo15_sci_resume(struct device *dev) return 0; } -static SIMPLE_DEV_PM_OPS(xo15_sci_pm, NULL, xo15_sci_resume); - static const struct acpi_device_id xo15_sci_device_ids[] = { {"XO15EC", 0}, {"", 0}, @@ -229,8 +227,8 @@ static 
struct acpi_driver xo15_sci_drv = { .ops = { .add = xo15_sci_add, .remove = xo15_sci_remove, + .resume = xo15_sci_resume, }, - .drv.pm = &xo15_sci_pm, }; static int __init xo15_sci_init(void) diff --git a/trunk/arch/x86/platform/uv/tlb_uv.c b/trunk/arch/x86/platform/uv/tlb_uv.c index 71b5d5a07d7b..59880afa851f 100644 --- a/trunk/arch/x86/platform/uv/tlb_uv.c +++ b/trunk/arch/x86/platform/uv/tlb_uv.c @@ -1,7 +1,7 @@ /* * SGI UltraViolet TLB flush routines. * - * (c) 2008-2012 Cliff Wickman , SGI. + * (c) 2008-2011 Cliff Wickman , SGI. * * This code is released under the GNU General Public License version 2 or * later. @@ -38,7 +38,8 @@ static int timeout_base_ns[] = { static int timeout_us; static int nobau; -static int nobau_perm; +static int baudisabled; +static spinlock_t disable_lock; static cycles_t congested_cycles; /* tunables: */ @@ -46,13 +47,12 @@ static int max_concurr = MAX_BAU_CONCURRENT; static int max_concurr_const = MAX_BAU_CONCURRENT; static int plugged_delay = PLUGGED_DELAY; static int plugsb4reset = PLUGSB4RESET; -static int giveup_limit = GIVEUP_LIMIT; static int timeoutsb4reset = TIMEOUTSB4RESET; static int ipi_reset_limit = IPI_RESET_LIMIT; static int complete_threshold = COMPLETE_THRESHOLD; static int congested_respns_us = CONGESTED_RESPONSE_US; static int congested_reps = CONGESTED_REPS; -static int disabled_period = DISABLED_PERIOD; +static int congested_period = CONGESTED_PERIOD; static struct tunables tunables[] = { {&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */ @@ -63,8 +63,7 @@ static struct tunables tunables[] = { {&complete_threshold, COMPLETE_THRESHOLD}, {&congested_respns_us, CONGESTED_RESPONSE_US}, {&congested_reps, CONGESTED_REPS}, - {&disabled_period, DISABLED_PERIOD}, - {&giveup_limit, GIVEUP_LIMIT} + {&congested_period, CONGESTED_PERIOD} }; static struct dentry *tunables_dir; @@ -121,40 +120,6 @@ static DEFINE_PER_CPU(struct ptc_stats, ptcstats); static DEFINE_PER_CPU(struct bau_control, bau_control); static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask); -static void -set_bau_on(void) -{ - int cpu; - struct bau_control *bcp; - - if (nobau_perm) { - pr_info("BAU not initialized; cannot be turned on\n"); - return; - } - nobau = 0; - for_each_present_cpu(cpu) { - bcp = &per_cpu(bau_control, cpu); - bcp->nobau = 0; - } - pr_info("BAU turned on\n"); - return; -} - -static void -set_bau_off(void) -{ - int cpu; - struct bau_control *bcp; - - nobau = 1; - for_each_present_cpu(cpu) { - bcp = &per_cpu(bau_control, cpu); - bcp->nobau = 1; - } - pr_info("BAU turned off\n"); - return; -} - /* * Determine the first node on a uvhub. 'Nodes' are used for kernel * memory allocation. @@ -313,7 +278,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp, * Both sockets dump their completed count total into * the message's count. */ - *sp = 0; + smaster->socket_acknowledge_count[mdp->msg_slot] = 0; asp = (struct atomic_short *)&msg->acknowledge_count; msg_ack_count = atom_asr(socket_ack_count, asp); @@ -526,15 +491,16 @@ static int uv1_wait_completion(struct bau_desc *bau_desc, } /* - * UV2 could have an extra bit of status in the ACTIVATION_STATUS_2 register. - * But not currently used. + * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register. 
*/ static unsigned long uv2_read_status(unsigned long offset, int rshft, int desc) { unsigned long descriptor_status; + unsigned long descriptor_status2; - descriptor_status = - ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK) << 1; + descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK); + descriptor_status2 = (read_mmr_uv2_status() >> desc) & 0x1UL; + descriptor_status = (descriptor_status << 1) | descriptor_status2; return descriptor_status; } @@ -565,11 +531,87 @@ int normal_busy(struct bau_control *bcp) */ int handle_uv2_busy(struct bau_control *bcp) { + int busy_one = bcp->using_desc; + int normal = bcp->uvhub_cpu; + int selected = -1; + int i; + unsigned long descriptor_status; + unsigned long status; + int mmr_offset; + struct bau_desc *bau_desc_old; + struct bau_desc *bau_desc_new; + struct bau_control *hmaster = bcp->uvhub_master; struct ptc_stats *stat = bcp->statp; + cycles_t ttm; stat->s_uv2_wars++; - bcp->busy = 1; - return FLUSH_GIVEUP; + spin_lock(&hmaster->uvhub_lock); + /* try for the original first */ + if (busy_one != normal) { + if (!normal_busy(bcp)) + selected = normal; + } + if (selected < 0) { + /* can't use the normal, select an alternate */ + mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1; + descriptor_status = read_lmmr(mmr_offset); + + /* scan available descriptors 32-63 */ + for (i = 0; i < UV_CPUS_PER_AS; i++) { + if ((hmaster->inuse_map & (1 << i)) == 0) { + status = ((descriptor_status >> + (i * UV_ACT_STATUS_SIZE)) & + UV_ACT_STATUS_MASK) << 1; + if (status != UV2H_DESC_BUSY) { + selected = i + UV_CPUS_PER_AS; + break; + } + } + } + } + + if (busy_one != normal) + /* mark the busy alternate as not in-use */ + hmaster->inuse_map &= ~(1 << (busy_one - UV_CPUS_PER_AS)); + + if (selected >= 0) { + /* switch to the selected descriptor */ + if (selected != normal) { + /* set the selected alternate as in-use */ + hmaster->inuse_map |= + (1 << (selected - UV_CPUS_PER_AS)); + if (selected > stat->s_uv2_wars_hw) + stat->s_uv2_wars_hw = selected; + } + bau_desc_old = bcp->descriptor_base; + bau_desc_old += (ITEMS_PER_DESC * busy_one); + bcp->using_desc = selected; + bau_desc_new = bcp->descriptor_base; + bau_desc_new += (ITEMS_PER_DESC * selected); + *bau_desc_new = *bau_desc_old; + } else { + /* + * All are busy. Wait for the normal one for this cpu to + * free up. 
+ */ + stat->s_uv2_war_waits++; + spin_unlock(&hmaster->uvhub_lock); + ttm = get_cycles(); + do { + cpu_relax(); + } while (normal_busy(bcp)); + spin_lock(&hmaster->uvhub_lock); + /* switch to the original descriptor */ + bcp->using_desc = normal; + bau_desc_old = bcp->descriptor_base; + bau_desc_old += (ITEMS_PER_DESC * bcp->using_desc); + bcp->using_desc = (ITEMS_PER_DESC * normal); + bau_desc_new = bcp->descriptor_base; + bau_desc_new += (ITEMS_PER_DESC * normal); + *bau_desc_new = *bau_desc_old; /* copy the entire descriptor */ + } + spin_unlock(&hmaster->uvhub_lock); + return FLUSH_RETRY_BUSYBUG; } static int uv2_wait_completion(struct bau_desc *bau_desc, @@ -578,7 +620,7 @@ static int uv2_wait_completion(struct bau_desc *bau_desc, { unsigned long descriptor_stat; cycles_t ttm; - int desc = bcp->uvhub_cpu; + int desc = bcp->using_desc; long busy_reps = 0; struct ptc_stats *stat = bcp->statp; @@ -586,38 +628,24 @@ static int uv2_wait_completion(struct bau_desc *bau_desc, /* spin on the status MMR, waiting for it to go idle */ while (descriptor_stat != UV2H_DESC_IDLE) { - if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT)) { - /* - * A h/w bug on the destination side may - * have prevented the message being marked - * pending, thus it doesn't get replied to - * and gets continually nacked until it times - * out with a SOURCE_TIMEOUT. - */ + /* + * Our software ack messages may be blocked because + * there are no swack resources available. As long + * as none of them has timed out hardware will NACK + * our message and its state will stay IDLE. + */ + if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) || + (descriptor_stat == UV2H_DESC_DEST_PUT_ERR)) { stat->s_stimeout++; return FLUSH_GIVEUP; + } else if (descriptor_stat == UV2H_DESC_DEST_STRONG_NACK) { + stat->s_strongnacks++; + bcp->conseccompletes = 0; + return FLUSH_GIVEUP; } else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) { - ttm = get_cycles(); - - /* - * Our retries may be blocked by all destination - * swack resources being consumed, and a timeout - * pending. In that case hardware returns the - * ERROR that looks like a destination timeout. - * Without using the extended status we have to - * deduce from the short time that this was a - * strong nack. - */ - if (cycles_2_us(ttm - bcp->send_message) < timeout_us) { - bcp->conseccompletes = 0; - stat->s_plugged++; - /* FLUSH_RETRY_PLUGGED causes hang on boot */ - return FLUSH_GIVEUP; - } stat->s_dtimeout++; bcp->conseccompletes = 0; - /* FLUSH_RETRY_TIMEOUT causes hang on boot */ - return FLUSH_GIVEUP; + return FLUSH_RETRY_TIMEOUT; } else { busy_reps++; if (busy_reps > 1000000) { @@ -625,8 +653,9 @@ static int uv2_wait_completion(struct bau_desc *bau_desc, busy_reps = 0; ttm = get_cycles(); if ((ttm - bcp->send_message) > - bcp->timeout_interval) + (bcp->clocks_per_100_usec)) { return handle_uv2_busy(bcp); + } } /* * descriptor_stat is still BUSY @@ -650,7 +679,7 @@ static int wait_completion(struct bau_desc *bau_desc, { int right_shift; unsigned long mmr_offset; - int desc = bcp->uvhub_cpu; + int desc = bcp->using_desc; if (desc < UV_CPUS_PER_AS) { mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0; @@ -729,31 +758,33 @@ static void destination_timeout(struct bau_desc *bau_desc, } /* - * Stop all cpus on a uvhub from using the BAU for a period of time. - * This is reversed by check_enable. + * Completions are taking a very long time due to a congested numalink + * network. 
*/ -static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat) +static void disable_for_congestion(struct bau_control *bcp, + struct ptc_stats *stat) { - int tcpu; - struct bau_control *tbcp; - struct bau_control *hmaster; - cycles_t tm1; - - hmaster = bcp->uvhub_master; - spin_lock(&hmaster->disable_lock); - if (!bcp->baudisabled) { + /* let only one cpu do this disabling */ + spin_lock(&disable_lock); + + if (!baudisabled && bcp->period_requests && + ((bcp->period_time / bcp->period_requests) > congested_cycles)) { + int tcpu; + struct bau_control *tbcp; + /* it becomes this cpu's job to turn on the use of the + BAU again */ + baudisabled = 1; + bcp->set_bau_off = 1; + bcp->set_bau_on_time = get_cycles(); + bcp->set_bau_on_time += sec_2_cycles(bcp->cong_period); stat->s_bau_disabled++; - tm1 = get_cycles(); for_each_present_cpu(tcpu) { tbcp = &per_cpu(bau_control, tcpu); - if (tbcp->uvhub_master == hmaster) { - tbcp->baudisabled = 1; - tbcp->set_bau_on_time = - tm1 + bcp->disabled_period; - } + tbcp->baudisabled = 1; } } - spin_unlock(&hmaster->disable_lock); + + spin_unlock(&disable_lock); } static void count_max_concurr(int stat, struct bau_control *bcp, @@ -784,30 +815,16 @@ static void record_send_stats(cycles_t time1, cycles_t time2, bcp->period_requests++; bcp->period_time += elapsed; if ((elapsed > congested_cycles) && - (bcp->period_requests > bcp->cong_reps) && - ((bcp->period_time / bcp->period_requests) > - congested_cycles)) { - stat->s_congested++; - disable_for_period(bcp, stat); - } + (bcp->period_requests > bcp->cong_reps)) + disable_for_congestion(bcp, stat); } } else stat->s_requestor--; if (completion_status == FLUSH_COMPLETE && try > 1) stat->s_retriesok++; - else if (completion_status == FLUSH_GIVEUP) { + else if (completion_status == FLUSH_GIVEUP) stat->s_giveup++; - if (get_cycles() > bcp->period_end) - bcp->period_giveups = 0; - bcp->period_giveups++; - if (bcp->period_giveups == 1) - bcp->period_end = get_cycles() + bcp->disabled_period; - if (bcp->period_giveups > bcp->giveup_limit) { - disable_for_period(bcp, stat); - stat->s_giveuplimit++; - } - } } /* @@ -851,8 +868,7 @@ static void handle_cmplt(int completion_status, struct bau_desc *bau_desc, * Returns 1 if it gives up entirely and the original cpu mask is to be * returned to the kernel. 
*/ -int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp, - struct bau_desc *bau_desc) +int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp) { int seq_number = 0; int completion_stat = 0; @@ -865,23 +881,24 @@ int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp, struct bau_control *hmaster = bcp->uvhub_master; struct uv1_bau_msg_header *uv1_hdr = NULL; struct uv2_bau_msg_header *uv2_hdr = NULL; + struct bau_desc *bau_desc; - if (bcp->uvhub_version == 1) { - uv1 = 1; + if (bcp->uvhub_version == 1) uv1_throttle(hmaster, stat); - } while (hmaster->uvhub_quiesce) cpu_relax(); time1 = get_cycles(); - if (uv1) - uv1_hdr = &bau_desc->header.uv1_hdr; - else - uv2_hdr = &bau_desc->header.uv2_hdr; - do { - if (try == 0) { + bau_desc = bcp->descriptor_base; + bau_desc += (ITEMS_PER_DESC * bcp->using_desc); + if (bcp->uvhub_version == 1) { + uv1 = 1; + uv1_hdr = &bau_desc->header.uv1_hdr; + } else + uv2_hdr = &bau_desc->header.uv2_hdr; + if ((try == 0) || (completion_stat == FLUSH_RETRY_BUSYBUG)) { if (uv1) uv1_hdr->msg_type = MSG_REGULAR; else @@ -899,24 +916,25 @@ int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp, uv1_hdr->sequence = seq_number; else uv2_hdr->sequence = seq_number; - index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu; + index = (1UL << AS_PUSH_SHIFT) | bcp->using_desc; bcp->send_message = get_cycles(); write_mmr_activation(index); try++; completion_stat = wait_completion(bau_desc, bcp, try); + /* UV2: wait_completion() may change the bcp->using_desc */ handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat); if (bcp->ipi_attempts >= bcp->ipi_reset_limit) { bcp->ipi_attempts = 0; - stat->s_overipilimit++; completion_stat = FLUSH_GIVEUP; break; } cpu_relax(); } while ((completion_stat == FLUSH_RETRY_PLUGGED) || + (completion_stat == FLUSH_RETRY_BUSYBUG) || (completion_stat == FLUSH_RETRY_TIMEOUT)); time2 = get_cycles(); @@ -937,33 +955,28 @@ int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp, } /* - * The BAU is disabled for this uvhub. When the disabled time period has - * expired re-enable it. - * Return 0 if it is re-enabled for all cpus on this uvhub. + * The BAU is disabled. When the disabled time period has expired, the cpu + * that disabled it must re-enable it. + * Return 0 if it is re-enabled for all cpus. 
*/ static int check_enable(struct bau_control *bcp, struct ptc_stats *stat) { int tcpu; struct bau_control *tbcp; - struct bau_control *hmaster; - hmaster = bcp->uvhub_master; - spin_lock(&hmaster->disable_lock); - if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) { - stat->s_bau_reenabled++; - for_each_present_cpu(tcpu) { - tbcp = &per_cpu(bau_control, tcpu); - if (tbcp->uvhub_master == hmaster) { + if (bcp->set_bau_off) { + if (get_cycles() >= bcp->set_bau_on_time) { + stat->s_bau_reenabled++; + baudisabled = 0; + for_each_present_cpu(tcpu) { + tbcp = &per_cpu(bau_control, tcpu); tbcp->baudisabled = 0; tbcp->period_requests = 0; tbcp->period_time = 0; - tbcp->period_giveups = 0; } + return 0; } - spin_unlock(&hmaster->disable_lock); - return 0; } - spin_unlock(&hmaster->disable_lock); return -1; } @@ -1065,32 +1078,18 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, struct cpumask *flush_mask; struct ptc_stats *stat; struct bau_control *bcp; - unsigned long descriptor_status; - unsigned long status; - bcp = &per_cpu(bau_control, cpu); - stat = bcp->statp; - stat->s_enters++; - - if (bcp->nobau) + /* kernel was booted 'nobau' */ + if (nobau) return cpumask; - if (bcp->busy) { - descriptor_status = - read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0); - status = ((descriptor_status >> (bcp->uvhub_cpu * - UV_ACT_STATUS_SIZE)) & UV_ACT_STATUS_MASK) << 1; - if (status == UV2H_DESC_BUSY) - return cpumask; - bcp->busy = 0; - } + bcp = &per_cpu(bau_control, cpu); + stat = bcp->statp; /* bau was disabled due to slow response */ if (bcp->baudisabled) { - if (check_enable(bcp, stat)) { - stat->s_ipifordisabled++; + if (check_enable(bcp, stat)) return cpumask; - } } /* @@ -1106,7 +1105,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, stat->s_ntargself++; bau_desc = bcp->descriptor_base; - bau_desc += (ITEMS_PER_DESC * bcp->uvhub_cpu); + bau_desc += (ITEMS_PER_DESC * bcp->using_desc); bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes)) return NULL; @@ -1119,27 +1118,25 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, * uv_flush_send_and_wait returns 0 if all cpu's were messaged, * or 1 if it gave up and the original cpumask should be returned. */ - if (!uv_flush_send_and_wait(flush_mask, bcp, bau_desc)) + if (!uv_flush_send_and_wait(flush_mask, bcp)) return NULL; else return cpumask; } /* - * Search the message queue for any 'other' unprocessed message with the - * same software acknowledge resource bit vector as the 'msg' message. + * Search the message queue for any 'other' message with the same software + * acknowledge resource bit vector. */ struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg, - struct bau_control *bcp) + struct bau_control *bcp, unsigned char swack_vec) { struct bau_pq_entry *msg_next = msg + 1; - unsigned char swack_vec = msg->swack_vec; if (msg_next > bcp->queue_last) msg_next = bcp->queue_first; - while (msg_next != msg) { - if ((msg_next->canceled == 0) && (msg_next->replied_to == 0) && - (msg_next->swack_vec == swack_vec)) + while ((msg_next->swack_vec != 0) && (msg_next != msg)) { + if (msg_next->swack_vec == swack_vec) return msg_next; msg_next++; if (msg_next > bcp->queue_last) @@ -1168,30 +1165,32 @@ void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp) * This message was assigned a swack resource, but no * reserved acknowlegment is pending. 
* The bug has prevented this message from setting the MMR. + * And no other message has used the same sw_ack resource. + * Do the requested shootdown but do not reply to the msg. + * (the 0 means make no acknowledge) */ + bau_process_message(mdp, bcp, 0); + return; + } + + /* + * Some message has set the MMR 'pending' bit; it might have been + * another message. Look for that message. + */ + other_msg = find_another_by_swack(msg, bcp, msg->swack_vec); + if (other_msg) { + /* There is another. Do not ack the current one. */ + bau_process_message(mdp, bcp, 0); /* - * Some message has set the MMR 'pending' bit; it might have - * been another message. Look for that message. + * Let the natural processing of that message acknowledge + * it. Don't get the processing of sw_ack's out of order. */ - other_msg = find_another_by_swack(msg, bcp); - if (other_msg) { - /* - * There is another. Process this one but do not - * ack it. - */ - bau_process_message(mdp, bcp, 0); - /* - * Let the natural processing of that other message - * acknowledge it. Don't get the processing of sw_ack's - * out of order. - */ - return; - } + return; } /* - * Either the MMR shows this one pending a reply or there is no - * other message using this sw_ack, so it is safe to acknowledge it. + * There is no other message using this sw_ack, so it is safe to + * acknowledge it. */ bau_process_message(mdp, bcp, 1); @@ -1296,8 +1295,7 @@ static void __init enable_timeouts(void) */ mmr_image |= (1L << SOFTACK_MSHIFT); if (is_uv2_hub()) { - /* hw bug workaround; do not use extended status */ - mmr_image &= ~(1L << UV2_EXT_SHFT); + mmr_image |= (1L << UV2_EXT_SHFT); } write_mmr_misc_control(pnode, mmr_image); } @@ -1340,34 +1338,29 @@ static inline unsigned long long usec_2_cycles(unsigned long microsec) static int ptc_seq_show(struct seq_file *file, void *data) { struct ptc_stats *stat; - struct bau_control *bcp; int cpu; cpu = *(loff_t *)data; if (!cpu) { seq_printf(file, - "# cpu bauoff sent stime self locals remotes ncpus localhub "); + "# cpu sent stime self locals remotes ncpus localhub "); seq_printf(file, "remotehub numuvhubs numuvhubs16 numuvhubs8 "); seq_printf(file, - "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries "); - seq_printf(file, - "rok resetp resett giveup sto bz throt disable "); + "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries rok "); seq_printf(file, - "enable wars warshw warwaits enters ipidis plugged "); + "resetp resett giveup sto bz throt swack recv rtime "); seq_printf(file, - "ipiover glim cong swack recv rtime all one mult "); + "all one mult none retry canc nocan reset rcan "); seq_printf(file, - "none retry canc nocan reset rcan\n"); + "disable enable wars warshw warwaits\n"); } if (cpu < num_possible_cpus() && cpu_online(cpu)) { - bcp = &per_cpu(bau_control, cpu); - stat = bcp->statp; + stat = &per_cpu(ptcstats, cpu); /* source side statistics */ seq_printf(file, - "cpu %d %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ", - cpu, bcp->nobau, stat->s_requestor, - cycles_2_us(stat->s_time), + "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ", + cpu, stat->s_requestor, cycles_2_us(stat->s_time), stat->s_ntargself, stat->s_ntarglocals, stat->s_ntargremotes, stat->s_ntargcpu, stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub, @@ -1381,23 +1374,20 @@ static int ptc_seq_show(struct seq_file *file, void *data) stat->s_resets_plug, stat->s_resets_timeout, stat->s_giveup, stat->s_stimeout, stat->s_busy, stat->s_throttles); - seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ", - 
stat->s_bau_disabled, stat->s_bau_reenabled, - stat->s_uv2_wars, stat->s_uv2_wars_hw, - stat->s_uv2_war_waits, stat->s_enters, - stat->s_ipifordisabled, stat->s_plugged, - stat->s_overipilimit, stat->s_giveuplimit, - stat->s_congested); /* destination side statistics */ seq_printf(file, - "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", + "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ", read_gmmr_sw_ack(uv_cpu_to_pnode(cpu)), stat->d_requestee, cycles_2_us(stat->d_time), stat->d_alltlb, stat->d_onetlb, stat->d_multmsg, stat->d_nomsg, stat->d_retries, stat->d_canceled, stat->d_nocanceled, stat->d_resets, stat->d_rcanceled); + seq_printf(file, "%ld %ld %ld %ld %ld\n", + stat->s_bau_disabled, stat->s_bau_reenabled, + stat->s_uv2_wars, stat->s_uv2_wars_hw, + stat->s_uv2_war_waits); } return 0; } @@ -1411,14 +1401,13 @@ static ssize_t tunables_read(struct file *file, char __user *userbuf, char *buf; int ret; - buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d %d\n", - "max_concur plugged_delay plugsb4reset timeoutsb4reset", - "ipi_reset_limit complete_threshold congested_response_us", - "congested_reps disabled_period giveup_limit", + buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n", + "max_concur plugged_delay plugsb4reset", + "timeoutsb4reset ipi_reset_limit complete_threshold", + "congested_response_us congested_reps congested_period", max_concurr, plugged_delay, plugsb4reset, timeoutsb4reset, ipi_reset_limit, complete_threshold, - congested_respns_us, congested_reps, disabled_period, - giveup_limit); + congested_respns_us, congested_reps, congested_period); if (!buf) return -ENOMEM; @@ -1449,14 +1438,6 @@ static ssize_t ptc_proc_write(struct file *file, const char __user *user, return -EFAULT; optstr[count - 1] = '\0'; - if (!strcmp(optstr, "on")) { - set_bau_on(); - return count; - } else if (!strcmp(optstr, "off")) { - set_bau_off(); - return count; - } - if (strict_strtol(optstr, 10, &input_arg) < 0) { printk(KERN_DEBUG "%s is invalid\n", optstr); return -EINVAL; @@ -1589,8 +1570,7 @@ static ssize_t tunables_write(struct file *file, const char __user *user, bcp->complete_threshold = complete_threshold; bcp->cong_response_us = congested_respns_us; bcp->cong_reps = congested_reps; - bcp->disabled_period = sec_2_cycles(disabled_period); - bcp->giveup_limit = giveup_limit; + bcp->cong_period = congested_period; } return count; } @@ -1719,10 +1699,6 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode) * fairness chaining multilevel count replied_to */ } else { - /* - * BIOS uses legacy mode, but UV2 hardware always - * uses native mode for selective broadcasts. 
- */ uv2_hdr = &bd2->header.uv2_hdr; uv2_hdr->swack_flag = 1; uv2_hdr->base_dest_nasid = @@ -1835,8 +1811,8 @@ static int calculate_destination_timeout(void) index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK; mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT); mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK; - ts_ns = timeout_base_ns[index]; - ts_ns *= (mult1 * mult2); + base = timeout_base_ns[index]; + ts_ns = base * mult1 * mult2; ret = ts_ns / 1000; } else { /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */ @@ -1860,8 +1836,6 @@ static void __init init_per_cpu_tunables(void) for_each_present_cpu(cpu) { bcp = &per_cpu(bau_control, cpu); bcp->baudisabled = 0; - if (nobau) - bcp->nobau = 1; bcp->statp = &per_cpu(ptcstats, cpu); /* time interval to catch a hardware stay-busy bug */ bcp->timeout_interval = usec_2_cycles(2*timeout_us); @@ -1874,11 +1848,10 @@ static void __init init_per_cpu_tunables(void) bcp->complete_threshold = complete_threshold; bcp->cong_response_us = congested_respns_us; bcp->cong_reps = congested_reps; - bcp->disabled_period = sec_2_cycles(disabled_period); - bcp->giveup_limit = giveup_limit; + bcp->cong_period = congested_period; + bcp->clocks_per_100_usec = usec_2_cycles(100); spin_lock_init(&bcp->queue_lock); spin_lock_init(&bcp->uvhub_lock); - spin_lock_init(&bcp->disable_lock); } } @@ -1999,6 +1972,7 @@ static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp, } bcp->uvhub_master = *hmasterp; bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id; + bcp->using_desc = bcp->uvhub_cpu; if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) { printk(KERN_EMERG "%d cpus per uvhub invalid\n", bcp->uvhub_cpu); @@ -2095,12 +2069,16 @@ static int __init uv_bau_init(void) if (!is_uv_system()) return 0; + if (nobau) + return 0; + for_each_possible_cpu(cur_cpu) { mask = &per_cpu(uv_flush_tlb_mask, cur_cpu); zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu)); } nuvhubs = uv_num_possible_blades(); + spin_lock_init(&disable_lock); congested_cycles = usec_2_cycles(congested_respns_us); uv_base_pnode = 0x7fffffff; @@ -2113,8 +2091,7 @@ static int __init uv_bau_init(void) enable_timeouts(); if (init_per_cpu(nuvhubs, uv_base_pnode)) { - set_bau_off(); - nobau_perm = 1; + nobau = 1; return 0; } diff --git a/trunk/arch/x86/platform/uv/uv_irq.c b/trunk/arch/x86/platform/uv/uv_irq.c index acf7752da952..f25c2765a5c9 100644 --- a/trunk/arch/x86/platform/uv/uv_irq.c +++ b/trunk/arch/x86/platform/uv/uv_irq.c @@ -135,7 +135,6 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, unsigned long mmr_value; struct uv_IO_APIC_route_entry *entry; int mmr_pnode, err; - unsigned int dest; BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); @@ -144,10 +143,6 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, if (err != 0) return err; - err = apic->cpu_mask_to_apicid_and(eligible_cpu, eligible_cpu, &dest); - if (err != 0) - return err; - if (limit == UV_AFFINITY_CPU) irq_set_status_flags(irq, IRQ_NO_BALANCING); else @@ -164,7 +159,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, entry->polarity = 0; entry->trigger = 0; entry->mask = 0; - entry->dest = dest; + entry->dest = apic->cpu_mask_to_apicid(eligible_cpu); mmr_pnode = uv_blade_to_pnode(mmr_blade); uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); @@ -227,7 +222,7 @@ uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask, if (cfg->move_in_progress) 
send_cleanup_vector(cfg); - return IRQ_SET_MASK_OK_NOCOPY; + return 0; } /* diff --git a/trunk/arch/x86/realmode/rm/Makefile b/trunk/arch/x86/realmode/rm/Makefile index b2d534cab25f..5b84a2d30888 100644 --- a/trunk/arch/x86/realmode/rm/Makefile +++ b/trunk/arch/x86/realmode/rm/Makefile @@ -22,7 +22,7 @@ wakeup-objs += video-bios.o realmode-y += header.o realmode-y += trampoline_$(BITS).o realmode-y += stack.o -realmode-y += reboot.o +realmode-$(CONFIG_X86_32) += reboot_32.o realmode-$(CONFIG_ACPI_SLEEP) += $(wakeup-objs) targets += $(realmode-y) diff --git a/trunk/arch/x86/realmode/rm/header.S b/trunk/arch/x86/realmode/rm/header.S index a28221d94e69..fadf48378ada 100644 --- a/trunk/arch/x86/realmode/rm/header.S +++ b/trunk/arch/x86/realmode/rm/header.S @@ -6,7 +6,6 @@ #include #include -#include #include "realmode.h" @@ -29,9 +28,8 @@ GLOBAL(real_mode_header) .long pa_wakeup_header #endif /* APM/BIOS reboot */ +#ifdef CONFIG_X86_32 .long pa_machine_real_restart_asm -#ifdef CONFIG_X86_64 - .long __KERNEL32_CS #endif END(real_mode_header) diff --git a/trunk/arch/x86/realmode/rm/reboot.S b/trunk/arch/x86/realmode/rm/reboot_32.S similarity index 85% rename from trunk/arch/x86/realmode/rm/reboot.S rename to trunk/arch/x86/realmode/rm/reboot_32.S index f932ea61d1c8..114044876b3d 100644 --- a/trunk/arch/x86/realmode/rm/reboot.S +++ b/trunk/arch/x86/realmode/rm/reboot_32.S @@ -2,8 +2,6 @@ #include #include #include -#include -#include #include "realmode.h" /* @@ -14,35 +12,13 @@ * doesn't work with at least one type of 486 motherboard. It is easy * to stop this code working; hence the copious comments. * - * This code is called with the restart type (0 = BIOS, 1 = APM) in - * the primary argument register (%eax for 32 bit, %edi for 64 bit). + * This code is called with the restart type (0 = BIOS, 1 = APM) in %eax. */ .section ".text32", "ax" .code32 -ENTRY(machine_real_restart_asm) - -#ifdef CONFIG_X86_64 - /* Switch to trampoline GDT as it is guaranteed < 4 GiB */ - movl $__KERNEL_DS, %eax - movl %eax, %ds - lgdtl pa_tr_gdt - - /* Disable paging to drop us out of long mode */ - movl %cr0, %eax - andl $~X86_CR0_PG, %eax - movl %eax, %cr0 - ljmpl $__KERNEL32_CS, $pa_machine_real_restart_paging_off -GLOBAL(machine_real_restart_paging_off) - xorl %eax, %eax - xorl %edx, %edx - movl $MSR_EFER, %ecx - wrmsr - - movl %edi, %eax - -#endif /* CONFIG_X86_64 */ - + .balign 16 +ENTRY(machine_real_restart_asm) /* Set up the IDT for real mode. */ lidtl pa_machine_real_restart_idt diff --git a/trunk/arch/x86/vdso/vdso32-setup.c b/trunk/arch/x86/vdso/vdso32-setup.c index 0faad646f5fd..66e6d9359826 100644 --- a/trunk/arch/x86/vdso/vdso32-setup.c +++ b/trunk/arch/x86/vdso/vdso32-setup.c @@ -205,9 +205,9 @@ void syscall32_cpu_init(void) { /* Load these always in case some future AMD CPU supports SYSENTER from compat mode too. 
*/ - wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); - wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); - wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target); + checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); + checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL); + checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target); wrmsrl(MSR_CSTAR, ia32_cstar_target); } diff --git a/trunk/arch/x86/xen/enlighten.c b/trunk/arch/x86/xen/enlighten.c index ed7d54985d0c..ff962d4b821e 100644 --- a/trunk/arch/x86/xen/enlighten.c +++ b/trunk/arch/x86/xen/enlighten.c @@ -1124,7 +1124,9 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = { .wbinvd = native_wbinvd, .read_msr = native_read_msr_safe, + .rdmsr_regs = native_rdmsr_safe_regs, .write_msr = xen_write_msr_safe, + .wrmsr_regs = native_wrmsr_safe_regs, .read_tsc = native_read_tsc, .read_pmc = native_read_pmc, diff --git a/trunk/arch/x86/xen/smp.c b/trunk/arch/x86/xen/smp.c index f58dca7a6e52..afb250d22a6b 100644 --- a/trunk/arch/x86/xen/smp.c +++ b/trunk/arch/x86/xen/smp.c @@ -80,7 +80,9 @@ static void __cpuinit cpu_bringup(void) notify_cpu_starting(cpu); + ipi_call_lock(); set_cpu_online(cpu, true); + ipi_call_unlock(); this_cpu_write(cpu_state, CPU_ONLINE); diff --git a/trunk/drivers/acpi/ac.c b/trunk/drivers/acpi/ac.c index ff9f6bd48301..6512b20aeccd 100644 --- a/trunk/drivers/acpi/ac.c +++ b/trunk/drivers/acpi/ac.c @@ -61,6 +61,7 @@ static int acpi_ac_open_fs(struct inode *inode, struct file *file); static int acpi_ac_add(struct acpi_device *device); static int acpi_ac_remove(struct acpi_device *device, int type); +static int acpi_ac_resume(struct acpi_device *device); static void acpi_ac_notify(struct acpi_device *device, u32 event); static const struct acpi_device_id ac_device_ids[] = { @@ -69,9 +70,6 @@ static const struct acpi_device_id ac_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, ac_device_ids); -static int acpi_ac_resume(struct device *dev); -static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); - static struct acpi_driver acpi_ac_driver = { .name = "ac", .class = ACPI_AC_CLASS, @@ -80,9 +78,9 @@ static struct acpi_driver acpi_ac_driver = { .ops = { .add = acpi_ac_add, .remove = acpi_ac_remove, + .resume = acpi_ac_resume, .notify = acpi_ac_notify, }, - .drv.pm = &acpi_ac_pm, }; struct acpi_ac { @@ -311,18 +309,13 @@ static int acpi_ac_add(struct acpi_device *device) return result; } -static int acpi_ac_resume(struct device *dev) +static int acpi_ac_resume(struct acpi_device *device) { struct acpi_ac *ac; unsigned old_state; - - if (!dev) - return -EINVAL; - - ac = acpi_driver_data(to_acpi_device(dev)); - if (!ac) + if (!device || !acpi_driver_data(device)) return -EINVAL; - + ac = acpi_driver_data(device); old_state = ac->state; if (acpi_ac_get_state(ac)) return 0; diff --git a/trunk/drivers/acpi/battery.c b/trunk/drivers/acpi/battery.c index 023f9c8534d0..7dd3f9fb9f3f 100644 --- a/trunk/drivers/acpi/battery.c +++ b/trunk/drivers/acpi/battery.c @@ -1044,24 +1044,17 @@ static int acpi_battery_remove(struct acpi_device *device, int type) } /* this is needed to learn about changes made in suspended state */ -static int acpi_battery_resume(struct device *dev) +static int acpi_battery_resume(struct acpi_device *device) { struct acpi_battery *battery; - - if (!dev) - return -EINVAL; - - battery = acpi_driver_data(to_acpi_device(dev)); - if (!battery) + if (!device) return -EINVAL; - + battery = acpi_driver_data(device); battery->update_time = 0; acpi_battery_update(battery); return 0; } -static 
SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume); - static struct acpi_driver acpi_battery_driver = { .name = "battery", .class = ACPI_BATTERY_CLASS, @@ -1069,10 +1062,10 @@ static struct acpi_driver acpi_battery_driver = { .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS, .ops = { .add = acpi_battery_add, + .resume = acpi_battery_resume, .remove = acpi_battery_remove, .notify = acpi_battery_notify, }, - .drv.pm = &acpi_battery_pm, }; static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie) diff --git a/trunk/drivers/acpi/button.c b/trunk/drivers/acpi/button.c index 79d4c22f7a6d..d27d072472f9 100644 --- a/trunk/drivers/acpi/button.c +++ b/trunk/drivers/acpi/button.c @@ -76,21 +76,19 @@ MODULE_DEVICE_TABLE(acpi, button_device_ids); static int acpi_button_add(struct acpi_device *device); static int acpi_button_remove(struct acpi_device *device, int type); +static int acpi_button_resume(struct acpi_device *device); static void acpi_button_notify(struct acpi_device *device, u32 event); -static int acpi_button_resume(struct device *dev); -static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume); - static struct acpi_driver acpi_button_driver = { .name = "button", .class = ACPI_BUTTON_CLASS, .ids = button_device_ids, .ops = { .add = acpi_button_add, + .resume = acpi_button_resume, .remove = acpi_button_remove, .notify = acpi_button_notify, }, - .drv.pm = &acpi_button_pm, }; struct acpi_button { @@ -310,9 +308,8 @@ static void acpi_button_notify(struct acpi_device *device, u32 event) } } -static int acpi_button_resume(struct device *dev) +static int acpi_button_resume(struct acpi_device *device) { - struct acpi_device *device = to_acpi_device(dev); struct acpi_button *button = acpi_driver_data(device); if (button->type == ACPI_BUTTON_TYPE_LID) diff --git a/trunk/drivers/acpi/fan.c b/trunk/drivers/acpi/fan.c index 669d9ee80d16..0f0356ca1a9e 100644 --- a/trunk/drivers/acpi/fan.c +++ b/trunk/drivers/acpi/fan.c @@ -46,6 +46,8 @@ MODULE_LICENSE("GPL"); static int acpi_fan_add(struct acpi_device *device); static int acpi_fan_remove(struct acpi_device *device, int type); +static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state); +static int acpi_fan_resume(struct acpi_device *device); static const struct acpi_device_id fan_device_ids[] = { {"PNP0C0B", 0}, @@ -53,10 +55,6 @@ static const struct acpi_device_id fan_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, fan_device_ids); -static int acpi_fan_suspend(struct device *dev); -static int acpi_fan_resume(struct device *dev); -static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume); - static struct acpi_driver acpi_fan_driver = { .name = "fan", .class = ACPI_FAN_CLASS, @@ -64,8 +62,9 @@ static struct acpi_driver acpi_fan_driver = { .ops = { .add = acpi_fan_add, .remove = acpi_fan_remove, + .suspend = acpi_fan_suspend, + .resume = acpi_fan_resume, }, - .drv.pm = &acpi_fan_pm, }; /* thermal cooling device callbacks */ @@ -184,24 +183,24 @@ static int acpi_fan_remove(struct acpi_device *device, int type) return 0; } -static int acpi_fan_suspend(struct device *dev) +static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state) { - if (!dev) + if (!device) return -EINVAL; - acpi_bus_set_power(to_acpi_device(dev)->handle, ACPI_STATE_D0); + acpi_bus_set_power(device->handle, ACPI_STATE_D0); return AE_OK; } -static int acpi_fan_resume(struct device *dev) +static int acpi_fan_resume(struct acpi_device *device) { int result; - if (!dev) + if (!device) return -EINVAL; - result = 
acpi_bus_update_power(to_acpi_device(dev)->handle, NULL); + result = acpi_bus_update_power(device->handle, NULL); if (result) printk(KERN_ERR PREFIX "Error updating fan power state\n"); diff --git a/trunk/drivers/acpi/power.c b/trunk/drivers/acpi/power.c index 894d45c6bc67..dd6d6a3c6780 100644 --- a/trunk/drivers/acpi/power.c +++ b/trunk/drivers/acpi/power.c @@ -60,6 +60,7 @@ ACPI_MODULE_NAME("power"); static int acpi_power_add(struct acpi_device *device); static int acpi_power_remove(struct acpi_device *device, int type); +static int acpi_power_resume(struct acpi_device *device); static const struct acpi_device_id power_device_ids[] = { {ACPI_POWER_HID, 0}, @@ -67,9 +68,6 @@ static const struct acpi_device_id power_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, power_device_ids); -static int acpi_power_resume(struct device *dev); -static SIMPLE_DEV_PM_OPS(acpi_power_pm, NULL, acpi_power_resume); - static struct acpi_driver acpi_power_driver = { .name = "power", .class = ACPI_POWER_CLASS, @@ -77,8 +75,8 @@ static struct acpi_driver acpi_power_driver = { .ops = { .add = acpi_power_add, .remove = acpi_power_remove, + .resume = acpi_power_resume, }, - .drv.pm = &acpi_power_pm, }; /* @@ -773,16 +771,14 @@ static int acpi_power_remove(struct acpi_device *device, int type) return 0; } -static int acpi_power_resume(struct device *dev) +static int acpi_power_resume(struct acpi_device *device) { int result = 0, state; - struct acpi_device *device; struct acpi_power_resource *resource; - if (!dev) + if (!device) return -EINVAL; - device = to_acpi_device(dev); resource = acpi_driver_data(device); if (!resource) return -EINVAL; diff --git a/trunk/drivers/acpi/processor_driver.c b/trunk/drivers/acpi/processor_driver.c index 7048b97853e0..0734086537b8 100644 --- a/trunk/drivers/acpi/processor_driver.c +++ b/trunk/drivers/acpi/processor_driver.c @@ -93,9 +93,6 @@ static const struct acpi_device_id processor_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, processor_device_ids); -static SIMPLE_DEV_PM_OPS(acpi_processor_pm, - acpi_processor_suspend, acpi_processor_resume); - static struct acpi_driver acpi_processor_driver = { .name = "processor", .class = ACPI_PROCESSOR_CLASS, @@ -103,9 +100,10 @@ static struct acpi_driver acpi_processor_driver = { .ops = { .add = acpi_processor_add, .remove = acpi_processor_remove, + .suspend = acpi_processor_suspend, + .resume = acpi_processor_resume, .notify = acpi_processor_notify, }, - .drv.pm = &acpi_processor_pm, }; #define INSTALL_NOTIFY_HANDLER 1 @@ -429,11 +427,18 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, * Initialize missing things */ if (pr->flags.need_hotplug_init) { + struct cpuidle_driver *idle_driver = + cpuidle_get_driver(); + printk(KERN_INFO "Will online and init hotplugged " "CPU: %d\n", pr->id); WARN(acpi_processor_start(pr), "Failed to start CPU:" " %d\n", pr->id); pr->flags.need_hotplug_init = 0; + if (idle_driver && !strcmp(idle_driver->name, + "intel_idle")) { + intel_idle_cpu_init(pr->id); + } /* Normal CPU soft online event */ } else { acpi_processor_ppc_has_changed(pr, 0); diff --git a/trunk/drivers/acpi/processor_idle.c b/trunk/drivers/acpi/processor_idle.c index e589c1985248..47a8caa89dbe 100644 --- a/trunk/drivers/acpi/processor_idle.c +++ b/trunk/drivers/acpi/processor_idle.c @@ -221,6 +221,10 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr, #endif +/* + * Suspend / resume control + */ +static int acpi_idle_suspend; static u32 saved_bm_rld; static void acpi_idle_bm_rld_save(void) @@ -237,15 +241,23 @@ 
static void acpi_idle_bm_rld_restore(void) acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld); } -int acpi_processor_suspend(struct device *dev) +int acpi_processor_suspend(struct acpi_device * device, pm_message_t state) { + if (acpi_idle_suspend == 1) + return 0; + acpi_idle_bm_rld_save(); + acpi_idle_suspend = 1; return 0; } -int acpi_processor_resume(struct device *dev) +int acpi_processor_resume(struct acpi_device * device) { + if (acpi_idle_suspend == 0) + return 0; + acpi_idle_bm_rld_restore(); + acpi_idle_suspend = 0; return 0; } @@ -583,6 +595,7 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr, */ cx->valid = 1; + cx->latency_ticks = cx->latency; /* * On older chipsets, BM_RLD needs to be set * in order for Bus Master activity to wake the @@ -615,6 +628,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr) if (!cx->address) break; cx->valid = 1; + cx->latency_ticks = cx->latency; /* Normalize latency */ break; case ACPI_STATE_C3: @@ -749,6 +763,11 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, local_irq_disable(); + if (acpi_idle_suspend) { + local_irq_enable(); + cpu_relax(); + return -EBUSY; + } lapic_timer_state_broadcast(pr, cx, 1); kt1 = ktime_get_real(); @@ -760,6 +779,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, dev->last_residency = (int)idle_time; local_irq_enable(); + cx->usage++; lapic_timer_state_broadcast(pr, cx, 0); return index; @@ -818,6 +838,11 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, local_irq_disable(); + if (acpi_idle_suspend) { + local_irq_enable(); + cpu_relax(); + return -EBUSY; + } if (cx->entry_method != ACPI_CSTATE_FFH) { current_thread_info()->status &= ~TS_POLLING; @@ -862,7 +887,10 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, if (cx->entry_method != ACPI_CSTATE_FFH) current_thread_info()->status |= TS_POLLING; + cx->usage++; + lapic_timer_state_broadcast(pr, cx, 0); + cx->time += idle_time; return index; } @@ -900,7 +928,8 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, drv, drv->safe_state_index); } else { local_irq_disable(); - acpi_safe_halt(); + if (!acpi_idle_suspend) + acpi_safe_halt(); local_irq_enable(); return -EBUSY; } @@ -908,6 +937,11 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, local_irq_disable(); + if (acpi_idle_suspend) { + local_irq_enable(); + cpu_relax(); + return -EBUSY; + } if (cx->entry_method != ACPI_CSTATE_FFH) { current_thread_info()->status &= ~TS_POLLING; @@ -980,7 +1014,10 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, if (cx->entry_method != ACPI_CSTATE_FFH) current_thread_info()->status |= TS_POLLING; + cx->usage++; + lapic_timer_state_broadcast(pr, cx, 0); + cx->time += idle_time; return index; } diff --git a/trunk/drivers/acpi/sbs.c b/trunk/drivers/acpi/sbs.c index c0b9aa5faf4c..6e36d0c0057c 100644 --- a/trunk/drivers/acpi/sbs.c +++ b/trunk/drivers/acpi/sbs.c @@ -988,18 +988,16 @@ static void acpi_sbs_rmdirs(void) #endif } -static int acpi_sbs_resume(struct device *dev) +static int acpi_sbs_resume(struct acpi_device *device) { struct acpi_sbs *sbs; - if (!dev) + if (!device) return -EINVAL; - sbs = to_acpi_device(dev)->driver_data; + sbs = device->driver_data; acpi_sbs_callback(sbs); return 0; } -static SIMPLE_DEV_PM_OPS(acpi_sbs_pm, NULL, acpi_sbs_resume); - static struct acpi_driver acpi_sbs_driver = { .name = "sbs", .class = ACPI_SBS_CLASS, @@ -1007,8 +1005,8 @@ static struct acpi_driver acpi_sbs_driver = { .ops = { .add = acpi_sbs_add, .remove = 
acpi_sbs_remove, + .resume = acpi_sbs_resume, }, - .drv.pm = &acpi_sbs_pm, }; static int __init acpi_sbs_init(void) diff --git a/trunk/drivers/acpi/scan.c b/trunk/drivers/acpi/scan.c index fdda49336560..c8a1f3b68110 100644 --- a/trunk/drivers/acpi/scan.c +++ b/trunk/drivers/acpi/scan.c @@ -290,6 +290,26 @@ static void acpi_device_release(struct device *dev) kfree(acpi_dev); } +static int acpi_device_suspend(struct device *dev, pm_message_t state) +{ + struct acpi_device *acpi_dev = to_acpi_device(dev); + struct acpi_driver *acpi_drv = acpi_dev->driver; + + if (acpi_drv && acpi_drv->ops.suspend) + return acpi_drv->ops.suspend(acpi_dev, state); + return 0; +} + +static int acpi_device_resume(struct device *dev) +{ + struct acpi_device *acpi_dev = to_acpi_device(dev); + struct acpi_driver *acpi_drv = acpi_dev->driver; + + if (acpi_drv && acpi_drv->ops.resume) + return acpi_drv->ops.resume(acpi_dev); + return 0; +} + static int acpi_bus_match(struct device *dev, struct device_driver *drv) { struct acpi_device *acpi_dev = to_acpi_device(dev); @@ -421,6 +441,8 @@ static int acpi_device_remove(struct device * dev) struct bus_type acpi_bus_type = { .name = "acpi", + .suspend = acpi_device_suspend, + .resume = acpi_device_resume, .match = acpi_bus_match, .probe = acpi_device_probe, .remove = acpi_device_remove, diff --git a/trunk/drivers/acpi/thermal.c b/trunk/drivers/acpi/thermal.c index 21dd4c268aef..7dbebea1ec31 100644 --- a/trunk/drivers/acpi/thermal.c +++ b/trunk/drivers/acpi/thermal.c @@ -98,6 +98,7 @@ MODULE_PARM_DESC(psv, "Disable or override all passive trip points."); static int acpi_thermal_add(struct acpi_device *device); static int acpi_thermal_remove(struct acpi_device *device, int type); +static int acpi_thermal_resume(struct acpi_device *device); static void acpi_thermal_notify(struct acpi_device *device, u32 event); static const struct acpi_device_id thermal_device_ids[] = { @@ -106,9 +107,6 @@ static const struct acpi_device_id thermal_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, thermal_device_ids); -static int acpi_thermal_resume(struct device *dev); -static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume); - static struct acpi_driver acpi_thermal_driver = { .name = "thermal", .class = ACPI_THERMAL_CLASS, @@ -116,9 +114,9 @@ static struct acpi_driver acpi_thermal_driver = { .ops = { .add = acpi_thermal_add, .remove = acpi_thermal_remove, + .resume = acpi_thermal_resume, .notify = acpi_thermal_notify, }, - .drv.pm = &acpi_thermal_pm, }; struct acpi_thermal_state { @@ -1043,18 +1041,17 @@ static int acpi_thermal_remove(struct acpi_device *device, int type) return 0; } -static int acpi_thermal_resume(struct device *dev) +static int acpi_thermal_resume(struct acpi_device *device) { - struct acpi_thermal *tz; + struct acpi_thermal *tz = NULL; int i, j, power_state, result; - if (!dev) - return -EINVAL; - tz = acpi_driver_data(to_acpi_device(dev)); - if (!tz) + if (!device || !acpi_driver_data(device)) return -EINVAL; + tz = acpi_driver_data(device); + for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) { if (!(&tz->trips.active[i])) break; diff --git a/trunk/drivers/base/dd.c b/trunk/drivers/base/dd.c index 4b01ab3d2c24..dcb8a6e48692 100644 --- a/trunk/drivers/base/dd.c +++ b/trunk/drivers/base/dd.c @@ -24,7 +24,6 @@ #include #include #include -#include #include "base.h" #include "power/power.h" @@ -333,7 +332,6 @@ void wait_for_device_probe(void) /* wait for the known devices to complete their probing */ wait_event(probe_waitqueue, atomic_read(&probe_count) == 0); 
async_synchronize_full(); - scsi_complete_async_scans(); } EXPORT_SYMBOL_GPL(wait_for_device_probe); diff --git a/trunk/drivers/base/devtmpfs.c b/trunk/drivers/base/devtmpfs.c index d91a3a0b2325..765c3a28077a 100644 --- a/trunk/drivers/base/devtmpfs.c +++ b/trunk/drivers/base/devtmpfs.c @@ -227,24 +227,33 @@ static int handle_create(const char *nodename, umode_t mode, struct device *dev) static int dev_rmdir(const char *name) { - struct path parent; + struct nameidata nd; struct dentry *dentry; int err; - dentry = kern_path_locked(name, &parent); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); - if (dentry->d_inode) { - if (dentry->d_inode->i_private == &thread) - err = vfs_rmdir(parent.dentry->d_inode, dentry); - else - err = -EPERM; + err = kern_path_parent(name, &nd); + if (err) + return err; + + mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); + dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len); + if (!IS_ERR(dentry)) { + if (dentry->d_inode) { + if (dentry->d_inode->i_private == &thread) + err = vfs_rmdir(nd.path.dentry->d_inode, + dentry); + else + err = -EPERM; + } else { + err = -ENOENT; + } + dput(dentry); } else { - err = -ENOENT; + err = PTR_ERR(dentry); } - dput(dentry); - mutex_unlock(&parent.dentry->d_inode->i_mutex); - path_put(&parent); + + mutex_unlock(&nd.path.dentry->d_inode->i_mutex); + path_put(&nd.path); return err; } @@ -296,43 +305,50 @@ static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *sta static int handle_remove(const char *nodename, struct device *dev) { - struct path parent; + struct nameidata nd; struct dentry *dentry; + struct kstat stat; int deleted = 1; int err; - dentry = kern_path_locked(nodename, &parent); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); + err = kern_path_parent(nodename, &nd); + if (err) + return err; - if (dentry->d_inode) { - struct kstat stat; - err = vfs_getattr(parent.mnt, dentry, &stat); - if (!err && dev_mynode(dev, dentry->d_inode, &stat)) { - struct iattr newattrs; - /* - * before unlinking this node, reset permissions - * of possible references like hardlinks - */ - newattrs.ia_uid = 0; - newattrs.ia_gid = 0; - newattrs.ia_mode = stat.mode & ~0777; - newattrs.ia_valid = - ATTR_UID|ATTR_GID|ATTR_MODE; - mutex_lock(&dentry->d_inode->i_mutex); - notify_change(dentry, &newattrs); - mutex_unlock(&dentry->d_inode->i_mutex); - err = vfs_unlink(parent.dentry->d_inode, dentry); - if (!err || err == -ENOENT) - deleted = 1; + mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); + dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len); + if (!IS_ERR(dentry)) { + if (dentry->d_inode) { + err = vfs_getattr(nd.path.mnt, dentry, &stat); + if (!err && dev_mynode(dev, dentry->d_inode, &stat)) { + struct iattr newattrs; + /* + * before unlinking this node, reset permissions + * of possible references like hardlinks + */ + newattrs.ia_uid = 0; + newattrs.ia_gid = 0; + newattrs.ia_mode = stat.mode & ~0777; + newattrs.ia_valid = + ATTR_UID|ATTR_GID|ATTR_MODE; + mutex_lock(&dentry->d_inode->i_mutex); + notify_change(dentry, &newattrs); + mutex_unlock(&dentry->d_inode->i_mutex); + err = vfs_unlink(nd.path.dentry->d_inode, + dentry); + if (!err || err == -ENOENT) + deleted = 1; + } + } else { + err = -ENOENT; } + dput(dentry); } else { - err = -ENOENT; + err = PTR_ERR(dentry); } - dput(dentry); - mutex_unlock(&parent.dentry->d_inode->i_mutex); + mutex_unlock(&nd.path.dentry->d_inode->i_mutex); - path_put(&parent); + path_put(&nd.path); if (deleted && 
strchr(nodename, '/')) delete_path(nodename); return err; diff --git a/trunk/drivers/base/power/domain.c b/trunk/drivers/base/power/domain.c index ba3487c9835b..83aa694a8efe 100644 --- a/trunk/drivers/base/power/domain.c +++ b/trunk/drivers/base/power/domain.c @@ -75,6 +75,19 @@ static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev) start_latency_ns, "start"); } +static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev, + save_state_latency_ns, "state save"); +} + +static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev, + restore_state_latency_ns, + "state restore"); +} + static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) { bool ret = false; @@ -126,19 +139,6 @@ static void genpd_set_active(struct generic_pm_domain *genpd) genpd->status = GPD_STATE_ACTIVE; } -static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd) -{ - s64 usecs64; - - if (!genpd->cpu_data) - return; - - usecs64 = genpd->power_on_latency_ns; - do_div(usecs64, NSEC_PER_USEC); - usecs64 += genpd->cpu_data->saved_exit_latency; - genpd->cpu_data->idle_state->exit_latency = usecs64; -} - /** * __pm_genpd_poweron - Restore power to a given PM domain and its masters. * @genpd: PM domain to power up. @@ -146,7 +146,7 @@ static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd) * Restore power to @genpd and all of its masters so that it is possible to * resume a device belonging to it. */ -static int __pm_genpd_poweron(struct generic_pm_domain *genpd) +int __pm_genpd_poweron(struct generic_pm_domain *genpd) __releases(&genpd->lock) __acquires(&genpd->lock) { struct gpd_link *link; @@ -176,13 +176,6 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) return 0; } - if (genpd->cpu_data) { - cpuidle_pause_and_lock(); - genpd->cpu_data->idle_state->disabled = true; - cpuidle_resume_and_unlock(); - goto out; - } - /* * The list is guaranteed not to change while the loop below is being * executed, unless one of the masters' .power_on() callbacks fiddles @@ -222,7 +215,6 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) if (elapsed_ns > genpd->power_on_latency_ns) { genpd->power_on_latency_ns = elapsed_ns; genpd->max_off_time_changed = true; - genpd_recalc_cpu_exit_latency(genpd); if (genpd->name) pr_warning("%s: Power-on latency exceeded, " "new value %lld ns\n", genpd->name, @@ -230,7 +222,6 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) } } - out: genpd_set_active(genpd); return 0; @@ -260,19 +251,6 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd) #ifdef CONFIG_PM_RUNTIME -static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) -{ - return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev, - save_state_latency_ns, "state save"); -} - -static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev) -{ - return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev, - restore_state_latency_ns, - "state restore"); -} - static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, unsigned long val, void *ptr) { @@ -297,7 +275,7 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, pdd = dev->power.subsys_data ? 
dev->power.subsys_data->domain_data : NULL; - if (pdd && pdd->dev) { + if (pdd) { to_gpd_data(pdd)->td.constraint_changed = true; genpd = dev_to_genpd(dev); } else { @@ -361,16 +339,19 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd, { struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); struct device *dev = pdd->dev; - bool need_restore = gpd_data->need_restore; - gpd_data->need_restore = false; + if (!gpd_data->need_restore) + return; + mutex_unlock(&genpd->lock); genpd_start_dev(genpd, dev); - if (need_restore) - genpd_restore_dev(genpd, dev); + genpd_restore_dev(genpd, dev); + genpd_stop_dev(genpd, dev); mutex_lock(&genpd->lock); + + gpd_data->need_restore = false; } /** @@ -477,21 +458,6 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) } } - if (genpd->cpu_data) { - /* - * If cpu_data is set, cpuidle should turn the domain off when - * the CPU in it is idle. In that case we don't decrement the - * subdomain counts of the master domains, so that power is not - * removed from the current domain prematurely as a result of - * cutting off the masters' power. - */ - genpd->status = GPD_STATE_POWER_OFF; - cpuidle_pause_and_lock(); - genpd->cpu_data->idle_state->disabled = false; - cpuidle_resume_and_unlock(); - goto out; - } - if (genpd->power_off) { ktime_t time_start; s64 elapsed_ns; @@ -629,7 +595,7 @@ static int pm_genpd_runtime_resume(struct device *dev) /* If power.irq_safe, the PM domain is never powered off. */ if (dev->power.irq_safe) - return genpd_start_dev(genpd, dev); + goto out; mutex_lock(&genpd->lock); ret = __pm_genpd_poweron(genpd); @@ -662,6 +628,9 @@ static int pm_genpd_runtime_resume(struct device *dev) wake_up_all(&genpd->status_wait_queue); mutex_unlock(&genpd->lock); + out: + genpd_start_dev(genpd, dev); + return 0; } @@ -1266,27 +1235,6 @@ static void pm_genpd_complete(struct device *dev) #endif /* CONFIG_PM_SLEEP */ -static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev) -{ - struct generic_pm_domain_data *gpd_data; - - gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); - if (!gpd_data) - return NULL; - - mutex_init(&gpd_data->lock); - gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; - dev_pm_qos_add_notifier(dev, &gpd_data->nb); - return gpd_data; -} - -static void __pm_genpd_free_dev_data(struct device *dev, - struct generic_pm_domain_data *gpd_data) -{ - dev_pm_qos_remove_notifier(dev, &gpd_data->nb); - kfree(gpd_data); -} - /** * __pm_genpd_add_device - Add a device to an I/O PM domain. * @genpd: PM domain to add the device to. 
@@ -1296,7 +1244,7 @@ static void __pm_genpd_free_dev_data(struct device *dev, int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, struct gpd_timing_data *td) { - struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL; + struct generic_pm_domain_data *gpd_data; struct pm_domain_data *pdd; int ret = 0; @@ -1305,10 +1253,14 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) return -EINVAL; - gpd_data_new = __pm_genpd_alloc_dev_data(dev); - if (!gpd_data_new) + gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); + if (!gpd_data) return -ENOMEM; + mutex_init(&gpd_data->lock); + gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; + dev_pm_qos_add_notifier(dev, &gpd_data->nb); + genpd_acquire_lock(genpd); if (genpd->prepared_count > 0) { @@ -1322,42 +1274,35 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, goto out; } - ret = dev_pm_get_subsys_data(dev); - if (ret) - goto out; - genpd->device_count++; genpd->max_off_time_changed = true; - spin_lock_irq(&dev->power.lock); - - dev->pm_domain = &genpd->domain; - if (dev->power.subsys_data->domain_data) { - gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); - } else { - gpd_data = gpd_data_new; - dev->power.subsys_data->domain_data = &gpd_data->base; - } - gpd_data->refcount++; - if (td) - gpd_data->td = *td; - - spin_unlock_irq(&dev->power.lock); + dev_pm_get_subsys_data(dev); mutex_lock(&gpd_data->lock); + spin_lock_irq(&dev->power.lock); + dev->pm_domain = &genpd->domain; + dev->power.subsys_data->domain_data = &gpd_data->base; gpd_data->base.dev = dev; list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF; + if (td) + gpd_data->td = *td; + gpd_data->td.constraint_changed = true; gpd_data->td.effective_constraint_ns = -1; + spin_unlock_irq(&dev->power.lock); mutex_unlock(&gpd_data->lock); - out: genpd_release_lock(genpd); - if (gpd_data != gpd_data_new) - __pm_genpd_free_dev_data(dev, gpd_data_new); + return 0; + + out: + genpd_release_lock(genpd); + dev_pm_qos_remove_notifier(dev, &gpd_data->nb); + kfree(gpd_data); return ret; } @@ -1403,7 +1348,6 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, { struct generic_pm_domain_data *gpd_data; struct pm_domain_data *pdd; - bool remove = false; int ret = 0; dev_dbg(dev, "%s()\n", __func__); @@ -1424,28 +1368,22 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, genpd->max_off_time_changed = true; spin_lock_irq(&dev->power.lock); - dev->pm_domain = NULL; pdd = dev->power.subsys_data->domain_data; list_del_init(&pdd->list_node); - gpd_data = to_gpd_data(pdd); - if (--gpd_data->refcount == 0) { - dev->power.subsys_data->domain_data = NULL; - remove = true; - } - + dev->power.subsys_data->domain_data = NULL; spin_unlock_irq(&dev->power.lock); + gpd_data = to_gpd_data(pdd); mutex_lock(&gpd_data->lock); pdd->dev = NULL; mutex_unlock(&gpd_data->lock); genpd_release_lock(genpd); + dev_pm_qos_remove_notifier(dev, &gpd_data->nb); + kfree(gpd_data); dev_pm_put_subsys_data(dev); - if (remove) - __pm_genpd_free_dev_data(dev, gpd_data); - return 0; out: @@ -1603,52 +1541,33 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, * @dev: Device to add the callbacks to. * @ops: Set of callbacks to add. * @td: Timing data to add to the device along with the callbacks (optional). 
- * - * Every call to this routine should be balanced with a call to - * __pm_genpd_remove_callbacks() and they must not be nested. */ int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops, struct gpd_timing_data *td) { - struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL; + struct pm_domain_data *pdd; int ret = 0; - if (!(dev && ops)) + if (!(dev && dev->power.subsys_data && ops)) return -EINVAL; - gpd_data_new = __pm_genpd_alloc_dev_data(dev); - if (!gpd_data_new) - return -ENOMEM; - pm_runtime_disable(dev); device_pm_lock(); - ret = dev_pm_get_subsys_data(dev); - if (ret) - goto out; - - spin_lock_irq(&dev->power.lock); + pdd = dev->power.subsys_data->domain_data; + if (pdd) { + struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); - if (dev->power.subsys_data->domain_data) { - gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); + gpd_data->ops = *ops; + if (td) + gpd_data->td = *td; } else { - gpd_data = gpd_data_new; - dev->power.subsys_data->domain_data = &gpd_data->base; + ret = -EINVAL; } - gpd_data->refcount++; - gpd_data->ops = *ops; - if (td) - gpd_data->td = *td; - - spin_unlock_irq(&dev->power.lock); - out: device_pm_unlock(); pm_runtime_enable(dev); - if (gpd_data != gpd_data_new) - __pm_genpd_free_dev_data(dev, gpd_data_new); - return ret; } EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks); @@ -1657,13 +1576,10 @@ EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks); * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device. * @dev: Device to remove the callbacks from. * @clear_td: If set, clear the device's timing data too. - * - * This routine can only be called after pm_genpd_add_callbacks(). */ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) { - struct generic_pm_domain_data *gpd_data = NULL; - bool remove = false; + struct pm_domain_data *pdd; int ret = 0; if (!(dev && dev->power.subsys_data)) @@ -1672,117 +1588,23 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) pm_runtime_disable(dev); device_pm_lock(); - spin_lock_irq(&dev->power.lock); + pdd = dev->power.subsys_data->domain_data; + if (pdd) { + struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); - if (dev->power.subsys_data->domain_data) { - gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); - gpd_data->ops = (struct gpd_dev_ops){ NULL }; + gpd_data->ops = (struct gpd_dev_ops){ 0 }; if (clear_td) gpd_data->td = (struct gpd_timing_data){ 0 }; - - if (--gpd_data->refcount == 0) { - dev->power.subsys_data->domain_data = NULL; - remove = true; - } } else { ret = -EINVAL; } - spin_unlock_irq(&dev->power.lock); - device_pm_unlock(); pm_runtime_enable(dev); - if (ret) - return ret; - - dev_pm_put_subsys_data(dev); - if (remove) - __pm_genpd_free_dev_data(dev, gpd_data); - - return 0; -} -EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks); - -int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) -{ - struct cpuidle_driver *cpuidle_drv; - struct gpd_cpu_data *cpu_data; - struct cpuidle_state *idle_state; - int ret = 0; - - if (IS_ERR_OR_NULL(genpd) || state < 0) - return -EINVAL; - - genpd_acquire_lock(genpd); - - if (genpd->cpu_data) { - ret = -EEXIST; - goto out; - } - cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL); - if (!cpu_data) { - ret = -ENOMEM; - goto out; - } - cpuidle_drv = cpuidle_driver_ref(); - if (!cpuidle_drv) { - ret = -ENODEV; - goto out; - } - if (cpuidle_drv->state_count <= state) { - ret = -EINVAL; - goto err; - } - idle_state = &cpuidle_drv->states[state]; - if (!idle_state->disabled) { 
- ret = -EAGAIN; - goto err; - } - cpu_data->idle_state = idle_state; - cpu_data->saved_exit_latency = idle_state->exit_latency; - genpd->cpu_data = cpu_data; - genpd_recalc_cpu_exit_latency(genpd); - - out: - genpd_release_lock(genpd); - return ret; - - err: - cpuidle_driver_unref(); - goto out; -} - -int genpd_detach_cpuidle(struct generic_pm_domain *genpd) -{ - struct gpd_cpu_data *cpu_data; - struct cpuidle_state *idle_state; - int ret = 0; - - if (IS_ERR_OR_NULL(genpd)) - return -EINVAL; - - genpd_acquire_lock(genpd); - - cpu_data = genpd->cpu_data; - if (!cpu_data) { - ret = -ENODEV; - goto out; - } - idle_state = cpu_data->idle_state; - if (!idle_state->disabled) { - ret = -EAGAIN; - goto out; - } - idle_state->exit_latency = cpu_data->saved_exit_latency; - cpuidle_driver_unref(); - genpd->cpu_data = NULL; - kfree(cpu_data); - - out: - genpd_release_lock(genpd); return ret; } +EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks); /* Default device callbacks for generic PM domains. */ @@ -1793,24 +1615,16 @@ int genpd_detach_cpuidle(struct generic_pm_domain *genpd) static int pm_genpd_default_save_state(struct device *dev) { int (*cb)(struct device *__dev); + struct device_driver *drv = dev->driver; cb = dev_gpd_data(dev)->ops.save_state; if (cb) return cb(dev); - if (dev->type && dev->type->pm) - cb = dev->type->pm->runtime_suspend; - else if (dev->class && dev->class->pm) - cb = dev->class->pm->runtime_suspend; - else if (dev->bus && dev->bus->pm) - cb = dev->bus->pm->runtime_suspend; - else - cb = NULL; - - if (!cb && dev->driver && dev->driver->pm) - cb = dev->driver->pm->runtime_suspend; + if (drv && drv->pm && drv->pm->runtime_suspend) + return drv->pm->runtime_suspend(dev); - return cb ? cb(dev) : 0; + return 0; } /** @@ -1820,24 +1634,16 @@ static int pm_genpd_default_save_state(struct device *dev) static int pm_genpd_default_restore_state(struct device *dev) { int (*cb)(struct device *__dev); + struct device_driver *drv = dev->driver; cb = dev_gpd_data(dev)->ops.restore_state; if (cb) return cb(dev); - if (dev->type && dev->type->pm) - cb = dev->type->pm->runtime_resume; - else if (dev->class && dev->class->pm) - cb = dev->class->pm->runtime_resume; - else if (dev->bus && dev->bus->pm) - cb = dev->bus->pm->runtime_resume; - else - cb = NULL; + if (drv && drv->pm && drv->pm->runtime_resume) + return drv->pm->runtime_resume(dev); - if (!cb && dev->driver && dev->driver->pm) - cb = dev->driver->pm->runtime_resume; - - return cb ? cb(dev) : 0; + return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/trunk/drivers/base/power/main.c b/trunk/drivers/base/power/main.c index 0113adc310dc..9cb845e49334 100644 --- a/trunk/drivers/base/power/main.c +++ b/trunk/drivers/base/power/main.c @@ -28,7 +28,7 @@ #include #include #include -#include + #include "../base.h" #include "power.h" @@ -45,10 +45,10 @@ typedef int (*pm_callback_t)(struct device *); */ LIST_HEAD(dpm_list); -static LIST_HEAD(dpm_prepared_list); -static LIST_HEAD(dpm_suspended_list); -static LIST_HEAD(dpm_late_early_list); -static LIST_HEAD(dpm_noirq_list); +LIST_HEAD(dpm_prepared_list); +LIST_HEAD(dpm_suspended_list); +LIST_HEAD(dpm_late_early_list); +LIST_HEAD(dpm_noirq_list); struct suspend_stats suspend_stats; static DEFINE_MUTEX(dpm_list_mtx); @@ -166,7 +166,7 @@ static ktime_t initcall_debug_start(struct device *dev) { ktime_t calltime = ktime_set(0, 0); - if (pm_print_times_enabled) { + if (initcall_debug) { pr_info("calling %s+ @ %i, parent: %s\n", dev_name(dev), task_pid_nr(current), dev->parent ? 
dev_name(dev->parent) : "none"); @@ -181,7 +181,7 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime, { ktime_t delta, rettime; - if (pm_print_times_enabled) { + if (initcall_debug) { rettime = ktime_get(); delta = ktime_sub(rettime, calltime); pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), @@ -467,7 +467,6 @@ static void dpm_resume_noirq(pm_message_t state) mutex_unlock(&dpm_list_mtx); dpm_show_time(starttime, state, "noirq"); resume_device_irqs(); - cpuidle_resume(); } /** @@ -868,7 +867,6 @@ static int dpm_suspend_noirq(pm_message_t state) ktime_t starttime = ktime_get(); int error = 0; - cpuidle_pause(); suspend_device_irqs(); mutex_lock(&dpm_list_mtx); while (!list_empty(&dpm_late_early_list)) { @@ -991,16 +989,8 @@ static int dpm_suspend_late(pm_message_t state) int dpm_suspend_end(pm_message_t state) { int error = dpm_suspend_late(state); - if (error) - return error; - - error = dpm_suspend_noirq(state); - if (error) { - dpm_resume_early(state); - return error; - } - return 0; + return error ? : dpm_suspend_noirq(state); } EXPORT_SYMBOL_GPL(dpm_suspend_end); diff --git a/trunk/drivers/base/power/qos.c b/trunk/drivers/base/power/qos.c index 74a67e0019a2..fd849a2c4fa8 100644 --- a/trunk/drivers/base/power/qos.c +++ b/trunk/drivers/base/power/qos.c @@ -462,7 +462,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request); static void __dev_pm_qos_drop_user_request(struct device *dev) { dev_pm_qos_remove_request(dev->power.pq_req); - dev->power.pq_req = NULL; + dev->power.pq_req = 0; } /** diff --git a/trunk/drivers/base/power/sysfs.c b/trunk/drivers/base/power/sysfs.c index b91dc6f1e914..48be2ad4dd2c 100644 --- a/trunk/drivers/base/power/sysfs.c +++ b/trunk/drivers/base/power/sysfs.c @@ -474,8 +474,6 @@ static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL); #endif -#ifdef CONFIG_PM_SLEEP - static ssize_t async_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -502,8 +500,6 @@ static ssize_t async_store(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR(async, 0644, async_show, async_store); - -#endif #endif /* CONFIG_PM_ADVANCED_DEBUG */ static struct attribute *power_attrs[] = { diff --git a/trunk/drivers/base/regmap/internal.h b/trunk/drivers/base/regmap/internal.h index 80f9ab9c3aa4..b986b8660b0c 100644 --- a/trunk/drivers/base/regmap/internal.h +++ b/trunk/drivers/base/regmap/internal.h @@ -95,9 +95,6 @@ struct regmap { /* if set, converts bulk rw to single rw */ bool use_single_rw; - - struct rb_root range_tree; - void *selector_work_buf; /* Scratch buffer used for selector */ }; struct regcache_ops { @@ -118,20 +115,6 @@ bool regmap_precious(struct regmap *map, unsigned int reg); int _regmap_write(struct regmap *map, unsigned int reg, unsigned int val); -struct regmap_range_node { - struct rb_node node; - - unsigned int range_min; - unsigned int range_max; - - unsigned int selector_reg; - unsigned int selector_mask; - int selector_shift; - - unsigned int window_start; - unsigned int window_len; -}; - #ifdef CONFIG_DEBUG_FS extern void regmap_debugfs_initcall(void); extern void regmap_debugfs_init(struct regmap *map, const char *name); diff --git a/trunk/drivers/base/regmap/regmap-irq.c b/trunk/drivers/base/regmap/regmap-irq.c index a89734621e51..4fac4b9be88f 100644 --- a/trunk/drivers/base/regmap/regmap-irq.c +++ b/trunk/drivers/base/regmap/regmap-irq.c @@ -24,18 +24,14 @@ struct regmap_irq_chip_data { struct mutex lock; struct regmap *map; - const struct regmap_irq_chip *chip; + struct 
regmap_irq_chip *chip; int irq_base; struct irq_domain *domain; - int irq; - int wake_count; - unsigned int *status_buf; unsigned int *mask_buf; unsigned int *mask_buf_def; - unsigned int *wake_buf; unsigned int irq_reg_stride; }; @@ -75,16 +71,6 @@ static void regmap_irq_sync_unlock(struct irq_data *data) d->chip->mask_base + (i * map->reg_stride)); } - /* If we've changed our wakeup count propagate it to the parent */ - if (d->wake_count < 0) - for (i = d->wake_count; i < 0; i++) - irq_set_irq_wake(d->irq, 0); - else if (d->wake_count > 0) - for (i = 0; i < d->wake_count; i++) - irq_set_irq_wake(d->irq, 1); - - d->wake_count = 0; - mutex_unlock(&d->lock); } @@ -106,41 +92,18 @@ static void regmap_irq_disable(struct irq_data *data) d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask; } -static int regmap_irq_set_wake(struct irq_data *data, unsigned int on) -{ - struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); - struct regmap *map = d->map; - const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq); - - if (!d->chip->wake_base) - return -EINVAL; - - if (on) { - d->wake_buf[irq_data->reg_offset / map->reg_stride] - &= ~irq_data->mask; - d->wake_count++; - } else { - d->wake_buf[irq_data->reg_offset / map->reg_stride] - |= irq_data->mask; - d->wake_count--; - } - - return 0; -} - static struct irq_chip regmap_irq_chip = { .name = "regmap", .irq_bus_lock = regmap_irq_lock, .irq_bus_sync_unlock = regmap_irq_sync_unlock, .irq_disable = regmap_irq_disable, .irq_enable = regmap_irq_enable, - .irq_set_wake = regmap_irq_set_wake, }; static irqreturn_t regmap_irq_thread(int irq, void *d) { struct regmap_irq_chip_data *data = d; - const struct regmap_irq_chip *chip = data->chip; + struct regmap_irq_chip *chip = data->chip; struct regmap *map = data->map; int ret, i; bool handled = false; @@ -232,7 +195,7 @@ static struct irq_domain_ops regmap_domain_ops = { * register values used by the IRQ controller over suspend and resume. */ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, - int irq_base, const struct regmap_irq_chip *chip, + int irq_base, struct regmap_irq_chip *chip, struct regmap_irq_chip_data **data) { struct regmap_irq_chip_data *d; @@ -277,14 +240,6 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, if (!d->mask_buf_def) goto err_alloc; - if (chip->wake_base) { - d->wake_buf = kzalloc(sizeof(unsigned int) * chip->num_regs, - GFP_KERNEL); - if (!d->wake_buf) - goto err_alloc; - } - - d->irq = irq; d->map = map; d->chip = chip; d->irq_base = irq_base; @@ -339,7 +294,6 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, err_domain: /* Should really dispose of the domain but... */ err_alloc: - kfree(d->wake_buf); kfree(d->mask_buf_def); kfree(d->mask_buf); kfree(d->status_buf); @@ -361,7 +315,6 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d) free_irq(irq, d); /* We should unmap the domain but... 
*/ - kfree(d->wake_buf); kfree(d->mask_buf_def); kfree(d->mask_buf); kfree(d->status_buf); @@ -393,10 +346,6 @@ EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base); */ int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq) { - /* Handle holes in the IRQ list */ - if (!data->chip->irqs[irq].mask) - return -EINVAL; - return irq_create_mapping(data->domain, irq); } EXPORT_SYMBOL_GPL(regmap_irq_get_virq); diff --git a/trunk/drivers/base/regmap/regmap-mmio.c b/trunk/drivers/base/regmap/regmap-mmio.c index f05fc74dd84a..febd6de6c8ac 100644 --- a/trunk/drivers/base/regmap/regmap-mmio.c +++ b/trunk/drivers/base/regmap/regmap-mmio.c @@ -37,7 +37,7 @@ static int regmap_mmio_gather_write(void *context, BUG_ON(reg_size != 4); - offset = *(u32 *)reg; + offset = be32_to_cpup(reg); while (val_size) { switch (ctx->val_bytes) { @@ -45,14 +45,14 @@ static int regmap_mmio_gather_write(void *context, writeb(*(u8 *)val, ctx->regs + offset); break; case 2: - writew(*(u16 *)val, ctx->regs + offset); + writew(be16_to_cpup(val), ctx->regs + offset); break; case 4: - writel(*(u32 *)val, ctx->regs + offset); + writel(be32_to_cpup(val), ctx->regs + offset); break; #ifdef CONFIG_64BIT case 8: - writeq(*(u64 *)val, ctx->regs + offset); + writeq(be64_to_cpup(val), ctx->regs + offset); break; #endif default: @@ -83,7 +83,7 @@ static int regmap_mmio_read(void *context, BUG_ON(reg_size != 4); - offset = *(u32 *)reg; + offset = be32_to_cpup(reg); while (val_size) { switch (ctx->val_bytes) { @@ -91,14 +91,14 @@ static int regmap_mmio_read(void *context, *(u8 *)val = readb(ctx->regs + offset); break; case 2: - *(u16 *)val = readw(ctx->regs + offset); + *(u16 *)val = cpu_to_be16(readw(ctx->regs + offset)); break; case 4: - *(u32 *)val = readl(ctx->regs + offset); + *(u32 *)val = cpu_to_be32(readl(ctx->regs + offset)); break; #ifdef CONFIG_64BIT case 8: - *(u64 *)val = readq(ctx->regs + offset); + *(u64 *)val = cpu_to_be32(readq(ctx->regs + offset)); break; #endif default: @@ -124,11 +124,9 @@ static struct regmap_bus regmap_mmio = { .gather_write = regmap_mmio_gather_write, .read = regmap_mmio_read, .free_context = regmap_mmio_free_context, - .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, - .val_format_endian_default = REGMAP_ENDIAN_NATIVE, }; -static struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs, +struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs, const struct regmap_config *config) { struct regmap_mmio_context *ctx; @@ -164,15 +162,7 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs, if (config->reg_stride < min_stride) return ERR_PTR(-EINVAL); - switch (config->reg_format_endian) { - case REGMAP_ENDIAN_DEFAULT: - case REGMAP_ENDIAN_NATIVE: - break; - default: - return ERR_PTR(-EINVAL); - } - - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + ctx = kzalloc(GFP_KERNEL, sizeof(*ctx)); if (!ctx) return ERR_PTR(-ENOMEM); diff --git a/trunk/drivers/base/regmap/regmap.c b/trunk/drivers/base/regmap/regmap.c index c241ae2f2f10..c89aa01fb1de 100644 --- a/trunk/drivers/base/regmap/regmap.c +++ b/trunk/drivers/base/regmap/regmap.c @@ -15,25 +15,12 @@ #include #include #include -#include #define CREATE_TRACE_POINTS #include #include "internal.h" -/* - * Sometimes for failures during very early init the trace - * infrastructure isn't available early enough to be used. For this - * sort of problem defining LOG_DEVICE will add printks for basic - * register I/O on a specific device. 
- */ -#undef LOG_DEVICE - -static int _regmap_update_bits(struct regmap *map, unsigned int reg, - unsigned int mask, unsigned int val, - bool *change); - bool regmap_writeable(struct regmap *map, unsigned int reg) { if (map->max_register && reg > map->max_register) @@ -132,19 +119,13 @@ static void regmap_format_8(void *buf, unsigned int val, unsigned int shift) b[0] = val << shift; } -static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift) +static void regmap_format_16(void *buf, unsigned int val, unsigned int shift) { __be16 *b = buf; b[0] = cpu_to_be16(val << shift); } -static void regmap_format_16_native(void *buf, unsigned int val, - unsigned int shift) -{ - *(u16 *)buf = val << shift; -} - static void regmap_format_24(void *buf, unsigned int val, unsigned int shift) { u8 *b = buf; @@ -156,19 +137,13 @@ static void regmap_format_24(void *buf, unsigned int val, unsigned int shift) b[2] = val; } -static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift) +static void regmap_format_32(void *buf, unsigned int val, unsigned int shift) { __be32 *b = buf; b[0] = cpu_to_be32(val << shift); } -static void regmap_format_32_native(void *buf, unsigned int val, - unsigned int shift) -{ - *(u32 *)buf = val << shift; -} - static unsigned int regmap_parse_8(void *buf) { u8 *b = buf; @@ -176,7 +151,7 @@ static unsigned int regmap_parse_8(void *buf) return b[0]; } -static unsigned int regmap_parse_16_be(void *buf) +static unsigned int regmap_parse_16(void *buf) { __be16 *b = buf; @@ -185,11 +160,6 @@ static unsigned int regmap_parse_16_be(void *buf) return b[0]; } -static unsigned int regmap_parse_16_native(void *buf) -{ - return *(u16 *)buf; -} - static unsigned int regmap_parse_24(void *buf) { u8 *b = buf; @@ -200,7 +170,7 @@ static unsigned int regmap_parse_24(void *buf) return ret; } -static unsigned int regmap_parse_32_be(void *buf) +static unsigned int regmap_parse_32(void *buf) { __be32 *b = buf; @@ -209,11 +179,6 @@ static unsigned int regmap_parse_32_be(void *buf) return b[0]; } -static unsigned int regmap_parse_32_native(void *buf) -{ - return *(u32 *)buf; -} - static void regmap_lock_mutex(struct regmap *map) { mutex_lock(&map->mutex); @@ -243,67 +208,6 @@ static void dev_get_regmap_release(struct device *dev, void *res) */ } -static bool _regmap_range_add(struct regmap *map, - struct regmap_range_node *data) -{ - struct rb_root *root = &map->range_tree; - struct rb_node **new = &(root->rb_node), *parent = NULL; - - while (*new) { - struct regmap_range_node *this = - container_of(*new, struct regmap_range_node, node); - - parent = *new; - if (data->range_max < this->range_min) - new = &((*new)->rb_left); - else if (data->range_min > this->range_max) - new = &((*new)->rb_right); - else - return false; - } - - rb_link_node(&data->node, parent, new); - rb_insert_color(&data->node, root); - - return true; -} - -static struct regmap_range_node *_regmap_range_lookup(struct regmap *map, - unsigned int reg) -{ - struct rb_node *node = map->range_tree.rb_node; - - while (node) { - struct regmap_range_node *this = - container_of(node, struct regmap_range_node, node); - - if (reg < this->range_min) - node = node->rb_left; - else if (reg > this->range_max) - node = node->rb_right; - else - return this; - } - - return NULL; -} - -static void regmap_range_exit(struct regmap *map) -{ - struct rb_node *next; - struct regmap_range_node *range_node; - - next = rb_first(&map->range_tree); - while (next) { - range_node = rb_entry(next, struct regmap_range_node, 
node); - next = rb_next(&range_node->node); - rb_erase(&range_node->node, &map->range_tree); - kfree(range_node); - } - - kfree(map->selector_work_buf); -} - /** * regmap_init(): Initialise register map * @@ -323,8 +227,6 @@ struct regmap *regmap_init(struct device *dev, { struct regmap *map, **m; int ret = -EINVAL; - enum regmap_endian reg_endian, val_endian; - int i, j; if (!bus || !config) goto err; @@ -373,18 +275,6 @@ struct regmap *regmap_init(struct device *dev, map->read_flag_mask = bus->read_flag_mask; } - reg_endian = config->reg_format_endian; - if (reg_endian == REGMAP_ENDIAN_DEFAULT) - reg_endian = bus->reg_format_endian_default; - if (reg_endian == REGMAP_ENDIAN_DEFAULT) - reg_endian = REGMAP_ENDIAN_BIG; - - val_endian = config->val_format_endian; - if (val_endian == REGMAP_ENDIAN_DEFAULT) - val_endian = bus->val_format_endian_default; - if (val_endian == REGMAP_ENDIAN_DEFAULT) - val_endian = REGMAP_ENDIAN_BIG; - switch (config->reg_bits + map->reg_shift) { case 2: switch (config->val_bits) { @@ -431,29 +321,11 @@ struct regmap *regmap_init(struct device *dev, break; case 16: - switch (reg_endian) { - case REGMAP_ENDIAN_BIG: - map->format.format_reg = regmap_format_16_be; - break; - case REGMAP_ENDIAN_NATIVE: - map->format.format_reg = regmap_format_16_native; - break; - default: - goto err_map; - } + map->format.format_reg = regmap_format_16; break; case 32: - switch (reg_endian) { - case REGMAP_ENDIAN_BIG: - map->format.format_reg = regmap_format_32_be; - break; - case REGMAP_ENDIAN_NATIVE: - map->format.format_reg = regmap_format_32_native; - break; - default: - goto err_map; - } + map->format.format_reg = regmap_format_32; break; default: @@ -466,47 +338,21 @@ struct regmap *regmap_init(struct device *dev, map->format.parse_val = regmap_parse_8; break; case 16: - switch (val_endian) { - case REGMAP_ENDIAN_BIG: - map->format.format_val = regmap_format_16_be; - map->format.parse_val = regmap_parse_16_be; - break; - case REGMAP_ENDIAN_NATIVE: - map->format.format_val = regmap_format_16_native; - map->format.parse_val = regmap_parse_16_native; - break; - default: - goto err_map; - } + map->format.format_val = regmap_format_16; + map->format.parse_val = regmap_parse_16; break; case 24: - if (val_endian != REGMAP_ENDIAN_BIG) - goto err_map; map->format.format_val = regmap_format_24; map->format.parse_val = regmap_parse_24; break; case 32: - switch (val_endian) { - case REGMAP_ENDIAN_BIG: - map->format.format_val = regmap_format_32_be; - map->format.parse_val = regmap_parse_32_be; - break; - case REGMAP_ENDIAN_NATIVE: - map->format.format_val = regmap_format_32_native; - map->format.parse_val = regmap_parse_32_native; - break; - default: - goto err_map; - } + map->format.format_val = regmap_format_32; + map->format.parse_val = regmap_parse_32; break; } - if (map->format.format_write) { - if ((reg_endian != REGMAP_ENDIAN_BIG) || - (val_endian != REGMAP_ENDIAN_BIG)) - goto err_map; + if (map->format.format_write) map->use_single_rw = true; - } if (!map->format.format_write && !(map->format.format_reg && map->format.format_val)) @@ -518,88 +364,27 @@ struct regmap *regmap_init(struct device *dev, goto err_map; } - map->range_tree = RB_ROOT; - for (i = 0; i < config->n_ranges; i++) { - const struct regmap_range_cfg *range_cfg = &config->ranges[i]; - struct regmap_range_node *new; - - /* Sanity check */ - if (range_cfg->range_max < range_cfg->range_min || - range_cfg->range_max > map->max_register || - range_cfg->selector_reg > map->max_register || - range_cfg->window_len == 0) - 
goto err_range; - - /* Make sure, that this register range has no selector - or data window within its boundary */ - for (j = 0; j < config->n_ranges; j++) { - unsigned sel_reg = config->ranges[j].selector_reg; - unsigned win_min = config->ranges[j].window_start; - unsigned win_max = win_min + - config->ranges[j].window_len - 1; - - if (range_cfg->range_min <= sel_reg && - sel_reg <= range_cfg->range_max) { - goto err_range; - } - - if (!(win_max < range_cfg->range_min || - win_min > range_cfg->range_max)) { - goto err_range; - } - } - - new = kzalloc(sizeof(*new), GFP_KERNEL); - if (new == NULL) { - ret = -ENOMEM; - goto err_range; - } - - new->range_min = range_cfg->range_min; - new->range_max = range_cfg->range_max; - new->selector_reg = range_cfg->selector_reg; - new->selector_mask = range_cfg->selector_mask; - new->selector_shift = range_cfg->selector_shift; - new->window_start = range_cfg->window_start; - new->window_len = range_cfg->window_len; - - if (_regmap_range_add(map, new) == false) { - kfree(new); - goto err_range; - } - - if (map->selector_work_buf == NULL) { - map->selector_work_buf = - kzalloc(map->format.buf_size, GFP_KERNEL); - if (map->selector_work_buf == NULL) { - ret = -ENOMEM; - goto err_range; - } - } - } + regmap_debugfs_init(map, config->name); ret = regcache_init(map, config); if (ret < 0) - goto err_range; - - regmap_debugfs_init(map, config->name); + goto err_debugfs; /* Add a devres resource for dev_get_regmap() */ m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); if (!m) { ret = -ENOMEM; - goto err_debugfs; + goto err_cache; } *m = map; devres_add(dev, m); return map; +err_cache: + regcache_exit(map); err_debugfs: regmap_debugfs_exit(map); - regcache_exit(map); -err_range: - regmap_range_exit(map); kfree(map->work_buf); err_map: kfree(map); @@ -696,7 +481,6 @@ void regmap_exit(struct regmap *map) { regcache_exit(map); regmap_debugfs_exit(map); - regmap_range_exit(map); if (map->bus->free_context) map->bus->free_context(map->bus_context); kfree(map->work_buf); @@ -742,57 +526,6 @@ struct regmap *dev_get_regmap(struct device *dev, const char *name) } EXPORT_SYMBOL_GPL(dev_get_regmap); -static int _regmap_select_page(struct regmap *map, unsigned int *reg, - unsigned int val_num) -{ - struct regmap_range_node *range; - void *orig_work_buf; - unsigned int win_offset; - unsigned int win_page; - bool page_chg; - int ret; - - range = _regmap_range_lookup(map, *reg); - if (range) { - win_offset = (*reg - range->range_min) % range->window_len; - win_page = (*reg - range->range_min) / range->window_len; - - if (val_num > 1) { - /* Bulk write shouldn't cross range boundary */ - if (*reg + val_num - 1 > range->range_max) - return -EINVAL; - - /* ... or single page boundary */ - if (val_num > range->window_len - win_offset) - return -EINVAL; - } - - /* It is possible to have selector register inside data window. - In that case, selector register is located on every page and - it needs no page switching, when accessed alone. 
*/ - if (val_num > 1 || - range->window_start + win_offset != range->selector_reg) { - /* Use separate work_buf during page switching */ - orig_work_buf = map->work_buf; - map->work_buf = map->selector_work_buf; - - ret = _regmap_update_bits(map, range->selector_reg, - range->selector_mask, - win_page << range->selector_shift, - &page_chg); - - map->work_buf = orig_work_buf; - - if (ret < 0) - return ret; - } - - *reg = range->window_start + win_offset; - } - - return 0; -} - static int _regmap_raw_write(struct regmap *map, unsigned int reg, const void *val, size_t val_len) { @@ -830,10 +563,6 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg, } } - ret = _regmap_select_page(map, ®, val_len / map->format.val_bytes); - if (ret < 0) - return ret; - map->format.format_reg(map->work_buf, reg, map->reg_shift); u8[0] |= map->write_flag_mask; @@ -894,18 +623,9 @@ int _regmap_write(struct regmap *map, unsigned int reg, } } -#ifdef LOG_DEVICE - if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0) - dev_info(map->dev, "%x <= %x\n", reg, val); -#endif - trace_regmap_reg_write(map->dev, reg, val); if (map->format.format_write) { - ret = _regmap_select_page(map, ®, 1); - if (ret < 0) - return ret; - map->format.format_write(map, reg, val); trace_regmap_hw_write_start(map->dev, reg, 1); @@ -1063,10 +783,6 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, u8 *u8 = map->work_buf; int ret; - ret = _regmap_select_page(map, ®, val_len / map->format.val_bytes); - if (ret < 0) - return ret; - map->format.format_reg(map->work_buf, reg, map->reg_shift); /* @@ -1110,12 +826,6 @@ static int _regmap_read(struct regmap *map, unsigned int reg, ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes); if (ret == 0) { *val = map->format.parse_val(map->work_buf); - -#ifdef LOG_DEVICE - if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0) - dev_info(map->dev, "%x => %x\n", reg, *val); -#endif - trace_regmap_reg_read(map->dev, reg, *val); } @@ -1272,9 +982,11 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg, int ret; unsigned int tmp, orig; + map->lock(map); + ret = _regmap_read(map, reg, &orig); if (ret != 0) - return ret; + goto out; tmp = orig & ~mask; tmp |= val & mask; @@ -1286,6 +998,9 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg, *change = false; } +out: + map->unlock(map); + return ret; } @@ -1303,13 +1018,7 @@ int regmap_update_bits(struct regmap *map, unsigned int reg, unsigned int mask, unsigned int val) { bool change; - int ret; - - map->lock(map); - ret = _regmap_update_bits(map, reg, mask, val, &change); - map->unlock(map); - - return ret; + return _regmap_update_bits(map, reg, mask, val, &change); } EXPORT_SYMBOL_GPL(regmap_update_bits); @@ -1329,12 +1038,7 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg, unsigned int mask, unsigned int val, bool *change) { - int ret; - - map->lock(map); - ret = _regmap_update_bits(map, reg, mask, val, change); - map->unlock(map); - return ret; + return _regmap_update_bits(map, reg, mask, val, change); } EXPORT_SYMBOL_GPL(regmap_update_bits_check); diff --git a/trunk/drivers/block/mg_disk.c b/trunk/drivers/block/mg_disk.c index 1788f491e0fb..76fa3deaee84 100644 --- a/trunk/drivers/block/mg_disk.c +++ b/trunk/drivers/block/mg_disk.c @@ -780,9 +780,9 @@ static const struct block_device_operations mg_disk_ops = { .getgeo = mg_getgeo }; -static int mg_suspend(struct device *dev) +static int mg_suspend(struct platform_device *plat_dev, pm_message_t state) { 
- struct mg_drv_data *prv_data = dev->platform_data; + struct mg_drv_data *prv_data = plat_dev->dev.platform_data; struct mg_host *host = prv_data->host; if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) @@ -804,9 +804,9 @@ static int mg_suspend(struct device *dev) return 0; } -static int mg_resume(struct device *dev) +static int mg_resume(struct platform_device *plat_dev) { - struct mg_drv_data *prv_data = dev->platform_data; + struct mg_drv_data *prv_data = plat_dev->dev.platform_data; struct mg_host *host = prv_data->host; if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) @@ -825,8 +825,6 @@ static int mg_resume(struct device *dev) return 0; } -static SIMPLE_DEV_PM_OPS(mg_pm, mg_suspend, mg_resume); - static int mg_probe(struct platform_device *plat_dev) { struct mg_host *host; @@ -1076,10 +1074,11 @@ static int mg_remove(struct platform_device *plat_dev) static struct platform_driver mg_disk_driver = { .probe = mg_probe, .remove = mg_remove, + .suspend = mg_suspend, + .resume = mg_resume, .driver = { .name = MG_DEV_NAME, .owner = THIS_MODULE, - .pm = &mg_pm, } }; diff --git a/trunk/drivers/block/rbd.c b/trunk/drivers/block/rbd.c index 8f428a8ab003..65665c9c42c6 100644 --- a/trunk/drivers/block/rbd.c +++ b/trunk/drivers/block/rbd.c @@ -499,7 +499,7 @@ static int rbd_header_from_disk(struct rbd_image_header *header, / sizeof (*ondisk)) return -EINVAL; header->snapc = kmalloc(sizeof(struct ceph_snap_context) + - snap_count * sizeof(u64), + snap_count * sizeof (*ondisk), gfp_flags); if (!header->snapc) return -ENOMEM; @@ -977,7 +977,7 @@ static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg) op = (void *)(replyhead + 1); rc = le32_to_cpu(replyhead->result); bytes = le64_to_cpu(op->extent.length); - read_op = (le16_to_cpu(op->op) == CEPH_OSD_OP_READ); + read_op = (le32_to_cpu(op->op) == CEPH_OSD_OP_READ); dout("rbd_req_cb bytes=%lld readop=%d rc=%d\n", bytes, read_op, rc); diff --git a/trunk/drivers/char/hw_random/omap-rng.c b/trunk/drivers/char/hw_random/omap-rng.c index d706bd0e9e80..1412565c01af 100644 --- a/trunk/drivers/char/hw_random/omap-rng.c +++ b/trunk/drivers/char/hw_random/omap-rng.c @@ -162,24 +162,22 @@ static int __exit omap_rng_remove(struct platform_device *pdev) #ifdef CONFIG_PM -static int omap_rng_suspend(struct device *dev) +static int omap_rng_suspend(struct platform_device *pdev, pm_message_t message) { omap_rng_write_reg(RNG_MASK_REG, 0x0); return 0; } -static int omap_rng_resume(struct device *dev) +static int omap_rng_resume(struct platform_device *pdev) { omap_rng_write_reg(RNG_MASK_REG, 0x1); return 0; } -static SIMPLE_DEV_PM_OPS(omap_rng_pm, omap_rng_suspend, omap_rng_resume); -#define OMAP_RNG_PM (&omap_rng_pm) - #else -#define OMAP_RNG_PM NULL +#define omap_rng_suspend NULL +#define omap_rng_resume NULL #endif @@ -190,10 +188,11 @@ static struct platform_driver omap_rng_driver = { .driver = { .name = "omap_rng", .owner = THIS_MODULE, - .pm = OMAP_RNG_PM, }, .probe = omap_rng_probe, .remove = __exit_p(omap_rng_remove), + .suspend = omap_rng_suspend, + .resume = omap_rng_resume }; static int __init omap_rng_init(void) diff --git a/trunk/drivers/char/ipmi/ipmi_si_intf.c b/trunk/drivers/char/ipmi/ipmi_si_intf.c index 83f85cf7fb1b..1e638fff40ea 100644 --- a/trunk/drivers/char/ipmi/ipmi_si_intf.c +++ b/trunk/drivers/char/ipmi/ipmi_si_intf.c @@ -2503,6 +2503,18 @@ static void __devexit ipmi_pci_remove(struct pci_dev *pdev) cleanup_one_si(info); } +#ifdef CONFIG_PM +static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state) +{ 
+ return 0; +} + +static int ipmi_pci_resume(struct pci_dev *pdev) +{ + return 0; +} +#endif + static struct pci_device_id ipmi_pci_devices[] = { { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) }, { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) }, @@ -2515,6 +2527,10 @@ static struct pci_driver ipmi_pci_driver = { .id_table = ipmi_pci_devices, .probe = ipmi_pci_probe, .remove = __devexit_p(ipmi_pci_remove), +#ifdef CONFIG_PM + .suspend = ipmi_pci_suspend, + .resume = ipmi_pci_resume, +#endif }; #endif /* CONFIG_PCI */ diff --git a/trunk/drivers/char/sonypi.c b/trunk/drivers/char/sonypi.c index f87780502b41..45713f0e7d61 100644 --- a/trunk/drivers/char/sonypi.c +++ b/trunk/drivers/char/sonypi.c @@ -1459,7 +1459,7 @@ static int __devexit sonypi_remove(struct platform_device *dev) #ifdef CONFIG_PM static int old_camera_power; -static int sonypi_suspend(struct device *dev) +static int sonypi_suspend(struct platform_device *dev, pm_message_t state) { old_camera_power = sonypi_device.camera_power; sonypi_disable(); @@ -1467,16 +1467,14 @@ static int sonypi_suspend(struct device *dev) return 0; } -static int sonypi_resume(struct device *dev) +static int sonypi_resume(struct platform_device *dev) { sonypi_enable(old_camera_power); return 0; } - -static SIMPLE_DEV_PM_OPS(sonypi_pm, sonypi_suspend, sonypi_resume); -#define SONYPI_PM (&sonypi_pm) #else -#define SONYPI_PM NULL +#define sonypi_suspend NULL +#define sonypi_resume NULL #endif static void sonypi_shutdown(struct platform_device *dev) @@ -1488,11 +1486,12 @@ static struct platform_driver sonypi_driver = { .driver = { .name = "sonypi", .owner = THIS_MODULE, - .pm = SONYPI_PM, }, .probe = sonypi_probe, .remove = __devexit_p(sonypi_remove), .shutdown = sonypi_shutdown, + .suspend = sonypi_suspend, + .resume = sonypi_resume, }; static struct platform_device *sonypi_platform_device; diff --git a/trunk/drivers/char/tpm/tpm.c b/trunk/drivers/char/tpm/tpm.c index ae43ac55fc1e..ad7c7320dd1b 100644 --- a/trunk/drivers/char/tpm/tpm.c +++ b/trunk/drivers/char/tpm/tpm.c @@ -1274,7 +1274,7 @@ static struct tpm_input_header savestate_header = { * We are about to suspend. Save the TPM state * so that it can be restored. 
*/ -int tpm_pm_suspend(struct device *dev) +int tpm_pm_suspend(struct device *dev, pm_message_t pm_state) { struct tpm_chip *chip = dev_get_drvdata(dev); struct tpm_cmd_t cmd; diff --git a/trunk/drivers/char/tpm/tpm.h b/trunk/drivers/char/tpm/tpm.h index 917f727e6740..b1c5280ac159 100644 --- a/trunk/drivers/char/tpm/tpm.h +++ b/trunk/drivers/char/tpm/tpm.h @@ -299,7 +299,7 @@ extern ssize_t tpm_write(struct file *, const char __user *, size_t, loff_t *); extern ssize_t tpm_read(struct file *, char __user *, size_t, loff_t *); extern void tpm_remove_hardware(struct device *); -extern int tpm_pm_suspend(struct device *); +extern int tpm_pm_suspend(struct device *, pm_message_t); extern int tpm_pm_resume(struct device *); extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long, wait_queue_head_t *); diff --git a/trunk/drivers/char/tpm/tpm_atmel.c b/trunk/drivers/char/tpm/tpm_atmel.c index 678d57019dc4..c64a1bc65349 100644 --- a/trunk/drivers/char/tpm/tpm_atmel.c +++ b/trunk/drivers/char/tpm/tpm_atmel.c @@ -168,14 +168,22 @@ static void atml_plat_remove(void) } } -static SIMPLE_DEV_PM_OPS(tpm_atml_pm, tpm_pm_suspend, tpm_pm_resume); +static int tpm_atml_suspend(struct platform_device *dev, pm_message_t msg) +{ + return tpm_pm_suspend(&dev->dev, msg); +} +static int tpm_atml_resume(struct platform_device *dev) +{ + return tpm_pm_resume(&dev->dev); +} static struct platform_driver atml_drv = { .driver = { .name = "tpm_atmel", .owner = THIS_MODULE, - .pm = &tpm_atml_pm, }, + .suspend = tpm_atml_suspend, + .resume = tpm_atml_resume, }; static int __init init_atmel(void) diff --git a/trunk/drivers/char/tpm/tpm_nsc.c b/trunk/drivers/char/tpm/tpm_nsc.c index 640c9a427b59..4d2464871ada 100644 --- a/trunk/drivers/char/tpm/tpm_nsc.c +++ b/trunk/drivers/char/tpm/tpm_nsc.c @@ -274,13 +274,22 @@ static void tpm_nsc_remove(struct device *dev) } } -static SIMPLE_DEV_PM_OPS(tpm_nsc_pm, tpm_pm_suspend, tpm_pm_resume); +static int tpm_nsc_suspend(struct platform_device *dev, pm_message_t msg) +{ + return tpm_pm_suspend(&dev->dev, msg); +} + +static int tpm_nsc_resume(struct platform_device *dev) +{ + return tpm_pm_resume(&dev->dev); +} static struct platform_driver nsc_drv = { + .suspend = tpm_nsc_suspend, + .resume = tpm_nsc_resume, .driver = { .name = "tpm_nsc", .owner = THIS_MODULE, - .pm = &tpm_nsc_pm, }, }; diff --git a/trunk/drivers/char/tpm/tpm_tis.c b/trunk/drivers/char/tpm/tpm_tis.c index 89682fa8801e..d2a70cae76df 100644 --- a/trunk/drivers/char/tpm/tpm_tis.c +++ b/trunk/drivers/char/tpm/tpm_tis.c @@ -750,7 +750,7 @@ static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev, static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg) { - return tpm_pm_suspend(&dev->dev); + return tpm_pm_suspend(&dev->dev, msg); } static int tpm_tis_pnp_resume(struct pnp_dev *dev) @@ -806,25 +806,27 @@ module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id, sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444); MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe"); #endif +static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg) +{ + return tpm_pm_suspend(&dev->dev, msg); +} -static int tpm_tis_resume(struct device *dev) +static int tpm_tis_resume(struct platform_device *dev) { - struct tpm_chip *chip = dev_get_drvdata(dev); + struct tpm_chip *chip = dev_get_drvdata(&dev->dev); if (chip->vendor.irq) tpm_tis_reenable_interrupts(chip); - return tpm_pm_resume(dev); + return tpm_pm_resume(&dev->dev); } - -static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, 
tpm_tis_resume); - static struct platform_driver tis_drv = { .driver = { .name = "tpm_tis", .owner = THIS_MODULE, - .pm = &tpm_tis_pm, }, + .suspend = tpm_tis_suspend, + .resume = tpm_tis_resume, }; static struct platform_device *pdev; diff --git a/trunk/drivers/clk/spear/spear1310_clock.c b/trunk/drivers/clk/spear/spear1310_clock.c index 0fcec2aae19c..8f05652d53e6 100644 --- a/trunk/drivers/clk/spear/spear1310_clock.c +++ b/trunk/drivers/clk/spear/spear1310_clock.c @@ -345,30 +345,31 @@ static struct frac_rate_tbl gen_rtbl[] = { /* clock parents */ static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", }; static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", }; -static const char *uart0_parents[] = { "pll5_clk", "uart_syn_gclk", }; -static const char *c3_parents[] = { "pll5_clk", "c3_syn_gclk", }; -static const char *gmac_phy_input_parents[] = { "gmii_pad_clk", "pll2_clk", +static const char *uart0_parents[] = { "pll5_clk", "uart_synth_gate_clk", }; +static const char *c3_parents[] = { "pll5_clk", "c3_synth_gate_clk", }; +static const char *gmac_phy_input_parents[] = { "gmii_125m_pad_clk", "pll2_clk", "osc_25m_clk", }; -static const char *gmac_phy_parents[] = { "phy_input_mclk", "phy_syn_gclk", }; +static const char *gmac_phy_parents[] = { "gmac_phy_input_mux_clk", + "gmac_phy_synth_gate_clk", }; static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", }; -static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_syn_clk", }; +static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_synth_clk", }; static const char *i2s_src_parents[] = { "vco1div2_clk", "none", "pll3_clk", "i2s_src_pad_clk", }; -static const char *i2s_ref_parents[] = { "i2s_src_mclk", "i2s_prs1_clk", }; +static const char *i2s_ref_parents[] = { "i2s_src_mux_clk", "i2s_prs1_clk", }; static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk", "pll3_clk", }; static const char *gen_synth2_3_parents[] = { "vco1div4_clk", "vco3div2_clk", "pll2_clk", }; static const char *rmii_phy_parents[] = { "ras_tx50_clk", "none", - "ras_pll2_clk", "ras_syn0_clk", }; + "ras_pll2_clk", "ras_synth0_clk", }; static const char *smii_rgmii_phy_parents[] = { "none", "ras_tx125_clk", - "ras_pll2_clk", "ras_syn0_clk", }; -static const char *uart_parents[] = { "ras_apb_clk", "gen_syn3_clk", }; -static const char *i2c_parents[] = { "ras_apb_clk", "gen_syn1_clk", }; -static const char *ssp1_parents[] = { "ras_apb_clk", "gen_syn1_clk", + "ras_pll2_clk", "ras_synth0_clk", }; +static const char *uart_parents[] = { "ras_apb_clk", "gen_synth3_clk", }; +static const char *i2c_parents[] = { "ras_apb_clk", "gen_synth1_clk", }; +static const char *ssp1_parents[] = { "ras_apb_clk", "gen_synth1_clk", "ras_plclk0_clk", }; -static const char *pci_parents[] = { "ras_pll3_clk", "gen_syn2_clk", }; -static const char *tdm_parents[] = { "ras_pll3_clk", "gen_syn1_clk", }; +static const char *pci_parents[] = { "ras_pll3_clk", "gen_synth2_clk", }; +static const char *tdm_parents[] = { "ras_pll3_clk", "gen_synth1_clk", }; void __init spear1310_clk_init(void) { @@ -389,9 +390,9 @@ void __init spear1310_clk_init(void) 25000000); clk_register_clkdev(clk, "osc_25m_clk", NULL); - clk = clk_register_fixed_rate(NULL, "gmii_pad_clk", NULL, CLK_IS_ROOT, - 125000000); - clk_register_clkdev(clk, "gmii_pad_clk", NULL); + clk = clk_register_fixed_rate(NULL, "gmii_125m_pad_clk", NULL, + CLK_IS_ROOT, 125000000); + clk_register_clkdev(clk, "gmii_125m_pad_clk", NULL); clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL, 
CLK_IS_ROOT, 12288000); @@ -405,34 +406,34 @@ void __init spear1310_clk_init(void) /* clock derived from 24 or 25 MHz osc clk */ /* vco-pll */ - clk = clk_register_mux(NULL, "vco1_mclk", vco_parents, + clk = clk_register_mux(NULL, "vco1_mux_clk", vco_parents, ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG, SPEAR1310_PLL1_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "vco1_mclk", NULL); - clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mclk", + clk_register_clkdev(clk, "vco1_mux_clk", NULL); + clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mux_clk", 0, SPEAR1310_PLL1_CTR, SPEAR1310_PLL1_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL); clk_register_clkdev(clk, "vco1_clk", NULL); clk_register_clkdev(clk1, "pll1_clk", NULL); - clk = clk_register_mux(NULL, "vco2_mclk", vco_parents, + clk = clk_register_mux(NULL, "vco2_mux_clk", vco_parents, ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG, SPEAR1310_PLL2_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "vco2_mclk", NULL); - clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mclk", + clk_register_clkdev(clk, "vco2_mux_clk", NULL); + clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mux_clk", 0, SPEAR1310_PLL2_CTR, SPEAR1310_PLL2_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL); clk_register_clkdev(clk, "vco2_clk", NULL); clk_register_clkdev(clk1, "pll2_clk", NULL); - clk = clk_register_mux(NULL, "vco3_mclk", vco_parents, + clk = clk_register_mux(NULL, "vco3_mux_clk", vco_parents, ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG, SPEAR1310_PLL3_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "vco3_mclk", NULL); - clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mclk", + clk_register_clkdev(clk, "vco3_mux_clk", NULL); + clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mux_clk", 0, SPEAR1310_PLL3_CTR, SPEAR1310_PLL3_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL); clk_register_clkdev(clk, "vco3_clk", NULL); @@ -472,7 +473,7 @@ void __init spear1310_clk_init(void) /* peripherals */ clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1, 128); - clk = clk_register_gate(NULL, "thermal_gclk", "thermal_clk", 0, + clk = clk_register_gate(NULL, "thermal_gate_clk", "thermal_clk", 0, SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_THSENS_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "spear_thermal"); @@ -499,176 +500,177 @@ void __init spear1310_clk_init(void) clk_register_clkdev(clk, "apb_clk", NULL); /* gpt clocks */ - clk = clk_register_mux(NULL, "gpt0_mclk", gpt_parents, + clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt_parents, ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GPT0_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "gpt0_mclk", NULL); - clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mclk", 0, + clk_register_clkdev(clk, "gpt0_mux_clk", NULL); + clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mux_clk", 0, SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT0_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "gpt0"); - clk = clk_register_mux(NULL, "gpt1_mclk", gpt_parents, + clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt_parents, ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GPT1_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "gpt1_mclk", NULL); - clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0, + clk_register_clkdev(clk, "gpt1_mux_clk", NULL); + clk = 
clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0, SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT1_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "gpt1"); - clk = clk_register_mux(NULL, "gpt2_mclk", gpt_parents, + clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt_parents, ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GPT2_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "gpt2_mclk", NULL); - clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0, + clk_register_clkdev(clk, "gpt2_mux_clk", NULL); + clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0, SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT2_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "gpt2"); - clk = clk_register_mux(NULL, "gpt3_mclk", gpt_parents, + clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt_parents, ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GPT3_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "gpt3_mclk", NULL); - clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0, + clk_register_clkdev(clk, "gpt3_mux_clk", NULL); + clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0, SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT3_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "gpt3"); /* others */ - clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "vco1div2_clk", - 0, SPEAR1310_UART_CLK_SYNT, NULL, aux_rtbl, - ARRAY_SIZE(aux_rtbl), &_lock, &clk1); - clk_register_clkdev(clk, "uart_syn_clk", NULL); - clk_register_clkdev(clk1, "uart_syn_gclk", NULL); + clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk", + "vco1div2_clk", 0, SPEAR1310_UART_CLK_SYNT, NULL, + aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); + clk_register_clkdev(clk, "uart_synth_clk", NULL); + clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL); - clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents, + clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents, ARRAY_SIZE(uart0_parents), 0, SPEAR1310_PERIP_CLK_CFG, SPEAR1310_UART_CLK_SHIFT, SPEAR1310_UART_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "uart0_mclk", NULL); + clk_register_clkdev(clk, "uart0_mux_clk", NULL); - clk = clk_register_gate(NULL, "uart0_clk", "uart0_mclk", 0, + clk = clk_register_gate(NULL, "uart0_clk", "uart0_mux_clk", 0, SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UART_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "e0000000.serial"); - clk = clk_register_aux("sdhci_syn_clk", "sdhci_syn_gclk", + clk = clk_register_aux("sdhci_synth_clk", "sdhci_synth_gate_clk", "vco1div2_clk", 0, SPEAR1310_SDHCI_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); - clk_register_clkdev(clk, "sdhci_syn_clk", NULL); - clk_register_clkdev(clk1, "sdhci_syn_gclk", NULL); + clk_register_clkdev(clk, "sdhci_synth_clk", NULL); + clk_register_clkdev(clk1, "sdhci_synth_gate_clk", NULL); - clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_syn_gclk", 0, + clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_synth_gate_clk", 0, SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SDHCI_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "b3000000.sdhci"); - clk = clk_register_aux("cfxd_syn_clk", "cfxd_syn_gclk", "vco1div2_clk", - 0, SPEAR1310_CFXD_CLK_SYNT, NULL, aux_rtbl, - ARRAY_SIZE(aux_rtbl), &_lock, &clk1); - clk_register_clkdev(clk, "cfxd_syn_clk", NULL); - clk_register_clkdev(clk1, "cfxd_syn_gclk", NULL); + clk = clk_register_aux("cfxd_synth_clk", "cfxd_synth_gate_clk", + "vco1div2_clk", 0, SPEAR1310_CFXD_CLK_SYNT, NULL, + aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); + clk_register_clkdev(clk, 
"cfxd_synth_clk", NULL); + clk_register_clkdev(clk1, "cfxd_synth_gate_clk", NULL); - clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_syn_gclk", 0, + clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_synth_gate_clk", 0, SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CFXD_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "b2800000.cf"); clk_register_clkdev(clk, NULL, "arasan_xd"); - clk = clk_register_aux("c3_syn_clk", "c3_syn_gclk", "vco1div2_clk", - 0, SPEAR1310_C3_CLK_SYNT, NULL, aux_rtbl, - ARRAY_SIZE(aux_rtbl), &_lock, &clk1); - clk_register_clkdev(clk, "c3_syn_clk", NULL); - clk_register_clkdev(clk1, "c3_syn_gclk", NULL); + clk = clk_register_aux("c3_synth_clk", "c3_synth_gate_clk", + "vco1div2_clk", 0, SPEAR1310_C3_CLK_SYNT, NULL, + aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); + clk_register_clkdev(clk, "c3_synth_clk", NULL); + clk_register_clkdev(clk1, "c3_synth_gate_clk", NULL); - clk = clk_register_mux(NULL, "c3_mclk", c3_parents, + clk = clk_register_mux(NULL, "c3_mux_clk", c3_parents, ARRAY_SIZE(c3_parents), 0, SPEAR1310_PERIP_CLK_CFG, SPEAR1310_C3_CLK_SHIFT, SPEAR1310_C3_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "c3_mclk", NULL); + clk_register_clkdev(clk, "c3_mux_clk", NULL); - clk = clk_register_gate(NULL, "c3_clk", "c3_mclk", 0, + clk = clk_register_gate(NULL, "c3_clk", "c3_mux_clk", 0, SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_C3_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "c3"); /* gmac */ - clk = clk_register_mux(NULL, "phy_input_mclk", gmac_phy_input_parents, + clk = clk_register_mux(NULL, "gmac_phy_input_mux_clk", + gmac_phy_input_parents, ARRAY_SIZE(gmac_phy_input_parents), 0, SPEAR1310_GMAC_CLK_CFG, SPEAR1310_GMAC_PHY_INPUT_CLK_SHIFT, SPEAR1310_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "phy_input_mclk", NULL); + clk_register_clkdev(clk, "gmac_phy_input_mux_clk", NULL); - clk = clk_register_aux("phy_syn_clk", "phy_syn_gclk", "phy_input_mclk", - 0, SPEAR1310_GMAC_CLK_SYNT, NULL, gmac_rtbl, - ARRAY_SIZE(gmac_rtbl), &_lock, &clk1); - clk_register_clkdev(clk, "phy_syn_clk", NULL); - clk_register_clkdev(clk1, "phy_syn_gclk", NULL); + clk = clk_register_aux("gmac_phy_synth_clk", "gmac_phy_synth_gate_clk", + "gmac_phy_input_mux_clk", 0, SPEAR1310_GMAC_CLK_SYNT, + NULL, gmac_rtbl, ARRAY_SIZE(gmac_rtbl), &_lock, &clk1); + clk_register_clkdev(clk, "gmac_phy_synth_clk", NULL); + clk_register_clkdev(clk1, "gmac_phy_synth_gate_clk", NULL); - clk = clk_register_mux(NULL, "phy_mclk", gmac_phy_parents, + clk = clk_register_mux(NULL, "gmac_phy_mux_clk", gmac_phy_parents, ARRAY_SIZE(gmac_phy_parents), 0, SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GMAC_PHY_CLK_SHIFT, SPEAR1310_GMAC_PHY_CLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "stmmacphy.0"); /* clcd */ - clk = clk_register_mux(NULL, "clcd_syn_mclk", clcd_synth_parents, + clk = clk_register_mux(NULL, "clcd_synth_mux_clk", clcd_synth_parents, ARRAY_SIZE(clcd_synth_parents), 0, SPEAR1310_CLCD_CLK_SYNT, SPEAR1310_CLCD_SYNT_CLK_SHIFT, SPEAR1310_CLCD_SYNT_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "clcd_syn_mclk", NULL); + clk_register_clkdev(clk, "clcd_synth_mux_clk", NULL); - clk = clk_register_frac("clcd_syn_clk", "clcd_syn_mclk", 0, + clk = clk_register_frac("clcd_synth_clk", "clcd_synth_mux_clk", 0, SPEAR1310_CLCD_CLK_SYNT, clcd_rtbl, ARRAY_SIZE(clcd_rtbl), &_lock); - clk_register_clkdev(clk, "clcd_syn_clk", NULL); + clk_register_clkdev(clk, "clcd_synth_clk", NULL); - clk = clk_register_mux(NULL, "clcd_pixel_mclk", clcd_pixel_parents, + clk = clk_register_mux(NULL, "clcd_pixel_mux_clk", clcd_pixel_parents, 
ARRAY_SIZE(clcd_pixel_parents), 0, SPEAR1310_PERIP_CLK_CFG, SPEAR1310_CLCD_CLK_SHIFT, SPEAR1310_CLCD_CLK_MASK, 0, &_lock); clk_register_clkdev(clk, "clcd_pixel_clk", NULL); - clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mclk", 0, + clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mux_clk", 0, SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CLCD_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "clcd_clk", NULL); /* i2s */ - clk = clk_register_mux(NULL, "i2s_src_mclk", i2s_src_parents, + clk = clk_register_mux(NULL, "i2s_src_mux_clk", i2s_src_parents, ARRAY_SIZE(i2s_src_parents), 0, SPEAR1310_I2S_CLK_CFG, SPEAR1310_I2S_SRC_CLK_SHIFT, SPEAR1310_I2S_SRC_CLK_MASK, 0, &_lock); clk_register_clkdev(clk, "i2s_src_clk", NULL); - clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mclk", 0, + clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mux_clk", 0, SPEAR1310_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl, ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL); clk_register_clkdev(clk, "i2s_prs1_clk", NULL); - clk = clk_register_mux(NULL, "i2s_ref_mclk", i2s_ref_parents, + clk = clk_register_mux(NULL, "i2s_ref_mux_clk", i2s_ref_parents, ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1310_I2S_CLK_CFG, SPEAR1310_I2S_REF_SHIFT, SPEAR1310_I2S_REF_SEL_MASK, 0, &_lock); clk_register_clkdev(clk, "i2s_ref_clk", NULL); - clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mclk", 0, + clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mux_clk", 0, SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_I2S_REF_PAD_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL); - clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gclk", + clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gate_clk", "i2s_ref_pad_clk", 0, SPEAR1310_I2S_CLK_CFG, &i2s_sclk_masks, i2s_sclk_rtbl, ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1); clk_register_clkdev(clk, "i2s_sclk_clk", NULL); - clk_register_clkdev(clk1, "i2s_sclk_gclk", NULL); + clk_register_clkdev(clk1, "i2s_sclk_gate_clk", NULL); /* clock derived from ahb clk */ clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0, @@ -745,13 +747,13 @@ void __init spear1310_clk_init(void) &_lock); clk_register_clkdev(clk, "sysram1_clk", NULL); - clk = clk_register_aux("adc_syn_clk", "adc_syn_gclk", "ahb_clk", + clk = clk_register_aux("adc_synth_clk", "adc_synth_gate_clk", "ahb_clk", 0, SPEAR1310_ADC_CLK_SYNT, NULL, adc_rtbl, ARRAY_SIZE(adc_rtbl), &_lock, &clk1); - clk_register_clkdev(clk, "adc_syn_clk", NULL); - clk_register_clkdev(clk1, "adc_syn_gclk", NULL); + clk_register_clkdev(clk, "adc_synth_clk", NULL); + clk_register_clkdev(clk1, "adc_synth_gate_clk", NULL); - clk = clk_register_gate(NULL, "adc_clk", "adc_syn_gclk", 0, + clk = clk_register_gate(NULL, "adc_clk", "adc_synth_gate_clk", 0, SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_ADC_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "adc_clk"); @@ -788,37 +790,37 @@ void __init spear1310_clk_init(void) clk_register_clkdev(clk, NULL, "e0300000.kbd"); /* RAS clks */ - clk = clk_register_mux(NULL, "gen_syn0_1_mclk", gen_synth0_1_parents, - ARRAY_SIZE(gen_synth0_1_parents), 0, SPEAR1310_PLL_CFG, - SPEAR1310_RAS_SYNT0_1_CLK_SHIFT, + clk = clk_register_mux(NULL, "gen_synth0_1_mux_clk", + gen_synth0_1_parents, ARRAY_SIZE(gen_synth0_1_parents), + 0, SPEAR1310_PLL_CFG, SPEAR1310_RAS_SYNT0_1_CLK_SHIFT, SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "gen_syn0_1_clk", NULL); + clk_register_clkdev(clk, "gen_synth0_1_clk", NULL); - clk = clk_register_mux(NULL, "gen_syn2_3_mclk", gen_synth2_3_parents, - ARRAY_SIZE(gen_synth2_3_parents), 0, 
SPEAR1310_PLL_CFG, - SPEAR1310_RAS_SYNT2_3_CLK_SHIFT, + clk = clk_register_mux(NULL, "gen_synth2_3_mux_clk", + gen_synth2_3_parents, ARRAY_SIZE(gen_synth2_3_parents), + 0, SPEAR1310_PLL_CFG, SPEAR1310_RAS_SYNT2_3_CLK_SHIFT, SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "gen_syn2_3_clk", NULL); + clk_register_clkdev(clk, "gen_synth2_3_clk", NULL); - clk = clk_register_frac("gen_syn0_clk", "gen_syn0_1_clk", 0, + clk = clk_register_frac("gen_synth0_clk", "gen_synth0_1_clk", 0, SPEAR1310_RAS_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl), &_lock); - clk_register_clkdev(clk, "gen_syn0_clk", NULL); + clk_register_clkdev(clk, "gen_synth0_clk", NULL); - clk = clk_register_frac("gen_syn1_clk", "gen_syn0_1_clk", 0, + clk = clk_register_frac("gen_synth1_clk", "gen_synth0_1_clk", 0, SPEAR1310_RAS_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl), &_lock); - clk_register_clkdev(clk, "gen_syn1_clk", NULL); + clk_register_clkdev(clk, "gen_synth1_clk", NULL); - clk = clk_register_frac("gen_syn2_clk", "gen_syn2_3_clk", 0, + clk = clk_register_frac("gen_synth2_clk", "gen_synth2_3_clk", 0, SPEAR1310_RAS_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl), &_lock); - clk_register_clkdev(clk, "gen_syn2_clk", NULL); + clk_register_clkdev(clk, "gen_synth2_clk", NULL); - clk = clk_register_frac("gen_syn3_clk", "gen_syn2_3_clk", 0, + clk = clk_register_frac("gen_synth3_clk", "gen_synth2_3_clk", 0, SPEAR1310_RAS_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl), &_lock); - clk_register_clkdev(clk, "gen_syn3_clk", NULL); + clk_register_clkdev(clk, "gen_synth3_clk", NULL); clk = clk_register_gate(NULL, "ras_osc_24m_clk", "osc_24m_clk", 0, SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_24M_CLK_ENB, 0, @@ -845,7 +847,7 @@ void __init spear1310_clk_init(void) &_lock); clk_register_clkdev(clk, "ras_pll3_clk", NULL); - clk = clk_register_gate(NULL, "ras_tx125_clk", "gmii_pad_clk", 0, + clk = clk_register_gate(NULL, "ras_tx125_clk", "gmii_125m_pad_clk", 0, SPEAR1310_RAS_CLK_ENB, SPEAR1310_C125M_PAD_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_tx125_clk", NULL); @@ -910,7 +912,7 @@ void __init spear1310_clk_init(void) &_lock); clk_register_clkdev(clk, NULL, "5c700000.eth"); - clk = clk_register_mux(NULL, "smii_rgmii_phy_mclk", + clk = clk_register_mux(NULL, "smii_rgmii_phy_mux_clk", smii_rgmii_phy_parents, ARRAY_SIZE(smii_rgmii_phy_parents), 0, SPEAR1310_RAS_CTRL_REG1, @@ -920,184 +922,184 @@ void __init spear1310_clk_init(void) clk_register_clkdev(clk, NULL, "stmmacphy.2"); clk_register_clkdev(clk, NULL, "stmmacphy.4"); - clk = clk_register_mux(NULL, "rmii_phy_mclk", rmii_phy_parents, + clk = clk_register_mux(NULL, "rmii_phy_mux_clk", rmii_phy_parents, ARRAY_SIZE(rmii_phy_parents), 0, SPEAR1310_RAS_CTRL_REG1, SPEAR1310_RMII_PHY_CLK_SHIFT, SPEAR1310_PHY_CLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "stmmacphy.3"); - clk = clk_register_mux(NULL, "uart1_mclk", uart_parents, + clk = clk_register_mux(NULL, "uart1_mux_clk", uart_parents, ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0, SPEAR1310_UART1_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "uart1_mclk", NULL); + clk_register_clkdev(clk, "uart1_mux_clk", NULL); - clk = clk_register_gate(NULL, "uart1_clk", "uart1_mclk", 0, + clk = clk_register_gate(NULL, "uart1_clk", "uart1_mux_clk", 0, SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART1_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "5c800000.serial"); - clk = clk_register_mux(NULL, "uart2_mclk", uart_parents, + clk = clk_register_mux(NULL, "uart2_mux_clk", uart_parents, 
ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0, SPEAR1310_UART2_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "uart2_mclk", NULL); + clk_register_clkdev(clk, "uart2_mux_clk", NULL); - clk = clk_register_gate(NULL, "uart2_clk", "uart2_mclk", 0, + clk = clk_register_gate(NULL, "uart2_clk", "uart2_mux_clk", 0, SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART2_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "5c900000.serial"); - clk = clk_register_mux(NULL, "uart3_mclk", uart_parents, + clk = clk_register_mux(NULL, "uart3_mux_clk", uart_parents, ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0, SPEAR1310_UART3_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "uart3_mclk", NULL); + clk_register_clkdev(clk, "uart3_mux_clk", NULL); - clk = clk_register_gate(NULL, "uart3_clk", "uart3_mclk", 0, + clk = clk_register_gate(NULL, "uart3_clk", "uart3_mux_clk", 0, SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART3_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "5ca00000.serial"); - clk = clk_register_mux(NULL, "uart4_mclk", uart_parents, + clk = clk_register_mux(NULL, "uart4_mux_clk", uart_parents, ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0, SPEAR1310_UART4_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "uart4_mclk", NULL); + clk_register_clkdev(clk, "uart4_mux_clk", NULL); - clk = clk_register_gate(NULL, "uart4_clk", "uart4_mclk", 0, + clk = clk_register_gate(NULL, "uart4_clk", "uart4_mux_clk", 0, SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART4_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "5cb00000.serial"); - clk = clk_register_mux(NULL, "uart5_mclk", uart_parents, + clk = clk_register_mux(NULL, "uart5_mux_clk", uart_parents, ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0, SPEAR1310_UART5_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "uart5_mclk", NULL); + clk_register_clkdev(clk, "uart5_mux_clk", NULL); - clk = clk_register_gate(NULL, "uart5_clk", "uart5_mclk", 0, + clk = clk_register_gate(NULL, "uart5_clk", "uart5_mux_clk", 0, SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART5_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "5cc00000.serial"); - clk = clk_register_mux(NULL, "i2c1_mclk", i2c_parents, + clk = clk_register_mux(NULL, "i2c1_mux_clk", i2c_parents, ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C1_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "i2c1_mclk", NULL); + clk_register_clkdev(clk, "i2c1_mux_clk", NULL); - clk = clk_register_gate(NULL, "i2c1_clk", "i2c1_mclk", 0, + clk = clk_register_gate(NULL, "i2c1_clk", "i2c1_mux_clk", 0, SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C1_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "5cd00000.i2c"); - clk = clk_register_mux(NULL, "i2c2_mclk", i2c_parents, + clk = clk_register_mux(NULL, "i2c2_mux_clk", i2c_parents, ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C2_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "i2c2_mclk", NULL); + clk_register_clkdev(clk, "i2c2_mux_clk", NULL); - clk = clk_register_gate(NULL, "i2c2_clk", "i2c2_mclk", 0, + clk = clk_register_gate(NULL, "i2c2_clk", "i2c2_mux_clk", 0, SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C2_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "5ce00000.i2c"); - clk = clk_register_mux(NULL, "i2c3_mclk", i2c_parents, + clk = clk_register_mux(NULL, "i2c3_mux_clk", i2c_parents, ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C3_CLK_SHIFT, 
SPEAR1310_I2C_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "i2c3_mclk", NULL); + clk_register_clkdev(clk, "i2c3_mux_clk", NULL); - clk = clk_register_gate(NULL, "i2c3_clk", "i2c3_mclk", 0, + clk = clk_register_gate(NULL, "i2c3_clk", "i2c3_mux_clk", 0, SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C3_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "5cf00000.i2c"); - clk = clk_register_mux(NULL, "i2c4_mclk", i2c_parents, + clk = clk_register_mux(NULL, "i2c4_mux_clk", i2c_parents, ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C4_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "i2c4_mclk", NULL); + clk_register_clkdev(clk, "i2c4_mux_clk", NULL); - clk = clk_register_gate(NULL, "i2c4_clk", "i2c4_mclk", 0, + clk = clk_register_gate(NULL, "i2c4_clk", "i2c4_mux_clk", 0, SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C4_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "5d000000.i2c"); - clk = clk_register_mux(NULL, "i2c5_mclk", i2c_parents, + clk = clk_register_mux(NULL, "i2c5_mux_clk", i2c_parents, ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C5_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "i2c5_mclk", NULL); + clk_register_clkdev(clk, "i2c5_mux_clk", NULL); - clk = clk_register_gate(NULL, "i2c5_clk", "i2c5_mclk", 0, + clk = clk_register_gate(NULL, "i2c5_clk", "i2c5_mux_clk", 0, SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C5_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "5d100000.i2c"); - clk = clk_register_mux(NULL, "i2c6_mclk", i2c_parents, + clk = clk_register_mux(NULL, "i2c6_mux_clk", i2c_parents, ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C6_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "i2c6_mclk", NULL); + clk_register_clkdev(clk, "i2c6_mux_clk", NULL); - clk = clk_register_gate(NULL, "i2c6_clk", "i2c6_mclk", 0, + clk = clk_register_gate(NULL, "i2c6_clk", "i2c6_mux_clk", 0, SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C6_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "5d200000.i2c"); - clk = clk_register_mux(NULL, "i2c7_mclk", i2c_parents, + clk = clk_register_mux(NULL, "i2c7_mux_clk", i2c_parents, ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C7_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "i2c7_mclk", NULL); + clk_register_clkdev(clk, "i2c7_mux_clk", NULL); - clk = clk_register_gate(NULL, "i2c7_clk", "i2c7_mclk", 0, + clk = clk_register_gate(NULL, "i2c7_clk", "i2c7_mux_clk", 0, SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C7_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "5d300000.i2c"); - clk = clk_register_mux(NULL, "ssp1_mclk", ssp1_parents, + clk = clk_register_mux(NULL, "ssp1_mux_clk", ssp1_parents, ARRAY_SIZE(ssp1_parents), 0, SPEAR1310_RAS_CTRL_REG0, SPEAR1310_SSP1_CLK_SHIFT, SPEAR1310_SSP1_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "ssp1_mclk", NULL); + clk_register_clkdev(clk, "ssp1_mux_clk", NULL); - clk = clk_register_gate(NULL, "ssp1_clk", "ssp1_mclk", 0, + clk = clk_register_gate(NULL, "ssp1_clk", "ssp1_mux_clk", 0, SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_SSP1_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "5d400000.spi"); - clk = clk_register_mux(NULL, "pci_mclk", pci_parents, + clk = clk_register_mux(NULL, "pci_mux_clk", pci_parents, ARRAY_SIZE(pci_parents), 0, SPEAR1310_RAS_CTRL_REG0, SPEAR1310_PCI_CLK_SHIFT, SPEAR1310_PCI_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "pci_mclk", NULL); + clk_register_clkdev(clk, "pci_mux_clk", NULL); - clk = clk_register_gate(NULL, "pci_clk", 
"pci_mclk", 0, + clk = clk_register_gate(NULL, "pci_clk", "pci_mux_clk", 0, SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_PCI_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "pci"); - clk = clk_register_mux(NULL, "tdm1_mclk", tdm_parents, + clk = clk_register_mux(NULL, "tdm1_mux_clk", tdm_parents, ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0, SPEAR1310_TDM1_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "tdm1_mclk", NULL); + clk_register_clkdev(clk, "tdm1_mux_clk", NULL); - clk = clk_register_gate(NULL, "tdm1_clk", "tdm1_mclk", 0, + clk = clk_register_gate(NULL, "tdm1_clk", "tdm1_mux_clk", 0, SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM1_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "tdm_hdlc.0"); - clk = clk_register_mux(NULL, "tdm2_mclk", tdm_parents, + clk = clk_register_mux(NULL, "tdm2_mux_clk", tdm_parents, ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0, SPEAR1310_TDM2_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "tdm2_mclk", NULL); + clk_register_clkdev(clk, "tdm2_mux_clk", NULL); - clk = clk_register_gate(NULL, "tdm2_clk", "tdm2_mclk", 0, + clk = clk_register_gate(NULL, "tdm2_clk", "tdm2_mux_clk", 0, SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM2_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "tdm_hdlc.1"); diff --git a/trunk/drivers/clk/spear/spear1340_clock.c b/trunk/drivers/clk/spear/spear1340_clock.c index 2352cee7f645..e3ea72162236 100644 --- a/trunk/drivers/clk/spear/spear1340_clock.c +++ b/trunk/drivers/clk/spear/spear1340_clock.c @@ -369,25 +369,27 @@ static struct frac_rate_tbl gen_rtbl[] = { /* clock parents */ static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", }; -static const char *sys_parents[] = { "pll1_clk", "pll1_clk", "pll1_clk", - "pll1_clk", "sys_synth_clk", "sys_synth_clk", "pll2_clk", "pll3_clk", }; -static const char *ahb_parents[] = { "cpu_div3_clk", "amba_syn_clk", }; +static const char *sys_parents[] = { "none", "pll1_clk", "none", "none", + "sys_synth_clk", "none", "pll2_clk", "pll3_clk", }; +static const char *ahb_parents[] = { "cpu_div3_clk", "amba_synth_clk", }; static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", }; static const char *uart0_parents[] = { "pll5_clk", "osc_24m_clk", - "uart0_syn_gclk", }; + "uart0_synth_gate_clk", }; static const char *uart1_parents[] = { "pll5_clk", "osc_24m_clk", - "uart1_syn_gclk", }; -static const char *c3_parents[] = { "pll5_clk", "c3_syn_gclk", }; -static const char *gmac_phy_input_parents[] = { "gmii_pad_clk", "pll2_clk", + "uart1_synth_gate_clk", }; +static const char *c3_parents[] = { "pll5_clk", "c3_synth_gate_clk", }; +static const char *gmac_phy_input_parents[] = { "gmii_125m_pad_clk", "pll2_clk", "osc_25m_clk", }; -static const char *gmac_phy_parents[] = { "phy_input_mclk", "phy_syn_gclk", }; +static const char *gmac_phy_parents[] = { "gmac_phy_input_mux_clk", + "gmac_phy_synth_gate_clk", }; static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", }; -static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_syn_clk", }; +static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_synth_clk", }; static const char *i2s_src_parents[] = { "vco1div2_clk", "pll2_clk", "pll3_clk", "i2s_src_pad_clk", }; -static const char *i2s_ref_parents[] = { "i2s_src_mclk", "i2s_prs1_clk", }; -static const char *spdif_out_parents[] = { "i2s_src_pad_clk", "gen_syn2_clk", }; -static const char *spdif_in_parents[] = { "pll2_clk", "gen_syn3_clk", }; +static const char *i2s_ref_parents[] = { "i2s_src_mux_clk", 
"i2s_prs1_clk", }; +static const char *spdif_out_parents[] = { "i2s_src_pad_clk", "gen_synth2_clk", +}; +static const char *spdif_in_parents[] = { "pll2_clk", "gen_synth3_clk", }; static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk", "pll3_clk", }; @@ -413,9 +415,9 @@ void __init spear1340_clk_init(void) 25000000); clk_register_clkdev(clk, "osc_25m_clk", NULL); - clk = clk_register_fixed_rate(NULL, "gmii_pad_clk", NULL, CLK_IS_ROOT, - 125000000); - clk_register_clkdev(clk, "gmii_pad_clk", NULL); + clk = clk_register_fixed_rate(NULL, "gmii_125m_pad_clk", NULL, + CLK_IS_ROOT, 125000000); + clk_register_clkdev(clk, "gmii_125m_pad_clk", NULL); clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL, CLK_IS_ROOT, 12288000); @@ -429,35 +431,35 @@ void __init spear1340_clk_init(void) /* clock derived from 24 or 25 MHz osc clk */ /* vco-pll */ - clk = clk_register_mux(NULL, "vco1_mclk", vco_parents, + clk = clk_register_mux(NULL, "vco1_mux_clk", vco_parents, ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG, SPEAR1340_PLL1_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "vco1_mclk", NULL); - clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mclk", 0, - SPEAR1340_PLL1_CTR, SPEAR1340_PLL1_FRQ, pll_rtbl, + clk_register_clkdev(clk, "vco1_mux_clk", NULL); + clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mux_clk", + 0, SPEAR1340_PLL1_CTR, SPEAR1340_PLL1_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL); clk_register_clkdev(clk, "vco1_clk", NULL); clk_register_clkdev(clk1, "pll1_clk", NULL); - clk = clk_register_mux(NULL, "vco2_mclk", vco_parents, + clk = clk_register_mux(NULL, "vco2_mux_clk", vco_parents, ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG, SPEAR1340_PLL2_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "vco2_mclk", NULL); - clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mclk", 0, - SPEAR1340_PLL2_CTR, SPEAR1340_PLL2_FRQ, pll_rtbl, + clk_register_clkdev(clk, "vco2_mux_clk", NULL); + clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mux_clk", + 0, SPEAR1340_PLL2_CTR, SPEAR1340_PLL2_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL); clk_register_clkdev(clk, "vco2_clk", NULL); clk_register_clkdev(clk1, "pll2_clk", NULL); - clk = clk_register_mux(NULL, "vco3_mclk", vco_parents, + clk = clk_register_mux(NULL, "vco3_mux_clk", vco_parents, ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG, SPEAR1340_PLL3_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "vco3_mclk", NULL); - clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mclk", 0, - SPEAR1340_PLL3_CTR, SPEAR1340_PLL3_FRQ, pll_rtbl, + clk_register_clkdev(clk, "vco3_mux_clk", NULL); + clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mux_clk", + 0, SPEAR1340_PLL3_CTR, SPEAR1340_PLL3_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL); clk_register_clkdev(clk, "vco3_clk", NULL); clk_register_clkdev(clk1, "pll3_clk", NULL); @@ -496,7 +498,7 @@ void __init spear1340_clk_init(void) /* peripherals */ clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1, 128); - clk = clk_register_gate(NULL, "thermal_gclk", "thermal_clk", 0, + clk = clk_register_gate(NULL, "thermal_gate_clk", "thermal_clk", 0, SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_THSENS_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "spear_thermal"); @@ -507,23 +509,23 @@ void __init spear1340_clk_init(void) clk_register_clkdev(clk, "ddr_clk", NULL); /* clock derived from pll1 clk */ - clk 
= clk_register_frac("sys_syn_clk", "vco1div2_clk", 0, + clk = clk_register_frac("sys_synth_clk", "vco1div2_clk", 0, SPEAR1340_SYS_CLK_SYNT, sys_synth_rtbl, ARRAY_SIZE(sys_synth_rtbl), &_lock); - clk_register_clkdev(clk, "sys_syn_clk", NULL); + clk_register_clkdev(clk, "sys_synth_clk", NULL); - clk = clk_register_frac("amba_syn_clk", "vco1div2_clk", 0, + clk = clk_register_frac("amba_synth_clk", "vco1div2_clk", 0, SPEAR1340_AMBA_CLK_SYNT, amba_synth_rtbl, ARRAY_SIZE(amba_synth_rtbl), &_lock); - clk_register_clkdev(clk, "amba_syn_clk", NULL); + clk_register_clkdev(clk, "amba_synth_clk", NULL); - clk = clk_register_mux(NULL, "sys_mclk", sys_parents, + clk = clk_register_mux(NULL, "sys_mux_clk", sys_parents, ARRAY_SIZE(sys_parents), 0, SPEAR1340_SYS_CLK_CTRL, SPEAR1340_SCLK_SRC_SEL_SHIFT, SPEAR1340_SCLK_SRC_SEL_MASK, 0, &_lock); clk_register_clkdev(clk, "sys_clk", NULL); - clk = clk_register_fixed_factor(NULL, "cpu_clk", "sys_mclk", 0, 1, + clk = clk_register_fixed_factor(NULL, "cpu_clk", "sys_mux_clk", 0, 1, 2); clk_register_clkdev(clk, "cpu_clk", NULL); @@ -546,193 +548,194 @@ void __init spear1340_clk_init(void) clk_register_clkdev(clk, "apb_clk", NULL); /* gpt clocks */ - clk = clk_register_mux(NULL, "gpt0_mclk", gpt_parents, + clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt_parents, ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GPT0_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "gpt0_mclk", NULL); - clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mclk", 0, + clk_register_clkdev(clk, "gpt0_mux_clk", NULL); + clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mux_clk", 0, SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT0_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "gpt0"); - clk = clk_register_mux(NULL, "gpt1_mclk", gpt_parents, + clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt_parents, ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GPT1_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "gpt1_mclk", NULL); - clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0, + clk_register_clkdev(clk, "gpt1_mux_clk", NULL); + clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0, SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT1_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "gpt1"); - clk = clk_register_mux(NULL, "gpt2_mclk", gpt_parents, + clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt_parents, ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GPT2_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "gpt2_mclk", NULL); - clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0, + clk_register_clkdev(clk, "gpt2_mux_clk", NULL); + clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0, SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT2_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "gpt2"); - clk = clk_register_mux(NULL, "gpt3_mclk", gpt_parents, + clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt_parents, ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GPT3_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "gpt3_mclk", NULL); - clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0, + clk_register_clkdev(clk, "gpt3_mux_clk", NULL); + clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0, SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT3_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "gpt3"); /* others */ - clk = clk_register_aux("uart0_syn_clk", "uart0_syn_gclk", + clk = clk_register_aux("uart0_synth_clk", "uart0_synth_gate_clk", 
"vco1div2_clk", 0, SPEAR1340_UART0_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); - clk_register_clkdev(clk, "uart0_syn_clk", NULL); - clk_register_clkdev(clk1, "uart0_syn_gclk", NULL); + clk_register_clkdev(clk, "uart0_synth_clk", NULL); + clk_register_clkdev(clk1, "uart0_synth_gate_clk", NULL); - clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents, + clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents, ARRAY_SIZE(uart0_parents), 0, SPEAR1340_PERIP_CLK_CFG, SPEAR1340_UART0_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "uart0_mclk", NULL); + clk_register_clkdev(clk, "uart0_mux_clk", NULL); - clk = clk_register_gate(NULL, "uart0_clk", "uart0_mclk", 0, + clk = clk_register_gate(NULL, "uart0_clk", "uart0_mux_clk", 0, SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART0_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "e0000000.serial"); - clk = clk_register_aux("uart1_syn_clk", "uart1_syn_gclk", + clk = clk_register_aux("uart1_synth_clk", "uart1_synth_gate_clk", "vco1div2_clk", 0, SPEAR1340_UART1_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); - clk_register_clkdev(clk, "uart1_syn_clk", NULL); - clk_register_clkdev(clk1, "uart1_syn_gclk", NULL); + clk_register_clkdev(clk, "uart1_synth_clk", NULL); + clk_register_clkdev(clk1, "uart1_synth_gate_clk", NULL); - clk = clk_register_mux(NULL, "uart1_mclk", uart1_parents, + clk = clk_register_mux(NULL, "uart1_mux_clk", uart1_parents, ARRAY_SIZE(uart1_parents), 0, SPEAR1340_PERIP_CLK_CFG, SPEAR1340_UART1_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "uart1_mclk", NULL); + clk_register_clkdev(clk, "uart1_mux_clk", NULL); - clk = clk_register_gate(NULL, "uart1_clk", "uart1_mclk", 0, - SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_UART1_CLK_ENB, 0, + clk = clk_register_gate(NULL, "uart1_clk", "uart1_mux_clk", 0, + SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART1_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "b4100000.serial"); - clk = clk_register_aux("sdhci_syn_clk", "sdhci_syn_gclk", + clk = clk_register_aux("sdhci_synth_clk", "sdhci_synth_gate_clk", "vco1div2_clk", 0, SPEAR1340_SDHCI_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); - clk_register_clkdev(clk, "sdhci_syn_clk", NULL); - clk_register_clkdev(clk1, "sdhci_syn_gclk", NULL); + clk_register_clkdev(clk, "sdhci_synth_clk", NULL); + clk_register_clkdev(clk1, "sdhci_synth_gate_clk", NULL); - clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_syn_gclk", 0, + clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_synth_gate_clk", 0, SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SDHCI_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "b3000000.sdhci"); - clk = clk_register_aux("cfxd_syn_clk", "cfxd_syn_gclk", "vco1div2_clk", - 0, SPEAR1340_CFXD_CLK_SYNT, NULL, aux_rtbl, - ARRAY_SIZE(aux_rtbl), &_lock, &clk1); - clk_register_clkdev(clk, "cfxd_syn_clk", NULL); - clk_register_clkdev(clk1, "cfxd_syn_gclk", NULL); + clk = clk_register_aux("cfxd_synth_clk", "cfxd_synth_gate_clk", + "vco1div2_clk", 0, SPEAR1340_CFXD_CLK_SYNT, NULL, + aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); + clk_register_clkdev(clk, "cfxd_synth_clk", NULL); + clk_register_clkdev(clk1, "cfxd_synth_gate_clk", NULL); - clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_syn_gclk", 0, + clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_synth_gate_clk", 0, SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CFXD_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "b2800000.cf"); clk_register_clkdev(clk, NULL, "arasan_xd"); - clk = clk_register_aux("c3_syn_clk", 
"c3_syn_gclk", "vco1div2_clk", 0, - SPEAR1340_C3_CLK_SYNT, NULL, aux_rtbl, - ARRAY_SIZE(aux_rtbl), &_lock, &clk1); - clk_register_clkdev(clk, "c3_syn_clk", NULL); - clk_register_clkdev(clk1, "c3_syn_gclk", NULL); + clk = clk_register_aux("c3_synth_clk", "c3_synth_gate_clk", + "vco1div2_clk", 0, SPEAR1340_C3_CLK_SYNT, NULL, + aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); + clk_register_clkdev(clk, "c3_synth_clk", NULL); + clk_register_clkdev(clk1, "c3_synth_gate_clk", NULL); - clk = clk_register_mux(NULL, "c3_mclk", c3_parents, + clk = clk_register_mux(NULL, "c3_mux_clk", c3_parents, ARRAY_SIZE(c3_parents), 0, SPEAR1340_PERIP_CLK_CFG, SPEAR1340_C3_CLK_SHIFT, SPEAR1340_C3_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "c3_mclk", NULL); + clk_register_clkdev(clk, "c3_mux_clk", NULL); - clk = clk_register_gate(NULL, "c3_clk", "c3_mclk", 0, + clk = clk_register_gate(NULL, "c3_clk", "c3_mux_clk", 0, SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_C3_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "c3"); /* gmac */ - clk = clk_register_mux(NULL, "phy_input_mclk", gmac_phy_input_parents, + clk = clk_register_mux(NULL, "gmac_phy_input_mux_clk", + gmac_phy_input_parents, ARRAY_SIZE(gmac_phy_input_parents), 0, SPEAR1340_GMAC_CLK_CFG, SPEAR1340_GMAC_PHY_INPUT_CLK_SHIFT, SPEAR1340_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "phy_input_mclk", NULL); + clk_register_clkdev(clk, "gmac_phy_input_mux_clk", NULL); - clk = clk_register_aux("phy_syn_clk", "phy_syn_gclk", "phy_input_mclk", - 0, SPEAR1340_GMAC_CLK_SYNT, NULL, gmac_rtbl, - ARRAY_SIZE(gmac_rtbl), &_lock, &clk1); - clk_register_clkdev(clk, "phy_syn_clk", NULL); - clk_register_clkdev(clk1, "phy_syn_gclk", NULL); + clk = clk_register_aux("gmac_phy_synth_clk", "gmac_phy_synth_gate_clk", + "gmac_phy_input_mux_clk", 0, SPEAR1340_GMAC_CLK_SYNT, + NULL, gmac_rtbl, ARRAY_SIZE(gmac_rtbl), &_lock, &clk1); + clk_register_clkdev(clk, "gmac_phy_synth_clk", NULL); + clk_register_clkdev(clk1, "gmac_phy_synth_gate_clk", NULL); - clk = clk_register_mux(NULL, "phy_mclk", gmac_phy_parents, + clk = clk_register_mux(NULL, "gmac_phy_mux_clk", gmac_phy_parents, ARRAY_SIZE(gmac_phy_parents), 0, SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GMAC_PHY_CLK_SHIFT, SPEAR1340_GMAC_PHY_CLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "stmmacphy.0"); /* clcd */ - clk = clk_register_mux(NULL, "clcd_syn_mclk", clcd_synth_parents, + clk = clk_register_mux(NULL, "clcd_synth_mux_clk", clcd_synth_parents, ARRAY_SIZE(clcd_synth_parents), 0, SPEAR1340_CLCD_CLK_SYNT, SPEAR1340_CLCD_SYNT_CLK_SHIFT, SPEAR1340_CLCD_SYNT_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "clcd_syn_mclk", NULL); + clk_register_clkdev(clk, "clcd_synth_mux_clk", NULL); - clk = clk_register_frac("clcd_syn_clk", "clcd_syn_mclk", 0, + clk = clk_register_frac("clcd_synth_clk", "clcd_synth_mux_clk", 0, SPEAR1340_CLCD_CLK_SYNT, clcd_rtbl, ARRAY_SIZE(clcd_rtbl), &_lock); - clk_register_clkdev(clk, "clcd_syn_clk", NULL); + clk_register_clkdev(clk, "clcd_synth_clk", NULL); - clk = clk_register_mux(NULL, "clcd_pixel_mclk", clcd_pixel_parents, + clk = clk_register_mux(NULL, "clcd_pixel_mux_clk", clcd_pixel_parents, ARRAY_SIZE(clcd_pixel_parents), 0, SPEAR1340_PERIP_CLK_CFG, SPEAR1340_CLCD_CLK_SHIFT, SPEAR1340_CLCD_CLK_MASK, 0, &_lock); clk_register_clkdev(clk, "clcd_pixel_clk", NULL); - clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mclk", 0, + clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mux_clk", 0, SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CLCD_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "clcd_clk", 
NULL); /* i2s */ - clk = clk_register_mux(NULL, "i2s_src_mclk", i2s_src_parents, + clk = clk_register_mux(NULL, "i2s_src_mux_clk", i2s_src_parents, ARRAY_SIZE(i2s_src_parents), 0, SPEAR1340_I2S_CLK_CFG, SPEAR1340_I2S_SRC_CLK_SHIFT, SPEAR1340_I2S_SRC_CLK_MASK, 0, &_lock); clk_register_clkdev(clk, "i2s_src_clk", NULL); - clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mclk", 0, + clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mux_clk", 0, SPEAR1340_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl, ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL); clk_register_clkdev(clk, "i2s_prs1_clk", NULL); - clk = clk_register_mux(NULL, "i2s_ref_mclk", i2s_ref_parents, + clk = clk_register_mux(NULL, "i2s_ref_mux_clk", i2s_ref_parents, ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1340_I2S_CLK_CFG, SPEAR1340_I2S_REF_SHIFT, SPEAR1340_I2S_REF_SEL_MASK, 0, &_lock); clk_register_clkdev(clk, "i2s_ref_clk", NULL); - clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mclk", 0, + clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mux_clk", 0, SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_I2S_REF_PAD_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL); - clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gclk", "i2s_ref_mclk", - 0, SPEAR1340_I2S_CLK_CFG, &i2s_sclk_masks, - i2s_sclk_rtbl, ARRAY_SIZE(i2s_sclk_rtbl), &_lock, - &clk1); + clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gate_clk", + "i2s_ref_mux_clk", 0, SPEAR1340_I2S_CLK_CFG, + &i2s_sclk_masks, i2s_sclk_rtbl, + ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1); clk_register_clkdev(clk, "i2s_sclk_clk", NULL); - clk_register_clkdev(clk1, "i2s_sclk_gclk", NULL); + clk_register_clkdev(clk1, "i2s_sclk_gate_clk", NULL); /* clock derived from ahb clk */ clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0, @@ -741,7 +744,7 @@ void __init spear1340_clk_init(void) clk_register_clkdev(clk, NULL, "e0280000.i2c"); clk = clk_register_gate(NULL, "i2c1_clk", "ahb_clk", 0, - SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_I2C1_CLK_ENB, 0, + SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2C1_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "b4000000.i2c"); @@ -797,13 +800,13 @@ void __init spear1340_clk_init(void) &_lock); clk_register_clkdev(clk, "sysram1_clk", NULL); - clk = clk_register_aux("adc_syn_clk", "adc_syn_gclk", "ahb_clk", + clk = clk_register_aux("adc_synth_clk", "adc_synth_gate_clk", "ahb_clk", 0, SPEAR1340_ADC_CLK_SYNT, NULL, adc_rtbl, ARRAY_SIZE(adc_rtbl), &_lock, &clk1); - clk_register_clkdev(clk, "adc_syn_clk", NULL); - clk_register_clkdev(clk1, "adc_syn_gclk", NULL); + clk_register_clkdev(clk, "adc_synth_clk", NULL); + clk_register_clkdev(clk1, "adc_synth_gate_clk", NULL); - clk = clk_register_gate(NULL, "adc_clk", "adc_syn_gclk", 0, + clk = clk_register_gate(NULL, "adc_clk", "adc_synth_gate_clk", 0, SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_ADC_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "adc_clk"); @@ -840,39 +843,39 @@ void __init spear1340_clk_init(void) clk_register_clkdev(clk, NULL, "e0300000.kbd"); /* RAS clks */ - clk = clk_register_mux(NULL, "gen_syn0_1_mclk", gen_synth0_1_parents, - ARRAY_SIZE(gen_synth0_1_parents), 0, SPEAR1340_PLL_CFG, - SPEAR1340_GEN_SYNT0_1_CLK_SHIFT, + clk = clk_register_mux(NULL, "gen_synth0_1_mux_clk", + gen_synth0_1_parents, ARRAY_SIZE(gen_synth0_1_parents), + 0, SPEAR1340_PLL_CFG, SPEAR1340_GEN_SYNT0_1_CLK_SHIFT, SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "gen_syn0_1_clk", NULL); + clk_register_clkdev(clk, "gen_synth0_1_clk", NULL); - clk = clk_register_mux(NULL, "gen_syn2_3_mclk", 
gen_synth2_3_parents, - ARRAY_SIZE(gen_synth2_3_parents), 0, SPEAR1340_PLL_CFG, - SPEAR1340_GEN_SYNT2_3_CLK_SHIFT, + clk = clk_register_mux(NULL, "gen_synth2_3_mux_clk", + gen_synth2_3_parents, ARRAY_SIZE(gen_synth2_3_parents), + 0, SPEAR1340_PLL_CFG, SPEAR1340_GEN_SYNT2_3_CLK_SHIFT, SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "gen_syn2_3_clk", NULL); + clk_register_clkdev(clk, "gen_synth2_3_clk", NULL); - clk = clk_register_frac("gen_syn0_clk", "gen_syn0_1_clk", 0, + clk = clk_register_frac("gen_synth0_clk", "gen_synth0_1_clk", 0, SPEAR1340_GEN_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl), &_lock); - clk_register_clkdev(clk, "gen_syn0_clk", NULL); + clk_register_clkdev(clk, "gen_synth0_clk", NULL); - clk = clk_register_frac("gen_syn1_clk", "gen_syn0_1_clk", 0, + clk = clk_register_frac("gen_synth1_clk", "gen_synth0_1_clk", 0, SPEAR1340_GEN_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl), &_lock); - clk_register_clkdev(clk, "gen_syn1_clk", NULL); + clk_register_clkdev(clk, "gen_synth1_clk", NULL); - clk = clk_register_frac("gen_syn2_clk", "gen_syn2_3_clk", 0, + clk = clk_register_frac("gen_synth2_clk", "gen_synth2_3_clk", 0, SPEAR1340_GEN_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl), &_lock); - clk_register_clkdev(clk, "gen_syn2_clk", NULL); + clk_register_clkdev(clk, "gen_synth2_clk", NULL); - clk = clk_register_frac("gen_syn3_clk", "gen_syn2_3_clk", 0, + clk = clk_register_frac("gen_synth3_clk", "gen_synth2_3_clk", 0, SPEAR1340_GEN_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl), &_lock); - clk_register_clkdev(clk, "gen_syn3_clk", NULL); + clk_register_clkdev(clk, "gen_synth3_clk", NULL); - clk = clk_register_gate(NULL, "mali_clk", "gen_syn3_clk", 0, + clk = clk_register_gate(NULL, "mali_clk", "gen_synth3_clk", 0, SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_MALI_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "mali"); @@ -887,74 +890,74 @@ void __init spear1340_clk_init(void) &_lock); clk_register_clkdev(clk, NULL, "spear_cec.1"); - clk = clk_register_mux(NULL, "spdif_out_mclk", spdif_out_parents, + clk = clk_register_mux(NULL, "spdif_out_mux_clk", spdif_out_parents, ARRAY_SIZE(spdif_out_parents), 0, SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_OUT_CLK_SHIFT, SPEAR1340_SPDIF_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "spdif_out_mclk", NULL); + clk_register_clkdev(clk, "spdif_out_mux_clk", NULL); - clk = clk_register_gate(NULL, "spdif_out_clk", "spdif_out_mclk", 0, + clk = clk_register_gate(NULL, "spdif_out_clk", "spdif_out_mux_clk", 0, SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_OUT_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "spdif-out"); - clk = clk_register_mux(NULL, "spdif_in_mclk", spdif_in_parents, + clk = clk_register_mux(NULL, "spdif_in_mux_clk", spdif_in_parents, ARRAY_SIZE(spdif_in_parents), 0, SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_IN_CLK_SHIFT, SPEAR1340_SPDIF_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "spdif_in_mclk", NULL); + clk_register_clkdev(clk, "spdif_in_mux_clk", NULL); - clk = clk_register_gate(NULL, "spdif_in_clk", "spdif_in_mclk", 0, + clk = clk_register_gate(NULL, "spdif_in_clk", "spdif_in_mux_clk", 0, SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_IN_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "spdif-in"); - clk = clk_register_gate(NULL, "acp_clk", "acp_mclk", 0, + clk = clk_register_gate(NULL, "acp_clk", "acp_mux_clk", 0, SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_ACP_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "acp_clk"); - clk = clk_register_gate(NULL, "plgpio_clk", "plgpio_mclk", 0, + clk = clk_register_gate(NULL, "plgpio_clk", 
"plgpio_mux_clk", 0, SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PLGPIO_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "plgpio"); - clk = clk_register_gate(NULL, "video_dec_clk", "video_dec_mclk", 0, + clk = clk_register_gate(NULL, "video_dec_clk", "video_dec_mux_clk", 0, SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_DEC_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "video_dec"); - clk = clk_register_gate(NULL, "video_enc_clk", "video_enc_mclk", 0, + clk = clk_register_gate(NULL, "video_enc_clk", "video_enc_mux_clk", 0, SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_ENC_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "video_enc"); - clk = clk_register_gate(NULL, "video_in_clk", "video_in_mclk", 0, + clk = clk_register_gate(NULL, "video_in_clk", "video_in_mux_clk", 0, SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_IN_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "spear_vip"); - clk = clk_register_gate(NULL, "cam0_clk", "cam0_mclk", 0, + clk = clk_register_gate(NULL, "cam0_clk", "cam0_mux_clk", 0, SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM0_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "spear_camif.0"); - clk = clk_register_gate(NULL, "cam1_clk", "cam1_mclk", 0, + clk = clk_register_gate(NULL, "cam1_clk", "cam1_mux_clk", 0, SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM1_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "spear_camif.1"); - clk = clk_register_gate(NULL, "cam2_clk", "cam2_mclk", 0, + clk = clk_register_gate(NULL, "cam2_clk", "cam2_mux_clk", 0, SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM2_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "spear_camif.2"); - clk = clk_register_gate(NULL, "cam3_clk", "cam3_mclk", 0, + clk = clk_register_gate(NULL, "cam3_clk", "cam3_mux_clk", 0, SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM3_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "spear_camif.3"); - clk = clk_register_gate(NULL, "pwm_clk", "pwm_mclk", 0, + clk = clk_register_gate(NULL, "pwm_clk", "pwm_mux_clk", 0, SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PWM_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "pwm"); diff --git a/trunk/drivers/clk/spear/spear3xx_clock.c b/trunk/drivers/clk/spear/spear3xx_clock.c index c3157454bb3f..01dd6daff2a1 100644 --- a/trunk/drivers/clk/spear/spear3xx_clock.c +++ b/trunk/drivers/clk/spear/spear3xx_clock.c @@ -122,12 +122,12 @@ static struct gpt_rate_tbl gpt_rtbl[] = { }; /* clock parents */ -static const char *uart0_parents[] = { "pll3_clk", "uart_syn_gclk", }; -static const char *firda_parents[] = { "pll3_clk", "firda_syn_gclk", +static const char *uart0_parents[] = { "pll3_48m_clk", "uart_synth_gate_clk", }; +static const char *firda_parents[] = { "pll3_48m_clk", "firda_synth_gate_clk", }; -static const char *gpt0_parents[] = { "pll3_clk", "gpt0_syn_clk", }; -static const char *gpt1_parents[] = { "pll3_clk", "gpt1_syn_clk", }; -static const char *gpt2_parents[] = { "pll3_clk", "gpt2_syn_clk", }; +static const char *gpt0_parents[] = { "pll3_48m_clk", "gpt0_synth_clk", }; +static const char *gpt1_parents[] = { "pll3_48m_clk", "gpt1_synth_clk", }; +static const char *gpt2_parents[] = { "pll3_48m_clk", "gpt2_synth_clk", }; static const char *gen2_3_parents[] = { "pll1_clk", "pll2_clk", }; static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none", "pll2_clk", }; @@ -137,7 +137,7 @@ static void __init spear300_clk_init(void) { struct clk *clk; - clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_clk", 0, + clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_48m_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "60000000.clcd"); @@ -219,11 
+219,15 @@ static void __init spear310_clk_init(void) #define SPEAR320_UARTX_PCLK_VAL_SYNTH1 0x0 #define SPEAR320_UARTX_PCLK_VAL_APB 0x1 -static const char *i2s_ref_parents[] = { "ras_pll2_clk", "ras_syn2_gclk", }; -static const char *sdhci_parents[] = { "ras_pll3_clk", "ras_syn3_gclk", }; +static const char *i2s_ref_parents[] = { "ras_pll2_clk", + "ras_gen2_synth_gate_clk", }; +static const char *sdhci_parents[] = { "ras_pll3_48m_clk", + "ras_gen3_synth_gate_clk", +}; static const char *smii0_parents[] = { "smii_125m_pad", "ras_pll2_clk", - "ras_syn0_gclk", }; -static const char *uartx_parents[] = { "ras_syn1_gclk", "ras_apb_clk", }; + "ras_gen0_synth_gate_clk", }; +static const char *uartx_parents[] = { "ras_gen1_synth_gate_clk", "ras_apb_clk", +}; static void __init spear320_clk_init(void) { @@ -233,7 +237,7 @@ static void __init spear320_clk_init(void) CLK_IS_ROOT, 125000000); clk_register_clkdev(clk, "smii_125m_pad", NULL); - clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_clk", 0, + clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_48m_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "90000000.clcd"); @@ -359,9 +363,9 @@ void __init spear3xx_clk_init(void) clk_register_clkdev(clk, NULL, "fc900000.rtc"); /* clock derived from 24 MHz osc clk */ - clk = clk_register_fixed_rate(NULL, "pll3_clk", "osc_24m_clk", 0, + clk = clk_register_fixed_rate(NULL, "pll3_48m_clk", "osc_24m_clk", 0, 48000000); - clk_register_clkdev(clk, "pll3_clk", NULL); + clk_register_clkdev(clk, "pll3_48m_clk", NULL); clk = clk_register_fixed_factor(NULL, "wdt_clk", "osc_24m_clk", 0, 1, 1); @@ -388,98 +392,98 @@ void __init spear3xx_clk_init(void) HCLK_RATIO_MASK, 0, &_lock); clk_register_clkdev(clk, "ahb_clk", NULL); - clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "pll1_clk", 0, - UART_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), - &_lock, &clk1); - clk_register_clkdev(clk, "uart_syn_clk", NULL); - clk_register_clkdev(clk1, "uart_syn_gclk", NULL); + clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk", + "pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl, + ARRAY_SIZE(aux_rtbl), &_lock, &clk1); + clk_register_clkdev(clk, "uart_synth_clk", NULL); + clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL); - clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents, + clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents, ARRAY_SIZE(uart0_parents), 0, PERIP_CLK_CFG, UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "uart0_mclk", NULL); + clk_register_clkdev(clk, "uart0_mux_clk", NULL); - clk = clk_register_gate(NULL, "uart0", "uart0_mclk", 0, PERIP1_CLK_ENB, - UART_CLK_ENB, 0, &_lock); + clk = clk_register_gate(NULL, "uart0", "uart0_mux_clk", 0, + PERIP1_CLK_ENB, UART_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "d0000000.serial"); - clk = clk_register_aux("firda_syn_clk", "firda_syn_gclk", "pll1_clk", 0, - FIRDA_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), - &_lock, &clk1); - clk_register_clkdev(clk, "firda_syn_clk", NULL); - clk_register_clkdev(clk1, "firda_syn_gclk", NULL); + clk = clk_register_aux("firda_synth_clk", "firda_synth_gate_clk", + "pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl, + ARRAY_SIZE(aux_rtbl), &_lock, &clk1); + clk_register_clkdev(clk, "firda_synth_clk", NULL); + clk_register_clkdev(clk1, "firda_synth_gate_clk", NULL); - clk = clk_register_mux(NULL, "firda_mclk", firda_parents, + clk = clk_register_mux(NULL, "firda_mux_clk", firda_parents, ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG, FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock); 
- clk_register_clkdev(clk, "firda_mclk", NULL); + clk_register_clkdev(clk, "firda_mux_clk", NULL); - clk = clk_register_gate(NULL, "firda_clk", "firda_mclk", 0, + clk = clk_register_gate(NULL, "firda_clk", "firda_mux_clk", 0, PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "firda"); /* gpt clocks */ - clk_register_gpt("gpt0_syn_clk", "pll1_clk", 0, PRSC0_CLK_CFG, gpt_rtbl, - ARRAY_SIZE(gpt_rtbl), &_lock); + clk_register_gpt("gpt0_synth_clk", "pll1_clk", 0, PRSC0_CLK_CFG, + gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock); clk = clk_register_mux(NULL, "gpt0_clk", gpt0_parents, ARRAY_SIZE(gpt0_parents), 0, PERIP_CLK_CFG, GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "gpt0"); - clk_register_gpt("gpt1_syn_clk", "pll1_clk", 0, PRSC1_CLK_CFG, gpt_rtbl, - ARRAY_SIZE(gpt_rtbl), &_lock); - clk = clk_register_mux(NULL, "gpt1_mclk", gpt1_parents, + clk_register_gpt("gpt1_synth_clk", "pll1_clk", 0, PRSC1_CLK_CFG, + gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock); + clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt1_parents, ARRAY_SIZE(gpt1_parents), 0, PERIP_CLK_CFG, GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "gpt1_mclk", NULL); - clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0, + clk_register_clkdev(clk, "gpt1_mux_clk", NULL); + clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0, PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "gpt1"); - clk_register_gpt("gpt2_syn_clk", "pll1_clk", 0, PRSC2_CLK_CFG, gpt_rtbl, - ARRAY_SIZE(gpt_rtbl), &_lock); - clk = clk_register_mux(NULL, "gpt2_mclk", gpt2_parents, + clk_register_gpt("gpt2_synth_clk", "pll1_clk", 0, PRSC2_CLK_CFG, + gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock); + clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt2_parents, ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG, GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "gpt2_mclk", NULL); - clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0, + clk_register_clkdev(clk, "gpt2_mux_clk", NULL); + clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0, PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "gpt2"); /* general synths clocks */ - clk = clk_register_aux("gen0_syn_clk", "gen0_syn_gclk", "pll1_clk", - 0, GEN0_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), - &_lock, &clk1); - clk_register_clkdev(clk, "gen0_syn_clk", NULL); - clk_register_clkdev(clk1, "gen0_syn_gclk", NULL); - - clk = clk_register_aux("gen1_syn_clk", "gen1_syn_gclk", "pll1_clk", - 0, GEN1_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), - &_lock, &clk1); - clk_register_clkdev(clk, "gen1_syn_clk", NULL); - clk_register_clkdev(clk1, "gen1_syn_gclk", NULL); - - clk = clk_register_mux(NULL, "gen2_3_par_clk", gen2_3_parents, + clk = clk_register_aux("gen0_synth_clk", "gen0_synth_gate_clk", + "pll1_clk", 0, GEN0_CLK_SYNT, NULL, aux_rtbl, + ARRAY_SIZE(aux_rtbl), &_lock, &clk1); + clk_register_clkdev(clk, "gen0_synth_clk", NULL); + clk_register_clkdev(clk1, "gen0_synth_gate_clk", NULL); + + clk = clk_register_aux("gen1_synth_clk", "gen1_synth_gate_clk", + "pll1_clk", 0, GEN1_CLK_SYNT, NULL, aux_rtbl, + ARRAY_SIZE(aux_rtbl), &_lock, &clk1); + clk_register_clkdev(clk, "gen1_synth_clk", NULL); + clk_register_clkdev(clk1, "gen1_synth_gate_clk", NULL); + + clk = clk_register_mux(NULL, "gen2_3_parent_clk", gen2_3_parents, ARRAY_SIZE(gen2_3_parents), 0, CORE_CLK_CFG, GEN_SYNTH2_3_CLK_SHIFT, GEN_SYNTH2_3_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "gen2_3_par_clk", NULL); + clk_register_clkdev(clk, 
"gen2_3_parent_clk", NULL); - clk = clk_register_aux("gen2_syn_clk", "gen2_syn_gclk", - "gen2_3_par_clk", 0, GEN2_CLK_SYNT, NULL, aux_rtbl, + clk = clk_register_aux("gen2_synth_clk", "gen2_synth_gate_clk", + "gen2_3_parent_clk", 0, GEN2_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); - clk_register_clkdev(clk, "gen2_syn_clk", NULL); - clk_register_clkdev(clk1, "gen2_syn_gclk", NULL); + clk_register_clkdev(clk, "gen2_synth_clk", NULL); + clk_register_clkdev(clk1, "gen2_synth_gate_clk", NULL); - clk = clk_register_aux("gen3_syn_clk", "gen3_syn_gclk", - "gen2_3_par_clk", 0, GEN3_CLK_SYNT, NULL, aux_rtbl, + clk = clk_register_aux("gen3_synth_clk", "gen3_synth_gate_clk", + "gen2_3_parent_clk", 0, GEN3_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); - clk_register_clkdev(clk, "gen3_syn_clk", NULL); - clk_register_clkdev(clk1, "gen3_syn_gclk", NULL); + clk_register_clkdev(clk, "gen3_synth_clk", NULL); + clk_register_clkdev(clk1, "gen3_synth_gate_clk", NULL); /* clock derived from pll3 clk */ - clk = clk_register_gate(NULL, "usbh_clk", "pll3_clk", 0, PERIP1_CLK_ENB, - USBH_CLK_ENB, 0, &_lock); + clk = clk_register_gate(NULL, "usbh_clk", "pll3_48m_clk", 0, + PERIP1_CLK_ENB, USBH_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "usbh_clk", NULL); clk = clk_register_fixed_factor(NULL, "usbh.0_clk", "usbh_clk", 0, 1, @@ -490,8 +494,8 @@ void __init spear3xx_clk_init(void) 1); clk_register_clkdev(clk, "usbh.1_clk", NULL); - clk = clk_register_gate(NULL, "usbd_clk", "pll3_clk", 0, PERIP1_CLK_ENB, - USBD_CLK_ENB, 0, &_lock); + clk = clk_register_gate(NULL, "usbd_clk", "pll3_48m_clk", 0, + PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "designware_udc"); /* clock derived from ahb clk */ @@ -575,25 +579,29 @@ void __init spear3xx_clk_init(void) RAS_CLK_ENB, RAS_PLL2_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_pll2_clk", NULL); - clk = clk_register_gate(NULL, "ras_pll3_clk", "pll3_clk", 0, + clk = clk_register_gate(NULL, "ras_pll3_48m_clk", "pll3_48m_clk", 0, RAS_CLK_ENB, RAS_48M_CLK_ENB, 0, &_lock); - clk_register_clkdev(clk, "ras_pll3_clk", NULL); - - clk = clk_register_gate(NULL, "ras_syn0_gclk", "gen0_syn_gclk", 0, - RAS_CLK_ENB, RAS_SYNT0_CLK_ENB, 0, &_lock); - clk_register_clkdev(clk, "ras_syn0_gclk", NULL); - - clk = clk_register_gate(NULL, "ras_syn1_gclk", "gen1_syn_gclk", 0, - RAS_CLK_ENB, RAS_SYNT1_CLK_ENB, 0, &_lock); - clk_register_clkdev(clk, "ras_syn1_gclk", NULL); - - clk = clk_register_gate(NULL, "ras_syn2_gclk", "gen2_syn_gclk", 0, - RAS_CLK_ENB, RAS_SYNT2_CLK_ENB, 0, &_lock); - clk_register_clkdev(clk, "ras_syn2_gclk", NULL); - - clk = clk_register_gate(NULL, "ras_syn3_gclk", "gen3_syn_gclk", 0, - RAS_CLK_ENB, RAS_SYNT3_CLK_ENB, 0, &_lock); - clk_register_clkdev(clk, "ras_syn3_gclk", NULL); + clk_register_clkdev(clk, "ras_pll3_48m_clk", NULL); + + clk = clk_register_gate(NULL, "ras_gen0_synth_gate_clk", + "gen0_synth_gate_clk", 0, RAS_CLK_ENB, + RAS_SYNT0_CLK_ENB, 0, &_lock); + clk_register_clkdev(clk, "ras_gen0_synth_gate_clk", NULL); + + clk = clk_register_gate(NULL, "ras_gen1_synth_gate_clk", + "gen1_synth_gate_clk", 0, RAS_CLK_ENB, + RAS_SYNT1_CLK_ENB, 0, &_lock); + clk_register_clkdev(clk, "ras_gen1_synth_gate_clk", NULL); + + clk = clk_register_gate(NULL, "ras_gen2_synth_gate_clk", + "gen2_synth_gate_clk", 0, RAS_CLK_ENB, + RAS_SYNT2_CLK_ENB, 0, &_lock); + clk_register_clkdev(clk, "ras_gen2_synth_gate_clk", NULL); + + clk = clk_register_gate(NULL, "ras_gen3_synth_gate_clk", + "gen3_synth_gate_clk", 0, RAS_CLK_ENB, + 
RAS_SYNT3_CLK_ENB, 0, &_lock); + clk_register_clkdev(clk, "ras_gen3_synth_gate_clk", NULL); if (of_machine_is_compatible("st,spear300")) spear300_clk_init(); diff --git a/trunk/drivers/clk/spear/spear6xx_clock.c b/trunk/drivers/clk/spear/spear6xx_clock.c index a98d0866f541..61026ae564ab 100644 --- a/trunk/drivers/clk/spear/spear6xx_clock.c +++ b/trunk/drivers/clk/spear/spear6xx_clock.c @@ -97,12 +97,13 @@ static struct aux_rate_tbl aux_rtbl[] = { {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */ }; -static const char *clcd_parents[] = { "pll3_clk", "clcd_syn_gclk", }; -static const char *firda_parents[] = { "pll3_clk", "firda_syn_gclk", }; -static const char *uart_parents[] = { "pll3_clk", "uart_syn_gclk", }; -static const char *gpt0_1_parents[] = { "pll3_clk", "gpt0_1_syn_clk", }; -static const char *gpt2_parents[] = { "pll3_clk", "gpt2_syn_clk", }; -static const char *gpt3_parents[] = { "pll3_clk", "gpt3_syn_clk", }; +static const char *clcd_parents[] = { "pll3_48m_clk", "clcd_synth_gate_clk", }; +static const char *firda_parents[] = { "pll3_48m_clk", "firda_synth_gate_clk", +}; +static const char *uart_parents[] = { "pll3_48m_clk", "uart_synth_gate_clk", }; +static const char *gpt0_1_parents[] = { "pll3_48m_clk", "gpt0_1_synth_clk", }; +static const char *gpt2_parents[] = { "pll3_48m_clk", "gpt2_synth_clk", }; +static const char *gpt3_parents[] = { "pll3_48m_clk", "gpt3_synth_clk", }; static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none", "pll2_clk", }; @@ -135,9 +136,9 @@ void __init spear6xx_clk_init(void) clk_register_clkdev(clk, NULL, "rtc-spear"); /* clock derived from 30 MHz osc clk */ - clk = clk_register_fixed_rate(NULL, "pll3_clk", "osc_24m_clk", 0, + clk = clk_register_fixed_rate(NULL, "pll3_48m_clk", "osc_24m_clk", 0, 48000000); - clk_register_clkdev(clk, "pll3_clk", NULL); + clk_register_clkdev(clk, "pll3_48m_clk", NULL); clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "osc_30m_clk", 0, PLL1_CTR, PLL1_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl), @@ -145,9 +146,9 @@ void __init spear6xx_clk_init(void) clk_register_clkdev(clk, "vco1_clk", NULL); clk_register_clkdev(clk1, "pll1_clk", NULL); - clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "osc_30m_clk", - 0, PLL2_CTR, PLL2_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl), - &_lock, &clk1, NULL); + clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, + "osc_30m_clk", 0, PLL2_CTR, PLL2_FRQ, pll_rtbl, + ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL); clk_register_clkdev(clk, "vco2_clk", NULL); clk_register_clkdev(clk1, "pll2_clk", NULL); @@ -164,111 +165,111 @@ void __init spear6xx_clk_init(void) HCLK_RATIO_MASK, 0, &_lock); clk_register_clkdev(clk, "ahb_clk", NULL); - clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "pll1_clk", 0, - UART_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), - &_lock, &clk1); - clk_register_clkdev(clk, "uart_syn_clk", NULL); - clk_register_clkdev(clk1, "uart_syn_gclk", NULL); + clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk", + "pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl, + ARRAY_SIZE(aux_rtbl), &_lock, &clk1); + clk_register_clkdev(clk, "uart_synth_clk", NULL); + clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL); - clk = clk_register_mux(NULL, "uart_mclk", uart_parents, + clk = clk_register_mux(NULL, "uart_mux_clk", uart_parents, ARRAY_SIZE(uart_parents), 0, PERIP_CLK_CFG, UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "uart_mclk", NULL); + clk_register_clkdev(clk, "uart_mux_clk", NULL); - clk = clk_register_gate(NULL, "uart0", 
"uart_mclk", 0, PERIP1_CLK_ENB, - UART0_CLK_ENB, 0, &_lock); + clk = clk_register_gate(NULL, "uart0", "uart_mux_clk", 0, + PERIP1_CLK_ENB, UART0_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "d0000000.serial"); - clk = clk_register_gate(NULL, "uart1", "uart_mclk", 0, PERIP1_CLK_ENB, - UART1_CLK_ENB, 0, &_lock); + clk = clk_register_gate(NULL, "uart1", "uart_mux_clk", 0, + PERIP1_CLK_ENB, UART1_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "d0080000.serial"); - clk = clk_register_aux("firda_syn_clk", "firda_syn_gclk", "pll1_clk", - 0, FIRDA_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), - &_lock, &clk1); - clk_register_clkdev(clk, "firda_syn_clk", NULL); - clk_register_clkdev(clk1, "firda_syn_gclk", NULL); + clk = clk_register_aux("firda_synth_clk", "firda_synth_gate_clk", + "pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl, + ARRAY_SIZE(aux_rtbl), &_lock, &clk1); + clk_register_clkdev(clk, "firda_synth_clk", NULL); + clk_register_clkdev(clk1, "firda_synth_gate_clk", NULL); - clk = clk_register_mux(NULL, "firda_mclk", firda_parents, + clk = clk_register_mux(NULL, "firda_mux_clk", firda_parents, ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG, FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "firda_mclk", NULL); + clk_register_clkdev(clk, "firda_mux_clk", NULL); - clk = clk_register_gate(NULL, "firda_clk", "firda_mclk", 0, + clk = clk_register_gate(NULL, "firda_clk", "firda_mux_clk", 0, PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "firda"); - clk = clk_register_aux("clcd_syn_clk", "clcd_syn_gclk", "pll1_clk", - 0, CLCD_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), - &_lock, &clk1); - clk_register_clkdev(clk, "clcd_syn_clk", NULL); - clk_register_clkdev(clk1, "clcd_syn_gclk", NULL); + clk = clk_register_aux("clcd_synth_clk", "clcd_synth_gate_clk", + "pll1_clk", 0, CLCD_CLK_SYNT, NULL, aux_rtbl, + ARRAY_SIZE(aux_rtbl), &_lock, &clk1); + clk_register_clkdev(clk, "clcd_synth_clk", NULL); + clk_register_clkdev(clk1, "clcd_synth_gate_clk", NULL); - clk = clk_register_mux(NULL, "clcd_mclk", clcd_parents, + clk = clk_register_mux(NULL, "clcd_mux_clk", clcd_parents, ARRAY_SIZE(clcd_parents), 0, PERIP_CLK_CFG, CLCD_CLK_SHIFT, CLCD_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "clcd_mclk", NULL); + clk_register_clkdev(clk, "clcd_mux_clk", NULL); - clk = clk_register_gate(NULL, "clcd_clk", "clcd_mclk", 0, + clk = clk_register_gate(NULL, "clcd_clk", "clcd_mux_clk", 0, PERIP1_CLK_ENB, CLCD_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "clcd"); /* gpt clocks */ - clk = clk_register_gpt("gpt0_1_syn_clk", "pll1_clk", 0, PRSC0_CLK_CFG, + clk = clk_register_gpt("gpt0_1_synth_clk", "pll1_clk", 0, PRSC0_CLK_CFG, gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock); - clk_register_clkdev(clk, "gpt0_1_syn_clk", NULL); + clk_register_clkdev(clk, "gpt0_1_synth_clk", NULL); - clk = clk_register_mux(NULL, "gpt0_mclk", gpt0_1_parents, + clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt0_1_parents, ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG, GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "gpt0"); - clk = clk_register_mux(NULL, "gpt1_mclk", gpt0_1_parents, + clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt0_1_parents, ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG, GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "gpt1_mclk", NULL); + clk_register_clkdev(clk, "gpt1_mux_clk", NULL); - clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0, + clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0, PERIP1_CLK_ENB, 
GPT1_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "gpt1"); - clk = clk_register_gpt("gpt2_syn_clk", "pll1_clk", 0, PRSC1_CLK_CFG, + clk = clk_register_gpt("gpt2_synth_clk", "pll1_clk", 0, PRSC1_CLK_CFG, gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock); - clk_register_clkdev(clk, "gpt2_syn_clk", NULL); + clk_register_clkdev(clk, "gpt2_synth_clk", NULL); - clk = clk_register_mux(NULL, "gpt2_mclk", gpt2_parents, + clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt2_parents, ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG, GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "gpt2_mclk", NULL); + clk_register_clkdev(clk, "gpt2_mux_clk", NULL); - clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0, + clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0, PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "gpt2"); - clk = clk_register_gpt("gpt3_syn_clk", "pll1_clk", 0, PRSC2_CLK_CFG, + clk = clk_register_gpt("gpt3_synth_clk", "pll1_clk", 0, PRSC2_CLK_CFG, gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock); - clk_register_clkdev(clk, "gpt3_syn_clk", NULL); + clk_register_clkdev(clk, "gpt3_synth_clk", NULL); - clk = clk_register_mux(NULL, "gpt3_mclk", gpt3_parents, + clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt3_parents, ARRAY_SIZE(gpt3_parents), 0, PERIP_CLK_CFG, GPT3_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock); - clk_register_clkdev(clk, "gpt3_mclk", NULL); + clk_register_clkdev(clk, "gpt3_mux_clk", NULL); - clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0, + clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0, PERIP1_CLK_ENB, GPT3_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "gpt3"); /* clock derived from pll3 clk */ - clk = clk_register_gate(NULL, "usbh0_clk", "pll3_clk", 0, + clk = clk_register_gate(NULL, "usbh0_clk", "pll3_48m_clk", 0, PERIP1_CLK_ENB, USBH0_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "usbh.0_clk"); - clk = clk_register_gate(NULL, "usbh1_clk", "pll3_clk", 0, + clk = clk_register_gate(NULL, "usbh1_clk", "pll3_48m_clk", 0, PERIP1_CLK_ENB, USBH1_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "usbh.1_clk"); - clk = clk_register_gate(NULL, "usbd_clk", "pll3_clk", 0, PERIP1_CLK_ENB, - USBD_CLK_ENB, 0, &_lock); + clk = clk_register_gate(NULL, "usbd_clk", "pll3_48m_clk", 0, + PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "designware_udc"); /* clock derived from ahb clk */ @@ -277,8 +278,9 @@ void __init spear6xx_clk_init(void) clk_register_clkdev(clk, "ahbmult2_clk", NULL); clk = clk_register_mux(NULL, "ddr_clk", ddr_parents, - ARRAY_SIZE(ddr_parents), 0, PLL_CLK_CFG, MCTR_CLK_SHIFT, - MCTR_CLK_MASK, 0, &_lock); + ARRAY_SIZE(ddr_parents), + 0, PLL_CLK_CFG, MCTR_CLK_SHIFT, MCTR_CLK_MASK, 0, + &_lock); clk_register_clkdev(clk, "ddr_clk", NULL); clk = clk_register_divider(NULL, "apb_clk", "ahb_clk", diff --git a/trunk/drivers/cpufreq/cpufreq.c b/trunk/drivers/cpufreq/cpufreq.c index fb8a5279c5d8..7f2f149ae40f 100644 --- a/trunk/drivers/cpufreq/cpufreq.c +++ b/trunk/drivers/cpufreq/cpufreq.c @@ -138,7 +138,7 @@ void disable_cpufreq(void) static LIST_HEAD(cpufreq_governor_list); static DEFINE_MUTEX(cpufreq_governor_mutex); -static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs) +struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) { struct cpufreq_policy *data; unsigned long flags; @@ -162,7 +162,7 @@ static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs) if (!data) goto err_out_put_module; - if (!sysfs && !kobject_get(&data->kobj)) + if 
(!kobject_get(&data->kobj)) goto err_out_put_module; spin_unlock_irqrestore(&cpufreq_driver_lock, flags); @@ -175,35 +175,16 @@ static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs) err_out: return NULL; } - -struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) -{ - return __cpufreq_cpu_get(cpu, false); -} EXPORT_SYMBOL_GPL(cpufreq_cpu_get); -static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu) -{ - return __cpufreq_cpu_get(cpu, true); -} - -static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs) -{ - if (!sysfs) - kobject_put(&data->kobj); - module_put(cpufreq_driver->owner); -} void cpufreq_cpu_put(struct cpufreq_policy *data) { - __cpufreq_cpu_put(data, false); + kobject_put(&data->kobj); + module_put(cpufreq_driver->owner); } EXPORT_SYMBOL_GPL(cpufreq_cpu_put); -static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data) -{ - __cpufreq_cpu_put(data, true); -} /********************************************************************* * EXTERNALLY AFFECTING FREQUENCY CHANGES * @@ -636,7 +617,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) struct cpufreq_policy *policy = to_policy(kobj); struct freq_attr *fattr = to_attr(attr); ssize_t ret = -EINVAL; - policy = cpufreq_cpu_get_sysfs(policy->cpu); + policy = cpufreq_cpu_get(policy->cpu); if (!policy) goto no_policy; @@ -650,7 +631,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) unlock_policy_rwsem_read(policy->cpu); fail: - cpufreq_cpu_put_sysfs(policy); + cpufreq_cpu_put(policy); no_policy: return ret; } @@ -661,7 +642,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, struct cpufreq_policy *policy = to_policy(kobj); struct freq_attr *fattr = to_attr(attr); ssize_t ret = -EINVAL; - policy = cpufreq_cpu_get_sysfs(policy->cpu); + policy = cpufreq_cpu_get(policy->cpu); if (!policy) goto no_policy; @@ -675,7 +656,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, unlock_policy_rwsem_write(policy->cpu); fail: - cpufreq_cpu_put_sysfs(policy); + cpufreq_cpu_put(policy); no_policy: return ret; } diff --git a/trunk/drivers/cpufreq/exynos-cpufreq.c b/trunk/drivers/cpufreq/exynos-cpufreq.c index af2d81e10f71..b243a7ee01f6 100644 --- a/trunk/drivers/cpufreq/exynos-cpufreq.c +++ b/trunk/drivers/cpufreq/exynos-cpufreq.c @@ -62,18 +62,8 @@ static int exynos_target(struct cpufreq_policy *policy, goto out; } - /* - * The policy max have been changed so that we cannot get proper - * old_index with cpufreq_frequency_table_target(). Thus, ignore - * policy and get the index from the raw freqeuncy table. 
- */ - for (old_index = 0; - freq_table[old_index].frequency != CPUFREQ_TABLE_END; - old_index++) - if (freq_table[old_index].frequency == freqs.old) - break; - - if (freq_table[old_index].frequency == CPUFREQ_TABLE_END) { + if (cpufreq_frequency_table_target(policy, freq_table, + freqs.old, relation, &old_index)) { ret = -EINVAL; goto out; } diff --git a/trunk/drivers/cpuidle/cpuidle.c b/trunk/drivers/cpuidle/cpuidle.c index d6a533e68e0f..d90519cec880 100644 --- a/trunk/drivers/cpuidle/cpuidle.c +++ b/trunk/drivers/cpuidle/cpuidle.c @@ -201,22 +201,6 @@ void cpuidle_resume_and_unlock(void) EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock); -/* Currently used in suspend/resume path to suspend cpuidle */ -void cpuidle_pause(void) -{ - mutex_lock(&cpuidle_lock); - cpuidle_uninstall_idle_handler(); - mutex_unlock(&cpuidle_lock); -} - -/* Currently used in suspend/resume path to resume cpuidle */ -void cpuidle_resume(void) -{ - mutex_lock(&cpuidle_lock); - cpuidle_install_idle_handler(); - mutex_unlock(&cpuidle_lock); -} - /** * cpuidle_wrap_enter - performs timekeeping and irqen around enter function * @dev: pointer to a valid cpuidle_device object @@ -281,7 +265,7 @@ static void poll_idle_init(struct cpuidle_driver *drv) state->power_usage = -1; state->flags = 0; state->enter = poll_idle; - state->disabled = false; + state->disable = 0; } #else static void poll_idle_init(struct cpuidle_driver *drv) {} diff --git a/trunk/drivers/cpuidle/driver.c b/trunk/drivers/cpuidle/driver.c index 58bf3b1ac9c4..40cd3f3024df 100644 --- a/trunk/drivers/cpuidle/driver.c +++ b/trunk/drivers/cpuidle/driver.c @@ -16,7 +16,6 @@ static struct cpuidle_driver *cpuidle_curr_driver; DEFINE_SPINLOCK(cpuidle_driver_lock); -int cpuidle_driver_refcount; static void __cpuidle_register_driver(struct cpuidle_driver *drv) { @@ -90,34 +89,8 @@ void cpuidle_unregister_driver(struct cpuidle_driver *drv) } spin_lock(&cpuidle_driver_lock); - - if (!WARN_ON(cpuidle_driver_refcount > 0)) - cpuidle_curr_driver = NULL; - + cpuidle_curr_driver = NULL; spin_unlock(&cpuidle_driver_lock); } EXPORT_SYMBOL_GPL(cpuidle_unregister_driver); - -struct cpuidle_driver *cpuidle_driver_ref(void) -{ - struct cpuidle_driver *drv; - - spin_lock(&cpuidle_driver_lock); - - drv = cpuidle_curr_driver; - cpuidle_driver_refcount++; - - spin_unlock(&cpuidle_driver_lock); - return drv; -} - -void cpuidle_driver_unref(void) -{ - spin_lock(&cpuidle_driver_lock); - - if (!WARN_ON(cpuidle_driver_refcount <= 0)) - cpuidle_driver_refcount--; - - spin_unlock(&cpuidle_driver_lock); -} diff --git a/trunk/drivers/cpuidle/governors/menu.c b/trunk/drivers/cpuidle/governors/menu.c index 5b1f2c372c1f..06335756ea14 100644 --- a/trunk/drivers/cpuidle/governors/menu.c +++ b/trunk/drivers/cpuidle/governors/menu.c @@ -281,8 +281,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) * unless the timer is happening really really soon. 
*/ if (data->expected_us > 5 && - !drv->states[CPUIDLE_DRIVER_STATE_START].disabled && - dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0) + drv->states[CPUIDLE_DRIVER_STATE_START].disable == 0) data->last_state_idx = CPUIDLE_DRIVER_STATE_START; /* @@ -291,9 +290,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) */ for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) { struct cpuidle_state *s = &drv->states[i]; - struct cpuidle_state_usage *su = &dev->states_usage[i]; - if (s->disabled || su->disable) + if (s->disable) continue; if (s->target_residency > data->predicted_us) continue; diff --git a/trunk/drivers/cpuidle/sysfs.c b/trunk/drivers/cpuidle/sysfs.c index 5f809e337b89..88032b4dc6d2 100644 --- a/trunk/drivers/cpuidle/sysfs.c +++ b/trunk/drivers/cpuidle/sysfs.c @@ -217,8 +217,7 @@ struct cpuidle_state_attr { struct attribute attr; ssize_t (*show)(struct cpuidle_state *, \ struct cpuidle_state_usage *, char *); - ssize_t (*store)(struct cpuidle_state *, \ - struct cpuidle_state_usage *, const char *, size_t); + ssize_t (*store)(struct cpuidle_state *, const char *, size_t); }; #define define_one_state_ro(_name, show) \ @@ -234,22 +233,21 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \ return sprintf(buf, "%u\n", state->_name);\ } -#define define_store_state_ull_function(_name) \ +#define define_store_state_function(_name) \ static ssize_t store_state_##_name(struct cpuidle_state *state, \ - struct cpuidle_state_usage *state_usage, \ const char *buf, size_t size) \ { \ - unsigned long long value; \ + long value; \ int err; \ if (!capable(CAP_SYS_ADMIN)) \ return -EPERM; \ - err = kstrtoull(buf, 0, &value); \ + err = kstrtol(buf, 0, &value); \ if (err) \ return err; \ if (value) \ - state_usage->_name = 1; \ + state->disable = 1; \ else \ - state_usage->_name = 0; \ + state->disable = 0; \ return size; \ } @@ -275,8 +273,8 @@ define_show_state_ull_function(usage) define_show_state_ull_function(time) define_show_state_str_function(name) define_show_state_str_function(desc) -define_show_state_ull_function(disable) -define_store_state_ull_function(disable) +define_show_state_function(disable) +define_store_state_function(disable) define_one_state_ro(name, show_state_name); define_one_state_ro(desc, show_state_desc); @@ -320,11 +318,10 @@ static ssize_t cpuidle_state_store(struct kobject *kobj, { int ret = -EIO; struct cpuidle_state *state = kobj_to_state(kobj); - struct cpuidle_state_usage *state_usage = kobj_to_state_usage(kobj); struct cpuidle_state_attr *cattr = attr_to_stateattr(attr); if (cattr->store) - ret = cattr->store(state, state_usage, buf, size); + ret = cattr->store(state, buf, size); return ret; } diff --git a/trunk/drivers/crypto/ux500/cryp/cryp_core.c b/trunk/drivers/crypto/ux500/cryp/cryp_core.c index 1c307e1b840c..7cac12793a4b 100644 --- a/trunk/drivers/crypto/ux500/cryp/cryp_core.c +++ b/trunk/drivers/crypto/ux500/cryp/cryp_core.c @@ -1661,26 +1661,27 @@ static void ux500_cryp_shutdown(struct platform_device *pdev) } -static int ux500_cryp_suspend(struct device *dev) +static int ux500_cryp_suspend(struct platform_device *pdev, pm_message_t state) { int ret; - struct platform_device *pdev = to_platform_device(dev); struct cryp_device_data *device_data; struct resource *res_irq; struct cryp_ctx *temp_ctx = NULL; - dev_dbg(dev, "[%s]", __func__); + dev_dbg(&pdev->dev, "[%s]", __func__); /* Handle state? 
*/ device_data = platform_get_drvdata(pdev); if (!device_data) { - dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__); + dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!", + __func__); return -ENOMEM; } res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res_irq) - dev_err(dev, "[%s]: IORESOURCE_IRQ, unavailable", __func__); + dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable", + __func__); else disable_irq(res_irq->start); @@ -1691,32 +1692,32 @@ static int ux500_cryp_suspend(struct device *dev) if (device_data->current_ctx == ++temp_ctx) { if (down_interruptible(&driver_data.device_allocation)) - dev_dbg(dev, "[%s]: down_interruptible() failed", - __func__); - ret = cryp_disable_power(dev, device_data, false); + dev_dbg(&pdev->dev, "[%s]: down_interruptible() " + "failed", __func__); + ret = cryp_disable_power(&pdev->dev, device_data, false); } else - ret = cryp_disable_power(dev, device_data, true); + ret = cryp_disable_power(&pdev->dev, device_data, true); if (ret) - dev_err(dev, "[%s]: cryp_disable_power()", __func__); + dev_err(&pdev->dev, "[%s]: cryp_disable_power()", __func__); return ret; } -static int ux500_cryp_resume(struct device *dev) +static int ux500_cryp_resume(struct platform_device *pdev) { int ret = 0; - struct platform_device *pdev = to_platform_device(dev); struct cryp_device_data *device_data; struct resource *res_irq; struct cryp_ctx *temp_ctx = NULL; - dev_dbg(dev, "[%s]", __func__); + dev_dbg(&pdev->dev, "[%s]", __func__); device_data = platform_get_drvdata(pdev); if (!device_data) { - dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__); + dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!", + __func__); return -ENOMEM; } @@ -1729,10 +1730,11 @@ static int ux500_cryp_resume(struct device *dev) if (!device_data->current_ctx) up(&driver_data.device_allocation); else - ret = cryp_enable_power(dev, device_data, true); + ret = cryp_enable_power(&pdev->dev, device_data, true); if (ret) - dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__); + dev_err(&pdev->dev, "[%s]: cryp_enable_power() failed!", + __func__); else { res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (res_irq) @@ -1742,16 +1744,15 @@ static int ux500_cryp_resume(struct device *dev) return ret; } -static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume); - static struct platform_driver cryp_driver = { .probe = ux500_cryp_probe, .remove = ux500_cryp_remove, .shutdown = ux500_cryp_shutdown, + .suspend = ux500_cryp_suspend, + .resume = ux500_cryp_resume, .driver = { .owner = THIS_MODULE, .name = "cryp1" - .pm = &ux500_cryp_pm, } }; diff --git a/trunk/drivers/crypto/ux500/hash/hash_core.c b/trunk/drivers/crypto/ux500/hash/hash_core.c index 08d5032cb564..6dbb9ec709a3 100644 --- a/trunk/drivers/crypto/ux500/hash/hash_core.c +++ b/trunk/drivers/crypto/ux500/hash/hash_core.c @@ -1894,17 +1894,19 @@ static void ux500_hash_shutdown(struct platform_device *pdev) /** * ux500_hash_suspend - Function that suspends the hash device. - * @dev: Device to suspend. + * @pdev: The platform device. 
+ * @state: - */ -static int ux500_hash_suspend(struct device *dev) +static int ux500_hash_suspend(struct platform_device *pdev, pm_message_t state) { int ret; struct hash_device_data *device_data; struct hash_ctx *temp_ctx = NULL; - device_data = dev_get_drvdata(dev); + device_data = platform_get_drvdata(pdev); if (!device_data) { - dev_err(dev, "[%s] platform_get_drvdata() failed!", __func__); + dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!", + __func__); return -ENOMEM; } @@ -1915,32 +1917,33 @@ static int ux500_hash_suspend(struct device *dev) if (device_data->current_ctx == ++temp_ctx) { if (down_interruptible(&driver_data.device_allocation)) - dev_dbg(dev, "[%s]: down_interruptible() failed", - __func__); + dev_dbg(&pdev->dev, "[%s]: down_interruptible() " + "failed", __func__); ret = hash_disable_power(device_data, false); } else ret = hash_disable_power(device_data, true); if (ret) - dev_err(dev, "[%s]: hash_disable_power()", __func__); + dev_err(&pdev->dev, "[%s]: hash_disable_power()", __func__); return ret; } /** * ux500_hash_resume - Function that resume the hash device. - * @dev: Device to resume. + * @pdev: The platform device. */ -static int ux500_hash_resume(struct device *dev) +static int ux500_hash_resume(struct platform_device *pdev) { int ret = 0; struct hash_device_data *device_data; struct hash_ctx *temp_ctx = NULL; - device_data = dev_get_drvdata(dev); + device_data = platform_get_drvdata(pdev); if (!device_data) { - dev_err(dev, "[%s] platform_get_drvdata() failed!", __func__); + dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!", + __func__); return -ENOMEM; } @@ -1955,21 +1958,21 @@ static int ux500_hash_resume(struct device *dev) ret = hash_enable_power(device_data, true); if (ret) - dev_err(dev, "[%s]: hash_enable_power() failed!", __func__); + dev_err(&pdev->dev, "[%s]: hash_enable_power() failed!", + __func__); return ret; } -static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume); - static struct platform_driver hash_driver = { .probe = ux500_hash_probe, .remove = ux500_hash_remove, .shutdown = ux500_hash_shutdown, + .suspend = ux500_hash_suspend, + .resume = ux500_hash_resume, .driver = { .owner = THIS_MODULE, .name = "hash1", - .pm = &ux500_hash_pm, } }; diff --git a/trunk/drivers/gpu/drm/gma500/cdv_device.c b/trunk/drivers/gpu/drm/gma500/cdv_device.c index b7e7b49d8f62..9764045428ce 100644 --- a/trunk/drivers/gpu/drm/gma500/cdv_device.c +++ b/trunk/drivers/gpu/drm/gma500/cdv_device.c @@ -78,6 +78,21 @@ static int cdv_backlight_combination_mode(struct drm_device *dev) return REG_READ(BLC_PWM_CTL2) & PWM_LEGACY_MODE; } +static int cdv_get_brightness(struct backlight_device *bd) +{ + struct drm_device *dev = bl_get_data(bd); + u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; + + if (cdv_backlight_combination_mode(dev)) { + u8 lbpc; + + val &= ~1; + pci_read_config_byte(dev->pdev, 0xF4, &lbpc); + val *= lbpc; + } + return val; +} + static u32 cdv_get_max_backlight(struct drm_device *dev) { u32 max = REG_READ(BLC_PWM_CTL); @@ -95,22 +110,6 @@ static u32 cdv_get_max_backlight(struct drm_device *dev) return max; } -static int cdv_get_brightness(struct backlight_device *bd) -{ - struct drm_device *dev = bl_get_data(bd); - u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; - - if (cdv_backlight_combination_mode(dev)) { - u8 lbpc; - - val &= ~1; - pci_read_config_byte(dev->pdev, 0xF4, &lbpc); - val *= lbpc; - } - return (val * 100)/cdv_get_max_backlight(dev); - -} - static int cdv_set_brightness(struct 
backlight_device *bd) { struct drm_device *dev = bl_get_data(bd); @@ -121,9 +120,6 @@ static int cdv_set_brightness(struct backlight_device *bd) if (level < 1) level = 1; - level *= cdv_get_max_backlight(dev); - level /= 100; - if (cdv_backlight_combination_mode(dev)) { u32 max = cdv_get_max_backlight(dev); u8 lbpc; @@ -161,6 +157,7 @@ static int cdv_backlight_init(struct drm_device *dev) cdv_backlight_device->props.brightness = cdv_get_brightness(cdv_backlight_device); + cdv_backlight_device->props.max_brightness = cdv_get_max_backlight(dev); backlight_update_status(cdv_backlight_device); dev_priv->backlight_device = cdv_backlight_device; return 0; diff --git a/trunk/drivers/gpu/drm/gma500/opregion.c b/trunk/drivers/gpu/drm/gma500/opregion.c index c430bd424681..4f186eca3a30 100644 --- a/trunk/drivers/gpu/drm/gma500/opregion.c +++ b/trunk/drivers/gpu/drm/gma500/opregion.c @@ -144,8 +144,6 @@ struct opregion_asle { #define ASLE_CBLV_VALID (1<<31) -static struct psb_intel_opregion *system_opregion; - static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) { struct drm_psb_private *dev_priv = dev->dev_private; @@ -207,7 +205,7 @@ void psb_intel_opregion_enable_asle(struct drm_device *dev) struct drm_psb_private *dev_priv = dev->dev_private; struct opregion_asle *asle = dev_priv->opregion.asle; - if (asle && system_opregion ) { + if (asle) { /* Don't do this on Medfield or other non PC like devices, they use the bit for something different altogether */ psb_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE); @@ -223,6 +221,7 @@ void psb_intel_opregion_enable_asle(struct drm_device *dev) #define ACPI_EV_LID (1<<1) #define ACPI_EV_DOCK (1<<2) +static struct psb_intel_opregion *system_opregion; static int psb_intel_opregion_video_event(struct notifier_block *nb, unsigned long val, void *data) @@ -267,6 +266,9 @@ void psb_intel_opregion_init(struct drm_device *dev) system_opregion = opregion; register_acpi_notifier(&psb_intel_opregion_notifier); } + + if (opregion->asle) + psb_intel_opregion_enable_asle(dev); } void psb_intel_opregion_fini(struct drm_device *dev) diff --git a/trunk/drivers/gpu/drm/gma500/opregion.h b/trunk/drivers/gpu/drm/gma500/opregion.h index 4a90f8b0e16c..72dc6b921265 100644 --- a/trunk/drivers/gpu/drm/gma500/opregion.h +++ b/trunk/drivers/gpu/drm/gma500/opregion.h @@ -27,7 +27,6 @@ extern void psb_intel_opregion_asle_intr(struct drm_device *dev); extern void psb_intel_opregion_init(struct drm_device *dev); extern void psb_intel_opregion_fini(struct drm_device *dev); extern int psb_intel_opregion_setup(struct drm_device *dev); -extern void psb_intel_opregion_enable_asle(struct drm_device *dev); #else @@ -47,8 +46,4 @@ extern inline int psb_intel_opregion_setup(struct drm_device *dev) { return 0; } - -extern inline void psb_intel_opregion_enable_asle(struct drm_device *dev) -{ -} #endif diff --git a/trunk/drivers/gpu/drm/gma500/psb_device.c b/trunk/drivers/gpu/drm/gma500/psb_device.c index 5971bc82b765..eff039bf92d4 100644 --- a/trunk/drivers/gpu/drm/gma500/psb_device.c +++ b/trunk/drivers/gpu/drm/gma500/psb_device.c @@ -144,10 +144,6 @@ static int psb_backlight_init(struct drm_device *dev) psb_backlight_device->props.max_brightness = 100; backlight_update_status(psb_backlight_device); dev_priv->backlight_device = psb_backlight_device; - - /* This must occur after the backlight is properly initialised */ - psb_lid_timer_init(dev_priv); - return 0; } @@ -358,6 +354,13 @@ static int psb_chip_setup(struct drm_device *dev) return 0; } +/* Not exactly an erratum more 
an irritation */ +static void psb_chip_errata(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + psb_lid_timer_init(dev_priv); +} + static void psb_chip_teardown(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; @@ -376,6 +379,7 @@ const struct psb_ops psb_chip_ops = { .sgx_offset = PSB_SGX_OFFSET, .chip_setup = psb_chip_setup, .chip_teardown = psb_chip_teardown, + .errata = psb_chip_errata, .crtc_helper = &psb_intel_helper_funcs, .crtc_funcs = &psb_intel_crtc_funcs, diff --git a/trunk/drivers/gpu/drm/gma500/psb_drv.c b/trunk/drivers/gpu/drm/gma500/psb_drv.c index a8858a907f47..caba6e08693c 100644 --- a/trunk/drivers/gpu/drm/gma500/psb_drv.c +++ b/trunk/drivers/gpu/drm/gma500/psb_drv.c @@ -374,7 +374,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset) if (ret) return ret; - psb_intel_opregion_enable_asle(dev); #if 0 /*enable runtime pm at last*/ pm_runtime_enable(&dev->pdev->dev); diff --git a/trunk/drivers/hid/Kconfig b/trunk/drivers/hid/Kconfig index 3fda8c87f02c..bef04c192768 100644 --- a/trunk/drivers/hid/Kconfig +++ b/trunk/drivers/hid/Kconfig @@ -386,7 +386,6 @@ config HID_MULTITOUCH - Unitec Panels - XAT optical touch panels - Xiroku optical touch panels - - Zytronic touch panels If unsure, say N. diff --git a/trunk/drivers/hid/hid-ids.h b/trunk/drivers/hid/hid-ids.h index 32039235cfee..875ff451842b 100644 --- a/trunk/drivers/hid/hid-ids.h +++ b/trunk/drivers/hid/hid-ids.h @@ -659,9 +659,6 @@ #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 #define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600 -#define USB_VENDOR_ID_SENNHEISER 0x1395 -#define USB_DEVICE_ID_SENNHEISER_BTD500USB 0x002c - #define USB_VENDOR_ID_SIGMA_MICRO 0x1c4f #define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD 0x0002 @@ -811,9 +808,6 @@ #define USB_VENDOR_ID_ZYDACRON 0x13EC #define USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL 0x0006 -#define USB_VENDOR_ID_ZYTRONIC 0x14c8 -#define USB_DEVICE_ID_ZYTRONIC_ZXY100 0x0005 - #define USB_VENDOR_ID_PRIMAX 0x0461 #define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05 diff --git a/trunk/drivers/hid/hid-input.c b/trunk/drivers/hid/hid-input.c index 5301006f6c15..132b0019365e 100644 --- a/trunk/drivers/hid/hid-input.c +++ b/trunk/drivers/hid/hid-input.c @@ -301,9 +301,6 @@ static const struct hid_device_id hid_battery_quirks[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI), HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, - USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI), - HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, {} }; diff --git a/trunk/drivers/hid/hid-multitouch.c b/trunk/drivers/hid/hid-multitouch.c index 76479246d4ee..6e3332a99976 100644 --- a/trunk/drivers/hid/hid-multitouch.c +++ b/trunk/drivers/hid/hid-multitouch.c @@ -1048,11 +1048,6 @@ static const struct hid_device_id mt_devices[] = { MT_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR2) }, - /* Zytronic panels */ - { .driver_data = MT_CLS_SERIAL, - MT_USB_DEVICE(USB_VENDOR_ID_ZYTRONIC, - USB_DEVICE_ID_ZYTRONIC_ZXY100) }, - /* Generic MT device */ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) }, { } diff --git a/trunk/drivers/hid/usbhid/hid-quirks.c b/trunk/drivers/hid/usbhid/hid-quirks.c index 903eef3d3e10..0597ee604f6e 100644 --- a/trunk/drivers/hid/usbhid/hid-quirks.c +++ b/trunk/drivers/hid/usbhid/hid-quirks.c @@ -76,7 +76,6 @@ static const struct hid_blacklist { { USB_VENDOR_ID_PRODIGE, 
USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET }, { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET }, { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET }, - { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET }, { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET }, { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET }, { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET }, diff --git a/trunk/drivers/hwmon/acpi_power_meter.c b/trunk/drivers/hwmon/acpi_power_meter.c index e3fcf8146834..34ad5a27a7e9 100644 --- a/trunk/drivers/hwmon/acpi_power_meter.c +++ b/trunk/drivers/hwmon/acpi_power_meter.c @@ -929,25 +929,20 @@ static int acpi_power_meter_remove(struct acpi_device *device, int type) return 0; } -static int acpi_power_meter_resume(struct device *dev) +static int acpi_power_meter_resume(struct acpi_device *device) { struct acpi_power_meter_resource *resource; - if (!dev) - return -EINVAL; - - resource = acpi_driver_data(to_acpi_device(dev)); - if (!resource) + if (!device || !acpi_driver_data(device)) return -EINVAL; + resource = acpi_driver_data(device); free_capabilities(resource); read_capabilities(resource); return 0; } -static SIMPLE_DEV_PM_OPS(acpi_power_meter_pm, NULL, acpi_power_meter_resume); - static struct acpi_driver acpi_power_meter_driver = { .name = "power_meter", .class = ACPI_POWER_METER_CLASS, @@ -955,9 +950,9 @@ static struct acpi_driver acpi_power_meter_driver = { .ops = { .add = acpi_power_meter_add, .remove = acpi_power_meter_remove, + .resume = acpi_power_meter_resume, .notify = acpi_power_meter_notify, }, - .drv.pm = &acpi_power_meter_pm, }; /* Module init/exit routines */ diff --git a/trunk/drivers/hwmon/k10temp.c b/trunk/drivers/hwmon/k10temp.c index f2fe8078633b..7356b5ec8f67 100644 --- a/trunk/drivers/hwmon/k10temp.c +++ b/trunk/drivers/hwmon/k10temp.c @@ -33,6 +33,9 @@ static bool force; module_param(force, bool, 0444); MODULE_PARM_DESC(force, "force loading on processors with erratum 319"); +/* PCI-IDs for Northbridge devices not used anywhere else */ +#define PCI_DEVICE_ID_AMD_15H_M10H_NB_F3 0x1403 + /* CPUID function 0x80000001, ebx */ #define CPUID_PKGTYPE_MASK 0xf0000000 #define CPUID_PKGTYPE_F 0x00000000 @@ -210,7 +213,7 @@ static DEFINE_PCI_DEVICE_TABLE(k10temp_id_table) = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) }, - { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_NB_F3) }, {} }; MODULE_DEVICE_TABLE(pci, k10temp_id_table); diff --git a/trunk/drivers/idle/intel_idle.c b/trunk/drivers/idle/intel_idle.c index fe95d5464a02..d0f59c3f87ef 100644 --- a/trunk/drivers/idle/intel_idle.c +++ b/trunk/drivers/idle/intel_idle.c @@ -96,7 +96,6 @@ static const struct idle_cpu *icpu; static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; static int intel_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index); -static int intel_idle_cpu_init(int cpu); static struct cpuidle_state *cpuidle_state_table; @@ -303,35 +302,22 @@ static void __setup_broadcast_timer(void *arg) clockevents_notify(reason, &cpu); } -static int cpu_hotplug_notify(struct notifier_block *n, - unsigned long action, void *hcpu) +static int setup_broadcast_cpuhp_notify(struct notifier_block *n, + unsigned long action, void *hcpu) { int hotcpu = 
(unsigned long)hcpu; - struct cpuidle_device *dev; switch (action & 0xf) { case CPU_ONLINE: - - if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) - smp_call_function_single(hotcpu, __setup_broadcast_timer, - (void *)true, 1); - - /* - * Some systems can hotplug a cpu at runtime after - * the kernel has booted, we have to initialize the - * driver in this case - */ - dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu); - if (!dev->registered) - intel_idle_cpu_init(hotcpu); - + smp_call_function_single(hotcpu, __setup_broadcast_timer, + (void *)true, 1); break; } return NOTIFY_OK; } -static struct notifier_block cpu_hotplug_notifier = { - .notifier_call = cpu_hotplug_notify, +static struct notifier_block setup_broadcast_notifier = { + .notifier_call = setup_broadcast_cpuhp_notify, }; static void auto_demotion_disable(void *dummy) @@ -419,10 +405,10 @@ static int intel_idle_probe(void) if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */ lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE; - else + else { on_each_cpu(__setup_broadcast_timer, (void *)true, 1); - - register_cpu_notifier(&cpu_hotplug_notifier); + register_cpu_notifier(&setup_broadcast_notifier); + } pr_debug(PREFIX "v" INTEL_IDLE_VERSION " model 0x%X\n", boot_cpu_data.x86_model); @@ -508,7 +494,7 @@ static int intel_idle_cpuidle_driver_init(void) * allocate, initialize, register cpuidle_devices * @cpu: cpu/core to initialize */ -static int intel_idle_cpu_init(int cpu) +int intel_idle_cpu_init(int cpu) { int cstate; struct cpuidle_device *dev; @@ -553,6 +539,7 @@ static int intel_idle_cpu_init(int cpu) return 0; } +EXPORT_SYMBOL_GPL(intel_idle_cpu_init); static int __init intel_idle_init(void) { @@ -594,10 +581,10 @@ static void __exit intel_idle_exit(void) intel_idle_cpuidle_devices_uninit(); cpuidle_unregister_driver(&intel_idle_driver); - - if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) + if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) { on_each_cpu(__setup_broadcast_timer, (void *)false, 1); - unregister_cpu_notifier(&cpu_hotplug_notifier); + unregister_cpu_notifier(&setup_broadcast_notifier); + } return; } diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/trunk/drivers/infiniband/ulp/ipoib/ipoib_ib.c index f10221f40803..5c1bc995e560 100644 --- a/trunk/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -123,7 +123,7 @@ static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv, skb_frag_size_set(frag, size); skb->data_len += size; - skb->truesize += PAGE_SIZE; + skb->truesize += size; } else skb_put(skb, length); @@ -156,18 +156,14 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id) struct ipoib_dev_priv *priv = netdev_priv(dev); struct sk_buff *skb; int buf_size; - int tailroom; u64 *mapping; - if (ipoib_ud_need_sg(priv->max_ib_mtu)) { + if (ipoib_ud_need_sg(priv->max_ib_mtu)) buf_size = IPOIB_UD_HEAD_SIZE; - tailroom = 128; /* reserve some tailroom for IP/TCP headers */ - } else { + else buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu); - tailroom = 0; - } - skb = dev_alloc_skb(buf_size + tailroom + 4); + skb = dev_alloc_skb(buf_size + 4); if (unlikely(!skb)) return NULL; diff --git a/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c b/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c index 7a0ce8d42887..5f6b7f63cdef 100644 --- a/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -1377,14 +1377,10 @@ static int srpt_abort_cmd(struct 
srpt_send_ioctx *ioctx) break; case SRPT_STATE_NEED_DATA: /* DMA_TO_DEVICE (write) - RDMA read error. */ - - /* XXX(hch): this is a horrible layering violation.. */ spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags); ioctx->cmd.transport_state |= CMD_T_LUN_STOP; - ioctx->cmd.transport_state &= ~CMD_T_ACTIVE; spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags); - - complete(&ioctx->cmd.transport_lun_stop_comp); + transport_generic_handle_data(&ioctx->cmd); break; case SRPT_STATE_CMD_RSP_SENT: /* @@ -1467,10 +1463,9 @@ static void srpt_handle_send_comp(struct srpt_rdma_ch *ch, /** * srpt_handle_rdma_comp() - Process an IB RDMA completion notification. * - * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping - * the data that has been transferred via IB RDMA had to be postponed until the - * check_stop_free() callback. None of this is nessecary anymore and needs to - * be cleaned up. + * Note: transport_generic_handle_data() is asynchronous so unmapping the + * data that has been transferred via IB RDMA must be postponed until the + * check_stop_free() callback. */ static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch, struct srpt_send_ioctx *ioctx, @@ -1482,7 +1477,7 @@ static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch, if (opcode == SRPT_RDMA_READ_LAST) { if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA, SRPT_STATE_DATA_IN)) - target_execute_cmd(&ioctx->cmd); + transport_generic_handle_data(&ioctx->cmd); else printk(KERN_ERR "%s[%d]: wrong state = %d\n", __func__, __LINE__, srpt_get_cmd_state(ioctx)); diff --git a/trunk/drivers/iommu/dmar.c b/trunk/drivers/iommu/dmar.c index 86e2f4a62b9a..3a74e4410fc0 100644 --- a/trunk/drivers/iommu/dmar.c +++ b/trunk/drivers/iommu/dmar.c @@ -26,8 +26,6 @@ * These routines are used by both DMA-remapping and Interrupt-remapping */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */ - #include #include #include @@ -41,6 +39,8 @@ #include #include +#define PREFIX "DMAR: " + /* No locks are needed as DMA remapping hardware unit * list is constructed at boot time and hotplug of * these units are not supported by the architecture. 
@@ -83,12 +83,16 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope, * ignore it */ if (!bus) { - pr_warn("Device scope bus [%d] not found\n", scope->bus); + printk(KERN_WARNING + PREFIX "Device scope bus [%d] not found\n", + scope->bus); break; } pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn)); if (!pdev) { - /* warning will be printed below */ + printk(KERN_WARNING PREFIX + "Device scope device [%04x:%02x:%02x.%02x] not found\n", + segment, bus->number, path->dev, path->fn); break; } path ++; @@ -96,8 +100,9 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope, bus = pdev->subordinate; } if (!pdev) { - pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n", - segment, scope->bus, path->dev, path->fn); + printk(KERN_WARNING PREFIX + "Device scope device [%04x:%02x:%02x.%02x] not found\n", + segment, scope->bus, path->dev, path->fn); *dev = NULL; return 0; } @@ -105,8 +110,9 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope, pdev->subordinate) || (scope->entry_type == \ ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) { pci_dev_put(pdev); - pr_warn("Device scope type does not match for %s\n", - pci_name(pdev)); + printk(KERN_WARNING PREFIX + "Device scope type does not match for %s\n", + pci_name(pdev)); return -EINVAL; } *dev = pdev; @@ -128,7 +134,8 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt, scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) (*cnt)++; else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) { - pr_warn("Unsupported device scope\n"); + printk(KERN_WARNING PREFIX + "Unsupported device scope\n"); } start += scope->length; } @@ -254,23 +261,25 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header) case ACPI_DMAR_TYPE_HARDWARE_UNIT: drhd = container_of(header, struct acpi_dmar_hardware_unit, header); - pr_info("DRHD base: %#016Lx flags: %#x\n", + printk (KERN_INFO PREFIX + "DRHD base: %#016Lx flags: %#x\n", (unsigned long long)drhd->address, drhd->flags); break; case ACPI_DMAR_TYPE_RESERVED_MEMORY: rmrr = container_of(header, struct acpi_dmar_reserved_memory, header); - pr_info("RMRR base: %#016Lx end: %#016Lx\n", + printk (KERN_INFO PREFIX + "RMRR base: %#016Lx end: %#016Lx\n", (unsigned long long)rmrr->base_address, (unsigned long long)rmrr->end_address); break; case ACPI_DMAR_TYPE_ATSR: atsr = container_of(header, struct acpi_dmar_atsr, header); - pr_info("ATSR flags: %#x\n", atsr->flags); + printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags); break; case ACPI_DMAR_HARDWARE_AFFINITY: rhsa = container_of(header, struct acpi_dmar_rhsa, header); - pr_info("RHSA base: %#016Lx proximity domain: %#x\n", + printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n", (unsigned long long)rhsa->base_address, rhsa->proximity_domain); break; @@ -290,7 +299,7 @@ static int __init dmar_table_detect(void) &dmar_tbl_size); if (ACPI_SUCCESS(status) && !dmar_tbl) { - pr_warn("Unable to map DMAR\n"); + printk (KERN_WARNING PREFIX "Unable to map DMAR\n"); status = AE_NOT_FOUND; } @@ -324,18 +333,20 @@ parse_dmar_table(void) return -ENODEV; if (dmar->width < PAGE_SHIFT - 1) { - pr_warn("Invalid DMAR haw\n"); + printk(KERN_WARNING PREFIX "Invalid DMAR haw\n"); return -EINVAL; } - pr_info("Host address width %d\n", dmar->width + 1); + printk (KERN_INFO PREFIX "Host address width %d\n", + dmar->width + 1); entry_header = (struct acpi_dmar_header *)(dmar + 1); while (((unsigned long)entry_header) < (((unsigned long)dmar) + 
dmar_tbl->length)) { /* Avoid looping forever on bad ACPI tables */ if (entry_header->length == 0) { - pr_warn("Invalid 0-length structure\n"); + printk(KERN_WARNING PREFIX + "Invalid 0-length structure\n"); ret = -EINVAL; break; } @@ -358,7 +369,8 @@ parse_dmar_table(void) #endif break; default: - pr_warn("Unknown DMAR structure type %d\n", + printk(KERN_WARNING PREFIX + "Unknown DMAR structure type %d\n", entry_header->type); ret = 0; /* for forward compatibility */ break; @@ -457,12 +469,12 @@ int __init dmar_table_init(void) ret = parse_dmar_table(); if (ret) { if (ret != -ENODEV) - pr_info("parse DMAR table failure.\n"); + printk(KERN_INFO PREFIX "parse DMAR table failure.\n"); return ret; } if (list_empty(&dmar_drhd_units)) { - pr_info("No DMAR devices found\n"); + printk(KERN_INFO PREFIX "No DMAR devices found\n"); return -ENODEV; } @@ -494,7 +506,8 @@ int __init check_zero_address(void) (((unsigned long)dmar) + dmar_tbl->length)) { /* Avoid looping forever on bad ACPI tables */ if (entry_header->length == 0) { - pr_warn("Invalid 0-length structure\n"); + printk(KERN_WARNING PREFIX + "Invalid 0-length structure\n"); return 0; } @@ -545,7 +558,8 @@ int __init detect_intel_iommu(void) if (ret && irq_remapping_enabled && cpu_has_x2apic && dmar->flags & 0x1) - pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n"); + printk(KERN_INFO + "Queued invalidation will be enabled to support x2apic and Intr-remapping.\n"); if (ret && !no_iommu && !iommu_detected && !dmar_disabled) { iommu_detected = 1; @@ -565,89 +579,14 @@ int __init detect_intel_iommu(void) } -static void unmap_iommu(struct intel_iommu *iommu) -{ - iounmap(iommu->reg); - release_mem_region(iommu->reg_phys, iommu->reg_size); -} - -/** - * map_iommu: map the iommu's registers - * @iommu: the iommu to map - * @phys_addr: the physical address of the base resgister - * - * Memory map the iommu's registers. Start w/ a single page, and - * possibly expand if that turns out to be insufficent. 
- */ -static int map_iommu(struct intel_iommu *iommu, u64 phys_addr) -{ - int map_size, err=0; - - iommu->reg_phys = phys_addr; - iommu->reg_size = VTD_PAGE_SIZE; - - if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) { - pr_err("IOMMU: can't reserve memory\n"); - err = -EBUSY; - goto out; - } - - iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size); - if (!iommu->reg) { - pr_err("IOMMU: can't map the region\n"); - err = -ENOMEM; - goto release; - } - - iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG); - iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); - - if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) { - err = -EINVAL; - warn_invalid_dmar(phys_addr, " returns all ones"); - goto unmap; - } - - /* the registers might be more than one page */ - map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), - cap_max_fault_reg_offset(iommu->cap)); - map_size = VTD_PAGE_ALIGN(map_size); - if (map_size > iommu->reg_size) { - iounmap(iommu->reg); - release_mem_region(iommu->reg_phys, iommu->reg_size); - iommu->reg_size = map_size; - if (!request_mem_region(iommu->reg_phys, iommu->reg_size, - iommu->name)) { - pr_err("IOMMU: can't reserve memory\n"); - err = -EBUSY; - goto out; - } - iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size); - if (!iommu->reg) { - pr_err("IOMMU: can't map the region\n"); - err = -ENOMEM; - goto release; - } - } - err = 0; - goto out; - -unmap: - iounmap(iommu->reg); -release: - release_mem_region(iommu->reg_phys, iommu->reg_size); -out: - return err; -} - int alloc_iommu(struct dmar_drhd_unit *drhd) { struct intel_iommu *iommu; + int map_size; u32 ver; static int iommu_allocated = 0; int agaw = 0; int msagaw = 0; - int err; if (!drhd->reg_base_addr) { warn_invalid_dmar(0, ""); @@ -661,22 +600,30 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) iommu->seq_id = iommu_allocated++; sprintf (iommu->name, "dmar%d", iommu->seq_id); - err = map_iommu(iommu, drhd->reg_base_addr); - if (err) { - pr_err("IOMMU: failed to map %s\n", iommu->name); + iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE); + if (!iommu->reg) { + printk(KERN_ERR "IOMMU: can't map the region\n"); goto error; } + iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG); + iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); + + if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) { + warn_invalid_dmar(drhd->reg_base_addr, " returns all ones"); + goto err_unmap; + } - err = -EINVAL; agaw = iommu_calculate_agaw(iommu); if (agaw < 0) { - pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n", - iommu->seq_id); + printk(KERN_ERR + "Cannot get a valid agaw for iommu (seq_id = %d)\n", + iommu->seq_id); goto err_unmap; } msagaw = iommu_calculate_max_sagaw(iommu); if (msagaw < 0) { - pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n", + printk(KERN_ERR + "Cannot get a valid max agaw for iommu (seq_id = %d)\n", iommu->seq_id); goto err_unmap; } @@ -685,6 +632,19 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) iommu->node = -1; + /* the registers might be more than one page */ + map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), + cap_max_fault_reg_offset(iommu->cap)); + map_size = VTD_PAGE_ALIGN(map_size); + if (map_size > VTD_PAGE_SIZE) { + iounmap(iommu->reg); + iommu->reg = ioremap(drhd->reg_base_addr, map_size); + if (!iommu->reg) { + printk(KERN_ERR "IOMMU: can't map the region\n"); + goto error; + } + } + ver = readl(iommu->reg + DMAR_VER_REG); pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n", iommu->seq_id, @@ 
-699,10 +659,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) return 0; err_unmap: - unmap_iommu(iommu); + iounmap(iommu->reg); error: kfree(iommu); - return err; + return -1; } void free_iommu(struct intel_iommu *iommu) @@ -713,8 +673,7 @@ void free_iommu(struct intel_iommu *iommu) free_dmar_iommu(iommu); if (iommu->reg) - unmap_iommu(iommu); - + iounmap(iommu->reg); kfree(iommu); } @@ -751,7 +710,7 @@ static int qi_check_fault(struct intel_iommu *iommu, int index) if (fault & DMA_FSTS_IQE) { head = readl(iommu->reg + DMAR_IQH_REG); if ((head >> DMAR_IQ_SHIFT) == index) { - pr_err("VT-d detected invalid descriptor: " + printk(KERN_ERR "VT-d detected invalid descriptor: " "low=%llx, high=%llx\n", (unsigned long long)qi->desc[index].low, (unsigned long long)qi->desc[index].high); @@ -1170,14 +1129,15 @@ static int dmar_fault_do_one(struct intel_iommu *iommu, int type, reason = dmar_get_fault_reason(fault_reason, &fault_type); if (fault_type == INTR_REMAP) - pr_err("INTR-REMAP: Request device [[%02x:%02x.%d] " + printk(KERN_ERR "INTR-REMAP: Request device [[%02x:%02x.%d] " "fault index %llx\n" "INTR-REMAP:[fault reason %02d] %s\n", (source_id >> 8), PCI_SLOT(source_id & 0xFF), PCI_FUNC(source_id & 0xFF), addr >> 48, fault_reason, reason); else - pr_err("DMAR:[%s] Request device [%02x:%02x.%d] " + printk(KERN_ERR + "DMAR:[%s] Request device [%02x:%02x.%d] " "fault addr %llx \n" "DMAR:[fault reason %02d] %s\n", (type ? "DMA Read" : "DMA Write"), @@ -1197,7 +1157,8 @@ irqreturn_t dmar_fault(int irq, void *dev_id) raw_spin_lock_irqsave(&iommu->register_lock, flag); fault_status = readl(iommu->reg + DMAR_FSTS_REG); if (fault_status) - pr_err("DRHD: handling fault status reg %x\n", fault_status); + printk(KERN_ERR "DRHD: handling fault status reg %x\n", + fault_status); /* TBD: ignore advanced fault log currently */ if (!(fault_status & DMA_FSTS_PPF)) @@ -1263,7 +1224,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu) irq = create_irq(); if (!irq) { - pr_err("IOMMU: no free vectors\n"); + printk(KERN_ERR "IOMMU: no free vectors\n"); return -EINVAL; } @@ -1280,7 +1241,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu) ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu); if (ret) - pr_err("IOMMU: can't request irq\n"); + printk(KERN_ERR "IOMMU: can't request irq\n"); return ret; } @@ -1297,7 +1258,8 @@ int __init enable_drhd_fault_handling(void) ret = dmar_set_interrupt(iommu); if (ret) { - pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n", + printk(KERN_ERR "DRHD %Lx: failed to enable fault, " + " interrupt, ret %d\n", (unsigned long long)drhd->reg_base_addr, ret); return -1; } diff --git a/trunk/drivers/iommu/intel_irq_remapping.c b/trunk/drivers/iommu/intel_irq_remapping.c index e0b18f3ae9a8..6d347064b8b0 100644 --- a/trunk/drivers/iommu/intel_irq_remapping.c +++ b/trunk/drivers/iommu/intel_irq_remapping.c @@ -902,6 +902,7 @@ static int intel_setup_ioapic_entry(int irq, return 0; } +#ifdef CONFIG_SMP /* * Migrate the IO-APIC irq in the presence of intr-remapping. 
* @@ -923,10 +924,6 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, struct irq_cfg *cfg = data->chip_data; unsigned int dest, irq = data->irq; struct irte irte; - int err; - - if (!config_enabled(CONFIG_SMP)) - return -EINVAL; if (!cpumask_intersects(mask, cpu_online_mask)) return -EINVAL; @@ -934,16 +931,10 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, if (get_irte(irq, &irte)) return -EBUSY; - err = assign_irq_vector(irq, cfg, mask); - if (err) - return err; + if (assign_irq_vector(irq, cfg, mask)) + return -EBUSY; - err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest); - if (err) { - if (assign_irq_vector(irq, cfg, data->affinity)) - pr_err("Failed to recover vector for irq %d\n", irq); - return err; - } + dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); irte.vector = cfg->vector; irte.dest_id = IRTE_DEST(dest); @@ -965,6 +956,7 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, cpumask_copy(data->affinity, mask); return 0; } +#endif static void intel_compose_msi_msg(struct pci_dev *pdev, unsigned int irq, unsigned int dest, @@ -1066,7 +1058,9 @@ struct irq_remap_ops intel_irq_remap_ops = { .reenable = reenable_irq_remapping, .enable_faulting = enable_drhd_fault_handling, .setup_ioapic_entry = intel_setup_ioapic_entry, +#ifdef CONFIG_SMP .set_affinity = intel_ioapic_set_affinity, +#endif .free_irq = free_irte, .compose_msi_msg = intel_compose_msi_msg, .msi_alloc_irq = intel_msi_alloc_irq, diff --git a/trunk/drivers/iommu/irq_remapping.c b/trunk/drivers/iommu/irq_remapping.c index 1d29b1c66e72..40cda8e98d87 100644 --- a/trunk/drivers/iommu/irq_remapping.c +++ b/trunk/drivers/iommu/irq_remapping.c @@ -111,15 +111,16 @@ int setup_ioapic_remapped_entry(int irq, vector, attr); } +#ifdef CONFIG_SMP int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { - if (!config_enabled(CONFIG_SMP) || !remap_ops || - !remap_ops->set_affinity) + if (!remap_ops || !remap_ops->set_affinity) return 0; return remap_ops->set_affinity(data, mask, force); } +#endif void free_remapped_irq(int irq) { diff --git a/trunk/drivers/iommu/irq_remapping.h b/trunk/drivers/iommu/irq_remapping.h index b12974cc1dfe..be9d72950c51 100644 --- a/trunk/drivers/iommu/irq_remapping.h +++ b/trunk/drivers/iommu/irq_remapping.h @@ -59,9 +59,11 @@ struct irq_remap_ops { unsigned int, int, struct io_apic_irq_attr *); +#ifdef CONFIG_SMP /* Set the CPU affinity of a remapped interrupt */ int (*set_affinity)(struct irq_data *data, const struct cpumask *mask, bool force); +#endif /* Free an IRQ */ int (*free_irq)(int); diff --git a/trunk/drivers/isdn/mISDN/stack.c b/trunk/drivers/isdn/mISDN/stack.c index 5f21f629b7ae..1a0ae4445ff2 100644 --- a/trunk/drivers/isdn/mISDN/stack.c +++ b/trunk/drivers/isdn/mISDN/stack.c @@ -135,8 +135,8 @@ send_layer2(struct mISDNstack *st, struct sk_buff *skb) skb = NULL; else if (*debug & DEBUG_SEND_ERR) printk(KERN_DEBUG - "%s mgr prim(%x) err %d\n", - __func__, hh->prim, ret); + "%s ch%d mgr prim(%x) addr(%x) err %d\n", + __func__, ch->nr, hh->prim, ch->addr, ret); } out: mutex_unlock(&st->lmutex); diff --git a/trunk/drivers/md/dm-raid1.c b/trunk/drivers/md/dm-raid1.c index b58b7a33914a..d039de8322f0 100644 --- a/trunk/drivers/md/dm-raid1.c +++ b/trunk/drivers/md/dm-raid1.c @@ -1084,7 +1084,6 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti->split_io = dm_rh_get_region_size(ms->rh); ti->num_flush_requests = 1; 
ti->num_discard_requests = 1; - ti->discard_zeroes_data_unsupported = 1; ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0); @@ -1215,7 +1214,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, * We need to dec pending if this was a write. */ if (rw == WRITE) { - if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) + if (!(bio->bi_rw & REQ_FLUSH)) dm_rh_dec(ms->rh, map_context->ll); return error; } diff --git a/trunk/drivers/md/dm-region-hash.c b/trunk/drivers/md/dm-region-hash.c index 69732e03eb34..7771ed212182 100644 --- a/trunk/drivers/md/dm-region-hash.c +++ b/trunk/drivers/md/dm-region-hash.c @@ -404,9 +404,6 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio) return; } - if (bio->bi_rw & REQ_DISCARD) - return; - /* We must inform the log that the sync count has changed. */ log->type->set_region_sync(log, region, 0); @@ -527,7 +524,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios) struct bio *bio; for (bio = bios->head; bio; bio = bio->bi_next) { - if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)) + if (bio->bi_rw & REQ_FLUSH) continue; rh_inc(rh, dm_rh_bio_to_region(rh, bio)); } diff --git a/trunk/drivers/md/dm-thin.c b/trunk/drivers/md/dm-thin.c index 68694da0d21d..ce59824fb414 100644 --- a/trunk/drivers/md/dm-thin.c +++ b/trunk/drivers/md/dm-thin.c @@ -1245,10 +1245,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio) cell_release_singleton(cell, bio); cell_release_singleton(cell2, bio); - if ((!lookup_result.shared) && pool->pf.discard_passdown) - remap_and_issue(tc, bio, lookup_result.block); - else - bio_endio(bio, 0); + remap_and_issue(tc, bio, lookup_result.block); } break; @@ -2631,7 +2628,6 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) if (tc->pool->pf.discard_enabled) { ti->discards_supported = 1; ti->num_discard_requests = 1; - ti->discard_zeroes_data_unsupported = 1; } dm_put(pool_md); diff --git a/trunk/drivers/md/md.c b/trunk/drivers/md/md.c index d5ab4493c8be..a4c219e3c859 100644 --- a/trunk/drivers/md/md.c +++ b/trunk/drivers/md/md.c @@ -2931,7 +2931,6 @@ offset_store(struct md_rdev *rdev, const char *buf, size_t len) * can be sane */ return -EBUSY; rdev->data_offset = offset; - rdev->new_data_offset = offset; return len; } @@ -3927,8 +3926,8 @@ array_state_show(struct mddev *mddev, char *page) return sprintf(page, "%s\n", array_states[st]); } -static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev); -static int md_set_readonly(struct mddev * mddev, struct block_device *bdev); +static int do_md_stop(struct mddev * mddev, int ro, int is_open); +static int md_set_readonly(struct mddev * mddev, int is_open); static int do_md_run(struct mddev * mddev); static int restart_array(struct mddev *mddev); @@ -3944,14 +3943,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) /* stopping an active array */ if (atomic_read(&mddev->openers) > 0) return -EBUSY; - err = do_md_stop(mddev, 0, NULL); + err = do_md_stop(mddev, 0, 0); break; case inactive: /* stopping an active array */ if (mddev->pers) { if (atomic_read(&mddev->openers) > 0) return -EBUSY; - err = do_md_stop(mddev, 2, NULL); + err = do_md_stop(mddev, 2, 0); } else err = 0; /* already inactive */ break; @@ -3959,7 +3958,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) break; /* not supported yet */ case readonly: if (mddev->pers) - err = md_set_readonly(mddev, NULL); + err = md_set_readonly(mddev, 0); else { mddev->ro = 1; 
set_disk_ro(mddev->gendisk, 1); @@ -3969,7 +3968,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) case read_auto: if (mddev->pers) { if (mddev->ro == 0) - err = md_set_readonly(mddev, NULL); + err = md_set_readonly(mddev, 0); else if (mddev->ro == 1) err = restart_array(mddev); if (err == 0) { @@ -5352,17 +5351,15 @@ void md_stop(struct mddev *mddev) } EXPORT_SYMBOL_GPL(md_stop); -static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) +static int md_set_readonly(struct mddev *mddev, int is_open) { int err = 0; mutex_lock(&mddev->open_mutex); - if (atomic_read(&mddev->openers) > !!bdev) { + if (atomic_read(&mddev->openers) > is_open) { printk("md: %s still in use.\n",mdname(mddev)); err = -EBUSY; goto out; } - if (bdev) - sync_blockdev(bdev); if (mddev->pers) { __md_stop_writes(mddev); @@ -5384,26 +5381,18 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) * 0 - completely stop and dis-assemble array * 2 - stop but do not disassemble array */ -static int do_md_stop(struct mddev * mddev, int mode, - struct block_device *bdev) +static int do_md_stop(struct mddev * mddev, int mode, int is_open) { struct gendisk *disk = mddev->gendisk; struct md_rdev *rdev; mutex_lock(&mddev->open_mutex); - if (atomic_read(&mddev->openers) > !!bdev || + if (atomic_read(&mddev->openers) > is_open || mddev->sysfs_active) { printk("md: %s still in use.\n",mdname(mddev)); mutex_unlock(&mddev->open_mutex); return -EBUSY; } - if (bdev) - /* It is possible IO was issued on some other - * open file which was closed before we took ->open_mutex. - * As that was not the last close __blkdev_put will not - * have called sync_blockdev, so we must. - */ - sync_blockdev(bdev); if (mddev->pers) { if (mddev->ro) @@ -5477,7 +5466,7 @@ static void autorun_array(struct mddev *mddev) err = do_md_run(mddev); if (err) { printk(KERN_WARNING "md: do_md_run() returned %d\n", err); - do_md_stop(mddev, 0, NULL); + do_md_stop(mddev, 0, 0); } } @@ -6492,11 +6481,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, goto done_unlock; case STOP_ARRAY: - err = do_md_stop(mddev, 0, bdev); + err = do_md_stop(mddev, 0, 1); goto done_unlock; case STOP_ARRAY_RO: - err = md_set_readonly(mddev, bdev); + err = md_set_readonly(mddev, 1); goto done_unlock; case BLKROSET: diff --git a/trunk/drivers/md/raid1.c b/trunk/drivers/md/raid1.c index cacd008d6864..240ff3125040 100644 --- a/trunk/drivers/md/raid1.c +++ b/trunk/drivers/md/raid1.c @@ -1818,14 +1818,8 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) if (atomic_dec_and_test(&r1_bio->remaining)) { /* if we're here, all write(s) have completed, so clean up */ - int s = r1_bio->sectors; - if (test_bit(R1BIO_MadeGood, &r1_bio->state) || - test_bit(R1BIO_WriteError, &r1_bio->state)) - reschedule_retry(r1_bio); - else { - put_buf(r1_bio); - md_done_sync(mddev, s, 1); - } + md_done_sync(mddev, r1_bio->sectors, 1); + put_buf(r1_bio); } } diff --git a/trunk/drivers/media/video/cx25821/cx25821-core.c b/trunk/drivers/media/video/cx25821/cx25821-core.c index f11f6f07e915..83c1aa6b2e6c 100644 --- a/trunk/drivers/media/video/cx25821/cx25821-core.c +++ b/trunk/drivers/media/video/cx25821/cx25821-core.c @@ -904,6 +904,9 @@ static int cx25821_dev_setup(struct cx25821_dev *dev) list_add_tail(&dev->devlist, &cx25821_devlist); mutex_unlock(&cx25821_devlist_mutex); + strcpy(cx25821_boards[UNKNOWN_BOARD].name, "unknown"); + strcpy(cx25821_boards[CX25821_BOARD].name, "cx25821"); + if (dev->pci->device != 0x8210) { 
pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n", __func__, dev->pci->device); diff --git a/trunk/drivers/media/video/cx25821/cx25821.h b/trunk/drivers/media/video/cx25821/cx25821.h index 029f2934a6d8..b9aa801b00a7 100644 --- a/trunk/drivers/media/video/cx25821/cx25821.h +++ b/trunk/drivers/media/video/cx25821/cx25821.h @@ -187,7 +187,7 @@ enum port { }; struct cx25821_board { - const char *name; + char *name; enum port porta; enum port portb; enum port portc; diff --git a/trunk/drivers/media/video/v4l2-dev.c b/trunk/drivers/media/video/v4l2-dev.c index 0cbada18f6f5..83dbb2ddff10 100644 --- a/trunk/drivers/media/video/v4l2-dev.c +++ b/trunk/drivers/media/video/v4l2-dev.c @@ -681,7 +681,6 @@ static void determine_valid_ioctls(struct video_device *vdev) SET_VALID_IOCTL(ops, VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings); SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings); SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings); - SET_VALID_IOCTL(ops, VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap); /* yes, really vidioc_subscribe_event */ SET_VALID_IOCTL(ops, VIDIOC_DQEVENT, vidioc_subscribe_event); SET_VALID_IOCTL(ops, VIDIOC_SUBSCRIBE_EVENT, vidioc_subscribe_event); diff --git a/trunk/drivers/media/video/zoran/zoran.h b/trunk/drivers/media/video/zoran/zoran.h index ca2754a3cd63..d7166afc255e 100644 --- a/trunk/drivers/media/video/zoran/zoran.h +++ b/trunk/drivers/media/video/zoran/zoran.h @@ -172,10 +172,8 @@ struct zoran_jpg_settings { struct v4l2_jpegcompression jpg_comp; /* JPEG-specific capture settings */ }; -struct zoran_fh; - struct zoran_mapping { - struct zoran_fh *fh; + struct file *file; int count; }; diff --git a/trunk/drivers/media/video/zoran/zoran_driver.c b/trunk/drivers/media/video/zoran/zoran_driver.c index c6ccdeb6d8d6..c57310931810 100644 --- a/trunk/drivers/media/video/zoran/zoran_driver.c +++ b/trunk/drivers/media/video/zoran/zoran_driver.c @@ -2811,7 +2811,7 @@ static void zoran_vm_close (struct vm_area_struct *vma) { struct zoran_mapping *map = vma->vm_private_data; - struct zoran_fh *fh = map->fh; + struct zoran_fh *fh = map->file->private_data; struct zoran *zr = fh->zr; int i; @@ -2938,7 +2938,7 @@ zoran_mmap (struct file *file, res = -ENOMEM; goto mmap_unlock_and_return; } - map->fh = fh; + map->file = file; map->count = 1; vma->vm_ops = &zoran_vm_ops; diff --git a/trunk/drivers/mfd/tps65217.c b/trunk/drivers/mfd/tps65217.c index 61c097a98f5d..db194e433c08 100644 --- a/trunk/drivers/mfd/tps65217.c +++ b/trunk/drivers/mfd/tps65217.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include @@ -133,61 +132,6 @@ int tps65217_clear_bits(struct tps65217 *tps, unsigned int reg, } EXPORT_SYMBOL_GPL(tps65217_clear_bits); -#ifdef CONFIG_OF -static struct of_regulator_match reg_matches[] = { - { .name = "dcdc1", .driver_data = (void *)TPS65217_DCDC_1 }, - { .name = "dcdc2", .driver_data = (void *)TPS65217_DCDC_2 }, - { .name = "dcdc3", .driver_data = (void *)TPS65217_DCDC_3 }, - { .name = "ldo1", .driver_data = (void *)TPS65217_LDO_1 }, - { .name = "ldo2", .driver_data = (void *)TPS65217_LDO_2 }, - { .name = "ldo3", .driver_data = (void *)TPS65217_LDO_3 }, - { .name = "ldo4", .driver_data = (void *)TPS65217_LDO_4 }, -}; - -static struct tps65217_board *tps65217_parse_dt(struct i2c_client *client) -{ - struct device_node *node = client->dev.of_node; - struct tps65217_board *pdata; - struct device_node *regs; - int count = ARRAY_SIZE(reg_matches); - int ret, i; - - regs = of_find_node_by_name(node, "regulators"); - if 
(!regs) - return NULL; - - ret = of_regulator_match(&client->dev, regs, reg_matches, count); - of_node_put(regs); - if ((ret < 0) || (ret > count)) - return NULL; - - count = ret; - pdata = devm_kzalloc(&client->dev, count * sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return NULL; - - for (i = 0; i < count; i++) { - if (!reg_matches[i].init_data || !reg_matches[i].of_node) - continue; - - pdata->tps65217_init_data[i] = reg_matches[i].init_data; - pdata->of_node[i] = reg_matches[i].of_node; - } - - return pdata; -} - -static struct of_device_id tps65217_of_match[] = { - { .compatible = "ti,tps65217", }, - { }, -}; -#else -static struct tps65217_board *tps65217_parse_dt(struct i2c_client *client) -{ - return NULL; -} -#endif - static struct regmap_config tps65217_regmap_config = { .reg_bits = 8, .val_bits = 8, @@ -197,14 +141,10 @@ static int __devinit tps65217_probe(struct i2c_client *client, const struct i2c_device_id *ids) { struct tps65217 *tps; - struct regulator_init_data *reg_data; struct tps65217_board *pdata = client->dev.platform_data; int i, ret; unsigned int version; - if (!pdata && client->dev.of_node) - pdata = tps65217_parse_dt(client); - tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL); if (!tps) return -ENOMEM; @@ -242,9 +182,8 @@ static int __devinit tps65217_probe(struct i2c_client *client, } pdev->dev.parent = tps->dev; - pdev->dev.of_node = pdata->of_node[i]; - reg_data = pdata->tps65217_init_data[i]; - platform_device_add_data(pdev, reg_data, sizeof(*reg_data)); + platform_device_add_data(pdev, &pdata->tps65217_init_data[i], + sizeof(pdata->tps65217_init_data[i])); tps->regulator_pdev[i] = pdev; platform_device_add(pdev); @@ -273,8 +212,6 @@ MODULE_DEVICE_TABLE(i2c, tps65217_id_table); static struct i2c_driver tps65217_driver = { .driver = { .name = "tps65217", - .owner = THIS_MODULE, - .of_match_table = of_match_ptr(tps65217_of_match), }, .id_table = tps65217_id_table, .probe = tps65217_probe, diff --git a/trunk/drivers/mmc/card/block.c b/trunk/drivers/mmc/card/block.c index f1c84decb192..276d21ce6bc1 100644 --- a/trunk/drivers/mmc/card/block.c +++ b/trunk/drivers/mmc/card/block.c @@ -850,7 +850,9 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) goto retry; if (!err) mmc_blk_reset_success(md, type); - blk_end_request(req, err, blk_rq_bytes(req)); + spin_lock_irq(&md->lock); + __blk_end_request(req, err, blk_rq_bytes(req)); + spin_unlock_irq(&md->lock); return err ? 0 : 1; } @@ -932,7 +934,9 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, if (!err) mmc_blk_reset_success(md, type); out: - blk_end_request(req, err, blk_rq_bytes(req)); + spin_lock_irq(&md->lock); + __blk_end_request(req, err, blk_rq_bytes(req)); + spin_unlock_irq(&md->lock); return err ? 0 : 1; } @@ -947,7 +951,9 @@ static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) if (ret) ret = -EIO; - blk_end_request_all(req, ret); + spin_lock_irq(&md->lock); + __blk_end_request_all(req, ret); + spin_unlock_irq(&md->lock); return ret ? 
0 : 1; } @@ -1246,10 +1252,14 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, blocks = mmc_sd_num_wr_blocks(card); if (blocks != (u32)-1) { - ret = blk_end_request(req, 0, blocks << 9); + spin_lock_irq(&md->lock); + ret = __blk_end_request(req, 0, blocks << 9); + spin_unlock_irq(&md->lock); } } else { - ret = blk_end_request(req, 0, brq->data.bytes_xfered); + spin_lock_irq(&md->lock); + ret = __blk_end_request(req, 0, brq->data.bytes_xfered); + spin_unlock_irq(&md->lock); } return ret; } @@ -1301,8 +1311,10 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) * A block was successfully transferred. */ mmc_blk_reset_success(md, type); - ret = blk_end_request(req, 0, + spin_lock_irq(&md->lock); + ret = __blk_end_request(req, 0, brq->data.bytes_xfered); + spin_unlock_irq(&md->lock); /* * If the blk_end_request function returns non-zero even * though all data has been transferred and no errors @@ -1352,8 +1364,10 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) * time, so we only reach here after trying to * read a single sector. */ - ret = blk_end_request(req, -EIO, + spin_lock_irq(&md->lock); + ret = __blk_end_request(req, -EIO, brq->data.blksz); + spin_unlock_irq(&md->lock); if (!ret) goto start_new_req; break; @@ -1374,10 +1388,12 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) return 1; cmd_abort: + spin_lock_irq(&md->lock); if (mmc_card_removed(card)) req->cmd_flags |= REQ_QUIET; while (ret) - ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); + ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); + spin_unlock_irq(&md->lock); start_new_req: if (rqc) { @@ -1401,7 +1417,9 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) ret = mmc_blk_part_switch(card, md); if (ret) { if (req) { - blk_end_request_all(req, -EIO); + spin_lock_irq(&md->lock); + __blk_end_request_all(req, -EIO); + spin_unlock_irq(&md->lock); } ret = 0; goto out; diff --git a/trunk/drivers/mmc/core/Makefile b/trunk/drivers/mmc/core/Makefile index 38ed210ce2f3..dca4428380f1 100644 --- a/trunk/drivers/mmc/core/Makefile +++ b/trunk/drivers/mmc/core/Makefile @@ -7,6 +7,6 @@ mmc_core-y := core.o bus.o host.o \ mmc.o mmc_ops.o sd.o sd_ops.o \ sdio.o sdio_ops.o sdio_bus.o \ sdio_cis.o sdio_io.o sdio_irq.o \ - quirks.o slot-gpio.o + quirks.o cd-gpio.o mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o diff --git a/trunk/drivers/mmc/core/cd-gpio.c b/trunk/drivers/mmc/core/cd-gpio.c new file mode 100644 index 000000000000..8f5dc08d6598 --- /dev/null +++ b/trunk/drivers/mmc/core/cd-gpio.c @@ -0,0 +1,83 @@ +/* + * Generic GPIO card-detect helper + * + * Copyright (C) 2011, Guennadi Liakhovetski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +struct mmc_cd_gpio { + unsigned int gpio; + char label[0]; +}; + +static irqreturn_t mmc_cd_gpio_irqt(int irq, void *dev_id) +{ + /* Schedule a card detection after a debounce timeout */ + mmc_detect_change(dev_id, msecs_to_jiffies(100)); + return IRQ_HANDLED; +} + +int mmc_cd_gpio_request(struct mmc_host *host, unsigned int gpio) +{ + size_t len = strlen(dev_name(host->parent)) + 4; + struct mmc_cd_gpio *cd; + int irq = gpio_to_irq(gpio); + int ret; + + if (irq < 0) + return irq; + + cd = kmalloc(sizeof(*cd) + len, GFP_KERNEL); + if (!cd) + return -ENOMEM; + + snprintf(cd->label, len, "%s cd", dev_name(host->parent)); + + ret = gpio_request_one(gpio, GPIOF_DIR_IN, cd->label); + if (ret < 0) + goto egpioreq; + + ret = request_threaded_irq(irq, NULL, mmc_cd_gpio_irqt, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | + IRQF_ONESHOT, cd->label, host); + if (ret < 0) + goto eirqreq; + + cd->gpio = gpio; + host->hotplug.irq = irq; + host->hotplug.handler_priv = cd; + + return 0; + +eirqreq: + gpio_free(gpio); +egpioreq: + kfree(cd); + return ret; +} +EXPORT_SYMBOL(mmc_cd_gpio_request); + +void mmc_cd_gpio_free(struct mmc_host *host) +{ + struct mmc_cd_gpio *cd = host->hotplug.handler_priv; + + if (!cd) + return; + + free_irq(host->hotplug.irq, host); + gpio_free(cd->gpio); + kfree(cd); +} +EXPORT_SYMBOL(mmc_cd_gpio_free); diff --git a/trunk/drivers/mmc/core/core.c b/trunk/drivers/mmc/core/core.c index 8ac5246e2ab2..0b6141d29dbd 100644 --- a/trunk/drivers/mmc/core/core.c +++ b/trunk/drivers/mmc/core/core.c @@ -404,7 +404,6 @@ int mmc_interrupt_hpi(struct mmc_card *card) { int err; u32 status; - unsigned long prg_wait; BUG_ON(!card); @@ -420,38 +419,30 @@ int mmc_interrupt_hpi(struct mmc_card *card) goto out; } - switch (R1_CURRENT_STATE(status)) { - case R1_STATE_IDLE: - case R1_STATE_READY: - case R1_STATE_STBY: - /* - * In idle states, HPI is not needed and the caller - * can issue the next intended command immediately - */ - goto out; - case R1_STATE_PRG: - break; - default: - /* In all other states, it's illegal to issue HPI */ - pr_debug("%s: HPI cannot be sent. Card state=%d\n", - mmc_hostname(card->host), R1_CURRENT_STATE(status)); - err = -EINVAL; - goto out; - } - - err = mmc_send_hpi_cmd(card, &status); - if (err) - goto out; - - prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time); - do { - err = mmc_send_status(card, &status); + /* + * If the card status is in PRG-state, we can send the HPI command. + */ + if (R1_CURRENT_STATE(status) == R1_STATE_PRG) { + do { + /* + * We don't know when the HPI command will finish + * processing, so we need to resend HPI until out + * of prg-state, and keep checking the card status + * with SEND_STATUS. If a timeout error occurs when + * sending the HPI command, we are already out of + * prg-state. 
+ */ + err = mmc_send_hpi_cmd(card, &status); + if (err) + pr_debug("%s: abort HPI (%d error)\n", + mmc_hostname(card->host), err); - if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN) - break; - if (time_after(jiffies, prg_wait)) - err = -ETIMEDOUT; - } while (!err); + err = mmc_send_status(card, &status); + if (err) + break; + } while (R1_CURRENT_STATE(status) == R1_STATE_PRG); + } else + pr_debug("%s: Left prg-state\n", mmc_hostname(card->host)); out: mmc_release_host(card->host); @@ -950,7 +941,7 @@ int mmc_regulator_get_ocrmask(struct regulator *supply) return result; } -EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask); +EXPORT_SYMBOL(mmc_regulator_get_ocrmask); /** * mmc_regulator_set_ocr - set regulator to match host->ios voltage @@ -1020,30 +1011,7 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc, "could not set regulator OCR (%d)\n", result); return result; } -EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr); - -int mmc_regulator_get_supply(struct mmc_host *mmc) -{ - struct device *dev = mmc_dev(mmc); - struct regulator *supply; - int ret; - - supply = devm_regulator_get(dev, "vmmc"); - mmc->supply.vmmc = supply; - mmc->supply.vqmmc = devm_regulator_get(dev, "vqmmc"); - - if (IS_ERR(supply)) - return PTR_ERR(supply); - - ret = mmc_regulator_get_ocrmask(supply); - if (ret > 0) - mmc->ocr_avail = ret; - else - dev_warn(mmc_dev(mmc), "Failed getting OCR mask: %d\n", ret); - - return 0; -} -EXPORT_SYMBOL_GPL(mmc_regulator_get_supply); +EXPORT_SYMBOL(mmc_regulator_set_ocr); #endif /* CONFIG_REGULATOR */ @@ -1212,9 +1180,6 @@ static void mmc_power_up(struct mmc_host *host) host->ios.timing = MMC_TIMING_LEGACY; mmc_set_ios(host); - /* Set signal voltage to 3.3V */ - mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, false); - /* * This delay should be sufficient to allow the power supply * to reach the minimum voltage. @@ -1966,6 +1931,9 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq) */ mmc_hw_reset_for_init(host); + /* Initialization should be done at 3.3 V I/O voltage. */ + mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0); + /* * sdio_reset sends CMD52 to reset card. Since we do not know * if the card is being re-initialized, just send it. 
CMD52 @@ -2107,7 +2075,6 @@ void mmc_rescan(struct work_struct *work) void mmc_start_host(struct mmc_host *host) { host->f_init = max(freqs[0], host->f_min); - host->rescan_disable = 0; mmc_power_up(host); mmc_detect_change(host, 0); } @@ -2121,7 +2088,6 @@ void mmc_stop_host(struct mmc_host *host) spin_unlock_irqrestore(&host->lock, flags); #endif - host->rescan_disable = 1; cancel_delayed_work_sync(&host->detect); mmc_flush_scheduled_work(); diff --git a/trunk/drivers/mmc/core/host.c b/trunk/drivers/mmc/core/host.c index 597f189b4427..91c84c7a1829 100644 --- a/trunk/drivers/mmc/core/host.c +++ b/trunk/drivers/mmc/core/host.c @@ -32,7 +32,6 @@ static void mmc_host_classdev_release(struct device *dev) { struct mmc_host *host = cls_dev_to_mmc_host(dev); - mutex_destroy(&host->slot.lock); kfree(host); } @@ -313,8 +312,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) if (!host) return NULL; - /* scanning will be enabled when we're ready */ - host->rescan_disable = 1; spin_lock(&mmc_host_lock); err = idr_get_new(&mmc_host_idr, host, &host->index); spin_unlock(&mmc_host_lock); @@ -330,9 +327,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) mmc_host_clk_init(host); - mutex_init(&host->slot.lock); - host->slot.cd_irq = -EINVAL; - spin_lock_init(&host->lock); init_waitqueue_head(&host->wq); INIT_DELAYED_WORK(&host->detect, mmc_rescan); diff --git a/trunk/drivers/mmc/core/mmc.c b/trunk/drivers/mmc/core/mmc.c index 396b25891bb9..4f4489aa6bae 100644 --- a/trunk/drivers/mmc/core/mmc.c +++ b/trunk/drivers/mmc/core/mmc.c @@ -818,6 +818,9 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, if (!mmc_host_is_spi(host)) mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN); + /* Initialization should be done at 3.3 V I/O voltage. */ + mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0); + /* * Since we're changing the OCR value, we seem to * need to tell some cards to go back to the idle diff --git a/trunk/drivers/mmc/core/mmc_ops.c b/trunk/drivers/mmc/core/mmc_ops.c index 0ed2cc5f35b6..69370f494e05 100644 --- a/trunk/drivers/mmc/core/mmc_ops.c +++ b/trunk/drivers/mmc/core/mmc_ops.c @@ -569,6 +569,7 @@ int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status) cmd.opcode = opcode; cmd.arg = card->rca << 16 | 1; + cmd.cmd_timeout_ms = card->ext_csd.out_of_int_time; err = mmc_wait_for_cmd(card->host, &cmd, 0); if (err) { diff --git a/trunk/drivers/mmc/core/sd.c b/trunk/drivers/mmc/core/sd.c index 74972c241dff..b2b43f624b9e 100644 --- a/trunk/drivers/mmc/core/sd.c +++ b/trunk/drivers/mmc/core/sd.c @@ -244,7 +244,7 @@ static int mmc_read_ssr(struct mmc_card *card) * bitfield positions accordingly. */ au = UNSTUFF_BITS(ssr, 428 - 384, 4); - if (au > 0 && au <= 9) { + if (au > 0 || au <= 9) { card->ssr.au = 1 << (au + 4); es = UNSTUFF_BITS(ssr, 408 - 384, 16); et = UNSTUFF_BITS(ssr, 402 - 384, 6); @@ -290,12 +290,8 @@ static int mmc_read_switch(struct mmc_card *card) return -ENOMEM; } - /* - * Find out the card's support bits with a mode 0 operation. - * The argument does not matter, as the support bits do not - * change with the arguments. - */ - err = mmc_sd_switch(card, 0, 0, 0, status); + /* Find out the supported Bus Speed Modes. 
*/ + err = mmc_sd_switch(card, 0, 0, 1, status); if (err) { /* * If the host or the card can't do the switch, @@ -316,8 +312,46 @@ static int mmc_read_switch(struct mmc_card *card) if (card->scr.sda_spec3) { card->sw_caps.sd3_bus_mode = status[13]; - /* Driver Strengths supported by the card */ + + /* Find out Driver Strengths supported by the card */ + err = mmc_sd_switch(card, 0, 2, 1, status); + if (err) { + /* + * If the host or the card can't do the switch, + * fail more gracefully. + */ + if (err != -EINVAL && err != -ENOSYS && err != -EFAULT) + goto out; + + pr_warning("%s: problem reading " + "Driver Strength.\n", + mmc_hostname(card->host)); + err = 0; + + goto out; + } + card->sw_caps.sd3_drv_type = status[9]; + + /* Find out Current Limits supported by the card */ + err = mmc_sd_switch(card, 0, 3, 1, status); + if (err) { + /* + * If the host or the card can't do the switch, + * fail more gracefully. + */ + if (err != -EINVAL && err != -ENOSYS && err != -EFAULT) + goto out; + + pr_warning("%s: problem reading " + "Current Limit.\n", + mmc_hostname(card->host)); + err = 0; + + goto out; + } + + card->sw_caps.sd3_curr_limit = status[7]; } out: @@ -517,80 +551,60 @@ static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status) return 0; } -/* Get host's max current setting at its current voltage */ -static u32 sd_get_host_max_current(struct mmc_host *host) -{ - u32 voltage, max_current; - - voltage = 1 << host->ios.vdd; - switch (voltage) { - case MMC_VDD_165_195: - max_current = host->max_current_180; - break; - case MMC_VDD_29_30: - case MMC_VDD_30_31: - max_current = host->max_current_300; - break; - case MMC_VDD_32_33: - case MMC_VDD_33_34: - max_current = host->max_current_330; - break; - default: - max_current = 0; - } - - return max_current; -} - static int sd_set_current_limit(struct mmc_card *card, u8 *status) { - int current_limit = SD_SET_CURRENT_NO_CHANGE; + int current_limit = 0; int err; - u32 max_current; /* * Current limit switch is only defined for SDR50, SDR104, and DDR50 - * bus speed modes. For other bus speed modes, we do not change the - * current limit. + * bus speed modes. For other bus speed modes, we set the default + * current limit of 200mA. */ - if ((card->sd_bus_speed != UHS_SDR50_BUS_SPEED) && - (card->sd_bus_speed != UHS_SDR104_BUS_SPEED) && - (card->sd_bus_speed != UHS_DDR50_BUS_SPEED)) - return 0; - - /* - * Host has different current capabilities when operating at - * different voltages, so find out its max current first. - */ - max_current = sd_get_host_max_current(card->host); - - /* - * We only check host's capability here, if we set a limit that is - * higher than the card's maximum current, the card will be using its - * maximum current, e.g. if the card's maximum current is 300ma, and - * when we set current limit to 200ma, the card will draw 200ma, and - * when we set current limit to 400/600/800ma, the card will draw its - * maximum 300ma from the host. 
- */ - if (max_current >= 800) - current_limit = SD_SET_CURRENT_LIMIT_800; - else if (max_current >= 600) - current_limit = SD_SET_CURRENT_LIMIT_600; - else if (max_current >= 400) - current_limit = SD_SET_CURRENT_LIMIT_400; - else if (max_current >= 200) + if ((card->sd_bus_speed == UHS_SDR50_BUS_SPEED) || + (card->sd_bus_speed == UHS_SDR104_BUS_SPEED) || + (card->sd_bus_speed == UHS_DDR50_BUS_SPEED)) { + if (card->host->caps & MMC_CAP_MAX_CURRENT_800) { + if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_800) + current_limit = SD_SET_CURRENT_LIMIT_800; + else if (card->sw_caps.sd3_curr_limit & + SD_MAX_CURRENT_600) + current_limit = SD_SET_CURRENT_LIMIT_600; + else if (card->sw_caps.sd3_curr_limit & + SD_MAX_CURRENT_400) + current_limit = SD_SET_CURRENT_LIMIT_400; + else if (card->sw_caps.sd3_curr_limit & + SD_MAX_CURRENT_200) + current_limit = SD_SET_CURRENT_LIMIT_200; + } else if (card->host->caps & MMC_CAP_MAX_CURRENT_600) { + if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_600) + current_limit = SD_SET_CURRENT_LIMIT_600; + else if (card->sw_caps.sd3_curr_limit & + SD_MAX_CURRENT_400) + current_limit = SD_SET_CURRENT_LIMIT_400; + else if (card->sw_caps.sd3_curr_limit & + SD_MAX_CURRENT_200) + current_limit = SD_SET_CURRENT_LIMIT_200; + } else if (card->host->caps & MMC_CAP_MAX_CURRENT_400) { + if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_400) + current_limit = SD_SET_CURRENT_LIMIT_400; + else if (card->sw_caps.sd3_curr_limit & + SD_MAX_CURRENT_200) + current_limit = SD_SET_CURRENT_LIMIT_200; + } else if (card->host->caps & MMC_CAP_MAX_CURRENT_200) { + if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_200) + current_limit = SD_SET_CURRENT_LIMIT_200; + } + } else current_limit = SD_SET_CURRENT_LIMIT_200; - if (current_limit != SD_SET_CURRENT_NO_CHANGE) { - err = mmc_sd_switch(card, 1, 3, current_limit, status); - if (err) - return err; - - if (((status[15] >> 4) & 0x0F) != current_limit) - pr_warning("%s: Problem setting current limit!\n", - mmc_hostname(card->host)); + err = mmc_sd_switch(card, 1, 3, current_limit, status); + if (err) + return err; - } + if (((status[15] >> 4) & 0x0F) != current_limit) + pr_warning("%s: Problem setting current limit!\n", + mmc_hostname(card->host)); return 0; } @@ -712,7 +726,6 @@ struct device_type sd_type = { int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr) { int err; - u32 max_current; /* * Since we're changing the OCR value, we seem to @@ -740,12 +753,9 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr) MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)) ocr |= SD_OCR_S18R; - /* - * If the host can supply more than 150mA at current voltage, - * XPC should be set to 1. - */ - max_current = sd_get_host_max_current(host); - if (max_current > 150) + /* If the host can supply more than 150mA, XPC should be set to 1. */ + if (host->caps & (MMC_CAP_SET_XPC_330 | MMC_CAP_SET_XPC_300 | + MMC_CAP_SET_XPC_180)) ocr |= SD_OCR_XPC; try_again: @@ -901,6 +911,9 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, BUG_ON(!host); WARN_ON(!host->claimed); + /* The initialization should be done at 3.3 V I/O voltage. 
*/ + mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0); + err = mmc_sd_get_cid(host, ocr, cid, &rocr); if (err) return err; diff --git a/trunk/drivers/mmc/core/sdio.c b/trunk/drivers/mmc/core/sdio.c index d4619e2ec030..41c5fd8848f4 100644 --- a/trunk/drivers/mmc/core/sdio.c +++ b/trunk/drivers/mmc/core/sdio.c @@ -591,6 +591,9 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, * Inform the card of the voltage */ if (!powered_resume) { + /* The initialization should be done at 3.3 V I/O voltage. */ + mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0); + err = mmc_send_io_op_cond(host, host->ocr, &ocr); if (err) goto err; @@ -1003,6 +1006,10 @@ static int mmc_sdio_power_restore(struct mmc_host *host) * restore the correct voltage setting of the card. */ + /* The initialization should be done at 3.3 V I/O voltage. */ + if (!mmc_card_keep_power(host)) + mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0); + sdio_reset(host); mmc_go_idle(host); mmc_send_if_cond(host, host->ocr_avail); diff --git a/trunk/drivers/mmc/core/sdio_cis.c b/trunk/drivers/mmc/core/sdio_cis.c index 8e94e555b788..f1c7ed8f4d85 100644 --- a/trunk/drivers/mmc/core/sdio_cis.c +++ b/trunk/drivers/mmc/core/sdio_cis.c @@ -313,7 +313,7 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) if (ret == -ENOENT) { /* warn about unknown tuples */ - pr_warn_ratelimited("%s: queuing unknown" + pr_warning("%s: queuing unknown" " CIS tuple 0x%02x (%u bytes)\n", mmc_hostname(card->host), tpl_code, tpl_link); diff --git a/trunk/drivers/mmc/core/slot-gpio.c b/trunk/drivers/mmc/core/slot-gpio.c deleted file mode 100644 index 058242916cef..000000000000 --- a/trunk/drivers/mmc/core/slot-gpio.c +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Generic GPIO card-detect helper - * - * Copyright (C) 2011, Guennadi Liakhovetski - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -struct mmc_gpio { - int ro_gpio; - int cd_gpio; - char *ro_label; - char cd_label[0]; -}; - -static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id) -{ - /* Schedule a card detection after a debounce timeout */ - mmc_detect_change(dev_id, msecs_to_jiffies(100)); - return IRQ_HANDLED; -} - -static int mmc_gpio_alloc(struct mmc_host *host) -{ - size_t len = strlen(dev_name(host->parent)) + 4; - struct mmc_gpio *ctx; - - mutex_lock(&host->slot.lock); - - ctx = host->slot.handler_priv; - if (!ctx) { - /* - * devm_kzalloc() can be called after device_initialize(), even - * before device_add(), i.e., between mmc_alloc_host() and - * mmc_add_host() - */ - ctx = devm_kzalloc(&host->class_dev, sizeof(*ctx) + 2 * len, - GFP_KERNEL); - if (ctx) { - ctx->ro_label = ctx->cd_label + len; - snprintf(ctx->cd_label, len, "%s cd", dev_name(host->parent)); - snprintf(ctx->ro_label, len, "%s ro", dev_name(host->parent)); - ctx->cd_gpio = -EINVAL; - ctx->ro_gpio = -EINVAL; - host->slot.handler_priv = ctx; - } - } - - mutex_unlock(&host->slot.lock); - - return ctx ? 
0 : -ENOMEM; -} - -int mmc_gpio_get_ro(struct mmc_host *host) -{ - struct mmc_gpio *ctx = host->slot.handler_priv; - - if (!ctx || !gpio_is_valid(ctx->ro_gpio)) - return -ENOSYS; - - return !gpio_get_value_cansleep(ctx->ro_gpio) ^ - !!(host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH); -} -EXPORT_SYMBOL(mmc_gpio_get_ro); - -int mmc_gpio_get_cd(struct mmc_host *host) -{ - struct mmc_gpio *ctx = host->slot.handler_priv; - - if (!ctx || !gpio_is_valid(ctx->cd_gpio)) - return -ENOSYS; - - return !gpio_get_value_cansleep(ctx->cd_gpio) ^ - !!(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH); -} -EXPORT_SYMBOL(mmc_gpio_get_cd); - -int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio) -{ - struct mmc_gpio *ctx; - int ret; - - if (!gpio_is_valid(gpio)) - return -EINVAL; - - ret = mmc_gpio_alloc(host); - if (ret < 0) - return ret; - - ctx = host->slot.handler_priv; - - return gpio_request_one(gpio, GPIOF_DIR_IN, ctx->ro_label); -} -EXPORT_SYMBOL(mmc_gpio_request_ro); - -int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio) -{ - struct mmc_gpio *ctx; - int irq = gpio_to_irq(gpio); - int ret; - - ret = mmc_gpio_alloc(host); - if (ret < 0) - return ret; - - ctx = host->slot.handler_priv; - - ret = gpio_request_one(gpio, GPIOF_DIR_IN, ctx->cd_label); - if (ret < 0) - /* - * don't bother freeing memory. It might still get used by other - * slot functions, in any case it will be freed, when the device - * is destroyed. - */ - return ret; - - /* - * Even if gpio_to_irq() returns a valid IRQ number, the platform might - * still prefer to poll, e.g., because that IRQ number is already used - * by another unit and cannot be shared. - */ - if (irq >= 0 && host->caps & MMC_CAP_NEEDS_POLL) - irq = -EINVAL; - - if (irq >= 0) { - ret = request_threaded_irq(irq, NULL, mmc_gpio_cd_irqt, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, - ctx->cd_label, host); - if (ret < 0) - irq = ret; - } - - host->slot.cd_irq = irq; - - if (irq < 0) - host->caps |= MMC_CAP_NEEDS_POLL; - - ctx->cd_gpio = gpio; - - return 0; -} -EXPORT_SYMBOL(mmc_gpio_request_cd); - -void mmc_gpio_free_ro(struct mmc_host *host) -{ - struct mmc_gpio *ctx = host->slot.handler_priv; - int gpio; - - if (!ctx || !gpio_is_valid(ctx->ro_gpio)) - return; - - gpio = ctx->ro_gpio; - ctx->ro_gpio = -EINVAL; - - gpio_free(gpio); -} -EXPORT_SYMBOL(mmc_gpio_free_ro); - -void mmc_gpio_free_cd(struct mmc_host *host) -{ - struct mmc_gpio *ctx = host->slot.handler_priv; - int gpio; - - if (!ctx || !gpio_is_valid(ctx->cd_gpio)) - return; - - if (host->slot.cd_irq >= 0) { - free_irq(host->slot.cd_irq, host); - host->slot.cd_irq = -EINVAL; - } - - gpio = ctx->cd_gpio; - ctx->cd_gpio = -EINVAL; - - gpio_free(gpio); -} -EXPORT_SYMBOL(mmc_gpio_free_cd); diff --git a/trunk/drivers/mmc/host/atmel-mci.c b/trunk/drivers/mmc/host/atmel-mci.c index 322412cec4ee..f2c115e06438 100644 --- a/trunk/drivers/mmc/host/atmel-mci.c +++ b/trunk/drivers/mmc/host/atmel-mci.c @@ -391,17 +391,11 @@ static int atmci_regs_show(struct seq_file *s, void *v) clk_disable(host->mck); spin_unlock_bh(&host->lock); - seq_printf(s, "MR:\t0x%08x%s%s ", + seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n", buf[ATMCI_MR / 4], buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "", - buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : ""); - if (host->caps.has_odd_clk_div) - seq_printf(s, "{CLKDIV,CLKODD}=%u\n", - ((buf[ATMCI_MR / 4] & 0xff) << 1) - | ((buf[ATMCI_MR / 4] >> 16) & 1)); - else - seq_printf(s, "CLKDIV=%u\n", - (buf[ATMCI_MR / 4] & 0xff)); + buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? 
" WRPROOF" : "", + buf[ATMCI_MR / 4] & 0xff); seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]); seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]); seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]); @@ -1691,6 +1685,7 @@ static void atmci_tasklet_func(unsigned long priv) dev_dbg(&host->pdev->dev, "FSM: cmd ready\n"); host->cmd = NULL; + host->data = NULL; data->bytes_xfered = data->blocks * data->blksz; data->error = 0; atmci_command_complete(host, mrq->stop); @@ -1704,7 +1699,6 @@ static void atmci_tasklet_func(unsigned long priv) atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); state = STATE_WAITING_NOTBUSY; } - host->data = NULL; break; case STATE_END_REQUEST: diff --git a/trunk/drivers/mmc/host/dw_mmc.c b/trunk/drivers/mmc/host/dw_mmc.c index 72dc3cde646d..1ca5e72ceb65 100644 --- a/trunk/drivers/mmc/host/dw_mmc.c +++ b/trunk/drivers/mmc/host/dw_mmc.c @@ -405,23 +405,11 @@ static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) static int dw_mci_idmac_init(struct dw_mci *host) { struct idmac_desc *p; - int i, dma_support; + int i; /* Number of descriptors in the ring buffer */ host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc); - /* Check if Hardware Configuration Register has support for DMA */ - dma_support = (mci_readl(host, HCON) >> 16) & 0x3; - - if (!dma_support || dma_support > 2) { - dev_err(&host->dev, - "Host Controller does not support IDMA Tx.\n"); - host->dma_ops = NULL; - return -ENODEV; - } - - dev_info(&host->dev, "Using internal DMA controller.\n"); - /* Forward link the descriptor list */ for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1)); @@ -1888,6 +1876,7 @@ static void dw_mci_init_dma(struct dw_mci *host) /* Determine which DMA interface to use */ #ifdef CONFIG_MMC_DW_IDMAC host->dma_ops = &dw_mci_idmac_ops; + dev_info(&host->dev, "Using internal DMA controller.\n"); #endif if (!host->dma_ops) @@ -2186,7 +2175,7 @@ int dw_mci_resume(struct dw_mci *host) return ret; } - if (host->use_dma && host->dma_ops->init) + if (host->dma_ops->init) host->dma_ops->init(host); /* Restore the old value at FIFOTH register */ diff --git a/trunk/drivers/mmc/host/mxs-mmc.c b/trunk/drivers/mmc/host/mxs-mmc.c index a51f9309ffbb..277161d279b8 100644 --- a/trunk/drivers/mmc/host/mxs-mmc.c +++ b/trunk/drivers/mmc/host/mxs-mmc.c @@ -164,23 +164,16 @@ struct mxs_mmc_host { spinlock_t lock; int sdio_irq_en; int wp_gpio; - bool wp_inverted; }; static int mxs_mmc_get_ro(struct mmc_host *mmc) { struct mxs_mmc_host *host = mmc_priv(mmc); - int ret; if (!gpio_is_valid(host->wp_gpio)) return -EINVAL; - ret = gpio_get_value(host->wp_gpio); - - if (host->wp_inverted) - ret = !ret; - - return ret; + return gpio_get_value(host->wp_gpio); } static int mxs_mmc_get_cd(struct mmc_host *mmc) @@ -714,8 +707,6 @@ static int mxs_mmc_probe(struct platform_device *pdev) struct pinctrl *pinctrl; int ret = 0, irq_err, irq_dma; dma_cap_mask_t mask; - struct regulator *reg_vmmc; - enum of_gpio_flags flags; iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0); @@ -756,16 +747,6 @@ static int mxs_mmc_probe(struct platform_device *pdev) host->mmc = mmc; host->sdio_irq_en = 0; - reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc"); - if (!IS_ERR(reg_vmmc)) { - ret = regulator_enable(reg_vmmc); - if (ret) { - dev_err(&pdev->dev, - "Failed to enable vmmc regulator: %d\n", ret); - goto out_mmc_free; - } - } - pinctrl = 
devm_pinctrl_get_select_default(&pdev->dev); if (IS_ERR(pinctrl)) { ret = PTR_ERR(pinctrl); @@ -804,10 +785,7 @@ static int mxs_mmc_probe(struct platform_device *pdev) mmc->caps |= MMC_CAP_4_BIT_DATA; else if (bus_width == 8) mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA; - host->wp_gpio = of_get_named_gpio_flags(np, "wp-gpios", 0, - &flags); - if (flags & OF_GPIO_ACTIVE_LOW) - host->wp_inverted = 1; + host->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0); } else { if (pdata->flags & SLOTF_8_BIT_CAPABLE) mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA; diff --git a/trunk/drivers/mmc/host/omap_hsmmc.c b/trunk/drivers/mmc/host/omap_hsmmc.c index bc28627af66b..389a3eedfc24 100644 --- a/trunk/drivers/mmc/host/omap_hsmmc.c +++ b/trunk/drivers/mmc/host/omap_hsmmc.c @@ -1089,7 +1089,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd) /* Disable the clocks */ pm_runtime_put_sync(host->dev); if (host->dbclk) - clk_disable_unprepare(host->dbclk); + clk_disable(host->dbclk); /* Turn the power off */ ret = mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0); @@ -1100,7 +1100,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd) vdd); pm_runtime_get_sync(host->dev); if (host->dbclk) - clk_prepare_enable(host->dbclk); + clk_enable(host->dbclk); if (ret != 0) goto err; @@ -1899,7 +1899,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev) if (IS_ERR(host->dbclk)) { dev_warn(mmc_dev(host->mmc), "Failed to get debounce clk\n"); host->dbclk = NULL; - } else if (clk_prepare_enable(host->dbclk) != 0) { + } else if (clk_enable(host->dbclk) != 0) { dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n"); clk_put(host->dbclk); host->dbclk = NULL; @@ -1931,7 +1931,6 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev) res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx"); if (!res) { dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n"); - ret = -ENXIO; goto err_irq; } host->dma_line_tx = res->start; @@ -1939,7 +1938,6 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev) res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx"); if (!res) { dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n"); - ret = -ENXIO; goto err_irq; } host->dma_line_rx = res->start; @@ -2025,7 +2023,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev) pm_runtime_disable(host->dev); clk_put(host->fclk); if (host->dbclk) { - clk_disable_unprepare(host->dbclk); + clk_disable(host->dbclk); clk_put(host->dbclk); } err1: @@ -2060,7 +2058,7 @@ static int __devexit omap_hsmmc_remove(struct platform_device *pdev) pm_runtime_disable(host->dev); clk_put(host->fclk); if (host->dbclk) { - clk_disable_unprepare(host->dbclk); + clk_disable(host->dbclk); clk_put(host->dbclk); } @@ -2118,7 +2116,7 @@ static int omap_hsmmc_suspend(struct device *dev) } if (host->dbclk) - clk_disable_unprepare(host->dbclk); + clk_disable(host->dbclk); err: pm_runtime_put_sync(host->dev); return ret; @@ -2139,7 +2137,7 @@ static int omap_hsmmc_resume(struct device *dev) pm_runtime_get_sync(host->dev); if (host->dbclk) - clk_prepare_enable(host->dbclk); + clk_enable(host->dbclk); if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) omap_hsmmc_conf_bus_power(host); diff --git a/trunk/drivers/mmc/host/s3cmci.c b/trunk/drivers/mmc/host/s3cmci.c index bd5a5cce122c..c3622a69f432 100644 --- a/trunk/drivers/mmc/host/s3cmci.c +++ b/trunk/drivers/mmc/host/s3cmci.c @@ -26,6 +26,7 @@ #include #include +#include 
#include @@ -1236,9 +1237,12 @@ static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) switch (ios->power_mode) { case MMC_POWER_ON: case MMC_POWER_UP: - /* Configure GPE5...GPE10 pins in SD mode */ - s3c_gpio_cfgall_range(S3C2410_GPE(5), 6, S3C_GPIO_SFN(2), - S3C_GPIO_PULL_NONE); + s3c2410_gpio_cfgpin(S3C2410_GPE(5), S3C2410_GPE5_SDCLK); + s3c2410_gpio_cfgpin(S3C2410_GPE(6), S3C2410_GPE6_SDCMD); + s3c2410_gpio_cfgpin(S3C2410_GPE(7), S3C2410_GPE7_SDDAT0); + s3c2410_gpio_cfgpin(S3C2410_GPE(8), S3C2410_GPE8_SDDAT1); + s3c2410_gpio_cfgpin(S3C2410_GPE(9), S3C2410_GPE9_SDDAT2); + s3c2410_gpio_cfgpin(S3C2410_GPE(10), S3C2410_GPE10_SDDAT3); if (host->pdata->set_power) host->pdata->set_power(ios->power_mode, ios->vdd); diff --git a/trunk/drivers/mmc/host/sdhci-dove.c b/trunk/drivers/mmc/host/sdhci-dove.c index a6e53a1ebb08..177f697b5835 100644 --- a/trunk/drivers/mmc/host/sdhci-dove.c +++ b/trunk/drivers/mmc/host/sdhci-dove.c @@ -20,17 +20,11 @@ */ #include -#include -#include #include #include #include "sdhci-pltfm.h" -struct sdhci_dove_priv { - struct clk *clk; -}; - static u16 sdhci_dove_readw(struct sdhci_host *host, int reg) { u16 ret; @@ -72,57 +66,16 @@ static struct sdhci_pltfm_data sdhci_dove_pdata = { .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | SDHCI_QUIRK_NO_BUSY_IRQ | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | - SDHCI_QUIRK_FORCE_DMA | - SDHCI_QUIRK_NO_HISPD_BIT, + SDHCI_QUIRK_FORCE_DMA, }; static int __devinit sdhci_dove_probe(struct platform_device *pdev) { - struct sdhci_host *host; - struct sdhci_pltfm_host *pltfm_host; - struct sdhci_dove_priv *priv; - int ret; - - ret = sdhci_pltfm_register(pdev, &sdhci_dove_pdata); - if (ret) - goto sdhci_dove_register_fail; - - priv = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_dove_priv), - GFP_KERNEL); - if (!priv) { - dev_err(&pdev->dev, "unable to allocate private data"); - ret = -ENOMEM; - goto sdhci_dove_allocate_fail; - } - - host = platform_get_drvdata(pdev); - pltfm_host = sdhci_priv(host); - pltfm_host->priv = priv; - - priv->clk = clk_get(&pdev->dev, NULL); - if (!IS_ERR(priv->clk)) - clk_prepare_enable(priv->clk); - return 0; - -sdhci_dove_allocate_fail: - sdhci_pltfm_unregister(pdev); -sdhci_dove_register_fail: - return ret; + return sdhci_pltfm_register(pdev, &sdhci_dove_pdata); } static int __devexit sdhci_dove_remove(struct platform_device *pdev) { - struct sdhci_host *host = platform_get_drvdata(pdev); - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_dove_priv *priv = pltfm_host->priv; - - if (priv->clk) { - if (!IS_ERR(priv->clk)) { - clk_disable_unprepare(priv->clk); - clk_put(priv->clk); - } - devm_kfree(&pdev->dev, priv->clk); - } return sdhci_pltfm_unregister(pdev); } diff --git a/trunk/drivers/mmc/host/sdhci-esdhc-imx.c b/trunk/drivers/mmc/host/sdhci-esdhc-imx.c index e23f8134591c..ebbe984e5d00 100644 --- a/trunk/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/trunk/drivers/mmc/host/sdhci-esdhc-imx.c @@ -299,8 +299,6 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) { - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct pltfm_imx_data *imx_data = pltfm_host->priv; u32 new_val; switch (reg) { @@ -317,11 +315,8 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) SDHCI_CTRL_D3CD); /* ensure the endianess */ new_val |= ESDHC_HOST_CONTROL_LE; - /* bits 8&9 are reserved on mx25 */ - if (!is_imx25_esdhc(imx_data)) { - /* DMA mode bits are shifted */ - new_val |= (val & 
SDHCI_CTRL_DMA_MASK) << 5; - } + /* DMA mode bits are shifted */ + new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5; esdhc_clrset_le(host, 0xffff, new_val, reg); return; diff --git a/trunk/drivers/mmc/host/sdhci-pci.c b/trunk/drivers/mmc/host/sdhci-pci.c index 504da715a41a..69ef0beae104 100644 --- a/trunk/drivers/mmc/host/sdhci-pci.c +++ b/trunk/drivers/mmc/host/sdhci-pci.c @@ -157,7 +157,6 @@ static const struct sdhci_pci_fixes sdhci_ene_714 = { static const struct sdhci_pci_fixes sdhci_cafe = { .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | SDHCI_QUIRK_NO_BUSY_IRQ | - SDHCI_QUIRK_BROKEN_CARD_DETECTION | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, }; diff --git a/trunk/drivers/mmc/host/sdhci-pxav2.c b/trunk/drivers/mmc/host/sdhci-pxav2.c index b6ee8857e226..dbb75bfbcffb 100644 --- a/trunk/drivers/mmc/host/sdhci-pxav2.c +++ b/trunk/drivers/mmc/host/sdhci-pxav2.c @@ -28,9 +28,6 @@ #include #include #include -#include -#include - #include "sdhci.h" #include "sdhci-pltfm.h" @@ -124,48 +121,6 @@ static struct sdhci_ops pxav2_sdhci_ops = { .platform_8bit_width = pxav2_mmc_set_width, }; -#ifdef CONFIG_OF -static const struct of_device_id sdhci_pxav2_of_match[] = { - { - .compatible = "mrvl,pxav2-mmc", - }, - {}, -}; -MODULE_DEVICE_TABLE(of, sdhci_pxav2_of_match); - -static struct sdhci_pxa_platdata *pxav2_get_mmc_pdata(struct device *dev) -{ - struct sdhci_pxa_platdata *pdata; - struct device_node *np = dev->of_node; - u32 bus_width; - u32 clk_delay_cycles; - - pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return NULL; - - if (of_find_property(np, "non-removable", NULL)) - pdata->flags |= PXA_FLAG_CARD_PERMANENT; - - of_property_read_u32(np, "bus-width", &bus_width); - if (bus_width == 8) - pdata->flags |= PXA_FLAG_SD_8_BIT_CAPABLE_SLOT; - - of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles); - if (clk_delay_cycles > 0) { - pdata->clk_delay_sel = 1; - pdata->clk_delay_cycles = clk_delay_cycles; - } - - return pdata; -} -#else -static inline struct sdhci_pxa_platdata *pxav2_get_mmc_pdata(struct device *dev) -{ - return NULL; -} -#endif - static int __devinit sdhci_pxav2_probe(struct platform_device *pdev) { struct sdhci_pltfm_host *pltfm_host; @@ -173,8 +128,6 @@ static int __devinit sdhci_pxav2_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct sdhci_host *host = NULL; struct sdhci_pxa *pxa = NULL; - const struct of_device_id *match; - int ret; struct clk *clk; @@ -203,10 +156,6 @@ static int __devinit sdhci_pxav2_probe(struct platform_device *pdev) | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN; - match = of_match_device(of_match_ptr(sdhci_pxav2_of_match), &pdev->dev); - if (match) { - pdata = pxav2_get_mmc_pdata(dev); - } if (pdata) { if (pdata->flags & PXA_FLAG_CARD_PERMANENT) { /* on-chip device */ @@ -269,9 +218,6 @@ static struct platform_driver sdhci_pxav2_driver = { .driver = { .name = "sdhci-pxav2", .owner = THIS_MODULE, -#ifdef CONFIG_OF - .of_match_table = sdhci_pxav2_of_match, -#endif .pm = SDHCI_PLTFM_PMOPS, }, .probe = sdhci_pxav2_probe, diff --git a/trunk/drivers/mmc/host/sdhci-pxav3.c b/trunk/drivers/mmc/host/sdhci-pxav3.c index 07fe3834fe0b..f29695683556 100644 --- a/trunk/drivers/mmc/host/sdhci-pxav3.c +++ b/trunk/drivers/mmc/host/sdhci-pxav3.c @@ -28,9 +28,6 @@ #include #include #include -#include -#include - #include "sdhci.h" #include "sdhci-pltfm.h" @@ -167,46 +164,6 @@ static struct sdhci_ops pxav3_sdhci_ops = { .platform_send_init_74_clocks = pxav3_gen_init_74_clocks, }; -#ifdef CONFIG_OF -static 
const struct of_device_id sdhci_pxav3_of_match[] = { - { - .compatible = "mrvl,pxav3-mmc", - }, - {}, -}; -MODULE_DEVICE_TABLE(of, sdhci_pxav3_of_match); - -static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev) -{ - struct sdhci_pxa_platdata *pdata; - struct device_node *np = dev->of_node; - u32 bus_width; - u32 clk_delay_cycles; - - pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return NULL; - - if (of_find_property(np, "non-removable", NULL)) - pdata->flags |= PXA_FLAG_CARD_PERMANENT; - - of_property_read_u32(np, "bus-width", &bus_width); - if (bus_width == 8) - pdata->flags |= PXA_FLAG_SD_8_BIT_CAPABLE_SLOT; - - of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles); - if (clk_delay_cycles > 0) - pdata->clk_delay_cycles = clk_delay_cycles; - - return pdata; -} -#else -static inline struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev) -{ - return NULL; -} -#endif - static int __devinit sdhci_pxav3_probe(struct platform_device *pdev) { struct sdhci_pltfm_host *pltfm_host; @@ -214,8 +171,6 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct sdhci_host *host = NULL; struct sdhci_pxa *pxa = NULL; - const struct of_device_id *match; - int ret; struct clk *clk; @@ -247,10 +202,6 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev) /* enable 1/8V DDR capable */ host->mmc->caps |= MMC_CAP_1_8V_DDR; - match = of_match_device(of_match_ptr(sdhci_pxav3_of_match), &pdev->dev); - if (match) - pdata = pxav3_get_mmc_pdata(dev); - if (pdata) { if (pdata->flags & PXA_FLAG_CARD_PERMANENT) { /* on-chip device */ @@ -312,9 +263,6 @@ static int __devexit sdhci_pxav3_remove(struct platform_device *pdev) static struct platform_driver sdhci_pxav3_driver = { .driver = { .name = "sdhci-pxav3", -#ifdef CONFIG_OF - .of_match_table = sdhci_pxav3_of_match, -#endif .owner = THIS_MODULE, .pm = SDHCI_PLTFM_PMOPS, }, diff --git a/trunk/drivers/mmc/host/sdhci.c b/trunk/drivers/mmc/host/sdhci.c index 9a11dc39921c..f4b8b4db3a9a 100644 --- a/trunk/drivers/mmc/host/sdhci.c +++ b/trunk/drivers/mmc/host/sdhci.c @@ -27,7 +27,6 @@ #include #include -#include #include "sdhci.h" @@ -245,19 +244,6 @@ static void sdhci_init(struct sdhci_host *host, int soft) static void sdhci_reinit(struct sdhci_host *host) { sdhci_init(host, 0); - /* - * Retuning stuffs are affected by different cards inserted and only - * applicable to UHS-I cards. So reset these fields to their initial - * value when card is removed. - */ - if (host->flags & SDHCI_USING_RETUNING_TIMER) { - host->flags &= ~SDHCI_USING_RETUNING_TIMER; - - del_timer_sync(&host->tuning_timer); - host->flags &= ~SDHCI_NEEDS_RETUNING; - host->mmc->max_blk_count = - (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; - } sdhci_enable_card_detection(host); } @@ -1259,7 +1245,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) struct sdhci_host *host; bool present; unsigned long flags; - u32 tuning_opcode; host = mmc_priv(mmc); @@ -1307,12 +1292,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) */ if ((host->flags & SDHCI_NEEDS_RETUNING) && !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) { - /* eMMC uses cmd21 while sd and sdio use cmd19 */ - tuning_opcode = mmc->card->type == MMC_TYPE_MMC ? 
- MMC_SEND_TUNING_BLOCK_HS200 : - MMC_SEND_TUNING_BLOCK; spin_unlock_irqrestore(&host->lock, flags); - sdhci_execute_tuning(mmc, tuning_opcode); + sdhci_execute_tuning(mmc, mrq->cmd->opcode); spin_lock_irqsave(&host->lock, flags); /* Restore original mmc_request structure */ @@ -1682,15 +1663,11 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, pwr = sdhci_readb(host, SDHCI_POWER_CONTROL); pwr &= ~SDHCI_POWER_ON; sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); - if (host->vmmc) - regulator_disable(host->vmmc); /* Wait for 1ms as per the spec */ usleep_range(1000, 1500); pwr |= SDHCI_POWER_ON; sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); - if (host->vmmc) - regulator_enable(host->vmmc); pr_info(DRIVER_NAME ": Switching to 1.8V signalling " "voltage failed, retrying with S18R set to 0\n"); @@ -1878,7 +1855,6 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) */ if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count && (host->tuning_mode == SDHCI_TUNING_MODE_1)) { - host->flags |= SDHCI_USING_RETUNING_TIMER; mod_timer(&host->tuning_timer, jiffies + host->tuning_count * HZ); /* Tuning mode 1 limits the maximum data length to 4MB */ @@ -1896,10 +1872,10 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) * try tuning again at a later time, when the re-tuning timer expires. * So for these controllers, we return 0. Since there might be other * controllers who do not have this capability, we return error for - * them. SDHCI_USING_RETUNING_TIMER means the host is currently using - * a retuning timer to do the retuning for the card. + * them. */ - if (err && (host->flags & SDHCI_USING_RETUNING_TIMER)) + if (err && host->tuning_count && + host->tuning_mode == SDHCI_TUNING_MODE_1) err = 0; sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier); @@ -2406,6 +2382,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id) int sdhci_suspend_host(struct sdhci_host *host) { int ret; + bool has_tuning_timer; if (host->ops->platform_suspend) host->ops->platform_suspend(host); @@ -2413,14 +2390,16 @@ int sdhci_suspend_host(struct sdhci_host *host) sdhci_disable_card_detection(host); /* Disable tuning since we are suspending */ - if (host->flags & SDHCI_USING_RETUNING_TIMER) { + has_tuning_timer = host->version >= SDHCI_SPEC_300 && + host->tuning_count && host->tuning_mode == SDHCI_TUNING_MODE_1; + if (has_tuning_timer) { del_timer_sync(&host->tuning_timer); host->flags &= ~SDHCI_NEEDS_RETUNING; } ret = mmc_suspend_host(host->mmc); if (ret) { - if (host->flags & SDHCI_USING_RETUNING_TIMER) { + if (has_tuning_timer) { host->flags |= SDHCI_NEEDS_RETUNING; mod_timer(&host->tuning_timer, jiffies + host->tuning_count * HZ); @@ -2471,7 +2450,8 @@ int sdhci_resume_host(struct sdhci_host *host) host->ops->platform_resume(host); /* Set the re-tuning expiration flag */ - if (host->flags & SDHCI_USING_RETUNING_TIMER) + if ((host->version >= SDHCI_SPEC_300) && host->tuning_count && + (host->tuning_mode == SDHCI_TUNING_MODE_1)) host->flags |= SDHCI_NEEDS_RETUNING; return ret; @@ -2510,7 +2490,8 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host) int ret = 0; /* Disable tuning since we are suspending */ - if (host->flags & SDHCI_USING_RETUNING_TIMER) { + if (host->version >= SDHCI_SPEC_300 && + host->tuning_mode == SDHCI_TUNING_MODE_1) { del_timer_sync(&host->tuning_timer); host->flags &= ~SDHCI_NEEDS_RETUNING; } @@ -2551,7 +2532,8 @@ int sdhci_runtime_resume_host(struct sdhci_host *host) sdhci_do_enable_preset_value(host, true); /* Set the re-tuning 
expiration flag */ - if (host->flags & SDHCI_USING_RETUNING_TIMER) + if ((host->version >= SDHCI_SPEC_300) && host->tuning_count && + (host->tuning_mode == SDHCI_TUNING_MODE_1)) host->flags |= SDHCI_NEEDS_RETUNING; spin_lock_irqsave(&host->lock, flags); @@ -2602,7 +2584,7 @@ EXPORT_SYMBOL_GPL(sdhci_alloc_host); int sdhci_add_host(struct sdhci_host *host) { struct mmc_host *mmc; - u32 caps[2] = {0, 0}; + u32 caps[2]; u32 max_current_caps; unsigned int ocr_avail; int ret; @@ -2632,10 +2614,8 @@ int sdhci_add_host(struct sdhci_host *host) caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps : sdhci_readl(host, SDHCI_CAPABILITIES); - if (host->version >= SDHCI_SPEC_300) - caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? - host->caps1 : - sdhci_readl(host, SDHCI_CAPABILITIES_1); + caps[1] = (host->version >= SDHCI_SPEC_300) ? + sdhci_readl(host, SDHCI_CAPABILITIES_1) : 0; if (host->quirks & SDHCI_QUIRK_FORCE_DMA) host->flags |= SDHCI_USE_SDMA; @@ -2799,7 +2779,7 @@ int sdhci_add_host(struct sdhci_host *host) mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && - !(host->mmc->caps & MMC_CAP_NONREMOVABLE)) + mmc_card_is_removable(mmc)) mmc->caps |= MMC_CAP_NEEDS_POLL; /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */ @@ -2857,30 +2837,6 @@ int sdhci_add_host(struct sdhci_host *host) SDHCI_RETUNING_MODE_SHIFT; ocr_avail = 0; - - host->vmmc = regulator_get(mmc_dev(mmc), "vmmc"); - if (IS_ERR(host->vmmc)) { - pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc)); - host->vmmc = NULL; - } - -#ifdef CONFIG_REGULATOR - if (host->vmmc) { - ret = regulator_is_supported_voltage(host->vmmc, 3300000, - 3300000); - if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_330))) - caps[0] &= ~SDHCI_CAN_VDD_330; - ret = regulator_is_supported_voltage(host->vmmc, 3000000, - 3000000); - if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_300))) - caps[0] &= ~SDHCI_CAN_VDD_300; - ret = regulator_is_supported_voltage(host->vmmc, 1800000, - 1800000); - if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_180))) - caps[0] &= ~SDHCI_CAN_VDD_180; - } -#endif /* CONFIG_REGULATOR */ - /* * According to SD Host Controller spec v3.00, if the Host System * can afford more than 150mA, Host Driver should set XPC to 1. Also @@ -2889,45 +2845,55 @@ int sdhci_add_host(struct sdhci_host *host) * value. 
*/ max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT); - if (!max_current_caps && host->vmmc) { - u32 curr = regulator_get_current_limit(host->vmmc); - if (curr > 0) { - - /* convert to SDHCI_MAX_CURRENT format */ - curr = curr/1000; /* convert to mA */ - curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER; - - curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT); - max_current_caps = - (curr << SDHCI_MAX_CURRENT_330_SHIFT) | - (curr << SDHCI_MAX_CURRENT_300_SHIFT) | - (curr << SDHCI_MAX_CURRENT_180_SHIFT); - } - } if (caps[0] & SDHCI_CAN_VDD_330) { + int max_current_330; + ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34; - mmc->max_current_330 = ((max_current_caps & + max_current_330 = ((max_current_caps & SDHCI_MAX_CURRENT_330_MASK) >> SDHCI_MAX_CURRENT_330_SHIFT) * SDHCI_MAX_CURRENT_MULTIPLIER; + + if (max_current_330 > 150) + mmc->caps |= MMC_CAP_SET_XPC_330; } if (caps[0] & SDHCI_CAN_VDD_300) { + int max_current_300; + ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31; - mmc->max_current_300 = ((max_current_caps & + max_current_300 = ((max_current_caps & SDHCI_MAX_CURRENT_300_MASK) >> SDHCI_MAX_CURRENT_300_SHIFT) * SDHCI_MAX_CURRENT_MULTIPLIER; + + if (max_current_300 > 150) + mmc->caps |= MMC_CAP_SET_XPC_300; } if (caps[0] & SDHCI_CAN_VDD_180) { + int max_current_180; + ocr_avail |= MMC_VDD_165_195; - mmc->max_current_180 = ((max_current_caps & + max_current_180 = ((max_current_caps & SDHCI_MAX_CURRENT_180_MASK) >> SDHCI_MAX_CURRENT_180_SHIFT) * SDHCI_MAX_CURRENT_MULTIPLIER; + + if (max_current_180 > 150) + mmc->caps |= MMC_CAP_SET_XPC_180; + + /* Maximum current capabilities of the host at 1.8V */ + if (max_current_180 >= 800) + mmc->caps |= MMC_CAP_MAX_CURRENT_800; + else if (max_current_180 >= 600) + mmc->caps |= MMC_CAP_MAX_CURRENT_600; + else if (max_current_180 >= 400) + mmc->caps |= MMC_CAP_MAX_CURRENT_400; + else + mmc->caps |= MMC_CAP_MAX_CURRENT_200; } mmc->ocr_avail = ocr_avail; @@ -3026,10 +2992,13 @@ int sdhci_add_host(struct sdhci_host *host) ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, mmc_hostname(mmc), host); - if (ret) { - pr_err("%s: Failed to request IRQ %d: %d\n", - mmc_hostname(mmc), host->irq, ret); + if (ret) goto untasklet; + + host->vmmc = regulator_get(mmc_dev(mmc), "vmmc"); + if (IS_ERR(host->vmmc)) { + pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc)); + host->vmmc = NULL; } sdhci_init(host, 0); @@ -3047,11 +3016,8 @@ int sdhci_add_host(struct sdhci_host *host) host->led.brightness_set = sdhci_led_control; ret = led_classdev_register(mmc_dev(mmc), &host->led); - if (ret) { - pr_err("%s: Failed to register LED device: %d\n", - mmc_hostname(mmc), ret); + if (ret) goto reset; - } #endif mmiowb(); @@ -3115,6 +3081,8 @@ void sdhci_remove_host(struct sdhci_host *host, int dead) free_irq(host->irq, host); del_timer_sync(&host->timer); + if (host->version >= SDHCI_SPEC_300) + del_timer_sync(&host->tuning_timer); tasklet_kill(&host->card_tasklet); tasklet_kill(&host->finish_tasklet); diff --git a/trunk/drivers/mmc/host/sdhci.h b/trunk/drivers/mmc/host/sdhci.h index 97653ea8942b..f761f23d2a28 100644 --- a/trunk/drivers/mmc/host/sdhci.h +++ b/trunk/drivers/mmc/host/sdhci.h @@ -205,7 +205,6 @@ #define SDHCI_CAPABILITIES_1 0x44 #define SDHCI_MAX_CURRENT 0x48 -#define SDHCI_MAX_CURRENT_LIMIT 0xFF #define SDHCI_MAX_CURRENT_330_MASK 0x0000FF #define SDHCI_MAX_CURRENT_330_SHIFT 0 #define SDHCI_MAX_CURRENT_300_MASK 0x00FF00 diff --git a/trunk/drivers/mmc/host/sh_mmcif.c b/trunk/drivers/mmc/host/sh_mmcif.c index b2af7136cd27..724b35e85a26 100644 --- 
a/trunk/drivers/mmc/host/sh_mmcif.c +++ b/trunk/drivers/mmc/host/sh_mmcif.c @@ -54,8 +54,6 @@ #include #include #include -#include -#include #include #include #include @@ -386,9 +384,6 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host, struct sh_dmae_slave *tx, *rx; host->dma_active = false; - if (!pdata) - return; - /* We can only either use DMA for both Tx and Rx or not use it at all */ if (pdata->dma) { dev_warn(&host->pd->dev, @@ -449,14 +444,13 @@ static void sh_mmcif_release_dma(struct sh_mmcif_host *host) static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk) { struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; - bool sup_pclk = p ? p->sup_pclk : false; sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR); if (!clk) return; - if (sup_pclk && clk == host->clk) + if (p->sup_pclk && clk == host->clk) sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK); else sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & @@ -898,15 +892,21 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) switch (mrq->cmd->opcode) { /* MMCIF does not support SD/SDIO command */ - case MMC_SLEEP_AWAKE: /* = SD_IO_SEND_OP_COND (5) */ - case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */ - if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR) - break; + case SD_IO_SEND_OP_COND: case MMC_APP_CMD: host->state = STATE_IDLE; mrq->cmd->error = -ETIMEDOUT; mmc_request_done(mmc, mrq); return; + case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */ + if (!mrq->data) { + /* send_if_cond cmd (not support) */ + host->state = STATE_IDLE; + mrq->cmd->error = -ETIMEDOUT; + mmc_request_done(mmc, mrq); + return; + } + break; default: break; } @@ -916,35 +916,10 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) sh_mmcif_start_cmd(host, mrq); } -static int sh_mmcif_clk_update(struct sh_mmcif_host *host) -{ - int ret = clk_enable(host->hclk); - - if (!ret) { - host->clk = clk_get_rate(host->hclk); - host->mmc->f_max = host->clk / 2; - host->mmc->f_min = host->clk / 512; - } - - return ret; -} - -static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios) -{ - struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data; - struct mmc_host *mmc = host->mmc; - - if (pd && pd->set_pwr) - pd->set_pwr(host->pd, ios->power_mode != MMC_POWER_OFF); - if (!IS_ERR(mmc->supply.vmmc)) - /* Errors ignored... */ - mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, - ios->power_mode ? 
ios->vdd : 0); -} - static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct sh_mmcif_host *host = mmc_priv(mmc); + struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; unsigned long flags; spin_lock_irqsave(&host->lock, flags); @@ -962,7 +937,6 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) sh_mmcif_request_dma(host, host->pd->dev.platform_data); host->card_present = true; } - sh_mmcif_set_power(host, ios); } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { /* clock stop */ sh_mmcif_clock_control(host, 0); @@ -974,10 +948,9 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) } if (host->power) { pm_runtime_put(&host->pd->dev); - clk_disable(host->hclk); host->power = false; - if (ios->power_mode == MMC_POWER_OFF) - sh_mmcif_set_power(host, ios); + if (p->down_pwr && ios->power_mode == MMC_POWER_OFF) + p->down_pwr(host->pd); } host->state = STATE_IDLE; return; @@ -985,7 +958,8 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (ios->clock) { if (!host->power) { - sh_mmcif_clk_update(host); + if (p->set_pwr) + p->set_pwr(host->pd, ios->power_mode); pm_runtime_get_sync(&host->pd->dev); host->power = true; sh_mmcif_sync_reset(host); @@ -1001,12 +975,8 @@ static int sh_mmcif_get_cd(struct mmc_host *mmc) { struct sh_mmcif_host *host = mmc_priv(mmc); struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; - int ret = mmc_gpio_get_cd(mmc); - - if (ret >= 0) - return ret; - if (!p || !p->get_cd) + if (!p->get_cd) return -ENOSYS; else return p->get_cd(host->pd); @@ -1272,28 +1242,12 @@ static void mmcif_timeout_work(struct work_struct *work) mmc_request_done(host->mmc, mrq); } -static void sh_mmcif_init_ocr(struct sh_mmcif_host *host) -{ - struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data; - struct mmc_host *mmc = host->mmc; - - mmc_regulator_get_supply(mmc); - - if (!pd) - return; - - if (!mmc->ocr_avail) - mmc->ocr_avail = pd->ocr; - else if (pd->ocr) - dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n"); -} - static int __devinit sh_mmcif_probe(struct platform_device *pdev) { int ret = 0, irq[2]; struct mmc_host *mmc; struct sh_mmcif_host *host; - struct sh_mmcif_plat_data *pd = pdev->dev.platform_data; + struct sh_mmcif_plat_data *pd; struct resource *res; void __iomem *reg; char clk_name[8]; @@ -1314,26 +1268,42 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev) dev_err(&pdev->dev, "ioremap error.\n"); return -ENOMEM; } - + pd = pdev->dev.platform_data; + if (!pd) { + dev_err(&pdev->dev, "sh_mmcif plat data error.\n"); + ret = -ENXIO; + goto clean_up; + } mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; - goto ealloch; + goto clean_up; } host = mmc_priv(mmc); host->mmc = mmc; host->addr = reg; host->timeout = 1000; + snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id); + host->hclk = clk_get(&pdev->dev, clk_name); + if (IS_ERR(host->hclk)) { + dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); + ret = PTR_ERR(host->hclk); + goto clean_up1; + } + clk_enable(host->hclk); + host->clk = clk_get_rate(host->hclk); host->pd = pdev; spin_lock_init(&host->lock); mmc->ops = &sh_mmcif_ops; - sh_mmcif_init_ocr(host); - + mmc->f_max = host->clk / 2; + mmc->f_min = host->clk / 512; + if (pd->ocr) + mmc->ocr_avail = pd->ocr; mmc->caps = MMC_CAP_MMC_HIGHSPEED; - if (pd && pd->caps) + if (pd->caps) mmc->caps |= pd->caps; mmc->max_segs = 32; mmc->max_blk_size = 512; @@ -1341,52 +1311,34 @@ static int 
__devinit sh_mmcif_probe(struct platform_device *pdev) mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size; mmc->max_seg_size = mmc->max_req_size; + sh_mmcif_sync_reset(host); platform_set_drvdata(pdev, host); pm_runtime_enable(&pdev->dev); host->power = false; - snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id); - host->hclk = clk_get(&pdev->dev, clk_name); - if (IS_ERR(host->hclk)) { - ret = PTR_ERR(host->hclk); - dev_err(&pdev->dev, "cannot get clock \"%s\": %d\n", clk_name, ret); - goto eclkget; - } - ret = sh_mmcif_clk_update(host); - if (ret < 0) - goto eclkupdate; - ret = pm_runtime_resume(&pdev->dev); if (ret < 0) - goto eresume; + goto clean_up2; INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work); - sh_mmcif_sync_reset(host); sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:error", host); if (ret) { dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n"); - goto ereqirq0; + goto clean_up3; } ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host); if (ret) { dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n"); - goto ereqirq1; - } - - if (pd && pd->use_cd_gpio) { - ret = mmc_gpio_request_cd(mmc, pd->cd_gpio); - if (ret < 0) - goto erqcd; + goto clean_up4; } - clk_disable(host->hclk); ret = mmc_add_host(mmc); if (ret < 0) - goto emmcaddh; + goto clean_up5; dev_pm_qos_expose_latency_limit(&pdev->dev, 100); @@ -1395,42 +1347,33 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev) sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff); return ret; -emmcaddh: - if (pd && pd->use_cd_gpio) - mmc_gpio_free_cd(mmc); -erqcd: +clean_up5: free_irq(irq[1], host); -ereqirq1: +clean_up4: free_irq(irq[0], host); -ereqirq0: +clean_up3: pm_runtime_suspend(&pdev->dev); -eresume: - clk_disable(host->hclk); -eclkupdate: - clk_put(host->hclk); -eclkget: +clean_up2: pm_runtime_disable(&pdev->dev); + clk_disable(host->hclk); +clean_up1: mmc_free_host(mmc); -ealloch: - iounmap(reg); +clean_up: + if (reg) + iounmap(reg); return ret; } static int __devexit sh_mmcif_remove(struct platform_device *pdev) { struct sh_mmcif_host *host = platform_get_drvdata(pdev); - struct sh_mmcif_plat_data *pd = pdev->dev.platform_data; int irq[2]; host->dying = true; - clk_enable(host->hclk); pm_runtime_get_sync(&pdev->dev); dev_pm_qos_hide_latency_limit(&pdev->dev); - if (pd && pd->use_cd_gpio) - mmc_gpio_free_cd(host->mmc); - mmc_remove_host(host->mmc); sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); @@ -1452,9 +1395,9 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev) platform_set_drvdata(pdev, NULL); + clk_disable(host->hclk); mmc_free_host(host->mmc); pm_runtime_put_sync(&pdev->dev); - clk_disable(host->hclk); pm_runtime_disable(&pdev->dev); return 0; @@ -1463,18 +1406,24 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev) #ifdef CONFIG_PM static int sh_mmcif_suspend(struct device *dev) { - struct sh_mmcif_host *host = dev_get_drvdata(dev); + struct platform_device *pdev = to_platform_device(dev); + struct sh_mmcif_host *host = platform_get_drvdata(pdev); int ret = mmc_suspend_host(host->mmc); - if (!ret) + if (!ret) { sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); + clk_disable(host->hclk); + } return ret; } static int sh_mmcif_resume(struct device *dev) { - struct sh_mmcif_host *host = dev_get_drvdata(dev); + struct platform_device *pdev = to_platform_device(dev); + struct sh_mmcif_host *host = 
platform_get_drvdata(pdev); + + clk_enable(host->hclk); return mmc_resume_host(host->mmc); } @@ -1483,12 +1432,6 @@ static int sh_mmcif_resume(struct device *dev) #define sh_mmcif_resume NULL #endif /* CONFIG_PM */ -static const struct of_device_id mmcif_of_match[] = { - { .compatible = "renesas,sh-mmcif" }, - { } -}; -MODULE_DEVICE_TABLE(of, mmcif_of_match); - static const struct dev_pm_ops sh_mmcif_dev_pm_ops = { .suspend = sh_mmcif_suspend, .resume = sh_mmcif_resume, @@ -1500,8 +1443,6 @@ static struct platform_driver sh_mmcif_driver = { .driver = { .name = DRIVER_NAME, .pm = &sh_mmcif_dev_pm_ops, - .owner = THIS_MODULE, - .of_match_table = mmcif_of_match, }, }; diff --git a/trunk/drivers/mmc/host/sh_mobile_sdhi.c b/trunk/drivers/mmc/host/sh_mobile_sdhi.c index a842939e4655..934b68e9efc3 100644 --- a/trunk/drivers/mmc/host/sh_mobile_sdhi.c +++ b/trunk/drivers/mmc/host/sh_mobile_sdhi.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include @@ -40,39 +39,22 @@ struct sh_mobile_sdhi { struct tmio_mmc_dma dma_priv; }; -static int sh_mobile_sdhi_clk_enable(struct platform_device *pdev, unsigned int *f) -{ - struct mmc_host *mmc = dev_get_drvdata(&pdev->dev); - struct tmio_mmc_host *host = mmc_priv(mmc); - struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data); - int ret = clk_enable(priv->clk); - if (ret < 0) - return ret; - - *f = clk_get_rate(priv->clk); - return 0; -} - -static void sh_mobile_sdhi_clk_disable(struct platform_device *pdev) -{ - struct mmc_host *mmc = dev_get_drvdata(&pdev->dev); - struct tmio_mmc_host *host = mmc_priv(mmc); - struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data); - clk_disable(priv->clk); -} - static void sh_mobile_sdhi_set_pwr(struct platform_device *pdev, int state) { struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; - p->set_pwr(pdev, state); + if (p && p->set_pwr) + p->set_pwr(pdev, state); } static int sh_mobile_sdhi_get_cd(struct platform_device *pdev) { struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; - return p->get_cd(pdev); + if (p && p->get_cd) + return p->get_cd(pdev); + else + return -ENOSYS; } static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host) @@ -134,14 +116,12 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) } mmc_data = &priv->mmc_data; + p->pdata = mmc_data; - if (p) { - p->pdata = mmc_data; - if (p->init) { - ret = p->init(pdev, &sdhi_ops); - if (ret) - goto einit; - } + if (p->init) { + ret = p->init(pdev, &sdhi_ops); + if (ret) + goto einit; } snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id); @@ -152,8 +132,9 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) goto eclkget; } - mmc_data->clk_enable = sh_mobile_sdhi_clk_enable; - mmc_data->clk_disable = sh_mobile_sdhi_clk_disable; + mmc_data->hclk = clk_get_rate(priv->clk); + mmc_data->set_pwr = sh_mobile_sdhi_set_pwr; + mmc_data->get_cd = sh_mobile_sdhi_get_cd; mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED; if (p) { mmc_data->flags = p->tmio_flags; @@ -161,12 +142,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) mmc_data->write16_hook = sh_mobile_sdhi_write16_hook; mmc_data->ocr_mask = p->tmio_ocr_mask; mmc_data->capabilities |= p->tmio_caps; - mmc_data->capabilities2 |= p->tmio_caps2; mmc_data->cd_gpio = p->cd_gpio; - if (p->set_pwr) - mmc_data->set_pwr = sh_mobile_sdhi_set_pwr; - if (p->get_cd) - mmc_data->get_cd = sh_mobile_sdhi_get_cd; if (p->dma_slave_tx > 0 && 
p->dma_slave_rx > 0) { priv->param_tx.slave_id = p->dma_slave_tx; @@ -272,7 +248,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) eprobe: clk_put(priv->clk); eclkget: - if (p && p->cleanup) + if (p->cleanup) p->cleanup(pdev); einit: kfree(priv); @@ -287,8 +263,7 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev) struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; int i = 0, irq; - if (p) - p->pdata = NULL; + p->pdata = NULL; tmio_mmc_host_remove(host); @@ -301,7 +276,7 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev) clk_put(priv->clk); - if (p && p->cleanup) + if (p->cleanup) p->cleanup(pdev); kfree(priv); @@ -316,18 +291,11 @@ static const struct dev_pm_ops tmio_mmc_dev_pm_ops = { .runtime_resume = tmio_mmc_host_runtime_resume, }; -static const struct of_device_id sh_mobile_sdhi_of_match[] = { - { .compatible = "renesas,shmobile-sdhi" }, - { } -}; -MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match); - static struct platform_driver sh_mobile_sdhi_driver = { .driver = { .name = "sh_mobile_sdhi", .owner = THIS_MODULE, .pm = &tmio_mmc_dev_pm_ops, - .of_match_table = sh_mobile_sdhi_of_match, }, .probe = sh_mobile_sdhi_probe, .remove = __devexit_p(sh_mobile_sdhi_remove), diff --git a/trunk/drivers/mmc/host/tmio_mmc_pio.c b/trunk/drivers/mmc/host/tmio_mmc_pio.c index 0d8a9bbe30be..9a7996ade58e 100644 --- a/trunk/drivers/mmc/host/tmio_mmc_pio.c +++ b/trunk/drivers/mmc/host/tmio_mmc_pio.c @@ -34,9 +34,8 @@ #include #include #include +#include #include -#include -#include #include #include #include @@ -306,8 +305,8 @@ static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command int c = cmd->opcode; u32 irq_mask = TMIO_MASK_CMD; - /* CMD12 is handled by hardware */ - if (cmd->opcode == MMC_STOP_TRANSMISSION && !cmd->arg) { + /* Command 12 is handled by hardware */ + if (cmd->opcode == 12 && !cmd->arg) { sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001); return 0; } @@ -450,7 +449,7 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) } if (stop) { - if (stop->opcode == MMC_STOP_TRANSMISSION && !stop->arg) + if (stop->opcode == 12 && !stop->arg) sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000); else BUG(); @@ -752,34 +751,6 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) mmc_request_done(mmc, mrq); } -static int tmio_mmc_clk_update(struct mmc_host *mmc) -{ - struct tmio_mmc_host *host = mmc_priv(mmc); - struct tmio_mmc_data *pdata = host->pdata; - int ret; - - if (!pdata->clk_enable) - return -ENOTSUPP; - - ret = pdata->clk_enable(host->pdev, &mmc->f_max); - if (!ret) - mmc->f_min = mmc->f_max / 512; - - return ret; -} - -static void tmio_mmc_set_power(struct tmio_mmc_host *host, struct mmc_ios *ios) -{ - struct mmc_host *mmc = host->mmc; - - if (host->set_pwr) - host->set_pwr(host->pdev, ios->power_mode != MMC_POWER_OFF); - if (!IS_ERR(mmc->supply.vmmc)) - /* Errors ignored... */ - mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, - ios->power_mode ? ios->vdd : 0); -} - /* Set MMC clock / power. * Note: This controller uses a simple divider scheme therefore it cannot * run a MMC card at full speed (20MHz). 
The max clock is 24MHz on SD, but as @@ -826,37 +797,32 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) */ if (ios->power_mode == MMC_POWER_ON && ios->clock) { if (!host->power) { - tmio_mmc_clk_update(mmc); pm_runtime_get_sync(dev); host->power = true; } tmio_mmc_set_clock(host, ios->clock); /* power up SD bus */ - tmio_mmc_set_power(host, ios); + if (host->set_pwr) + host->set_pwr(host->pdev, 1); /* start bus clock */ tmio_mmc_clk_start(host); } else if (ios->power_mode != MMC_POWER_UP) { - if (ios->power_mode == MMC_POWER_OFF) - tmio_mmc_set_power(host, ios); + if (host->set_pwr && ios->power_mode == MMC_POWER_OFF) + host->set_pwr(host->pdev, 0); if (host->power) { - struct tmio_mmc_data *pdata = host->pdata; - tmio_mmc_clk_stop(host); host->power = false; pm_runtime_put(dev); - if (pdata->clk_disable) - pdata->clk_disable(host->pdev); } + tmio_mmc_clk_stop(host); } - if (host->power) { - switch (ios->bus_width) { - case MMC_BUS_WIDTH_1: - sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0); - break; - case MMC_BUS_WIDTH_4: - sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0); - break; - } + switch (ios->bus_width) { + case MMC_BUS_WIDTH_1: + sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0); + break; + case MMC_BUS_WIDTH_4: + sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0); + break; } /* Let things settle. delay taken from winCE driver */ @@ -875,9 +841,6 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc) { struct tmio_mmc_host *host = mmc_priv(mmc); struct tmio_mmc_data *pdata = host->pdata; - int ret = mmc_gpio_get_ro(mmc); - if (ret >= 0) - return ret; return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)); @@ -887,9 +850,6 @@ static int tmio_mmc_get_cd(struct mmc_host *mmc) { struct tmio_mmc_host *host = mmc_priv(mmc); struct tmio_mmc_data *pdata = host->pdata; - int ret = mmc_gpio_get_cd(mmc); - if (ret >= 0) - return ret; if (!pdata->get_cd) return -ENOSYS; @@ -905,19 +865,6 @@ static const struct mmc_host_ops tmio_mmc_ops = { .enable_sdio_irq = tmio_mmc_enable_sdio_irq, }; -static void tmio_mmc_init_ocr(struct tmio_mmc_host *host) -{ - struct tmio_mmc_data *pdata = host->pdata; - struct mmc_host *mmc = host->mmc; - - mmc_regulator_get_supply(mmc); - - if (!mmc->ocr_avail) - mmc->ocr_avail = pdata->ocr_mask ? 
: MMC_VDD_32_33 | MMC_VDD_33_34; - else if (pdata->ocr_mask) - dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n"); -} - int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, struct platform_device *pdev, struct tmio_mmc_data *pdata) @@ -957,14 +904,18 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, mmc->ops = &tmio_mmc_ops; mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities; - mmc->caps2 = pdata->capabilities2; + mmc->f_max = pdata->hclk; + mmc->f_min = mmc->f_max / 512; mmc->max_segs = 32; mmc->max_blk_size = 512; mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) * mmc->max_segs; mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; mmc->max_seg_size = mmc->max_req_size; - tmio_mmc_init_ocr(_host); + if (pdata->ocr_mask) + mmc->ocr_avail = pdata->ocr_mask; + else + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; _host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD || mmc->caps & MMC_CAP_NEEDS_POLL || @@ -976,11 +927,6 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, if (ret < 0) goto pm_disable; - if (tmio_mmc_clk_update(mmc) < 0) { - mmc->f_max = pdata->hclk; - mmc->f_min = mmc->f_max / 512; - } - /* * There are 4 different scenarios for the card detection: * 1) an external gpio irq handles the cd (best for power savings) @@ -991,6 +937,7 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, * While we increment the runtime PM counter for all scenarios when * the mmc core activates us by calling an appropriate set_ios(), we * must additionally ensure that in case 2) the tmio mmc hardware stays + * additionally ensure that in case 2) the tmio mmc hardware stays * powered on during runtime for the card detection to work. */ if (_host->native_hotplug) @@ -1001,17 +948,6 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, _host->sdcard_irq_mask = sd_ctrl_read32(_host, CTL_IRQ_MASK); tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL); - - /* Unmask the IRQs we want to know about */ - if (!_host->chan_rx) - irq_mask |= TMIO_MASK_READOP; - if (!_host->chan_tx) - irq_mask |= TMIO_MASK_WRITEOP; - if (!_host->native_hotplug) - irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT); - - _host->sdcard_irq_mask &= ~irq_mask; - if (pdata->flags & TMIO_MMC_SDIO_IRQ) tmio_mmc_enable_sdio_irq(mmc, 0); @@ -1025,18 +961,22 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, /* See if we also get DMA */ tmio_mmc_request_dma(_host, pdata); - ret = mmc_add_host(mmc); - if (pdata->clk_disable) - pdata->clk_disable(pdev); - if (ret < 0) { - tmio_mmc_host_remove(_host); - return ret; - } + mmc_add_host(mmc); dev_pm_qos_expose_latency_limit(&pdev->dev, 100); + /* Unmask the IRQs we want to know about */ + if (!_host->chan_rx) + irq_mask |= TMIO_MASK_READOP; + if (!_host->chan_tx) + irq_mask |= TMIO_MASK_WRITEOP; + if (!_host->native_hotplug) + irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT); + + tmio_mmc_enable_mmc_irqs(_host, irq_mask); + if (pdata->flags & TMIO_MMC_USE_GPIO_CD) { - ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio); + ret = mmc_cd_gpio_request(mmc, pdata->cd_gpio); if (ret < 0) { tmio_mmc_host_remove(_host); return ret; @@ -1068,7 +1008,7 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host) * This means we can miss a card-eject, but this is anyway * possible, because of delayed processing of hotplug events. 
*/ - mmc_gpio_free_cd(mmc); + mmc_cd_gpio_free(mmc); if (!host->native_hotplug) pm_runtime_get_sync(&pdev->dev); diff --git a/trunk/drivers/mtd/mtdsuper.c b/trunk/drivers/mtd/mtdsuper.c index 334da5f583c0..a90bfe79916d 100644 --- a/trunk/drivers/mtd/mtdsuper.c +++ b/trunk/drivers/mtd/mtdsuper.c @@ -63,7 +63,7 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags, struct super_block *sb; int ret; - sb = sget(fs_type, get_sb_mtd_compare, get_sb_mtd_set, flags, mtd); + sb = sget(fs_type, get_sb_mtd_compare, get_sb_mtd_set, mtd); if (IS_ERR(sb)) goto out_error; @@ -74,6 +74,8 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags, pr_debug("MTDSB: New superblock for device %d (\"%s\")\n", mtd->index, mtd->name); + sb->s_flags = flags; + ret = fill_super(sb, data, flags & MS_SILENT ? 1 : 0); if (ret < 0) { deactivate_locked_super(sb); diff --git a/trunk/drivers/mtd/ubi/Kconfig b/trunk/drivers/mtd/ubi/Kconfig index 738ee8dc16cd..ea4b95b5451c 100644 --- a/trunk/drivers/mtd/ubi/Kconfig +++ b/trunk/drivers/mtd/ubi/Kconfig @@ -29,7 +29,7 @@ config MTD_UBI_WL_THRESHOLD config MTD_UBI_BEB_RESERVE int "Percentage of reserved eraseblocks for bad eraseblocks handling" - default 1 + default 2 range 0 25 help If the MTD device admits of bad eraseblocks (e.g. NAND flash), UBI diff --git a/trunk/drivers/net/bonding/bond_debugfs.c b/trunk/drivers/net/bonding/bond_debugfs.c index 2cf084eb9d52..3680aa251dea 100644 --- a/trunk/drivers/net/bonding/bond_debugfs.c +++ b/trunk/drivers/net/bonding/bond_debugfs.c @@ -6,7 +6,7 @@ #include "bonding.h" #include "bond_alb.h" -#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS) +#ifdef CONFIG_DEBUG_FS #include #include diff --git a/trunk/drivers/net/bonding/bond_main.c b/trunk/drivers/net/bonding/bond_main.c index 2ee76993f052..b9c2ae62166d 100644 --- a/trunk/drivers/net/bonding/bond_main.c +++ b/trunk/drivers/net/bonding/bond_main.c @@ -3227,12 +3227,6 @@ static int bond_master_netdev_event(unsigned long event, switch (event) { case NETDEV_CHANGENAME: return bond_event_changename(event_bond); - case NETDEV_UNREGISTER: - bond_remove_proc_entry(event_bond); - break; - case NETDEV_REGISTER: - bond_create_proc_entry(event_bond); - break; default: break; } @@ -4417,6 +4411,8 @@ static void bond_uninit(struct net_device *bond_dev) bond_work_cancel_all(bond); + bond_remove_proc_entry(bond); + bond_debug_unregister(bond); __hw_addr_flush(&bond->mc_list); @@ -4818,6 +4814,7 @@ static int bond_init(struct net_device *bond_dev) bond_set_lockdep_class(bond_dev); + bond_create_proc_entry(bond); list_add_tail(&bond->bond_list, &bn->dev_list); bond_prepare_sysfs_group(bond); diff --git a/trunk/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/trunk/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 1f78b63d5efe..9cc15701101b 100644 --- a/trunk/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/trunk/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -261,6 +261,7 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter) if ((phy_data & BMSR_LSTATUS) == 0) { /* link down */ netif_carrier_off(netdev); + netif_stop_queue(netdev); hw->hibernate = true; if (atl1c_reset_mac(hw) != 0) if (netif_msg_hw(adapter)) diff --git a/trunk/drivers/net/ethernet/broadcom/b44.c b/trunk/drivers/net/ethernet/broadcom/b44.c index d09c6b583d17..46b8b7d81633 100644 --- a/trunk/drivers/net/ethernet/broadcom/b44.c +++ b/trunk/drivers/net/ethernet/broadcom/b44.c @@ -656,7 +656,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int 
src_idx, u32 dest_idx_unmasked) dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ, DMA_FROM_DEVICE); dev_kfree_skb_any(skb); - skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA); + skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA); if (skb == NULL) return -ENOMEM; mapping = dma_map_single(bp->sdev->dma_dev, skb->data, @@ -967,7 +967,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev) dma_unmap_single(bp->sdev->dma_dev, mapping, len, DMA_TO_DEVICE); - bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA); + bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA); if (!bounce_skb) goto err_out; diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2.c b/trunk/drivers/net/ethernet/broadcom/bnx2.c index 1fa4927a45b1..ac7b74488531 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2.c @@ -5372,7 +5372,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp) int k, last; if (skb == NULL) { - j = NEXT_TX_BD(j); + j++; continue; } @@ -5384,8 +5384,8 @@ bnx2_free_tx_skbs(struct bnx2 *bp) tx_buf->skb = NULL; last = tx_buf->nr_frags; - j = NEXT_TX_BD(j); - for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) { + j++; + for (k = 0; k < last; k++, j++) { tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)]; dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), diff --git a/trunk/drivers/net/ethernet/broadcom/cnic.c b/trunk/drivers/net/ethernet/broadcom/cnic.c index 2c89d17cbb29..c95e7b5e2b85 100644 --- a/trunk/drivers/net/ethernet/broadcom/cnic.c +++ b/trunk/drivers/net/ethernet/broadcom/cnic.c @@ -534,8 +534,7 @@ int cnic_unregister_driver(int ulp_type) } if (atomic_read(&ulp_ops->ref_count) != 0) - pr_warn("%s: Failed waiting for ref count to go to zero\n", - __func__); + netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n"); return 0; out_unlock: @@ -1054,13 +1053,12 @@ static int cnic_init_uio(struct cnic_dev *dev) uinfo = &udev->cnic_uinfo; - uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0); + uinfo->mem[0].addr = dev->netdev->base_addr; uinfo->mem[0].internal_addr = dev->regview; + uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start; uinfo->mem[0].memtype = UIO_MEM_PHYS; if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { - uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID + - TX_MAX_TSS_RINGS + 1); uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen & PAGE_MASK; if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) @@ -1070,8 +1068,6 @@ static int cnic_init_uio(struct cnic_dev *dev) uinfo->name = "bnx2_cnic"; } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { - uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0); - uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk & PAGE_MASK; uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk); diff --git a/trunk/drivers/net/ethernet/freescale/gianfar.c b/trunk/drivers/net/ethernet/freescale/gianfar.c index ab1d80ff0791..f2db8fca46a1 100644 --- a/trunk/drivers/net/ethernet/freescale/gianfar.c +++ b/trunk/drivers/net/ethernet/freescale/gianfar.c @@ -2063,9 +2063,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } - if (skb->sk) - skb_set_owner_w(skb_new, skb->sk); - consume_skb(skb); + /* Steal sock reference for processing TX time stamps */ + swap(skb_new->sk, skb->sk); + swap(skb_new->destructor, skb->destructor); + kfree_skb(skb); skb = skb_new; } diff --git a/trunk/drivers/net/ethernet/intel/e1000e/82571.c b/trunk/drivers/net/ethernet/intel/e1000e/82571.c index 
1f063dcd8f85..36db4df09aed 100644 --- a/trunk/drivers/net/ethernet/intel/e1000e/82571.c +++ b/trunk/drivers/net/ethernet/intel/e1000e/82571.c @@ -1572,9 +1572,6 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) ctrl = er32(CTRL); status = er32(STATUS); rxcw = er32(RXCW); - /* SYNCH bit and IV bit are sticky */ - udelay(10); - rxcw = er32(RXCW); if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) { diff --git a/trunk/drivers/net/ethernet/intel/e1000e/ich8lan.c b/trunk/drivers/net/ethernet/intel/e1000e/ich8lan.c index e3a7b07df629..238ab2f8a5e7 100644 --- a/trunk/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/trunk/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -325,46 +325,24 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val) **/ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) { - u16 phy_reg = 0; - u32 phy_id = 0; - s32 ret_val; - u16 retry_count; - - for (retry_count = 0; retry_count < 2; retry_count++) { - ret_val = e1e_rphy_locked(hw, PHY_ID1, &phy_reg); - if (ret_val || (phy_reg == 0xFFFF)) - continue; - phy_id = (u32)(phy_reg << 16); + u16 phy_reg; + u32 phy_id; - ret_val = e1e_rphy_locked(hw, PHY_ID2, &phy_reg); - if (ret_val || (phy_reg == 0xFFFF)) { - phy_id = 0; - continue; - } - phy_id |= (u32)(phy_reg & PHY_REVISION_MASK); - break; - } + e1e_rphy_locked(hw, PHY_ID1, &phy_reg); + phy_id = (u32)(phy_reg << 16); + e1e_rphy_locked(hw, PHY_ID2, &phy_reg); + phy_id |= (u32)(phy_reg & PHY_REVISION_MASK); if (hw->phy.id) { if (hw->phy.id == phy_id) return true; - } else if (phy_id) { - hw->phy.id = phy_id; - hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK); + } else { + if ((phy_id != 0) && (phy_id != PHY_REVISION_MASK)) + hw->phy.id = phy_id; return true; } - /* - * In case the PHY needs to be in mdio slow mode, - * set slow mode and try to get the PHY id again. 
- */ - hw->phy.ops.release(hw); - ret_val = e1000_set_mdio_slow_mode_hv(hw); - if (!ret_val) - ret_val = e1000e_get_phy_id(hw); - hw->phy.ops.acquire(hw); - - return !ret_val; + return false; } /** diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index e242104ab471..18ca3bcadf0c 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6647,11 +6647,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) return -EINVAL; } - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { - e_err(drv, "Enable failed, SR-IOV enabled\n"); - return -EINVAL; - } - /* Hardware supports up to 8 traffic classes */ if (tc > adapter->dcb_cfg.num_tcs.pg_tcs || (hw->mac.type == ixgbe_mac_82598EB && diff --git a/trunk/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/trunk/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 41e32257a4e8..f69ec4288b10 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/trunk/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -201,9 +201,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter, unsigned int i, eop, count = 0; unsigned int total_bytes = 0, total_packets = 0; - if (test_bit(__IXGBEVF_DOWN, &adapter->state)) - return true; - i = tx_ring->next_to_clean; eop = tx_ring->tx_buffer_info[i].next_to_watch; eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); @@ -972,6 +969,8 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data) r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); for (i = 0; i < q_vector->txr_count; i++) { tx_ring = &(adapter->tx_ring[r_idx]); + tx_ring->total_bytes = 0; + tx_ring->total_packets = 0; ixgbevf_clean_tx_irq(adapter, tx_ring); r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, r_idx + 1); @@ -995,6 +994,16 @@ static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data) struct ixgbe_hw *hw = &adapter->hw; struct ixgbevf_ring *rx_ring; int r_idx; + int i; + + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); + for (i = 0; i < q_vector->rxr_count; i++) { + rx_ring = &(adapter->rx_ring[r_idx]); + rx_ring->total_bytes = 0; + rx_ring->total_packets = 0; + r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, + r_idx + 1); + } if (!q_vector->rxr_count) return IRQ_HANDLED; diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/trunk/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index 4b785e10f2ed..fb8377da1687 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -51,7 +51,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des3 = desc->des2 + BUF_SIZE_4KiB; priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum); - wmb(); + entry = (++priv->cur_tx) % txsize; desc = priv->dma_tx + entry; @@ -59,7 +59,6 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) len, DMA_TO_DEVICE); desc->des3 = desc->des2 + BUF_SIZE_4KiB; priv->hw->desc->prepare_tx_desc(desc, 0, len, csum); - wmb(); priv->hw->desc->set_tx_owner(desc); priv->tx_skbuff[entry] = NULL; } else { diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index ea3003edde18..51b3b68528ee 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1212,7 +1212,6 @@ static 
netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion); wmb(); priv->hw->desc->set_tx_owner(desc); - wmb(); } /* Interrupt on completition only for the latest segment */ @@ -1228,7 +1227,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) /* To avoid raise condition */ priv->hw->desc->set_tx_owner(first); - wmb(); priv->cur_tx++; @@ -1292,7 +1290,6 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) } wmb(); priv->hw->desc->set_rx_owner(p + entry); - wmb(); } } diff --git a/trunk/drivers/net/phy/mdio-mux.c b/trunk/drivers/net/phy/mdio-mux.c index 5c120189ec86..39ea0674dcde 100644 --- a/trunk/drivers/net/phy/mdio-mux.c +++ b/trunk/drivers/net/phy/mdio-mux.c @@ -46,13 +46,7 @@ static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum) struct mdio_mux_parent_bus *pb = cb->parent; int r; - /* In theory multiple mdio_mux could be stacked, thus creating - * more than a single level of nesting. But in practice, - * SINGLE_DEPTH_NESTING will cover the vast majority of use - * cases. We use it, instead of trying to handle the general - * case. - */ - mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING); + mutex_lock(&pb->mii_bus->mdio_lock); r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data); if (r) goto out; @@ -77,7 +71,7 @@ static int mdio_mux_write(struct mii_bus *bus, int phy_id, int r; - mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING); + mutex_lock(&pb->mii_bus->mdio_lock); r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data); if (r) goto out; diff --git a/trunk/drivers/net/usb/qmi_wwan.c b/trunk/drivers/net/usb/qmi_wwan.c index a051cedd64bd..b01960fcfbc9 100644 --- a/trunk/drivers/net/usb/qmi_wwan.c +++ b/trunk/drivers/net/usb/qmi_wwan.c @@ -346,15 +346,6 @@ static const struct driver_info qmi_wwan_force_int1 = { .data = BIT(1), /* interface whitelist bitmap */ }; -static const struct driver_info qmi_wwan_force_int2 = { - .description = "Qualcomm WWAN/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind_shared, - .manage_power = qmi_wwan_manage_power, - .data = BIT(2), /* interface whitelist bitmap */ -}; - static const struct driver_info qmi_wwan_force_int3 = { .description = "Qualcomm WWAN/QMI device", .flags = FLAG_WWAN, @@ -507,15 +498,6 @@ static const struct usb_device_id products[] = { .bInterfaceProtocol = 0xff, .driver_info = (unsigned long)&qmi_wwan_force_int4, }, - { /* ZTE MF60 */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x1402, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int2, - }, { /* Sierra Wireless MC77xx in QMI mode */ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x1199, diff --git a/trunk/drivers/net/wireless/b43legacy/dma.c b/trunk/drivers/net/wireless/b43legacy/dma.c index c8baf020c20f..f1f8bd09bd87 100644 --- a/trunk/drivers/net/wireless/b43legacy/dma.c +++ b/trunk/drivers/net/wireless/b43legacy/dma.c @@ -1072,7 +1072,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring, meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); /* create a bounce buffer in zone_dma on mapping failure. 
*/ if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { - bounce_skb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); + bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); if (!bounce_skb) { ring->current_slot = old_top_slot; ring->used_slots = old_used_slots; diff --git a/trunk/drivers/net/wireless/iwlegacy/4965-mac.c b/trunk/drivers/net/wireless/iwlegacy/4965-mac.c index ff5d689e13f3..509301a5e7e2 100644 --- a/trunk/drivers/net/wireless/iwlegacy/4965-mac.c +++ b/trunk/drivers/net/wireless/iwlegacy/4965-mac.c @@ -3405,7 +3405,7 @@ il4965_remove_dynamic_key(struct il_priv *il, return 0; } - if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) { + if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) { IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx, key_flags); spin_unlock_irqrestore(&il->sta_lock, flags); @@ -3420,7 +3420,7 @@ il4965_remove_dynamic_key(struct il_priv *il, memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo)); il->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID; - il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx; + il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET; il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; diff --git a/trunk/drivers/net/wireless/iwlegacy/common.c b/trunk/drivers/net/wireless/iwlegacy/common.c index 5d4807c2b56d..cbf2dc18341f 100644 --- a/trunk/drivers/net/wireless/iwlegacy/common.c +++ b/trunk/drivers/net/wireless/iwlegacy/common.c @@ -4767,12 +4767,14 @@ il_bg_watchdog(unsigned long data) return; /* monitor and check for other stuck queues */ - for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) { - /* skip as we already checked the command queue */ - if (cnt == il->cmd_queue) - continue; - if (il_check_stuck_queue(il, cnt)) - return; + if (il_is_any_associated(il)) { + for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) { + /* skip as we already checked the command queue */ + if (cnt == il->cmd_queue) + continue; + if (il_check_stuck_queue(il, cnt)) + return; + } } mod_timer(&il->watchdog, diff --git a/trunk/drivers/net/wireless/mwifiex/cfg80211.c b/trunk/drivers/net/wireless/mwifiex/cfg80211.c index 5c7fd185373c..ce61b6fae1c9 100644 --- a/trunk/drivers/net/wireless/mwifiex/cfg80211.c +++ b/trunk/drivers/net/wireless/mwifiex/cfg80211.c @@ -958,7 +958,6 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy, case NL80211_HIDDEN_SSID_ZERO_CONTENTS: /* firmware doesn't support this type of hidden SSID */ default: - kfree(bss_cfg); return -EINVAL; } diff --git a/trunk/drivers/net/wireless/rt2x00/rt2x00usb.c b/trunk/drivers/net/wireless/rt2x00/rt2x00usb.c index 74ecc33fdd90..d357d1ed92f6 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2x00usb.c +++ b/trunk/drivers/net/wireless/rt2x00/rt2x00usb.c @@ -436,8 +436,8 @@ void rt2x00usb_kick_queue(struct data_queue *queue) case QID_RX: if (!rt2x00queue_full(queue)) rt2x00queue_for_each_entry(queue, - Q_INDEX, Q_INDEX_DONE, + Q_INDEX, NULL, rt2x00usb_kick_rx_entry); break; diff --git a/trunk/drivers/oprofile/oprofile_perf.c b/trunk/drivers/oprofile/oprofile_perf.c index f3cfa0b9adfa..efc4b7f308cf 100644 --- a/trunk/drivers/oprofile/oprofile_perf.c +++ b/trunk/drivers/oprofile/oprofile_perf.c @@ -1,6 +1,5 @@ /* * Copyright 2010 ARM Ltd. - * Copyright 2012 Advanced Micro Devices, Inc., Robert Richter * * Perf-events backend for OProfile. 
*/ @@ -26,7 +25,7 @@ static int oprofile_perf_enabled; static DEFINE_MUTEX(oprofile_perf_mutex); static struct op_counter_config *counter_config; -static DEFINE_PER_CPU(struct perf_event **, perf_events); +static struct perf_event **perf_events[NR_CPUS]; static int num_counters; /* @@ -39,7 +38,7 @@ static void op_overflow_handler(struct perf_event *event, u32 cpu = smp_processor_id(); for (id = 0; id < num_counters; ++id) - if (per_cpu(perf_events, cpu)[id] == event) + if (perf_events[cpu][id] == event) break; if (id != num_counters) @@ -75,7 +74,7 @@ static int op_create_counter(int cpu, int event) { struct perf_event *pevent; - if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event]) + if (!counter_config[event].enabled || perf_events[cpu][event]) return 0; pevent = perf_event_create_kernel_counter(&counter_config[event].attr, @@ -92,18 +91,18 @@ static int op_create_counter(int cpu, int event) return -EBUSY; } - per_cpu(perf_events, cpu)[event] = pevent; + perf_events[cpu][event] = pevent; return 0; } static void op_destroy_counter(int cpu, int event) { - struct perf_event *pevent = per_cpu(perf_events, cpu)[event]; + struct perf_event *pevent = perf_events[cpu][event]; if (pevent) { perf_event_release_kernel(pevent); - per_cpu(perf_events, cpu)[event] = NULL; + perf_events[cpu][event] = NULL; } } @@ -258,12 +257,12 @@ void oprofile_perf_exit(void) for_each_possible_cpu(cpu) { for (id = 0; id < num_counters; ++id) { - event = per_cpu(perf_events, cpu)[id]; + event = perf_events[cpu][id]; if (event) perf_event_release_kernel(event); } - kfree(per_cpu(perf_events, cpu)); + kfree(perf_events[cpu]); } kfree(counter_config); @@ -278,6 +277,8 @@ int __init oprofile_perf_init(struct oprofile_operations *ops) if (ret) return ret; + memset(&perf_events, 0, sizeof(perf_events)); + num_counters = perf_num_counters(); if (num_counters <= 0) { pr_info("oprofile: no performance counters\n"); @@ -297,9 +298,9 @@ int __init oprofile_perf_init(struct oprofile_operations *ops) } for_each_possible_cpu(cpu) { - per_cpu(perf_events, cpu) = kcalloc(num_counters, + perf_events[cpu] = kcalloc(num_counters, sizeof(struct perf_event *), GFP_KERNEL); - if (!per_cpu(perf_events, cpu)) { + if (!perf_events[cpu]) { pr_info("oprofile: failed to allocate %d perf events " "for cpu %d\n", num_counters, cpu); ret = -ENOMEM; diff --git a/trunk/drivers/pinctrl/pinctrl-imx.c b/trunk/drivers/pinctrl/pinctrl-imx.c index 90c837f469a6..dd6d93aa5334 100644 --- a/trunk/drivers/pinctrl/pinctrl-imx.c +++ b/trunk/drivers/pinctrl/pinctrl-imx.c @@ -474,9 +474,7 @@ static int __devinit imx_pinctrl_parse_groups(struct device_node *np, grp->configs[j] = config & ~IMX_PAD_SION; } -#ifdef DEBUG IMX_PMX_DUMP(info, grp->pins, grp->mux_mode, grp->configs, grp->npins); -#endif return 0; } diff --git a/trunk/drivers/pinctrl/pinctrl-imx6q.c b/trunk/drivers/pinctrl/pinctrl-imx6q.c index e9bf71fbedca..7737d4d71a3c 100644 --- a/trunk/drivers/pinctrl/pinctrl-imx6q.c +++ b/trunk/drivers/pinctrl/pinctrl-imx6q.c @@ -1950,8 +1950,6 @@ static struct imx_pin_reg imx6q_pin_regs[] = { IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 5, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__GPIO_1_12 */ IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 6, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__SJC_DONE */ IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 7, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__ANATOP_TESTO_3 */ - IMX_PIN_REG(MX6Q_PAD_ENET_RX_ER, 0x04EC, 0x01D8, 0, 0x0000, 0), /* MX6Q_PAD_ENET_RX_ER__ANATOP_USBOTG_ID */ - IMX_PIN_REG(MX6Q_PAD_GPIO_1, 0x05F4, 0x0224, 3, 0x0000, 
0), /* MX6Q_PAD_GPIO_1__ANATOP_USBOTG_ID */ }; /* Pad names for the pinmux subsystem */ diff --git a/trunk/drivers/platform/x86/acer-wmi.c b/trunk/drivers/platform/x86/acer-wmi.c index c8f40c9c0428..ce875dc365e5 100644 --- a/trunk/drivers/platform/x86/acer-wmi.c +++ b/trunk/drivers/platform/x86/acer-wmi.c @@ -1877,7 +1877,8 @@ static int acer_platform_remove(struct platform_device *device) return 0; } -static int acer_suspend(struct device *dev) +static int acer_platform_suspend(struct platform_device *dev, +pm_message_t state) { u32 value; struct acer_data *data = &interface->data; @@ -1899,7 +1900,7 @@ static int acer_suspend(struct device *dev) return 0; } -static int acer_resume(struct device *dev) +static int acer_platform_resume(struct platform_device *device) { struct acer_data *data = &interface->data; @@ -1915,8 +1916,6 @@ static int acer_resume(struct device *dev) return 0; } -static SIMPLE_DEV_PM_OPS(acer_pm, acer_suspend, acer_resume); - static void acer_platform_shutdown(struct platform_device *device) { struct acer_data *data = &interface->data; @@ -1932,10 +1931,11 @@ static struct platform_driver acer_platform_driver = { .driver = { .name = "acer-wmi", .owner = THIS_MODULE, - .pm = &acer_pm, }, .probe = acer_platform_probe, .remove = acer_platform_remove, + .suspend = acer_platform_suspend, + .resume = acer_platform_resume, .shutdown = acer_platform_shutdown, }; diff --git a/trunk/drivers/platform/x86/classmate-laptop.c b/trunk/drivers/platform/x86/classmate-laptop.c index e2230a2b2f8e..94f93b621d7b 100644 --- a/trunk/drivers/platform/x86/classmate-laptop.c +++ b/trunk/drivers/platform/x86/classmate-laptop.c @@ -362,18 +362,15 @@ static int cmpc_tablet_remove(struct acpi_device *acpi, int type) return cmpc_remove_acpi_notify_device(acpi); } -static int cmpc_tablet_resume(struct device *dev) +static int cmpc_tablet_resume(struct acpi_device *acpi) { - struct input_dev *inputdev = dev_get_drvdata(dev); - + struct input_dev *inputdev = dev_get_drvdata(&acpi->dev); unsigned long long val = 0; - if (ACPI_SUCCESS(cmpc_get_tablet(to_acpi_device(dev)->handle, &val))) + if (ACPI_SUCCESS(cmpc_get_tablet(acpi->handle, &val))) input_report_switch(inputdev, SW_TABLET_MODE, !val); return 0; } -static SIMPLE_DEV_PM_OPS(cmpc_tablet_pm, NULL, cmpc_tablet_resume); - static const struct acpi_device_id cmpc_tablet_device_ids[] = { {CMPC_TABLET_HID, 0}, {"", 0} @@ -387,9 +384,9 @@ static struct acpi_driver cmpc_tablet_acpi_driver = { .ops = { .add = cmpc_tablet_add, .remove = cmpc_tablet_remove, + .resume = cmpc_tablet_resume, .notify = cmpc_tablet_handler, - }, - .drv.pm = &cmpc_tablet_pm, + } }; diff --git a/trunk/drivers/platform/x86/fujitsu-tablet.c b/trunk/drivers/platform/x86/fujitsu-tablet.c index d2e41735a47b..da267eae8ba8 100644 --- a/trunk/drivers/platform/x86/fujitsu-tablet.c +++ b/trunk/drivers/platform/x86/fujitsu-tablet.c @@ -440,14 +440,12 @@ static int __devexit acpi_fujitsu_remove(struct acpi_device *adev, int type) return 0; } -static int acpi_fujitsu_resume(struct device *dev) +static int acpi_fujitsu_resume(struct acpi_device *adev) { fujitsu_reset(); return 0; } -static SIMPLE_DEV_PM_OPS(acpi_fujitsu_pm, NULL, acpi_fujitsu_resume); - static struct acpi_driver acpi_fujitsu_driver = { .name = MODULENAME, .class = "hotkey", @@ -455,8 +453,8 @@ static struct acpi_driver acpi_fujitsu_driver = { .ops = { .add = acpi_fujitsu_add, .remove = acpi_fujitsu_remove, - }, - .drv.pm = &acpi_fujitsu_pm, + .resume = acpi_fujitsu_resume, + } }; static int __init fujitsu_module_init(void) 
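The platform and ACPI driver hunks above and below all follow the same pattern: the dev_pm_ops wiring (a SIMPLE_DEV_PM_OPS instance plus callbacks taking a struct device *) is dropped, and the legacy suspend/resume callbacks, which receive the bus-specific device object and a pm_message_t, are restored. For reference, a minimal sketch of the two registration styles for a hypothetical "foo" platform driver follows; the foo_* identifiers are illustrative only and do not appear anywhere in this patch. The acpi_driver hunks are analogous, with the legacy callbacks sitting in the driver's .ops structure and taking a struct acpi_device *.

/* Illustrative sketch only -- hypothetical "foo" driver, not part of this patch. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

/* Legacy style, as restored by these hunks: callbacks take the platform device. */
static int foo_suspend(struct platform_device *pdev, pm_message_t state)
{
	return 0;
}

static int foo_resume(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_legacy_driver = {
	.driver  = { .name = "foo", .owner = THIS_MODULE },
	.suspend = foo_suspend,
	.resume  = foo_resume,
};

/* dev_pm_ops style, as removed by these hunks: callbacks take struct device. */
static int foo_dev_suspend(struct device *dev)
{
	return 0;
}

static int foo_dev_resume(struct device *dev)
{
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm, foo_dev_suspend, foo_dev_resume);

static struct platform_driver foo_pm_ops_driver = {
	.driver = { .name = "foo", .owner = THIS_MODULE, .pm = &foo_pm },
};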
diff --git a/trunk/drivers/platform/x86/hdaps.c b/trunk/drivers/platform/x86/hdaps.c index d9ab6f64dcec..24a3ae065f1b 100644 --- a/trunk/drivers/platform/x86/hdaps.c +++ b/trunk/drivers/platform/x86/hdaps.c @@ -305,19 +305,17 @@ static int hdaps_probe(struct platform_device *dev) return 0; } -static int hdaps_resume(struct device *dev) +static int hdaps_resume(struct platform_device *dev) { return hdaps_device_init(); } -static SIMPLE_DEV_PM_OPS(hdaps_pm, NULL, hdaps_resume); - static struct platform_driver hdaps_driver = { .probe = hdaps_probe, + .resume = hdaps_resume, .driver = { .name = "hdaps", .owner = THIS_MODULE, - .pm = &hdaps_pm, }, }; diff --git a/trunk/drivers/platform/x86/hp_accel.c b/trunk/drivers/platform/x86/hp_accel.c index f4d91154ad67..22b2dfa73148 100644 --- a/trunk/drivers/platform/x86/hp_accel.c +++ b/trunk/drivers/platform/x86/hp_accel.c @@ -353,22 +353,20 @@ static int lis3lv02d_remove(struct acpi_device *device, int type) #ifdef CONFIG_PM -static int lis3lv02d_suspend(struct device *dev) +static int lis3lv02d_suspend(struct acpi_device *device, pm_message_t state) { /* make sure the device is off when we suspend */ lis3lv02d_poweroff(&lis3_dev); return 0; } -static int lis3lv02d_resume(struct device *dev) +static int lis3lv02d_resume(struct acpi_device *device) { return lis3lv02d_poweron(&lis3_dev); } - -static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume); -#define HP_ACCEL_PM (&hp_accel_pm) #else -#define HP_ACCEL_PM NULL +#define lis3lv02d_suspend NULL +#define lis3lv02d_resume NULL #endif /* For the HP MDPS aka 3D Driveguard */ @@ -379,8 +377,9 @@ static struct acpi_driver lis3lv02d_driver = { .ops = { .add = lis3lv02d_add, .remove = lis3lv02d_remove, - }, - .drv.pm = HP_ACCEL_PM, + .suspend = lis3lv02d_suspend, + .resume = lis3lv02d_resume, + } }; static int __init lis3lv02d_init_module(void) diff --git a/trunk/drivers/platform/x86/ideapad-laptop.c b/trunk/drivers/platform/x86/ideapad-laptop.c index 17f6dfd8dbfb..4f20f8dd3d7c 100644 --- a/trunk/drivers/platform/x86/ideapad-laptop.c +++ b/trunk/drivers/platform/x86/ideapad-laptop.c @@ -694,10 +694,10 @@ MODULE_DEVICE_TABLE(acpi, ideapad_device_ids); static int __devinit ideapad_acpi_add(struct acpi_device *adevice) { int ret, i; - int cfg; + unsigned long cfg; struct ideapad_private *priv; - if (read_method_int(adevice->handle, "_CFG", &cfg)) + if (read_method_int(adevice->handle, "_CFG", (int *)&cfg)) return -ENODEV; priv = kzalloc(sizeof(*priv), GFP_KERNEL); @@ -721,7 +721,7 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice) goto input_failed; for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) { - if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg)) + if (test_bit(ideapad_rfk_data[i].cfgbit, &cfg)) ideapad_register_rfkill(adevice, i); else priv->rfk[i] = NULL; diff --git a/trunk/drivers/platform/x86/intel_ips.c b/trunk/drivers/platform/x86/intel_ips.c index 5051aa970e0a..0ffdb3cde2bb 100644 --- a/trunk/drivers/platform/x86/intel_ips.c +++ b/trunk/drivers/platform/x86/intel_ips.c @@ -72,7 +72,6 @@ #include #include #include -#include #include #include #include @@ -1486,24 +1485,6 @@ static DEFINE_PCI_DEVICE_TABLE(ips_id_table) = { MODULE_DEVICE_TABLE(pci, ips_id_table); -static int ips_blacklist_callback(const struct dmi_system_id *id) -{ - pr_info("Blacklisted intel_ips for %s\n", id->ident); - return 1; -} - -static const struct dmi_system_id ips_blacklist[] = { - { - .callback = ips_blacklist_callback, - .ident = "HP ProBook", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, 
"Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook"), - }, - }, - { } /* terminating entry */ -}; - static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id) { u64 platform_info; @@ -1513,9 +1494,6 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id) u16 htshi, trc, trc_required_mask; u8 tse; - if (dmi_check_system(ips_blacklist)) - return -ENODEV; - ips = kzalloc(sizeof(struct ips_driver), GFP_KERNEL); if (!ips) return -ENOMEM; @@ -1719,6 +1697,21 @@ static void ips_remove(struct pci_dev *dev) dev_dbg(&dev->dev, "IPS driver removed\n"); } +#ifdef CONFIG_PM +static int ips_suspend(struct pci_dev *dev, pm_message_t state) +{ + return 0; +} + +static int ips_resume(struct pci_dev *dev) +{ + return 0; +} +#else +#define ips_suspend NULL +#define ips_resume NULL +#endif /* CONFIG_PM */ + static void ips_shutdown(struct pci_dev *dev) { } @@ -1728,6 +1721,8 @@ static struct pci_driver ips_pci_driver = { .id_table = ips_id_table, .probe = ips_probe, .remove = ips_remove, + .suspend = ips_suspend, + .resume = ips_resume, .shutdown = ips_shutdown, }; diff --git a/trunk/drivers/platform/x86/intel_mid_thermal.c b/trunk/drivers/platform/x86/intel_mid_thermal.c index ea7422f6fa03..5ae9cd9c7e6e 100644 --- a/trunk/drivers/platform/x86/intel_mid_thermal.c +++ b/trunk/drivers/platform/x86/intel_mid_thermal.c @@ -418,23 +418,23 @@ static struct thermal_device_info *initialize_sensor(int index) /** * mid_thermal_resume - resume routine - * @dev: device structure + * @pdev: platform device structure * * mid thermal resume: re-initializes the adc. Can sleep. */ -static int mid_thermal_resume(struct device *dev) +static int mid_thermal_resume(struct platform_device *pdev) { - return mid_initialize_adc(dev); + return mid_initialize_adc(&pdev->dev); } /** * mid_thermal_suspend - suspend routine - * @dev: device structure + * @pdev: platform device structure * * mid thermal suspend implements the suspend functionality * by stopping the ADC. Can sleep. */ -static int mid_thermal_suspend(struct device *dev) +static int mid_thermal_suspend(struct platform_device *pdev, pm_message_t mesg) { /* * This just stops the ADC and does not disable it. 
@@ -444,9 +444,6 @@ static int mid_thermal_suspend(struct device *dev) return configure_adc(0); } -static SIMPLE_DEV_PM_OPS(mid_thermal_pm, - mid_thermal_suspend, mid_thermal_resume); - /** * read_curr_temp - reads the current temperature and stores in temp * @temp: holds the current temperature value after reading @@ -560,9 +557,10 @@ static struct platform_driver mid_thermal_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, - .pm = &mid_thermal_pm, }, .probe = mid_thermal_probe, + .suspend = mid_thermal_suspend, + .resume = mid_thermal_resume, .remove = __devexit_p(mid_thermal_remove), .id_table = therm_id_table, }; diff --git a/trunk/drivers/platform/x86/msi-laptop.c b/trunk/drivers/platform/x86/msi-laptop.c index f64441844317..bb5132128b33 100644 --- a/trunk/drivers/platform/x86/msi-laptop.c +++ b/trunk/drivers/platform/x86/msi-laptop.c @@ -85,8 +85,7 @@ #define MSI_STANDARD_EC_TOUCHPAD_ADDRESS 0xe4 #define MSI_STANDARD_EC_TOUCHPAD_MASK (1 << 4) -static int msi_laptop_resume(struct device *device); -static SIMPLE_DEV_PM_OPS(msi_laptop_pm, NULL, msi_laptop_resume); +static int msi_laptop_resume(struct platform_device *device); #define MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS 0x2f @@ -438,8 +437,8 @@ static struct platform_driver msipf_driver = { .driver = { .name = "msi-laptop-pf", .owner = THIS_MODULE, - .pm = &msi_laptop_pm, }, + .resume = msi_laptop_resume, }; static struct platform_device *msipf_device; @@ -753,7 +752,7 @@ static int rfkill_init(struct platform_device *sdev) return retval; } -static int msi_laptop_resume(struct device *device) +static int msi_laptop_resume(struct platform_device *device) { u8 data; int result; diff --git a/trunk/drivers/platform/x86/panasonic-laptop.c b/trunk/drivers/platform/x86/panasonic-laptop.c index 24480074bcf0..ffff8b4b4949 100644 --- a/trunk/drivers/platform/x86/panasonic-laptop.c +++ b/trunk/drivers/platform/x86/panasonic-laptop.c @@ -177,6 +177,7 @@ enum SINF_BITS { SINF_NUM_BATTERIES = 0, static int acpi_pcc_hotkey_add(struct acpi_device *device); static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type); +static int acpi_pcc_hotkey_resume(struct acpi_device *device); static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event); static const struct acpi_device_id pcc_device_ids[] = { @@ -188,9 +189,6 @@ static const struct acpi_device_id pcc_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, pcc_device_ids); -static int acpi_pcc_hotkey_resume(struct device *dev); -static SIMPLE_DEV_PM_OPS(acpi_pcc_hotkey_pm, NULL, acpi_pcc_hotkey_resume); - static struct acpi_driver acpi_pcc_driver = { .name = ACPI_PCC_DRIVER_NAME, .class = ACPI_PCC_CLASS, @@ -198,9 +196,9 @@ static struct acpi_driver acpi_pcc_driver = { .ops = { .add = acpi_pcc_hotkey_add, .remove = acpi_pcc_hotkey_remove, + .resume = acpi_pcc_hotkey_resume, .notify = acpi_pcc_hotkey_notify, }, - .drv.pm = &acpi_pcc_hotkey_pm, }; static const struct key_entry panasonic_keymap[] = { @@ -540,15 +538,11 @@ static void acpi_pcc_destroy_input(struct pcc_acpi *pcc) /* kernel module interface */ -static int acpi_pcc_hotkey_resume(struct device *dev) +static int acpi_pcc_hotkey_resume(struct acpi_device *device) { - struct pcc_acpi *pcc; - - if (!dev) - return -EINVAL; + struct pcc_acpi *pcc = acpi_driver_data(device); - pcc = acpi_driver_data(to_acpi_device(dev)); - if (!pcc) + if (device == NULL || pcc == NULL) return -EINVAL; ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Sticky mode restore: %d\n", diff --git a/trunk/drivers/platform/x86/sony-laptop.c 
b/trunk/drivers/platform/x86/sony-laptop.c index 9363969ad07a..210d4ae547c2 100644 --- a/trunk/drivers/platform/x86/sony-laptop.c +++ b/trunk/drivers/platform/x86/sony-laptop.c @@ -973,7 +973,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev, struct device_attribute *attr, const char *buffer, size_t count) { - int value; + unsigned long value = 0; int ret = 0; struct sony_nc_value *item = container_of(attr, struct sony_nc_value, devattr); @@ -984,7 +984,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev, if (count > 31) return -EINVAL; - if (kstrtoint(buffer, 10, &value)) + if (kstrtoul(buffer, 10, &value)) return -EINVAL; if (item->validate) @@ -994,7 +994,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev, return value; ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset, - &value, NULL); + (int *)&value, NULL); if (ret < 0) return -EIO; @@ -1010,7 +1010,6 @@ static ssize_t sony_nc_sysfs_store(struct device *dev, struct sony_backlight_props { struct backlight_device *dev; int handle; - int cmd_base; u8 offset; u8 maxlvl; }; @@ -1038,7 +1037,7 @@ static int sony_nc_get_brightness_ng(struct backlight_device *bd) struct sony_backlight_props *sdev = (struct sony_backlight_props *)bl_get_data(bd); - sony_call_snc_handle(sdev->handle, sdev->cmd_base + 0x100, &result); + sony_call_snc_handle(sdev->handle, 0x0200, &result); return (result & 0xff) - sdev->offset; } @@ -1050,8 +1049,7 @@ static int sony_nc_update_status_ng(struct backlight_device *bd) (struct sony_backlight_props *)bl_get_data(bd); value = bd->props.brightness + sdev->offset; - if (sony_call_snc_handle(sdev->handle, sdev->cmd_base | (value << 0x10), - &result)) + if (sony_call_snc_handle(sdev->handle, 0x0100 | (value << 16), &result)) return -EIO; return value; @@ -1174,11 +1172,6 @@ static int sony_nc_hotkeys_decode(u32 event, unsigned int handle) /* * ACPI callbacks */ -enum event_types { - HOTKEY = 1, - KILLSWITCH, - GFX_SWITCH -}; static void sony_nc_notify(struct acpi_device *device, u32 event) { u32 real_ev = event; @@ -1203,7 +1196,7 @@ static void sony_nc_notify(struct acpi_device *device, u32 event) /* hotkey event */ case 0x0100: case 0x0127: - ev_type = HOTKEY; + ev_type = 1; real_ev = sony_nc_hotkeys_decode(event, handle); if (real_ev > 0) @@ -1223,7 +1216,7 @@ static void sony_nc_notify(struct acpi_device *device, u32 event) * update the rfkill device status when the * switch is moved. */ - ev_type = KILLSWITCH; + ev_type = 2; sony_call_snc_handle(handle, 0x0100, &result); real_ev = result & 0x03; @@ -1233,24 +1226,6 @@ static void sony_nc_notify(struct acpi_device *device, u32 event) break; - case 0x0128: - case 0x0146: - /* Hybrid GFX switching */ - sony_call_snc_handle(handle, 0x0000, &result); - dprintk("GFX switch event received (reason: %s)\n", - (result & 0x01) ? 
- "switch change" : "unknown"); - - /* verify the switch state - * 1: discrete GFX - * 0: integrated GFX - */ - sony_call_snc_handle(handle, 0x0100, &result); - - ev_type = GFX_SWITCH; - real_ev = result & 0xff; - break; - default: dprintk("Unknown event 0x%x for handle 0x%x\n", event, handle); @@ -1263,7 +1238,7 @@ static void sony_nc_notify(struct acpi_device *device, u32 event) } else { /* old style event */ - ev_type = HOTKEY; + ev_type = 1; sony_laptop_report_input_event(real_ev); } @@ -1477,7 +1452,7 @@ static void sony_nc_function_resume(void) &result); } -static int sony_nc_resume(struct device *dev) +static int sony_nc_resume(struct acpi_device *device) { struct sony_nc_value *item; acpi_handle handle; @@ -1509,8 +1484,6 @@ static int sony_nc_resume(struct device *dev) return 0; } -static SIMPLE_DEV_PM_OPS(sony_nc_pm, NULL, sony_nc_resume); - static void sony_nc_rfkill_cleanup(void) { int i; @@ -1920,33 +1893,32 @@ static ssize_t sony_nc_battery_care_limit_store(struct device *dev, * bits 4,5: store the limit into the EC * bits 6,7: store the limit into the battery */ - cmd = 0; - - if (value > 0) { - if (value <= 50) - cmd = 0x20; - else if (value <= 80) - cmd = 0x10; + /* + * handle 0x0115 should allow storing on battery too; + * handle 0x0136 same as 0x0115 + health status; + * handle 0x013f, same as 0x0136 but no storing on the battery + * + * Store only inside the EC for now, regardless the handle number + */ + if (value == 0) + /* disable limits */ + cmd = 0x0; - else if (value <= 100) - cmd = 0x30; + else if (value <= 50) + cmd = 0x21; - else - return -EINVAL; + else if (value <= 80) + cmd = 0x11; - /* - * handle 0x0115 should allow storing on battery too; - * handle 0x0136 same as 0x0115 + health status; - * handle 0x013f, same as 0x0136 but no storing on the battery - */ - if (bcare_ctl->handle != 0x013f) - cmd = cmd | (cmd << 2); + else if (value <= 100) + cmd = 0x31; - cmd = (cmd | 0x1) << 0x10; - } + else + return -EINVAL; - if (sony_call_snc_handle(bcare_ctl->handle, cmd | 0x0100, &result)) + if (sony_call_snc_handle(bcare_ctl->handle, (cmd << 0x10) | 0x0100, + &result)) return -EIO; return count; @@ -2141,7 +2113,7 @@ static ssize_t sony_nc_thermal_mode_show(struct device *dev, struct device_attribute *attr, char *buffer) { ssize_t count = 0; - int mode = sony_nc_thermal_mode_get(); + unsigned int mode = sony_nc_thermal_mode_get(); if (mode < 0) return mode; @@ -2500,7 +2472,6 @@ static void sony_nc_backlight_ng_read_limits(int handle, { u64 offset; int i; - int lvl_table_len = 0; u8 min = 0xff, max = 0x00; unsigned char buffer[32] = { 0 }; @@ -2509,6 +2480,8 @@ static void sony_nc_backlight_ng_read_limits(int handle, props->maxlvl = 0xff; offset = sony_find_snc_handle(handle); + if (offset < 0) + return; /* try to read the boundaries from ACPI tables, if we fail the above * defaults should be reasonable @@ -2518,21 +2491,11 @@ static void sony_nc_backlight_ng_read_limits(int handle, if (i < 0) return; - switch (handle) { - case 0x012f: - case 0x0137: - lvl_table_len = 9; - break; - case 0x143: - lvl_table_len = 16; - break; - } - /* the buffer lists brightness levels available, brightness levels are * from position 0 to 8 in the array, other values are used by ALS * control. 
*/ - for (i = 0; i < lvl_table_len && i < ARRAY_SIZE(buffer); i++) { + for (i = 0; i < 9 && i < ARRAY_SIZE(buffer); i++) { dprintk("Brightness level: %d\n", buffer[i]); @@ -2557,24 +2520,16 @@ static void sony_nc_backlight_setup(void) const struct backlight_ops *ops = NULL; struct backlight_properties props; - if (sony_find_snc_handle(0x12f) >= 0) { + if (sony_find_snc_handle(0x12f) != -1) { ops = &sony_backlight_ng_ops; - sony_bl_props.cmd_base = 0x0100; sony_nc_backlight_ng_read_limits(0x12f, &sony_bl_props); max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset; - } else if (sony_find_snc_handle(0x137) >= 0) { + } else if (sony_find_snc_handle(0x137) != -1) { ops = &sony_backlight_ng_ops; - sony_bl_props.cmd_base = 0x0100; sony_nc_backlight_ng_read_limits(0x137, &sony_bl_props); max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset; - } else if (sony_find_snc_handle(0x143) >= 0) { - ops = &sony_backlight_ng_ops; - sony_bl_props.cmd_base = 0x3000; - sony_nc_backlight_ng_read_limits(0x143, &sony_bl_props); - max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset; - } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT", &unused))) { ops = &sony_backlight_ops; @@ -2642,12 +2597,6 @@ static int sony_nc_add(struct acpi_device *device) } } - result = sony_laptop_setup_input(device); - if (result) { - pr_err("Unable to create input devices\n"); - goto outplatform; - } - if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON", &handle))) { int arg = 1; @@ -2665,6 +2614,12 @@ static int sony_nc_add(struct acpi_device *device) } /* setup input devices and helper fifo */ + result = sony_laptop_setup_input(device); + if (result) { + pr_err("Unable to create input devices\n"); + goto outsnc; + } + if (acpi_video_backlight_support()) { pr_info("brightness ignored, must be controlled by ACPI video driver\n"); } else { @@ -2712,21 +2667,22 @@ static int sony_nc_add(struct acpi_device *device) return 0; -out_sysfs: + out_sysfs: for (item = sony_nc_values; item->name; ++item) { device_remove_file(&sony_pf_device->dev, &item->devattr); } sony_nc_backlight_cleanup(); - sony_nc_function_cleanup(sony_pf_device); - sony_nc_handles_cleanup(sony_pf_device); -outplatform: sony_laptop_remove_input(); -outpresent: + outsnc: + sony_nc_function_cleanup(sony_pf_device); + sony_nc_handles_cleanup(sony_pf_device); + + outpresent: sony_pf_remove(); -outwalk: + outwalk: sony_nc_rfkill_cleanup(); return result; } @@ -2772,9 +2728,9 @@ static struct acpi_driver sony_nc_driver = { .ops = { .add = sony_nc_add, .remove = sony_nc_remove, + .resume = sony_nc_resume, .notify = sony_nc_notify, }, - .drv.pm = &sony_nc_pm, }; /*********** SPIC (SNY6001) Device ***********/ @@ -4287,22 +4243,19 @@ static int sony_pic_add(struct acpi_device *device) return result; } -static int sony_pic_suspend(struct device *dev) +static int sony_pic_suspend(struct acpi_device *device, pm_message_t state) { - if (sony_pic_disable(to_acpi_device(dev))) + if (sony_pic_disable(device)) return -ENXIO; return 0; } -static int sony_pic_resume(struct device *dev) +static int sony_pic_resume(struct acpi_device *device) { - sony_pic_enable(to_acpi_device(dev), - spic_dev.cur_ioport, spic_dev.cur_irq); + sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq); return 0; } -static SIMPLE_DEV_PM_OPS(sony_pic_pm, sony_pic_suspend, sony_pic_resume); - static const struct acpi_device_id sony_pic_device_ids[] = { {SONY_PIC_HID, 0}, {"", 0}, @@ -4316,8 +4269,9 @@ static struct acpi_driver sony_pic_driver = { .ops = { .add 
= sony_pic_add, .remove = sony_pic_remove, + .suspend = sony_pic_suspend, + .resume = sony_pic_resume, }, - .drv.pm = &sony_pic_pm, }; static struct dmi_system_id __initdata sonypi_dmi_table[] = { diff --git a/trunk/drivers/platform/x86/thinkpad_acpi.c b/trunk/drivers/platform/x86/thinkpad_acpi.c index d5fd4a1193f8..8b5610d88418 100644 --- a/trunk/drivers/platform/x86/thinkpad_acpi.c +++ b/trunk/drivers/platform/x86/thinkpad_acpi.c @@ -277,7 +277,7 @@ struct ibm_struct { int (*write) (char *); void (*exit) (void); void (*resume) (void); - void (*suspend) (void); + void (*suspend) (pm_message_t state); void (*shutdown) (void); struct list_head all_drivers; @@ -922,7 +922,8 @@ static struct input_dev *tpacpi_inputdev; static struct mutex tpacpi_inputdev_send_mutex; static LIST_HEAD(tpacpi_all_drivers); -static int tpacpi_suspend_handler(struct device *dev) +static int tpacpi_suspend_handler(struct platform_device *pdev, + pm_message_t state) { struct ibm_struct *ibm, *itmp; @@ -930,13 +931,13 @@ static int tpacpi_suspend_handler(struct device *dev) &tpacpi_all_drivers, all_drivers) { if (ibm->suspend) - (ibm->suspend)(); + (ibm->suspend)(state); } return 0; } -static int tpacpi_resume_handler(struct device *dev) +static int tpacpi_resume_handler(struct platform_device *pdev) { struct ibm_struct *ibm, *itmp; @@ -950,9 +951,6 @@ static int tpacpi_resume_handler(struct device *dev) return 0; } -static SIMPLE_DEV_PM_OPS(tpacpi_pm, - tpacpi_suspend_handler, tpacpi_resume_handler); - static void tpacpi_shutdown_handler(struct platform_device *pdev) { struct ibm_struct *ibm, *itmp; @@ -969,8 +967,9 @@ static struct platform_driver tpacpi_pdriver = { .driver = { .name = TPACPI_DRVR_NAME, .owner = THIS_MODULE, - .pm = &tpacpi_pm, }, + .suspend = tpacpi_suspend_handler, + .resume = tpacpi_resume_handler, .shutdown = tpacpi_shutdown_handler, }; @@ -3759,7 +3758,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event) } } -static void hotkey_suspend(void) +static void hotkey_suspend(pm_message_t state) { /* Do these on suspend, we get the events on early resume! 
*/ hotkey_wakeup_reason = TP_ACPI_WAKEUP_NONE; @@ -6330,7 +6329,7 @@ static int __init brightness_init(struct ibm_init_struct *iibm) return 0; } -static void brightness_suspend(void) +static void brightness_suspend(pm_message_t state) { tpacpi_brightness_checkpoint_nvram(); } @@ -6749,7 +6748,7 @@ static struct snd_kcontrol_new volume_alsa_control_mute __devinitdata = { .get = volume_alsa_mute_get, }; -static void volume_suspend(void) +static void volume_suspend(pm_message_t state) { tpacpi_volume_checkpoint_nvram(); } @@ -8108,7 +8107,7 @@ static void fan_exit(void) flush_workqueue(tpacpi_wq); } -static void fan_suspend(void) +static void fan_suspend(pm_message_t state) { int rc; diff --git a/trunk/drivers/platform/x86/toshiba_acpi.c b/trunk/drivers/platform/x86/toshiba_acpi.c index c13ba5bac93f..dab10f6edcd4 100644 --- a/trunk/drivers/platform/x86/toshiba_acpi.c +++ b/trunk/drivers/platform/x86/toshiba_acpi.c @@ -1296,9 +1296,10 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event) } } -static int toshiba_acpi_suspend(struct device *device) +static int toshiba_acpi_suspend(struct acpi_device *acpi_dev, + pm_message_t state) { - struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device)); + struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev); u32 result; if (dev->hotkey_dev) @@ -1307,9 +1308,9 @@ static int toshiba_acpi_suspend(struct device *device) return 0; } -static int toshiba_acpi_resume(struct device *device) +static int toshiba_acpi_resume(struct acpi_device *acpi_dev) { - struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device)); + struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev); u32 result; if (dev->hotkey_dev) @@ -1318,9 +1319,6 @@ static int toshiba_acpi_resume(struct device *device) return 0; } -static SIMPLE_DEV_PM_OPS(toshiba_acpi_pm, - toshiba_acpi_suspend, toshiba_acpi_resume); - static struct acpi_driver toshiba_acpi_driver = { .name = "Toshiba ACPI driver", .owner = THIS_MODULE, @@ -1330,8 +1328,9 @@ static struct acpi_driver toshiba_acpi_driver = { .add = toshiba_acpi_add, .remove = toshiba_acpi_remove, .notify = toshiba_acpi_notify, + .suspend = toshiba_acpi_suspend, + .resume = toshiba_acpi_resume, }, - .drv.pm = &toshiba_acpi_pm, }; static int __init toshiba_acpi_init(void) diff --git a/trunk/drivers/platform/x86/toshiba_bluetooth.c b/trunk/drivers/platform/x86/toshiba_bluetooth.c index 715a43cb5e3c..5fb7186694df 100644 --- a/trunk/drivers/platform/x86/toshiba_bluetooth.c +++ b/trunk/drivers/platform/x86/toshiba_bluetooth.c @@ -34,6 +34,7 @@ MODULE_LICENSE("GPL"); static int toshiba_bt_rfkill_add(struct acpi_device *device); static int toshiba_bt_rfkill_remove(struct acpi_device *device, int type); static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event); +static int toshiba_bt_resume(struct acpi_device *device); static const struct acpi_device_id bt_device_ids[] = { { "TOS6205", 0}, @@ -41,9 +42,6 @@ static const struct acpi_device_id bt_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, bt_device_ids); -static int toshiba_bt_resume(struct device *dev); -static SIMPLE_DEV_PM_OPS(toshiba_bt_pm, NULL, toshiba_bt_resume); - static struct acpi_driver toshiba_bt_rfkill_driver = { .name = "Toshiba BT", .class = "Toshiba", @@ -52,9 +50,9 @@ static struct acpi_driver toshiba_bt_rfkill_driver = { .add = toshiba_bt_rfkill_add, .remove = toshiba_bt_rfkill_remove, .notify = toshiba_bt_rfkill_notify, + .resume = toshiba_bt_resume, }, .owner = THIS_MODULE, - .drv.pm = &toshiba_bt_pm, }; @@ -90,9 +88,9 
@@ static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event) toshiba_bluetooth_enable(device->handle); } -static int toshiba_bt_resume(struct device *dev) +static int toshiba_bt_resume(struct acpi_device *device) { - return toshiba_bluetooth_enable(to_acpi_device(dev)->handle); + return toshiba_bluetooth_enable(device->handle); } static int toshiba_bt_rfkill_add(struct acpi_device *device) diff --git a/trunk/drivers/platform/x86/xo15-ebook.c b/trunk/drivers/platform/x86/xo15-ebook.c index 849c07c13bf6..fad153dc0355 100644 --- a/trunk/drivers/platform/x86/xo15-ebook.c +++ b/trunk/drivers/platform/x86/xo15-ebook.c @@ -77,13 +77,11 @@ static void ebook_switch_notify(struct acpi_device *device, u32 event) } } -static int ebook_switch_resume(struct device *dev) +static int ebook_switch_resume(struct acpi_device *device) { - return ebook_send_state(to_acpi_device(dev)); + return ebook_send_state(device); } -static SIMPLE_DEV_PM_OPS(ebook_switch_pm, NULL, ebook_switch_resume); - static int ebook_switch_add(struct acpi_device *device) { struct ebook_switch *button; @@ -163,10 +161,10 @@ static struct acpi_driver xo15_ebook_driver = { .ids = ebook_device_ids, .ops = { .add = ebook_switch_add, + .resume = ebook_switch_resume, .remove = ebook_switch_remove, .notify = ebook_switch_notify, }, - .drv.pm = &ebook_switch_pm, }; static int __init xo15_ebook_init(void) diff --git a/trunk/drivers/regulator/Kconfig b/trunk/drivers/regulator/Kconfig index f34c3be6c9fe..c86b8864e411 100644 --- a/trunk/drivers/regulator/Kconfig +++ b/trunk/drivers/regulator/Kconfig @@ -20,7 +20,6 @@ menuconfig REGULATOR If unsure, say no. - if REGULATOR config REGULATOR_DEBUG @@ -89,13 +88,6 @@ config REGULATOR_AAT2870 If you have a AnalogicTech AAT2870 say Y to enable the regulator driver. -config REGULATOR_ARIZONA - tristate "Wolfson Arizona class devices" - depends on MFD_ARIZONA - help - Support for the regulators found on Wolfson Arizona class - devices. - config REGULATOR_DA903X tristate "Dialog Semiconductor DA9030/DA9034 regulators" depends on PMIC_DA903X @@ -203,14 +195,6 @@ config REGULATOR_MAX8998 via I2C bus. The provided regulator is suitable for S3C6410 and S5PC1XX chips to control VCC_CORE and VCC_USIM voltages. -config REGULATOR_MAX77686 - tristate "Maxim 77686 regulator" - depends on MFD_MAX77686 - help - This driver controls a Maxim 77686 regulator - via I2C bus. The provided regulator is suitable for - Exynos-4 chips to control VARM and VINT voltages. - config REGULATOR_PCAP tristate "Motorola PCAP2 regulator driver" depends on EZX_PCAP @@ -232,19 +216,6 @@ config REGULATOR_LP3972 Say Y here to support the voltage regulators and convertors on National Semiconductors LP3972 PMIC -config REGULATOR_LP872X - bool "TI/National Semiconductor LP8720/LP8725 voltage regulators" - depends on I2C=y - select REGMAP_I2C - help - This driver supports LP8720/LP8725 PMIC - -config REGULATOR_LP8788 - bool "TI LP8788 Power Regulators" - depends on MFD_LP8788 - help - This driver supports LP8788 voltage regulator chip. - config REGULATOR_PCF50633 tristate "NXP PCF50633 regulator driver" depends on MFD_PCF50633 @@ -262,14 +233,6 @@ config REGULATOR_RC5T583 through regulator interface. The device supports multiple DCDC/LDO outputs which can be controlled by i2c communication. -config REGULATOR_S2MPS11 - tristate "Samsung S2MPS11 voltage regulator" - depends on MFD_SEC_CORE - help - This driver supports a Samsung S2MPS11 voltage output regulator - via I2C bus. 
S2MPS11 is comprised of high efficient Buck converters - including Dual-Phase Buck converter, Buck-Boost converter, various LDOs. - config REGULATOR_S5M8767 tristate "Samsung S5M8767A voltage regulator" depends on MFD_S5M_CORE diff --git a/trunk/drivers/regulator/Makefile b/trunk/drivers/regulator/Makefile index 3342615cf25e..977fd46909ab 100644 --- a/trunk/drivers/regulator/Makefile +++ b/trunk/drivers/regulator/Makefile @@ -15,7 +15,6 @@ obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o obj-$(CONFIG_REGULATOR_AD5398) += ad5398.o obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o -obj-$(CONFIG_REGULATOR_ARIZONA) += arizona-micsupp.o arizona-ldo1.o obj-$(CONFIG_REGULATOR_DA903X) += da903x.o obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o obj-$(CONFIG_REGULATOR_DBX500_PRCMU) += dbx500-prcmu.o @@ -24,9 +23,6 @@ obj-$(CONFIG_REGULATOR_GPIO) += gpio-regulator.o obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o obj-$(CONFIG_REGULATOR_LP3972) += lp3972.o -obj-$(CONFIG_REGULATOR_LP872X) += lp872x.o -obj-$(CONFIG_REGULATOR_LP8788) += lp8788-buck.o -obj-$(CONFIG_REGULATOR_LP8788) += lp8788-ldo.o obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o @@ -34,7 +30,6 @@ obj-$(CONFIG_REGULATOR_MAX8925) += max8925-regulator.o obj-$(CONFIG_REGULATOR_MAX8952) += max8952.o obj-$(CONFIG_REGULATOR_MAX8997) += max8997.o obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o -obj-$(CONFIG_REGULATOR_MAX77686) += max77686.o obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o @@ -42,7 +37,6 @@ obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o obj-$(CONFIG_REGULATOR_RC5T583) += rc5t583-regulator.o -obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o obj-$(CONFIG_REGULATOR_TPS62360) += tps62360-regulator.o diff --git a/trunk/drivers/regulator/aat2870-regulator.c b/trunk/drivers/regulator/aat2870-regulator.c index 6f45bfd22e83..06776ca945f2 100644 --- a/trunk/drivers/regulator/aat2870-regulator.c +++ b/trunk/drivers/regulator/aat2870-regulator.c @@ -33,6 +33,11 @@ struct aat2870_regulator { struct aat2870_data *aat2870; struct regulator_desc desc; + const int *voltages; /* uV */ + + int min_uV; + int max_uV; + u8 enable_addr; u8 enable_shift; u8 enable_mask; @@ -42,6 +47,14 @@ struct aat2870_regulator { u8 voltage_mask; }; +static int aat2870_ldo_list_voltage(struct regulator_dev *rdev, + unsigned selector) +{ + struct aat2870_regulator *ri = rdev_get_drvdata(rdev); + + return ri->voltages[selector]; +} + static int aat2870_ldo_set_voltage_sel(struct regulator_dev *rdev, unsigned selector) { @@ -98,7 +111,7 @@ static int aat2870_ldo_is_enabled(struct regulator_dev *rdev) } static struct regulator_ops aat2870_ldo_ops = { - .list_voltage = regulator_list_voltage_table, + .list_voltage = aat2870_ldo_list_voltage, .set_voltage_sel = aat2870_ldo_set_voltage_sel, .get_voltage_sel = aat2870_ldo_get_voltage_sel, .enable = aat2870_ldo_enable, @@ -106,7 +119,7 @@ static struct regulator_ops aat2870_ldo_ops = { .is_enabled = aat2870_ldo_is_enabled, }; -static const unsigned int aat2870_ldo_voltages[] = { +static const int aat2870_ldo_voltages[] = 
{ 1200000, 1300000, 1500000, 1600000, 1800000, 2000000, 2200000, 2500000, 2600000, 2700000, 2800000, 2900000, @@ -119,11 +132,13 @@ static const unsigned int aat2870_ldo_voltages[] = { .name = #ids, \ .id = AAT2870_ID_##ids, \ .n_voltages = ARRAY_SIZE(aat2870_ldo_voltages), \ - .volt_table = aat2870_ldo_voltages, \ .ops = &aat2870_ldo_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ }, \ + .voltages = aat2870_ldo_voltages, \ + .min_uV = 1200000, \ + .max_uV = 3300000, \ } static struct aat2870_regulator aat2870_regulators[] = { diff --git a/trunk/drivers/regulator/ab3100.c b/trunk/drivers/regulator/ab3100.c index 182b553059c9..03f4d9c604ec 100644 --- a/trunk/drivers/regulator/ab3100.c +++ b/trunk/drivers/regulator/ab3100.c @@ -43,12 +43,20 @@ * @dev: handle to the device * @plfdata: AB3100 platform data passed in at probe time * @regreg: regulator register number in the AB3100 + * @fixed_voltage: a fixed voltage for this regulator, if this + * 0 the voltages array is used instead. + * @typ_voltages: an array of available typical voltages for + * this regulator + * @voltages_len: length of the array of available voltages */ struct ab3100_regulator { struct regulator_dev *rdev; struct device *dev; struct ab3100_platform_data *plfdata; u8 regreg; + int fixed_voltage; + int const *typ_voltages; + u8 voltages_len; }; /* The order in which registers are initialized */ @@ -72,7 +80,7 @@ static const u8 ab3100_reg_init_order[AB3100_NUM_REGULATORS+2] = { #define LDO_C_VOLTAGE 2650000 #define LDO_D_VOLTAGE 2650000 -static const unsigned int ldo_e_buck_typ_voltages[] = { +static const int ldo_e_buck_typ_voltages[] = { 1800000, 1400000, 1300000, @@ -82,7 +90,7 @@ static const unsigned int ldo_e_buck_typ_voltages[] = { 900000, }; -static const unsigned int ldo_f_typ_voltages[] = { +static const int ldo_f_typ_voltages[] = { 1800000, 1400000, 1300000, @@ -93,21 +101,21 @@ static const unsigned int ldo_f_typ_voltages[] = { 2650000, }; -static const unsigned int ldo_g_typ_voltages[] = { +static const int ldo_g_typ_voltages[] = { 2850000, 2750000, 1800000, 1500000, }; -static const unsigned int ldo_h_typ_voltages[] = { +static const int ldo_h_typ_voltages[] = { 2750000, 1800000, 1500000, 1200000, }; -static const unsigned int ldo_k_typ_voltages[] = { +static const int ldo_k_typ_voltages[] = { 2750000, 1800000, }; @@ -118,27 +126,40 @@ static struct ab3100_regulator ab3100_regulators[AB3100_NUM_REGULATORS] = { { .regreg = AB3100_LDO_A, + .fixed_voltage = LDO_A_VOLTAGE, }, { .regreg = AB3100_LDO_C, + .fixed_voltage = LDO_C_VOLTAGE, }, { .regreg = AB3100_LDO_D, + .fixed_voltage = LDO_D_VOLTAGE, }, { .regreg = AB3100_LDO_E, + .typ_voltages = ldo_e_buck_typ_voltages, + .voltages_len = ARRAY_SIZE(ldo_e_buck_typ_voltages), }, { .regreg = AB3100_LDO_F, + .typ_voltages = ldo_f_typ_voltages, + .voltages_len = ARRAY_SIZE(ldo_f_typ_voltages), }, { .regreg = AB3100_LDO_G, + .typ_voltages = ldo_g_typ_voltages, + .voltages_len = ARRAY_SIZE(ldo_g_typ_voltages), }, { .regreg = AB3100_LDO_H, + .typ_voltages = ldo_h_typ_voltages, + .voltages_len = ARRAY_SIZE(ldo_h_typ_voltages), }, { .regreg = AB3100_LDO_K, + .typ_voltages = ldo_k_typ_voltages, + .voltages_len = ARRAY_SIZE(ldo_k_typ_voltages), }, { .regreg = AB3100_LDO_EXT, @@ -146,6 +167,8 @@ ab3100_regulators[AB3100_NUM_REGULATORS] = { }, { .regreg = AB3100_BUCK, + .typ_voltages = ldo_e_buck_typ_voltages, + .voltages_len = ARRAY_SIZE(ldo_e_buck_typ_voltages), }, }; @@ -155,7 +178,7 @@ ab3100_regulators[AB3100_NUM_REGULATORS] = { */ static int 
ab3100_enable_regulator(struct regulator_dev *reg) { - struct ab3100_regulator *abreg = rdev_get_drvdata(reg); + struct ab3100_regulator *abreg = reg->reg_data; int err; u8 regval; @@ -186,7 +209,7 @@ static int ab3100_enable_regulator(struct regulator_dev *reg) static int ab3100_disable_regulator(struct regulator_dev *reg) { - struct ab3100_regulator *abreg = rdev_get_drvdata(reg); + struct ab3100_regulator *abreg = reg->reg_data; int err; u8 regval; @@ -219,7 +242,7 @@ static int ab3100_disable_regulator(struct regulator_dev *reg) static int ab3100_is_enabled_regulator(struct regulator_dev *reg) { - struct ab3100_regulator *abreg = rdev_get_drvdata(reg); + struct ab3100_regulator *abreg = reg->reg_data; u8 regval; int err; @@ -234,12 +257,26 @@ static int ab3100_is_enabled_regulator(struct regulator_dev *reg) return regval & AB3100_REG_ON_MASK; } +static int ab3100_list_voltage_regulator(struct regulator_dev *reg, + unsigned selector) +{ + struct ab3100_regulator *abreg = reg->reg_data; + + if (selector >= abreg->voltages_len) + return -EINVAL; + return abreg->typ_voltages[selector]; +} + static int ab3100_get_voltage_regulator(struct regulator_dev *reg) { - struct ab3100_regulator *abreg = rdev_get_drvdata(reg); + struct ab3100_regulator *abreg = reg->reg_data; u8 regval; int err; + /* Return the voltage for fixed regulators immediately */ + if (abreg->fixed_voltage) + return abreg->fixed_voltage; + /* * For variable types, read out setting and index into * supplied voltage list. @@ -257,20 +294,20 @@ static int ab3100_get_voltage_regulator(struct regulator_dev *reg) regval &= 0xE0; regval >>= 5; - if (regval >= reg->desc->n_voltages) { + if (regval >= abreg->voltages_len) { dev_err(®->dev, "regulator register %02x contains an illegal voltage setting\n", abreg->regreg); return -EINVAL; } - return reg->desc->volt_table[regval]; + return abreg->typ_voltages[regval]; } static int ab3100_set_voltage_regulator_sel(struct regulator_dev *reg, unsigned selector) { - struct ab3100_regulator *abreg = rdev_get_drvdata(reg); + struct ab3100_regulator *abreg = reg->reg_data; u8 regval; int err; @@ -299,7 +336,7 @@ static int ab3100_set_voltage_regulator_sel(struct regulator_dev *reg, static int ab3100_set_suspend_voltage_regulator(struct regulator_dev *reg, int uV) { - struct ab3100_regulator *abreg = rdev_get_drvdata(reg); + struct ab3100_regulator *abreg = reg->reg_data; u8 regval; int err; int bestindex; @@ -342,22 +379,42 @@ static int ab3100_set_suspend_voltage_regulator(struct regulator_dev *reg, */ static int ab3100_get_voltage_regulator_external(struct regulator_dev *reg) { - struct ab3100_regulator *abreg = rdev_get_drvdata(reg); + struct ab3100_regulator *abreg = reg->reg_data; return abreg->plfdata->external_voltage; } -static int ab3100_get_fixed_voltage_regulator(struct regulator_dev *reg) +static int ab3100_enable_time_regulator(struct regulator_dev *reg) { - return reg->desc->min_uV; + struct ab3100_regulator *abreg = reg->reg_data; + + /* Per-regulator power on delay from spec */ + switch (abreg->regreg) { + case AB3100_LDO_A: /* Fallthrough */ + case AB3100_LDO_C: /* Fallthrough */ + case AB3100_LDO_D: /* Fallthrough */ + case AB3100_LDO_E: /* Fallthrough */ + case AB3100_LDO_H: /* Fallthrough */ + case AB3100_LDO_K: + return 200; + case AB3100_LDO_F: + return 600; + case AB3100_LDO_G: + return 400; + case AB3100_BUCK: + return 1000; + default: + break; + } + return 0; } static struct regulator_ops regulator_ops_fixed = { - .list_voltage = regulator_list_voltage_linear, .enable = 
ab3100_enable_regulator, .disable = ab3100_disable_regulator, .is_enabled = ab3100_is_enabled_regulator, - .get_voltage = ab3100_get_fixed_voltage_regulator, + .get_voltage = ab3100_get_voltage_regulator, + .enable_time = ab3100_enable_time_regulator, }; static struct regulator_ops regulator_ops_variable = { @@ -366,7 +423,8 @@ static struct regulator_ops regulator_ops_variable = { .is_enabled = ab3100_is_enabled_regulator, .get_voltage = ab3100_get_voltage_regulator, .set_voltage_sel = ab3100_set_voltage_regulator_sel, - .list_voltage = regulator_list_voltage_table, + .list_voltage = ab3100_list_voltage_regulator, + .enable_time = ab3100_enable_time_regulator, }; static struct regulator_ops regulator_ops_variable_sleepable = { @@ -376,7 +434,8 @@ static struct regulator_ops regulator_ops_variable_sleepable = { .get_voltage = ab3100_get_voltage_regulator, .set_voltage_sel = ab3100_set_voltage_regulator_sel, .set_suspend_voltage = ab3100_set_suspend_voltage_regulator, - .list_voltage = regulator_list_voltage_table, + .list_voltage = ab3100_list_voltage_regulator, + .enable_time = ab3100_enable_time_regulator, }; /* @@ -398,81 +457,62 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = { .name = "LDO_A", .id = AB3100_LDO_A, .ops = ®ulator_ops_fixed, - .n_voltages = 1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, - .min_uV = LDO_A_VOLTAGE, - .enable_time = 200, }, { .name = "LDO_C", .id = AB3100_LDO_C, .ops = ®ulator_ops_fixed, - .n_voltages = 1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, - .min_uV = LDO_C_VOLTAGE, - .enable_time = 200, }, { .name = "LDO_D", .id = AB3100_LDO_D, .ops = ®ulator_ops_fixed, - .n_voltages = 1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, - .min_uV = LDO_D_VOLTAGE, - .enable_time = 200, }, { .name = "LDO_E", .id = AB3100_LDO_E, .ops = ®ulator_ops_variable_sleepable, .n_voltages = ARRAY_SIZE(ldo_e_buck_typ_voltages), - .volt_table = ldo_e_buck_typ_voltages, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, - .enable_time = 200, }, { .name = "LDO_F", .id = AB3100_LDO_F, .ops = ®ulator_ops_variable, .n_voltages = ARRAY_SIZE(ldo_f_typ_voltages), - .volt_table = ldo_f_typ_voltages, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, - .enable_time = 600, }, { .name = "LDO_G", .id = AB3100_LDO_G, .ops = ®ulator_ops_variable, .n_voltages = ARRAY_SIZE(ldo_g_typ_voltages), - .volt_table = ldo_g_typ_voltages, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, - .enable_time = 400, }, { .name = "LDO_H", .id = AB3100_LDO_H, .ops = ®ulator_ops_variable, .n_voltages = ARRAY_SIZE(ldo_h_typ_voltages), - .volt_table = ldo_h_typ_voltages, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, - .enable_time = 200, }, { .name = "LDO_K", .id = AB3100_LDO_K, .ops = ®ulator_ops_variable, .n_voltages = ARRAY_SIZE(ldo_k_typ_voltages), - .volt_table = ldo_k_typ_voltages, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, - .enable_time = 200, }, { .name = "LDO_EXT", @@ -488,7 +528,6 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = { .n_voltages = ARRAY_SIZE(ldo_e_buck_typ_voltages), .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, - .enable_time = 1000, }, }; diff --git a/trunk/drivers/regulator/ab8500.c b/trunk/drivers/regulator/ab8500.c index 13d424fc1c14..a739f5ca936a 100644 --- a/trunk/drivers/regulator/ab8500.c +++ b/trunk/drivers/regulator/ab8500.c @@ -30,6 +30,9 @@ * @dev: device pointer * @desc: regulator description * @regulator_dev: regulator device + * @max_uV: maximum voltage (for variable voltage supplies) + * @min_uV: minimum voltage (for variable voltage supplies) + * 
@fixed_uV: typical voltage (for fixed voltage supplies) * @update_bank: bank to control on/off * @update_reg: register to control on/off * @update_mask: mask to enable/disable regulator @@ -37,12 +40,17 @@ * @voltage_bank: bank to control regulator voltage * @voltage_reg: register to control regulator voltage * @voltage_mask: mask to control regulator voltage + * @voltages: supported voltage table + * @voltages_len: number of supported voltages for the regulator * @delay: startup/set voltage delay in us */ struct ab8500_regulator_info { struct device *dev; struct regulator_desc desc; struct regulator_dev *regulator; + int max_uV; + int min_uV; + int fixed_uV; u8 update_bank; u8 update_reg; u8 update_mask; @@ -50,11 +58,13 @@ struct ab8500_regulator_info { u8 voltage_bank; u8 voltage_reg; u8 voltage_mask; + int const *voltages; + int voltages_len; unsigned int delay; }; /* voltage tables for the vauxn/vintcore supplies */ -static const unsigned int ldo_vauxn_voltages[] = { +static const int ldo_vauxn_voltages[] = { 1100000, 1200000, 1300000, @@ -73,7 +83,7 @@ static const unsigned int ldo_vauxn_voltages[] = { 3300000, }; -static const unsigned int ldo_vaux3_voltages[] = { +static const int ldo_vaux3_voltages[] = { 1200000, 1500000, 1800000, @@ -84,7 +94,7 @@ static const unsigned int ldo_vaux3_voltages[] = { 2910000, }; -static const unsigned int ldo_vintcore_voltages[] = { +static const int ldo_vintcore_voltages[] = { 1200000, 1225000, 1250000, @@ -175,6 +185,25 @@ static int ab8500_regulator_is_enabled(struct regulator_dev *rdev) return false; } +static int ab8500_list_voltage(struct regulator_dev *rdev, unsigned selector) +{ + struct ab8500_regulator_info *info = rdev_get_drvdata(rdev); + + if (info == NULL) { + dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); + return -EINVAL; + } + + /* return the uV for the fixed regulators */ + if (info->fixed_uV) + return info->fixed_uV; + + if (selector >= info->voltages_len) + return -EINVAL; + + return info->voltages[selector]; +} + static int ab8500_regulator_get_voltage_sel(struct regulator_dev *rdev) { int ret, val; @@ -250,7 +279,14 @@ static int ab8500_regulator_set_voltage_time_sel(struct regulator_dev *rdev, unsigned int new_sel) { struct ab8500_regulator_info *info = rdev_get_drvdata(rdev); + int ret; + /* If the regulator isn't on, it won't take time here */ + ret = ab8500_regulator_is_enabled(rdev); + if (ret < 0) + return ret; + if (!ret) + return 0; return info->delay; } @@ -260,14 +296,21 @@ static struct regulator_ops ab8500_regulator_ops = { .is_enabled = ab8500_regulator_is_enabled, .get_voltage_sel = ab8500_regulator_get_voltage_sel, .set_voltage_sel = ab8500_regulator_set_voltage_sel, - .list_voltage = regulator_list_voltage_table, + .list_voltage = ab8500_list_voltage, .enable_time = ab8500_regulator_enable_time, .set_voltage_time_sel = ab8500_regulator_set_voltage_time_sel, }; static int ab8500_fixed_get_voltage(struct regulator_dev *rdev) { - return rdev->desc->min_uV; + struct ab8500_regulator_info *info = rdev_get_drvdata(rdev); + + if (info == NULL) { + dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); + return -EINVAL; + } + + return info->fixed_uV; } static struct regulator_ops ab8500_regulator_fixed_ops = { @@ -275,8 +318,9 @@ static struct regulator_ops ab8500_regulator_fixed_ops = { .disable = ab8500_regulator_disable, .is_enabled = ab8500_regulator_is_enabled, .get_voltage = ab8500_fixed_get_voltage, - .list_voltage = regulator_list_voltage_linear, + .list_voltage = ab8500_list_voltage, 
.enable_time = ab8500_regulator_enable_time, + .set_voltage_time_sel = ab8500_regulator_set_voltage_time_sel, }; static struct ab8500_regulator_info @@ -285,7 +329,7 @@ static struct ab8500_regulator_info * Variable Voltage Regulators * name, min mV, max mV, * update bank, reg, mask, enable val - * volt bank, reg, mask + * volt bank, reg, mask, table, table length */ [AB8500_LDO_AUX1] = { .desc = { @@ -295,8 +339,9 @@ static struct ab8500_regulator_info .id = AB8500_LDO_AUX1, .owner = THIS_MODULE, .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages), - .volt_table = ldo_vauxn_voltages, }, + .min_uV = 1100000, + .max_uV = 3300000, .update_bank = 0x04, .update_reg = 0x09, .update_mask = 0x03, @@ -304,6 +349,8 @@ static struct ab8500_regulator_info .voltage_bank = 0x04, .voltage_reg = 0x1f, .voltage_mask = 0x0f, + .voltages = ldo_vauxn_voltages, + .voltages_len = ARRAY_SIZE(ldo_vauxn_voltages), }, [AB8500_LDO_AUX2] = { .desc = { @@ -313,8 +360,9 @@ static struct ab8500_regulator_info .id = AB8500_LDO_AUX2, .owner = THIS_MODULE, .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages), - .volt_table = ldo_vauxn_voltages, }, + .min_uV = 1100000, + .max_uV = 3300000, .update_bank = 0x04, .update_reg = 0x09, .update_mask = 0x0c, @@ -322,6 +370,8 @@ static struct ab8500_regulator_info .voltage_bank = 0x04, .voltage_reg = 0x20, .voltage_mask = 0x0f, + .voltages = ldo_vauxn_voltages, + .voltages_len = ARRAY_SIZE(ldo_vauxn_voltages), }, [AB8500_LDO_AUX3] = { .desc = { @@ -331,8 +381,9 @@ static struct ab8500_regulator_info .id = AB8500_LDO_AUX3, .owner = THIS_MODULE, .n_voltages = ARRAY_SIZE(ldo_vaux3_voltages), - .volt_table = ldo_vaux3_voltages, }, + .min_uV = 1100000, + .max_uV = 3300000, .update_bank = 0x04, .update_reg = 0x0a, .update_mask = 0x03, @@ -340,6 +391,8 @@ static struct ab8500_regulator_info .voltage_bank = 0x04, .voltage_reg = 0x21, .voltage_mask = 0x07, + .voltages = ldo_vaux3_voltages, + .voltages_len = ARRAY_SIZE(ldo_vaux3_voltages), }, [AB8500_LDO_INTCORE] = { .desc = { @@ -349,8 +402,9 @@ static struct ab8500_regulator_info .id = AB8500_LDO_INTCORE, .owner = THIS_MODULE, .n_voltages = ARRAY_SIZE(ldo_vintcore_voltages), - .volt_table = ldo_vintcore_voltages, }, + .min_uV = 1100000, + .max_uV = 3300000, .update_bank = 0x03, .update_reg = 0x80, .update_mask = 0x44, @@ -358,6 +412,8 @@ static struct ab8500_regulator_info .voltage_bank = 0x03, .voltage_reg = 0x80, .voltage_mask = 0x38, + .voltages = ldo_vintcore_voltages, + .voltages_len = ARRAY_SIZE(ldo_vintcore_voltages), }, /* @@ -373,9 +429,9 @@ static struct ab8500_regulator_info .id = AB8500_LDO_TVOUT, .owner = THIS_MODULE, .n_voltages = 1, - .min_uV = 2000000, }, .delay = 10000, + .fixed_uV = 2000000, .update_bank = 0x03, .update_reg = 0x80, .update_mask = 0x82, @@ -389,8 +445,8 @@ static struct ab8500_regulator_info .id = AB8500_LDO_USB, .owner = THIS_MODULE, .n_voltages = 1, - .min_uV = 3300000, }, + .fixed_uV = 3300000, .update_bank = 0x03, .update_reg = 0x82, .update_mask = 0x03, @@ -404,8 +460,8 @@ static struct ab8500_regulator_info .id = AB8500_LDO_AUDIO, .owner = THIS_MODULE, .n_voltages = 1, - .min_uV = 2000000, }, + .fixed_uV = 2000000, .update_bank = 0x03, .update_reg = 0x83, .update_mask = 0x02, @@ -419,8 +475,8 @@ static struct ab8500_regulator_info .id = AB8500_LDO_ANAMIC1, .owner = THIS_MODULE, .n_voltages = 1, - .min_uV = 2050000, }, + .fixed_uV = 2050000, .update_bank = 0x03, .update_reg = 0x83, .update_mask = 0x08, @@ -434,8 +490,8 @@ static struct ab8500_regulator_info .id = AB8500_LDO_ANAMIC2, .owner = THIS_MODULE, .n_voltages 
= 1, - .min_uV = 2050000, }, + .fixed_uV = 2050000, .update_bank = 0x03, .update_reg = 0x83, .update_mask = 0x10, @@ -449,8 +505,8 @@ static struct ab8500_regulator_info .id = AB8500_LDO_DMIC, .owner = THIS_MODULE, .n_voltages = 1, - .min_uV = 1800000, }, + .fixed_uV = 1800000, .update_bank = 0x03, .update_reg = 0x83, .update_mask = 0x04, @@ -464,8 +520,8 @@ static struct ab8500_regulator_info .id = AB8500_LDO_ANA, .owner = THIS_MODULE, .n_voltages = 1, - .min_uV = 1200000, }, + .fixed_uV = 1200000, .update_bank = 0x04, .update_reg = 0x06, .update_mask = 0x0c, @@ -713,7 +769,9 @@ static __devinit int ab8500_regulator_register(struct platform_device *pdev, if (info->desc.id == AB8500_LDO_AUX3) { info->desc.n_voltages = ARRAY_SIZE(ldo_vauxn_voltages); - info->desc.volt_table = ldo_vauxn_voltages; + info->voltages = ldo_vauxn_voltages; + info->voltages_len = + ARRAY_SIZE(ldo_vauxn_voltages); info->voltage_mask = 0xf; } } diff --git a/trunk/drivers/regulator/ad5398.c b/trunk/drivers/regulator/ad5398.c index f123f7e3b752..46d05f38baf8 100644 --- a/trunk/drivers/regulator/ad5398.c +++ b/trunk/drivers/regulator/ad5398.c @@ -89,12 +89,9 @@ static int ad5398_set_current_limit(struct regulator_dev *rdev, int min_uA, int unsigned short data; int ret; - if (min_uA < chip->min_uA) - min_uA = chip->min_uA; - if (max_uA > chip->max_uA) - max_uA = chip->max_uA; - - if (min_uA > chip->max_uA || max_uA < chip->min_uA) + if (min_uA > chip->max_uA || min_uA < chip->min_uA) + return -EINVAL; + if (max_uA > chip->max_uA || max_uA < chip->min_uA) return -EINVAL; selector = DIV_ROUND_UP((min_uA - chip->min_uA) * chip->current_level, diff --git a/trunk/drivers/regulator/anatop-regulator.c b/trunk/drivers/regulator/anatop-regulator.c index e9c2085f9dfb..e82e7eaac0f1 100644 --- a/trunk/drivers/regulator/anatop-regulator.c +++ b/trunk/drivers/regulator/anatop-regulator.c @@ -43,15 +43,33 @@ struct anatop_regulator { struct regulator_init_data *initdata; }; -static int anatop_set_voltage_sel(struct regulator_dev *reg, unsigned selector) +static int anatop_set_voltage(struct regulator_dev *reg, int min_uV, + int max_uV, unsigned *selector) { struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg); - u32 val, mask; + u32 val, sel, mask; + int uv; + + uv = min_uV; + dev_dbg(®->dev, "%s: uv %d, min %d, max %d\n", __func__, + uv, anatop_reg->min_voltage, + anatop_reg->max_voltage); + + if (uv < anatop_reg->min_voltage) { + if (max_uV > anatop_reg->min_voltage) + uv = anatop_reg->min_voltage; + else + return -EINVAL; + } if (!anatop_reg->control_reg) return -ENOTSUPP; - val = anatop_reg->min_bit_val + selector; + sel = DIV_ROUND_UP(uv - anatop_reg->min_voltage, 25000); + if (sel * 25000 + anatop_reg->min_voltage > anatop_reg->max_voltage) + return -EINVAL; + val = anatop_reg->min_bit_val + sel; + *selector = sel; dev_dbg(®->dev, "%s: calculated val %d\n", __func__, val); mask = ((1 << anatop_reg->vol_bit_width) - 1) << anatop_reg->vol_bit_shift; @@ -76,11 +94,21 @@ static int anatop_get_voltage_sel(struct regulator_dev *reg) return val - anatop_reg->min_bit_val; } +static int anatop_list_voltage(struct regulator_dev *reg, unsigned selector) +{ + struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg); + int uv; + + uv = anatop_reg->min_voltage + selector * 25000; + dev_dbg(®->dev, "vddio = %d, selector = %u\n", uv, selector); + + return uv; +} + static struct regulator_ops anatop_rops = { - .set_voltage_sel = anatop_set_voltage_sel, + .set_voltage = anatop_set_voltage, .get_voltage_sel = anatop_get_voltage_sel, - 
.list_voltage = regulator_list_voltage_linear, - .map_voltage = regulator_map_voltage_linear, + .list_voltage = anatop_list_voltage, }; static int __devinit anatop_regulator_probe(struct platform_device *pdev) @@ -148,8 +176,6 @@ static int __devinit anatop_regulator_probe(struct platform_device *pdev) rdesc->n_voltages = (sreg->max_voltage - sreg->min_voltage) / 25000 + 1; - rdesc->min_uV = sreg->min_voltage; - rdesc->uV_step = 25000; config.dev = &pdev->dev; config.init_data = initdata; diff --git a/trunk/drivers/regulator/arizona-ldo1.c b/trunk/drivers/regulator/arizona-ldo1.c deleted file mode 100644 index c8f95c07adb6..000000000000 --- a/trunk/drivers/regulator/arizona-ldo1.c +++ /dev/null @@ -1,138 +0,0 @@ -/* - * arizona-ldo1.c -- LDO1 supply for Arizona devices - * - * Copyright 2012 Wolfson Microelectronics PLC. - * - * Author: Mark Brown - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -struct arizona_ldo1 { - struct regulator_dev *regulator; - struct arizona *arizona; - - struct regulator_consumer_supply supply; - struct regulator_init_data init_data; -}; - -static struct regulator_ops arizona_ldo1_ops = { - .list_voltage = regulator_list_voltage_linear, - .map_voltage = regulator_map_voltage_linear, - .get_voltage_sel = regulator_get_voltage_sel_regmap, - .set_voltage_sel = regulator_set_voltage_sel_regmap, -}; - -static const struct regulator_desc arizona_ldo1 = { - .name = "LDO1", - .supply_name = "LDOVDD", - .type = REGULATOR_VOLTAGE, - .ops = &arizona_ldo1_ops, - - .vsel_reg = ARIZONA_LDO1_CONTROL_1, - .vsel_mask = ARIZONA_LDO1_VSEL_MASK, - .min_uV = 900000, - .uV_step = 50000, - .n_voltages = 7, - - .owner = THIS_MODULE, -}; - -static const struct regulator_init_data arizona_ldo1_default = { - .constraints = { - .valid_ops_mask = REGULATOR_CHANGE_STATUS, - }, - .num_consumer_supplies = 1, -}; - -static __devinit int arizona_ldo1_probe(struct platform_device *pdev) -{ - struct arizona *arizona = dev_get_drvdata(pdev->dev.parent); - struct regulator_config config = { }; - struct arizona_ldo1 *ldo1; - int ret; - - ldo1 = devm_kzalloc(&pdev->dev, sizeof(*ldo1), GFP_KERNEL); - if (ldo1 == NULL) { - dev_err(&pdev->dev, "Unable to allocate private data\n"); - return -ENOMEM; - } - - ldo1->arizona = arizona; - - /* - * Since the chip usually supplies itself we provide some - * default init_data for it. This will be overridden with - * platform data if provided. 
- */ - ldo1->init_data = arizona_ldo1_default; - ldo1->init_data.consumer_supplies = &ldo1->supply; - ldo1->supply.supply = "DCVDD"; - ldo1->supply.dev_name = dev_name(arizona->dev); - - config.dev = arizona->dev; - config.driver_data = ldo1; - config.regmap = arizona->regmap; - config.ena_gpio = arizona->pdata.ldoena; - - if (arizona->pdata.ldo1) - config.init_data = arizona->pdata.ldo1; - else - config.init_data = &ldo1->init_data; - - ldo1->regulator = regulator_register(&arizona_ldo1, &config); - if (IS_ERR(ldo1->regulator)) { - ret = PTR_ERR(ldo1->regulator); - dev_err(arizona->dev, "Failed to register LDO1 supply: %d\n", - ret); - return ret; - } - - platform_set_drvdata(pdev, ldo1); - - return 0; -} - -static __devexit int arizona_ldo1_remove(struct platform_device *pdev) -{ - struct arizona_ldo1 *ldo1 = platform_get_drvdata(pdev); - - regulator_unregister(ldo1->regulator); - - return 0; -} - -static struct platform_driver arizona_ldo1_driver = { - .probe = arizona_ldo1_probe, - .remove = __devexit_p(arizona_ldo1_remove), - .driver = { - .name = "arizona-ldo1", - .owner = THIS_MODULE, - }, -}; - -module_platform_driver(arizona_ldo1_driver); - -/* Module information */ -MODULE_AUTHOR("Mark Brown "); -MODULE_DESCRIPTION("Arizona LDO1 driver"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:arizona-ldo1"); diff --git a/trunk/drivers/regulator/arizona-micsupp.c b/trunk/drivers/regulator/arizona-micsupp.c deleted file mode 100644 index 450a069aa9b6..000000000000 --- a/trunk/drivers/regulator/arizona-micsupp.c +++ /dev/null @@ -1,188 +0,0 @@ -/* - * arizona-micsupp.c -- Microphone supply for Arizona devices - * - * Copyright 2012 Wolfson Microelectronics PLC. - * - * Author: Mark Brown - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#define ARIZONA_MICSUPP_MAX_SELECTOR 0x1f - -struct arizona_micsupp { - struct regulator_dev *regulator; - struct arizona *arizona; - - struct regulator_consumer_supply supply; - struct regulator_init_data init_data; -}; - -static int arizona_micsupp_list_voltage(struct regulator_dev *rdev, - unsigned int selector) -{ - if (selector > ARIZONA_MICSUPP_MAX_SELECTOR) - return -EINVAL; - - if (selector == ARIZONA_MICSUPP_MAX_SELECTOR) - return 3300000; - else - return (selector * 50000) + 1700000; -} - -static int arizona_micsupp_map_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) -{ - unsigned int voltage; - int selector; - - if (min_uV < 1700000) - min_uV = 1700000; - - if (min_uV > 3200000) - selector = ARIZONA_MICSUPP_MAX_SELECTOR; - else - selector = DIV_ROUND_UP(min_uV - 1700000, 50000); - - if (selector < 0) - return -EINVAL; - - voltage = arizona_micsupp_list_voltage(rdev, selector); - if (voltage < min_uV || voltage > max_uV) - return -EINVAL; - - return selector; -} - -static struct regulator_ops arizona_micsupp_ops = { - .enable = regulator_enable_regmap, - .disable = regulator_disable_regmap, - .is_enabled = regulator_is_enabled_regmap, - - .list_voltage = arizona_micsupp_list_voltage, - .map_voltage = arizona_micsupp_map_voltage, - - .get_voltage_sel = regulator_get_voltage_sel_regmap, - .set_voltage_sel = regulator_set_voltage_sel_regmap, -}; - -static const struct regulator_desc arizona_micsupp = { - .name = "MICVDD", - .supply_name = "CPVDD", - .type = REGULATOR_VOLTAGE, - .n_voltages = ARIZONA_MICSUPP_MAX_SELECTOR + 1, - .ops = &arizona_micsupp_ops, - - .vsel_reg = ARIZONA_LDO2_CONTROL_1, - .vsel_mask = ARIZONA_LDO2_VSEL_MASK, - .enable_reg = ARIZONA_MIC_CHARGE_PUMP_1, - .enable_mask = ARIZONA_CPMIC_ENA, - - .owner = THIS_MODULE, -}; - -static const struct regulator_init_data arizona_micsupp_default = { - .constraints = { - .valid_ops_mask = REGULATOR_CHANGE_STATUS | - REGULATOR_CHANGE_VOLTAGE, - .min_uV = 1700000, - .max_uV = 3300000, - }, - - .num_consumer_supplies = 1, -}; - -static __devinit int arizona_micsupp_probe(struct platform_device *pdev) -{ - struct arizona *arizona = dev_get_drvdata(pdev->dev.parent); - struct regulator_config config = { }; - struct arizona_micsupp *micsupp; - int ret; - - micsupp = devm_kzalloc(&pdev->dev, sizeof(*micsupp), GFP_KERNEL); - if (micsupp == NULL) { - dev_err(&pdev->dev, "Unable to allocate private data\n"); - return -ENOMEM; - } - - micsupp->arizona = arizona; - - /* - * Since the chip usually supplies itself we provide some - * default init_data for it. This will be overridden with - * platform data if provided. 
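The deleted MICVDD list/map pair above shows a common pattern: map_voltage() picks a candidate selector and then validates it by round-tripping through list_voltage(). A rough userspace C sketch of that pattern follows; the 1.7 V base, 50 mV step and the special 3.3 V top step are copied from the hunk, everything else (names, the example in main()) is illustrative.

#include <stdio.h>

#define MAX_SEL 0x1f

static int list_voltage(unsigned int sel)
{
	if (sel > MAX_SEL)
		return -1;
	/* Top step jumps to 3.3 V; the rest is linear from 1.7 V. */
	return sel == MAX_SEL ? 3300000 : 1700000 + (int)sel * 50000;
}

static int map_voltage(int min_uV, int max_uV)
{
	int sel, uV;

	if (min_uV < 1700000)
		min_uV = 1700000;
	sel = min_uV > 3200000 ? MAX_SEL
			       : (min_uV - 1700000 + 49999) / 50000;

	uV = list_voltage(sel);		/* validate the candidate selector */
	return (uV >= min_uV && uV <= max_uV) ? sel : -1;
}

int main(void)
{
	printf("sel=%d\n", map_voltage(1800000, 1900000));	/* -> 2 */
	return 0;
}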
- */ - micsupp->init_data = arizona_micsupp_default; - micsupp->init_data.consumer_supplies = &micsupp->supply; - micsupp->supply.supply = "MICVDD"; - micsupp->supply.dev_name = dev_name(arizona->dev); - - config.dev = arizona->dev; - config.driver_data = micsupp; - config.regmap = arizona->regmap; - - if (arizona->pdata.micvdd) - config.init_data = arizona->pdata.micvdd; - else - config.init_data = &micsupp->init_data; - - /* Default to regulated mode until the API supports bypass */ - regmap_update_bits(arizona->regmap, ARIZONA_MIC_CHARGE_PUMP_1, - ARIZONA_CPMIC_BYPASS, 0); - - micsupp->regulator = regulator_register(&arizona_micsupp, &config); - if (IS_ERR(micsupp->regulator)) { - ret = PTR_ERR(micsupp->regulator); - dev_err(arizona->dev, "Failed to register mic supply: %d\n", - ret); - return ret; - } - - platform_set_drvdata(pdev, micsupp); - - return 0; -} - -static __devexit int arizona_micsupp_remove(struct platform_device *pdev) -{ - struct arizona_micsupp *micsupp = platform_get_drvdata(pdev); - - regulator_unregister(micsupp->regulator); - - return 0; -} - -static struct platform_driver arizona_micsupp_driver = { - .probe = arizona_micsupp_probe, - .remove = __devexit_p(arizona_micsupp_remove), - .driver = { - .name = "arizona-micsupp", - .owner = THIS_MODULE, - }, -}; - -module_platform_driver(arizona_micsupp_driver); - -/* Module information */ -MODULE_AUTHOR("Mark Brown "); -MODULE_DESCRIPTION("Arizona microphone supply driver"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:arizona-micsupp"); diff --git a/trunk/drivers/regulator/core.c b/trunk/drivers/regulator/core.c index 2e31dffbefe7..8b4b3829d9e7 100644 --- a/trunk/drivers/regulator/core.c +++ b/trunk/drivers/regulator/core.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include @@ -109,6 +108,28 @@ static const char *rdev_get_name(struct regulator_dev *rdev) return ""; } +/* gets the regulator for a given consumer device */ +static struct regulator *get_device_regulator(struct device *dev) +{ + struct regulator *regulator = NULL; + struct regulator_dev *rdev; + + mutex_lock(®ulator_list_mutex); + list_for_each_entry(rdev, ®ulator_list, list) { + mutex_lock(&rdev->mutex); + list_for_each_entry(regulator, &rdev->consumer_list, list) { + if (regulator->dev == dev) { + mutex_unlock(&rdev->mutex); + mutex_unlock(®ulator_list_mutex); + return regulator; + } + } + mutex_unlock(&rdev->mutex); + } + mutex_unlock(®ulator_list_mutex); + return NULL; +} + /** * of_get_regulator - get a regulator device node based on supply name * @dev: Device pointer for the consumer (of regulator) device @@ -282,6 +303,18 @@ static int regulator_check_drms(struct regulator_dev *rdev) return 0; } +static ssize_t device_requested_uA_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct regulator *regulator; + + regulator = get_device_regulator(dev); + if (regulator == NULL) + return 0; + + return sprintf(buf, "%d\n", regulator->uA_load); +} + static ssize_t regulator_uV_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -394,9 +427,6 @@ static ssize_t regulator_status_show(struct device *dev, case REGULATOR_STATUS_STANDBY: label = "standby"; break; - case REGULATOR_STATUS_UNDEFINED: - label = "undefined"; - break; default: return -ERANGE; } @@ -937,14 +967,6 @@ static int set_machine_constraints(struct regulator_dev *rdev, } } - if (rdev->constraints->ramp_delay && ops->set_ramp_delay) { - ret = ops->set_ramp_delay(rdev, rdev->constraints->ramp_delay); - if (ret < 0) { - 
rdev_err(rdev, "failed to set ramp_delay\n"); - goto out; - } - } - print_constraints(rdev); return 0; out: @@ -1075,29 +1097,48 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, list_add(®ulator->list, &rdev->consumer_list); if (dev) { + /* create a 'requested_microamps_name' sysfs entry */ + size = scnprintf(buf, REG_STR_SIZE, + "microamps_requested_%s-%s", + dev_name(dev), supply_name); + if (size >= REG_STR_SIZE) + goto overflow_err; + regulator->dev = dev; + sysfs_attr_init(®ulator->dev_attr.attr); + regulator->dev_attr.attr.name = kstrdup(buf, GFP_KERNEL); + if (regulator->dev_attr.attr.name == NULL) + goto attr_name_err; + + regulator->dev_attr.attr.mode = 0444; + regulator->dev_attr.show = device_requested_uA_show; + err = device_create_file(dev, ®ulator->dev_attr); + if (err < 0) { + rdev_warn(rdev, "could not add regulator_dev requested microamps sysfs entry\n"); + goto attr_name_err; + } - /* Add a link to the device sysfs entry */ + /* also add a link to the device sysfs entry */ size = scnprintf(buf, REG_STR_SIZE, "%s-%s", dev->kobj.name, supply_name); if (size >= REG_STR_SIZE) - goto overflow_err; + goto attr_err; regulator->supply_name = kstrdup(buf, GFP_KERNEL); if (regulator->supply_name == NULL) - goto overflow_err; + goto attr_err; err = sysfs_create_link(&rdev->dev.kobj, &dev->kobj, buf); if (err) { rdev_warn(rdev, "could not add device link %s err %d\n", dev->kobj.name, err); - /* non-fatal */ + goto link_name_err; } } else { regulator->supply_name = kstrdup(supply_name, GFP_KERNEL); if (regulator->supply_name == NULL) - goto overflow_err; + goto attr_err; } regulator->debugfs = debugfs_create_dir(regulator->supply_name, @@ -1124,6 +1165,12 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, mutex_unlock(&rdev->mutex); return regulator; +link_name_err: + kfree(regulator->supply_name); +attr_err: + device_remove_file(regulator->dev, ®ulator->dev_attr); +attr_name_err: + kfree(regulator->dev_attr.attr.name); overflow_err: list_del(®ulator->list); kfree(regulator); @@ -1134,7 +1181,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, static int _regulator_get_enable_time(struct regulator_dev *rdev) { if (!rdev->desc->ops->enable_time) - return rdev->desc->enable_time; + return 0; return rdev->desc->ops->enable_time(rdev); } @@ -1373,8 +1420,11 @@ void regulator_put(struct regulator *regulator) debugfs_remove_recursive(regulator->debugfs); /* remove any sysfs entries */ - if (regulator->dev) + if (regulator->dev) { sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name); + device_remove_file(regulator->dev, ®ulator->dev_attr); + kfree(regulator->dev_attr.attr.name); + } kfree(regulator->supply_name); list_del(®ulator->list); kfree(regulator); @@ -1409,61 +1459,19 @@ void devm_regulator_put(struct regulator *regulator) { int rc; - rc = devres_release(regulator->dev, devm_regulator_release, + rc = devres_destroy(regulator->dev, devm_regulator_release, devm_regulator_match, regulator); - if (rc != 0) + if (rc == 0) + regulator_put(regulator); + else WARN_ON(rc); } EXPORT_SYMBOL_GPL(devm_regulator_put); -static int _regulator_do_enable(struct regulator_dev *rdev) -{ - int ret, delay; - - /* Query before enabling in case configuration dependent. 
*/ - ret = _regulator_get_enable_time(rdev); - if (ret >= 0) { - delay = ret; - } else { - rdev_warn(rdev, "enable_time() failed: %d\n", ret); - delay = 0; - } - - trace_regulator_enable(rdev_get_name(rdev)); - - if (rdev->ena_gpio) { - gpio_set_value_cansleep(rdev->ena_gpio, - !rdev->ena_gpio_invert); - rdev->ena_gpio_state = 1; - } else if (rdev->desc->ops->enable) { - ret = rdev->desc->ops->enable(rdev); - if (ret < 0) - return ret; - } else { - return -EINVAL; - } - - /* Allow the regulator to ramp; it would be useful to extend - * this for bulk operations so that the regulators can ramp - * together. */ - trace_regulator_enable_delay(rdev_get_name(rdev)); - - if (delay >= 1000) { - mdelay(delay / 1000); - udelay(delay % 1000); - } else if (delay) { - udelay(delay); - } - - trace_regulator_enable_complete(rdev_get_name(rdev)); - - return 0; -} - /* locks held by regulator_enable() */ static int _regulator_enable(struct regulator_dev *rdev) { - int ret; + int ret, delay; /* check voltage and requested load before enabling */ if (rdev->constraints && @@ -1477,10 +1485,40 @@ static int _regulator_enable(struct regulator_dev *rdev) if (!_regulator_can_change_status(rdev)) return -EPERM; - ret = _regulator_do_enable(rdev); + if (!rdev->desc->ops->enable) + return -EINVAL; + + /* Query before enabling in case configuration + * dependent. */ + ret = _regulator_get_enable_time(rdev); + if (ret >= 0) { + delay = ret; + } else { + rdev_warn(rdev, "enable_time() failed: %d\n", + ret); + delay = 0; + } + + trace_regulator_enable(rdev_get_name(rdev)); + + /* Allow the regulator to ramp; it would be useful + * to extend this for bulk operations so that the + * regulators can ramp together. */ + ret = rdev->desc->ops->enable(rdev); if (ret < 0) return ret; + trace_regulator_enable_delay(rdev_get_name(rdev)); + + if (delay >= 1000) { + mdelay(delay / 1000); + udelay(delay % 1000); + } else if (delay) { + udelay(delay); + } + + trace_regulator_enable_complete(rdev_get_name(rdev)); + } else if (ret < 0) { rdev_err(rdev, "is_enabled() failed: %d\n", ret); return ret; @@ -1529,30 +1567,6 @@ int regulator_enable(struct regulator *regulator) } EXPORT_SYMBOL_GPL(regulator_enable); -static int _regulator_do_disable(struct regulator_dev *rdev) -{ - int ret; - - trace_regulator_disable(rdev_get_name(rdev)); - - if (rdev->ena_gpio) { - gpio_set_value_cansleep(rdev->ena_gpio, - rdev->ena_gpio_invert); - rdev->ena_gpio_state = 0; - - } else if (rdev->desc->ops->disable) { - ret = rdev->desc->ops->disable(rdev); - if (ret != 0) - return ret; - } - - trace_regulator_disable_complete(rdev_get_name(rdev)); - - _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE, - NULL); - return 0; -} - /* locks held by regulator_disable() */ static int _regulator_disable(struct regulator_dev *rdev) { @@ -1567,12 +1581,20 @@ static int _regulator_disable(struct regulator_dev *rdev) (rdev->constraints && !rdev->constraints->always_on)) { /* we are last user */ - if (_regulator_can_change_status(rdev)) { - ret = _regulator_do_disable(rdev); + if (_regulator_can_change_status(rdev) && + rdev->desc->ops->disable) { + trace_regulator_disable(rdev_get_name(rdev)); + + ret = rdev->desc->ops->disable(rdev); if (ret < 0) { rdev_err(rdev, "failed to disable\n"); return ret; } + + trace_regulator_disable_complete(rdev_get_name(rdev)); + + _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE, + NULL); } rdev->use_count = 0; @@ -1790,10 +1812,6 @@ EXPORT_SYMBOL_GPL(regulator_disable_regmap); static int _regulator_is_enabled(struct regulator_dev 
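The enable path above splits the ramp delay into a millisecond busy-wait plus a microsecond remainder. A tiny standalone C sketch of that split (the mdelay/udelay stand-ins below only log; they are not the kernel primitives):

#include <stdio.h>

static void mdelay(unsigned int ms) { printf("busy-wait %u ms\n", ms); }
static void udelay(unsigned int us) { printf("busy-wait %u us\n", us); }

/* Split a microsecond ramp delay the way the enable path above does. */
static void ramp_delay(unsigned int delay_us)
{
	if (delay_us >= 1000) {
		mdelay(delay_us / 1000);
		udelay(delay_us % 1000);
	} else if (delay_us) {
		udelay(delay_us);
	}
}

int main(void)
{
	ramp_delay(2300);	/* -> 2 ms + 300 us */
	return 0;
}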
*rdev) { - /* A GPIO control always takes precedence */ - if (rdev->ena_gpio) - return rdev->ena_gpio_state; - /* If we don't know then assume that the regulator is always on */ if (!rdev->desc->ops->is_enabled) return 1; @@ -1864,31 +1882,6 @@ int regulator_list_voltage_linear(struct regulator_dev *rdev, } EXPORT_SYMBOL_GPL(regulator_list_voltage_linear); -/** - * regulator_list_voltage_table - List voltages with table based mapping - * - * @rdev: Regulator device - * @selector: Selector to convert into a voltage - * - * Regulators with table based mapping between voltages and - * selectors can set volt_table in the regulator descriptor - * and then use this function as their list_voltage() operation. - */ -int regulator_list_voltage_table(struct regulator_dev *rdev, - unsigned int selector) -{ - if (!rdev->desc->volt_table) { - BUG_ON(!rdev->desc->volt_table); - return -EINVAL; - } - - if (selector >= rdev->desc->n_voltages) - return -EINVAL; - - return rdev->desc->volt_table[selector]; -} -EXPORT_SYMBOL_GPL(regulator_list_voltage_table); - /** * regulator_list_voltage - enumerate supported voltages * @regulator: regulator source @@ -1935,18 +1928,8 @@ EXPORT_SYMBOL_GPL(regulator_list_voltage); int regulator_is_supported_voltage(struct regulator *regulator, int min_uV, int max_uV) { - struct regulator_dev *rdev = regulator->rdev; int i, voltages, ret; - /* If we can't change voltage check the current voltage */ - if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) { - ret = regulator_get_voltage(regulator); - if (ret >= 0) - return (min_uV >= ret && ret <= max_uV); - else - return ret; - } - ret = regulator_count_voltages(regulator); if (ret < 0) return ret; @@ -2062,14 +2045,6 @@ int regulator_map_voltage_linear(struct regulator_dev *rdev, { int ret, voltage; - /* Allow uV_step to be 0 for fixed voltage */ - if (rdev->desc->n_voltages == 1 && rdev->desc->uV_step == 0) { - if (min_uV <= rdev->desc->min_uV && rdev->desc->min_uV <= max_uV) - return 0; - else - return -EINVAL; - } - if (!rdev->desc->uV_step) { BUG_ON(!rdev->desc->uV_step); return -EINVAL; @@ -2096,7 +2071,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev, { int ret; int delay = 0; - int best_val = 0; + int best_val; unsigned int selector; int old_selector = -1; @@ -2109,8 +2084,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev, * If we can't obtain the old selector there is not enough * info to call set_voltage_time_sel(). 
*/ - if (_regulator_is_enabled(rdev) && - rdev->desc->ops->set_voltage_time_sel && + if (rdev->desc->ops->set_voltage_time_sel && rdev->desc->ops->get_voltage_sel) { old_selector = rdev->desc->ops->get_voltage_sel(rdev); if (old_selector < 0) @@ -2120,45 +2094,29 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev, if (rdev->desc->ops->set_voltage) { ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV, &selector); - - if (ret >= 0) { - if (rdev->desc->ops->list_voltage) - best_val = rdev->desc->ops->list_voltage(rdev, - selector); - else - best_val = _regulator_get_voltage(rdev); - } - } else if (rdev->desc->ops->set_voltage_sel) { - if (rdev->desc->ops->map_voltage) { + if (rdev->desc->ops->map_voltage) ret = rdev->desc->ops->map_voltage(rdev, min_uV, max_uV); - } else { - if (rdev->desc->ops->list_voltage == - regulator_list_voltage_linear) - ret = regulator_map_voltage_linear(rdev, - min_uV, max_uV); - else - ret = regulator_map_voltage_iterate(rdev, - min_uV, max_uV); - } + else + ret = regulator_map_voltage_iterate(rdev, min_uV, + max_uV); if (ret >= 0) { - best_val = rdev->desc->ops->list_voltage(rdev, ret); - if (min_uV <= best_val && max_uV >= best_val) { - selector = ret; - ret = rdev->desc->ops->set_voltage_sel(rdev, - ret); - } else { - ret = -EINVAL; - } + selector = ret; + ret = rdev->desc->ops->set_voltage_sel(rdev, ret); } } else { ret = -EINVAL; } + if (rdev->desc->ops->list_voltage) + best_val = rdev->desc->ops->list_voltage(rdev, selector); + else + best_val = -1; + /* Call set_voltage_time_sel if successfully obtained old_selector */ - if (ret == 0 && _regulator_is_enabled(rdev) && old_selector >= 0 && + if (ret == 0 && old_selector >= 0 && rdev->desc->ops->set_voltage_time_sel) { delay = rdev->desc->ops->set_voltage_time_sel(rdev, @@ -2168,19 +2126,19 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev, delay); delay = 0; } + } - /* Insert any necessary delays */ - if (delay >= 1000) { - mdelay(delay / 1000); - udelay(delay % 1000); - } else if (delay) { - udelay(delay); - } + /* Insert any necessary delays */ + if (delay >= 1000) { + mdelay(delay / 1000); + udelay(delay % 1000); + } else if (delay) { + udelay(delay); } - if (ret == 0 && best_val >= 0) + if (ret == 0) _notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE, - (void *)best_val); + NULL); trace_regulator_set_voltage_complete(rdev_get_name(rdev), best_val); @@ -2290,46 +2248,6 @@ int regulator_set_voltage_time(struct regulator *regulator, } EXPORT_SYMBOL_GPL(regulator_set_voltage_time); -/** - *regulator_set_voltage_time_sel - get raise/fall time - * @regulator: regulator source - * @old_selector: selector for starting voltage - * @new_selector: selector for target voltage - * - * Provided with the starting and target voltage selectors, this function - * returns time in microseconds required to rise or fall to this new voltage - * - * Drivers providing ramp_delay in regulation_constraints can use this as their - * set_voltage_time_sel() operation. 
- */ -int regulator_set_voltage_time_sel(struct regulator_dev *rdev, - unsigned int old_selector, - unsigned int new_selector) -{ - unsigned int ramp_delay = 0; - int old_volt, new_volt; - - if (rdev->constraints->ramp_delay) - ramp_delay = rdev->constraints->ramp_delay; - else if (rdev->desc->ramp_delay) - ramp_delay = rdev->desc->ramp_delay; - - if (ramp_delay == 0) { - rdev_warn(rdev, "ramp_delay not set\n"); - return 0; - } - - /* sanity check */ - if (!rdev->desc->ops->list_voltage) - return -EINVAL; - - old_volt = rdev->desc->ops->list_voltage(rdev, old_selector); - new_volt = rdev->desc->ops->list_voltage(rdev, new_selector); - - return DIV_ROUND_UP(abs(new_volt - old_volt), ramp_delay); -} -EXPORT_SYMBOL_GPL(regulator_set_voltage_time_sel); - /** * regulator_sync_voltage - re-apply last regulator output voltage * @regulator: regulator source @@ -2710,7 +2628,7 @@ static void _notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data) { /* call rdev chain first */ - blocking_notifier_call_chain(&rdev->notifier, event, data); + blocking_notifier_call_chain(&rdev->notifier, event, NULL); } /** @@ -2991,10 +2909,10 @@ int regulator_mode_to_status(unsigned int mode) return REGULATOR_STATUS_NORMAL; case REGULATOR_MODE_IDLE: return REGULATOR_STATUS_IDLE; - case REGULATOR_MODE_STANDBY: + case REGULATOR_STATUS_STANDBY: return REGULATOR_STATUS_STANDBY; default: - return REGULATOR_STATUS_UNDEFINED; + return 0; } } EXPORT_SYMBOL_GPL(regulator_mode_to_status); @@ -3187,10 +3105,7 @@ regulator_register(const struct regulator_desc *regulator_desc, rdev->reg_data = config->driver_data; rdev->owner = regulator_desc->owner; rdev->desc = regulator_desc; - if (config->regmap) - rdev->regmap = config->regmap; - else - rdev->regmap = dev_get_regmap(dev, NULL); + rdev->regmap = config->regmap; INIT_LIST_HEAD(&rdev->consumer_list); INIT_LIST_HEAD(&rdev->list); BLOCKING_INIT_NOTIFIER_HEAD(&rdev->notifier); @@ -3217,26 +3132,6 @@ regulator_register(const struct regulator_desc *regulator_desc, dev_set_drvdata(&rdev->dev, rdev); - if (config->ena_gpio) { - ret = gpio_request_one(config->ena_gpio, - GPIOF_DIR_OUT | config->ena_gpio_flags, - rdev_get_name(rdev)); - if (ret != 0) { - rdev_err(rdev, "Failed to request enable GPIO%d: %d\n", - config->ena_gpio, ret); - goto clean; - } - - rdev->ena_gpio = config->ena_gpio; - rdev->ena_gpio_invert = config->ena_gpio_invert; - - if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH) - rdev->ena_gpio_state = 1; - - if (rdev->ena_gpio_invert) - rdev->ena_gpio_state = !rdev->ena_gpio_state; - } - /* set regulator constraints */ if (init_data) constraints = &init_data->constraints; @@ -3305,8 +3200,6 @@ regulator_register(const struct regulator_desc *regulator_desc, scrub: if (rdev->supply) regulator_put(rdev->supply); - if (rdev->ena_gpio) - gpio_free(rdev->ena_gpio); kfree(rdev->constraints); device_unregister(&rdev->dev); /* device core frees rdev */ @@ -3340,8 +3233,6 @@ void regulator_unregister(struct regulator_dev *rdev) unset_regulator_supplies(rdev); list_del(&rdev->list); kfree(rdev->constraints); - if (rdev->ena_gpio) - gpio_free(rdev->ena_gpio); device_unregister(&rdev->dev); mutex_unlock(®ulator_list_mutex); } @@ -3581,15 +3472,6 @@ static int __init regulator_init_complete(void) struct regulation_constraints *c; int enabled, ret; - /* - * Since DT doesn't provide an idiomatic mechanism for - * enabling full constraints and since it's much more natural - * with DT to provide them just assume that a DT enabled - * system has full constraints. 
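The rise/fall-time helper removed above boils down to "voltage delta divided by ramp rate, rounded up". A standalone C sketch of that arithmetic, with ramp_delay expressed in uV/us as in the constraints; the example numbers in main() are made up.

#include <stdio.h>
#include <stdlib.h>

/* Microseconds needed to slew between two voltages at ramp_delay uV/us. */
static int voltage_ramp_time_us(int old_uV, int new_uV, int ramp_delay)
{
	if (ramp_delay <= 0)
		return 0;	/* unknown ramp rate: insert no delay */
	return (abs(new_uV - old_uV) + ramp_delay - 1) / ramp_delay;
}

int main(void)
{
	/* e.g. 1.1 V -> 1.3 V at 10000 uV/us takes 20 us */
	printf("%d us\n", voltage_ramp_time_us(1100000, 1300000, 10000));
	return 0;
}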
- */ - if (of_have_populated_dt()) - has_full_constraints = true; - mutex_lock(®ulator_list_mutex); /* If we have a full configuration then disable any regulators diff --git a/trunk/drivers/regulator/da903x.c b/trunk/drivers/regulator/da903x.c index 36c5b92fe0af..1005f5f7e603 100644 --- a/trunk/drivers/regulator/da903x.c +++ b/trunk/drivers/regulator/da903x.c @@ -107,9 +107,6 @@ static int da903x_set_voltage_sel(struct regulator_dev *rdev, unsigned selector) struct device *da9034_dev = to_da903x_dev(rdev); uint8_t val, mask; - if (rdev->desc->n_voltages == 1) - return -EINVAL; - val = selector << info->vol_shift; mask = ((1 << info->vol_nbits) - 1) << info->vol_shift; @@ -123,9 +120,6 @@ static int da903x_get_voltage_sel(struct regulator_dev *rdev) uint8_t val, mask; int ret; - if (rdev->desc->n_voltages == 1) - return 0; - ret = da903x_read(da9034_dev, info->vol_reg, &val); if (ret) return ret; diff --git a/trunk/drivers/regulator/da9052-regulator.c b/trunk/drivers/regulator/da9052-regulator.c index 903299cf15cf..88976d8d44ed 100644 --- a/trunk/drivers/regulator/da9052-regulator.c +++ b/trunk/drivers/regulator/da9052-regulator.c @@ -405,12 +405,12 @@ static int __devinit da9052_regulator_probe(struct platform_device *pdev) if (!nproot) return -ENODEV; - for_each_child_of_node(nproot, np) { + for (np = of_get_next_child(nproot, NULL); np; + np = of_get_next_child(nproot, np)) { if (!of_node_cmp(np->name, regulator->info->reg_desc.name)) { config.init_data = of_get_regulator_init_data( &pdev->dev, np); - config.of_node = np; break; } } diff --git a/trunk/drivers/regulator/fixed-helper.c b/trunk/drivers/regulator/fixed-helper.c index f9d027992aae..cacd33c9d042 100644 --- a/trunk/drivers/regulator/fixed-helper.c +++ b/trunk/drivers/regulator/fixed-helper.c @@ -1,5 +1,4 @@ #include -#include #include #include #include @@ -14,20 +13,17 @@ static void regulator_fixed_release(struct device *dev) { struct fixed_regulator_data *data = container_of(dev, struct fixed_regulator_data, pdev.dev); - kfree(data->cfg.supply_name); kfree(data); } /** - * regulator_register_fixed_name - register a no-op fixed regulator + * regulator_register_fixed - register a no-op fixed regulator * @id: platform device id - * @name: name to be used for the regulator * @supplies: consumers for this regulator * @num_supplies: number of consumers - * @uv: voltage in microvolts */ -struct platform_device *regulator_register_always_on(int id, const char *name, - struct regulator_consumer_supply *supplies, int num_supplies, int uv) +struct platform_device *regulator_register_fixed(int id, + struct regulator_consumer_supply *supplies, int num_supplies) { struct fixed_regulator_data *data; @@ -35,13 +31,8 @@ struct platform_device *regulator_register_always_on(int id, const char *name, if (!data) return NULL; - data->cfg.supply_name = kstrdup(name, GFP_KERNEL); - if (!data->cfg.supply_name) { - kfree(data); - return NULL; - } - - data->cfg.microvolts = uv; + data->cfg.supply_name = "fixed-dummy"; + data->cfg.microvolts = 0; data->cfg.gpio = -EINVAL; data->cfg.enabled_at_boot = 1; data->cfg.init_data = &data->init_data; diff --git a/trunk/drivers/regulator/fixed.c b/trunk/drivers/regulator/fixed.c index 185468c4d38f..f09fe7b20e82 100644 --- a/trunk/drivers/regulator/fixed.c +++ b/trunk/drivers/regulator/fixed.c @@ -35,6 +35,10 @@ struct fixed_voltage_data { struct regulator_desc desc; struct regulator_dev *dev; int microvolts; + int gpio; + unsigned startup_delay; + bool enable_high; + bool is_enabled; }; @@ -57,11 +61,11 @@ 
of_get_fixed_voltage_config(struct device *dev) config = devm_kzalloc(dev, sizeof(struct fixed_voltage_config), GFP_KERNEL); if (!config) - return ERR_PTR(-ENOMEM); + return NULL; config->init_data = of_get_regulator_init_data(dev, dev->of_node); if (!config->init_data) - return ERR_PTR(-EINVAL); + return NULL; init_data = config->init_data; init_data->constraints.apply_uV = 0; @@ -72,26 +76,13 @@ of_get_fixed_voltage_config(struct device *dev) } else { dev_err(dev, "Fixed regulator specified with variable voltages\n"); - return ERR_PTR(-EINVAL); + return NULL; } if (init_data->constraints.boot_on) config->enabled_at_boot = true; config->gpio = of_get_named_gpio(np, "gpio", 0); - /* - * of_get_named_gpio() currently returns ENODEV rather than - * EPROBE_DEFER. This code attempts to be compatible with both - * for now; the ENODEV check can be removed once the API is fixed. - * of_get_named_gpio() doesn't differentiate between a missing - * property (which would be fine here, since the GPIO is optional) - * and some other error. Patches have been posted for both issues. - * Once they are check in, we should replace this with: - * if (config->gpio < 0 && config->gpio != -ENOENT) - */ - if ((config->gpio == -ENODEV) || (config->gpio == -EPROBE_DEFER)) - return ERR_PTR(-EPROBE_DEFER); - delay = of_get_property(np, "startup-delay-us", NULL); if (delay) config->startup_delay = be32_to_cpu(*delay); @@ -102,12 +93,43 @@ of_get_fixed_voltage_config(struct device *dev) if (of_find_property(np, "gpio-open-drain", NULL)) config->gpio_is_open_drain = true; - if (of_find_property(np, "vin-supply", NULL)) - config->input_supply = "vin"; - return config; } +static int fixed_voltage_is_enabled(struct regulator_dev *dev) +{ + struct fixed_voltage_data *data = rdev_get_drvdata(dev); + + return data->is_enabled; +} + +static int fixed_voltage_enable(struct regulator_dev *dev) +{ + struct fixed_voltage_data *data = rdev_get_drvdata(dev); + + gpio_set_value_cansleep(data->gpio, data->enable_high); + data->is_enabled = true; + + return 0; +} + +static int fixed_voltage_disable(struct regulator_dev *dev) +{ + struct fixed_voltage_data *data = rdev_get_drvdata(dev); + + gpio_set_value_cansleep(data->gpio, !data->enable_high); + data->is_enabled = false; + + return 0; +} + +static int fixed_voltage_enable_time(struct regulator_dev *dev) +{ + struct fixed_voltage_data *data = rdev_get_drvdata(dev); + + return data->startup_delay; +} + static int fixed_voltage_get_voltage(struct regulator_dev *dev) { struct fixed_voltage_data *data = rdev_get_drvdata(dev); @@ -129,6 +151,15 @@ static int fixed_voltage_list_voltage(struct regulator_dev *dev, return data->microvolts; } +static struct regulator_ops fixed_voltage_gpio_ops = { + .is_enabled = fixed_voltage_is_enabled, + .enable = fixed_voltage_enable, + .disable = fixed_voltage_disable, + .enable_time = fixed_voltage_enable_time, + .get_voltage = fixed_voltage_get_voltage, + .list_voltage = fixed_voltage_list_voltage, +}; + static struct regulator_ops fixed_voltage_ops = { .get_voltage = fixed_voltage_get_voltage, .list_voltage = fixed_voltage_list_voltage, @@ -141,13 +172,10 @@ static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev) struct regulator_config cfg = { }; int ret; - if (pdev->dev.of_node) { + if (pdev->dev.of_node) config = of_get_fixed_voltage_config(&pdev->dev); - if (IS_ERR(config)) - return PTR_ERR(config); - } else { + else config = pdev->dev.platform_data; - } if (!config) return -ENOMEM; @@ -168,44 +196,59 @@ static int __devinit 
reg_fixed_voltage_probe(struct platform_device *pdev) } drvdata->desc.type = REGULATOR_VOLTAGE; drvdata->desc.owner = THIS_MODULE; - drvdata->desc.ops = &fixed_voltage_ops; - drvdata->desc.enable_time = config->startup_delay; + if (config->microvolts) + drvdata->desc.n_voltages = 1; - if (config->input_supply) { - drvdata->desc.supply_name = kstrdup(config->input_supply, - GFP_KERNEL); - if (!drvdata->desc.supply_name) { + drvdata->microvolts = config->microvolts; + drvdata->gpio = config->gpio; + drvdata->startup_delay = config->startup_delay; + + if (gpio_is_valid(config->gpio)) { + int gpio_flag; + drvdata->enable_high = config->enable_high; + + /* FIXME: Remove below print warning + * + * config->gpio must be set to -EINVAL by platform code if + * GPIO control is not required. However, early adopters + * not requiring GPIO control may forget to initialize + * config->gpio to -EINVAL. This will cause GPIO 0 to be used + * for GPIO control. + * + * This warning will be removed once there are a couple of users + * for this driver. + */ + if (!config->gpio) + dev_warn(&pdev->dev, + "using GPIO 0 for regulator enable control\n"); + + /* + * set output direction without changing state + * to prevent glitch + */ + drvdata->is_enabled = config->enabled_at_boot; + ret = drvdata->is_enabled ? + config->enable_high : !config->enable_high; + gpio_flag = ret ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW; + + if (config->gpio_is_open_drain) + gpio_flag |= GPIOF_OPEN_DRAIN; + + ret = gpio_request_one(config->gpio, gpio_flag, + config->supply_name); + if (ret) { dev_err(&pdev->dev, - "Failed to allocate input supply\n"); - ret = -ENOMEM; + "Could not obtain regulator enable GPIO %d: %d\n", + config->gpio, ret); goto err_name; } - } - - if (config->microvolts) - drvdata->desc.n_voltages = 1; - drvdata->microvolts = config->microvolts; + drvdata->desc.ops = &fixed_voltage_gpio_ops; - if (config->gpio >= 0) - cfg.ena_gpio = config->gpio; - cfg.ena_gpio_invert = !config->enable_high; - if (config->enabled_at_boot) { - if (config->enable_high) { - cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH; - } else { - cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW; - } } else { - if (config->enable_high) { - cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW; - } else { - cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH; - } + drvdata->desc.ops = &fixed_voltage_ops; } - if (config->gpio_is_open_drain) - cfg.ena_gpio_flags |= GPIOF_OPEN_DRAIN; cfg.dev = &pdev->dev; cfg.init_data = config->init_data; @@ -216,7 +259,7 @@ static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev) if (IS_ERR(drvdata->dev)) { ret = PTR_ERR(drvdata->dev); dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret); - goto err_input; + goto err_gpio; } platform_set_drvdata(pdev, drvdata); @@ -226,8 +269,9 @@ static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev) return 0; -err_input: - kfree(drvdata->desc.supply_name); +err_gpio: + if (gpio_is_valid(config->gpio)) + gpio_free(config->gpio); err_name: kfree(drvdata->desc.name); err: @@ -239,7 +283,8 @@ static int __devexit reg_fixed_voltage_remove(struct platform_device *pdev) struct fixed_voltage_data *drvdata = platform_get_drvdata(pdev); regulator_unregister(drvdata->dev); - kfree(drvdata->desc.supply_name); + if (gpio_is_valid(drvdata->gpio)) + gpio_free(drvdata->gpio); kfree(drvdata->desc.name); return 0; @@ -251,6 +296,8 @@ static const struct of_device_id fixed_of_match[] __devinitconst = { {}, }; MODULE_DEVICE_TABLE(of, fixed_of_match); +#else +#define fixed_of_match NULL 
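The fixed-regulator probe code above takes care to set the enable GPIO's output direction without changing its state, so that claiming the pin does not glitch the rail. A small userspace C sketch of just that decision (the field names mirror the platform data in the hunk; the function itself is illustrative, not kernel code):

#include <stdio.h>
#include <stdbool.h>

/*
 * Pick the initial physical GPIO level for an enable pin: drive the
 * active level only if the rail should already be on at boot,
 * otherwise drive the inactive level, so the supply state is preserved.
 */
static bool initial_gpio_level(bool enabled_at_boot, bool enable_high)
{
	return enabled_at_boot ? enable_high : !enable_high;
}

int main(void)
{
	printf("boot-on,  active-high -> %d\n", initial_gpio_level(true, true));
	printf("boot-off, active-low  -> %d\n", initial_gpio_level(false, false));
	return 0;
}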
#endif static struct platform_driver regulator_fixed_voltage_driver = { @@ -259,7 +306,7 @@ static struct platform_driver regulator_fixed_voltage_driver = { .driver = { .name = "reg-fixed-voltage", .owner = THIS_MODULE, - .of_match_table = of_match_ptr(fixed_of_match), + .of_match_table = fixed_of_match, }, }; diff --git a/trunk/drivers/regulator/gpio-regulator.c b/trunk/drivers/regulator/gpio-regulator.c index 34b67bee9323..242851a4c1a6 100644 --- a/trunk/drivers/regulator/gpio-regulator.c +++ b/trunk/drivers/regulator/gpio-regulator.c @@ -36,6 +36,11 @@ struct gpio_regulator_data { struct regulator_desc desc; struct regulator_dev *dev; + int enable_gpio; + bool enable_high; + bool is_enabled; + unsigned startup_delay; + struct gpio *gpios; int nr_gpios; @@ -45,6 +50,44 @@ struct gpio_regulator_data { int state; }; +static int gpio_regulator_is_enabled(struct regulator_dev *dev) +{ + struct gpio_regulator_data *data = rdev_get_drvdata(dev); + + return data->is_enabled; +} + +static int gpio_regulator_enable(struct regulator_dev *dev) +{ + struct gpio_regulator_data *data = rdev_get_drvdata(dev); + + if (gpio_is_valid(data->enable_gpio)) { + gpio_set_value_cansleep(data->enable_gpio, data->enable_high); + data->is_enabled = true; + } + + return 0; +} + +static int gpio_regulator_disable(struct regulator_dev *dev) +{ + struct gpio_regulator_data *data = rdev_get_drvdata(dev); + + if (gpio_is_valid(data->enable_gpio)) { + gpio_set_value_cansleep(data->enable_gpio, !data->enable_high); + data->is_enabled = false; + } + + return 0; +} + +static int gpio_regulator_enable_time(struct regulator_dev *dev) +{ + struct gpio_regulator_data *data = rdev_get_drvdata(dev); + + return data->startup_delay; +} + static int gpio_regulator_get_value(struct regulator_dev *dev) { struct gpio_regulator_data *data = rdev_get_drvdata(dev); @@ -110,12 +153,20 @@ static int gpio_regulator_set_current_limit(struct regulator_dev *dev, } static struct regulator_ops gpio_regulator_voltage_ops = { + .is_enabled = gpio_regulator_is_enabled, + .enable = gpio_regulator_enable, + .disable = gpio_regulator_disable, + .enable_time = gpio_regulator_enable_time, .get_voltage = gpio_regulator_get_value, .set_voltage = gpio_regulator_set_voltage, .list_voltage = gpio_regulator_list_voltage, }; static struct regulator_ops gpio_regulator_current_ops = { + .is_enabled = gpio_regulator_is_enabled, + .enable = gpio_regulator_enable, + .disable = gpio_regulator_disable, + .enable_time = gpio_regulator_enable_time, .get_current_limit = gpio_regulator_get_value, .set_current_limit = gpio_regulator_set_current_limit, }; @@ -162,7 +213,6 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev) drvdata->nr_states = config->nr_states; drvdata->desc.owner = THIS_MODULE; - drvdata->desc.enable_time = config->startup_delay; /* handle regulator type*/ switch (config->type) { @@ -182,12 +232,52 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev) break; } + drvdata->enable_gpio = config->enable_gpio; + drvdata->startup_delay = config->startup_delay; + + if (gpio_is_valid(config->enable_gpio)) { + drvdata->enable_high = config->enable_high; + + ret = gpio_request(config->enable_gpio, config->supply_name); + if (ret) { + dev_err(&pdev->dev, + "Could not obtain regulator enable GPIO %d: %d\n", + config->enable_gpio, ret); + goto err_memstate; + } + + /* set output direction without changing state + * to prevent glitch + */ + if (config->enabled_at_boot) { + drvdata->is_enabled = true; + ret = 
gpio_direction_output(config->enable_gpio, + config->enable_high); + } else { + drvdata->is_enabled = false; + ret = gpio_direction_output(config->enable_gpio, + !config->enable_high); + } + + if (ret) { + dev_err(&pdev->dev, + "Could not configure regulator enable GPIO %d direction: %d\n", + config->enable_gpio, ret); + goto err_enablegpio; + } + } else { + /* Regulator without GPIO control is considered + * always enabled + */ + drvdata->is_enabled = true; + } + drvdata->nr_gpios = config->nr_gpios; ret = gpio_request_array(drvdata->gpios, drvdata->nr_gpios); if (ret) { dev_err(&pdev->dev, "Could not obtain regulator setting GPIOs: %d\n", ret); - goto err_memstate; + goto err_enablegpio; } /* build initial state from gpio init data. */ @@ -202,21 +292,6 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev) cfg.init_data = config->init_data; cfg.driver_data = drvdata; - if (config->enable_gpio >= 0) - cfg.ena_gpio = config->enable_gpio; - cfg.ena_gpio_invert = !config->enable_high; - if (config->enabled_at_boot) { - if (config->enable_high) - cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH; - else - cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW; - } else { - if (config->enable_high) - cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW; - else - cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH; - } - drvdata->dev = regulator_register(&drvdata->desc, &cfg); if (IS_ERR(drvdata->dev)) { ret = PTR_ERR(drvdata->dev); @@ -230,6 +305,9 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev) err_stategpio: gpio_free_array(drvdata->gpios, drvdata->nr_gpios); +err_enablegpio: + if (gpio_is_valid(config->enable_gpio)) + gpio_free(config->enable_gpio); err_memstate: kfree(drvdata->states); err_memgpio: @@ -251,6 +329,9 @@ static int __devexit gpio_regulator_remove(struct platform_device *pdev) kfree(drvdata->states); kfree(drvdata->gpios); + if (gpio_is_valid(drvdata->enable_gpio)) + gpio_free(drvdata->enable_gpio); + kfree(drvdata->desc.name); return 0; diff --git a/trunk/drivers/regulator/isl6271a-regulator.c b/trunk/drivers/regulator/isl6271a-regulator.c index 1d145a07ada9..56d273f25603 100644 --- a/trunk/drivers/regulator/isl6271a-regulator.c +++ b/trunk/drivers/regulator/isl6271a-regulator.c @@ -75,12 +75,19 @@ static struct regulator_ops isl_core_ops = { static int isl6271a_get_fixed_voltage(struct regulator_dev *dev) { - return dev->desc->min_uV; + int id = rdev_get_id(dev); + return (id == 1) ? 1100000 : 1300000; +} + +static int isl6271a_list_fixed_voltage(struct regulator_dev *dev, unsigned selector) +{ + int id = rdev_get_id(dev); + return (id == 1) ? 
1100000 : 1300000; } static struct regulator_ops isl_fixed_ops = { .get_voltage = isl6271a_get_fixed_voltage, - .list_voltage = regulator_list_voltage_linear, + .list_voltage = isl6271a_list_fixed_voltage, }; static const struct regulator_desc isl_rd[] = { @@ -100,7 +107,6 @@ static const struct regulator_desc isl_rd[] = { .ops = &isl_fixed_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, - .min_uV = 1100000, }, { .name = "LDO2", .id = 2, @@ -108,7 +114,6 @@ static const struct regulator_desc isl_rd[] = { .ops = &isl_fixed_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, - .min_uV = 1300000, }, }; diff --git a/trunk/drivers/regulator/lp3971.c b/trunk/drivers/regulator/lp3971.c index 7c6e3b8ff484..981bea9cb9d7 100644 --- a/trunk/drivers/regulator/lp3971.c +++ b/trunk/drivers/regulator/lp3971.c @@ -65,11 +65,11 @@ static const int buck_base_addr[] = { #define LP3971_BUCK_TARGET_VOL1_REG(x) (buck_base_addr[x]) #define LP3971_BUCK_TARGET_VOL2_REG(x) (buck_base_addr[x]+1) -static const unsigned int buck_voltage_map[] = { - 0, 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, - 1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000, - 1550000, 1600000, 1650000, 1700000, 1800000, 1900000, 2500000, 2800000, - 3000000, 3300000, +static const int buck_voltage_map[] = { + 0, 800, 850, 900, 950, 1000, 1050, 1100, + 1150, 1200, 1250, 1300, 1350, 1400, 1450, 1500, + 1550, 1600, 1650, 1700, 1800, 1900, 2500, 2800, + 3000, 3300, }; #define BUCK_TARGET_VOL_MASK 0x3f @@ -98,19 +98,39 @@ static const unsigned int buck_voltage_map[] = { #define LDO_VOL_CONTR_SHIFT(x) ((x & 1) << 2) #define LDO_VOL_CONTR_MASK 0x0f -static const unsigned int ldo45_voltage_map[] = { - 1000000, 1050000, 1100000, 1150000, 1200000, 1250000, 1300000, 1350000, - 1400000, 1500000, 1800000, 1900000, 2500000, 2800000, 3000000, 3300000, +static const int ldo45_voltage_map[] = { + 1000, 1050, 1100, 1150, 1200, 1250, 1300, 1350, + 1400, 1500, 1800, 1900, 2500, 2800, 3000, 3300, }; -static const unsigned int ldo123_voltage_map[] = { - 1800000, 1900000, 2000000, 2100000, 2200000, 2300000, 2400000, 2500000, - 2600000, 2700000, 2800000, 2900000, 3000000, 3100000, 3200000, 3300000, +static const int ldo123_voltage_map[] = { + 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500, + 2600, 2700, 2800, 2900, 3000, 3100, 3200, 3300, }; +static const int *ldo_voltage_map[] = { + ldo123_voltage_map, /* LDO1 */ + ldo123_voltage_map, /* LDO2 */ + ldo123_voltage_map, /* LDO3 */ + ldo45_voltage_map, /* LDO4 */ + ldo45_voltage_map, /* LDO5 */ +}; + +#define LDO_VOL_VALUE_MAP(x) (ldo_voltage_map[(x - LP3971_LDO1)]) + #define LDO_VOL_MIN_IDX 0x00 #define LDO_VOL_MAX_IDX 0x0f +static int lp3971_ldo_list_voltage(struct regulator_dev *dev, unsigned index) +{ + int ldo = rdev_get_id(dev) - LP3971_LDO1; + + if (index > LDO_VOL_MAX_IDX) + return -EINVAL; + + return 1000 * LDO_VOL_VALUE_MAP(ldo)[index]; +} + static int lp3971_ldo_is_enabled(struct regulator_dev *dev) { struct lp3971 *lp3971 = rdev_get_drvdata(dev); @@ -149,7 +169,7 @@ static int lp3971_ldo_get_voltage(struct regulator_dev *dev) reg = lp3971_reg_read(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo)); val = (reg >> LDO_VOL_CONTR_SHIFT(ldo)) & LDO_VOL_CONTR_MASK; - return dev->desc->volt_table[val]; + return 1000 * LDO_VOL_VALUE_MAP(ldo)[val]; } static int lp3971_ldo_set_voltage_sel(struct regulator_dev *dev, @@ -164,7 +184,7 @@ static int lp3971_ldo_set_voltage_sel(struct regulator_dev *dev, } static struct regulator_ops lp3971_ldo_ops = { - .list_voltage = 
regulator_list_voltage_table, + .list_voltage = lp3971_ldo_list_voltage, .is_enabled = lp3971_ldo_is_enabled, .enable = lp3971_ldo_enable, .disable = lp3971_ldo_disable, @@ -172,6 +192,14 @@ static struct regulator_ops lp3971_ldo_ops = { .set_voltage_sel = lp3971_ldo_set_voltage_sel, }; +static int lp3971_dcdc_list_voltage(struct regulator_dev *dev, unsigned index) +{ + if (index < BUCK_TARGET_VOL_MIN_IDX || index > BUCK_TARGET_VOL_MAX_IDX) + return -EINVAL; + + return 1000 * buck_voltage_map[index]; +} + static int lp3971_dcdc_is_enabled(struct regulator_dev *dev) { struct lp3971 *lp3971 = rdev_get_drvdata(dev); @@ -212,7 +240,7 @@ static int lp3971_dcdc_get_voltage(struct regulator_dev *dev) reg &= BUCK_TARGET_VOL_MASK; if (reg <= BUCK_TARGET_VOL_MAX_IDX) - val = buck_voltage_map[reg]; + val = 1000 * buck_voltage_map[reg]; else { val = 0; dev_warn(&dev->dev, "chip reported incorrect voltage value.\n"); @@ -245,7 +273,7 @@ static int lp3971_dcdc_set_voltage_sel(struct regulator_dev *dev, } static struct regulator_ops lp3971_dcdc_ops = { - .list_voltage = regulator_list_voltage_table, + .list_voltage = lp3971_dcdc_list_voltage, .is_enabled = lp3971_dcdc_is_enabled, .enable = lp3971_dcdc_enable, .disable = lp3971_dcdc_disable, @@ -259,7 +287,6 @@ static const struct regulator_desc regulators[] = { .id = LP3971_LDO1, .ops = &lp3971_ldo_ops, .n_voltages = ARRAY_SIZE(ldo123_voltage_map), - .volt_table = ldo123_voltage_map, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, @@ -268,7 +295,6 @@ static const struct regulator_desc regulators[] = { .id = LP3971_LDO2, .ops = &lp3971_ldo_ops, .n_voltages = ARRAY_SIZE(ldo123_voltage_map), - .volt_table = ldo123_voltage_map, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, @@ -277,7 +303,6 @@ static const struct regulator_desc regulators[] = { .id = LP3971_LDO3, .ops = &lp3971_ldo_ops, .n_voltages = ARRAY_SIZE(ldo123_voltage_map), - .volt_table = ldo123_voltage_map, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, @@ -286,7 +311,6 @@ static const struct regulator_desc regulators[] = { .id = LP3971_LDO4, .ops = &lp3971_ldo_ops, .n_voltages = ARRAY_SIZE(ldo45_voltage_map), - .volt_table = ldo45_voltage_map, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, @@ -295,7 +319,6 @@ static const struct regulator_desc regulators[] = { .id = LP3971_LDO5, .ops = &lp3971_ldo_ops, .n_voltages = ARRAY_SIZE(ldo45_voltage_map), - .volt_table = ldo45_voltage_map, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, @@ -304,7 +327,6 @@ static const struct regulator_desc regulators[] = { .id = LP3971_DCDC1, .ops = &lp3971_dcdc_ops, .n_voltages = ARRAY_SIZE(buck_voltage_map), - .volt_table = buck_voltage_map, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, @@ -313,7 +335,6 @@ static const struct regulator_desc regulators[] = { .id = LP3971_DCDC2, .ops = &lp3971_dcdc_ops, .n_voltages = ARRAY_SIZE(buck_voltage_map), - .volt_table = buck_voltage_map, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, @@ -322,7 +343,6 @@ static const struct regulator_desc regulators[] = { .id = LP3971_DCDC3, .ops = &lp3971_dcdc_ops, .n_voltages = ARRAY_SIZE(buck_voltage_map), - .volt_table = buck_voltage_map, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, diff --git a/trunk/drivers/regulator/lp3972.c b/trunk/drivers/regulator/lp3972.c index 3cdc755d9b22..de073df7d344 100644 --- a/trunk/drivers/regulator/lp3972.c +++ b/trunk/drivers/regulator/lp3972.c @@ -74,40 +74,54 @@ struct lp3972 { #define LP3972_OVER2_LDO4_EN BIT(4) #define LP3972_OVER1_S_EN BIT(2) -static const unsigned 
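The lp3971 hunks above replace the generic table helper with per-driver lookups into millivolt tables that are scaled by 1000 on the way out. A self-contained C sketch of that table-based list_voltage() shape; the ldo45 values are copied from the map above, the rest is illustrative.

#include <stdio.h>

/* Millivolt table copied from the ldo45 map above; index 0 is 1.0 V. */
static const int ldo45_mV[] = {
	1000, 1050, 1100, 1150, 1200, 1250, 1300, 1350,
	1400, 1500, 1800, 1900, 2500, 2800, 3000, 3300,
};

/* Table-based list_voltage(): selector -> microvolts, or -1 if invalid. */
static int list_voltage_uV(unsigned int selector)
{
	if (selector >= sizeof(ldo45_mV) / sizeof(ldo45_mV[0]))
		return -1;
	return ldo45_mV[selector] * 1000;
}

int main(void)
{
	printf("%d uV\n", list_voltage_uV(10));	/* -> 1800000 */
	return 0;
}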
int ldo1_voltage_map[] = { - 1700000, 1725000, 1750000, 1775000, 1800000, 1825000, 1850000, 1875000, - 1900000, 1925000, 1950000, 1975000, 2000000, +static const int ldo1_voltage_map[] = { + 1700, 1725, 1750, 1775, 1800, 1825, 1850, 1875, + 1900, 1925, 1950, 1975, 2000, }; -static const unsigned int ldo23_voltage_map[] = { - 1800000, 1900000, 2000000, 2100000, 2200000, 2300000, 2400000, 2500000, - 2600000, 2700000, 2800000, 2900000, 3000000, 3100000, 3200000, 3300000, +static const int ldo23_voltage_map[] = { + 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500, + 2600, 2700, 2800, 2900, 3000, 3100, 3200, 3300, }; -static const unsigned int ldo4_voltage_map[] = { - 1000000, 1050000, 1100000, 1150000, 1200000, 1250000, 1300000, 1350000, - 1400000, 1500000, 1800000, 1900000, 2500000, 2800000, 3000000, 3300000, +static const int ldo4_voltage_map[] = { + 1000, 1050, 1100, 1150, 1200, 1250, 1300, 1350, + 1400, 1500, 1800, 1900, 2500, 2800, 3000, 3300, }; -static const unsigned int ldo5_voltage_map[] = { - 0, 0, 0, 0, 0, 850000, 875000, 900000, - 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000, - 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000, - 1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000, +static const int ldo5_voltage_map[] = { + 0, 0, 0, 0, 0, 850, 875, 900, + 925, 950, 975, 1000, 1025, 1050, 1075, 1100, + 1125, 1150, 1175, 1200, 1225, 1250, 1275, 1300, + 1325, 1350, 1375, 1400, 1425, 1450, 1475, 1500, }; -static const unsigned int buck1_voltage_map[] = { - 725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000, - 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000, - 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000, - 1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000, +static const int buck1_voltage_map[] = { + 725, 750, 775, 800, 825, 850, 875, 900, + 925, 950, 975, 1000, 1025, 1050, 1075, 1100, + 1125, 1150, 1175, 1200, 1225, 1250, 1275, 1300, + 1325, 1350, 1375, 1400, 1425, 1450, 1475, 1500, }; -static const unsigned int buck23_voltage_map[] = { - 0, 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, - 1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000, - 1550000, 1600000, 1650000, 1700000, 1800000, 1900000, 2500000, 2800000, - 3000000, 3300000, +static const int buck23_voltage_map[] = { + 0, 800, 850, 900, 950, 1000, 1050, 1100, + 1150, 1200, 1250, 1300, 1350, 1400, 1450, 1500, + 1550, 1600, 1650, 1700, 1800, 1900, 2500, 2800, + 3000, 3300, +}; + +static const int *ldo_voltage_map[] = { + ldo1_voltage_map, + ldo23_voltage_map, + ldo23_voltage_map, + ldo4_voltage_map, + ldo5_voltage_map, +}; + +static const int *buck_voltage_map[] = { + buck1_voltage_map, + buck23_voltage_map, + buck23_voltage_map, }; static const int ldo_output_enable_mask[] = { @@ -146,6 +160,7 @@ static const int buck_base_addr[] = { LP3972_B3TV_REG, }; +#define LP3972_LDO_VOL_VALUE_MAP(x) (ldo_voltage_map[x]) #define LP3972_LDO_OUTPUT_ENABLE_MASK(x) (ldo_output_enable_mask[x]) #define LP3972_LDO_OUTPUT_ENABLE_REG(x) (ldo_output_enable_addr[x]) @@ -162,6 +177,7 @@ static const int buck_base_addr[] = { #define LP3972_LDO_VOL_MIN_IDX(x) (((x) == 4) ? 0x05 : 0x00) #define LP3972_LDO_VOL_MAX_IDX(x) ((x) ? (((x) == 4) ? 
0x1f : 0x0f) : 0x0c) +#define LP3972_BUCK_VOL_VALUE_MAP(x) (buck_voltage_map[x]) #define LP3972_BUCK_VOL_ENABLE_REG(x) (buck_vol_enable_addr[x]) #define LP3972_BUCK_VOL1_REG(x) (buck_base_addr[x]) #define LP3972_BUCK_VOL_MASK 0x1f @@ -226,6 +242,17 @@ static int lp3972_set_bits(struct lp3972 *lp3972, u8 reg, u16 mask, u16 val) return ret; } +static int lp3972_ldo_list_voltage(struct regulator_dev *dev, unsigned index) +{ + int ldo = rdev_get_id(dev) - LP3972_LDO1; + + if (index < LP3972_LDO_VOL_MIN_IDX(ldo) || + index > LP3972_LDO_VOL_MAX_IDX(ldo)) + return -EINVAL; + + return 1000 * LP3972_LDO_VOL_VALUE_MAP(ldo)[index]; +} + static int lp3972_ldo_is_enabled(struct regulator_dev *dev) { struct lp3972 *lp3972 = rdev_get_drvdata(dev); @@ -267,7 +294,7 @@ static int lp3972_ldo_get_voltage(struct regulator_dev *dev) reg = lp3972_reg_read(lp3972, LP3972_LDO_VOL_CONTR_REG(ldo)); val = (reg >> LP3972_LDO_VOL_CONTR_SHIFT(ldo)) & mask; - return dev->desc->volt_table[val]; + return 1000 * LP3972_LDO_VOL_VALUE_MAP(ldo)[val]; } static int lp3972_ldo_set_voltage_sel(struct regulator_dev *dev, @@ -310,7 +337,7 @@ static int lp3972_ldo_set_voltage_sel(struct regulator_dev *dev, } static struct regulator_ops lp3972_ldo_ops = { - .list_voltage = regulator_list_voltage_table, + .list_voltage = lp3972_ldo_list_voltage, .is_enabled = lp3972_ldo_is_enabled, .enable = lp3972_ldo_enable, .disable = lp3972_ldo_disable, @@ -318,6 +345,17 @@ static struct regulator_ops lp3972_ldo_ops = { .set_voltage_sel = lp3972_ldo_set_voltage_sel, }; +static int lp3972_dcdc_list_voltage(struct regulator_dev *dev, unsigned index) +{ + int buck = rdev_get_id(dev) - LP3972_DCDC1; + + if (index < LP3972_BUCK_VOL_MIN_IDX(buck) || + index > LP3972_BUCK_VOL_MAX_IDX(buck)) + return -EINVAL; + + return 1000 * buck_voltage_map[buck][index]; +} + static int lp3972_dcdc_is_enabled(struct regulator_dev *dev) { struct lp3972 *lp3972 = rdev_get_drvdata(dev); @@ -363,7 +401,7 @@ static int lp3972_dcdc_get_voltage(struct regulator_dev *dev) reg = lp3972_reg_read(lp3972, LP3972_BUCK_VOL1_REG(buck)); reg &= LP3972_BUCK_VOL_MASK; if (reg <= LP3972_BUCK_VOL_MAX_IDX(buck)) - val = dev->desc->volt_table[reg]; + val = 1000 * buck_voltage_map[buck][reg]; else { val = 0; dev_warn(&dev->dev, "chip reported incorrect voltage value." 
@@ -398,7 +436,7 @@ static int lp3972_dcdc_set_voltage_sel(struct regulator_dev *dev, } static struct regulator_ops lp3972_dcdc_ops = { - .list_voltage = regulator_list_voltage_table, + .list_voltage = lp3972_dcdc_list_voltage, .is_enabled = lp3972_dcdc_is_enabled, .enable = lp3972_dcdc_enable, .disable = lp3972_dcdc_disable, @@ -412,7 +450,6 @@ static const struct regulator_desc regulators[] = { .id = LP3972_LDO1, .ops = &lp3972_ldo_ops, .n_voltages = ARRAY_SIZE(ldo1_voltage_map), - .volt_table = ldo1_voltage_map, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, @@ -421,7 +458,6 @@ static const struct regulator_desc regulators[] = { .id = LP3972_LDO2, .ops = &lp3972_ldo_ops, .n_voltages = ARRAY_SIZE(ldo23_voltage_map), - .volt_table = ldo23_voltage_map, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, @@ -430,7 +466,6 @@ static const struct regulator_desc regulators[] = { .id = LP3972_LDO3, .ops = &lp3972_ldo_ops, .n_voltages = ARRAY_SIZE(ldo23_voltage_map), - .volt_table = ldo23_voltage_map, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, @@ -439,7 +474,6 @@ static const struct regulator_desc regulators[] = { .id = LP3972_LDO4, .ops = &lp3972_ldo_ops, .n_voltages = ARRAY_SIZE(ldo4_voltage_map), - .volt_table = ldo4_voltage_map, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, @@ -448,7 +482,6 @@ static const struct regulator_desc regulators[] = { .id = LP3972_LDO5, .ops = &lp3972_ldo_ops, .n_voltages = ARRAY_SIZE(ldo5_voltage_map), - .volt_table = ldo5_voltage_map, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, @@ -457,7 +490,6 @@ static const struct regulator_desc regulators[] = { .id = LP3972_DCDC1, .ops = &lp3972_dcdc_ops, .n_voltages = ARRAY_SIZE(buck1_voltage_map), - .volt_table = buck1_voltage_map, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, @@ -466,7 +498,6 @@ static const struct regulator_desc regulators[] = { .id = LP3972_DCDC2, .ops = &lp3972_dcdc_ops, .n_voltages = ARRAY_SIZE(buck23_voltage_map), - .volt_table = buck23_voltage_map, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, @@ -475,7 +506,6 @@ static const struct regulator_desc regulators[] = { .id = LP3972_DCDC3, .ops = &lp3972_dcdc_ops, .n_voltages = ARRAY_SIZE(buck23_voltage_map), - .volt_table = buck23_voltage_map, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, diff --git a/trunk/drivers/regulator/lp872x.c b/trunk/drivers/regulator/lp872x.c deleted file mode 100644 index 212c38eaba70..000000000000 --- a/trunk/drivers/regulator/lp872x.c +++ /dev/null @@ -1,943 +0,0 @@ -/* - * Copyright 2012 Texas Instruments - * - * Author: Milo(Woogyom) Kim - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* Registers : LP8720/8725 shared */ -#define LP872X_GENERAL_CFG 0x00 -#define LP872X_LDO1_VOUT 0x01 -#define LP872X_LDO2_VOUT 0x02 -#define LP872X_LDO3_VOUT 0x03 -#define LP872X_LDO4_VOUT 0x04 -#define LP872X_LDO5_VOUT 0x05 - -/* Registers : LP8720 */ -#define LP8720_BUCK_VOUT1 0x06 -#define LP8720_BUCK_VOUT2 0x07 -#define LP8720_ENABLE 0x08 - -/* Registers : LP8725 */ -#define LP8725_LILO1_VOUT 0x06 -#define LP8725_LILO2_VOUT 0x07 -#define LP8725_BUCK1_VOUT1 0x08 -#define LP8725_BUCK1_VOUT2 0x09 -#define LP8725_BUCK2_VOUT1 0x0A -#define LP8725_BUCK2_VOUT2 0x0B -#define LP8725_BUCK_CTRL 0x0C -#define LP8725_LDO_CTRL 0x0D - -/* Mask/shift : LP8720/LP8725 shared */ -#define LP872X_VOUT_M 0x1F -#define LP872X_START_DELAY_M 0xE0 -#define LP872X_START_DELAY_S 5 -#define LP872X_EN_LDO1_M BIT(0) -#define LP872X_EN_LDO2_M BIT(1) -#define LP872X_EN_LDO3_M BIT(2) -#define LP872X_EN_LDO4_M BIT(3) -#define LP872X_EN_LDO5_M BIT(4) - -/* Mask/shift : LP8720 */ -#define LP8720_TIMESTEP_S 0 /* Addr 00h */ -#define LP8720_TIMESTEP_M BIT(0) -#define LP8720_EXT_DVS_M BIT(2) -#define LP8720_BUCK_FPWM_S 5 /* Addr 07h */ -#define LP8720_BUCK_FPWM_M BIT(5) -#define LP8720_EN_BUCK_M BIT(5) /* Addr 08h */ -#define LP8720_DVS_SEL_M BIT(7) - -/* Mask/shift : LP8725 */ -#define LP8725_TIMESTEP_M 0xC0 /* Addr 00h */ -#define LP8725_TIMESTEP_S 6 -#define LP8725_BUCK1_EN_M BIT(0) -#define LP8725_DVS1_M BIT(2) -#define LP8725_DVS2_M BIT(3) -#define LP8725_BUCK2_EN_M BIT(4) -#define LP8725_BUCK_CL_M 0xC0 /* Addr 09h, 0Bh */ -#define LP8725_BUCK_CL_S 6 -#define LP8725_BUCK1_FPWM_S 1 /* Addr 0Ch */ -#define LP8725_BUCK1_FPWM_M BIT(1) -#define LP8725_BUCK2_FPWM_S 5 -#define LP8725_BUCK2_FPWM_M BIT(5) -#define LP8725_EN_LILO1_M BIT(5) /* Addr 0Dh */ -#define LP8725_EN_LILO2_M BIT(6) - -/* PWM mode */ -#define LP872X_FORCE_PWM 1 -#define LP872X_AUTO_PWM 0 - -#define LP8720_NUM_REGULATORS 6 -#define LP8725_NUM_REGULATORS 9 -#define EXTERN_DVS_USED 0 -#define MAX_DELAY 6 - -/* dump registers in regmap-debugfs */ -#define MAX_REGISTERS 0x0F - -enum lp872x_id { - LP8720, - LP8725, -}; - -struct lp872x { - struct regmap *regmap; - struct device *dev; - enum lp872x_id chipid; - struct lp872x_platform_data *pdata; - struct regulator_dev **regulators; - int num_regulators; - enum lp872x_dvs_state dvs_pin; - int dvs_gpio; -}; - -/* LP8720/LP8725 shared voltage table for LDOs */ -static const unsigned int lp872x_ldo_vtbl[] = { - 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000, 1550000, - 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000, 2000000, - 2100000, 2200000, 2300000, 2400000, 2500000, 2600000, 2650000, 2700000, - 2750000, 2800000, 2850000, 2900000, 2950000, 3000000, 3100000, 3300000, -}; - -/* LP8720 LDO4 voltage table */ -static const unsigned int lp8720_ldo4_vtbl[] = { - 800000, 850000, 900000, 1000000, 1100000, 1200000, 1250000, 1300000, - 1350000, 1400000, 1450000, 1500000, 1550000, 1600000, 1650000, 1700000, - 1750000, 1800000, 1850000, 1900000, 2000000, 2100000, 2200000, 2300000, - 2400000, 2500000, 2600000, 2650000, 2700000, 2750000, 2800000, 2850000, -}; - -/* LP8725 LILO(Low Input Low Output) voltage table */ -static const unsigned int lp8725_lilo_vtbl[] = { - 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, 1150000, - 1200000, 1250000, 1300000, 1350000, 1400000, 1500000, 1600000, 1700000, - 1800000, 1900000, 2000000, 2100000, 2200000, 2300000, 2400000, 2500000, - 2600000, 2700000, 
2800000, 2850000, 2900000, 3000000, 3100000, 3300000, -}; - -/* LP8720 BUCK voltage table */ -#define EXT_R 0 /* external resistor divider */ -static const unsigned int lp8720_buck_vtbl[] = { - EXT_R, 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, - 1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000, - 1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000, - 1950000, 2000000, 2050000, 2100000, 2150000, 2200000, 2250000, 2300000, -}; - -/* LP8725 BUCK voltage table */ -static const unsigned int lp8725_buck_vtbl[] = { - 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, 1150000, - 1200000, 1250000, 1300000, 1350000, 1400000, 1500000, 1600000, 1700000, - 1750000, 1800000, 1850000, 1900000, 2000000, 2100000, 2200000, 2300000, - 2400000, 2500000, 2600000, 2700000, 2800000, 2850000, 2900000, 3000000, -}; - -/* LP8725 BUCK current limit */ -static const unsigned int lp8725_buck_uA[] = { - 460000, 780000, 1050000, 1370000, -}; - -static int lp872x_read_byte(struct lp872x *lp, u8 addr, u8 *data) -{ - int ret; - unsigned int val; - - ret = regmap_read(lp->regmap, addr, &val); - if (ret < 0) { - dev_err(lp->dev, "failed to read 0x%.2x\n", addr); - return ret; - } - - *data = (u8)val; - return 0; -} - -static inline int lp872x_write_byte(struct lp872x *lp, u8 addr, u8 data) -{ - return regmap_write(lp->regmap, addr, data); -} - -static inline int lp872x_update_bits(struct lp872x *lp, u8 addr, - unsigned int mask, u8 data) -{ - return regmap_update_bits(lp->regmap, addr, mask, data); -} - -static int _rdev_to_offset(struct regulator_dev *rdev) -{ - enum lp872x_regulator_id id = rdev_get_id(rdev); - - switch (id) { - case LP8720_ID_LDO1 ... LP8720_ID_BUCK: - return id; - case LP8725_ID_LDO1 ... LP8725_ID_BUCK2: - return id - LP8725_ID_BASE; - default: - return -EINVAL; - } -} - -static int lp872x_get_timestep_usec(struct lp872x *lp) -{ - enum lp872x_id chip = lp->chipid; - u8 val, mask, shift; - int *time_usec, size, ret; - int lp8720_time_usec[] = { 25, 50 }; - int lp8725_time_usec[] = { 32, 64, 128, 256 }; - - switch (chip) { - case LP8720: - mask = LP8720_TIMESTEP_M; - shift = LP8720_TIMESTEP_S; - time_usec = &lp8720_time_usec[0]; - size = ARRAY_SIZE(lp8720_time_usec); - break; - case LP8725: - mask = LP8725_TIMESTEP_M; - shift = LP8725_TIMESTEP_S; - time_usec = &lp8725_time_usec[0]; - size = ARRAY_SIZE(lp8725_time_usec); - break; - default: - return -EINVAL; - } - - ret = lp872x_read_byte(lp, LP872X_GENERAL_CFG, &val); - if (ret) - return -EINVAL; - - val = (val & mask) >> shift; - if (val >= size) - return -EINVAL; - - return *(time_usec + val); -} - -static int lp872x_regulator_enable_time(struct regulator_dev *rdev) -{ - struct lp872x *lp = rdev_get_drvdata(rdev); - enum lp872x_regulator_id regulator = rdev_get_id(rdev); - int time_step_us = lp872x_get_timestep_usec(lp); - int ret, offset; - u8 addr, val; - - if (time_step_us < 0) - return -EINVAL; - - switch (regulator) { - case LP8720_ID_LDO1 ... LP8720_ID_LDO5: - case LP8725_ID_LDO1 ... 
LP8725_ID_LILO2: - offset = _rdev_to_offset(rdev); - if (offset < 0) - return -EINVAL; - - addr = LP872X_LDO1_VOUT + offset; - break; - case LP8720_ID_BUCK: - addr = LP8720_BUCK_VOUT1; - break; - case LP8725_ID_BUCK1: - addr = LP8725_BUCK1_VOUT1; - break; - case LP8725_ID_BUCK2: - addr = LP8725_BUCK2_VOUT1; - break; - default: - return -EINVAL; - } - - ret = lp872x_read_byte(lp, addr, &val); - if (ret) - return ret; - - val = (val & LP872X_START_DELAY_M) >> LP872X_START_DELAY_S; - - return val > MAX_DELAY ? 0 : val * time_step_us; -} - -static void lp872x_set_dvs(struct lp872x *lp, int gpio) -{ - enum lp872x_dvs_sel dvs_sel = lp->pdata->dvs->vsel; - enum lp872x_dvs_state state; - - state = dvs_sel == SEL_V1 ? DVS_HIGH : DVS_LOW; - gpio_set_value(gpio, state); - lp->dvs_pin = state; -} - -static u8 lp872x_select_buck_vout_addr(struct lp872x *lp, - enum lp872x_regulator_id buck) -{ - u8 val, addr; - - if (lp872x_read_byte(lp, LP872X_GENERAL_CFG, &val)) - return 0; - - switch (buck) { - case LP8720_ID_BUCK: - if (val & LP8720_EXT_DVS_M) { - addr = (lp->dvs_pin == DVS_HIGH) ? - LP8720_BUCK_VOUT1 : LP8720_BUCK_VOUT2; - } else { - if (lp872x_read_byte(lp, LP8720_ENABLE, &val)) - return 0; - - addr = val & LP8720_DVS_SEL_M ? - LP8720_BUCK_VOUT1 : LP8720_BUCK_VOUT2; - } - break; - case LP8725_ID_BUCK1: - if (val & LP8725_DVS1_M) - addr = LP8725_BUCK1_VOUT1; - else - addr = (lp->dvs_pin == DVS_HIGH) ? - LP8725_BUCK1_VOUT1 : LP8725_BUCK1_VOUT2; - break; - case LP8725_ID_BUCK2: - addr = val & LP8725_DVS2_M ? - LP8725_BUCK2_VOUT1 : LP8725_BUCK2_VOUT2; - break; - default: - return 0; - } - - return addr; -} - -static bool lp872x_is_valid_buck_addr(u8 addr) -{ - switch (addr) { - case LP8720_BUCK_VOUT1: - case LP8720_BUCK_VOUT2: - case LP8725_BUCK1_VOUT1: - case LP8725_BUCK1_VOUT2: - case LP8725_BUCK2_VOUT1: - case LP8725_BUCK2_VOUT2: - return true; - default: - return false; - } -} - -static int lp872x_buck_set_voltage_sel(struct regulator_dev *rdev, - unsigned selector) -{ - struct lp872x *lp = rdev_get_drvdata(rdev); - enum lp872x_regulator_id buck = rdev_get_id(rdev); - u8 addr, mask = LP872X_VOUT_M; - struct lp872x_dvs *dvs = lp->pdata->dvs; - - if (dvs && gpio_is_valid(dvs->gpio)) - lp872x_set_dvs(lp, dvs->gpio); - - addr = lp872x_select_buck_vout_addr(lp, buck); - if (!lp872x_is_valid_buck_addr(addr)) - return -EINVAL; - - return lp872x_update_bits(lp, addr, mask, selector); -} - -static int lp872x_buck_get_voltage_sel(struct regulator_dev *rdev) -{ - struct lp872x *lp = rdev_get_drvdata(rdev); - enum lp872x_regulator_id buck = rdev_get_id(rdev); - u8 addr, val; - int ret; - - addr = lp872x_select_buck_vout_addr(lp, buck); - if (!lp872x_is_valid_buck_addr(addr)) - return -EINVAL; - - ret = lp872x_read_byte(lp, addr, &val); - if (ret) - return ret; - - return val & LP872X_VOUT_M; -} - -static int lp8725_buck_set_current_limit(struct regulator_dev *rdev, - int min_uA, int max_uA) -{ - struct lp872x *lp = rdev_get_drvdata(rdev); - enum lp872x_regulator_id buck = rdev_get_id(rdev); - int i, max = ARRAY_SIZE(lp8725_buck_uA); - u8 addr, val; - - switch (buck) { - case LP8725_ID_BUCK1: - addr = LP8725_BUCK1_VOUT2; - break; - case LP8725_ID_BUCK2: - addr = LP8725_BUCK2_VOUT2; - break; - default: - return -EINVAL; - } - - for (i = 0 ; i < max ; i++) - if (lp8725_buck_uA[i] >= min_uA && - lp8725_buck_uA[i] <= max_uA) - break; - - if (i == max) - return -EINVAL; - - val = i << LP8725_BUCK_CL_S; - - return lp872x_update_bits(lp, addr, LP8725_BUCK_CL_M, val); -} - -static int 
lp8725_buck_get_current_limit(struct regulator_dev *rdev) -{ - struct lp872x *lp = rdev_get_drvdata(rdev); - enum lp872x_regulator_id buck = rdev_get_id(rdev); - u8 addr, val; - int ret; - - switch (buck) { - case LP8725_ID_BUCK1: - addr = LP8725_BUCK1_VOUT2; - break; - case LP8725_ID_BUCK2: - addr = LP8725_BUCK2_VOUT2; - break; - default: - return -EINVAL; - } - - ret = lp872x_read_byte(lp, addr, &val); - if (ret) - return ret; - - val = (val & LP8725_BUCK_CL_M) >> LP8725_BUCK_CL_S; - - return (val < ARRAY_SIZE(lp8725_buck_uA)) ? - lp8725_buck_uA[val] : -EINVAL; -} - -static int lp872x_buck_set_mode(struct regulator_dev *rdev, unsigned int mode) -{ - struct lp872x *lp = rdev_get_drvdata(rdev); - enum lp872x_regulator_id buck = rdev_get_id(rdev); - u8 addr, mask, shift, val; - - switch (buck) { - case LP8720_ID_BUCK: - addr = LP8720_BUCK_VOUT2; - mask = LP8720_BUCK_FPWM_M; - shift = LP8720_BUCK_FPWM_S; - break; - case LP8725_ID_BUCK1: - addr = LP8725_BUCK_CTRL; - mask = LP8725_BUCK1_FPWM_M; - shift = LP8725_BUCK1_FPWM_S; - break; - case LP8725_ID_BUCK2: - addr = LP8725_BUCK_CTRL; - mask = LP8725_BUCK2_FPWM_M; - shift = LP8725_BUCK2_FPWM_S; - break; - default: - return -EINVAL; - } - - if (mode == REGULATOR_MODE_FAST) - val = LP872X_FORCE_PWM << shift; - else if (mode == REGULATOR_MODE_NORMAL) - val = LP872X_AUTO_PWM << shift; - else - return -EINVAL; - - return lp872x_update_bits(lp, addr, mask, val); -} - -static unsigned int lp872x_buck_get_mode(struct regulator_dev *rdev) -{ - struct lp872x *lp = rdev_get_drvdata(rdev); - enum lp872x_regulator_id buck = rdev_get_id(rdev); - u8 addr, mask, val; - int ret; - - switch (buck) { - case LP8720_ID_BUCK: - addr = LP8720_BUCK_VOUT2; - mask = LP8720_BUCK_FPWM_M; - break; - case LP8725_ID_BUCK1: - addr = LP8725_BUCK_CTRL; - mask = LP8725_BUCK1_FPWM_M; - break; - case LP8725_ID_BUCK2: - addr = LP8725_BUCK_CTRL; - mask = LP8725_BUCK2_FPWM_M; - break; - default: - return -EINVAL; - } - - ret = lp872x_read_byte(lp, addr, &val); - if (ret) - return ret; - - return val & mask ? 
REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL; -} - -static struct regulator_ops lp872x_ldo_ops = { - .list_voltage = regulator_list_voltage_table, - .set_voltage_sel = regulator_set_voltage_sel_regmap, - .get_voltage_sel = regulator_get_voltage_sel_regmap, - .enable = regulator_enable_regmap, - .disable = regulator_disable_regmap, - .is_enabled = regulator_is_enabled_regmap, - .enable_time = lp872x_regulator_enable_time, -}; - -static struct regulator_ops lp8720_buck_ops = { - .list_voltage = regulator_list_voltage_table, - .set_voltage_sel = lp872x_buck_set_voltage_sel, - .get_voltage_sel = lp872x_buck_get_voltage_sel, - .enable = regulator_enable_regmap, - .disable = regulator_disable_regmap, - .is_enabled = regulator_is_enabled_regmap, - .enable_time = lp872x_regulator_enable_time, - .set_mode = lp872x_buck_set_mode, - .get_mode = lp872x_buck_get_mode, -}; - -static struct regulator_ops lp8725_buck_ops = { - .list_voltage = regulator_list_voltage_table, - .set_voltage_sel = lp872x_buck_set_voltage_sel, - .get_voltage_sel = lp872x_buck_get_voltage_sel, - .enable = regulator_enable_regmap, - .disable = regulator_disable_regmap, - .is_enabled = regulator_is_enabled_regmap, - .enable_time = lp872x_regulator_enable_time, - .set_mode = lp872x_buck_set_mode, - .get_mode = lp872x_buck_get_mode, - .set_current_limit = lp8725_buck_set_current_limit, - .get_current_limit = lp8725_buck_get_current_limit, -}; - -static struct regulator_desc lp8720_regulator_desc[] = { - { - .name = "ldo1", - .id = LP8720_ID_LDO1, - .ops = &lp872x_ldo_ops, - .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl), - .volt_table = lp872x_ldo_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP872X_LDO1_VOUT, - .vsel_mask = LP872X_VOUT_M, - .enable_reg = LP8720_ENABLE, - .enable_mask = LP872X_EN_LDO1_M, - }, - { - .name = "ldo2", - .id = LP8720_ID_LDO2, - .ops = &lp872x_ldo_ops, - .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl), - .volt_table = lp872x_ldo_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP872X_LDO2_VOUT, - .vsel_mask = LP872X_VOUT_M, - .enable_reg = LP8720_ENABLE, - .enable_mask = LP872X_EN_LDO2_M, - }, - { - .name = "ldo3", - .id = LP8720_ID_LDO3, - .ops = &lp872x_ldo_ops, - .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl), - .volt_table = lp872x_ldo_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP872X_LDO3_VOUT, - .vsel_mask = LP872X_VOUT_M, - .enable_reg = LP8720_ENABLE, - .enable_mask = LP872X_EN_LDO3_M, - }, - { - .name = "ldo4", - .id = LP8720_ID_LDO4, - .ops = &lp872x_ldo_ops, - .n_voltages = ARRAY_SIZE(lp8720_ldo4_vtbl), - .volt_table = lp8720_ldo4_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP872X_LDO4_VOUT, - .vsel_mask = LP872X_VOUT_M, - .enable_reg = LP8720_ENABLE, - .enable_mask = LP872X_EN_LDO4_M, - }, - { - .name = "ldo5", - .id = LP8720_ID_LDO5, - .ops = &lp872x_ldo_ops, - .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl), - .volt_table = lp872x_ldo_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP872X_LDO5_VOUT, - .vsel_mask = LP872X_VOUT_M, - .enable_reg = LP8720_ENABLE, - .enable_mask = LP872X_EN_LDO5_M, - }, - { - .name = "buck", - .id = LP8720_ID_BUCK, - .ops = &lp8720_buck_ops, - .n_voltages = ARRAY_SIZE(lp8720_buck_vtbl), - .volt_table = lp8720_buck_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .enable_reg = LP8720_ENABLE, - .enable_mask = LP8720_EN_BUCK_M, - }, -}; - -static struct regulator_desc lp8725_regulator_desc[] = { - { - .name = "ldo1", - .id = LP8725_ID_LDO1, - 
.ops = &lp872x_ldo_ops, - .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl), - .volt_table = lp872x_ldo_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP872X_LDO1_VOUT, - .vsel_mask = LP872X_VOUT_M, - .enable_reg = LP8725_LDO_CTRL, - .enable_mask = LP872X_EN_LDO1_M, - }, - { - .name = "ldo2", - .id = LP8725_ID_LDO2, - .ops = &lp872x_ldo_ops, - .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl), - .volt_table = lp872x_ldo_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP872X_LDO2_VOUT, - .vsel_mask = LP872X_VOUT_M, - .enable_reg = LP8725_LDO_CTRL, - .enable_mask = LP872X_EN_LDO2_M, - }, - { - .name = "ldo3", - .id = LP8725_ID_LDO3, - .ops = &lp872x_ldo_ops, - .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl), - .volt_table = lp872x_ldo_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP872X_LDO3_VOUT, - .vsel_mask = LP872X_VOUT_M, - .enable_reg = LP8725_LDO_CTRL, - .enable_mask = LP872X_EN_LDO3_M, - }, - { - .name = "ldo4", - .id = LP8725_ID_LDO4, - .ops = &lp872x_ldo_ops, - .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl), - .volt_table = lp872x_ldo_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP872X_LDO4_VOUT, - .vsel_mask = LP872X_VOUT_M, - .enable_reg = LP8725_LDO_CTRL, - .enable_mask = LP872X_EN_LDO4_M, - }, - { - .name = "ldo5", - .id = LP8725_ID_LDO5, - .ops = &lp872x_ldo_ops, - .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl), - .volt_table = lp872x_ldo_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP872X_LDO5_VOUT, - .vsel_mask = LP872X_VOUT_M, - .enable_reg = LP8725_LDO_CTRL, - .enable_mask = LP872X_EN_LDO5_M, - }, - { - .name = "lilo1", - .id = LP8725_ID_LILO1, - .ops = &lp872x_ldo_ops, - .n_voltages = ARRAY_SIZE(lp8725_lilo_vtbl), - .volt_table = lp8725_lilo_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP8725_LILO1_VOUT, - .vsel_mask = LP872X_VOUT_M, - .enable_reg = LP8725_LDO_CTRL, - .enable_mask = LP8725_EN_LILO1_M, - }, - { - .name = "lilo2", - .id = LP8725_ID_LILO2, - .ops = &lp872x_ldo_ops, - .n_voltages = ARRAY_SIZE(lp8725_lilo_vtbl), - .volt_table = lp8725_lilo_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP8725_LILO2_VOUT, - .vsel_mask = LP872X_VOUT_M, - .enable_reg = LP8725_LDO_CTRL, - .enable_mask = LP8725_EN_LILO2_M, - }, - { - .name = "buck1", - .id = LP8725_ID_BUCK1, - .ops = &lp8725_buck_ops, - .n_voltages = ARRAY_SIZE(lp8725_buck_vtbl), - .volt_table = lp8725_buck_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .enable_reg = LP872X_GENERAL_CFG, - .enable_mask = LP8725_BUCK1_EN_M, - }, - { - .name = "buck2", - .id = LP8725_ID_BUCK2, - .ops = &lp8725_buck_ops, - .n_voltages = ARRAY_SIZE(lp8725_buck_vtbl), - .volt_table = lp8725_buck_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .enable_reg = LP872X_GENERAL_CFG, - .enable_mask = LP8725_BUCK2_EN_M, - }, -}; - -static int lp872x_check_dvs_validity(struct lp872x *lp) -{ - struct lp872x_dvs *dvs = lp->pdata->dvs; - u8 val = 0; - int ret; - - ret = lp872x_read_byte(lp, LP872X_GENERAL_CFG, &val); - if (ret) - return ret; - - ret = 0; - if (lp->chipid == LP8720) { - if (val & LP8720_EXT_DVS_M) - ret = dvs ? 0 : -EINVAL; - } else { - if ((val & LP8725_DVS1_M) == EXTERN_DVS_USED) - ret = dvs ? 
0 : -EINVAL; - } - - return ret; -} - -static int lp872x_init_dvs(struct lp872x *lp) -{ - int ret, gpio; - struct lp872x_dvs *dvs = lp->pdata->dvs; - enum lp872x_dvs_state pinstate; - - ret = lp872x_check_dvs_validity(lp); - if (ret) { - dev_warn(lp->dev, "invalid dvs data: %d\n", ret); - return ret; - } - - gpio = dvs->gpio; - if (!gpio_is_valid(gpio)) { - dev_err(lp->dev, "invalid gpio: %d\n", gpio); - return -EINVAL; - } - - pinstate = dvs->init_state; - ret = devm_gpio_request_one(lp->dev, gpio, pinstate, "LP872X DVS"); - if (ret) { - dev_err(lp->dev, "gpio request err: %d\n", ret); - return ret; - } - - lp->dvs_pin = pinstate; - lp->dvs_gpio = gpio; - - return 0; -} - -static int lp872x_config(struct lp872x *lp) -{ - struct lp872x_platform_data *pdata = lp->pdata; - int ret; - - if (!pdata->update_config) - return 0; - - ret = lp872x_write_byte(lp, LP872X_GENERAL_CFG, pdata->general_config); - if (ret) - return ret; - - return lp872x_init_dvs(lp); -} - -static struct regulator_init_data -*lp872x_find_regulator_init_data(int id, struct lp872x *lp) -{ - int i; - - for (i = 0; i < lp->num_regulators; i++) { - if (lp->pdata->regulator_data[i].id == id) - return lp->pdata->regulator_data[i].init_data; - } - - return NULL; -} - -static int lp872x_regulator_register(struct lp872x *lp) -{ - struct regulator_desc *desc; - struct regulator_config cfg = { }; - struct regulator_dev *rdev; - int i, ret; - - for (i = 0 ; i < lp->num_regulators ; i++) { - desc = (lp->chipid == LP8720) ? &lp8720_regulator_desc[i] : - &lp8725_regulator_desc[i]; - - cfg.dev = lp->dev; - cfg.init_data = lp872x_find_regulator_init_data(desc->id, lp); - cfg.driver_data = lp; - cfg.regmap = lp->regmap; - - rdev = regulator_register(desc, &cfg); - if (IS_ERR(rdev)) { - dev_err(lp->dev, "regulator register err"); - ret = PTR_ERR(rdev); - goto err; - } - - *(lp->regulators + i) = rdev; - } - - return 0; -err: - while (--i >= 0) { - rdev = *(lp->regulators + i); - regulator_unregister(rdev); - } - return ret; -} - -static void lp872x_regulator_unregister(struct lp872x *lp) -{ - struct regulator_dev *rdev; - int i; - - for (i = 0 ; i < lp->num_regulators ; i++) { - rdev = *(lp->regulators + i); - regulator_unregister(rdev); - } -} - -static const struct regmap_config lp872x_regmap_config = { - .reg_bits = 8, - .val_bits = 8, - .max_register = MAX_REGISTERS, -}; - -static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id) -{ - struct lp872x *lp; - struct lp872x_platform_data *pdata = cl->dev.platform_data; - int ret, size, num_regulators; - const int lp872x_num_regulators[] = { - [LP8720] = LP8720_NUM_REGULATORS, - [LP8725] = LP8725_NUM_REGULATORS, - }; - - if (!pdata) { - dev_err(&cl->dev, "no platform data\n"); - return -EINVAL; - } - - lp = devm_kzalloc(&cl->dev, sizeof(struct lp872x), GFP_KERNEL); - if (!lp) - goto err_mem; - - num_regulators = lp872x_num_regulators[id->driver_data]; - size = sizeof(struct regulator_dev *) * num_regulators; - - lp->regulators = devm_kzalloc(&cl->dev, size, GFP_KERNEL); - if (!lp->regulators) - goto err_mem; - - lp->regmap = devm_regmap_init_i2c(cl, &lp872x_regmap_config); - if (IS_ERR(lp->regmap)) { - ret = PTR_ERR(lp->regmap); - dev_err(&cl->dev, "regmap init i2c err: %d\n", ret); - goto err_dev; - } - - lp->dev = &cl->dev; - lp->pdata = pdata; - lp->chipid = id->driver_data; - lp->num_regulators = num_regulators; - i2c_set_clientdata(cl, lp); - - ret = lp872x_config(lp); - if (ret) - goto err_dev; - - return lp872x_regulator_register(lp); - -err_mem: - return -ENOMEM; 
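
[Editor's note] The lp872x_regulator_register() path above registers each regulator in turn and, on failure, walks back over the ones already registered before returning the error. A minimal standalone sketch of that register-then-unwind pattern is below; the names fake_register()/fake_unregister() and the failure point are illustrative stand-ins, not the kernel's regulator API.

#include <stdio.h>

/* Hypothetical stand-ins for regulator_register()/regulator_unregister(). */
static int fake_register(int i)
{
	return (i == 3) ? -1 : 0;	/* pretend the 4th registration fails */
}

static void fake_unregister(int i)
{
	printf("unregister %d\n", i);
}

/* Register n items; on failure, unwind the already-registered ones in reverse. */
static int register_all(int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = fake_register(i);
		if (ret)
			goto err;
	}
	return 0;

err:
	while (--i >= 0)
		fake_unregister(i);
	return ret;
}

int main(void)
{
	return register_all(5) ? 1 : 0;	/* unwinds items 2, 1, 0 in reverse order */
}
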
-err_dev: - return ret; -} - -static int __devexit lp872x_remove(struct i2c_client *cl) -{ - struct lp872x *lp = i2c_get_clientdata(cl); - - lp872x_regulator_unregister(lp); - return 0; -} - -static const struct i2c_device_id lp872x_ids[] = { - {"lp8720", LP8720}, - {"lp8725", LP8725}, - { } -}; -MODULE_DEVICE_TABLE(i2c, lp872x_ids); - -static struct i2c_driver lp872x_driver = { - .driver = { - .name = "lp872x", - .owner = THIS_MODULE, - }, - .probe = lp872x_probe, - .remove = __devexit_p(lp872x_remove), - .id_table = lp872x_ids, -}; - -module_i2c_driver(lp872x_driver); - -MODULE_DESCRIPTION("TI/National Semiconductor LP872x PMU Regulator Driver"); -MODULE_AUTHOR("Milo Kim"); -MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/regulator/lp8788-buck.c b/trunk/drivers/regulator/lp8788-buck.c deleted file mode 100644 index 6356e821400f..000000000000 --- a/trunk/drivers/regulator/lp8788-buck.c +++ /dev/null @@ -1,629 +0,0 @@ -/* - * TI LP8788 MFD - buck regulator driver - * - * Copyright 2012 Texas Instruments - * - * Author: Milo(Woogyom) Kim - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - */ - -#include -#include -#include -#include -#include -#include -#include - -/* register address */ -#define LP8788_EN_BUCK 0x0C -#define LP8788_BUCK_DVS_SEL 0x1D -#define LP8788_BUCK1_VOUT0 0x1E -#define LP8788_BUCK1_VOUT1 0x1F -#define LP8788_BUCK1_VOUT2 0x20 -#define LP8788_BUCK1_VOUT3 0x21 -#define LP8788_BUCK2_VOUT0 0x22 -#define LP8788_BUCK2_VOUT1 0x23 -#define LP8788_BUCK2_VOUT2 0x24 -#define LP8788_BUCK2_VOUT3 0x25 -#define LP8788_BUCK3_VOUT 0x26 -#define LP8788_BUCK4_VOUT 0x27 -#define LP8788_BUCK1_TIMESTEP 0x28 -#define LP8788_BUCK_PWM 0x2D - -/* mask/shift bits */ -#define LP8788_EN_BUCK1_M BIT(0) /* Addr 0Ch */ -#define LP8788_EN_BUCK2_M BIT(1) -#define LP8788_EN_BUCK3_M BIT(2) -#define LP8788_EN_BUCK4_M BIT(3) -#define LP8788_BUCK1_DVS_SEL_M 0x04 /* Addr 1Dh */ -#define LP8788_BUCK1_DVS_M 0x03 -#define LP8788_BUCK1_DVS_S 0 -#define LP8788_BUCK2_DVS_SEL_M 0x40 -#define LP8788_BUCK2_DVS_M 0x30 -#define LP8788_BUCK2_DVS_S 4 -#define LP8788_BUCK1_DVS_I2C BIT(2) -#define LP8788_BUCK2_DVS_I2C BIT(6) -#define LP8788_BUCK1_DVS_PIN (0 << 2) -#define LP8788_BUCK2_DVS_PIN (0 << 6) -#define LP8788_VOUT_M 0x1F /* Addr 1Eh ~ 27h */ -#define LP8788_STARTUP_TIME_M 0xF8 /* Addr 28h ~ 2Bh */ -#define LP8788_STARTUP_TIME_S 3 -#define LP8788_FPWM_BUCK1_M BIT(0) /* Addr 2Dh */ -#define LP8788_FPWM_BUCK1_S 0 -#define LP8788_FPWM_BUCK2_M BIT(1) -#define LP8788_FPWM_BUCK2_S 1 -#define LP8788_FPWM_BUCK3_M BIT(2) -#define LP8788_FPWM_BUCK3_S 2 -#define LP8788_FPWM_BUCK4_M BIT(3) -#define LP8788_FPWM_BUCK4_S 3 - -#define INVALID_ADDR 0xFF -#define LP8788_FORCE_PWM 1 -#define LP8788_AUTO_PWM 0 -#define PIN_LOW 0 -#define PIN_HIGH 1 -#define ENABLE_TIME_USEC 32 - -enum lp8788_dvs_state { - DVS_LOW = GPIOF_OUT_INIT_LOW, - DVS_HIGH = GPIOF_OUT_INIT_HIGH, -}; - -enum lp8788_dvs_mode { - REGISTER, - EXTPIN, -}; - -enum lp8788_buck_id { - BUCK1, - BUCK2, - BUCK3, - BUCK4, -}; - -struct lp8788_pwm_map { - u8 mask; - u8 shift; -}; - -struct lp8788_buck { - struct lp8788 *lp; - struct regulator_dev *regulator; - struct lp8788_pwm_map *pmap; - void *dvs; -}; - -/* BUCK 1 ~ 4 voltage table */ -static const int lp8788_buck_vtbl[] = { - 500000, 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, - 1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000, - 1550000, 
1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000, - 1950000, 2000000, -}; - -/* buck pwm mode selection : used for set/get_mode in regulator ops - * @forced pwm : fast mode - * @auto pwm : normal mode - */ -static struct lp8788_pwm_map buck_pmap[] = { - [BUCK1] = { - .mask = LP8788_FPWM_BUCK1_M, - .shift = LP8788_FPWM_BUCK1_S, - }, - [BUCK2] = { - .mask = LP8788_FPWM_BUCK2_M, - .shift = LP8788_FPWM_BUCK2_S, - }, - [BUCK3] = { - .mask = LP8788_FPWM_BUCK3_M, - .shift = LP8788_FPWM_BUCK3_S, - }, - [BUCK4] = { - .mask = LP8788_FPWM_BUCK4_M, - .shift = LP8788_FPWM_BUCK4_S, - }, -}; - -static const u8 buck1_vout_addr[] = { - LP8788_BUCK1_VOUT0, LP8788_BUCK1_VOUT1, - LP8788_BUCK1_VOUT2, LP8788_BUCK1_VOUT3, -}; - -static const u8 buck2_vout_addr[] = { - LP8788_BUCK2_VOUT0, LP8788_BUCK2_VOUT1, - LP8788_BUCK2_VOUT2, LP8788_BUCK2_VOUT3, -}; - -static void lp8788_buck1_set_dvs(struct lp8788_buck *buck) -{ - struct lp8788_buck1_dvs *dvs = (struct lp8788_buck1_dvs *)buck->dvs; - enum lp8788_dvs_state pinstate; - - if (!dvs) - return; - - pinstate = dvs->vsel == DVS_SEL_V0 ? DVS_LOW : DVS_HIGH; - if (gpio_is_valid(dvs->gpio)) - gpio_set_value(dvs->gpio, pinstate); -} - -static void lp8788_buck2_set_dvs(struct lp8788_buck *buck) -{ - struct lp8788_buck2_dvs *dvs = (struct lp8788_buck2_dvs *)buck->dvs; - enum lp8788_dvs_state pin1, pin2; - - if (!dvs) - return; - - switch (dvs->vsel) { - case DVS_SEL_V0: - pin1 = DVS_LOW; - pin2 = DVS_LOW; - break; - case DVS_SEL_V1: - pin1 = DVS_HIGH; - pin2 = DVS_LOW; - break; - case DVS_SEL_V2: - pin1 = DVS_LOW; - pin2 = DVS_HIGH; - break; - case DVS_SEL_V3: - pin1 = DVS_HIGH; - pin2 = DVS_HIGH; - break; - default: - return; - } - - if (gpio_is_valid(dvs->gpio[0])) - gpio_set_value(dvs->gpio[0], pin1); - - if (gpio_is_valid(dvs->gpio[1])) - gpio_set_value(dvs->gpio[1], pin2); -} - -static void lp8788_set_dvs(struct lp8788_buck *buck, enum lp8788_buck_id id) -{ - switch (id) { - case BUCK1: - lp8788_buck1_set_dvs(buck); - break; - case BUCK2: - lp8788_buck2_set_dvs(buck); - break; - default: - break; - } -} - -static enum lp8788_dvs_mode -lp8788_get_buck_dvs_ctrl_mode(struct lp8788_buck *buck, enum lp8788_buck_id id) -{ - u8 val, mask; - - switch (id) { - case BUCK1: - mask = LP8788_BUCK1_DVS_SEL_M; - break; - case BUCK2: - mask = LP8788_BUCK2_DVS_SEL_M; - break; - default: - return REGISTER; - } - - lp8788_read_byte(buck->lp, LP8788_BUCK_DVS_SEL, &val); - - return val & mask ? REGISTER : EXTPIN; -} - -static bool lp8788_is_valid_buck_addr(u8 addr) -{ - switch (addr) { - case LP8788_BUCK1_VOUT0: - case LP8788_BUCK1_VOUT1: - case LP8788_BUCK1_VOUT2: - case LP8788_BUCK1_VOUT3: - case LP8788_BUCK2_VOUT0: - case LP8788_BUCK2_VOUT1: - case LP8788_BUCK2_VOUT2: - case LP8788_BUCK2_VOUT3: - return true; - default: - return false; - } -} - -static u8 lp8788_select_buck_vout_addr(struct lp8788_buck *buck, - enum lp8788_buck_id id) -{ - enum lp8788_dvs_mode mode = lp8788_get_buck_dvs_ctrl_mode(buck, id); - struct lp8788_buck1_dvs *b1_dvs; - struct lp8788_buck2_dvs *b2_dvs; - u8 val, idx, addr; - int pin1, pin2; - - switch (id) { - case BUCK1: - if (mode == EXTPIN) { - b1_dvs = (struct lp8788_buck1_dvs *)buck->dvs; - if (!b1_dvs) - goto err; - - idx = gpio_get_value(b1_dvs->gpio) ? 
1 : 0; - } else { - lp8788_read_byte(buck->lp, LP8788_BUCK_DVS_SEL, &val); - idx = (val & LP8788_BUCK1_DVS_M) >> LP8788_BUCK1_DVS_S; - } - addr = buck1_vout_addr[idx]; - break; - case BUCK2: - if (mode == EXTPIN) { - b2_dvs = (struct lp8788_buck2_dvs *)buck->dvs; - if (!b2_dvs) - goto err; - - pin1 = gpio_get_value(b2_dvs->gpio[0]); - pin2 = gpio_get_value(b2_dvs->gpio[1]); - - if (pin1 == PIN_LOW && pin2 == PIN_LOW) - idx = 0; - else if (pin1 == PIN_LOW && pin2 == PIN_HIGH) - idx = 2; - else if (pin1 == PIN_HIGH && pin2 == PIN_LOW) - idx = 1; - else - idx = 3; - } else { - lp8788_read_byte(buck->lp, LP8788_BUCK_DVS_SEL, &val); - idx = (val & LP8788_BUCK2_DVS_M) >> LP8788_BUCK2_DVS_S; - } - addr = buck2_vout_addr[idx]; - break; - default: - goto err; - } - - return addr; -err: - return INVALID_ADDR; -} - -static int lp8788_buck12_set_voltage_sel(struct regulator_dev *rdev, - unsigned selector) -{ - struct lp8788_buck *buck = rdev_get_drvdata(rdev); - enum lp8788_buck_id id = rdev_get_id(rdev); - u8 addr; - - if (buck->dvs) - lp8788_set_dvs(buck, id); - - addr = lp8788_select_buck_vout_addr(buck, id); - if (!lp8788_is_valid_buck_addr(addr)) - return -EINVAL; - - return lp8788_update_bits(buck->lp, addr, LP8788_VOUT_M, selector); -} - -static int lp8788_buck12_get_voltage_sel(struct regulator_dev *rdev) -{ - struct lp8788_buck *buck = rdev_get_drvdata(rdev); - enum lp8788_buck_id id = rdev_get_id(rdev); - int ret; - u8 val, addr; - - addr = lp8788_select_buck_vout_addr(buck, id); - if (!lp8788_is_valid_buck_addr(addr)) - return -EINVAL; - - ret = lp8788_read_byte(buck->lp, addr, &val); - if (ret) - return ret; - - return val & LP8788_VOUT_M; -} - -static int lp8788_buck_enable_time(struct regulator_dev *rdev) -{ - struct lp8788_buck *buck = rdev_get_drvdata(rdev); - enum lp8788_buck_id id = rdev_get_id(rdev); - u8 val, addr = LP8788_BUCK1_TIMESTEP + id; - - if (lp8788_read_byte(buck->lp, addr, &val)) - return -EINVAL; - - val = (val & LP8788_STARTUP_TIME_M) >> LP8788_STARTUP_TIME_S; - - return ENABLE_TIME_USEC * val; -} - -static int lp8788_buck_set_mode(struct regulator_dev *rdev, unsigned int mode) -{ - struct lp8788_buck *buck = rdev_get_drvdata(rdev); - struct lp8788_pwm_map *pmap = buck->pmap; - u8 val; - - if (!pmap) - return -EINVAL; - - switch (mode) { - case REGULATOR_MODE_FAST: - val = LP8788_FORCE_PWM << pmap->shift; - break; - case REGULATOR_MODE_NORMAL: - val = LP8788_AUTO_PWM << pmap->shift; - break; - default: - return -EINVAL; - } - - return lp8788_update_bits(buck->lp, LP8788_BUCK_PWM, pmap->mask, val); -} - -static unsigned int lp8788_buck_get_mode(struct regulator_dev *rdev) -{ - struct lp8788_buck *buck = rdev_get_drvdata(rdev); - struct lp8788_pwm_map *pmap = buck->pmap; - u8 val; - int ret; - - if (!pmap) - return -EINVAL; - - ret = lp8788_read_byte(buck->lp, LP8788_BUCK_PWM, &val); - if (ret) - return ret; - - return val & pmap->mask ? 
REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL; -} - -static struct regulator_ops lp8788_buck12_ops = { - .list_voltage = regulator_list_voltage_table, - .set_voltage_sel = lp8788_buck12_set_voltage_sel, - .get_voltage_sel = lp8788_buck12_get_voltage_sel, - .enable = regulator_enable_regmap, - .disable = regulator_disable_regmap, - .is_enabled = regulator_is_enabled_regmap, - .enable_time = lp8788_buck_enable_time, - .set_mode = lp8788_buck_set_mode, - .get_mode = lp8788_buck_get_mode, -}; - -static struct regulator_ops lp8788_buck34_ops = { - .list_voltage = regulator_list_voltage_table, - .set_voltage_sel = regulator_set_voltage_sel_regmap, - .get_voltage_sel = regulator_get_voltage_sel_regmap, - .enable = regulator_enable_regmap, - .disable = regulator_disable_regmap, - .is_enabled = regulator_is_enabled_regmap, - .enable_time = lp8788_buck_enable_time, - .set_mode = lp8788_buck_set_mode, - .get_mode = lp8788_buck_get_mode, -}; - -static struct regulator_desc lp8788_buck_desc[] = { - { - .name = "buck1", - .id = BUCK1, - .ops = &lp8788_buck12_ops, - .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl), - .volt_table = lp8788_buck_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .enable_reg = LP8788_EN_BUCK, - .enable_mask = LP8788_EN_BUCK1_M, - }, - { - .name = "buck2", - .id = BUCK2, - .ops = &lp8788_buck12_ops, - .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl), - .volt_table = lp8788_buck_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .enable_reg = LP8788_EN_BUCK, - .enable_mask = LP8788_EN_BUCK2_M, - }, - { - .name = "buck3", - .id = BUCK3, - .ops = &lp8788_buck34_ops, - .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl), - .volt_table = lp8788_buck_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP8788_BUCK3_VOUT, - .vsel_mask = LP8788_VOUT_M, - .enable_reg = LP8788_EN_BUCK, - .enable_mask = LP8788_EN_BUCK3_M, - }, - { - .name = "buck4", - .id = BUCK4, - .ops = &lp8788_buck34_ops, - .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl), - .volt_table = lp8788_buck_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP8788_BUCK4_VOUT, - .vsel_mask = LP8788_VOUT_M, - .enable_reg = LP8788_EN_BUCK, - .enable_mask = LP8788_EN_BUCK4_M, - }, -}; - -static int lp8788_set_default_dvs_ctrl_mode(struct lp8788 *lp, - enum lp8788_buck_id id) -{ - u8 mask, val; - - switch (id) { - case BUCK1: - mask = LP8788_BUCK1_DVS_SEL_M; - val = LP8788_BUCK1_DVS_I2C; - break; - case BUCK2: - mask = LP8788_BUCK2_DVS_SEL_M; - val = LP8788_BUCK2_DVS_I2C; - break; - default: - return 0; - } - - return lp8788_update_bits(lp, LP8788_BUCK_DVS_SEL, mask, val); -} - -static int _gpio_request(struct lp8788_buck *buck, int gpio, char *name) -{ - struct device *dev = buck->lp->dev; - - if (!gpio_is_valid(gpio)) { - dev_err(dev, "invalid gpio: %d\n", gpio); - return -EINVAL; - } - - return devm_gpio_request_one(dev, gpio, DVS_LOW, name); -} - -static int lp8788_dvs_gpio_request(struct lp8788_buck *buck, - enum lp8788_buck_id id) -{ - struct lp8788_platform_data *pdata = buck->lp->pdata; - char *b1_name = "LP8788_B1_DVS"; - char *b2_name[] = { "LP8788_B2_DVS1", "LP8788_B2_DVS2" }; - int i, gpio, ret; - - switch (id) { - case BUCK1: - gpio = pdata->buck1_dvs->gpio; - ret = _gpio_request(buck, gpio, b1_name); - if (ret) - return ret; - - buck->dvs = pdata->buck1_dvs; - break; - case BUCK2: - for (i = 0 ; i < LP8788_NUM_BUCK2_DVS ; i++) { - gpio = pdata->buck2_dvs->gpio[i]; - ret = _gpio_request(buck, gpio, b2_name[i]); - if (ret) - return ret; - } - buck->dvs = pdata->buck2_dvs; - break; - 
default: - break; - } - - return 0; -} - -static int lp8788_init_dvs(struct lp8788_buck *buck, enum lp8788_buck_id id) -{ - struct lp8788_platform_data *pdata = buck->lp->pdata; - u8 mask[] = { LP8788_BUCK1_DVS_SEL_M, LP8788_BUCK2_DVS_SEL_M }; - u8 val[] = { LP8788_BUCK1_DVS_PIN, LP8788_BUCK2_DVS_PIN }; - - /* no dvs for buck3, 4 */ - if (id == BUCK3 || id == BUCK4) - return 0; - - /* no dvs platform data, then dvs will be selected by I2C registers */ - if (!pdata) - goto set_default_dvs_mode; - - if ((id == BUCK1 && !pdata->buck1_dvs) || - (id == BUCK2 && !pdata->buck2_dvs)) - goto set_default_dvs_mode; - - if (lp8788_dvs_gpio_request(buck, id)) - goto set_default_dvs_mode; - - return lp8788_update_bits(buck->lp, LP8788_BUCK_DVS_SEL, mask[id], - val[id]); - -set_default_dvs_mode: - return lp8788_set_default_dvs_ctrl_mode(buck->lp, id); -} - -static __devinit int lp8788_buck_probe(struct platform_device *pdev) -{ - struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent); - int id = pdev->id; - struct lp8788_buck *buck; - struct regulator_config cfg = { }; - struct regulator_dev *rdev; - int ret; - - buck = devm_kzalloc(lp->dev, sizeof(struct lp8788_buck), GFP_KERNEL); - if (!buck) - return -ENOMEM; - - buck->lp = lp; - buck->pmap = &buck_pmap[id]; - - ret = lp8788_init_dvs(buck, id); - if (ret) - return ret; - - cfg.dev = lp->dev; - cfg.init_data = lp->pdata ? lp->pdata->buck_data[id] : NULL; - cfg.driver_data = buck; - cfg.regmap = lp->regmap; - - rdev = regulator_register(&lp8788_buck_desc[id], &cfg); - if (IS_ERR(rdev)) { - ret = PTR_ERR(rdev); - dev_err(lp->dev, "BUCK%d regulator register err = %d\n", - id + 1, ret); - return ret; - } - - buck->regulator = rdev; - platform_set_drvdata(pdev, buck); - - return 0; -} - -static int __devexit lp8788_buck_remove(struct platform_device *pdev) -{ - struct lp8788_buck *buck = platform_get_drvdata(pdev); - - platform_set_drvdata(pdev, NULL); - regulator_unregister(buck->regulator); - - return 0; -} - -static struct platform_driver lp8788_buck_driver = { - .probe = lp8788_buck_probe, - .remove = __devexit_p(lp8788_buck_remove), - .driver = { - .name = LP8788_DEV_BUCK, - .owner = THIS_MODULE, - }, -}; - -static int __init lp8788_buck_init(void) -{ - return platform_driver_register(&lp8788_buck_driver); -} -subsys_initcall(lp8788_buck_init); - -static void __exit lp8788_buck_exit(void) -{ - platform_driver_unregister(&lp8788_buck_driver); -} -module_exit(lp8788_buck_exit); - -MODULE_DESCRIPTION("TI LP8788 BUCK Driver"); -MODULE_AUTHOR("Milo Kim"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:lp8788-buck"); diff --git a/trunk/drivers/regulator/lp8788-ldo.c b/trunk/drivers/regulator/lp8788-ldo.c deleted file mode 100644 index d2122e41a96d..000000000000 --- a/trunk/drivers/regulator/lp8788-ldo.c +++ /dev/null @@ -1,842 +0,0 @@ -/* - * TI LP8788 MFD - ldo regulator driver - * - * Copyright 2012 Texas Instruments - * - * Author: Milo(Woogyom) Kim - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include - -/* register address */ -#define LP8788_EN_LDO_A 0x0D /* DLDO 1 ~ 8 */ -#define LP8788_EN_LDO_B 0x0E /* DLDO 9 ~ 12, ALDO 1 ~ 4 */ -#define LP8788_EN_LDO_C 0x0F /* ALDO 5 ~ 10 */ -#define LP8788_EN_SEL 0x10 -#define LP8788_DLDO1_VOUT 0x2E -#define LP8788_DLDO2_VOUT 0x2F -#define LP8788_DLDO3_VOUT 0x30 -#define LP8788_DLDO4_VOUT 0x31 -#define LP8788_DLDO5_VOUT 0x32 -#define LP8788_DLDO6_VOUT 0x33 -#define LP8788_DLDO7_VOUT 0x34 -#define LP8788_DLDO8_VOUT 0x35 -#define LP8788_DLDO9_VOUT 0x36 -#define LP8788_DLDO10_VOUT 0x37 -#define LP8788_DLDO11_VOUT 0x38 -#define LP8788_DLDO12_VOUT 0x39 -#define LP8788_ALDO1_VOUT 0x3A -#define LP8788_ALDO2_VOUT 0x3B -#define LP8788_ALDO3_VOUT 0x3C -#define LP8788_ALDO4_VOUT 0x3D -#define LP8788_ALDO5_VOUT 0x3E -#define LP8788_ALDO6_VOUT 0x3F -#define LP8788_ALDO7_VOUT 0x40 -#define LP8788_ALDO8_VOUT 0x41 -#define LP8788_ALDO9_VOUT 0x42 -#define LP8788_ALDO10_VOUT 0x43 -#define LP8788_DLDO1_TIMESTEP 0x44 - -/* mask/shift bits */ -#define LP8788_EN_DLDO1_M BIT(0) /* Addr 0Dh ~ 0Fh */ -#define LP8788_EN_DLDO2_M BIT(1) -#define LP8788_EN_DLDO3_M BIT(2) -#define LP8788_EN_DLDO4_M BIT(3) -#define LP8788_EN_DLDO5_M BIT(4) -#define LP8788_EN_DLDO6_M BIT(5) -#define LP8788_EN_DLDO7_M BIT(6) -#define LP8788_EN_DLDO8_M BIT(7) -#define LP8788_EN_DLDO9_M BIT(0) -#define LP8788_EN_DLDO10_M BIT(1) -#define LP8788_EN_DLDO11_M BIT(2) -#define LP8788_EN_DLDO12_M BIT(3) -#define LP8788_EN_ALDO1_M BIT(4) -#define LP8788_EN_ALDO2_M BIT(5) -#define LP8788_EN_ALDO3_M BIT(6) -#define LP8788_EN_ALDO4_M BIT(7) -#define LP8788_EN_ALDO5_M BIT(0) -#define LP8788_EN_ALDO6_M BIT(1) -#define LP8788_EN_ALDO7_M BIT(2) -#define LP8788_EN_ALDO8_M BIT(3) -#define LP8788_EN_ALDO9_M BIT(4) -#define LP8788_EN_ALDO10_M BIT(5) -#define LP8788_EN_SEL_DLDO911_M BIT(0) /* Addr 10h */ -#define LP8788_EN_SEL_DLDO7_M BIT(1) -#define LP8788_EN_SEL_ALDO7_M BIT(2) -#define LP8788_EN_SEL_ALDO5_M BIT(3) -#define LP8788_EN_SEL_ALDO234_M BIT(4) -#define LP8788_EN_SEL_ALDO1_M BIT(5) -#define LP8788_VOUT_5BIT_M 0x1F /* Addr 2Eh ~ 43h */ -#define LP8788_VOUT_4BIT_M 0x0F -#define LP8788_VOUT_3BIT_M 0x07 -#define LP8788_VOUT_1BIT_M 0x01 -#define LP8788_STARTUP_TIME_M 0xF8 /* Addr 44h ~ 59h */ -#define LP8788_STARTUP_TIME_S 3 - -#define ENABLE_TIME_USEC 32 -#define ENABLE GPIOF_OUT_INIT_HIGH -#define DISABLE GPIOF_OUT_INIT_LOW - -enum lp8788_enable_mode { - REGISTER, - EXTPIN, -}; - -enum lp8788_ldo_id { - DLDO1, - DLDO2, - DLDO3, - DLDO4, - DLDO5, - DLDO6, - DLDO7, - DLDO8, - DLDO9, - DLDO10, - DLDO11, - DLDO12, - ALDO1, - ALDO2, - ALDO3, - ALDO4, - ALDO5, - ALDO6, - ALDO7, - ALDO8, - ALDO9, - ALDO10, -}; - -struct lp8788_ldo { - struct lp8788 *lp; - struct regulator_desc *desc; - struct regulator_dev *regulator; - struct lp8788_ldo_enable_pin *en_pin; -}; - -/* DLDO 1, 2, 3, 9 voltage table */ -const int lp8788_dldo1239_vtbl[] = { - 1800000, 1900000, 2000000, 2100000, 2200000, 2300000, 2400000, 2500000, - 2600000, 2700000, 2800000, 2900000, 3000000, 2850000, 2850000, 2850000, - 2850000, 2850000, 2850000, 2850000, 2850000, 2850000, 2850000, 2850000, - 2850000, 2850000, 2850000, 2850000, 2850000, 2850000, 2850000, 2850000, -}; - -/* DLDO 4 voltage table */ -static const int lp8788_dldo4_vtbl[] = { 1800000, 3000000 }; - -/* DLDO 5, 7, 8 and ALDO 6 voltage table */ -static const int lp8788_dldo578_aldo6_vtbl[] = { - 1800000, 1900000, 2000000, 2100000, 2200000, 2300000, 2400000, 2500000, - 2600000, 2700000, 2800000, 2900000, 3000000, 
3000000, 3000000, 3000000, -}; - -/* DLDO 6 voltage table */ -static const int lp8788_dldo6_vtbl[] = { - 3000000, 3100000, 3200000, 3300000, 3400000, 3500000, 3600000, 3600000, -}; - -/* DLDO 10, 11 voltage table */ -static const int lp8788_dldo1011_vtbl[] = { - 1100000, 1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, - 1500000, 1500000, 1500000, 1500000, 1500000, 1500000, 1500000, 1500000, -}; - -/* ALDO 1 voltage table */ -static const int lp8788_aldo1_vtbl[] = { 1800000, 2850000 }; - -/* ALDO 7 voltage table */ -static const int lp8788_aldo7_vtbl[] = { - 1200000, 1300000, 1400000, 1500000, 1600000, 1700000, 1800000, 1800000, -}; - -static enum lp8788_ldo_id lp8788_dldo_id[] = { - DLDO1, - DLDO2, - DLDO3, - DLDO4, - DLDO5, - DLDO6, - DLDO7, - DLDO8, - DLDO9, - DLDO10, - DLDO11, - DLDO12, -}; - -static enum lp8788_ldo_id lp8788_aldo_id[] = { - ALDO1, - ALDO2, - ALDO3, - ALDO4, - ALDO5, - ALDO6, - ALDO7, - ALDO8, - ALDO9, - ALDO10, -}; - -/* DLDO 7, 9 and 11, ALDO 1 ~ 5 and 7 - : can be enabled either by external pin or by i2c register */ -static enum lp8788_enable_mode -lp8788_get_ldo_enable_mode(struct lp8788_ldo *ldo, enum lp8788_ldo_id id) -{ - int ret; - u8 val, mask; - - ret = lp8788_read_byte(ldo->lp, LP8788_EN_SEL, &val); - if (ret) - return ret; - - switch (id) { - case DLDO7: - mask = LP8788_EN_SEL_DLDO7_M; - break; - case DLDO9: - case DLDO11: - mask = LP8788_EN_SEL_DLDO911_M; - break; - case ALDO1: - mask = LP8788_EN_SEL_ALDO1_M; - break; - case ALDO2 ... ALDO4: - mask = LP8788_EN_SEL_ALDO234_M; - break; - case ALDO5: - mask = LP8788_EN_SEL_ALDO5_M; - break; - case ALDO7: - mask = LP8788_EN_SEL_ALDO7_M; - break; - default: - return REGISTER; - } - - return val & mask ? EXTPIN : REGISTER; -} - -static int lp8788_ldo_ctrl_by_extern_pin(struct lp8788_ldo *ldo, int pinstate) -{ - struct lp8788_ldo_enable_pin *pin = ldo->en_pin; - - if (!pin) - return -EINVAL; - - if (gpio_is_valid(pin->gpio)) - gpio_set_value(pin->gpio, pinstate); - - return 0; -} - -static int lp8788_ldo_is_enabled_by_extern_pin(struct lp8788_ldo *ldo) -{ - struct lp8788_ldo_enable_pin *pin = ldo->en_pin; - - if (!pin) - return -EINVAL; - - return gpio_get_value(pin->gpio) ? 
1 : 0; -} - -static int lp8788_ldo_enable(struct regulator_dev *rdev) -{ - struct lp8788_ldo *ldo = rdev_get_drvdata(rdev); - enum lp8788_ldo_id id = rdev_get_id(rdev); - enum lp8788_enable_mode mode = lp8788_get_ldo_enable_mode(ldo, id); - - switch (mode) { - case EXTPIN: - return lp8788_ldo_ctrl_by_extern_pin(ldo, ENABLE); - case REGISTER: - return regulator_enable_regmap(rdev); - default: - return -EINVAL; - } -} - -static int lp8788_ldo_disable(struct regulator_dev *rdev) -{ - struct lp8788_ldo *ldo = rdev_get_drvdata(rdev); - enum lp8788_ldo_id id = rdev_get_id(rdev); - enum lp8788_enable_mode mode = lp8788_get_ldo_enable_mode(ldo, id); - - switch (mode) { - case EXTPIN: - return lp8788_ldo_ctrl_by_extern_pin(ldo, DISABLE); - case REGISTER: - return regulator_disable_regmap(rdev); - default: - return -EINVAL; - } -} - -static int lp8788_ldo_is_enabled(struct regulator_dev *rdev) -{ - struct lp8788_ldo *ldo = rdev_get_drvdata(rdev); - enum lp8788_ldo_id id = rdev_get_id(rdev); - enum lp8788_enable_mode mode = lp8788_get_ldo_enable_mode(ldo, id); - - switch (mode) { - case EXTPIN: - return lp8788_ldo_is_enabled_by_extern_pin(ldo); - case REGISTER: - return regulator_is_enabled_regmap(rdev); - default: - return -EINVAL; - } -} - -static int lp8788_ldo_enable_time(struct regulator_dev *rdev) -{ - struct lp8788_ldo *ldo = rdev_get_drvdata(rdev); - enum lp8788_ldo_id id = rdev_get_id(rdev); - u8 val, addr = LP8788_DLDO1_TIMESTEP + id; - - if (lp8788_read_byte(ldo->lp, addr, &val)) - return -EINVAL; - - val = (val & LP8788_STARTUP_TIME_M) >> LP8788_STARTUP_TIME_S; - - return ENABLE_TIME_USEC * val; -} - -static int lp8788_ldo_fixed_get_voltage(struct regulator_dev *rdev) -{ - enum lp8788_ldo_id id = rdev_get_id(rdev); - - switch (id) { - case ALDO2 ... ALDO5: - return 2850000; - case DLDO12: - case ALDO8 ... 
ALDO9: - return 2500000; - case ALDO10: - return 1100000; - default: - return -EINVAL; - } -} - -static struct regulator_ops lp8788_ldo_voltage_table_ops = { - .list_voltage = regulator_list_voltage_table, - .set_voltage_sel = regulator_set_voltage_sel_regmap, - .get_voltage_sel = regulator_get_voltage_sel_regmap, - .enable = lp8788_ldo_enable, - .disable = lp8788_ldo_disable, - .is_enabled = lp8788_ldo_is_enabled, - .enable_time = lp8788_ldo_enable_time, -}; - -static struct regulator_ops lp8788_ldo_voltage_fixed_ops = { - .get_voltage = lp8788_ldo_fixed_get_voltage, - .enable = lp8788_ldo_enable, - .disable = lp8788_ldo_disable, - .is_enabled = lp8788_ldo_is_enabled, - .enable_time = lp8788_ldo_enable_time, -}; - -static struct regulator_desc lp8788_dldo_desc[] = { - { - .name = "dldo1", - .id = DLDO1, - .ops = &lp8788_ldo_voltage_table_ops, - .n_voltages = ARRAY_SIZE(lp8788_dldo1239_vtbl), - .volt_table = lp8788_dldo1239_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP8788_DLDO1_VOUT, - .vsel_mask = LP8788_VOUT_5BIT_M, - .enable_reg = LP8788_EN_LDO_A, - .enable_mask = LP8788_EN_DLDO1_M, - }, - { - .name = "dldo2", - .id = DLDO2, - .ops = &lp8788_ldo_voltage_table_ops, - .n_voltages = ARRAY_SIZE(lp8788_dldo1239_vtbl), - .volt_table = lp8788_dldo1239_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP8788_DLDO2_VOUT, - .vsel_mask = LP8788_VOUT_5BIT_M, - .enable_reg = LP8788_EN_LDO_A, - .enable_mask = LP8788_EN_DLDO2_M, - }, - { - .name = "dldo3", - .id = DLDO3, - .ops = &lp8788_ldo_voltage_table_ops, - .n_voltages = ARRAY_SIZE(lp8788_dldo1239_vtbl), - .volt_table = lp8788_dldo1239_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP8788_DLDO3_VOUT, - .vsel_mask = LP8788_VOUT_5BIT_M, - .enable_reg = LP8788_EN_LDO_A, - .enable_mask = LP8788_EN_DLDO3_M, - }, - { - .name = "dldo4", - .id = DLDO4, - .ops = &lp8788_ldo_voltage_table_ops, - .n_voltages = ARRAY_SIZE(lp8788_dldo4_vtbl), - .volt_table = lp8788_dldo4_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP8788_DLDO4_VOUT, - .vsel_mask = LP8788_VOUT_1BIT_M, - .enable_reg = LP8788_EN_LDO_A, - .enable_mask = LP8788_EN_DLDO4_M, - }, - { - .name = "dldo5", - .id = DLDO5, - .ops = &lp8788_ldo_voltage_table_ops, - .n_voltages = ARRAY_SIZE(lp8788_dldo578_aldo6_vtbl), - .volt_table = lp8788_dldo578_aldo6_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP8788_DLDO5_VOUT, - .vsel_mask = LP8788_VOUT_4BIT_M, - .enable_reg = LP8788_EN_LDO_A, - .enable_mask = LP8788_EN_DLDO5_M, - }, - { - .name = "dldo6", - .id = DLDO6, - .ops = &lp8788_ldo_voltage_table_ops, - .n_voltages = ARRAY_SIZE(lp8788_dldo6_vtbl), - .volt_table = lp8788_dldo6_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP8788_DLDO6_VOUT, - .vsel_mask = LP8788_VOUT_3BIT_M, - .enable_reg = LP8788_EN_LDO_A, - .enable_mask = LP8788_EN_DLDO6_M, - }, - { - .name = "dldo7", - .id = DLDO7, - .ops = &lp8788_ldo_voltage_table_ops, - .n_voltages = ARRAY_SIZE(lp8788_dldo578_aldo6_vtbl), - .volt_table = lp8788_dldo578_aldo6_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP8788_DLDO7_VOUT, - .vsel_mask = LP8788_VOUT_4BIT_M, - .enable_reg = LP8788_EN_LDO_A, - .enable_mask = LP8788_EN_DLDO7_M, - }, - { - .name = "dldo8", - .id = DLDO8, - .ops = &lp8788_ldo_voltage_table_ops, - .n_voltages = ARRAY_SIZE(lp8788_dldo578_aldo6_vtbl), - .volt_table = lp8788_dldo578_aldo6_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg 
= LP8788_DLDO8_VOUT, - .vsel_mask = LP8788_VOUT_4BIT_M, - .enable_reg = LP8788_EN_LDO_A, - .enable_mask = LP8788_EN_DLDO8_M, - }, - { - .name = "dldo9", - .id = DLDO9, - .ops = &lp8788_ldo_voltage_table_ops, - .n_voltages = ARRAY_SIZE(lp8788_dldo1239_vtbl), - .volt_table = lp8788_dldo1239_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP8788_DLDO9_VOUT, - .vsel_mask = LP8788_VOUT_5BIT_M, - .enable_reg = LP8788_EN_LDO_B, - .enable_mask = LP8788_EN_DLDO9_M, - }, - { - .name = "dldo10", - .id = DLDO10, - .ops = &lp8788_ldo_voltage_table_ops, - .n_voltages = ARRAY_SIZE(lp8788_dldo1011_vtbl), - .volt_table = lp8788_dldo1011_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP8788_DLDO10_VOUT, - .vsel_mask = LP8788_VOUT_4BIT_M, - .enable_reg = LP8788_EN_LDO_B, - .enable_mask = LP8788_EN_DLDO10_M, - }, - { - .name = "dldo11", - .id = DLDO11, - .ops = &lp8788_ldo_voltage_table_ops, - .n_voltages = ARRAY_SIZE(lp8788_dldo1011_vtbl), - .volt_table = lp8788_dldo1011_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP8788_DLDO11_VOUT, - .vsel_mask = LP8788_VOUT_4BIT_M, - .enable_reg = LP8788_EN_LDO_B, - .enable_mask = LP8788_EN_DLDO11_M, - }, - { - .name = "dldo12", - .id = DLDO12, - .ops = &lp8788_ldo_voltage_fixed_ops, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .enable_reg = LP8788_EN_LDO_B, - .enable_mask = LP8788_EN_DLDO12_M, - }, -}; - -static struct regulator_desc lp8788_aldo_desc[] = { - { - .name = "aldo1", - .id = ALDO1, - .ops = &lp8788_ldo_voltage_table_ops, - .n_voltages = ARRAY_SIZE(lp8788_aldo1_vtbl), - .volt_table = lp8788_aldo1_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP8788_ALDO1_VOUT, - .vsel_mask = LP8788_VOUT_1BIT_M, - .enable_reg = LP8788_EN_LDO_B, - .enable_mask = LP8788_EN_ALDO1_M, - }, - { - .name = "aldo2", - .id = ALDO2, - .ops = &lp8788_ldo_voltage_fixed_ops, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .enable_reg = LP8788_EN_LDO_B, - .enable_mask = LP8788_EN_ALDO2_M, - }, - { - .name = "aldo3", - .id = ALDO3, - .ops = &lp8788_ldo_voltage_fixed_ops, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .enable_reg = LP8788_EN_LDO_B, - .enable_mask = LP8788_EN_ALDO3_M, - }, - { - .name = "aldo4", - .id = ALDO4, - .ops = &lp8788_ldo_voltage_fixed_ops, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .enable_reg = LP8788_EN_LDO_B, - .enable_mask = LP8788_EN_ALDO4_M, - }, - { - .name = "aldo5", - .id = ALDO5, - .ops = &lp8788_ldo_voltage_fixed_ops, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .enable_reg = LP8788_EN_LDO_C, - .enable_mask = LP8788_EN_ALDO5_M, - }, - { - .name = "aldo6", - .id = ALDO6, - .ops = &lp8788_ldo_voltage_table_ops, - .n_voltages = ARRAY_SIZE(lp8788_dldo578_aldo6_vtbl), - .volt_table = lp8788_dldo578_aldo6_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP8788_ALDO6_VOUT, - .vsel_mask = LP8788_VOUT_4BIT_M, - .enable_reg = LP8788_EN_LDO_C, - .enable_mask = LP8788_EN_ALDO6_M, - }, - { - .name = "aldo7", - .id = ALDO7, - .ops = &lp8788_ldo_voltage_table_ops, - .n_voltages = ARRAY_SIZE(lp8788_aldo7_vtbl), - .volt_table = lp8788_aldo7_vtbl, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .vsel_reg = LP8788_ALDO7_VOUT, - .vsel_mask = LP8788_VOUT_3BIT_M, - .enable_reg = LP8788_EN_LDO_C, - .enable_mask = LP8788_EN_ALDO7_M, - }, - { - .name = "aldo8", - .id = ALDO8, - .ops = &lp8788_ldo_voltage_fixed_ops, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .enable_reg = LP8788_EN_LDO_C, - 
.enable_mask = LP8788_EN_ALDO8_M, - }, - { - .name = "aldo9", - .id = ALDO9, - .ops = &lp8788_ldo_voltage_fixed_ops, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .enable_reg = LP8788_EN_LDO_C, - .enable_mask = LP8788_EN_ALDO9_M, - }, - { - .name = "aldo10", - .id = ALDO10, - .ops = &lp8788_ldo_voltage_fixed_ops, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .enable_reg = LP8788_EN_LDO_C, - .enable_mask = LP8788_EN_ALDO10_M, - }, -}; - -static int lp8788_gpio_request_ldo_en(struct lp8788_ldo *ldo, - enum lp8788_ext_ldo_en_id id) -{ - struct device *dev = ldo->lp->dev; - struct lp8788_ldo_enable_pin *pin = ldo->en_pin; - int ret, gpio, pinstate; - char *name[] = { - [EN_ALDO1] = "LP8788_EN_ALDO1", - [EN_ALDO234] = "LP8788_EN_ALDO234", - [EN_ALDO5] = "LP8788_EN_ALDO5", - [EN_ALDO7] = "LP8788_EN_ALDO7", - [EN_DLDO7] = "LP8788_EN_DLDO7", - [EN_DLDO911] = "LP8788_EN_DLDO911", - }; - - gpio = pin->gpio; - if (!gpio_is_valid(gpio)) { - dev_err(dev, "invalid gpio: %d\n", gpio); - return -EINVAL; - } - - pinstate = pin->init_state; - ret = devm_gpio_request_one(dev, gpio, pinstate, name[id]); - if (ret == -EBUSY) { - dev_warn(dev, "gpio%d already used\n", gpio); - return 0; - } - - return ret; -} - -static int lp8788_config_ldo_enable_mode(struct lp8788_ldo *ldo, - enum lp8788_ldo_id id) -{ - int ret; - struct lp8788 *lp = ldo->lp; - struct lp8788_platform_data *pdata = lp->pdata; - enum lp8788_ext_ldo_en_id enable_id; - u8 en_mask[] = { - [EN_ALDO1] = LP8788_EN_SEL_ALDO1_M, - [EN_ALDO234] = LP8788_EN_SEL_ALDO234_M, - [EN_ALDO5] = LP8788_EN_SEL_ALDO5_M, - [EN_ALDO7] = LP8788_EN_SEL_ALDO7_M, - [EN_DLDO7] = LP8788_EN_SEL_DLDO7_M, - [EN_DLDO911] = LP8788_EN_SEL_DLDO911_M, - }; - u8 val[] = { - [EN_ALDO1] = 0 << 5, - [EN_ALDO234] = 0 << 4, - [EN_ALDO5] = 0 << 3, - [EN_ALDO7] = 0 << 2, - [EN_DLDO7] = 0 << 1, - [EN_DLDO911] = 0 << 0, - }; - - switch (id) { - case DLDO7: - enable_id = EN_DLDO7; - break; - case DLDO9: - case DLDO11: - enable_id = EN_DLDO911; - break; - case ALDO1: - enable_id = EN_ALDO1; - break; - case ALDO2 ... ALDO4: - enable_id = EN_ALDO234; - break; - case ALDO5: - enable_id = EN_ALDO5; - break; - case ALDO7: - enable_id = EN_ALDO7; - break; - default: - return 0; - } - - /* if no platform data for ldo pin, then set default enable mode */ - if (!pdata || !pdata->ldo_pin || !pdata->ldo_pin[enable_id]) - goto set_default_ldo_enable_mode; - - ldo->en_pin = pdata->ldo_pin[enable_id]; - - ret = lp8788_gpio_request_ldo_en(ldo, enable_id); - if (ret) - goto set_default_ldo_enable_mode; - - return ret; - -set_default_ldo_enable_mode: - return lp8788_update_bits(lp, LP8788_EN_SEL, en_mask[enable_id], - val[enable_id]); -} - -static __devinit int lp8788_dldo_probe(struct platform_device *pdev) -{ - struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent); - int id = pdev->id; - struct lp8788_ldo *ldo; - struct regulator_config cfg = { }; - struct regulator_dev *rdev; - int ret; - - ldo = devm_kzalloc(lp->dev, sizeof(struct lp8788_ldo), GFP_KERNEL); - if (!ldo) - return -ENOMEM; - - ldo->lp = lp; - ret = lp8788_config_ldo_enable_mode(ldo, lp8788_dldo_id[id]); - if (ret) - return ret; - - cfg.dev = lp->dev; - cfg.init_data = lp->pdata ? 
lp->pdata->dldo_data[id] : NULL; - cfg.driver_data = ldo; - cfg.regmap = lp->regmap; - - rdev = regulator_register(&lp8788_dldo_desc[id], &cfg); - if (IS_ERR(rdev)) { - ret = PTR_ERR(rdev); - dev_err(lp->dev, "DLDO%d regulator register err = %d\n", - id + 1, ret); - return ret; - } - - ldo->regulator = rdev; - platform_set_drvdata(pdev, ldo); - - return 0; -} - -static int __devexit lp8788_dldo_remove(struct platform_device *pdev) -{ - struct lp8788_ldo *ldo = platform_get_drvdata(pdev); - - platform_set_drvdata(pdev, NULL); - regulator_unregister(ldo->regulator); - - return 0; -} - -static struct platform_driver lp8788_dldo_driver = { - .probe = lp8788_dldo_probe, - .remove = __devexit_p(lp8788_dldo_remove), - .driver = { - .name = LP8788_DEV_DLDO, - .owner = THIS_MODULE, - }, -}; - -static __devinit int lp8788_aldo_probe(struct platform_device *pdev) -{ - struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent); - int id = pdev->id; - struct lp8788_ldo *ldo; - struct regulator_config cfg = { }; - struct regulator_dev *rdev; - int ret; - - ldo = devm_kzalloc(lp->dev, sizeof(struct lp8788_ldo), GFP_KERNEL); - if (!ldo) - return -ENOMEM; - - ldo->lp = lp; - ret = lp8788_config_ldo_enable_mode(ldo, lp8788_aldo_id[id]); - if (ret) - return ret; - - cfg.dev = lp->dev; - cfg.init_data = lp->pdata ? lp->pdata->aldo_data[id] : NULL; - cfg.driver_data = ldo; - cfg.regmap = lp->regmap; - - rdev = regulator_register(&lp8788_aldo_desc[id], &cfg); - if (IS_ERR(rdev)) { - ret = PTR_ERR(rdev); - dev_err(lp->dev, "ALDO%d regulator register err = %d\n", - id + 1, ret); - return ret; - } - - ldo->regulator = rdev; - platform_set_drvdata(pdev, ldo); - - return 0; -} - -static int __devexit lp8788_aldo_remove(struct platform_device *pdev) -{ - struct lp8788_ldo *ldo = platform_get_drvdata(pdev); - - platform_set_drvdata(pdev, NULL); - regulator_unregister(ldo->regulator); - - return 0; -} - -static struct platform_driver lp8788_aldo_driver = { - .probe = lp8788_aldo_probe, - .remove = __devexit_p(lp8788_aldo_remove), - .driver = { - .name = LP8788_DEV_ALDO, - .owner = THIS_MODULE, - }, -}; - -static int __init lp8788_ldo_init(void) -{ - int ret; - - ret = platform_driver_register(&lp8788_dldo_driver); - if (ret) - return ret; - - return platform_driver_register(&lp8788_aldo_driver); -} -subsys_initcall(lp8788_ldo_init); - -static void __exit lp8788_ldo_exit(void) -{ - platform_driver_unregister(&lp8788_aldo_driver); - platform_driver_unregister(&lp8788_dldo_driver); -} -module_exit(lp8788_ldo_exit); - -MODULE_DESCRIPTION("TI LP8788 LDO Driver"); -MODULE_AUTHOR("Milo Kim"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:lp8788-dldo"); -MODULE_ALIAS("platform:lp8788-aldo"); diff --git a/trunk/drivers/regulator/max1586.c b/trunk/drivers/regulator/max1586.c index f67af3c1b963..b9444ee08da9 100644 --- a/trunk/drivers/regulator/max1586.c +++ b/trunk/drivers/regulator/max1586.c @@ -47,14 +47,6 @@ struct max1586_data { struct regulator_dev *rdev[0]; }; -/* - * V6 voltage - * On I2C bus, sending a "x" byte to the max1586 means : - * set V6 to either 0V, 1.8V, 2.5V, 3V depending on (x & 0x3) - * As regulator framework doesn't accept voltages to be 0V, we use 1uV. - */ -static int v6_voltages_uv[] = { 1, 1800000, 2500000, 3000000 }; - /* * V3 voltage * On I2C bus, sending a "x" byte to the max1586 means : @@ -63,49 +55,113 @@ static int v6_voltages_uv[] = { 1, 1800000, 2500000, 3000000 }; * R24 and R25=100kOhm as described in the data sheet. 
* The gain is approximately: 1 + R24/R25 + R24/185.5kOhm */ -static int max1586_v3_set_voltage_sel(struct regulator_dev *rdev, - unsigned selector) +static int max1586_v3_calc_voltage(struct max1586_data *max1586, + unsigned selector) +{ + unsigned range_uV = max1586->max_uV - max1586->min_uV; + + return max1586->min_uV + (selector * range_uV / MAX1586_V3_MAX_VSEL); +} + +static int max1586_v3_set(struct regulator_dev *rdev, int min_uV, int max_uV, + unsigned *selector) { struct max1586_data *max1586 = rdev_get_drvdata(rdev); struct i2c_client *client = max1586->client; + unsigned range_uV = max1586->max_uV - max1586->min_uV; u8 v3_prog; + if (min_uV > max1586->max_uV || max_uV < max1586->min_uV) + return -EINVAL; + if (min_uV < max1586->min_uV) + min_uV = max1586->min_uV; + + *selector = DIV_ROUND_UP((min_uV - max1586->min_uV) * + MAX1586_V3_MAX_VSEL, range_uV); + if (max1586_v3_calc_voltage(max1586, *selector) > max_uV) + return -EINVAL; + dev_dbg(&client->dev, "changing voltage v3 to %dmv\n", - regulator_list_voltage_linear(rdev, selector) / 1000); + max1586_v3_calc_voltage(max1586, *selector) / 1000); - v3_prog = I2C_V3_SELECT | (u8) selector; + v3_prog = I2C_V3_SELECT | (u8) *selector; return i2c_smbus_write_byte(client, v3_prog); } -static int max1586_v6_set_voltage_sel(struct regulator_dev *rdev, - unsigned int selector) +static int max1586_v3_list(struct regulator_dev *rdev, unsigned selector) +{ + struct max1586_data *max1586 = rdev_get_drvdata(rdev); + + if (selector > MAX1586_V3_MAX_VSEL) + return -EINVAL; + return max1586_v3_calc_voltage(max1586, selector); +} + +/* + * V6 voltage + * On I2C bus, sending a "x" byte to the max1586 means : + * set V6 to either 0V, 1.8V, 2.5V, 3V depending on (x & 0x3) + * As regulator framework doesn't accept voltages to be 0V, we use 1uV. + */ +static int max1586_v6_calc_voltage(unsigned selector) +{ + static int voltages_uv[] = { 1, 1800000, 2500000, 3000000 }; + + return voltages_uv[selector]; +} + +static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV, + unsigned int *selector) { struct i2c_client *client = rdev_get_drvdata(rdev); u8 v6_prog; + if (min_uV < MAX1586_V6_MIN_UV || min_uV > MAX1586_V6_MAX_UV) + return -EINVAL; + if (max_uV < MAX1586_V6_MIN_UV || max_uV > MAX1586_V6_MAX_UV) + return -EINVAL; + + if (min_uV < 1800000) + *selector = 0; + else if (min_uV < 2500000) + *selector = 1; + else if (min_uV < 3000000) + *selector = 2; + else if (min_uV >= 3000000) + *selector = 3; + + if (max1586_v6_calc_voltage(*selector) > max_uV) + return -EINVAL; + dev_dbg(&client->dev, "changing voltage v6 to %dmv\n", - rdev->desc->volt_table[selector] / 1000); + max1586_v6_calc_voltage(*selector) / 1000); - v6_prog = I2C_V6_SELECT | (u8) selector; + v6_prog = I2C_V6_SELECT | (u8) *selector; return i2c_smbus_write_byte(client, v6_prog); } +static int max1586_v6_list(struct regulator_dev *rdev, unsigned selector) +{ + if (selector > MAX1586_V6_MAX_VSEL) + return -EINVAL; + return max1586_v6_calc_voltage(selector); +} + /* * The Maxim 1586 controls V3 and V6 voltages, but offers no way of reading back * the set up value. 
*/ static struct regulator_ops max1586_v3_ops = { - .set_voltage_sel = max1586_v3_set_voltage_sel, - .list_voltage = regulator_list_voltage_linear, - .map_voltage = regulator_map_voltage_linear, + .set_voltage = max1586_v3_set, + .list_voltage = max1586_v3_list, }; static struct regulator_ops max1586_v6_ops = { - .set_voltage_sel = max1586_v6_set_voltage_sel, - .list_voltage = regulator_list_voltage_table, + .set_voltage = max1586_v6_set, + .list_voltage = max1586_v6_list, }; -static struct regulator_desc max1586_reg[] = { +static const struct regulator_desc max1586_reg[] = { { .name = "Output_V3", .id = MAX1586_V3, @@ -120,7 +176,6 @@ static struct regulator_desc max1586_reg[] = { .ops = &max1586_v6_ops, .type = REGULATOR_VOLTAGE, .n_voltages = MAX1586_V6_MAX_VSEL + 1, - .volt_table = v6_voltages_uv, .owner = THIS_MODULE, }, }; @@ -158,13 +213,6 @@ static int __devinit max1586_pmic_probe(struct i2c_client *client, goto err; } - if (id == MAX1586_V3) { - max1586_reg[id].min_uV = max1586->min_uV; - max1586_reg[id].uV_step = - (max1586->max_uV - max1586->min_uV) / - MAX1586_V3_MAX_VSEL; - } - config.dev = &client->dev; config.init_data = pdata->subdevs[i].platform_data; config.driver_data = max1586; diff --git a/trunk/drivers/regulator/max77686.c b/trunk/drivers/regulator/max77686.c deleted file mode 100644 index c564af6f05a3..000000000000 --- a/trunk/drivers/regulator/max77686.c +++ /dev/null @@ -1,389 +0,0 @@ -/* - * max77686.c - Regulator driver for the Maxim 77686 - * - * Copyright (C) 2012 Samsung Electronics - * Chiwoong Byun - * Jonghwa Lee - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * This driver is based on max8997.c - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define MAX77686_LDO_MINUV 800000 -#define MAX77686_LDO_UVSTEP 50000 -#define MAX77686_LDO_LOW_MINUV 800000 -#define MAX77686_LDO_LOW_UVSTEP 25000 -#define MAX77686_BUCK_MINUV 750000 -#define MAX77686_BUCK_UVSTEP 50000 -#define MAX77686_RAMP_DELAY 100000 /* uV/us */ -#define MAX77686_DVS_RAMP_DELAY 27500 /* uV/us */ -#define MAX77686_DVS_MINUV 600000 -#define MAX77686_DVS_UVSTEP 12500 - -#define MAX77686_OPMODE_SHIFT 6 -#define MAX77686_OPMODE_BUCK234_SHIFT 4 -#define MAX77686_OPMODE_MASK 0x3 - -#define MAX77686_VSEL_MASK 0x3F -#define MAX77686_DVS_VSEL_MASK 0xFF - -#define MAX77686_RAMP_RATE_MASK 0xC0 - -#define MAX77686_REGULATORS MAX77686_REG_MAX -#define MAX77686_LDOS 26 - -enum max77686_ramp_rate { - RAMP_RATE_13P75MV, - RAMP_RATE_27P5MV, - RAMP_RATE_55MV, - RAMP_RATE_NO_CTRL, /* 100mV/us */ -}; - -struct max77686_data { - struct regulator_dev **rdev; -}; - -static int max77686_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) -{ - unsigned int ramp_value = RAMP_RATE_NO_CTRL; - - switch (ramp_delay) { - case 1 ... 
13750: - ramp_value = RAMP_RATE_13P75MV; - break; - case 13751 ... 27500: - ramp_value = RAMP_RATE_27P5MV; - break; - case 27501 ... 55000: - ramp_value = RAMP_RATE_55MV; - break; - case 55001 ... 100000: - break; - default: - pr_warn("%s: ramp_delay: %d not supported, setting 100000\n", - rdev->desc->name, ramp_delay); - } - - return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, - MAX77686_RAMP_RATE_MASK, ramp_value << 6); -} - -static struct regulator_ops max77686_ops = { - .list_voltage = regulator_list_voltage_linear, - .map_voltage = regulator_map_voltage_linear, - .is_enabled = regulator_is_enabled_regmap, - .enable = regulator_enable_regmap, - .disable = regulator_disable_regmap, - .get_voltage_sel = regulator_get_voltage_sel_regmap, - .set_voltage_sel = regulator_set_voltage_sel_regmap, - .set_voltage_time_sel = regulator_set_voltage_time_sel, -}; - -static struct regulator_ops max77686_buck_dvs_ops = { - .list_voltage = regulator_list_voltage_linear, - .map_voltage = regulator_map_voltage_linear, - .is_enabled = regulator_is_enabled_regmap, - .enable = regulator_enable_regmap, - .disable = regulator_disable_regmap, - .get_voltage_sel = regulator_get_voltage_sel_regmap, - .set_voltage_sel = regulator_set_voltage_sel_regmap, - .set_voltage_time_sel = regulator_set_voltage_time_sel, - .set_ramp_delay = max77686_set_ramp_delay, -}; - -#define regulator_desc_ldo(num) { \ - .name = "LDO"#num, \ - .id = MAX77686_LDO##num, \ - .ops = &max77686_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - .min_uV = MAX77686_LDO_MINUV, \ - .uV_step = MAX77686_LDO_UVSTEP, \ - .ramp_delay = MAX77686_RAMP_DELAY, \ - .n_voltages = MAX77686_VSEL_MASK + 1, \ - .vsel_reg = MAX77686_REG_LDO1CTRL1 + num - 1, \ - .vsel_mask = MAX77686_VSEL_MASK, \ - .enable_reg = MAX77686_REG_LDO1CTRL1 + num - 1, \ - .enable_mask = MAX77686_OPMODE_MASK \ - << MAX77686_OPMODE_SHIFT, \ -} -#define regulator_desc_ldo_low(num) { \ - .name = "LDO"#num, \ - .id = MAX77686_LDO##num, \ - .ops = &max77686_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - .min_uV = MAX77686_LDO_LOW_MINUV, \ - .uV_step = MAX77686_LDO_LOW_UVSTEP, \ - .ramp_delay = MAX77686_RAMP_DELAY, \ - .n_voltages = MAX77686_VSEL_MASK + 1, \ - .vsel_reg = MAX77686_REG_LDO1CTRL1 + num - 1, \ - .vsel_mask = MAX77686_VSEL_MASK, \ - .enable_reg = MAX77686_REG_LDO1CTRL1 + num - 1, \ - .enable_mask = MAX77686_OPMODE_MASK \ - << MAX77686_OPMODE_SHIFT, \ -} -#define regulator_desc_buck(num) { \ - .name = "BUCK"#num, \ - .id = MAX77686_BUCK##num, \ - .ops = &max77686_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - .min_uV = MAX77686_BUCK_MINUV, \ - .uV_step = MAX77686_BUCK_UVSTEP, \ - .ramp_delay = MAX77686_RAMP_DELAY, \ - .n_voltages = MAX77686_VSEL_MASK + 1, \ - .vsel_reg = MAX77686_REG_BUCK5OUT + (num - 5) * 2, \ - .vsel_mask = MAX77686_VSEL_MASK, \ - .enable_reg = MAX77686_REG_BUCK5CTRL + (num - 5) * 2, \ - .enable_mask = MAX77686_OPMODE_MASK, \ -} -#define regulator_desc_buck1(num) { \ - .name = "BUCK"#num, \ - .id = MAX77686_BUCK##num, \ - .ops = &max77686_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - .min_uV = MAX77686_BUCK_MINUV, \ - .uV_step = MAX77686_BUCK_UVSTEP, \ - .ramp_delay = MAX77686_RAMP_DELAY, \ - .n_voltages = MAX77686_VSEL_MASK + 1, \ - .vsel_reg = MAX77686_REG_BUCK1OUT, \ - .vsel_mask = MAX77686_VSEL_MASK, \ - .enable_reg = MAX77686_REG_BUCK1CTRL, \ - .enable_mask = MAX77686_OPMODE_MASK, \ -} -#define regulator_desc_buck_dvs(num) { \ - .name = "BUCK"#num, \ - .id = 
MAX77686_BUCK##num, \ - .ops = &max77686_buck_dvs_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - .min_uV = MAX77686_DVS_MINUV, \ - .uV_step = MAX77686_DVS_UVSTEP, \ - .ramp_delay = MAX77686_DVS_RAMP_DELAY, \ - .n_voltages = MAX77686_DVS_VSEL_MASK + 1, \ - .vsel_reg = MAX77686_REG_BUCK2DVS1 + (num - 2) * 10, \ - .vsel_mask = MAX77686_DVS_VSEL_MASK, \ - .enable_reg = MAX77686_REG_BUCK2CTRL1 + (num - 2) * 10, \ - .enable_mask = MAX77686_OPMODE_MASK \ - << MAX77686_OPMODE_BUCK234_SHIFT, \ -} - -static struct regulator_desc regulators[] = { - regulator_desc_ldo_low(1), - regulator_desc_ldo_low(2), - regulator_desc_ldo(3), - regulator_desc_ldo(4), - regulator_desc_ldo(5), - regulator_desc_ldo_low(6), - regulator_desc_ldo_low(7), - regulator_desc_ldo_low(8), - regulator_desc_ldo(9), - regulator_desc_ldo(10), - regulator_desc_ldo(11), - regulator_desc_ldo(12), - regulator_desc_ldo(13), - regulator_desc_ldo(14), - regulator_desc_ldo_low(15), - regulator_desc_ldo(16), - regulator_desc_ldo(17), - regulator_desc_ldo(18), - regulator_desc_ldo(19), - regulator_desc_ldo(20), - regulator_desc_ldo(21), - regulator_desc_ldo(22), - regulator_desc_ldo(23), - regulator_desc_ldo(24), - regulator_desc_ldo(25), - regulator_desc_ldo(26), - regulator_desc_buck1(1), - regulator_desc_buck_dvs(2), - regulator_desc_buck_dvs(3), - regulator_desc_buck_dvs(4), - regulator_desc_buck(5), - regulator_desc_buck(6), - regulator_desc_buck(7), - regulator_desc_buck(8), - regulator_desc_buck(9), -}; - -#ifdef CONFIG_OF -static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev, - struct max77686_platform_data *pdata) -{ - struct device_node *pmic_np, *regulators_np; - struct max77686_regulator_data *rdata; - struct of_regulator_match rmatch; - unsigned int i; - - pmic_np = iodev->dev->of_node; - regulators_np = of_find_node_by_name(pmic_np, "voltage-regulators"); - if (!regulators_np) { - dev_err(iodev->dev, "could not find regulators sub-node\n"); - return -EINVAL; - } - - pdata->num_regulators = ARRAY_SIZE(regulators); - rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) * - pdata->num_regulators, GFP_KERNEL); - if (!rdata) { - dev_err(iodev->dev, - "could not allocate memory for regulator data\n"); - return -ENOMEM; - } - - for (i = 0; i < pdata->num_regulators; i++) { - rmatch.name = regulators[i].name; - rmatch.init_data = NULL; - rmatch.of_node = NULL; - of_regulator_match(iodev->dev, regulators_np, &rmatch, 1); - rdata[i].initdata = rmatch.init_data; - } - - pdata->regulators = rdata; - - return 0; -} -#else -static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev, - struct max77686_platform_data *pdata) -{ - return 0; -} -#endif /* CONFIG_OF */ - -static __devinit int max77686_pmic_probe(struct platform_device *pdev) -{ - struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent); - struct max77686_platform_data *pdata = dev_get_platdata(iodev->dev); - struct regulator_dev **rdev; - struct max77686_data *max77686; - int i, size; - int ret = 0; - struct regulator_config config = { }; - - dev_dbg(&pdev->dev, "%s\n", __func__); - - if (!pdata) { - dev_err(&pdev->dev, "no platform data found for regulator\n"); - return -ENODEV; - } - - if (iodev->dev->of_node) { - ret = max77686_pmic_dt_parse_pdata(iodev, pdata); - if (ret) - return ret; - } - - if (pdata->num_regulators != MAX77686_REGULATORS) { - dev_err(&pdev->dev, - "Invalid initial data for regulator's initialiation\n"); - return -EINVAL; - } - - max77686 = devm_kzalloc(&pdev->dev, sizeof(struct max77686_data), - GFP_KERNEL); - if 
(!max77686) - return -ENOMEM; - - size = sizeof(struct regulator_dev *) * MAX77686_REGULATORS; - max77686->rdev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); - if (!max77686->rdev) - return -ENOMEM; - - rdev = max77686->rdev; - config.dev = &pdev->dev; - config.regmap = iodev->regmap; - platform_set_drvdata(pdev, max77686); - - for (i = 0; i < MAX77686_REGULATORS; i++) { - config.init_data = pdata->regulators[i].initdata; - - rdev[i] = regulator_register(®ulators[i], &config); - if (IS_ERR(rdev[i])) { - ret = PTR_ERR(rdev[i]); - dev_err(&pdev->dev, - "regulator init failed for %d\n", i); - rdev[i] = NULL; - goto err; - } - } - - return 0; -err: - while (--i >= 0) - regulator_unregister(rdev[i]); - return ret; -} - -static int __devexit max77686_pmic_remove(struct platform_device *pdev) -{ - struct max77686_data *max77686 = platform_get_drvdata(pdev); - struct regulator_dev **rdev = max77686->rdev; - int i; - - for (i = 0; i < MAX77686_REGULATORS; i++) - if (rdev[i]) - regulator_unregister(rdev[i]); - - return 0; -} - -static const struct platform_device_id max77686_pmic_id[] = { - {"max77686-pmic", 0}, - { }, -}; -MODULE_DEVICE_TABLE(platform, max77686_pmic_id); - -static struct platform_driver max77686_pmic_driver = { - .driver = { - .name = "max77686-pmic", - .owner = THIS_MODULE, - }, - .probe = max77686_pmic_probe, - .remove = __devexit_p(max77686_pmic_remove), - .id_table = max77686_pmic_id, -}; - -static int __init max77686_pmic_init(void) -{ - return platform_driver_register(&max77686_pmic_driver); -} -subsys_initcall(max77686_pmic_init); - -static void __exit max77686_pmic_cleanup(void) -{ - platform_driver_unregister(&max77686_pmic_driver); -} -module_exit(max77686_pmic_cleanup); - -MODULE_DESCRIPTION("MAXIM 77686 Regulator Driver"); -MODULE_AUTHOR("Chiwoong Byun "); -MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/regulator/max8952.c b/trunk/drivers/regulator/max8952.c index 355ca7bad9d5..910c9b26d499 100644 --- a/trunk/drivers/regulator/max8952.c +++ b/trunk/drivers/regulator/max8952.c @@ -51,6 +51,7 @@ struct max8952_data { bool vid0; bool vid1; + bool en; }; static int max8952_read_reg(struct max8952_data *max8952, u8 reg) @@ -79,6 +80,38 @@ static int max8952_list_voltage(struct regulator_dev *rdev, return (max8952->pdata->dvs_mode[selector] * 10 + 770) * 1000; } +static int max8952_is_enabled(struct regulator_dev *rdev) +{ + struct max8952_data *max8952 = rdev_get_drvdata(rdev); + return max8952->en; +} + +static int max8952_enable(struct regulator_dev *rdev) +{ + struct max8952_data *max8952 = rdev_get_drvdata(rdev); + + /* If not valid, assume "ALWAYS_HIGH" */ + if (gpio_is_valid(max8952->pdata->gpio_en)) + gpio_set_value(max8952->pdata->gpio_en, 1); + + max8952->en = true; + return 0; +} + +static int max8952_disable(struct regulator_dev *rdev) +{ + struct max8952_data *max8952 = rdev_get_drvdata(rdev); + + /* If not valid, assume "ALWAYS_HIGH" -> not permitted */ + if (gpio_is_valid(max8952->pdata->gpio_en)) + gpio_set_value(max8952->pdata->gpio_en, 0); + else + return -EPERM; + + max8952->en = false; + return 0; +} + static int max8952_get_voltage_sel(struct regulator_dev *rdev) { struct max8952_data *max8952 = rdev_get_drvdata(rdev); @@ -113,8 +146,12 @@ static int max8952_set_voltage_sel(struct regulator_dev *rdev, static struct regulator_ops max8952_ops = { .list_voltage = max8952_list_voltage, + .is_enabled = max8952_is_enabled, + .enable = max8952_enable, + .disable = max8952_disable, .get_voltage_sel = max8952_get_voltage_sel, .set_voltage_sel = 
max8952_set_voltage_sel, + .set_suspend_disable = max8952_disable, }; static const struct regulator_desc regulator = { @@ -157,10 +194,6 @@ static int __devinit max8952_pmic_probe(struct i2c_client *client, config.init_data = &pdata->reg_data; config.driver_data = max8952; - config.ena_gpio = pdata->gpio_en; - if (pdata->reg_data.constraints.boot_on) - config.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH; - max8952->rdev = regulator_register(®ulator, &config); if (IS_ERR(max8952->rdev)) { @@ -169,9 +202,27 @@ static int __devinit max8952_pmic_probe(struct i2c_client *client, return ret; } + max8952->en = !!(pdata->reg_data.constraints.boot_on); max8952->vid0 = pdata->default_mode & 0x1; max8952->vid1 = (pdata->default_mode >> 1) & 0x1; + if (gpio_is_valid(pdata->gpio_en)) { + if (!gpio_request(pdata->gpio_en, "MAX8952 EN")) + gpio_direction_output(pdata->gpio_en, max8952->en); + else + err = 1; + } else + err = 2; + + if (err) { + dev_info(max8952->dev, "EN gpio invalid: assume that EN" + "is always High\n"); + max8952->en = 1; + pdata->gpio_en = -1; /* Mark invalid */ + } + + err = 0; + if (gpio_is_valid(pdata->gpio_vid0) && gpio_is_valid(pdata->gpio_vid1)) { if (!gpio_request(pdata->gpio_vid0, "MAX8952 VID0")) @@ -257,6 +308,7 @@ static int __devexit max8952_pmic_remove(struct i2c_client *client) gpio_free(pdata->gpio_vid0); gpio_free(pdata->gpio_vid1); + gpio_free(pdata->gpio_en); return 0; } diff --git a/trunk/drivers/regulator/max8997.c b/trunk/drivers/regulator/max8997.c index e39a0c7260dc..704cd49ef375 100644 --- a/trunk/drivers/regulator/max8997.c +++ b/trunk/drivers/regulator/max8997.c @@ -1025,6 +1025,7 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev) */ if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs || pdata->buck5_gpiodvs) { + bool gpio1set = false, gpio2set = false; if (!gpio_is_valid(pdata->buck125_gpios[0]) || !gpio_is_valid(pdata->buck125_gpios[1]) || @@ -1034,20 +1035,40 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev) goto err_out; } - ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[0], - "MAX8997 SET1"); - if (ret) + ret = gpio_request(pdata->buck125_gpios[0], + "MAX8997 SET1"); + if (ret == -EBUSY) + dev_warn(&pdev->dev, "Duplicated gpio request" + " on SET1\n"); + else if (ret) goto err_out; - - ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[1], - "MAX8997 SET2"); - if (ret) + else + gpio1set = true; + + ret = gpio_request(pdata->buck125_gpios[1], + "MAX8997 SET2"); + if (ret == -EBUSY) + dev_warn(&pdev->dev, "Duplicated gpio request" + " on SET2\n"); + else if (ret) { + if (gpio1set) + gpio_free(pdata->buck125_gpios[0]); goto err_out; + } else + gpio2set = true; - ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[2], + ret = gpio_request(pdata->buck125_gpios[2], "MAX8997 SET3"); - if (ret) + if (ret == -EBUSY) + dev_warn(&pdev->dev, "Duplicated gpio request" + " on SET3\n"); + else if (ret) { + if (gpio1set) + gpio_free(pdata->buck125_gpios[0]); + if (gpio2set) + gpio_free(pdata->buck125_gpios[1]); goto err_out; + } gpio_direction_output(pdata->buck125_gpios[0], (max8997->buck125_gpioindex >> 2) @@ -1058,6 +1079,7 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev) gpio_direction_output(pdata->buck125_gpios[2], (max8997->buck125_gpioindex >> 0) & 0x1); /* SET3 */ + ret = 0; } /* DVS-GPIO disabled */ diff --git a/trunk/drivers/regulator/max8998.c b/trunk/drivers/regulator/max8998.c index 5dfa920ff0c8..18bb58b9b96e 100644 --- a/trunk/drivers/regulator/max8998.c +++ 
b/trunk/drivers/regulator/max8998.c @@ -111,6 +111,27 @@ static const struct voltage_map_desc *ldo_voltage_map[] = { &buck4_voltage_map_desc, /* BUCK4 */ }; +static int max8998_list_voltage(struct regulator_dev *rdev, + unsigned int selector) +{ + const struct voltage_map_desc *desc; + int ldo = rdev_get_id(rdev); + int val; + + if (ldo >= ARRAY_SIZE(ldo_voltage_map)) + return -EINVAL; + + desc = ldo_voltage_map[ldo]; + if (desc == NULL) + return -EINVAL; + + val = desc->min + desc->step * selector; + if (val > desc->max) + return -EINVAL; + + return val * 1000; +} + static int max8998_get_enable_register(struct regulator_dev *rdev, int *reg, int *shift) { @@ -276,18 +297,41 @@ static int max8998_get_voltage_sel(struct regulator_dev *rdev) return val; } -static int max8998_set_voltage_ldo_sel(struct regulator_dev *rdev, - unsigned selector) +static int max8998_set_voltage_ldo(struct regulator_dev *rdev, + int min_uV, int max_uV, unsigned *selector) { struct max8998_data *max8998 = rdev_get_drvdata(rdev); struct i2c_client *i2c = max8998->iodev->i2c; - int reg, shift = 0, mask, ret; + int min_vol = min_uV / 1000, max_vol = max_uV / 1000; + const struct voltage_map_desc *desc; + int ldo = rdev_get_id(rdev); + int reg, shift = 0, mask, ret, i; + + if (ldo >= ARRAY_SIZE(ldo_voltage_map)) + return -EINVAL; + + desc = ldo_voltage_map[ldo]; + if (desc == NULL) + return -EINVAL; + + if (max_vol < desc->min || min_vol > desc->max) + return -EINVAL; + + if (min_vol < desc->min) + min_vol = desc->min; + + i = DIV_ROUND_UP(min_vol - desc->min, desc->step); + + if (desc->min + desc->step*i > max_vol) + return -EINVAL; + + *selector = i; ret = max8998_get_voltage_register(rdev, ®, &shift, &mask); if (ret) return ret; - ret = max8998_update_reg(i2c, reg, selector<iodev->dev); struct i2c_client *i2c = max8998->iodev->i2c; + int min_vol = min_uV / 1000, max_vol = max_uV / 1000; + const struct voltage_map_desc *desc; int buck = rdev_get_id(rdev); int reg, shift = 0, mask, ret; - int j, previous_sel; + int i, j, previous_sel; static u8 buck1_last_val; + if (buck >= ARRAY_SIZE(ldo_voltage_map)) + return -EINVAL; + + desc = ldo_voltage_map[buck]; + + if (desc == NULL) + return -EINVAL; + + if (max_vol < desc->min || min_vol > desc->max) + return -EINVAL; + + if (min_vol < desc->min) + min_vol = desc->min; + + i = DIV_ROUND_UP(min_vol - desc->min, desc->step); + + if (desc->min + desc->step*i > max_vol) + return -EINVAL; + + *selector = i; + ret = max8998_get_voltage_register(rdev, ®, &shift, &mask); if (ret) return ret; @@ -323,19 +390,19 @@ static int max8998_set_voltage_buck_sel(struct regulator_dev *rdev, /* Check if voltage needs to be changed */ /* if previous_voltage equal new voltage, return */ - if (previous_sel == selector) { + if (previous_sel == i) { dev_dbg(max8998->dev, "No voltage change, old:%d, new:%d\n", - regulator_list_voltage_linear(rdev, previous_sel), - regulator_list_voltage_linear(rdev, selector)); + max8998_list_voltage(rdev, previous_sel), + max8998_list_voltage(rdev, i)); return ret; } switch (buck) { case MAX8998_BUCK1: dev_dbg(max8998->dev, - "BUCK1, selector:%d, buck1_vol1:%d, buck1_vol2:%d\n" + "BUCK1, i:%d, buck1_vol1:%d, buck1_vol2:%d\n" "buck1_vol3:%d, buck1_vol4:%d\n", - selector, max8998->buck1_vol[0], max8998->buck1_vol[1], + i, max8998->buck1_vol[0], max8998->buck1_vol[1], max8998->buck1_vol[2], max8998->buck1_vol[3]); if (gpio_is_valid(pdata->buck1_set1) && @@ -344,7 +411,7 @@ static int max8998_set_voltage_buck_sel(struct regulator_dev *rdev, /* check if requested 
voltage */ /* value is already defined */ for (j = 0; j < ARRAY_SIZE(max8998->buck1_vol); j++) { - if (max8998->buck1_vol[j] == selector) { + if (max8998->buck1_vol[j] == i) { max8998->buck1_idx = j; buck1_gpio_set(pdata->buck1_set1, pdata->buck1_set2, j); @@ -359,11 +426,11 @@ static int max8998_set_voltage_buck_sel(struct regulator_dev *rdev, max8998->buck1_idx = (buck1_last_val % 2) + 2; dev_dbg(max8998->dev, "max8998->buck1_idx:%d\n", max8998->buck1_idx); - max8998->buck1_vol[max8998->buck1_idx] = selector; + max8998->buck1_vol[max8998->buck1_idx] = i; ret = max8998_get_voltage_register(rdev, ®, &shift, &mask); - ret = max8998_write_reg(i2c, reg, selector); + ret = max8998_write_reg(i2c, reg, i); buck1_gpio_set(pdata->buck1_set1, pdata->buck1_set2, max8998->buck1_idx); buck1_last_val++; @@ -373,20 +440,20 @@ static int max8998_set_voltage_buck_sel(struct regulator_dev *rdev, gpio_get_value(pdata->buck1_set2)); break; } else { - ret = max8998_write_reg(i2c, reg, selector); + ret = max8998_write_reg(i2c, reg, i); } break; case MAX8998_BUCK2: dev_dbg(max8998->dev, - "BUCK2, selector:%d buck2_vol1:%d, buck2_vol2:%d\n", - selector, max8998->buck2_vol[0], max8998->buck2_vol[1]); + "BUCK2, i:%d buck2_vol1:%d, buck2_vol2:%d\n" + , i, max8998->buck2_vol[0], max8998->buck2_vol[1]); if (gpio_is_valid(pdata->buck2_set3)) { /* check if requested voltage */ /* value is already defined */ for (j = 0; j < ARRAY_SIZE(max8998->buck2_vol); j++) { - if (max8998->buck2_vol[j] == selector) { + if (max8998->buck2_vol[j] == i) { max8998->buck2_idx = j; buck2_gpio_set(pdata->buck2_set3, j); goto buck2_exit; @@ -398,21 +465,20 @@ static int max8998_set_voltage_buck_sel(struct regulator_dev *rdev, max8998_get_voltage_register(rdev, ®, &shift, &mask); - ret = max8998_write_reg(i2c, reg, selector); - max8998->buck2_vol[max8998->buck2_idx] = selector; + ret = max8998_write_reg(i2c, reg, i); + max8998->buck2_vol[max8998->buck2_idx] = i; buck2_gpio_set(pdata->buck2_set3, max8998->buck2_idx); buck2_exit: dev_dbg(max8998->dev, "%s: SET3:%d\n", i2c->name, gpio_get_value(pdata->buck2_set3)); } else { - ret = max8998_write_reg(i2c, reg, selector); + ret = max8998_write_reg(i2c, reg, i); } break; case MAX8998_BUCK3: case MAX8998_BUCK4: - ret = max8998_update_reg(i2c, reg, selector<max - desc->min) / desc->step + 1; - regulators[index].n_voltages = count; - regulators[index].min_uV = desc->min * 1000; - regulators[index].uV_step = desc->step * 1000; } config.dev = max8998->dev; diff --git a/trunk/drivers/regulator/mc13783-regulator.c b/trunk/drivers/regulator/mc13783-regulator.c index 4932e3449fe1..7dcdfa283e93 100644 --- a/trunk/drivers/regulator/mc13783-regulator.c +++ b/trunk/drivers/regulator/mc13783-regulator.c @@ -93,78 +93,78 @@ /* Voltage Values */ -static const unsigned int mc13783_sw3_val[] = { +static const int mc13783_sw3_val[] = { 5000000, 5000000, 5000000, 5500000, }; -static const unsigned int mc13783_vaudio_val[] = { +static const int mc13783_vaudio_val[] = { 2775000, }; -static const unsigned int mc13783_viohi_val[] = { +static const int mc13783_viohi_val[] = { 2775000, }; -static const unsigned int mc13783_violo_val[] = { +static const int mc13783_violo_val[] = { 1200000, 1300000, 1500000, 1800000, }; -static const unsigned int mc13783_vdig_val[] = { +static const int mc13783_vdig_val[] = { 1200000, 1300000, 1500000, 1800000, }; -static const unsigned int mc13783_vgen_val[] = { +static const int mc13783_vgen_val[] = { 1200000, 1300000, 1500000, 1800000, 1100000, 2000000, 2775000, 2400000, }; -static const 
unsigned int mc13783_vrfdig_val[] = { +static const int mc13783_vrfdig_val[] = { 1200000, 1500000, 1800000, 1875000, }; -static const unsigned int mc13783_vrfref_val[] = { +static const int mc13783_vrfref_val[] = { 2475000, 2600000, 2700000, 2775000, }; -static const unsigned int mc13783_vrfcp_val[] = { +static const int mc13783_vrfcp_val[] = { 2700000, 2775000, }; -static const unsigned int mc13783_vsim_val[] = { +static const int mc13783_vsim_val[] = { 1800000, 2900000, 3000000, }; -static const unsigned int mc13783_vesim_val[] = { +static const int mc13783_vesim_val[] = { 1800000, 2900000, }; -static const unsigned int mc13783_vcam_val[] = { +static const int mc13783_vcam_val[] = { 1500000, 1800000, 2500000, 2550000, 2600000, 2750000, 2800000, 3000000, }; -static const unsigned int mc13783_vrfbg_val[] = { +static const int mc13783_vrfbg_val[] = { 1250000, }; -static const unsigned int mc13783_vvib_val[] = { +static const int mc13783_vvib_val[] = { 1300000, 1800000, 2000000, 3000000, }; -static const unsigned int mc13783_vmmc_val[] = { +static const int mc13783_vmmc_val[] = { 1600000, 1800000, 2000000, 2600000, 2700000, 2800000, 2900000, 3000000, }; -static const unsigned int mc13783_vrf_val[] = { +static const int mc13783_vrf_val[] = { 1500000, 1875000, 2700000, 2775000, }; -static const unsigned int mc13783_gpo_val[] = { +static const int mc13783_gpo_val[] = { 3100000, }; -static const unsigned int mc13783_pwgtdrv_val[] = { +static const int mc13783_pwgtdrv_val[] = { 5500000, }; @@ -328,7 +328,7 @@ static struct regulator_ops mc13783_gpo_regulator_ops = { .enable = mc13783_gpo_regulator_enable, .disable = mc13783_gpo_regulator_disable, .is_enabled = mc13783_gpo_regulator_is_enabled, - .list_voltage = regulator_list_voltage_table, + .list_voltage = mc13xxx_regulator_list_voltage, .set_voltage = mc13xxx_fixed_regulator_set_voltage, .get_voltage = mc13xxx_fixed_regulator_get_voltage, }; diff --git a/trunk/drivers/regulator/mc13892-regulator.c b/trunk/drivers/regulator/mc13892-regulator.c index b388b746452e..970a233dbe46 100644 --- a/trunk/drivers/regulator/mc13892-regulator.c +++ b/trunk/drivers/regulator/mc13892-regulator.c @@ -150,12 +150,12 @@ #define MC13892_USB1 50 #define MC13892_USB1_VUSBEN (1<<3) -static const unsigned int mc13892_vcoincell[] = { +static const int mc13892_vcoincell[] = { 2500000, 2700000, 2800000, 2900000, 3000000, 3100000, 3200000, 3300000, }; -static const unsigned int mc13892_sw1[] = { +static const int mc13892_sw1[] = { 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000, 1000000, 1025000, @@ -164,7 +164,7 @@ static const unsigned int mc13892_sw1[] = { 1350000, 1375000 }; -static const unsigned int mc13892_sw[] = { +static const int mc13892_sw[] = { 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000, 1000000, 1025000, @@ -176,65 +176,65 @@ static const unsigned int mc13892_sw[] = { 1800000, 1825000, 1850000, 1875000 }; -static const unsigned int mc13892_swbst[] = { +static const int mc13892_swbst[] = { 5000000, }; -static const unsigned int mc13892_viohi[] = { +static const int mc13892_viohi[] = { 2775000, }; -static const unsigned int mc13892_vpll[] = { +static const int mc13892_vpll[] = { 1050000, 1250000, 1650000, 1800000, }; -static const unsigned int mc13892_vdig[] = { +static const int mc13892_vdig[] = { 1050000, 1250000, 1650000, 1800000, }; -static const unsigned int mc13892_vsd[] = { +static const int 
mc13892_vsd[] = { 1800000, 2000000, 2600000, 2700000, 2800000, 2900000, 3000000, 3150000, }; -static const unsigned int mc13892_vusb2[] = { +static const int mc13892_vusb2[] = { 2400000, 2600000, 2700000, 2775000, }; -static const unsigned int mc13892_vvideo[] = { +static const int mc13892_vvideo[] = { 2700000, 2775000, 2500000, 2600000, }; -static const unsigned int mc13892_vaudio[] = { +static const int mc13892_vaudio[] = { 2300000, 2500000, 2775000, 3000000, }; -static const unsigned int mc13892_vcam[] = { +static const int mc13892_vcam[] = { 2500000, 2600000, 2750000, 3000000, }; -static const unsigned int mc13892_vgen1[] = { +static const int mc13892_vgen1[] = { 1200000, 1500000, 2775000, 3150000, }; -static const unsigned int mc13892_vgen2[] = { +static const int mc13892_vgen2[] = { 1200000, 1500000, 1600000, 1800000, 2700000, 2800000, 3000000, 3150000, }; -static const unsigned int mc13892_vgen3[] = { +static const int mc13892_vgen3[] = { 1800000, 2900000, }; -static const unsigned int mc13892_vusb[] = { +static const int mc13892_vusb[] = { 3300000, }; -static const unsigned int mc13892_gpo[] = { +static const int mc13892_gpo[] = { 2750000, }; -static const unsigned int mc13892_pwgtdrv[] = { +static const int mc13892_pwgtdrv[] = { 5000000, }; @@ -394,7 +394,7 @@ static struct regulator_ops mc13892_gpo_regulator_ops = { .enable = mc13892_gpo_regulator_enable, .disable = mc13892_gpo_regulator_disable, .is_enabled = mc13892_gpo_regulator_is_enabled, - .list_voltage = regulator_list_voltage_table, + .list_voltage = mc13xxx_regulator_list_voltage, .set_voltage = mc13xxx_fixed_regulator_set_voltage, .get_voltage = mc13xxx_fixed_regulator_get_voltage, }; @@ -436,7 +436,7 @@ static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev, u32 valread; int ret; - value = rdev->desc->volt_table[selector]; + value = mc13892_regulators[id].voltages[selector]; mc13xxx_lock(priv->mc13xxx); ret = mc13xxx_reg_read(priv->mc13xxx, @@ -469,7 +469,8 @@ static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev, } static struct regulator_ops mc13892_sw_regulator_ops = { - .list_voltage = regulator_list_voltage_table, + .is_enabled = mc13xxx_sw_regulator_is_enabled, + .list_voltage = mc13xxx_regulator_list_voltage, .set_voltage_sel = mc13892_sw_regulator_set_voltage_sel, .get_voltage = mc13892_sw_regulator_get_voltage, }; diff --git a/trunk/drivers/regulator/mc13xxx-regulator-core.c b/trunk/drivers/regulator/mc13xxx-regulator-core.c index d6eda28ca5d0..4fa9704739bc 100644 --- a/trunk/drivers/regulator/mc13xxx-regulator-core.c +++ b/trunk/drivers/regulator/mc13xxx-regulator-core.c @@ -80,6 +80,20 @@ static int mc13xxx_regulator_is_enabled(struct regulator_dev *rdev) return (val & mc13xxx_regulators[id].enable_bit) != 0; } +int mc13xxx_regulator_list_voltage(struct regulator_dev *rdev, + unsigned selector) +{ + int id = rdev_get_id(rdev); + struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); + struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators; + + if (selector >= mc13xxx_regulators[id].desc.n_voltages) + return -EINVAL; + + return mc13xxx_regulators[id].voltages[selector]; +} +EXPORT_SYMBOL_GPL(mc13xxx_regulator_list_voltage); + static int mc13xxx_regulator_set_voltage_sel(struct regulator_dev *rdev, unsigned selector) { @@ -121,14 +135,14 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev) BUG_ON(val >= mc13xxx_regulators[id].desc.n_voltages); - return rdev->desc->volt_table[val]; + return mc13xxx_regulators[id].voltages[val]; 
} struct regulator_ops mc13xxx_regulator_ops = { .enable = mc13xxx_regulator_enable, .disable = mc13xxx_regulator_disable, .is_enabled = mc13xxx_regulator_is_enabled, - .list_voltage = regulator_list_voltage_table, + .list_voltage = mc13xxx_regulator_list_voltage, .set_voltage_sel = mc13xxx_regulator_set_voltage_sel, .get_voltage = mc13xxx_regulator_get_voltage, }; @@ -137,13 +151,15 @@ EXPORT_SYMBOL_GPL(mc13xxx_regulator_ops); int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, unsigned *selector) { + struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); + struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators; int id = rdev_get_id(rdev); dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n", __func__, id, min_uV, max_uV); - if (min_uV <= rdev->desc->volt_table[0] && - rdev->desc->volt_table[0] <= max_uV) + if (min_uV >= mc13xxx_regulators[id].voltages[0] && + max_uV <= mc13xxx_regulators[id].voltages[0]) return 0; else return -EINVAL; @@ -152,11 +168,13 @@ EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_set_voltage); int mc13xxx_fixed_regulator_get_voltage(struct regulator_dev *rdev) { + struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); + struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators; int id = rdev_get_id(rdev); dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); - return rdev->desc->volt_table[0]; + return mc13xxx_regulators[id].voltages[0]; } EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_get_voltage); @@ -164,12 +182,18 @@ struct regulator_ops mc13xxx_fixed_regulator_ops = { .enable = mc13xxx_regulator_enable, .disable = mc13xxx_regulator_disable, .is_enabled = mc13xxx_regulator_is_enabled, - .list_voltage = regulator_list_voltage_table, + .list_voltage = mc13xxx_regulator_list_voltage, .set_voltage = mc13xxx_fixed_regulator_set_voltage, .get_voltage = mc13xxx_fixed_regulator_get_voltage, }; EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_ops); +int mc13xxx_sw_regulator_is_enabled(struct regulator_dev *rdev) +{ + return 1; +} +EXPORT_SYMBOL_GPL(mc13xxx_sw_regulator_is_enabled); + #ifdef CONFIG_OF int __devinit mc13xxx_get_num_regulators_dt(struct platform_device *pdev) { diff --git a/trunk/drivers/regulator/mc13xxx.h b/trunk/drivers/regulator/mc13xxx.h index eaff5510b6df..044aba4d28ec 100644 --- a/trunk/drivers/regulator/mc13xxx.h +++ b/trunk/drivers/regulator/mc13xxx.h @@ -22,6 +22,7 @@ struct mc13xxx_regulator { int vsel_shift; int vsel_mask; int hi_bit; + int const *voltages; }; struct mc13xxx_regulator_priv { @@ -32,6 +33,10 @@ struct mc13xxx_regulator_priv { struct regulator_dev *regulators[]; }; +extern int mc13xxx_sw_regulator(struct regulator_dev *rdev); +extern int mc13xxx_sw_regulator_is_enabled(struct regulator_dev *rdev); +extern int mc13xxx_regulator_list_voltage(struct regulator_dev *rdev, + unsigned selector); extern int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, unsigned *selector); extern int mc13xxx_fixed_regulator_get_voltage(struct regulator_dev *rdev); @@ -63,7 +68,6 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops; .desc = { \ .name = #_name, \ .n_voltages = ARRAY_SIZE(_voltages), \ - .volt_table = _voltages, \ .ops = &_ops, \ .type = REGULATOR_VOLTAGE, \ .id = prefix ## _name, \ @@ -74,6 +78,7 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops; .vsel_reg = prefix ## _vsel_reg, \ .vsel_shift = prefix ## _vsel_reg ## _ ## _name ## VSEL,\ .vsel_mask = prefix ## _vsel_reg ## _ ## _name ## VSEL_M,\ + 
.voltages = _voltages, \ } #define MC13xxx_FIXED_DEFINE(prefix, _name, _reg, _voltages, _ops) \ @@ -81,7 +86,6 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops; .desc = { \ .name = #_name, \ .n_voltages = ARRAY_SIZE(_voltages), \ - .volt_table = _voltages, \ .ops = &_ops, \ .type = REGULATOR_VOLTAGE, \ .id = prefix ## _name, \ @@ -89,6 +93,7 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops; }, \ .reg = prefix ## _reg, \ .enable_bit = prefix ## _reg ## _ ## _name ## EN, \ + .voltages = _voltages, \ } #define MC13xxx_GPO_DEFINE(prefix, _name, _reg, _voltages, _ops) \ @@ -96,7 +101,6 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops; .desc = { \ .name = #_name, \ .n_voltages = ARRAY_SIZE(_voltages), \ - .volt_table = _voltages, \ .ops = &_ops, \ .type = REGULATOR_VOLTAGE, \ .id = prefix ## _name, \ @@ -104,6 +108,7 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops; }, \ .reg = prefix ## _reg, \ .enable_bit = prefix ## _reg ## _ ## _name ## EN, \ + .voltages = _voltages, \ } #define MC13xxx_DEFINE_SW(_name, _reg, _vsel_reg, _voltages, ops) \ diff --git a/trunk/drivers/regulator/of_regulator.c b/trunk/drivers/regulator/of_regulator.c index 3e4106f2bda9..56593b75168a 100644 --- a/trunk/drivers/regulator/of_regulator.c +++ b/trunk/drivers/regulator/of_regulator.c @@ -20,7 +20,7 @@ static void of_get_regulation_constraints(struct device_node *np, struct regulator_init_data **init_data) { const __be32 *min_uV, *max_uV, *uV_offset; - const __be32 *min_uA, *max_uA, *ramp_delay; + const __be32 *min_uA, *max_uA; struct regulation_constraints *constraints = &(*init_data)->constraints; constraints->name = of_get_property(np, "regulator-name", NULL); @@ -60,10 +60,6 @@ static void of_get_regulation_constraints(struct device_node *np, constraints->always_on = true; else /* status change should be possible if not always on. */ constraints->valid_ops_mask |= REGULATOR_CHANGE_STATUS; - - ramp_delay = of_get_property(np, "regulator-ramp-delay", NULL); - if (ramp_delay) - constraints->ramp_delay = be32_to_cpu(*ramp_delay); } /** @@ -92,17 +88,15 @@ struct regulator_init_data *of_get_regulator_init_data(struct device *dev, EXPORT_SYMBOL_GPL(of_get_regulator_init_data); /** - * of_regulator_match - extract regulator init data when node - * property "regulator-compatible" matches with the regulator name. + * of_regulator_match - extract regulator init data * @dev: device requesting the data * @node: parent device node of the regulators * @matches: match table for the regulators * @num_matches: number of entries in match table * * This function uses a match table specified by the regulator driver and - * looks up the corresponding init data in the device tree if - * regulator-compatible matches. Note that the match table is modified - * in place. + * looks up the corresponding init data in the device tree. Note that the + * match table is modified in place. * * Returns the number of matches found or a negative error code on failure. 
*/ @@ -112,40 +106,27 @@ int of_regulator_match(struct device *dev, struct device_node *node, { unsigned int count = 0; unsigned int i; - const char *regulator_comp; - struct device_node *child; if (!dev || !node) return -EINVAL; - for_each_child_of_node(node, child) { - regulator_comp = of_get_property(child, - "regulator-compatible", NULL); - if (!regulator_comp) { - dev_err(dev, "regulator-compatible is missing for node %s\n", - child->name); + for (i = 0; i < num_matches; i++) { + struct of_regulator_match *match = &matches[i]; + struct device_node *child; + + child = of_find_node_by_name(node, match->name); + if (!child) continue; + + match->init_data = of_get_regulator_init_data(dev, child); + if (!match->init_data) { + dev_err(dev, "failed to parse DT for regulator %s\n", + child->name); + return -EINVAL; } - for (i = 0; i < num_matches; i++) { - struct of_regulator_match *match = &matches[i]; - if (match->of_node) - continue; - - if (strcmp(match->name, regulator_comp)) - continue; - - match->init_data = - of_get_regulator_init_data(dev, child); - if (!match->init_data) { - dev_err(dev, - "failed to parse DT for regulator %s\n", - child->name); - return -EINVAL; - } - match->of_node = child; - count++; - break; - } + + match->of_node = child; + count++; } return count; diff --git a/trunk/drivers/regulator/palmas-regulator.c b/trunk/drivers/regulator/palmas-regulator.c index 17d19fbbc490..795f75a6ac33 100644 --- a/trunk/drivers/regulator/palmas-regulator.c +++ b/trunk/drivers/regulator/palmas-regulator.c @@ -257,7 +257,8 @@ static int palmas_set_mode_smps(struct regulator_dev *dev, unsigned int mode) unsigned int reg; palmas_smps_read(pmic->palmas, palmas_regs_info[id].ctrl_addr, ®); - reg &= ~PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK; + reg &= ~PALMAS_SMPS12_CTRL_STATUS_MASK; + reg >>= PALMAS_SMPS12_CTRL_STATUS_SHIFT; switch (mode) { case REGULATOR_MODE_NORMAL: @@ -373,22 +374,11 @@ static int palmas_set_voltage_smps_sel(struct regulator_dev *dev, static int palmas_map_voltage_smps(struct regulator_dev *rdev, int min_uV, int max_uV) { - struct palmas_pmic *pmic = rdev_get_drvdata(rdev); - int id = rdev_get_id(rdev); int ret, voltage; - if (min_uV == 0) - return 0; - - if (pmic->range[id]) { /* RANGE is x2 */ - if (min_uV < 1000000) - min_uV = 1000000; - ret = DIV_ROUND_UP(min_uV - 1000000, 20000) + 1; - } else { /* RANGE is x1 */ - if (min_uV < 500000) - min_uV = 500000; - ret = DIV_ROUND_UP(min_uV - 500000, 10000) + 1; - } + ret = ((min_uV - 500000) / 10000) + 1; + if (ret < 0) + return ret; /* Map back into a voltage to verify we're still in bounds */ voltage = palmas_list_voltage_smps(rdev, ret); @@ -410,14 +400,19 @@ static struct regulator_ops palmas_ops_smps = { .map_voltage = palmas_map_voltage_smps, }; +static int palmas_list_voltage_smps10(struct regulator_dev *dev, + unsigned selector) +{ + return 3750000 + (selector * 1250000); +} + static struct regulator_ops palmas_ops_smps10 = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, - .list_voltage = regulator_list_voltage_linear, - .map_voltage = regulator_map_voltage_linear, + .list_voltage = palmas_list_voltage_smps10, }; static int palmas_is_enabled_ldo(struct regulator_dev *dev) @@ -527,15 +522,7 @@ static int palmas_smps_init(struct palmas *palmas, int id, if (ret) return ret; - switch (id) { - case PALMAS_REG_SMPS10: - if (reg_init->mode_sleep) { - reg 
&= ~PALMAS_SMPS10_CTRL_MODE_SLEEP_MASK; - reg |= reg_init->mode_sleep << - PALMAS_SMPS10_CTRL_MODE_SLEEP_SHIFT; - } - break; - default: + if (id != PALMAS_REG_SMPS10) { if (reg_init->warm_reset) reg |= PALMAS_SMPS12_CTRL_WR_S; @@ -547,8 +534,14 @@ static int palmas_smps_init(struct palmas *palmas, int id, reg |= reg_init->mode_sleep << PALMAS_SMPS12_CTRL_MODE_SLEEP_SHIFT; } - } + } else { + if (reg_init->mode_sleep) { + reg &= ~PALMAS_SMPS10_CTRL_MODE_SLEEP_MASK; + reg |= reg_init->mode_sleep << + PALMAS_SMPS10_CTRL_MODE_SLEEP_SHIFT; + } + } ret = palmas_smps_write(palmas, addr, reg); if (ret) return ret; @@ -672,8 +665,10 @@ static __devinit int palmas_probe(struct platform_device *pdev) pmic->desc[id].name = palmas_regs_info[id].name; pmic->desc[id].id = id; - switch (id) { - case PALMAS_REG_SMPS10: + if (id != PALMAS_REG_SMPS10) { + pmic->desc[id].ops = &palmas_ops_smps; + pmic->desc[id].n_voltages = PALMAS_SMPS_NUM_VOLTAGES; + } else { pmic->desc[id].n_voltages = PALMAS_SMPS10_NUM_VOLTAGES; pmic->desc[id].ops = &palmas_ops_smps10; pmic->desc[id].vsel_reg = PALMAS_SMPS10_CTRL; @@ -682,12 +677,6 @@ static __devinit int palmas_probe(struct platform_device *pdev) PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, PALMAS_SMPS10_STATUS); pmic->desc[id].enable_mask = SMPS10_BOOST_EN; - pmic->desc[id].min_uV = 3750000; - pmic->desc[id].uV_step = 1250000; - break; - default: - pmic->desc[id].ops = &palmas_ops_smps; - pmic->desc[id].n_voltages = PALMAS_SMPS_NUM_VOLTAGES; } pmic->desc[id].type = REGULATOR_VOLTAGE; diff --git a/trunk/drivers/regulator/pcap-regulator.c b/trunk/drivers/regulator/pcap-regulator.c index 68777acc099f..8211101121f0 100644 --- a/trunk/drivers/regulator/pcap-regulator.c +++ b/trunk/drivers/regulator/pcap-regulator.c @@ -18,80 +18,80 @@ #include #include -static const unsigned int V1_table[] = { - 2775000, 1275000, 1600000, 1725000, 1825000, 1925000, 2075000, 2275000, +static const u16 V1_table[] = { + 2775, 1275, 1600, 1725, 1825, 1925, 2075, 2275, }; -static const unsigned int V2_table[] = { - 2500000, 2775000, +static const u16 V2_table[] = { + 2500, 2775, }; -static const unsigned int V3_table[] = { - 1075000, 1275000, 1550000, 1725000, 1876000, 1950000, 2075000, 2275000, +static const u16 V3_table[] = { + 1075, 1275, 1550, 1725, 1876, 1950, 2075, 2275, }; -static const unsigned int V4_table[] = { - 1275000, 1550000, 1725000, 1875000, 1950000, 2075000, 2275000, 2775000, +static const u16 V4_table[] = { + 1275, 1550, 1725, 1875, 1950, 2075, 2275, 2775, }; -static const unsigned int V5_table[] = { - 1875000, 2275000, 2475000, 2775000, +static const u16 V5_table[] = { + 1875, 2275, 2475, 2775, }; -static const unsigned int V6_table[] = { - 2475000, 2775000, +static const u16 V6_table[] = { + 2475, 2775, }; -static const unsigned int V7_table[] = { - 1875000, 2775000, +static const u16 V7_table[] = { + 1875, 2775, }; #define V8_table V4_table -static const unsigned int V9_table[] = { - 1575000, 1875000, 2475000, 2775000, +static const u16 V9_table[] = { + 1575, 1875, 2475, 2775, }; -static const unsigned int V10_table[] = { - 5000000, +static const u16 V10_table[] = { + 5000, }; -static const unsigned int VAUX1_table[] = { - 1875000, 2475000, 2775000, 3000000, +static const u16 VAUX1_table[] = { + 1875, 2475, 2775, 3000, }; #define VAUX2_table VAUX1_table -static const unsigned int VAUX3_table[] = { - 1200000, 1200000, 1200000, 1200000, 1400000, 1600000, 1800000, 2000000, - 2200000, 2400000, 2600000, 2800000, 3000000, 3200000, 3400000, 3600000, +static const u16 VAUX3_table[] = { + 
1200, 1200, 1200, 1200, 1400, 1600, 1800, 2000, + 2200, 2400, 2600, 2800, 3000, 3200, 3400, 3600, }; -static const unsigned int VAUX4_table[] = { - 1800000, 1800000, 3000000, 5000000, +static const u16 VAUX4_table[] = { + 1800, 1800, 3000, 5000, }; -static const unsigned int VSIM_table[] = { - 1875000, 3000000, +static const u16 VSIM_table[] = { + 1875, 3000, }; -static const unsigned int VSIM2_table[] = { - 1875000, +static const u16 VSIM2_table[] = { + 1875, }; -static const unsigned int VVIB_table[] = { - 1300000, 1800000, 2000000, 3000000, +static const u16 VVIB_table[] = { + 1300, 1800, 2000, 3000, }; -static const unsigned int SW1_table[] = { - 900000, 950000, 1000000, 1050000, 1100000, 1150000, 1200000, 1250000, - 1300000, 1350000, 1400000, 1450000, 1500000, 1600000, 1875000, 2250000, +static const u16 SW1_table[] = { + 900, 950, 1000, 1050, 1100, 1150, 1200, 1250, + 1300, 1350, 1400, 1450, 1500, 1600, 1875, 2250, }; #define SW2_table SW1_table -static const unsigned int SW3_table[] = { - 4000000, 4500000, 5000000, 5500000, +static const u16 SW3_table[] = { + 4000, 4500, 5000, 5500, }; struct pcap_regulator { @@ -100,6 +100,8 @@ struct pcap_regulator { const u8 index; const u8 stby; const u8 lowpwr; + const u8 n_voltages; + const u16 *voltage_table; }; #define NA 0xff @@ -111,6 +113,8 @@ struct pcap_regulator { .index = _index, \ .stby = _stby, \ .lowpwr = _lowpwr, \ + .n_voltages = ARRAY_SIZE(_vreg##_table), \ + .voltage_table = _vreg##_table, \ } static struct pcap_regulator vreg_table[] = { @@ -153,11 +157,11 @@ static int pcap_regulator_set_voltage_sel(struct regulator_dev *rdev, void *pcap = rdev_get_drvdata(rdev); /* the regulator doesn't support voltage switching */ - if (rdev->desc->n_voltages == 1) + if (vreg->n_voltages == 1) return -EINVAL; return ezx_pcap_set_bits(pcap, vreg->reg, - (rdev->desc->n_voltages - 1) << vreg->index, + (vreg->n_voltages - 1) << vreg->index, selector << vreg->index); } @@ -167,11 +171,11 @@ static int pcap_regulator_get_voltage_sel(struct regulator_dev *rdev) void *pcap = rdev_get_drvdata(rdev); u32 tmp; - if (rdev->desc->n_voltages == 1) + if (vreg->n_voltages == 1) return 0; ezx_pcap_read(pcap, vreg->reg, &tmp); - tmp = ((tmp >> vreg->index) & (rdev->desc->n_voltages - 1)); + tmp = ((tmp >> vreg->index) & (vreg->n_voltages - 1)); return tmp; } @@ -210,8 +214,16 @@ static int pcap_regulator_is_enabled(struct regulator_dev *rdev) return (tmp >> vreg->en) & 1; } +static int pcap_regulator_list_voltage(struct regulator_dev *rdev, + unsigned int index) +{ + struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)]; + + return vreg->voltage_table[index] * 1000; +} + static struct regulator_ops pcap_regulator_ops = { - .list_voltage = regulator_list_voltage_table, + .list_voltage = pcap_regulator_list_voltage, .set_voltage_sel = pcap_regulator_set_voltage_sel, .get_voltage_sel = pcap_regulator_get_voltage_sel, .enable = pcap_regulator_enable, @@ -224,7 +236,6 @@ static struct regulator_ops pcap_regulator_ops = { .name = #_vreg, \ .id = _vreg, \ .n_voltages = ARRAY_SIZE(_vreg##_table), \ - .volt_table = _vreg##_table, \ .ops = &pcap_regulator_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ diff --git a/trunk/drivers/regulator/pcf50633-regulator.c b/trunk/drivers/regulator/pcf50633-regulator.c index 092e5cb848a1..3c9d14c0017b 100644 --- a/trunk/drivers/regulator/pcf50633-regulator.c +++ b/trunk/drivers/regulator/pcf50633-regulator.c @@ -100,12 +100,13 @@ static unsigned int ldo_voltage_value(u8 bits) return 900 + (bits * 100); } -static 
int pcf50633_regulator_map_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) +static int pcf50633_regulator_set_voltage(struct regulator_dev *rdev, + int min_uV, int max_uV, + unsigned *selector) { struct pcf50633 *pcf; int regulator_id, millivolts; - u8 volt_bits; + u8 volt_bits, regnr; pcf = rdev_get_drvdata(rdev); @@ -115,11 +116,15 @@ static int pcf50633_regulator_map_voltage(struct regulator_dev *rdev, millivolts = min_uV / 1000; + regnr = rdev->desc->vsel_reg; + switch (regulator_id) { case PCF50633_REGULATOR_AUTO: volt_bits = auto_voltage_bits(millivolts); break; case PCF50633_REGULATOR_DOWN1: + volt_bits = down_voltage_bits(millivolts); + break; case PCF50633_REGULATOR_DOWN2: volt_bits = down_voltage_bits(millivolts); break; @@ -137,7 +142,9 @@ static int pcf50633_regulator_map_voltage(struct regulator_dev *rdev, return -EINVAL; } - return volt_bits; + *selector = volt_bits; + + return pcf50633_reg_write(pcf, regnr, volt_bits); } static int pcf50633_regulator_list_voltage(struct regulator_dev *rdev, @@ -152,6 +159,8 @@ static int pcf50633_regulator_list_voltage(struct regulator_dev *rdev, millivolts = auto_voltage_value(index); break; case PCF50633_REGULATOR_DOWN1: + millivolts = down_voltage_value(index); + break; case PCF50633_REGULATOR_DOWN2: millivolts = down_voltage_value(index); break; @@ -173,10 +182,9 @@ static int pcf50633_regulator_list_voltage(struct regulator_dev *rdev, } static struct regulator_ops pcf50633_regulator_ops = { - .set_voltage_sel = regulator_set_voltage_sel_regmap, + .set_voltage = pcf50633_regulator_set_voltage, .get_voltage_sel = regulator_get_voltage_sel_regmap, .list_voltage = pcf50633_regulator_list_voltage, - .map_voltage = pcf50633_regulator_map_voltage, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, diff --git a/trunk/drivers/regulator/rc5t583-regulator.c b/trunk/drivers/regulator/rc5t583-regulator.c index 8bf4e8c9de9a..1d34e64a1307 100644 --- a/trunk/drivers/regulator/rc5t583-regulator.c +++ b/trunk/drivers/regulator/rc5t583-regulator.c @@ -42,6 +42,7 @@ struct rc5t583_regulator_info { /* Regulator specific turn-on delay and voltage settling time*/ int enable_uv_per_us; + int change_uv_per_us; /* Used by regulator core */ struct regulator_desc desc; @@ -65,6 +66,25 @@ static int rc5t583_regulator_enable_time(struct regulator_dev *rdev) return DIV_ROUND_UP(curr_uV, reg->reg_info->enable_uv_per_us); } +static int rc5t583_set_voltage_time_sel(struct regulator_dev *rdev, + unsigned int old_selector, unsigned int new_selector) +{ + struct rc5t583_regulator *reg = rdev_get_drvdata(rdev); + int old_uV, new_uV; + old_uV = regulator_list_voltage_linear(rdev, old_selector); + + if (old_uV < 0) + return old_uV; + + new_uV = regulator_list_voltage_linear(rdev, new_selector); + if (new_uV < 0) + return new_uV; + + return DIV_ROUND_UP(abs(old_uV - new_uV), + reg->reg_info->change_uv_per_us); +} + + static struct regulator_ops rc5t583_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, @@ -74,7 +94,7 @@ static struct regulator_ops rc5t583_ops = { .set_voltage_sel = regulator_set_voltage_sel_regmap, .list_voltage = regulator_list_voltage_linear, .map_voltage = regulator_map_voltage_linear, - .set_voltage_time_sel = regulator_set_voltage_time_sel, + .set_voltage_time_sel = rc5t583_set_voltage_time_sel, }; #define RC5T583_REG(_id, _en_reg, _en_bit, _disc_reg, _disc_bit, \ @@ -84,6 +104,7 @@ static struct regulator_ops rc5t583_ops = { .disc_bit = 
_disc_bit, \ .deepsleep_reg = RC5T583_REG_##_id##DAC_DS, \ .enable_uv_per_us = _enable_mv * 1000, \ + .change_uv_per_us = 40 * 1000, \ .deepsleep_id = RC5T583_DS_##_id, \ .desc = { \ .name = "rc5t583-regulator-"#_id, \ @@ -98,7 +119,6 @@ static struct regulator_ops rc5t583_ops = { .enable_mask = BIT(_en_bit), \ .min_uV = _min_mv * 1000, \ .uV_step = _step_uV, \ - .ramp_delay = 40 * 1000, \ }, \ } diff --git a/trunk/drivers/regulator/s2mps11.c b/trunk/drivers/regulator/s2mps11.c deleted file mode 100644 index 4669dc9ac74a..000000000000 --- a/trunk/drivers/regulator/s2mps11.c +++ /dev/null @@ -1,363 +0,0 @@ -/* - * s2mps11.c - * - * Copyright (c) 2012 Samsung Electronics Co., Ltd - * http://www.samsung.com - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct s2mps11_info { - struct regulator_dev **rdev; - - int ramp_delay2; - int ramp_delay34; - int ramp_delay5; - int ramp_delay16; - int ramp_delay7810; - int ramp_delay9; - - bool buck6_ramp; - bool buck2_ramp; - bool buck3_ramp; - bool buck4_ramp; -}; - -static int get_ramp_delay(int ramp_delay) -{ - unsigned char cnt = 0; - - ramp_delay /= 6; - - while (true) { - ramp_delay = ramp_delay >> 1; - if (ramp_delay == 0) - break; - cnt++; - } - return cnt; -} - -static struct regulator_ops s2mps11_ldo_ops = { - .list_voltage = regulator_list_voltage_linear, - .map_voltage = regulator_map_voltage_linear, - .is_enabled = regulator_is_enabled_regmap, - .enable = regulator_enable_regmap, - .disable = regulator_disable_regmap, - .get_voltage_sel = regulator_get_voltage_sel_regmap, - .set_voltage_sel = regulator_set_voltage_sel_regmap, - .set_voltage_time_sel = regulator_set_voltage_time_sel, -}; - -static struct regulator_ops s2mps11_buck_ops = { - .list_voltage = regulator_list_voltage_linear, - .map_voltage = regulator_map_voltage_linear, - .is_enabled = regulator_is_enabled_regmap, - .enable = regulator_enable_regmap, - .disable = regulator_disable_regmap, - .get_voltage_sel = regulator_get_voltage_sel_regmap, - .set_voltage_sel = regulator_set_voltage_sel_regmap, - .set_voltage_time_sel = regulator_set_voltage_time_sel, -}; - -#define regulator_desc_ldo1(num) { \ - .name = "LDO"#num, \ - .id = S2MPS11_LDO##num, \ - .ops = &s2mps11_ldo_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - .min_uV = S2MPS11_LDO_MIN, \ - .uV_step = S2MPS11_LDO_STEP1, \ - .n_voltages = S2MPS11_LDO_N_VOLTAGES, \ - .vsel_reg = S2MPS11_REG_L1CTRL + num - 1, \ - .vsel_mask = S2MPS11_LDO_VSEL_MASK, \ - .enable_reg = S2MPS11_REG_L1CTRL + num - 1, \ - .enable_mask = S2MPS11_ENABLE_MASK \ -} -#define regulator_desc_ldo2(num) { \ - .name = "LDO"#num, \ - .id = S2MPS11_LDO##num, \ - .ops = &s2mps11_ldo_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - .min_uV = S2MPS11_LDO_MIN, \ - .uV_step = S2MPS11_LDO_STEP2, \ - .n_voltages = S2MPS11_LDO_N_VOLTAGES, \ - .vsel_reg = S2MPS11_REG_L1CTRL + num - 1, \ - .vsel_mask = S2MPS11_LDO_VSEL_MASK, \ - .enable_reg = S2MPS11_REG_L1CTRL + num - 1, \ - .enable_mask = S2MPS11_ENABLE_MASK \ -} - -#define regulator_desc_buck1_4(num) { \ - .name = "BUCK"#num, \ - .id = S2MPS11_BUCK##num, \ - .ops = &s2mps11_buck_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - .min_uV = 
S2MPS11_BUCK_MIN1, \ - .uV_step = S2MPS11_BUCK_STEP1, \ - .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \ - .vsel_reg = S2MPS11_REG_B1CTRL2 + (num - 1) * 2, \ - .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \ - .enable_reg = S2MPS11_REG_B1CTRL1 + (num - 1) * 2, \ - .enable_mask = S2MPS11_ENABLE_MASK \ -} - -#define regulator_desc_buck5 { \ - .name = "BUCK5", \ - .id = S2MPS11_BUCK5, \ - .ops = &s2mps11_buck_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - .min_uV = S2MPS11_BUCK_MIN1, \ - .uV_step = S2MPS11_BUCK_STEP1, \ - .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \ - .vsel_reg = S2MPS11_REG_B5CTRL2, \ - .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \ - .enable_reg = S2MPS11_REG_B5CTRL1, \ - .enable_mask = S2MPS11_ENABLE_MASK \ -} - -#define regulator_desc_buck6_8(num) { \ - .name = "BUCK"#num, \ - .id = S2MPS11_BUCK##num, \ - .ops = &s2mps11_buck_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - .min_uV = S2MPS11_BUCK_MIN1, \ - .uV_step = S2MPS11_BUCK_STEP1, \ - .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \ - .vsel_reg = S2MPS11_REG_B6CTRL2 + (num - 6) * 2, \ - .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \ - .enable_reg = S2MPS11_REG_B6CTRL1 + (num - 6) * 2, \ - .enable_mask = S2MPS11_ENABLE_MASK \ -} - -#define regulator_desc_buck9 { \ - .name = "BUCK9", \ - .id = S2MPS11_BUCK9, \ - .ops = &s2mps11_buck_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - .min_uV = S2MPS11_BUCK_MIN3, \ - .uV_step = S2MPS11_BUCK_STEP3, \ - .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \ - .vsel_reg = S2MPS11_REG_B9CTRL2, \ - .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \ - .enable_reg = S2MPS11_REG_B9CTRL1, \ - .enable_mask = S2MPS11_ENABLE_MASK \ -} - -#define regulator_desc_buck10 { \ - .name = "BUCK10", \ - .id = S2MPS11_BUCK10, \ - .ops = &s2mps11_buck_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - .min_uV = S2MPS11_BUCK_MIN2, \ - .uV_step = S2MPS11_BUCK_STEP2, \ - .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \ - .vsel_reg = S2MPS11_REG_B9CTRL2, \ - .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \ - .enable_reg = S2MPS11_REG_B9CTRL1, \ - .enable_mask = S2MPS11_ENABLE_MASK \ -} - -static struct regulator_desc regulators[] = { - regulator_desc_ldo2(1), - regulator_desc_ldo1(2), - regulator_desc_ldo1(3), - regulator_desc_ldo1(4), - regulator_desc_ldo1(5), - regulator_desc_ldo2(6), - regulator_desc_ldo1(7), - regulator_desc_ldo1(8), - regulator_desc_ldo1(9), - regulator_desc_ldo1(10), - regulator_desc_ldo2(11), - regulator_desc_ldo1(12), - regulator_desc_ldo1(13), - regulator_desc_ldo1(14), - regulator_desc_ldo1(15), - regulator_desc_ldo1(16), - regulator_desc_ldo1(17), - regulator_desc_ldo1(18), - regulator_desc_ldo1(19), - regulator_desc_ldo1(20), - regulator_desc_ldo1(21), - regulator_desc_ldo2(22), - regulator_desc_ldo2(23), - regulator_desc_ldo1(24), - regulator_desc_ldo1(25), - regulator_desc_ldo1(26), - regulator_desc_ldo2(27), - regulator_desc_ldo1(28), - regulator_desc_ldo1(29), - regulator_desc_ldo1(30), - regulator_desc_ldo1(31), - regulator_desc_ldo1(32), - regulator_desc_ldo1(33), - regulator_desc_ldo1(34), - regulator_desc_ldo1(35), - regulator_desc_ldo1(36), - regulator_desc_ldo1(37), - regulator_desc_ldo1(38), - regulator_desc_buck1_4(1), - regulator_desc_buck1_4(2), - regulator_desc_buck1_4(3), - regulator_desc_buck1_4(4), - regulator_desc_buck5, - regulator_desc_buck6_8(6), - regulator_desc_buck6_8(7), - regulator_desc_buck6_8(8), - regulator_desc_buck9, - regulator_desc_buck10, -}; - -static __devinit int s2mps11_pmic_probe(struct platform_device *pdev) -{ - struct sec_pmic_dev *iodev = 
-	struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
-	struct sec_platform_data *pdata = dev_get_platdata(iodev->dev);
-	struct regulator_config config = { };
-	struct regulator_dev **rdev;
-	struct s2mps11_info *s2mps11;
-	int i, ret, size;
-	unsigned char ramp_enable, ramp_reg = 0;
-
-	if (!pdata) {
-		dev_err(pdev->dev.parent, "Platform data not supplied\n");
-		return -ENODEV;
-	}
-
-	s2mps11 = devm_kzalloc(&pdev->dev, sizeof(struct s2mps11_info),
-				GFP_KERNEL);
-	if (!s2mps11)
-		return -ENOMEM;
-
-	size = sizeof(struct regulator_dev *) * S2MPS11_REGULATOR_MAX;
-	s2mps11->rdev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
-	if (!s2mps11->rdev) {
-		return -ENOMEM;
-	}
-
-	rdev = s2mps11->rdev;
-	platform_set_drvdata(pdev, s2mps11);
-
-	s2mps11->ramp_delay2 = pdata->buck2_ramp_delay;
-	s2mps11->ramp_delay34 = pdata->buck34_ramp_delay;
-	s2mps11->ramp_delay5 = pdata->buck5_ramp_delay;
-	s2mps11->ramp_delay16 = pdata->buck16_ramp_delay;
-	s2mps11->ramp_delay7810 = pdata->buck7810_ramp_delay;
-	s2mps11->ramp_delay9 = pdata->buck9_ramp_delay;
-
-	s2mps11->buck6_ramp = pdata->buck6_ramp_enable;
-	s2mps11->buck2_ramp = pdata->buck2_ramp_enable;
-	s2mps11->buck3_ramp = pdata->buck3_ramp_enable;
-	s2mps11->buck4_ramp = pdata->buck4_ramp_enable;
-
-	ramp_enable = (s2mps11->buck2_ramp << 3) | (s2mps11->buck3_ramp << 2) |
-		(s2mps11->buck4_ramp << 1) | s2mps11->buck6_ramp ;
-
-	if (ramp_enable) {
-		if (s2mps11->buck2_ramp)
-			ramp_reg |= get_ramp_delay(s2mps11->ramp_delay2) >> 6;
-		if (s2mps11->buck3_ramp || s2mps11->buck4_ramp)
-			ramp_reg |= get_ramp_delay(s2mps11->ramp_delay34) >> 4;
-		sec_reg_write(iodev, S2MPS11_REG_RAMP, ramp_reg | ramp_enable);
-	}
-
-	ramp_reg &= 0x00;
-	ramp_reg |= get_ramp_delay(s2mps11->ramp_delay5) >> 6;
-	ramp_reg |= get_ramp_delay(s2mps11->ramp_delay16) >> 4;
-	ramp_reg |= get_ramp_delay(s2mps11->ramp_delay7810) >> 2;
-	ramp_reg |= get_ramp_delay(s2mps11->ramp_delay9);
-	sec_reg_write(iodev, S2MPS11_REG_RAMP_BUCK, ramp_reg);
-
-	for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) {
-
-		config.dev = &pdev->dev;
-		config.regmap = iodev->regmap;
-		config.init_data = pdata->regulators[i].initdata;
-		config.driver_data = s2mps11;
-
-		rdev[i] = regulator_register(&regulators[i], &config);
-		if (IS_ERR(rdev[i])) {
-			ret = PTR_ERR(rdev[i]);
-			dev_err(&pdev->dev, "regulator init failed for %d\n",
-				i);
-			rdev[i] = NULL;
-			goto err;
-		}
-	}
-
-	return 0;
-err:
-	for (i = 0; i < S2MPS11_REGULATOR_MAX; i++)
-		if (rdev[i])
-			regulator_unregister(rdev[i]);
-
-	return ret;
-}
-
-static int __devexit s2mps11_pmic_remove(struct platform_device *pdev)
-{
-	struct s2mps11_info *s2mps11 = platform_get_drvdata(pdev);
-	struct regulator_dev **rdev = s2mps11->rdev;
-	int i;
-
-	for (i = 0; i < S2MPS11_REGULATOR_MAX; i++)
-		if (rdev[i])
-			regulator_unregister(rdev[i]);
-
-	return 0;
-}
-
-static const struct platform_device_id s2mps11_pmic_id[] = {
-	{ "s2mps11-pmic", 0},
-	{ },
-};
-MODULE_DEVICE_TABLE(platform, s2mps11_pmic_id);
-
-static struct platform_driver s2mps11_pmic_driver = {
-	.driver = {
-		.name = "s2mps11-pmic",
-		.owner = THIS_MODULE,
-	},
-	.probe = s2mps11_pmic_probe,
-	.remove = __devexit_p(s2mps11_pmic_remove),
-	.id_table = s2mps11_pmic_id,
-};
-
-static int __init s2mps11_pmic_init(void)
-{
-	return platform_driver_register(&s2mps11_pmic_driver);
-}
-subsys_initcall(s2mps11_pmic_init);
-
-static void __exit s2mps11_pmic_exit(void)
-{
-	platform_driver_unregister(&s2mps11_pmic_driver);
-}
-module_exit(s2mps11_pmic_exit);
-
-/* Module information */
-MODULE_AUTHOR("Sangbeom Kim ");
-MODULE_DESCRIPTION("SAMSUNG S2MPS11 Regulator Driver"); -MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/regulator/s5m8767.c b/trunk/drivers/regulator/s5m8767.c index 102287fa7ecb..9caadb482178 100644 --- a/trunk/drivers/regulator/s5m8767.c +++ b/trunk/drivers/regulator/s5m8767.c @@ -41,7 +41,6 @@ struct s5m8767_info { u8 buck3_vol[8]; u8 buck4_vol[8]; int buck_gpios[3]; - int buck_ds[3]; int buck_gpioindex; }; @@ -121,6 +120,27 @@ static const struct s5m_voltage_desc *reg_voltage_map[] = { [S5M8767_BUCK9] = &buck_voltage_val3, }; +static int s5m8767_list_voltage(struct regulator_dev *rdev, + unsigned int selector) +{ + const struct s5m_voltage_desc *desc; + int reg_id = rdev_get_id(rdev); + int val; + + if (reg_id >= ARRAY_SIZE(reg_voltage_map) || reg_id < 0) + return -EINVAL; + + desc = reg_voltage_map[reg_id]; + if (desc == NULL) + return -EINVAL; + + val = desc->min + desc->step * selector; + if (val > desc->max) + return -EINVAL; + + return val; +} + static unsigned int s5m8767_opmode_reg[][4] = { /* {OFF, ON, LOWPOWER, SUSPEND} */ /* LDO1 ... LDO28 */ @@ -263,17 +283,17 @@ static int s5m8767_get_voltage_register(struct regulator_dev *rdev, int *_reg) reg = S5M8767_REG_BUCK1CTRL2; break; case S5M8767_BUCK2: - reg = S5M8767_REG_BUCK2DVS2; + reg = S5M8767_REG_BUCK2DVS1; if (s5m8767->buck2_gpiodvs) reg += s5m8767->buck_gpioindex; break; case S5M8767_BUCK3: - reg = S5M8767_REG_BUCK3DVS2; + reg = S5M8767_REG_BUCK3DVS1; if (s5m8767->buck3_gpiodvs) reg += s5m8767->buck_gpioindex; break; case S5M8767_BUCK4: - reg = S5M8767_REG_BUCK4DVS2; + reg = S5M8767_REG_BUCK4DVS1; if (s5m8767->buck4_gpiodvs) reg += s5m8767->buck_gpioindex; break; @@ -337,34 +357,32 @@ static int s5m8767_convert_voltage_to_sel( return selector; } -static inline int s5m8767_set_high(struct s5m8767_info *s5m8767) +static inline void s5m8767_set_high(struct s5m8767_info *s5m8767) { int temp_index = s5m8767->buck_gpioindex; gpio_set_value(s5m8767->buck_gpios[0], (temp_index >> 2) & 0x1); gpio_set_value(s5m8767->buck_gpios[1], (temp_index >> 1) & 0x1); gpio_set_value(s5m8767->buck_gpios[2], temp_index & 0x1); - - return 0; } -static inline int s5m8767_set_low(struct s5m8767_info *s5m8767) +static inline void s5m8767_set_low(struct s5m8767_info *s5m8767) { int temp_index = s5m8767->buck_gpioindex; gpio_set_value(s5m8767->buck_gpios[2], temp_index & 0x1); gpio_set_value(s5m8767->buck_gpios[1], (temp_index >> 1) & 0x1); gpio_set_value(s5m8767->buck_gpios[0], (temp_index >> 2) & 0x1); - - return 0; } -static int s5m8767_set_voltage_sel(struct regulator_dev *rdev, - unsigned selector) +static int s5m8767_set_voltage(struct regulator_dev *rdev, + int min_uV, int max_uV, unsigned *selector) { struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev); + const struct s5m_voltage_desc *desc; int reg_id = rdev_get_id(rdev); - int reg, mask, ret = 0, old_index, index = 0; + int sel, reg, mask, ret = 0, old_index, index = 0; + u8 val; u8 *buck234_vol = NULL; switch (reg_id) { @@ -389,9 +407,15 @@ static int s5m8767_set_voltage_sel(struct regulator_dev *rdev, return -EINVAL; } + desc = reg_voltage_map[reg_id]; + + sel = s5m8767_convert_voltage_to_sel(desc, min_uV, max_uV); + if (sel < 0) + return sel; + /* buck234_vol != NULL means to control buck234 voltage via DVS GPIO */ if (buck234_vol) { - while (*buck234_vol != selector) { + while (*buck234_vol != sel) { buck234_vol++; index++; } @@ -399,16 +423,22 @@ static int s5m8767_set_voltage_sel(struct regulator_dev *rdev, s5m8767->buck_gpioindex = index; if (index > old_index) - return 
s5m8767_set_high(s5m8767); + s5m8767_set_high(s5m8767); else - return s5m8767_set_low(s5m8767); + s5m8767_set_low(s5m8767); } else { ret = s5m8767_get_voltage_register(rdev, ®); if (ret) return ret; - return s5m_reg_update(s5m8767->iodev, reg, selector, mask); + s5m_reg_read(s5m8767->iodev, reg, &val); + val = (val & ~mask) | sel; + + ret = s5m_reg_write(s5m8767->iodev, reg, val); } + + *selector = sel; + return ret; } static int s5m8767_set_voltage_time_sel(struct regulator_dev *rdev, @@ -428,21 +458,15 @@ static int s5m8767_set_voltage_time_sel(struct regulator_dev *rdev, } static struct regulator_ops s5m8767_ops = { - .list_voltage = regulator_list_voltage_linear, + .list_voltage = s5m8767_list_voltage, .is_enabled = s5m8767_reg_is_enabled, .enable = s5m8767_reg_enable, .disable = s5m8767_reg_disable, .get_voltage_sel = s5m8767_get_voltage_sel, - .set_voltage_sel = s5m8767_set_voltage_sel, + .set_voltage = s5m8767_set_voltage, .set_voltage_time_sel = s5m8767_set_voltage_time_sel, }; -static struct regulator_ops s5m8767_buck78_ops = { - .is_enabled = s5m8767_reg_is_enabled, - .enable = s5m8767_reg_enable, - .disable = s5m8767_reg_disable, -}; - #define s5m8767_regulator_desc(_name) { \ .name = #_name, \ .id = S5M8767_##_name, \ @@ -451,14 +475,6 @@ static struct regulator_ops s5m8767_buck78_ops = { .owner = THIS_MODULE, \ } -#define s5m8767_regulator_buck78_desc(_name) { \ - .name = #_name, \ - .id = S5M8767_##_name, \ - .ops = &s5m8767_buck78_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ -} - static struct regulator_desc regulators[] = { s5m8767_regulator_desc(LDO1), s5m8767_regulator_desc(LDO2), @@ -494,8 +510,8 @@ static struct regulator_desc regulators[] = { s5m8767_regulator_desc(BUCK4), s5m8767_regulator_desc(BUCK5), s5m8767_regulator_desc(BUCK6), - s5m8767_regulator_buck78_desc(BUCK7), - s5m8767_regulator_buck78_desc(BUCK8), + s5m8767_regulator_desc(BUCK7), + s5m8767_regulator_desc(BUCK8), s5m8767_regulator_desc(BUCK9), }; @@ -506,7 +522,7 @@ static __devinit int s5m8767_pmic_probe(struct platform_device *pdev) struct regulator_config config = { }; struct regulator_dev **rdev; struct s5m8767_info *s5m8767; - int i, ret, size, buck_init; + int i, ret, size; if (!pdata) { dev_err(pdev->dev.parent, "Platform data not supplied\n"); @@ -557,37 +573,12 @@ static __devinit int s5m8767_pmic_probe(struct platform_device *pdev) s5m8767->buck_gpios[0] = pdata->buck_gpios[0]; s5m8767->buck_gpios[1] = pdata->buck_gpios[1]; s5m8767->buck_gpios[2] = pdata->buck_gpios[2]; - s5m8767->buck_ds[0] = pdata->buck_ds[0]; - s5m8767->buck_ds[1] = pdata->buck_ds[1]; - s5m8767->buck_ds[2] = pdata->buck_ds[2]; - s5m8767->ramp_delay = pdata->buck_ramp_delay; s5m8767->buck2_ramp = pdata->buck2_ramp_enable; s5m8767->buck3_ramp = pdata->buck3_ramp_enable; s5m8767->buck4_ramp = pdata->buck4_ramp_enable; s5m8767->opmode = pdata->opmode; - buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2, - pdata->buck2_init, - pdata->buck2_init + - buck_voltage_val2.step); - - s5m_reg_write(s5m8767->iodev, S5M8767_REG_BUCK2DVS2, buck_init); - - buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2, - pdata->buck3_init, - pdata->buck3_init + - buck_voltage_val2.step); - - s5m_reg_write(s5m8767->iodev, S5M8767_REG_BUCK3DVS2, buck_init); - - buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2, - pdata->buck4_init, - pdata->buck4_init + - buck_voltage_val2.step); - - s5m_reg_write(s5m8767->iodev, S5M8767_REG_BUCK4DVS2, buck_init); - for (i = 0; i < 8; i++) { if (s5m8767->buck2_gpiodvs) 
{ s5m8767->buck2_vol[i] = @@ -617,70 +608,48 @@ static __devinit int s5m8767_pmic_probe(struct platform_device *pdev) } } - if (gpio_is_valid(pdata->buck_gpios[0]) && - gpio_is_valid(pdata->buck_gpios[1]) && - gpio_is_valid(pdata->buck_gpios[2])) { - ret = devm_gpio_request(&pdev->dev, pdata->buck_gpios[0], - "S5M8767 SET1"); - if (ret) - return ret; - - ret = devm_gpio_request(&pdev->dev, pdata->buck_gpios[1], - "S5M8767 SET2"); - if (ret) - return ret; - - ret = devm_gpio_request(&pdev->dev, pdata->buck_gpios[2], - "S5M8767 SET3"); - if (ret) + if (pdata->buck2_gpiodvs || pdata->buck3_gpiodvs || + pdata->buck4_gpiodvs) { + if (gpio_is_valid(pdata->buck_gpios[0]) && + gpio_is_valid(pdata->buck_gpios[1]) && + gpio_is_valid(pdata->buck_gpios[2])) { + ret = gpio_request(pdata->buck_gpios[0], + "S5M8767 SET1"); + if (ret == -EBUSY) + dev_warn(&pdev->dev, "Duplicated gpio request for SET1\n"); + + ret = gpio_request(pdata->buck_gpios[1], + "S5M8767 SET2"); + if (ret == -EBUSY) + dev_warn(&pdev->dev, "Duplicated gpio request for SET2\n"); + + ret = gpio_request(pdata->buck_gpios[2], + "S5M8767 SET3"); + if (ret == -EBUSY) + dev_warn(&pdev->dev, "Duplicated gpio request for SET3\n"); + /* SET1 GPIO */ + gpio_direction_output(pdata->buck_gpios[0], + (s5m8767->buck_gpioindex >> 2) & 0x1); + /* SET2 GPIO */ + gpio_direction_output(pdata->buck_gpios[1], + (s5m8767->buck_gpioindex >> 1) & 0x1); + /* SET3 GPIO */ + gpio_direction_output(pdata->buck_gpios[2], + (s5m8767->buck_gpioindex >> 0) & 0x1); + ret = 0; + } else { + dev_err(&pdev->dev, "GPIO NOT VALID\n"); + ret = -EINVAL; return ret; - - /* SET1 GPIO */ - gpio_direction_output(pdata->buck_gpios[0], - (s5m8767->buck_gpioindex >> 2) & 0x1); - /* SET2 GPIO */ - gpio_direction_output(pdata->buck_gpios[1], - (s5m8767->buck_gpioindex >> 1) & 0x1); - /* SET3 GPIO */ - gpio_direction_output(pdata->buck_gpios[2], - (s5m8767->buck_gpioindex >> 0) & 0x1); - } else { - dev_err(&pdev->dev, "GPIO NOT VALID\n"); - ret = -EINVAL; - return ret; + } } - ret = devm_gpio_request(&pdev->dev, pdata->buck_ds[0], "S5M8767 DS2"); - if (ret) - return ret; - - ret = devm_gpio_request(&pdev->dev, pdata->buck_ds[1], "S5M8767 DS3"); - if (ret) - return ret; - - ret = devm_gpio_request(&pdev->dev, pdata->buck_ds[2], "S5M8767 DS4"); - if (ret) - return ret; - - /* DS2 GPIO */ - gpio_direction_output(pdata->buck_ds[0], 0x0); - /* DS3 GPIO */ - gpio_direction_output(pdata->buck_ds[1], 0x0); - /* DS4 GPIO */ - gpio_direction_output(pdata->buck_ds[2], 0x0); - - if (pdata->buck2_gpiodvs || pdata->buck3_gpiodvs || - pdata->buck4_gpiodvs) { - s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK2CTRL, - (pdata->buck2_gpiodvs) ? (1 << 1) : (0 << 1), - 1 << 1); - s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK3CTRL, - (pdata->buck3_gpiodvs) ? (1 << 1) : (0 << 1), - 1 << 1); - s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK4CTRL, - (pdata->buck4_gpiodvs) ? (1 << 1) : (0 << 1), - 1 << 1); - } + s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK2CTRL, + (pdata->buck2_gpiodvs) ? (1 << 1) : (0 << 1), 1 << 1); + s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK3CTRL, + (pdata->buck3_gpiodvs) ? (1 << 1) : (0 << 1), 1 << 1); + s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK4CTRL, + (pdata->buck4_gpiodvs) ? 
(1 << 1) : (0 << 1), 1 << 1); /* Initialize GPIO DVS registers */ for (i = 0; i < 8; i++) { @@ -699,6 +668,9 @@ static __devinit int s5m8767_pmic_probe(struct platform_device *pdev) s5m8767->buck4_vol[i]); } } + s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK2CTRL, 0x78, 0xff); + s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK3CTRL, 0x58, 0xff); + s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK4CTRL, 0x78, 0xff); if (s5m8767->buck2_ramp) s5m_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP, 0x08, 0x08); @@ -712,13 +684,9 @@ static __devinit int s5m8767_pmic_probe(struct platform_device *pdev) if (s5m8767->buck2_ramp || s5m8767->buck3_ramp || s5m8767->buck4_ramp) { switch (s5m8767->ramp_delay) { - case 5: + case 15: s5m_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP, - 0x40, 0xf0); - break; - case 10: - s5m_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP, - 0x90, 0xf0); + 0xc0, 0xf0); break; case 25: s5m_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP, @@ -743,12 +711,9 @@ static __devinit int s5m8767_pmic_probe(struct platform_device *pdev) int id = pdata->regulators[i].id; desc = reg_voltage_map[id]; - if (desc) { + if (desc) regulators[id].n_voltages = (desc->max - desc->min) / desc->step + 1; - regulators[id].min_uV = desc->min; - regulators[id].uV_step = desc->step; - } config.dev = s5m8767->dev; config.init_data = pdata->regulators[i].initdata; diff --git a/trunk/drivers/regulator/tps6105x-regulator.c b/trunk/drivers/regulator/tps6105x-regulator.c index 1378409efaec..d840d8440a91 100644 --- a/trunk/drivers/regulator/tps6105x-regulator.c +++ b/trunk/drivers/regulator/tps6105x-regulator.c @@ -20,7 +20,7 @@ #include #include -static const unsigned int tps6105x_voltages[] = { +static const int tps6105x_voltages[] = { 4500000, 5000000, 5250000, @@ -105,13 +105,22 @@ static int tps6105x_regulator_set_voltage_sel(struct regulator_dev *rdev, return 0; } +static int tps6105x_regulator_list_voltage(struct regulator_dev *rdev, + unsigned selector) +{ + if (selector >= ARRAY_SIZE(tps6105x_voltages)) + return -EINVAL; + + return tps6105x_voltages[selector]; +} + static struct regulator_ops tps6105x_regulator_ops = { .enable = tps6105x_regulator_enable, .disable = tps6105x_regulator_disable, .is_enabled = tps6105x_regulator_is_enabled, .get_voltage_sel = tps6105x_regulator_get_voltage_sel, .set_voltage_sel = tps6105x_regulator_set_voltage_sel, - .list_voltage = regulator_list_voltage_table, + .list_voltage = tps6105x_regulator_list_voltage, }; static const struct regulator_desc tps6105x_regulator_desc = { @@ -121,7 +130,6 @@ static const struct regulator_desc tps6105x_regulator_desc = { .id = 0, .owner = THIS_MODULE, .n_voltages = ARRAY_SIZE(tps6105x_voltages), - .volt_table = tps6105x_voltages, }; /* diff --git a/trunk/drivers/regulator/tps62360-regulator.c b/trunk/drivers/regulator/tps62360-regulator.c index 68729a7c8709..e534269ed44a 100644 --- a/trunk/drivers/regulator/tps62360-regulator.c +++ b/trunk/drivers/regulator/tps62360-regulator.c @@ -65,8 +65,10 @@ struct tps62360_chip { struct regulator_desc desc; struct regulator_dev *rdev; struct regmap *regmap; + int chip_id; int vsel0_gpio; int vsel1_gpio; + int voltage_base; u8 voltage_reg_mask; bool en_internal_pulldn; bool en_discharge; @@ -74,6 +76,7 @@ struct tps62360_chip { int lru_index[4]; int curr_vset_vsel[4]; int curr_vset_id; + int change_uv_per_us; }; /* @@ -172,6 +175,23 @@ static int tps62360_dcdc_set_voltage_sel(struct regulator_dev *dev, return 0; } +static int tps62360_set_voltage_time_sel(struct regulator_dev *rdev, + unsigned int 
old_selector, unsigned int new_selector) +{ + struct tps62360_chip *tps = rdev_get_drvdata(rdev); + int old_uV, new_uV; + + old_uV = regulator_list_voltage_linear(rdev, old_selector); + if (old_uV < 0) + return old_uV; + + new_uV = regulator_list_voltage_linear(rdev, new_selector); + if (new_uV < 0) + return new_uV; + + return DIV_ROUND_UP(abs(old_uV - new_uV), tps->change_uv_per_us); +} + static int tps62360_set_mode(struct regulator_dev *rdev, unsigned int mode) { struct tps62360_chip *tps = rdev_get_drvdata(rdev); @@ -238,7 +258,7 @@ static struct regulator_ops tps62360_dcdc_ops = { .set_voltage_sel = tps62360_dcdc_set_voltage_sel, .list_voltage = regulator_list_voltage_linear, .map_voltage = regulator_map_voltage_linear, - .set_voltage_time_sel = regulator_set_voltage_time_sel, + .set_voltage_time_sel = tps62360_set_voltage_time_sel, .set_mode = tps62360_set_mode, .get_mode = tps62360_get_mode, }; @@ -281,7 +301,7 @@ static int __devinit tps62360_init_dcdc(struct tps62360_chip *tps, ramp_ctrl = (ramp_ctrl >> 4) & 0x7; /* ramp mV/us = 32/(2^ramp_ctrl) */ - tps->desc.ramp_delay = DIV_ROUND_UP(32000, BIT(ramp_ctrl)); + tps->change_uv_per_us = DIV_ROUND_UP(32000, BIT(ramp_ctrl)); return ret; } @@ -388,13 +408,13 @@ static int __devinit tps62360_probe(struct i2c_client *client, switch (chip_id) { case TPS62360: case TPS62362: - tps->desc.min_uV = TPS62360_BASE_VOLTAGE; + tps->voltage_base = TPS62360_BASE_VOLTAGE; tps->voltage_reg_mask = 0x3F; tps->desc.n_voltages = TPS62360_N_VOLTAGES; break; case TPS62361: case TPS62363: - tps->desc.min_uV = TPS62361_BASE_VOLTAGE; + tps->voltage_base = TPS62361_BASE_VOLTAGE; tps->voltage_reg_mask = 0x7F; tps->desc.n_voltages = TPS62361_N_VOLTAGES; break; @@ -407,6 +427,7 @@ static int __devinit tps62360_probe(struct i2c_client *client, tps->desc.ops = &tps62360_dcdc_ops; tps->desc.type = REGULATOR_VOLTAGE; tps->desc.owner = THIS_MODULE; + tps->desc.min_uV = tps->voltage_base; tps->desc.uV_step = 10000; tps->regmap = devm_regmap_init_i2c(client, &tps62360_regmap_config); @@ -428,24 +449,24 @@ static int __devinit tps62360_probe(struct i2c_client *client, int gpio_flags; gpio_flags = (pdata->vsel0_def_state) ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW; - ret = devm_gpio_request_one(&client->dev, tps->vsel0_gpio, + ret = gpio_request_one(tps->vsel0_gpio, gpio_flags, "tps62360-vsel0"); if (ret) { dev_err(&client->dev, "%s(): Could not obtain vsel0 GPIO %d: %d\n", __func__, tps->vsel0_gpio, ret); - return ret; + goto err_gpio0; } gpio_flags = (pdata->vsel1_def_state) ? 
GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW; - ret = devm_gpio_request_one(&client->dev, tps->vsel1_gpio, + ret = gpio_request_one(tps->vsel1_gpio, gpio_flags, "tps62360-vsel1"); if (ret) { dev_err(&client->dev, "%s(): Could not obtain vsel1 GPIO %d: %d\n", __func__, tps->vsel1_gpio, ret); - return ret; + goto err_gpio1; } tps->valid_gpios = true; @@ -463,7 +484,7 @@ static int __devinit tps62360_probe(struct i2c_client *client, if (ret < 0) { dev_err(tps->dev, "%s(): Init failed with err = %d\n", __func__, ret); - return ret; + goto err_init; } config.dev = &client->dev; @@ -477,11 +498,21 @@ static int __devinit tps62360_probe(struct i2c_client *client, dev_err(tps->dev, "%s(): regulator register failed with err %s\n", __func__, id->name); - return PTR_ERR(rdev); + ret = PTR_ERR(rdev); + goto err_init; } tps->rdev = rdev; return 0; + +err_init: + if (gpio_is_valid(tps->vsel1_gpio)) + gpio_free(tps->vsel1_gpio); +err_gpio1: + if (gpio_is_valid(tps->vsel0_gpio)) + gpio_free(tps->vsel0_gpio); +err_gpio0: + return ret; } /** @@ -494,6 +525,12 @@ static int __devexit tps62360_remove(struct i2c_client *client) { struct tps62360_chip *tps = i2c_get_clientdata(client); + if (gpio_is_valid(tps->vsel1_gpio)) + gpio_free(tps->vsel1_gpio); + + if (gpio_is_valid(tps->vsel0_gpio)) + gpio_free(tps->vsel0_gpio); + regulator_unregister(tps->rdev); return 0; } diff --git a/trunk/drivers/regulator/tps65023-regulator.c b/trunk/drivers/regulator/tps65023-regulator.c index 6998d579d07b..8f1be8586c72 100644 --- a/trunk/drivers/regulator/tps65023-regulator.c +++ b/trunk/drivers/regulator/tps65023-regulator.c @@ -69,6 +69,10 @@ #define TPS65023_REG_CTRL2_DCDC1 BIT(1) #define TPS65023_REG_CTRL2_DCDC3 BIT(0) +/* LDO_CTRL bitfields */ +#define TPS65023_LDO_CTRL_LDOx_SHIFT(ldo_id) ((ldo_id)*4) +#define TPS65023_LDO_CTRL_LDOx_MASK(ldo_id) (0x07 << ((ldo_id)*4)) + /* Number of step-down converters available */ #define TPS65023_NUM_DCDC 3 /* Number of LDO voltage regulators available */ @@ -87,53 +91,48 @@ #define TPS65023_MAX_REG_ID TPS65023_LDO_2 /* Supported voltage values for regulators */ -static const unsigned int VCORE_VSEL_table[] = { - 800000, 825000, 850000, 875000, - 900000, 925000, 950000, 975000, - 1000000, 1025000, 1050000, 1075000, - 1100000, 1125000, 1150000, 1175000, - 1200000, 1225000, 1250000, 1275000, - 1300000, 1325000, 1350000, 1375000, - 1400000, 1425000, 1450000, 1475000, - 1500000, 1525000, 1550000, 1600000, -}; - -static const unsigned int DCDC_FIXED_3300000_VSEL_table[] = { - 3300000, -}; - -static const unsigned int DCDC_FIXED_1800000_VSEL_table[] = { - 1800000, +static const u16 VCORE_VSEL_table[] = { + 800, 825, 850, 875, + 900, 925, 950, 975, + 1000, 1025, 1050, 1075, + 1100, 1125, 1150, 1175, + 1200, 1225, 1250, 1275, + 1300, 1325, 1350, 1375, + 1400, 1425, 1450, 1475, + 1500, 1525, 1550, 1600, }; /* Supported voltage values for LDO regulators for tps65020 */ -static const unsigned int TPS65020_LDO1_VSEL_table[] = { - 1000000, 1050000, 1100000, 1300000, - 1800000, 2500000, 3000000, 3300000, +static const u16 TPS65020_LDO1_VSEL_table[] = { + 1000, 1050, 1100, 1300, + 1800, 2500, 3000, 3300, }; -static const unsigned int TPS65020_LDO2_VSEL_table[] = { - 1000000, 1050000, 1100000, 1300000, - 1800000, 2500000, 3000000, 3300000, +static const u16 TPS65020_LDO2_VSEL_table[] = { + 1000, 1050, 1100, 1300, + 1800, 2500, 3000, 3300, }; /* Supported voltage values for LDO regulators * for tps65021 and tps65023 */ -static const unsigned int TPS65023_LDO1_VSEL_table[] = { - 1000000, 1100000, 1300000, 
1800000, - 2200000, 2600000, 2800000, 3150000, +static const u16 TPS65023_LDO1_VSEL_table[] = { + 1000, 1100, 1300, 1800, + 2200, 2600, 2800, 3150, }; -static const unsigned int TPS65023_LDO2_VSEL_table[] = { - 1050000, 1200000, 1300000, 1800000, - 2500000, 2800000, 3000000, 3300000, +static const u16 TPS65023_LDO2_VSEL_table[] = { + 1050, 1200, 1300, 1800, + 2500, 2800, 3000, 3300, }; /* Regulator specific details */ struct tps_info { const char *name; + unsigned min_uV; + unsigned max_uV; + bool fixed; u8 table_len; - const unsigned int *table; + const u16 *table; }; /* PMIC details */ @@ -151,7 +150,7 @@ struct tps_driver_data { u8 core_regulator; }; -static int tps65023_dcdc_get_voltage_sel(struct regulator_dev *dev) +static int tps65023_dcdc_get_voltage(struct regulator_dev *dev) { struct tps_pmic *tps = rdev_get_drvdata(dev); int ret; @@ -165,9 +164,9 @@ static int tps65023_dcdc_get_voltage_sel(struct regulator_dev *dev) if (ret != 0) return ret; data &= (tps->info[dcdc]->table_len - 1); - return data; + return tps->info[dcdc]->table[data] * 1000; } else - return 0; + return tps->info[dcdc]->min_uV; } static int tps65023_dcdc_set_voltage_sel(struct regulator_dev *dev, @@ -194,14 +193,76 @@ static int tps65023_dcdc_set_voltage_sel(struct regulator_dev *dev, return ret; } +static int tps65023_ldo_get_voltage(struct regulator_dev *dev) +{ + struct tps_pmic *tps = rdev_get_drvdata(dev); + int data, ldo = rdev_get_id(dev); + int ret; + + if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2) + return -EINVAL; + + ret = regmap_read(tps->regmap, TPS65023_REG_LDO_CTRL, &data); + if (ret != 0) + return ret; + + data >>= (TPS65023_LDO_CTRL_LDOx_SHIFT(ldo - TPS65023_LDO_1)); + data &= (tps->info[ldo]->table_len - 1); + return tps->info[ldo]->table[data] * 1000; +} + +static int tps65023_ldo_set_voltage_sel(struct regulator_dev *dev, + unsigned selector) +{ + struct tps_pmic *tps = rdev_get_drvdata(dev); + int ldo_index = rdev_get_id(dev) - TPS65023_LDO_1; + + return regmap_update_bits(tps->regmap, TPS65023_REG_LDO_CTRL, + TPS65023_LDO_CTRL_LDOx_MASK(ldo_index), + selector << TPS65023_LDO_CTRL_LDOx_SHIFT(ldo_index)); +} + +static int tps65023_dcdc_list_voltage(struct regulator_dev *dev, + unsigned selector) +{ + struct tps_pmic *tps = rdev_get_drvdata(dev); + int dcdc = rdev_get_id(dev); + + if (dcdc < TPS65023_DCDC_1 || dcdc > TPS65023_DCDC_3) + return -EINVAL; + + if (dcdc == tps->core_regulator) { + if (selector >= tps->info[dcdc]->table_len) + return -EINVAL; + else + return tps->info[dcdc]->table[selector] * 1000; + } else + return tps->info[dcdc]->min_uV; +} + +static int tps65023_ldo_list_voltage(struct regulator_dev *dev, + unsigned selector) +{ + struct tps_pmic *tps = rdev_get_drvdata(dev); + int ldo = rdev_get_id(dev); + + if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2) + return -EINVAL; + + if (selector >= tps->info[ldo]->table_len) + return -EINVAL; + else + return tps->info[ldo]->table[selector] * 1000; +} + /* Operations permitted on VDCDCx */ static struct regulator_ops tps65023_dcdc_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, - .get_voltage_sel = tps65023_dcdc_get_voltage_sel, + .get_voltage = tps65023_dcdc_get_voltage, .set_voltage_sel = tps65023_dcdc_set_voltage_sel, - .list_voltage = regulator_list_voltage_table, + .list_voltage = tps65023_dcdc_list_voltage, }; /* Operations permitted on LDOx */ @@ -209,9 +270,9 @@ static struct regulator_ops tps65023_ldo_ops = { .is_enabled = 
regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, - .get_voltage_sel = regulator_get_voltage_sel_regmap, - .set_voltage_sel = regulator_set_voltage_sel_regmap, - .list_voltage = regulator_list_voltage_table, + .get_voltage = tps65023_ldo_get_voltage, + .set_voltage_sel = tps65023_ldo_set_voltage_sel, + .list_voltage = tps65023_ldo_list_voltage, }; static struct regmap_config tps65023_regmap_config = { @@ -264,28 +325,19 @@ static int __devinit tps_65023_probe(struct i2c_client *client, tps->desc[i].name = info->name; tps->desc[i].id = i; tps->desc[i].n_voltages = info->table_len; - tps->desc[i].volt_table = info->table; tps->desc[i].ops = (i > TPS65023_DCDC_3 ? &tps65023_ldo_ops : &tps65023_dcdc_ops); tps->desc[i].type = REGULATOR_VOLTAGE; tps->desc[i].owner = THIS_MODULE; tps->desc[i].enable_reg = TPS65023_REG_REG_CTRL; - switch (i) { - case TPS65023_LDO_1: - tps->desc[i].vsel_reg = TPS65023_REG_LDO_CTRL; - tps->desc[i].vsel_mask = 0x07; + if (i == TPS65023_LDO_1) tps->desc[i].enable_mask = 1 << 1; - break; - case TPS65023_LDO_2: - tps->desc[i].vsel_reg = TPS65023_REG_LDO_CTRL; - tps->desc[i].vsel_mask = 0x70; + else if (i == TPS65023_LDO_2) tps->desc[i].enable_mask = 1 << 2; - break; - default: /* DCDCx */ + else /* DCDCx */ tps->desc[i].enable_mask = 1 << (TPS65023_NUM_REGULATOR - i); - } config.dev = &client->dev; config.init_data = init_data; @@ -332,26 +384,35 @@ static int __devexit tps_65023_remove(struct i2c_client *client) static const struct tps_info tps65020_regs[] = { { .name = "VDCDC1", - .table_len = ARRAY_SIZE(DCDC_FIXED_3300000_VSEL_table), - .table = DCDC_FIXED_3300000_VSEL_table, + .min_uV = 3300000, + .max_uV = 3300000, + .fixed = 1, }, { .name = "VDCDC2", - .table_len = ARRAY_SIZE(DCDC_FIXED_1800000_VSEL_table), - .table = DCDC_FIXED_1800000_VSEL_table, + .min_uV = 1800000, + .max_uV = 1800000, + .fixed = 1, }, { .name = "VDCDC3", + .min_uV = 800000, + .max_uV = 1600000, .table_len = ARRAY_SIZE(VCORE_VSEL_table), .table = VCORE_VSEL_table, }, + { .name = "LDO1", + .min_uV = 1000000, + .max_uV = 3150000, .table_len = ARRAY_SIZE(TPS65020_LDO1_VSEL_table), .table = TPS65020_LDO1_VSEL_table, }, { .name = "LDO2", + .min_uV = 1050000, + .max_uV = 3300000, .table_len = ARRAY_SIZE(TPS65020_LDO2_VSEL_table), .table = TPS65020_LDO2_VSEL_table, }, @@ -360,26 +421,34 @@ static const struct tps_info tps65020_regs[] = { static const struct tps_info tps65021_regs[] = { { .name = "VDCDC1", - .table_len = ARRAY_SIZE(DCDC_FIXED_3300000_VSEL_table), - .table = DCDC_FIXED_3300000_VSEL_table, + .min_uV = 3300000, + .max_uV = 3300000, + .fixed = 1, }, { .name = "VDCDC2", - .table_len = ARRAY_SIZE(DCDC_FIXED_1800000_VSEL_table), - .table = DCDC_FIXED_1800000_VSEL_table, + .min_uV = 1800000, + .max_uV = 1800000, + .fixed = 1, }, { .name = "VDCDC3", + .min_uV = 800000, + .max_uV = 1600000, .table_len = ARRAY_SIZE(VCORE_VSEL_table), .table = VCORE_VSEL_table, }, { .name = "LDO1", + .min_uV = 1000000, + .max_uV = 3150000, .table_len = ARRAY_SIZE(TPS65023_LDO1_VSEL_table), .table = TPS65023_LDO1_VSEL_table, }, { .name = "LDO2", + .min_uV = 1050000, + .max_uV = 3300000, .table_len = ARRAY_SIZE(TPS65023_LDO2_VSEL_table), .table = TPS65023_LDO2_VSEL_table, }, @@ -388,26 +457,34 @@ static const struct tps_info tps65021_regs[] = { static const struct tps_info tps65023_regs[] = { { .name = "VDCDC1", + .min_uV = 800000, + .max_uV = 1600000, .table_len = ARRAY_SIZE(VCORE_VSEL_table), .table = VCORE_VSEL_table, }, { .name = "VDCDC2", - .table_len = 
ARRAY_SIZE(DCDC_FIXED_3300000_VSEL_table), - .table = DCDC_FIXED_3300000_VSEL_table, + .min_uV = 3300000, + .max_uV = 3300000, + .fixed = 1, }, { .name = "VDCDC3", - .table_len = ARRAY_SIZE(DCDC_FIXED_1800000_VSEL_table), - .table = DCDC_FIXED_1800000_VSEL_table, + .min_uV = 1800000, + .max_uV = 1800000, + .fixed = 1, }, { .name = "LDO1", + .min_uV = 1000000, + .max_uV = 3150000, .table_len = ARRAY_SIZE(TPS65023_LDO1_VSEL_table), .table = TPS65023_LDO1_VSEL_table, }, { .name = "LDO2", + .min_uV = 1050000, + .max_uV = 3300000, .table_len = ARRAY_SIZE(TPS65023_LDO2_VSEL_table), .table = TPS65023_LDO2_VSEL_table, }, diff --git a/trunk/drivers/regulator/tps6507x-regulator.c b/trunk/drivers/regulator/tps6507x-regulator.c index 07d01ccdf308..da38be1016aa 100644 --- a/trunk/drivers/regulator/tps6507x-regulator.c +++ b/trunk/drivers/regulator/tps6507x-regulator.c @@ -43,40 +43,58 @@ /* Number of total regulators available */ #define TPS6507X_NUM_REGULATOR (TPS6507X_NUM_DCDC + TPS6507X_NUM_LDO) -/* Supported voltage values for regulators (in microVolts) */ -static const unsigned int VDCDCx_VSEL_table[] = { - 725000, 750000, 775000, 800000, - 825000, 850000, 875000, 900000, - 925000, 950000, 975000, 1000000, - 1025000, 1050000, 1075000, 1100000, - 1125000, 1150000, 1175000, 1200000, - 1225000, 1250000, 1275000, 1300000, - 1325000, 1350000, 1375000, 1400000, - 1425000, 1450000, 1475000, 1500000, - 1550000, 1600000, 1650000, 1700000, - 1750000, 1800000, 1850000, 1900000, - 1950000, 2000000, 2050000, 2100000, - 2150000, 2200000, 2250000, 2300000, - 2350000, 2400000, 2450000, 2500000, - 2550000, 2600000, 2650000, 2700000, - 2750000, 2800000, 2850000, 2900000, - 3000000, 3100000, 3200000, 3300000, +/* Supported voltage values for regulators (in milliVolts) */ +static const u16 VDCDCx_VSEL_table[] = { + 725, 750, 775, 800, + 825, 850, 875, 900, + 925, 950, 975, 1000, + 1025, 1050, 1075, 1100, + 1125, 1150, 1175, 1200, + 1225, 1250, 1275, 1300, + 1325, 1350, 1375, 1400, + 1425, 1450, 1475, 1500, + 1550, 1600, 1650, 1700, + 1750, 1800, 1850, 1900, + 1950, 2000, 2050, 2100, + 2150, 2200, 2250, 2300, + 2350, 2400, 2450, 2500, + 2550, 2600, 2650, 2700, + 2750, 2800, 2850, 2900, + 3000, 3100, 3200, 3300, }; -static const unsigned int LDO1_VSEL_table[] = { - 1000000, 1100000, 1200000, 1250000, - 1300000, 1350000, 1400000, 1500000, - 1600000, 1800000, 2500000, 2750000, - 2800000, 3000000, 3100000, 3300000, +static const u16 LDO1_VSEL_table[] = { + 1000, 1100, 1200, 1250, + 1300, 1350, 1400, 1500, + 1600, 1800, 2500, 2750, + 2800, 3000, 3100, 3300, }; -/* The voltage mapping table for LDO2 is the same as VDCDCx */ -#define LDO2_VSEL_table VDCDCx_VSEL_table +static const u16 LDO2_VSEL_table[] = { + 725, 750, 775, 800, + 825, 850, 875, 900, + 925, 950, 975, 1000, + 1025, 1050, 1075, 1100, + 1125, 1150, 1175, 1200, + 1225, 1250, 1275, 1300, + 1325, 1350, 1375, 1400, + 1425, 1450, 1475, 1500, + 1550, 1600, 1650, 1700, + 1750, 1800, 1850, 1900, + 1950, 2000, 2050, 2100, + 2150, 2200, 2250, 2300, + 2350, 2400, 2450, 2500, + 2550, 2600, 2650, 2700, + 2750, 2800, 2850, 2900, + 3000, 3100, 3200, 3300, +}; struct tps_info { const char *name; + unsigned min_uV; + unsigned max_uV; u8 table_len; - const unsigned int *table; + const u16 *table; /* Does DCDC high or the low register defines output voltage? 
*/ bool defdcdc_default; @@ -85,26 +103,36 @@ struct tps_info { static struct tps_info tps6507x_pmic_regs[] = { { .name = "VDCDC1", + .min_uV = 725000, + .max_uV = 3300000, .table_len = ARRAY_SIZE(VDCDCx_VSEL_table), .table = VDCDCx_VSEL_table, }, { .name = "VDCDC2", + .min_uV = 725000, + .max_uV = 3300000, .table_len = ARRAY_SIZE(VDCDCx_VSEL_table), .table = VDCDCx_VSEL_table, }, { .name = "VDCDC3", + .min_uV = 725000, + .max_uV = 3300000, .table_len = ARRAY_SIZE(VDCDCx_VSEL_table), .table = VDCDCx_VSEL_table, }, { .name = "LDO1", + .min_uV = 1000000, + .max_uV = 3300000, .table_len = ARRAY_SIZE(LDO1_VSEL_table), .table = LDO1_VSEL_table, }, { .name = "LDO2", + .min_uV = 725000, + .max_uV = 3300000, .table_len = ARRAY_SIZE(LDO2_VSEL_table), .table = LDO2_VSEL_table, }, @@ -347,13 +375,28 @@ static int tps6507x_pmic_set_voltage_sel(struct regulator_dev *dev, return tps6507x_pmic_reg_write(tps, reg, data); } +static int tps6507x_pmic_list_voltage(struct regulator_dev *dev, + unsigned selector) +{ + struct tps6507x_pmic *tps = rdev_get_drvdata(dev); + int rid = rdev_get_id(dev); + + if (rid < TPS6507X_DCDC_1 || rid > TPS6507X_LDO_2) + return -EINVAL; + + if (selector >= tps->info[rid]->table_len) + return -EINVAL; + else + return tps->info[rid]->table[selector] * 1000; +} + static struct regulator_ops tps6507x_pmic_ops = { .is_enabled = tps6507x_pmic_is_enabled, .enable = tps6507x_pmic_enable, .disable = tps6507x_pmic_disable, .get_voltage_sel = tps6507x_pmic_get_voltage_sel, .set_voltage_sel = tps6507x_pmic_set_voltage_sel, - .list_voltage = regulator_list_voltage_table, + .list_voltage = tps6507x_pmic_list_voltage, }; static __devinit int tps6507x_pmic_probe(struct platform_device *pdev) @@ -406,7 +449,6 @@ static __devinit int tps6507x_pmic_probe(struct platform_device *pdev) tps->desc[i].name = info->name; tps->desc[i].id = i; tps->desc[i].n_voltages = info->table_len; - tps->desc[i].volt_table = info->table; tps->desc[i].ops = &tps6507x_pmic_ops; tps->desc[i].type = REGULATOR_VOLTAGE; tps->desc[i].owner = THIS_MODULE; diff --git a/trunk/drivers/regulator/tps65217-regulator.c b/trunk/drivers/regulator/tps65217-regulator.c index 6caa222af77a..9d371d2cbcae 100644 --- a/trunk/drivers/regulator/tps65217-regulator.c +++ b/trunk/drivers/regulator/tps65217-regulator.c @@ -26,7 +26,7 @@ #include #include -#define TPS65217_REGULATOR(_name, _id, _ops, _n, _vr, _vm, _em, _t) \ +#define TPS65217_REGULATOR(_name, _id, _ops, _n) \ { \ .name = _name, \ .id = _id, \ @@ -34,23 +34,23 @@ .n_voltages = _n, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ - .vsel_reg = _vr, \ - .vsel_mask = _vm, \ - .enable_reg = TPS65217_REG_ENABLE, \ - .enable_mask = _em, \ - .volt_table = _t, \ } \ -#define TPS65217_INFO(_nm, _min, _max, _f1, _f2) \ +#define TPS65217_INFO(_nm, _min, _max, _f1, _f2, _t, _n, _em, _vr, _vm) \ { \ .name = _nm, \ .min_uV = _min, \ .max_uV = _max, \ .vsel_to_uv = _f1, \ .uv_to_vsel = _f2, \ + .table = _t, \ + .table_len = _n, \ + .enable_mask = _em, \ + .set_vout_reg = _vr, \ + .set_vout_mask = _vm, \ } -static const unsigned int LDO1_VSEL_table[] = { +static const int LDO1_VSEL_table[] = { 1000000, 1100000, 1200000, 1250000, 1300000, 1350000, 1400000, 1500000, 1600000, 1800000, 2500000, 2750000, @@ -78,7 +78,7 @@ static int tps65217_vsel_to_uv1(unsigned int vsel) static int tps65217_uv_to_vsel1(int uV, unsigned int *vsel) { - if (uV < 0 || uV > 3300000) + if ((uV < 0) && (uV > 3300000)) return -EINVAL; if (uV <= 1500000) @@ -112,7 +112,7 @@ static int tps65217_vsel_to_uv2(unsigned int 
vsel) static int tps65217_uv_to_vsel2(int uV, unsigned int *vsel) { - if (uV < 0 || uV > 3300000) + if ((uV < 0) && (uV > 3300000)) return -EINVAL; if (uV <= 1900000) @@ -127,20 +127,46 @@ static int tps65217_uv_to_vsel2(int uV, unsigned int *vsel) static struct tps_info tps65217_pmic_regs[] = { TPS65217_INFO("DCDC1", 900000, 1800000, tps65217_vsel_to_uv1, - tps65217_uv_to_vsel1), + tps65217_uv_to_vsel1, NULL, 64, TPS65217_ENABLE_DC1_EN, + TPS65217_REG_DEFDCDC1, TPS65217_DEFDCDCX_DCDC_MASK), TPS65217_INFO("DCDC2", 900000, 3300000, tps65217_vsel_to_uv1, - tps65217_uv_to_vsel1), + tps65217_uv_to_vsel1, NULL, 64, TPS65217_ENABLE_DC2_EN, + TPS65217_REG_DEFDCDC2, TPS65217_DEFDCDCX_DCDC_MASK), TPS65217_INFO("DCDC3", 900000, 1500000, tps65217_vsel_to_uv1, - tps65217_uv_to_vsel1), - TPS65217_INFO("LDO1", 1000000, 3300000, NULL, NULL), + tps65217_uv_to_vsel1, NULL, 64, TPS65217_ENABLE_DC3_EN, + TPS65217_REG_DEFDCDC3, TPS65217_DEFDCDCX_DCDC_MASK), + TPS65217_INFO("LDO1", 1000000, 3300000, NULL, NULL, LDO1_VSEL_table, + 16, TPS65217_ENABLE_LDO1_EN, TPS65217_REG_DEFLDO1, + TPS65217_DEFLDO1_LDO1_MASK), TPS65217_INFO("LDO2", 900000, 3300000, tps65217_vsel_to_uv1, - tps65217_uv_to_vsel1), + tps65217_uv_to_vsel1, NULL, 64, TPS65217_ENABLE_LDO2_EN, + TPS65217_REG_DEFLDO2, TPS65217_DEFLDO2_LDO2_MASK), TPS65217_INFO("LDO3", 1800000, 3300000, tps65217_vsel_to_uv2, - tps65217_uv_to_vsel2), + tps65217_uv_to_vsel2, NULL, 32, + TPS65217_ENABLE_LS1_EN | TPS65217_DEFLDO3_LDO3_EN, + TPS65217_REG_DEFLS1, TPS65217_DEFLDO3_LDO3_MASK), TPS65217_INFO("LDO4", 1800000, 3300000, tps65217_vsel_to_uv2, - tps65217_uv_to_vsel2), + tps65217_uv_to_vsel2, NULL, 32, + TPS65217_ENABLE_LS2_EN | TPS65217_DEFLDO4_LDO4_EN, + TPS65217_REG_DEFLS2, TPS65217_DEFLDO4_LDO4_MASK), }; +static int tps65217_pmic_is_enabled(struct regulator_dev *dev) +{ + int ret; + struct tps65217 *tps = rdev_get_drvdata(dev); + unsigned int data, rid = rdev_get_id(dev); + + if (rid < TPS65217_DCDC_1 || rid > TPS65217_LDO_4) + return -EINVAL; + + ret = tps65217_reg_read(tps, TPS65217_REG_ENABLE, &data); + if (ret) + return ret; + + return (data & tps->info[rid]->enable_mask) ? 
1 : 0; +} + static int tps65217_pmic_enable(struct regulator_dev *dev) { struct tps65217 *tps = rdev_get_drvdata(dev); @@ -151,8 +177,9 @@ static int tps65217_pmic_enable(struct regulator_dev *dev) /* Enable the regulator and password protection is level 1 */ return tps65217_set_bits(tps, TPS65217_REG_ENABLE, - dev->desc->enable_mask, dev->desc->enable_mask, - TPS65217_PROTECT_L1); + tps->info[rid]->enable_mask, + tps->info[rid]->enable_mask, + TPS65217_PROTECT_L1); } static int tps65217_pmic_disable(struct regulator_dev *dev) @@ -165,7 +192,25 @@ static int tps65217_pmic_disable(struct regulator_dev *dev) /* Disable the regulator and password protection is level 1 */ return tps65217_clear_bits(tps, TPS65217_REG_ENABLE, - dev->desc->enable_mask, TPS65217_PROTECT_L1); + tps->info[rid]->enable_mask, TPS65217_PROTECT_L1); +} + +static int tps65217_pmic_get_voltage_sel(struct regulator_dev *dev) +{ + int ret; + struct tps65217 *tps = rdev_get_drvdata(dev); + unsigned int selector, rid = rdev_get_id(dev); + + if (rid < TPS65217_DCDC_1 || rid > TPS65217_LDO_4) + return -EINVAL; + + ret = tps65217_reg_read(tps, tps->info[rid]->set_vout_reg, &selector); + if (ret) + return ret; + + selector &= tps->info[rid]->set_vout_mask; + + return selector; } static int tps65217_pmic_set_voltage_sel(struct regulator_dev *dev, @@ -176,7 +221,8 @@ static int tps65217_pmic_set_voltage_sel(struct regulator_dev *dev, unsigned int rid = rdev_get_id(dev); /* Set the voltage based on vsel value and write protect level is 2 */ - ret = tps65217_set_bits(tps, dev->desc->vsel_reg, dev->desc->vsel_mask, + ret = tps65217_set_bits(tps, tps->info[rid]->set_vout_reg, + tps->info[rid]->set_vout_mask, selector, TPS65217_PROTECT_L2); /* Set GO bit for DCDCx to initiate voltage transistion */ @@ -206,10 +252,10 @@ static int tps65217_pmic_map_voltage(struct regulator_dev *dev, if (rid < TPS65217_DCDC_1 || rid > TPS65217_LDO_4) return -EINVAL; - if (min_uV < tps->info[rid]->min_uV) - min_uV = tps->info[rid]->min_uV; + if (min_uV < tps->info[rid]->min_uV || min_uV > tps->info[rid]->max_uV) + return -EINVAL; - if (max_uV < tps->info[rid]->min_uV || min_uV > tps->info[rid]->max_uV) + if (max_uV < tps->info[rid]->min_uV || max_uV > tps->info[rid]->max_uV) return -EINVAL; ret = tps->info[rid]->uv_to_vsel(min_uV, &sel); @@ -228,18 +274,21 @@ static int tps65217_pmic_list_voltage(struct regulator_dev *dev, if (rid < TPS65217_DCDC_1 || rid > TPS65217_LDO_4) return -EINVAL; - if (selector >= dev->desc->n_voltages) + if (selector >= tps->info[rid]->table_len) return -EINVAL; + if (tps->info[rid]->table) + return tps->info[rid]->table[selector]; + return tps->info[rid]->vsel_to_uv(selector); } /* Operations permitted on DCDCx, LDO2, LDO3 and LDO4 */ static struct regulator_ops tps65217_pmic_ops = { - .is_enabled = regulator_is_enabled_regmap, + .is_enabled = tps65217_pmic_is_enabled, .enable = tps65217_pmic_enable, .disable = tps65217_pmic_disable, - .get_voltage_sel = regulator_get_voltage_sel_regmap, + .get_voltage_sel = tps65217_pmic_get_voltage_sel, .set_voltage_sel = tps65217_pmic_set_voltage_sel, .list_voltage = tps65217_pmic_list_voltage, .map_voltage = tps65217_pmic_map_voltage, @@ -247,38 +296,22 @@ static struct regulator_ops tps65217_pmic_ops = { /* Operations permitted on LDO1 */ static struct regulator_ops tps65217_pmic_ldo1_ops = { - .is_enabled = regulator_is_enabled_regmap, + .is_enabled = tps65217_pmic_is_enabled, .enable = tps65217_pmic_enable, .disable = tps65217_pmic_disable, - .get_voltage_sel = 
regulator_get_voltage_sel_regmap, + .get_voltage_sel = tps65217_pmic_get_voltage_sel, .set_voltage_sel = tps65217_pmic_set_voltage_sel, - .list_voltage = regulator_list_voltage_table, + .list_voltage = tps65217_pmic_list_voltage, }; static const struct regulator_desc regulators[] = { - TPS65217_REGULATOR("DCDC1", TPS65217_DCDC_1, tps65217_pmic_ops, 64, - TPS65217_REG_DEFDCDC1, TPS65217_DEFDCDCX_DCDC_MASK, - TPS65217_ENABLE_DC1_EN, NULL), - TPS65217_REGULATOR("DCDC2", TPS65217_DCDC_2, tps65217_pmic_ops, 64, - TPS65217_REG_DEFDCDC2, TPS65217_DEFDCDCX_DCDC_MASK, - TPS65217_ENABLE_DC2_EN, NULL), - TPS65217_REGULATOR("DCDC3", TPS65217_DCDC_3, tps65217_pmic_ops, 64, - TPS65217_REG_DEFDCDC3, TPS65217_DEFDCDCX_DCDC_MASK, - TPS65217_ENABLE_DC3_EN, NULL), - TPS65217_REGULATOR("LDO1", TPS65217_LDO_1, tps65217_pmic_ldo1_ops, 16, - TPS65217_REG_DEFLDO1, TPS65217_DEFLDO1_LDO1_MASK, - TPS65217_ENABLE_LDO1_EN, LDO1_VSEL_table), - TPS65217_REGULATOR("LDO2", TPS65217_LDO_2, tps65217_pmic_ops, 64, - TPS65217_REG_DEFLDO2, TPS65217_DEFLDO2_LDO2_MASK, - TPS65217_ENABLE_LDO2_EN, NULL), - TPS65217_REGULATOR("LDO3", TPS65217_LDO_3, tps65217_pmic_ops, 32, - TPS65217_REG_DEFLS1, TPS65217_DEFLDO3_LDO3_MASK, - TPS65217_ENABLE_LS1_EN | TPS65217_DEFLDO3_LDO3_EN, - NULL), - TPS65217_REGULATOR("LDO4", TPS65217_LDO_4, tps65217_pmic_ops, 32, - TPS65217_REG_DEFLS2, TPS65217_DEFLDO4_LDO4_MASK, - TPS65217_ENABLE_LS2_EN | TPS65217_DEFLDO4_LDO4_EN, - NULL), + TPS65217_REGULATOR("DCDC1", TPS65217_DCDC_1, tps65217_pmic_ops, 64), + TPS65217_REGULATOR("DCDC2", TPS65217_DCDC_2, tps65217_pmic_ops, 64), + TPS65217_REGULATOR("DCDC3", TPS65217_DCDC_3, tps65217_pmic_ops, 64), + TPS65217_REGULATOR("LDO1", TPS65217_LDO_1, tps65217_pmic_ldo1_ops, 16), + TPS65217_REGULATOR("LDO2", TPS65217_LDO_2, tps65217_pmic_ops, 64), + TPS65217_REGULATOR("LDO3", TPS65217_LDO_3, tps65217_pmic_ops, 32), + TPS65217_REGULATOR("LDO4", TPS65217_LDO_4, tps65217_pmic_ops, 32), }; static int __devinit tps65217_regulator_probe(struct platform_device *pdev) @@ -293,7 +326,6 @@ static int __devinit tps65217_regulator_probe(struct platform_device *pdev) tps->info[pdev->id] = info; config.dev = &pdev->dev; - config.of_node = pdev->dev.of_node; config.init_data = pdev->dev.platform_data; config.driver_data = tps; diff --git a/trunk/drivers/regulator/tps6524x-regulator.c b/trunk/drivers/regulator/tps6524x-regulator.c index 947ece933d90..1b299aacf22f 100644 --- a/trunk/drivers/regulator/tps6524x-regulator.c +++ b/trunk/drivers/regulator/tps6524x-regulator.c @@ -110,6 +110,9 @@ #define N_SWITCH 2 #define N_REGULATORS (N_DCDC + N_LDO + N_SWITCH) +#define FIXED_ILIMSEL BIT(0) +#define FIXED_VOLTAGE BIT(1) + #define CMD_READ(reg) ((reg) << 6) #define CMD_WRITE(reg) (BIT(5) | (reg) << 6) #define STAT_CLK BIT(3) @@ -126,9 +129,12 @@ struct field { struct supply_info { const char *name; int n_voltages; - const unsigned int *voltages; + const int *voltages; + int fixed_voltage; int n_ilimsels; - const unsigned int *ilimsels; + const int *ilimsels; + int fixed_ilimsel; + int flags; struct field enable, voltage, ilimsel; }; @@ -301,7 +307,7 @@ static int write_field(struct tps6524x *hw, const struct field *field, val << field->shift); } -static const unsigned int dcdc1_voltages[] = { +static const int dcdc1_voltages[] = { 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, @@ -312,7 +318,7 @@ static const unsigned int dcdc1_voltages[] = { 1500000, 1525000, 1550000, 1575000, }; -static const unsigned int dcdc2_voltages[] = { +static 
const int dcdc2_voltages[] = { 1400000, 1450000, 1500000, 1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000, 1950000, @@ -323,7 +329,7 @@ static const unsigned int dcdc2_voltages[] = { 2800000, 2850000, 2900000, 2950000, }; -static const unsigned int dcdc3_voltages[] = { +static const int dcdc3_voltages[] = { 2400000, 2450000, 2500000, 2550000, 2600000, 2650000, 2700000, 2750000, 2800000, 2850000, 2900000, 2950000, 3000000, 3050000, 3100000, @@ -331,54 +337,38 @@ static const unsigned int dcdc3_voltages[] = { 3400000, 3450000, 3500000, 3550000, 3600000, }; -static const unsigned int ldo1_voltages[] = { +static const int ldo1_voltages[] = { 4300000, 4350000, 4400000, 4450000, 4500000, 4550000, 4600000, 4650000, 4700000, 4750000, 4800000, 4850000, 4900000, 4950000, 5000000, 5050000, }; -static const unsigned int ldo2_voltages[] = { +static const int ldo2_voltages[] = { 1100000, 1150000, 1200000, 1250000, 1300000, 1700000, 1750000, 1800000, 1850000, 1900000, 3150000, 3200000, 3250000, 3300000, 3350000, 3400000, }; -static const unsigned int fixed_5000000_voltage[] = { - 5000000 -}; - -static const unsigned int ldo_ilimsel[] = { +static const int ldo_ilimsel[] = { 400000, 1500000 }; -static const unsigned int usb_ilimsel[] = { +static const int usb_ilimsel[] = { 200000, 400000, 800000, 1000000 }; -static const unsigned int fixed_2400000_ilimsel[] = { - 2400000 -}; - -static const unsigned int fixed_1200000_ilimsel[] = { - 1200000 -}; - -static const unsigned int fixed_400000_ilimsel[] = { - 400000 -}; - #define __MK_FIELD(_reg, _mask, _shift) \ { .reg = (_reg), .mask = (_mask), .shift = (_shift), } static const struct supply_info supply_info[N_REGULATORS] = { { .name = "DCDC1", + .flags = FIXED_ILIMSEL, .n_voltages = ARRAY_SIZE(dcdc1_voltages), .voltages = dcdc1_voltages, - .n_ilimsels = ARRAY_SIZE(fixed_2400000_ilimsel), - .ilimsels = fixed_2400000_ilimsel, + .fixed_ilimsel = 2400000, .enable = __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK, DCDCDCDC1_EN_SHIFT), .voltage = __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK, @@ -386,10 +376,10 @@ static const struct supply_info supply_info[N_REGULATORS] = { }, { .name = "DCDC2", + .flags = FIXED_ILIMSEL, .n_voltages = ARRAY_SIZE(dcdc2_voltages), .voltages = dcdc2_voltages, - .n_ilimsels = ARRAY_SIZE(fixed_1200000_ilimsel), - .ilimsels = fixed_1200000_ilimsel, + .fixed_ilimsel = 1200000, .enable = __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK, DCDCDCDC2_EN_SHIFT), .voltage = __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK, @@ -397,10 +387,10 @@ static const struct supply_info supply_info[N_REGULATORS] = { }, { .name = "DCDC3", + .flags = FIXED_ILIMSEL, .n_voltages = ARRAY_SIZE(dcdc3_voltages), .voltages = dcdc3_voltages, - .n_ilimsels = ARRAY_SIZE(fixed_1200000_ilimsel), - .ilimsels = fixed_1200000_ilimsel, + .fixed_ilimsel = 1200000, .enable = __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK, DCDCDCDC3_EN_SHIFT), .voltage = __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK, @@ -434,8 +424,8 @@ static const struct supply_info supply_info[N_REGULATORS] = { }, { .name = "USB", - .n_voltages = ARRAY_SIZE(fixed_5000000_voltage), - .voltages = fixed_5000000_voltage, + .flags = FIXED_VOLTAGE, + .fixed_voltage = 5000000, .n_ilimsels = ARRAY_SIZE(usb_ilimsel), .ilimsels = usb_ilimsel, .enable = __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK, @@ -445,15 +435,29 @@ static const struct supply_info supply_info[N_REGULATORS] = { }, { .name = "LCD", - .n_voltages = ARRAY_SIZE(fixed_5000000_voltage), - .voltages = fixed_5000000_voltage, - .n_ilimsels = ARRAY_SIZE(fixed_400000_ilimsel), - 
.ilimsels = fixed_400000_ilimsel, + .flags = FIXED_VOLTAGE | FIXED_ILIMSEL, + .fixed_voltage = 5000000, + .fixed_ilimsel = 400000, .enable = __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK, BLOCK_LCD_SHIFT), }, }; +static int list_voltage(struct regulator_dev *rdev, unsigned selector) +{ + const struct supply_info *info; + struct tps6524x *hw; + + hw = rdev_get_drvdata(rdev); + info = &supply_info[rdev_get_id(rdev)]; + + if (info->flags & FIXED_VOLTAGE) + return selector ? -EINVAL : info->fixed_voltage; + + return ((selector < info->n_voltages) ? + info->voltages[selector] : -EINVAL); +} + static int set_voltage_sel(struct regulator_dev *rdev, unsigned selector) { const struct supply_info *info; @@ -462,7 +466,7 @@ static int set_voltage_sel(struct regulator_dev *rdev, unsigned selector) hw = rdev_get_drvdata(rdev); info = &supply_info[rdev_get_id(rdev)]; - if (rdev->desc->n_voltages == 1) + if (info->flags & FIXED_VOLTAGE) return -EINVAL; return write_field(hw, &info->voltage, selector); @@ -477,7 +481,7 @@ static int get_voltage_sel(struct regulator_dev *rdev) hw = rdev_get_drvdata(rdev); info = &supply_info[rdev_get_id(rdev)]; - if (rdev->desc->n_voltages == 1) + if (info->flags & FIXED_VOLTAGE) return 0; ret = read_field(hw, &info->voltage); @@ -499,7 +503,7 @@ static int set_current_limit(struct regulator_dev *rdev, int min_uA, hw = rdev_get_drvdata(rdev); info = &supply_info[rdev_get_id(rdev)]; - if (info->n_ilimsels == 1) + if (info->flags & FIXED_ILIMSEL) return -EINVAL; for (i = 0; i < info->n_ilimsels; i++) @@ -522,8 +526,8 @@ static int get_current_limit(struct regulator_dev *rdev) hw = rdev_get_drvdata(rdev); info = &supply_info[rdev_get_id(rdev)]; - if (info->n_ilimsels == 1) - return info->ilimsels[0]; + if (info->flags & FIXED_ILIMSEL) + return info->fixed_ilimsel; ret = read_field(hw, &info->ilimsel); if (ret < 0) @@ -573,7 +577,7 @@ static struct regulator_ops regulator_ops = { .disable = disable_supply, .get_voltage_sel = get_voltage_sel, .set_voltage_sel = set_voltage_sel, - .list_voltage = regulator_list_voltage_table, + .list_voltage = list_voltage, .set_current_limit = set_current_limit, .get_current_limit = get_current_limit, }; @@ -625,11 +629,13 @@ static int __devinit pmic_probe(struct spi_device *spi) hw->desc[i].name = info->name; hw->desc[i].id = i; hw->desc[i].n_voltages = info->n_voltages; - hw->desc[i].volt_table = info->voltages; hw->desc[i].ops = ®ulator_ops; hw->desc[i].type = REGULATOR_VOLTAGE; hw->desc[i].owner = THIS_MODULE; + if (info->flags & FIXED_VOLTAGE) + hw->desc[i].n_voltages = 1; + config.dev = dev; config.init_data = init_data; config.driver_data = hw; diff --git a/trunk/drivers/regulator/tps6586x-regulator.c b/trunk/drivers/regulator/tps6586x-regulator.c index e6da90ab5153..c0a214575380 100644 --- a/trunk/drivers/regulator/tps6586x-regulator.c +++ b/trunk/drivers/regulator/tps6586x-regulator.c @@ -63,6 +63,8 @@ struct tps6586x_regulator { int enable_bit[2]; int enable_reg[2]; + int *voltages; + /* for DVM regulators */ int go_reg; int go_bit; @@ -70,9 +72,22 @@ struct tps6586x_regulator { static inline struct device *to_tps6586x_dev(struct regulator_dev *rdev) { - return rdev_get_dev(rdev)->parent; + return rdev_get_dev(rdev)->parent->parent; } +static int tps6586x_list_voltage(struct regulator_dev *rdev, unsigned selector) +{ + struct tps6586x_regulator *info = rdev_get_drvdata(rdev); + int rid = rdev_get_id(rdev); + + /* LDO0 has minimal voltage 1.2V rather than 1.25V */ + if ((rid == TPS6586X_ID_LDO_0) && (selector == 0)) + return (info->voltages[0] 
- 50) * 1000; + + return info->voltages[selector] * 1000; +} + + static int tps6586x_set_voltage_sel(struct regulator_dev *rdev, unsigned selector) { @@ -153,7 +168,7 @@ static int tps6586x_regulator_is_enabled(struct regulator_dev *rdev) } static struct regulator_ops tps6586x_regulator_ops = { - .list_voltage = regulator_list_voltage_table, + .list_voltage = tps6586x_list_voltage, .get_voltage_sel = tps6586x_get_voltage_sel, .set_voltage_sel = tps6586x_set_voltage_sel, @@ -162,45 +177,39 @@ static struct regulator_ops tps6586x_regulator_ops = { .disable = tps6586x_regulator_disable, }; -static const unsigned int tps6586x_ldo0_voltages[] = { - 1200000, 1500000, 1800000, 2500000, 2700000, 2850000, 3100000, 3300000, -}; - -static const unsigned int tps6586x_ldo4_voltages[] = { - 1700000, 1725000, 1750000, 1775000, 1800000, 1825000, 1850000, 1875000, - 1900000, 1925000, 1950000, 1975000, 2000000, 2025000, 2050000, 2075000, - 2100000, 2125000, 2150000, 2175000, 2200000, 2225000, 2250000, 2275000, - 2300000, 2325000, 2350000, 2375000, 2400000, 2425000, 2450000, 2475000, +static int tps6586x_ldo_voltages[] = { + 1250, 1500, 1800, 2500, 2700, 2850, 3100, 3300, }; -static const unsigned int tps6586x_ldo_voltages[] = { - 1250000, 1500000, 1800000, 2500000, 2700000, 2850000, 3100000, 3300000, +static int tps6586x_ldo4_voltages[] = { + 1700, 1725, 1750, 1775, 1800, 1825, 1850, 1875, + 1900, 1925, 1950, 1975, 2000, 2025, 2050, 2075, + 2100, 2125, 2150, 2175, 2200, 2225, 2250, 2275, + 2300, 2325, 2350, 2375, 2400, 2425, 2450, 2475, }; -static const unsigned int tps6586x_sm2_voltages[] = { - 3000000, 3050000, 3100000, 3150000, 3200000, 3250000, 3300000, 3350000, - 3400000, 3450000, 3500000, 3550000, 3600000, 3650000, 3700000, 3750000, - 3800000, 3850000, 3900000, 3950000, 4000000, 4050000, 4100000, 4150000, - 4200000, 4250000, 4300000, 4350000, 4400000, 4450000, 4500000, 4550000, +static int tps6586x_sm2_voltages[] = { + 3000, 3050, 3100, 3150, 3200, 3250, 3300, 3350, + 3400, 3450, 3500, 3550, 3600, 3650, 3700, 3750, + 3800, 3850, 3900, 3950, 4000, 4050, 4100, 4150, + 4200, 4250, 4300, 4350, 4400, 4450, 4500, 4550, }; -static const unsigned int tps6586x_dvm_voltages[] = { - 725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000, - 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000, - 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000, - 1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000, +static int tps6586x_dvm_voltages[] = { + 725, 750, 775, 800, 825, 850, 875, 900, + 925, 950, 975, 1000, 1025, 1050, 1075, 1100, + 1125, 1150, 1175, 1200, 1225, 1250, 1275, 1300, + 1325, 1350, 1375, 1400, 1425, 1450, 1475, 1500, }; -#define TPS6586X_REGULATOR(_id, _pin_name, vdata, vreg, shift, nbits, \ +#define TPS6586X_REGULATOR(_id, vdata, vreg, shift, nbits, \ ereg0, ebit0, ereg1, ebit1) \ .desc = { \ - .supply_name = _pin_name, \ .name = "REG-" #_id, \ .ops = &tps6586x_regulator_ops, \ .type = REGULATOR_VOLTAGE, \ .id = TPS6586X_ID_##_id, \ .n_voltages = ARRAY_SIZE(tps6586x_##vdata##_voltages), \ - .volt_table = tps6586x_##vdata##_voltages, \ .owner = THIS_MODULE, \ }, \ .volt_reg = TPS6586X_##vreg, \ @@ -209,45 +218,44 @@ static const unsigned int tps6586x_dvm_voltages[] = { .enable_reg[0] = TPS6586X_SUPPLY##ereg0, \ .enable_bit[0] = (ebit0), \ .enable_reg[1] = TPS6586X_SUPPLY##ereg1, \ - .enable_bit[1] = (ebit1), + .enable_bit[1] = (ebit1), \ + .voltages = tps6586x_##vdata##_voltages, #define TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \ .go_reg = 
TPS6586X_##goreg, \ .go_bit = (gobit), -#define TPS6586X_LDO(_id, _pname, vdata, vreg, shift, nbits, \ +#define TPS6586X_LDO(_id, vdata, vreg, shift, nbits, \ ereg0, ebit0, ereg1, ebit1) \ { \ - TPS6586X_REGULATOR(_id, _pname, vdata, vreg, shift, nbits, \ + TPS6586X_REGULATOR(_id, vdata, vreg, shift, nbits, \ ereg0, ebit0, ereg1, ebit1) \ } -#define TPS6586X_DVM(_id, _pname, vdata, vreg, shift, nbits, \ +#define TPS6586X_DVM(_id, vdata, vreg, shift, nbits, \ ereg0, ebit0, ereg1, ebit1, goreg, gobit) \ { \ - TPS6586X_REGULATOR(_id, _pname, vdata, vreg, shift, nbits, \ + TPS6586X_REGULATOR(_id, vdata, vreg, shift, nbits, \ ereg0, ebit0, ereg1, ebit1) \ TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \ } static struct tps6586x_regulator tps6586x_regulator[] = { - TPS6586X_LDO(LDO_0, "vinldo01", ldo0, SUPPLYV1, 5, 3, ENC, 0, END, 0), - TPS6586X_LDO(LDO_3, "vinldo23", ldo, SUPPLYV4, 0, 3, ENC, 2, END, 2), - TPS6586X_LDO(LDO_5, NULL, ldo, SUPPLYV6, 0, 3, ENE, 6, ENE, 6), - TPS6586X_LDO(LDO_6, "vinldo678", ldo, SUPPLYV3, 0, 3, ENC, 4, END, 4), - TPS6586X_LDO(LDO_7, "vinldo678", ldo, SUPPLYV3, 3, 3, ENC, 5, END, 5), - TPS6586X_LDO(LDO_8, "vinldo678", ldo, SUPPLYV2, 5, 3, ENC, 6, END, 6), - TPS6586X_LDO(LDO_9, "vinldo9", ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7), - TPS6586X_LDO(LDO_RTC, NULL, ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7), - TPS6586X_LDO(LDO_1, "vinldo01", dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1), - TPS6586X_LDO(SM_2, "sm2", sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7), - - TPS6586X_DVM(LDO_2, "vinldo23", dvm, LDO2BV1, 0, 5, ENA, 3, - ENB, 3, VCC2, 6), - TPS6586X_DVM(LDO_4, "vinldo4", ldo4, LDO4V1, 0, 5, ENC, 3, - END, 3, VCC1, 6), - TPS6586X_DVM(SM_0, "sm0", dvm, SM0V1, 0, 5, ENA, 1, ENB, 1, VCC1, 2), - TPS6586X_DVM(SM_1, "sm1", dvm, SM1V1, 0, 5, ENA, 0, ENB, 0, VCC1, 0), + TPS6586X_LDO(LDO_0, ldo, SUPPLYV1, 5, 3, ENC, 0, END, 0), + TPS6586X_LDO(LDO_3, ldo, SUPPLYV4, 0, 3, ENC, 2, END, 2), + TPS6586X_LDO(LDO_5, ldo, SUPPLYV6, 0, 3, ENE, 6, ENE, 6), + TPS6586X_LDO(LDO_6, ldo, SUPPLYV3, 0, 3, ENC, 4, END, 4), + TPS6586X_LDO(LDO_7, ldo, SUPPLYV3, 3, 3, ENC, 5, END, 5), + TPS6586X_LDO(LDO_8, ldo, SUPPLYV2, 5, 3, ENC, 6, END, 6), + TPS6586X_LDO(LDO_9, ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7), + TPS6586X_LDO(LDO_RTC, ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7), + TPS6586X_LDO(LDO_1, dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1), + TPS6586X_LDO(SM_2, sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7), + + TPS6586X_DVM(LDO_2, dvm, LDO2BV1, 0, 5, ENA, 3, ENB, 3, VCC2, 6), + TPS6586X_DVM(LDO_4, ldo4, LDO4V1, 0, 5, ENC, 3, END, 3, VCC1, 6), + TPS6586X_DVM(SM_0, dvm, SM0V1, 0, 5, ENA, 1, ENB, 1, VCC1, 2), + TPS6586X_DVM(SM_1, dvm, SM1V1, 0, 5, ENA, 0, ENB, 0, VCC1, 0), }; /* @@ -354,7 +362,7 @@ static int __devinit tps6586x_regulator_probe(struct platform_device *pdev) if (err) return err; - config.dev = pdev->dev.parent; + config.dev = &pdev->dev; config.of_node = pdev->dev.of_node; config.init_data = pdev->dev.platform_data; config.driver_data = ri; diff --git a/trunk/drivers/regulator/tps65910-regulator.c b/trunk/drivers/regulator/tps65910-regulator.c index 793adda560c3..6bf864b4bdf6 100644 --- a/trunk/drivers/regulator/tps65910-regulator.c +++ b/trunk/drivers/regulator/tps65910-regulator.c @@ -31,147 +31,160 @@ TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3 | \ TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP) -/* supported VIO voltages in microvolts */ -static const unsigned int VIO_VSEL_table[] = { - 1500000, 1800000, 2500000, 3300000, +/* supported VIO voltages in millivolts */ +static const u16 VIO_VSEL_table[] = { + 1500, 1800, 2500, 3300, }; /* VSEL tables for TPS65910 
specific LDOs and dcdc's */ -/* supported VDD3 voltages in microvolts */ -static const unsigned int VDD3_VSEL_table[] = { - 5000000, +/* supported VDD3 voltages in millivolts */ +static const u16 VDD3_VSEL_table[] = { + 5000, }; -/* supported VDIG1 voltages in microvolts */ -static const unsigned int VDIG1_VSEL_table[] = { - 1200000, 1500000, 1800000, 2700000, +/* supported VDIG1 voltages in millivolts */ +static const u16 VDIG1_VSEL_table[] = { + 1200, 1500, 1800, 2700, }; -/* supported VDIG2 voltages in microvolts */ -static const unsigned int VDIG2_VSEL_table[] = { - 1000000, 1100000, 1200000, 1800000, +/* supported VDIG2 voltages in millivolts */ +static const u16 VDIG2_VSEL_table[] = { + 1000, 1100, 1200, 1800, }; -/* supported VPLL voltages in microvolts */ -static const unsigned int VPLL_VSEL_table[] = { - 1000000, 1100000, 1800000, 2500000, +/* supported VPLL voltages in millivolts */ +static const u16 VPLL_VSEL_table[] = { + 1000, 1100, 1800, 2500, }; -/* supported VDAC voltages in microvolts */ -static const unsigned int VDAC_VSEL_table[] = { - 1800000, 2600000, 2800000, 2850000, +/* supported VDAC voltages in millivolts */ +static const u16 VDAC_VSEL_table[] = { + 1800, 2600, 2800, 2850, }; -/* supported VAUX1 voltages in microvolts */ -static const unsigned int VAUX1_VSEL_table[] = { - 1800000, 2500000, 2800000, 2850000, +/* supported VAUX1 voltages in millivolts */ +static const u16 VAUX1_VSEL_table[] = { + 1800, 2500, 2800, 2850, }; -/* supported VAUX2 voltages in microvolts */ -static const unsigned int VAUX2_VSEL_table[] = { - 1800000, 2800000, 2900000, 3300000, +/* supported VAUX2 voltages in millivolts */ +static const u16 VAUX2_VSEL_table[] = { + 1800, 2800, 2900, 3300, }; -/* supported VAUX33 voltages in microvolts */ -static const unsigned int VAUX33_VSEL_table[] = { - 1800000, 2000000, 2800000, 3300000, +/* supported VAUX33 voltages in millivolts */ +static const u16 VAUX33_VSEL_table[] = { + 1800, 2000, 2800, 3300, }; -/* supported VMMC voltages in microvolts */ -static const unsigned int VMMC_VSEL_table[] = { - 1800000, 2800000, 3000000, 3300000, +/* supported VMMC voltages in millivolts */ +static const u16 VMMC_VSEL_table[] = { + 1800, 2800, 3000, 3300, }; struct tps_info { const char *name; - const char *vin_name; + unsigned min_uV; + unsigned max_uV; u8 n_voltages; - const unsigned int *voltage_table; + const u16 *voltage_table; int enable_time_us; }; static struct tps_info tps65910_regs[] = { { .name = "vrtc", - .vin_name = "vcc7", .enable_time_us = 2200, }, { .name = "vio", - .vin_name = "vccio", + .min_uV = 1500000, + .max_uV = 3300000, .n_voltages = ARRAY_SIZE(VIO_VSEL_table), .voltage_table = VIO_VSEL_table, .enable_time_us = 350, }, { .name = "vdd1", - .vin_name = "vcc1", + .min_uV = 600000, + .max_uV = 4500000, .enable_time_us = 350, }, { .name = "vdd2", - .vin_name = "vcc2", + .min_uV = 600000, + .max_uV = 4500000, .enable_time_us = 350, }, { .name = "vdd3", + .min_uV = 5000000, + .max_uV = 5000000, .n_voltages = ARRAY_SIZE(VDD3_VSEL_table), .voltage_table = VDD3_VSEL_table, .enable_time_us = 200, }, { .name = "vdig1", - .vin_name = "vcc6", + .min_uV = 1200000, + .max_uV = 2700000, .n_voltages = ARRAY_SIZE(VDIG1_VSEL_table), .voltage_table = VDIG1_VSEL_table, .enable_time_us = 100, }, { .name = "vdig2", - .vin_name = "vcc6", + .min_uV = 1000000, + .max_uV = 1800000, .n_voltages = ARRAY_SIZE(VDIG2_VSEL_table), .voltage_table = VDIG2_VSEL_table, .enable_time_us = 100, }, { .name = "vpll", - .vin_name = "vcc5", + .min_uV = 1000000, + .max_uV = 2500000, 
.n_voltages = ARRAY_SIZE(VPLL_VSEL_table), .voltage_table = VPLL_VSEL_table, .enable_time_us = 100, }, { .name = "vdac", - .vin_name = "vcc5", + .min_uV = 1800000, + .max_uV = 2850000, .n_voltages = ARRAY_SIZE(VDAC_VSEL_table), .voltage_table = VDAC_VSEL_table, .enable_time_us = 100, }, { .name = "vaux1", - .vin_name = "vcc4", + .min_uV = 1800000, + .max_uV = 2850000, .n_voltages = ARRAY_SIZE(VAUX1_VSEL_table), .voltage_table = VAUX1_VSEL_table, .enable_time_us = 100, }, { .name = "vaux2", - .vin_name = "vcc4", + .min_uV = 1800000, + .max_uV = 3300000, .n_voltages = ARRAY_SIZE(VAUX2_VSEL_table), .voltage_table = VAUX2_VSEL_table, .enable_time_us = 100, }, { .name = "vaux33", - .vin_name = "vcc3", + .min_uV = 1800000, + .max_uV = 3300000, .n_voltages = ARRAY_SIZE(VAUX33_VSEL_table), .voltage_table = VAUX33_VSEL_table, .enable_time_us = 100, }, { .name = "vmmc", - .vin_name = "vcc3", + .min_uV = 1800000, + .max_uV = 3300000, .n_voltages = ARRAY_SIZE(VMMC_VSEL_table), .voltage_table = VMMC_VSEL_table, .enable_time_us = 100, @@ -181,79 +194,91 @@ static struct tps_info tps65910_regs[] = { static struct tps_info tps65911_regs[] = { { .name = "vrtc", - .vin_name = "vcc7", .enable_time_us = 2200, }, { .name = "vio", - .vin_name = "vccio", + .min_uV = 1500000, + .max_uV = 3300000, .n_voltages = ARRAY_SIZE(VIO_VSEL_table), .voltage_table = VIO_VSEL_table, .enable_time_us = 350, }, { .name = "vdd1", - .vin_name = "vcc1", - .n_voltages = 0x4C, + .min_uV = 600000, + .max_uV = 4500000, + .n_voltages = 73, .enable_time_us = 350, }, { .name = "vdd2", - .vin_name = "vcc2", - .n_voltages = 0x4C, + .min_uV = 600000, + .max_uV = 4500000, + .n_voltages = 73, .enable_time_us = 350, }, { .name = "vddctrl", - .n_voltages = 0x44, + .min_uV = 600000, + .max_uV = 1400000, + .n_voltages = 65, .enable_time_us = 900, }, { .name = "ldo1", - .vin_name = "vcc6", - .n_voltages = 0x33, + .min_uV = 1000000, + .max_uV = 3300000, + .n_voltages = 47, .enable_time_us = 420, }, { .name = "ldo2", - .vin_name = "vcc6", - .n_voltages = 0x33, + .min_uV = 1000000, + .max_uV = 3300000, + .n_voltages = 47, .enable_time_us = 420, }, { .name = "ldo3", - .vin_name = "vcc5", - .n_voltages = 0x1A, + .min_uV = 1000000, + .max_uV = 3300000, + .n_voltages = 24, .enable_time_us = 230, }, { .name = "ldo4", - .vin_name = "vcc5", - .n_voltages = 0x33, + .min_uV = 1000000, + .max_uV = 3300000, + .n_voltages = 47, .enable_time_us = 230, }, { .name = "ldo5", - .vin_name = "vcc4", - .n_voltages = 0x1A, + .min_uV = 1000000, + .max_uV = 3300000, + .n_voltages = 24, .enable_time_us = 230, }, { .name = "ldo6", - .vin_name = "vcc3", - .n_voltages = 0x1A, + .min_uV = 1000000, + .max_uV = 3300000, + .n_voltages = 24, .enable_time_us = 230, }, { .name = "ldo7", - .vin_name = "vcc3", - .n_voltages = 0x1A, + .min_uV = 1000000, + .max_uV = 3300000, + .n_voltages = 24, .enable_time_us = 230, }, { .name = "ldo8", - .vin_name = "vcc3", - .n_voltages = 0x1A, + .min_uV = 1000000, + .max_uV = 3300000, + .n_voltages = 24, .enable_time_us = 230, }, }; @@ -296,6 +321,7 @@ struct tps65910_reg { struct tps65910 *mfd; struct regulator_dev **rdev; struct tps_info **info; + struct mutex mutex; int num_regulators; int mode; int (*get_ctrl_reg)(int); @@ -303,6 +329,71 @@ struct tps65910_reg { unsigned int board_ext_control[TPS65910_NUM_REGS]; }; +static inline int tps65910_read(struct tps65910_reg *pmic, u8 reg) +{ + unsigned int val; + int err; + + err = tps65910_reg_read(pmic->mfd, reg, &val); + if (err) + return err; + + return val; +} + +static int 
tps65910_modify_bits(struct tps65910_reg *pmic, u8 reg, + u8 set_mask, u8 clear_mask) +{ + int err, data; + + mutex_lock(&pmic->mutex); + + data = tps65910_read(pmic, reg); + if (data < 0) { + dev_err(pmic->mfd->dev, "Read from reg 0x%x failed\n", reg); + err = data; + goto out; + } + + data &= ~clear_mask; + data |= set_mask; + err = tps65910_reg_write(pmic->mfd, reg, data); + if (err) + dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg); + +out: + mutex_unlock(&pmic->mutex); + return err; +} + +static int tps65910_reg_read_locked(struct tps65910_reg *pmic, u8 reg) +{ + int data; + + mutex_lock(&pmic->mutex); + + data = tps65910_read(pmic, reg); + if (data < 0) + dev_err(pmic->mfd->dev, "Read from reg 0x%x failed\n", reg); + + mutex_unlock(&pmic->mutex); + return data; +} + +static int tps65910_reg_write_locked(struct tps65910_reg *pmic, u8 reg, u8 val) +{ + int err; + + mutex_lock(&pmic->mutex); + + err = tps65910_reg_write(pmic->mfd, reg, val); + if (err < 0) + dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg); + + mutex_unlock(&pmic->mutex); + return err; +} + static int tps65910_get_ctrl_register(int id) { switch (id) { @@ -371,6 +462,13 @@ static int tps65911_get_ctrl_register(int id) } } +static int tps65910_enable_time(struct regulator_dev *dev) +{ + struct tps65910_reg *pmic = rdev_get_drvdata(dev); + int id = rdev_get_id(dev); + return pmic->info[id]->enable_time_us; +} + static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode) { struct tps65910_reg *pmic = rdev_get_drvdata(dev); @@ -383,9 +481,8 @@ static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode) switch (mode) { case REGULATOR_MODE_NORMAL: - return tps65910_reg_update_bits(pmic->mfd, reg, - LDO_ST_MODE_BIT | LDO_ST_ON_BIT, - LDO_ST_ON_BIT); + return tps65910_modify_bits(pmic, reg, LDO_ST_ON_BIT, + LDO_ST_MODE_BIT); case REGULATOR_MODE_IDLE: value = LDO_ST_ON_BIT | LDO_ST_MODE_BIT; return tps65910_reg_set_bits(mfd, reg, value); @@ -399,15 +496,15 @@ static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode) static unsigned int tps65910_get_mode(struct regulator_dev *dev) { struct tps65910_reg *pmic = rdev_get_drvdata(dev); - int ret, reg, value, id = rdev_get_id(dev); + int reg, value, id = rdev_get_id(dev); reg = pmic->get_ctrl_reg(id); if (reg < 0) return reg; - ret = tps65910_reg_read(pmic->mfd, reg, &value); - if (ret < 0) - return ret; + value = tps65910_reg_read_locked(pmic, reg); + if (value < 0) + return value; if (!(value & LDO_ST_ON_BIT)) return REGULATOR_MODE_STANDBY; @@ -420,51 +517,33 @@ static unsigned int tps65910_get_mode(struct regulator_dev *dev) static int tps65910_get_voltage_dcdc_sel(struct regulator_dev *dev) { struct tps65910_reg *pmic = rdev_get_drvdata(dev); - int ret, id = rdev_get_id(dev); + int id = rdev_get_id(dev); int opvsel = 0, srvsel = 0, vselmax = 0, mult = 0, sr = 0; switch (id) { case TPS65910_REG_VDD1: - ret = tps65910_reg_read(pmic->mfd, TPS65910_VDD1_OP, &opvsel); - if (ret < 0) - return ret; - ret = tps65910_reg_read(pmic->mfd, TPS65910_VDD1, &mult); - if (ret < 0) - return ret; + opvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD1_OP); + mult = tps65910_reg_read_locked(pmic, TPS65910_VDD1); mult = (mult & VDD1_VGAIN_SEL_MASK) >> VDD1_VGAIN_SEL_SHIFT; - ret = tps65910_reg_read(pmic->mfd, TPS65910_VDD1_SR, &srvsel); - if (ret < 0) - return ret; + srvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD1_SR); sr = opvsel & VDD1_OP_CMD_MASK; opvsel &= VDD1_OP_SEL_MASK; srvsel &= VDD1_SR_SEL_MASK; vselmax = 75; break; 
case TPS65910_REG_VDD2: - ret = tps65910_reg_read(pmic->mfd, TPS65910_VDD2_OP, &opvsel); - if (ret < 0) - return ret; - ret = tps65910_reg_read(pmic->mfd, TPS65910_VDD2, &mult); - if (ret < 0) - return ret; + opvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD2_OP); + mult = tps65910_reg_read_locked(pmic, TPS65910_VDD2); mult = (mult & VDD2_VGAIN_SEL_MASK) >> VDD2_VGAIN_SEL_SHIFT; - ret = tps65910_reg_read(pmic->mfd, TPS65910_VDD2_SR, &srvsel); - if (ret < 0) - return ret; + srvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD2_SR); sr = opvsel & VDD2_OP_CMD_MASK; opvsel &= VDD2_OP_SEL_MASK; srvsel &= VDD2_SR_SEL_MASK; vselmax = 75; break; case TPS65911_REG_VDDCTRL: - ret = tps65910_reg_read(pmic->mfd, TPS65911_VDDCTRL_OP, - &opvsel); - if (ret < 0) - return ret; - ret = tps65910_reg_read(pmic->mfd, TPS65911_VDDCTRL_SR, - &srvsel); - if (ret < 0) - return ret; + opvsel = tps65910_reg_read_locked(pmic, TPS65911_VDDCTRL_OP); + srvsel = tps65910_reg_read_locked(pmic, TPS65911_VDDCTRL_SR); sr = opvsel & VDDCTRL_OP_CMD_MASK; opvsel &= VDDCTRL_OP_SEL_MASK; srvsel &= VDDCTRL_SR_SEL_MASK; @@ -498,15 +577,15 @@ static int tps65910_get_voltage_dcdc_sel(struct regulator_dev *dev) static int tps65910_get_voltage_sel(struct regulator_dev *dev) { struct tps65910_reg *pmic = rdev_get_drvdata(dev); - int ret, reg, value, id = rdev_get_id(dev); + int reg, value, id = rdev_get_id(dev); reg = pmic->get_ctrl_reg(id); if (reg < 0) return reg; - ret = tps65910_reg_read(pmic->mfd, reg, &value); - if (ret < 0) - return ret; + value = tps65910_reg_read_locked(pmic, reg); + if (value < 0) + return value; switch (id) { case TPS65910_REG_VIO: @@ -530,20 +609,18 @@ static int tps65910_get_voltage_sel(struct regulator_dev *dev) static int tps65910_get_voltage_vdd3(struct regulator_dev *dev) { - return dev->desc->volt_table[0]; + return 5 * 1000 * 1000; } static int tps65911_get_voltage_sel(struct regulator_dev *dev) { struct tps65910_reg *pmic = rdev_get_drvdata(dev); - int ret, id = rdev_get_id(dev); - unsigned int value, reg; + int id = rdev_get_id(dev); + u8 value, reg; reg = pmic->get_ctrl_reg(id); - ret = tps65910_reg_read(pmic->mfd, reg, &value); - if (ret < 0) - return ret; + value = tps65910_reg_read_locked(pmic, reg); switch (id) { case TPS65911_REG_LDO1: @@ -585,10 +662,10 @@ static int tps65910_set_voltage_dcdc_sel(struct regulator_dev *dev, dcdc_mult--; vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3; - tps65910_reg_update_bits(pmic->mfd, TPS65910_VDD1, - VDD1_VGAIN_SEL_MASK, - dcdc_mult << VDD1_VGAIN_SEL_SHIFT); - tps65910_reg_write(pmic->mfd, TPS65910_VDD1_OP, vsel); + tps65910_modify_bits(pmic, TPS65910_VDD1, + (dcdc_mult << VDD1_VGAIN_SEL_SHIFT), + VDD1_VGAIN_SEL_MASK); + tps65910_reg_write_locked(pmic, TPS65910_VDD1_OP, vsel); break; case TPS65910_REG_VDD2: dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1; @@ -596,14 +673,14 @@ static int tps65910_set_voltage_dcdc_sel(struct regulator_dev *dev, dcdc_mult--; vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3; - tps65910_reg_update_bits(pmic->mfd, TPS65910_VDD2, - VDD1_VGAIN_SEL_MASK, - dcdc_mult << VDD2_VGAIN_SEL_SHIFT); - tps65910_reg_write(pmic->mfd, TPS65910_VDD2_OP, vsel); + tps65910_modify_bits(pmic, TPS65910_VDD2, + (dcdc_mult << VDD2_VGAIN_SEL_SHIFT), + VDD1_VGAIN_SEL_MASK); + tps65910_reg_write_locked(pmic, TPS65910_VDD2_OP, vsel); break; case TPS65911_REG_VDDCTRL: vsel = selector + 3; - tps65910_reg_write(pmic->mfd, TPS65911_VDDCTRL_OP, vsel); + tps65910_reg_write_locked(pmic, TPS65911_VDDCTRL_OP, vsel); } return 0; @@ -629,8 +706,8 @@ static int 
tps65910_set_voltage_sel(struct regulator_dev *dev, case TPS65910_REG_VAUX2: case TPS65910_REG_VAUX33: case TPS65910_REG_VMMC: - return tps65910_reg_update_bits(pmic->mfd, reg, LDO_SEL_MASK, - selector << LDO_SEL_SHIFT); + return tps65910_modify_bits(pmic, reg, + (selector << LDO_SEL_SHIFT), LDO_SEL_MASK); } return -EINVAL; @@ -650,18 +727,18 @@ static int tps65911_set_voltage_sel(struct regulator_dev *dev, case TPS65911_REG_LDO1: case TPS65911_REG_LDO2: case TPS65911_REG_LDO4: - return tps65910_reg_update_bits(pmic->mfd, reg, LDO1_SEL_MASK, - selector << LDO_SEL_SHIFT); + return tps65910_modify_bits(pmic, reg, + (selector << LDO_SEL_SHIFT), LDO1_SEL_MASK); case TPS65911_REG_LDO3: case TPS65911_REG_LDO5: case TPS65911_REG_LDO6: case TPS65911_REG_LDO7: case TPS65911_REG_LDO8: - return tps65910_reg_update_bits(pmic->mfd, reg, LDO3_SEL_MASK, - selector << LDO_SEL_SHIFT); + return tps65910_modify_bits(pmic, reg, + (selector << LDO_SEL_SHIFT), LDO3_SEL_MASK); case TPS65910_REG_VIO: - return tps65910_reg_update_bits(pmic->mfd, reg, LDO_SEL_MASK, - selector << LDO_SEL_SHIFT); + return tps65910_modify_bits(pmic, reg, + (selector << LDO_SEL_SHIFT), LDO_SEL_MASK); } return -EINVAL; @@ -691,6 +768,23 @@ static int tps65910_list_voltage_dcdc(struct regulator_dev *dev, return volt * 100 * mult; } +static int tps65910_list_voltage(struct regulator_dev *dev, + unsigned selector) +{ + struct tps65910_reg *pmic = rdev_get_drvdata(dev); + int id = rdev_get_id(dev), voltage; + + if (id < TPS65910_REG_VIO || id > TPS65910_REG_VMMC) + return -EINVAL; + + if (selector >= pmic->info[id]->n_voltages) + return -EINVAL; + else + voltage = pmic->info[id]->voltage_table[selector] * 1000; + + return voltage; +} + static int tps65911_list_voltage(struct regulator_dev *dev, unsigned selector) { struct tps65910_reg *pmic = rdev_get_drvdata(dev); @@ -722,7 +816,7 @@ static int tps65911_list_voltage(struct regulator_dev *dev, unsigned selector) step_mv = 100; break; case TPS65910_REG_VIO: - return pmic->info[id]->voltage_table[selector]; + return pmic->info[id]->voltage_table[selector] * 1000; default: return -EINVAL; } @@ -730,16 +824,42 @@ static int tps65911_list_voltage(struct regulator_dev *dev, unsigned selector) return (LDO_MIN_VOLT + selector * step_mv) * 1000; } +static int tps65910_set_voltage_dcdc_time_sel(struct regulator_dev *dev, + unsigned int old_selector, unsigned int new_selector) +{ + int id = rdev_get_id(dev); + int old_volt, new_volt; + + old_volt = tps65910_list_voltage_dcdc(dev, old_selector); + if (old_volt < 0) + return old_volt; + + new_volt = tps65910_list_voltage_dcdc(dev, new_selector); + if (new_volt < 0) + return new_volt; + + /* VDD1 and VDD2 are 12.5mV/us, VDDCTRL is 100mV/20us */ + switch (id) { + case TPS65910_REG_VDD1: + case TPS65910_REG_VDD2: + return DIV_ROUND_UP(abs(old_volt - new_volt), 12500); + case TPS65911_REG_VDDCTRL: + return DIV_ROUND_UP(abs(old_volt - new_volt), 5000); + } + return -EINVAL; +} + /* Regulator ops (except VRTC) */ static struct regulator_ops tps65910_ops_dcdc = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, + .enable_time = tps65910_enable_time, .set_mode = tps65910_set_mode, .get_mode = tps65910_get_mode, .get_voltage_sel = tps65910_get_voltage_dcdc_sel, .set_voltage_sel = tps65910_set_voltage_dcdc_sel, - .set_voltage_time_sel = regulator_set_voltage_time_sel, + .set_voltage_time_sel = tps65910_set_voltage_dcdc_time_sel, .list_voltage = tps65910_list_voltage_dcdc, }; @@ -747,27 +867,30 @@ 
static struct regulator_ops tps65910_ops_vdd3 = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, + .enable_time = tps65910_enable_time, .set_mode = tps65910_set_mode, .get_mode = tps65910_get_mode, .get_voltage = tps65910_get_voltage_vdd3, - .list_voltage = regulator_list_voltage_table, + .list_voltage = tps65910_list_voltage, }; static struct regulator_ops tps65910_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, + .enable_time = tps65910_enable_time, .set_mode = tps65910_set_mode, .get_mode = tps65910_get_mode, .get_voltage_sel = tps65910_get_voltage_sel, .set_voltage_sel = tps65910_set_voltage_sel, - .list_voltage = regulator_list_voltage_table, + .list_voltage = tps65910_list_voltage, }; static struct regulator_ops tps65911_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, + .enable_time = tps65910_enable_time, .set_mode = tps65910_set_mode, .get_mode = tps65910_get_mode, .get_voltage_sel = tps65911_get_voltage_sel, @@ -873,27 +996,19 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic, (tps65910_chip_id(mfd) == TPS65911))) { int op_reg_add = pmic->get_ctrl_reg(id) + 1; int sr_reg_add = pmic->get_ctrl_reg(id) + 2; - int opvsel, srvsel; - - ret = tps65910_reg_read(pmic->mfd, op_reg_add, &opvsel); - if (ret < 0) - return ret; - ret = tps65910_reg_read(pmic->mfd, sr_reg_add, &srvsel); - if (ret < 0) - return ret; - + int opvsel = tps65910_reg_read_locked(pmic, op_reg_add); + int srvsel = tps65910_reg_read_locked(pmic, sr_reg_add); if (opvsel & VDD1_OP_CMD_MASK) { u8 reg_val = srvsel & VDD1_OP_SEL_MASK; - - ret = tps65910_reg_write(pmic->mfd, op_reg_add, - reg_val); + ret = tps65910_reg_write_locked(pmic, op_reg_add, + reg_val); if (ret < 0) { dev_err(mfd->dev, "Error in configuring op register\n"); return ret; } } - ret = tps65910_reg_write(pmic->mfd, sr_reg_add, 0); + ret = tps65910_reg_write_locked(pmic, sr_reg_add, 0); if (ret < 0) { dev_err(mfd->dev, "Error in settting sr register\n"); return ret; @@ -1011,7 +1126,6 @@ static struct tps65910_board *tps65910_parse_dt_reg_data( "ti,regulator-ext-sleep-control", &prop); if (!ret) pmic_plat_data->regulator_ext_sleep_control[idx] = prop; - } return pmic_plat_data; @@ -1022,7 +1136,7 @@ static inline struct tps65910_board *tps65910_parse_dt_reg_data( struct of_regulator_match **tps65910_reg_matches) { *tps65910_reg_matches = NULL; - return NULL; + return 0; } #endif @@ -1054,6 +1168,7 @@ static __devinit int tps65910_probe(struct platform_device *pdev) return -ENOMEM; } + mutex_init(&pmic->mutex); pmic->mfd = tps65910; platform_set_drvdata(pdev, pmic); @@ -1114,31 +1229,23 @@ static __devinit int tps65910_probe(struct platform_device *pdev) pmic->info[i] = info; pmic->desc[i].name = info->name; - pmic->desc[i].supply_name = info->vin_name; pmic->desc[i].id = i; pmic->desc[i].n_voltages = info->n_voltages; - pmic->desc[i].enable_time = info->enable_time_us; if (i == TPS65910_REG_VDD1 || i == TPS65910_REG_VDD2) { pmic->desc[i].ops = &tps65910_ops_dcdc; pmic->desc[i].n_voltages = VDD1_2_NUM_VOLT_FINE * VDD1_2_NUM_VOLT_COARSE; - pmic->desc[i].ramp_delay = 12500; } else if (i == TPS65910_REG_VDD3) { - if (tps65910_chip_id(tps65910) == TPS65910) { + if (tps65910_chip_id(tps65910) == TPS65910) pmic->desc[i].ops = &tps65910_ops_vdd3; - pmic->desc[i].volt_table = info->voltage_table; - } else { + else pmic->desc[i].ops = 
&tps65910_ops_dcdc; - pmic->desc[i].ramp_delay = 5000; - } } else { - if (tps65910_chip_id(tps65910) == TPS65910) { + if (tps65910_chip_id(tps65910) == TPS65910) pmic->desc[i].ops = &tps65910_ops; - pmic->desc[i].volt_table = info->voltage_table; - } else { + else pmic->desc[i].ops = &tps65911_ops; - } } err = tps65910_set_ext_sleep_config(pmic, i, diff --git a/trunk/drivers/regulator/twl-regulator.c b/trunk/drivers/regulator/twl-regulator.c index 242fe90dc565..c7390711d954 100644 --- a/trunk/drivers/regulator/twl-regulator.c +++ b/trunk/drivers/regulator/twl-regulator.c @@ -43,6 +43,9 @@ struct twlreg_info { u8 table_len; const u16 *table; + /* regulator specific turn-on delay */ + u16 delay; + /* State REMAP default configuration */ u8 remap; @@ -220,6 +223,20 @@ static int twl6030reg_enable(struct regulator_dev *rdev) return ret; } +static int twl4030reg_enable_time(struct regulator_dev *rdev) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + + return info->delay; +} + +static int twl6030reg_enable_time(struct regulator_dev *rdev) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + + return info->delay; +} + static int twl4030reg_disable(struct regulator_dev *rdev) { struct twlreg_info *info = rdev_get_drvdata(rdev); @@ -491,6 +508,7 @@ static struct regulator_ops twl4030ldo_ops = { .enable = twl4030reg_enable, .disable = twl4030reg_disable, .is_enabled = twl4030reg_is_enabled, + .enable_time = twl4030reg_enable_time, .set_mode = twl4030reg_set_mode, @@ -559,53 +577,59 @@ static struct regulator_ops twl6030coresmps_ops = { .get_voltage = twl6030coresmps_get_voltage, }; -static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned sel) +static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index) { - struct twlreg_info *info = rdev_get_drvdata(rdev); + struct twlreg_info *info = rdev_get_drvdata(rdev); - switch (sel) { - case 0: - return 0; - case 1 ... 24: - /* Linear mapping from 00000001 to 00011000: - * Absolute voltage value = 1.0 V + 0.1 V × (sel – 00000001) - */ - return (info->min_mV + 100 * (sel - 1)) * 1000; - case 25 ... 
30: - return -EINVAL; - case 31: - return 2750000; - default: - return -EINVAL; - } + return ((info->min_mV + (index * 100)) * 1000); } static int -twl6030ldo_set_voltage_sel(struct regulator_dev *rdev, unsigned selector) +twl6030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, + unsigned *selector) { struct twlreg_info *info = rdev_get_drvdata(rdev); + int vsel; + + if ((min_uV/1000 < info->min_mV) || (max_uV/1000 > info->max_mV)) + return -EDOM; + + /* + * Use the below formula to calculate vsel + * mV = 1000mv + 100mv * (vsel - 1) + */ + vsel = (min_uV/1000 - 1000)/100 + 1; + *selector = vsel; + return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE, vsel); - return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE, - selector); } -static int twl6030ldo_get_voltage_sel(struct regulator_dev *rdev) +static int twl6030ldo_get_voltage(struct regulator_dev *rdev) { struct twlreg_info *info = rdev_get_drvdata(rdev); - int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE); + int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER, + VREG_VOLTAGE); + + if (vsel < 0) + return vsel; - return vsel; + /* + * Use the below formula to calculate vsel + * mV = 1000mv + 100mv * (vsel - 1) + */ + return (1000 + (100 * (vsel - 1))) * 1000; } static struct regulator_ops twl6030ldo_ops = { .list_voltage = twl6030ldo_list_voltage, - .set_voltage_sel = twl6030ldo_set_voltage_sel, - .get_voltage_sel = twl6030ldo_get_voltage_sel, + .set_voltage = twl6030ldo_set_voltage, + .get_voltage = twl6030ldo_get_voltage, .enable = twl6030reg_enable, .disable = twl6030reg_disable, .is_enabled = twl6030reg_is_enabled, + .enable_time = twl6030reg_enable_time, .set_mode = twl6030reg_set_mode, @@ -639,6 +663,7 @@ static struct regulator_ops twl4030fixed_ops = { .enable = twl4030reg_enable, .disable = twl4030reg_disable, .is_enabled = twl4030reg_is_enabled, + .enable_time = twl4030reg_enable_time, .set_mode = twl4030reg_set_mode, @@ -653,6 +678,7 @@ static struct regulator_ops twl6030fixed_ops = { .enable = twl6030reg_enable, .disable = twl6030reg_disable, .is_enabled = twl6030reg_is_enabled, + .enable_time = twl6030reg_enable_time, .set_mode = twl6030reg_set_mode, @@ -663,6 +689,7 @@ static struct regulator_ops twl6030_fixed_resource = { .enable = twl6030reg_enable, .disable = twl6030reg_disable, .is_enabled = twl6030reg_is_enabled, + .enable_time = twl6030reg_enable_time, .get_status = twl6030reg_get_status, }; @@ -859,6 +886,7 @@ static struct regulator_ops twlsmps_ops = { .enable = twl6030reg_enable, .disable = twl6030reg_disable, .is_enabled = twl6030reg_is_enabled, + .enable_time = twl6030reg_enable_time, .set_mode = twl6030reg_set_mode, @@ -881,6 +909,7 @@ static struct twlreg_info TWL4030_INFO_##label = { \ .id = num, \ .table_len = ARRAY_SIZE(label##_VSEL_table), \ .table = label##_VSEL_table, \ + .delay = turnon_delay, \ .remap = remap_conf, \ .desc = { \ .name = #label, \ @@ -889,7 +918,6 @@ static struct twlreg_info TWL4030_INFO_##label = { \ .ops = &twl4030ldo_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ - .enable_time = turnon_delay, \ }, \ } @@ -897,6 +925,7 @@ static struct twlreg_info TWL4030_INFO_##label = { \ static struct twlreg_info TWL4030_INFO_##label = { \ .base = offset, \ .id = num, \ + .delay = turnon_delay, \ .remap = remap_conf, \ .desc = { \ .name = #label, \ @@ -904,7 +933,6 @@ static struct twlreg_info TWL4030_INFO_##label = { \ .ops = &twl4030smps_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ - .enable_time = turnon_delay, \ 
}, \ } @@ -927,7 +955,7 @@ static struct twlreg_info TWL6030_INFO_##label = { \ .desc = { \ .name = #label, \ .id = TWL6030_REG_##label, \ - .n_voltages = 32, \ + .n_voltages = (max_mVolts - min_mVolts)/100 + 1, \ .ops = &twl6030ldo_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ @@ -942,7 +970,7 @@ static struct twlreg_info TWL6025_INFO_##label = { \ .desc = { \ .name = #label, \ .id = TWL6025_REG_##label, \ - .n_voltages = 32, \ + .n_voltages = ((max_mVolts - min_mVolts)/100) + 1, \ .ops = &twl6030ldo_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ @@ -955,6 +983,7 @@ static struct twlreg_info TWLFIXED_INFO_##label = { \ .base = offset, \ .id = num, \ .min_mV = mVolts, \ + .delay = turnon_delay, \ .remap = remap_conf, \ .desc = { \ .name = #label, \ @@ -963,20 +992,19 @@ static struct twlreg_info TWLFIXED_INFO_##label = { \ .ops = &operations, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ - .enable_time = turnon_delay, \ }, \ } #define TWL6030_FIXED_RESOURCE(label, offset, turnon_delay) \ static struct twlreg_info TWLRES_INFO_##label = { \ .base = offset, \ + .delay = turnon_delay, \ .desc = { \ .name = #label, \ .id = TWL6030_REG_##label, \ .ops = &twl6030_fixed_resource, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ - .enable_time = turnon_delay, \ }, \ } @@ -1081,6 +1109,7 @@ static u8 twl_get_smps_mult(void) #define TWL6030_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWL6030, label) #define TWL6025_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWL6025, label) #define TWLFIXED_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWLFIXED, label) +#define TWLRES_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWLRES, label) #define TWLSMPS_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWLSMPS, label) static const struct of_device_id twl_of_match[] __devinitconst = { @@ -1128,6 +1157,7 @@ static const struct of_device_id twl_of_match[] __devinitconst = { TWLFIXED_OF_MATCH("ti,twl6030-vusb", VUSB), TWLFIXED_OF_MATCH("ti,twl6030-v1v8", V1V8), TWLFIXED_OF_MATCH("ti,twl6030-v2v1", V2V1), + TWLRES_OF_MATCH("ti,twl6030-clk32kg", CLK32KG), TWLSMPS_OF_MATCH("ti,twl6025-smps3", SMPS3), TWLSMPS_OF_MATCH("ti,twl6025-smps4", SMPS4), TWLSMPS_OF_MATCH("ti,twl6025-vio", VIO), diff --git a/trunk/drivers/regulator/wm831x-dcdc.c b/trunk/drivers/regulator/wm831x-dcdc.c index 7413885be01b..099da11e989f 100644 --- a/trunk/drivers/regulator/wm831x-dcdc.c +++ b/trunk/drivers/regulator/wm831x-dcdc.c @@ -215,8 +215,8 @@ static int wm831x_buckv_list_voltage(struct regulator_dev *rdev, return -EINVAL; } -static int wm831x_buckv_map_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) +static int wm831x_buckv_select_min_voltage(struct regulator_dev *rdev, + int min_uV, int max_uV) { u16 vsel; @@ -251,14 +251,20 @@ static int wm831x_buckv_set_dvs(struct regulator_dev *rdev, int state) return 0; } -static int wm831x_buckv_set_voltage_sel(struct regulator_dev *rdev, - unsigned vsel) +static int wm831x_buckv_set_voltage(struct regulator_dev *rdev, + int min_uV, int max_uV, unsigned *selector) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); struct wm831x *wm831x = dcdc->wm831x; int on_reg = dcdc->base + WM831X_DCDC_ON_CONFIG; int dvs_reg = dcdc->base + WM831X_DCDC_DVS_CONTROL; - int ret; + int vsel, ret; + + vsel = wm831x_buckv_select_min_voltage(rdev, min_uV, max_uV); + if (vsel < 0) + return vsel; + + *selector = vsel; /* If this value is already set then do a GPIO update if we can */ if (dcdc->dvs_gpio && dcdc->on_vsel == vsel) @@ -309,7 +315,7 @@ static int 
wm831x_buckv_set_suspend_voltage(struct regulator_dev *rdev, u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL; int vsel; - vsel = wm831x_buckv_map_voltage(rdev, uV, uV); + vsel = wm831x_buckv_select_min_voltage(rdev, uV, uV); if (vsel < 0) return vsel; @@ -367,10 +373,9 @@ static int wm831x_buckv_get_current_limit(struct regulator_dev *rdev) } static struct regulator_ops wm831x_buckv_ops = { - .set_voltage_sel = wm831x_buckv_set_voltage_sel, + .set_voltage = wm831x_buckv_set_voltage, .get_voltage_sel = wm831x_buckv_get_voltage_sel, .list_voltage = wm831x_buckv_list_voltage, - .map_voltage = wm831x_buckv_map_voltage, .set_suspend_voltage = wm831x_buckv_set_suspend_voltage, .set_current_limit = wm831x_buckv_set_current_limit, .get_current_limit = wm831x_buckv_get_current_limit, @@ -594,25 +599,60 @@ static struct platform_driver wm831x_buckv_driver = { * BUCKP specifics */ -static int wm831x_buckp_set_suspend_voltage(struct regulator_dev *rdev, int uV) +static int wm831x_buckp_list_voltage(struct regulator_dev *rdev, + unsigned selector) +{ + if (selector <= WM831X_BUCKP_MAX_SELECTOR) + return 850000 + (selector * 25000); + else + return -EINVAL; +} + +static int wm831x_buckp_set_voltage_int(struct regulator_dev *rdev, int reg, + int min_uV, int max_uV, int *selector) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); struct wm831x *wm831x = dcdc->wm831x; - u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL; - int sel; + u16 vsel; + + if (min_uV <= 34000000) + vsel = (min_uV - 850000) / 25000; + else + return -EINVAL; - sel = regulator_map_voltage_linear(rdev, uV, uV); - if (sel < 0) - return sel; + if (wm831x_buckp_list_voltage(rdev, vsel) > max_uV) + return -EINVAL; + + *selector = vsel; + + return wm831x_set_bits(wm831x, reg, WM831X_DC3_ON_VSEL_MASK, vsel); +} + +static int wm831x_buckp_set_voltage(struct regulator_dev *rdev, + int min_uV, int max_uV, + unsigned *selector) +{ + struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); + u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG; + + return wm831x_buckp_set_voltage_int(rdev, reg, min_uV, max_uV, + selector); +} + +static int wm831x_buckp_set_suspend_voltage(struct regulator_dev *rdev, + int uV) +{ + struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); + u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL; + unsigned selector; - return wm831x_set_bits(wm831x, reg, WM831X_DC3_ON_VSEL_MASK, sel); + return wm831x_buckp_set_voltage_int(rdev, reg, uV, uV, &selector); } static struct regulator_ops wm831x_buckp_ops = { - .set_voltage_sel = regulator_set_voltage_sel_regmap, + .set_voltage = wm831x_buckp_set_voltage, .get_voltage_sel = regulator_get_voltage_sel_regmap, - .list_voltage = regulator_list_voltage_linear, - .map_voltage = regulator_map_voltage_linear, + .list_voltage = wm831x_buckp_list_voltage, .set_suspend_voltage = wm831x_buckp_set_suspend_voltage, .is_enabled = regulator_is_enabled_regmap, @@ -675,8 +715,6 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev) dcdc->desc.vsel_mask = WM831X_DC3_ON_VSEL_MASK; dcdc->desc.enable_reg = WM831X_DCDC_ENABLE; dcdc->desc.enable_mask = 1 << id; - dcdc->desc.min_uV = 850000; - dcdc->desc.uV_step = 25000; config.dev = pdev->dev.parent; if (pdata) diff --git a/trunk/drivers/regulator/wm831x-ldo.c b/trunk/drivers/regulator/wm831x-ldo.c index 5cb70ca1e98d..a9a28d8ac185 100644 --- a/trunk/drivers/regulator/wm831x-ldo.c +++ b/trunk/drivers/regulator/wm831x-ldo.c @@ -78,10 +78,13 @@ static int wm831x_gp_ldo_list_voltage(struct regulator_dev *rdev, return -EINVAL; } -static int 
wm831x_gp_ldo_map_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) +static int wm831x_gp_ldo_set_voltage_int(struct regulator_dev *rdev, int reg, + int min_uV, int max_uV, + unsigned *selector) { - int volt, vsel; + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + struct wm831x *wm831x = ldo->wm831x; + int vsel, ret; if (min_uV < 900000) vsel = 0; @@ -91,25 +94,36 @@ static int wm831x_gp_ldo_map_voltage(struct regulator_dev *rdev, vsel = ((min_uV - 1700000) / 100000) + WM831X_GP_LDO_SELECTOR_LOW + 1; - volt = wm831x_gp_ldo_list_voltage(rdev, vsel); - if (volt < min_uV || volt > max_uV) + ret = wm831x_gp_ldo_list_voltage(rdev, vsel); + if (ret < 0) + return ret; + if (ret < min_uV || ret > max_uV) return -EINVAL; - return vsel; + *selector = vsel; + + return wm831x_set_bits(wm831x, reg, WM831X_LDO1_ON_VSEL_MASK, vsel); +} + +static int wm831x_gp_ldo_set_voltage(struct regulator_dev *rdev, + int min_uV, int max_uV, + unsigned *selector) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + int reg = ldo->base + WM831X_LDO_ON_CONTROL; + + return wm831x_gp_ldo_set_voltage_int(rdev, reg, min_uV, max_uV, + selector); } static int wm831x_gp_ldo_set_suspend_voltage(struct regulator_dev *rdev, int uV) { struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); - struct wm831x *wm831x = ldo->wm831x; - int sel, reg = ldo->base + WM831X_LDO_SLEEP_CONTROL; + int reg = ldo->base + WM831X_LDO_SLEEP_CONTROL; + unsigned int selector; - sel = wm831x_gp_ldo_map_voltage(rdev, uV, uV); - if (sel < 0) - return sel; - - return wm831x_set_bits(wm831x, reg, WM831X_LDO1_ON_VSEL_MASK, sel); + return wm831x_gp_ldo_set_voltage_int(rdev, reg, uV, uV, &selector); } static unsigned int wm831x_gp_ldo_get_mode(struct regulator_dev *rdev) @@ -229,9 +243,8 @@ static unsigned int wm831x_gp_ldo_get_optimum_mode(struct regulator_dev *rdev, static struct regulator_ops wm831x_gp_ldo_ops = { .list_voltage = wm831x_gp_ldo_list_voltage, - .map_voltage = wm831x_gp_ldo_map_voltage, .get_voltage_sel = regulator_get_voltage_sel_regmap, - .set_voltage_sel = regulator_set_voltage_sel_regmap, + .set_voltage = wm831x_gp_ldo_set_voltage, .set_suspend_voltage = wm831x_gp_ldo_set_suspend_voltage, .get_mode = wm831x_gp_ldo_get_mode, .set_mode = wm831x_gp_ldo_set_mode, @@ -371,10 +384,13 @@ static int wm831x_aldo_list_voltage(struct regulator_dev *rdev, return -EINVAL; } -static int wm831x_aldo_map_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) +static int wm831x_aldo_set_voltage_int(struct regulator_dev *rdev, int reg, + int min_uV, int max_uV, + unsigned *selector) { - int volt, vsel; + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + struct wm831x *wm831x = ldo->wm831x; + int vsel, ret; if (min_uV < 1000000) vsel = 0; @@ -384,26 +400,35 @@ static int wm831x_aldo_map_voltage(struct regulator_dev *rdev, vsel = ((min_uV - 1700000) / 100000) + WM831X_ALDO_SELECTOR_LOW + 1; - volt = wm831x_aldo_list_voltage(rdev, vsel); - if (volt < min_uV || volt > max_uV) + ret = wm831x_aldo_list_voltage(rdev, vsel); + if (ret < 0) + return ret; + if (ret < min_uV || ret > max_uV) return -EINVAL; - return vsel; + *selector = vsel; + + return wm831x_set_bits(wm831x, reg, WM831X_LDO7_ON_VSEL_MASK, vsel); +} + +static int wm831x_aldo_set_voltage(struct regulator_dev *rdev, + int min_uV, int max_uV, unsigned *selector) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + int reg = ldo->base + WM831X_LDO_ON_CONTROL; + return wm831x_aldo_set_voltage_int(rdev, reg, min_uV, max_uV, + selector); } static int 
wm831x_aldo_set_suspend_voltage(struct regulator_dev *rdev, int uV) { struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); - struct wm831x *wm831x = ldo->wm831x; - int sel, reg = ldo->base + WM831X_LDO_SLEEP_CONTROL; - - sel = wm831x_aldo_map_voltage(rdev, uV, uV); - if (sel < 0) - return sel; + int reg = ldo->base + WM831X_LDO_SLEEP_CONTROL; + unsigned int selector; - return wm831x_set_bits(wm831x, reg, WM831X_LDO7_ON_VSEL_MASK, sel); + return wm831x_aldo_set_voltage_int(rdev, reg, uV, uV, &selector); } static unsigned int wm831x_aldo_get_mode(struct regulator_dev *rdev) @@ -481,9 +506,8 @@ static int wm831x_aldo_get_status(struct regulator_dev *rdev) static struct regulator_ops wm831x_aldo_ops = { .list_voltage = wm831x_aldo_list_voltage, - .map_voltage = wm831x_aldo_map_voltage, .get_voltage_sel = regulator_get_voltage_sel_regmap, - .set_voltage_sel = regulator_set_voltage_sel_regmap, + .set_voltage = wm831x_aldo_set_voltage, .set_suspend_voltage = wm831x_aldo_set_suspend_voltage, .get_mode = wm831x_aldo_get_mode, .set_mode = wm831x_aldo_set_mode, @@ -604,18 +628,47 @@ static struct platform_driver wm831x_aldo_driver = { #define WM831X_ALIVE_LDO_MAX_SELECTOR 0xf -static int wm831x_alive_ldo_set_suspend_voltage(struct regulator_dev *rdev, - int uV) +static int wm831x_alive_ldo_set_voltage_int(struct regulator_dev *rdev, + int reg, + int min_uV, int max_uV, + unsigned *selector) { struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); struct wm831x *wm831x = ldo->wm831x; - int sel, reg = ldo->base + WM831X_ALIVE_LDO_SLEEP_CONTROL; + int vsel, ret; + + vsel = (min_uV - 800000) / 50000; - sel = regulator_map_voltage_linear(rdev, uV, uV); - if (sel < 0) - return sel; + ret = regulator_list_voltage_linear(rdev, vsel); + if (ret < 0) + return ret; + if (ret < min_uV || ret > max_uV) + return -EINVAL; + + *selector = vsel; + + return wm831x_set_bits(wm831x, reg, WM831X_LDO11_ON_VSEL_MASK, vsel); +} + +static int wm831x_alive_ldo_set_voltage(struct regulator_dev *rdev, + int min_uV, int max_uV, + unsigned *selector) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + int reg = ldo->base + WM831X_ALIVE_LDO_ON_CONTROL; + + return wm831x_alive_ldo_set_voltage_int(rdev, reg, min_uV, max_uV, + selector); +} + +static int wm831x_alive_ldo_set_suspend_voltage(struct regulator_dev *rdev, + int uV) +{ + struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); + int reg = ldo->base + WM831X_ALIVE_LDO_SLEEP_CONTROL; + unsigned selector; - return wm831x_set_bits(wm831x, reg, WM831X_LDO11_ON_VSEL_MASK, sel); + return wm831x_alive_ldo_set_voltage_int(rdev, reg, uV, uV, &selector); } static int wm831x_alive_ldo_get_status(struct regulator_dev *rdev) @@ -637,9 +690,8 @@ static int wm831x_alive_ldo_get_status(struct regulator_dev *rdev) static struct regulator_ops wm831x_alive_ldo_ops = { .list_voltage = regulator_list_voltage_linear, - .map_voltage = regulator_map_voltage_linear, .get_voltage_sel = regulator_get_voltage_sel_regmap, - .set_voltage_sel = regulator_set_voltage_sel_regmap, + .set_voltage = wm831x_alive_ldo_set_voltage, .set_suspend_voltage = wm831x_alive_ldo_set_suspend_voltage, .get_status = wm831x_alive_ldo_get_status, @@ -701,7 +753,6 @@ static __devinit int wm831x_alive_ldo_probe(struct platform_device *pdev) ldo->desc.enable_mask = 1 << id; ldo->desc.min_uV = 800000; ldo->desc.uV_step = 50000; - ldo->desc.enable_time = 1000; config.dev = pdev->dev.parent; if (pdata) diff --git a/trunk/drivers/regulator/wm8350-regulator.c b/trunk/drivers/regulator/wm8350-regulator.c index 7f0fa22ef2aa..94e550dc70b6 100644 
--- a/trunk/drivers/regulator/wm8350-regulator.c +++ b/trunk/drivers/regulator/wm8350-regulator.c @@ -108,6 +108,33 @@ static int get_isink_val(int min_uA, int max_uA, u16 *setting) return -EINVAL; } +static inline int wm8350_ldo_val_to_mvolts(unsigned int val) +{ + if (val < 16) + return (val * 50) + 900; + else + return ((val - 16) * 100) + 1800; + +} + +static inline unsigned int wm8350_ldo_mvolts_to_val(int mV) +{ + if (mV < 1800) + return (mV - 900) / 50; + else + return ((mV - 1800) / 100) + 16; +} + +static inline int wm8350_dcdc_val_to_mvolts(unsigned int val) +{ + return (val * 25) + 850; +} + +static inline unsigned int wm8350_dcdc_mvolts_to_val(int mV) +{ + return (mV - 850) / 25; +} + static int wm8350_isink_set_current(struct regulator_dev *rdev, int min_uA, int max_uA) { @@ -332,13 +359,104 @@ int wm8350_isink_set_flash(struct wm8350 *wm8350, int isink, u16 mode, } EXPORT_SYMBOL_GPL(wm8350_isink_set_flash); +static int wm8350_dcdc_set_voltage(struct regulator_dev *rdev, int min_uV, + int max_uV, unsigned *selector) +{ + struct wm8350 *wm8350 = rdev_get_drvdata(rdev); + int volt_reg, dcdc = rdev_get_id(rdev), mV, + min_mV = min_uV / 1000, max_mV = max_uV / 1000; + u16 val; + + if (min_mV < 850 || min_mV > 4025) + return -EINVAL; + if (max_mV < 850 || max_mV > 4025) + return -EINVAL; + + /* step size is 25mV */ + mV = (min_mV - 826) / 25; + if (wm8350_dcdc_val_to_mvolts(mV) > max_mV) + return -EINVAL; + BUG_ON(wm8350_dcdc_val_to_mvolts(mV) < min_mV); + + switch (dcdc) { + case WM8350_DCDC_1: + volt_reg = WM8350_DCDC1_CONTROL; + break; + case WM8350_DCDC_3: + volt_reg = WM8350_DCDC3_CONTROL; + break; + case WM8350_DCDC_4: + volt_reg = WM8350_DCDC4_CONTROL; + break; + case WM8350_DCDC_6: + volt_reg = WM8350_DCDC6_CONTROL; + break; + case WM8350_DCDC_2: + case WM8350_DCDC_5: + default: + return -EINVAL; + } + + *selector = mV; + + /* all DCDCs have same mV bits */ + val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_DC1_VSEL_MASK; + wm8350_reg_write(wm8350, volt_reg, val | mV); + return 0; +} + +static int wm8350_dcdc_get_voltage_sel(struct regulator_dev *rdev) +{ + struct wm8350 *wm8350 = rdev_get_drvdata(rdev); + int volt_reg, dcdc = rdev_get_id(rdev); + + switch (dcdc) { + case WM8350_DCDC_1: + volt_reg = WM8350_DCDC1_CONTROL; + break; + case WM8350_DCDC_3: + volt_reg = WM8350_DCDC3_CONTROL; + break; + case WM8350_DCDC_4: + volt_reg = WM8350_DCDC4_CONTROL; + break; + case WM8350_DCDC_6: + volt_reg = WM8350_DCDC6_CONTROL; + break; + case WM8350_DCDC_2: + case WM8350_DCDC_5: + default: + return -EINVAL; + } + + /* all DCDCs have same mV bits */ + return wm8350_reg_read(wm8350, volt_reg) & WM8350_DC1_VSEL_MASK; +} + +static int wm8350_dcdc_list_voltage(struct regulator_dev *rdev, + unsigned selector) +{ + if (selector > WM8350_DCDC_MAX_VSEL) + return -EINVAL; + return wm8350_dcdc_val_to_mvolts(selector) * 1000; +} + static int wm8350_dcdc_set_suspend_voltage(struct regulator_dev *rdev, int uV) { struct wm8350 *wm8350 = rdev_get_drvdata(rdev); - int sel, volt_reg, dcdc = rdev_get_id(rdev); + int volt_reg, mV = uV / 1000, dcdc = rdev_get_id(rdev); u16 val; - dev_dbg(wm8350->dev, "%s %d mV %d\n", __func__, dcdc, uV / 1000); + dev_dbg(wm8350->dev, "%s %d mV %d\n", __func__, dcdc, mV); + + if (mV && (mV < 850 || mV > 4025)) { + dev_err(wm8350->dev, + "DCDC%d suspend voltage %d mV out of range\n", + dcdc, mV); + return -EINVAL; + } + if (mV == 0) + mV = 850; switch (dcdc) { case WM8350_DCDC_1: @@ -359,13 +477,10 @@ static int wm8350_dcdc_set_suspend_voltage(struct regulator_dev *rdev, 
int uV) return -EINVAL; } - sel = regulator_map_voltage_linear(rdev, uV, uV); - if (sel < 0) - return -EINVAL; - /* all DCDCs have same mV bits */ val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_DC1_VSEL_MASK; - wm8350_reg_write(wm8350, volt_reg, val | sel); + wm8350_reg_write(wm8350, volt_reg, + val | wm8350_dcdc_mvolts_to_val(mV)); return 0; } @@ -542,49 +657,19 @@ static int wm8350_dcdc_set_suspend_mode(struct regulator_dev *rdev, return 0; } -static int wm8350_ldo_list_voltage(struct regulator_dev *rdev, - unsigned selector) -{ - if (selector > WM8350_LDO1_VSEL_MASK) - return -EINVAL; - - if (selector < 16) - return (selector * 50000) + 900000; - else - return ((selector - 16) * 100000) + 1800000; -} - -static int wm8350_ldo_map_voltage(struct regulator_dev *rdev, int min_uV, - int max_uV) -{ - int volt, sel; - int min_mV = min_uV / 1000; - int max_mV = max_uV / 1000; - - if (min_mV < 900 || min_mV > 3300) - return -EINVAL; - if (max_mV < 900 || max_mV > 3300) - return -EINVAL; - - if (min_mV < 1800) /* step size is 50mV < 1800mV */ - sel = DIV_ROUND_UP(min_uV - 900, 50); - else /* step size is 100mV > 1800mV */ - sel = DIV_ROUND_UP(min_uV - 1800, 100) + 16; - - volt = wm8350_ldo_list_voltage(rdev, sel); - if (volt < min_uV || volt > max_uV) - return -EINVAL; - - return sel; -} - static int wm8350_ldo_set_suspend_voltage(struct regulator_dev *rdev, int uV) { struct wm8350 *wm8350 = rdev_get_drvdata(rdev); - int sel, volt_reg, ldo = rdev_get_id(rdev); + int volt_reg, mV = uV / 1000, ldo = rdev_get_id(rdev); u16 val; - dev_dbg(wm8350->dev, "%s %d mV %d\n", __func__, ldo, uV / 1000); + dev_dbg(wm8350->dev, "%s %d mV %d\n", __func__, ldo, mV); + + if (mV < 900 || mV > 3300) { + dev_err(wm8350->dev, "LDO%d voltage %d mV out of range\n", + ldo, mV); + return -EINVAL; + } switch (ldo) { case WM8350_LDO_1: @@ -603,13 +688,10 @@ static int wm8350_ldo_set_suspend_voltage(struct regulator_dev *rdev, int uV) return -EINVAL; } - sel = wm8350_ldo_map_voltage(rdev, uV, uV); - if (sel < 0) - return -EINVAL; - /* all LDOs have same mV bits */ val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_LDO1_VSEL_MASK; - wm8350_reg_write(wm8350, volt_reg, val | sel); + wm8350_reg_write(wm8350, volt_reg, + val | wm8350_ldo_mvolts_to_val(mV)); return 0; } @@ -671,6 +753,92 @@ static int wm8350_ldo_set_suspend_disable(struct regulator_dev *rdev) return 0; } +static int wm8350_ldo_set_voltage(struct regulator_dev *rdev, int min_uV, + int max_uV, unsigned *selector) +{ + struct wm8350 *wm8350 = rdev_get_drvdata(rdev); + int volt_reg, ldo = rdev_get_id(rdev), mV, min_mV = min_uV / 1000, + max_mV = max_uV / 1000; + u16 val; + + if (min_mV < 900 || min_mV > 3300) + return -EINVAL; + if (max_mV < 900 || max_mV > 3300) + return -EINVAL; + + if (min_mV < 1800) { + /* step size is 50mV < 1800mV */ + mV = (min_mV - 851) / 50; + if (wm8350_ldo_val_to_mvolts(mV) > max_mV) + return -EINVAL; + BUG_ON(wm8350_ldo_val_to_mvolts(mV) < min_mV); + } else { + /* step size is 100mV > 1800mV */ + mV = ((min_mV - 1701) / 100) + 16; + if (wm8350_ldo_val_to_mvolts(mV) > max_mV) + return -EINVAL; + BUG_ON(wm8350_ldo_val_to_mvolts(mV) < min_mV); + } + + switch (ldo) { + case WM8350_LDO_1: + volt_reg = WM8350_LDO1_CONTROL; + break; + case WM8350_LDO_2: + volt_reg = WM8350_LDO2_CONTROL; + break; + case WM8350_LDO_3: + volt_reg = WM8350_LDO3_CONTROL; + break; + case WM8350_LDO_4: + volt_reg = WM8350_LDO4_CONTROL; + break; + default: + return -EINVAL; + } + + *selector = mV; + + /* all LDOs have same mV bits */ + val = wm8350_reg_read(wm8350, 
volt_reg) & ~WM8350_LDO1_VSEL_MASK; + wm8350_reg_write(wm8350, volt_reg, val | mV); + return 0; +} + +static int wm8350_ldo_get_voltage_sel(struct regulator_dev *rdev) +{ + struct wm8350 *wm8350 = rdev_get_drvdata(rdev); + int volt_reg, ldo = rdev_get_id(rdev); + + switch (ldo) { + case WM8350_LDO_1: + volt_reg = WM8350_LDO1_CONTROL; + break; + case WM8350_LDO_2: + volt_reg = WM8350_LDO2_CONTROL; + break; + case WM8350_LDO_3: + volt_reg = WM8350_LDO3_CONTROL; + break; + case WM8350_LDO_4: + volt_reg = WM8350_LDO4_CONTROL; + break; + default: + return -EINVAL; + } + + /* all LDOs have same mV bits */ + return wm8350_reg_read(wm8350, volt_reg) & WM8350_LDO1_VSEL_MASK; +} + +static int wm8350_ldo_list_voltage(struct regulator_dev *rdev, + unsigned selector) +{ + if (selector > WM8350_LDO1_VSEL_MASK) + return -EINVAL; + return wm8350_ldo_val_to_mvolts(selector) * 1000; +} + int wm8350_dcdc_set_slot(struct wm8350 *wm8350, int dcdc, u16 start, u16 stop, u16 fault) { @@ -791,6 +959,63 @@ int wm8350_dcdc25_set_mode(struct wm8350 *wm8350, int dcdc, u16 mode, } EXPORT_SYMBOL_GPL(wm8350_dcdc25_set_mode); +static int wm8350_dcdc_enable(struct regulator_dev *rdev) +{ + struct wm8350 *wm8350 = rdev_get_drvdata(rdev); + int dcdc = rdev_get_id(rdev); + u16 shift; + + if (dcdc < WM8350_DCDC_1 || dcdc > WM8350_DCDC_6) + return -EINVAL; + + shift = dcdc - WM8350_DCDC_1; + wm8350_set_bits(wm8350, WM8350_DCDC_LDO_REQUESTED, 1 << shift); + return 0; +} + +static int wm8350_dcdc_disable(struct regulator_dev *rdev) +{ + struct wm8350 *wm8350 = rdev_get_drvdata(rdev); + int dcdc = rdev_get_id(rdev); + u16 shift; + + if (dcdc < WM8350_DCDC_1 || dcdc > WM8350_DCDC_6) + return -EINVAL; + + shift = dcdc - WM8350_DCDC_1; + wm8350_clear_bits(wm8350, WM8350_DCDC_LDO_REQUESTED, 1 << shift); + + return 0; +} + +static int wm8350_ldo_enable(struct regulator_dev *rdev) +{ + struct wm8350 *wm8350 = rdev_get_drvdata(rdev); + int ldo = rdev_get_id(rdev); + u16 shift; + + if (ldo < WM8350_LDO_1 || ldo > WM8350_LDO_4) + return -EINVAL; + + shift = (ldo - WM8350_LDO_1) + 8; + wm8350_set_bits(wm8350, WM8350_DCDC_LDO_REQUESTED, 1 << shift); + return 0; +} + +static int wm8350_ldo_disable(struct regulator_dev *rdev) +{ + struct wm8350 *wm8350 = rdev_get_drvdata(rdev); + int ldo = rdev_get_id(rdev); + u16 shift; + + if (ldo < WM8350_LDO_1 || ldo > WM8350_LDO_4) + return -EINVAL; + + shift = (ldo - WM8350_LDO_1) + 8; + wm8350_clear_bits(wm8350, WM8350_DCDC_LDO_REQUESTED, 1 << shift); + return 0; +} + static int force_continuous_enable(struct wm8350 *wm8350, int dcdc, int enable) { int reg = 0, ret; @@ -972,17 +1197,42 @@ static unsigned int wm8350_dcdc_get_optimum_mode(struct regulator_dev *rdev, return mode; } +static int wm8350_dcdc_is_enabled(struct regulator_dev *rdev) +{ + struct wm8350 *wm8350 = rdev_get_drvdata(rdev); + int dcdc = rdev_get_id(rdev), shift; + + if (dcdc < WM8350_DCDC_1 || dcdc > WM8350_DCDC_6) + return -EINVAL; + + shift = dcdc - WM8350_DCDC_1; + return wm8350_reg_read(wm8350, WM8350_DCDC_LDO_REQUESTED) + & (1 << shift); +} + +static int wm8350_ldo_is_enabled(struct regulator_dev *rdev) +{ + struct wm8350 *wm8350 = rdev_get_drvdata(rdev); + int ldo = rdev_get_id(rdev), shift; + + if (ldo < WM8350_LDO_1 || ldo > WM8350_LDO_4) + return -EINVAL; + + shift = (ldo - WM8350_LDO_1) + 8; + return wm8350_reg_read(wm8350, WM8350_DCDC_LDO_REQUESTED) + & (1 << shift); +} + static struct regulator_ops wm8350_dcdc_ops = { - .set_voltage_sel = regulator_set_voltage_sel_regmap, - .get_voltage_sel = 
regulator_get_voltage_sel_regmap, - .list_voltage = regulator_list_voltage_linear, - .map_voltage = regulator_map_voltage_linear, - .enable = regulator_enable_regmap, - .disable = regulator_disable_regmap, - .is_enabled = regulator_is_enabled_regmap, + .set_voltage = wm8350_dcdc_set_voltage, + .get_voltage_sel = wm8350_dcdc_get_voltage_sel, + .list_voltage = wm8350_dcdc_list_voltage, + .enable = wm8350_dcdc_enable, + .disable = wm8350_dcdc_disable, .get_mode = wm8350_dcdc_get_mode, .set_mode = wm8350_dcdc_set_mode, .get_optimum_mode = wm8350_dcdc_get_optimum_mode, + .is_enabled = wm8350_dcdc_is_enabled, .set_suspend_voltage = wm8350_dcdc_set_suspend_voltage, .set_suspend_enable = wm8350_dcdc_set_suspend_enable, .set_suspend_disable = wm8350_dcdc_set_suspend_disable, @@ -990,21 +1240,20 @@ static struct regulator_ops wm8350_dcdc_ops = { }; static struct regulator_ops wm8350_dcdc2_5_ops = { - .enable = regulator_enable_regmap, - .disable = regulator_disable_regmap, - .is_enabled = regulator_is_enabled_regmap, + .enable = wm8350_dcdc_enable, + .disable = wm8350_dcdc_disable, + .is_enabled = wm8350_dcdc_is_enabled, .set_suspend_enable = wm8350_dcdc25_set_suspend_enable, .set_suspend_disable = wm8350_dcdc25_set_suspend_disable, }; static struct regulator_ops wm8350_ldo_ops = { - .map_voltage = wm8350_ldo_map_voltage, - .set_voltage_sel = regulator_set_voltage_sel_regmap, - .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage = wm8350_ldo_set_voltage, + .get_voltage_sel = wm8350_ldo_get_voltage_sel, .list_voltage = wm8350_ldo_list_voltage, - .enable = regulator_enable_regmap, - .disable = regulator_disable_regmap, - .is_enabled = regulator_is_enabled_regmap, + .enable = wm8350_ldo_enable, + .disable = wm8350_ldo_disable, + .is_enabled = wm8350_ldo_is_enabled, .get_mode = wm8350_ldo_get_mode, .set_suspend_voltage = wm8350_ldo_set_suspend_voltage, .set_suspend_enable = wm8350_ldo_set_suspend_enable, @@ -1028,12 +1277,6 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = { .irq = WM8350_IRQ_UV_DC1, .type = REGULATOR_VOLTAGE, .n_voltages = WM8350_DCDC_MAX_VSEL + 1, - .min_uV = 850000, - .uV_step = 25000, - .vsel_reg = WM8350_DCDC1_CONTROL, - .vsel_mask = WM8350_DC1_VSEL_MASK, - .enable_reg = WM8350_DCDC_LDO_REQUESTED, - .enable_mask = WM8350_DC1_ENA, .owner = THIS_MODULE, }, { @@ -1042,8 +1285,6 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = { .ops = &wm8350_dcdc2_5_ops, .irq = WM8350_IRQ_UV_DC2, .type = REGULATOR_VOLTAGE, - .enable_reg = WM8350_DCDC_LDO_REQUESTED, - .enable_mask = WM8350_DC2_ENA, .owner = THIS_MODULE, }, { @@ -1053,12 +1294,6 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = { .irq = WM8350_IRQ_UV_DC3, .type = REGULATOR_VOLTAGE, .n_voltages = WM8350_DCDC_MAX_VSEL + 1, - .min_uV = 850000, - .uV_step = 25000, - .vsel_reg = WM8350_DCDC3_CONTROL, - .vsel_mask = WM8350_DC3_VSEL_MASK, - .enable_reg = WM8350_DCDC_LDO_REQUESTED, - .enable_mask = WM8350_DC3_ENA, .owner = THIS_MODULE, }, { @@ -1068,12 +1303,6 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = { .irq = WM8350_IRQ_UV_DC4, .type = REGULATOR_VOLTAGE, .n_voltages = WM8350_DCDC_MAX_VSEL + 1, - .min_uV = 850000, - .uV_step = 25000, - .vsel_reg = WM8350_DCDC4_CONTROL, - .vsel_mask = WM8350_DC4_VSEL_MASK, - .enable_reg = WM8350_DCDC_LDO_REQUESTED, - .enable_mask = WM8350_DC4_ENA, .owner = THIS_MODULE, }, { @@ -1082,8 +1311,6 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = { .ops = 
&wm8350_dcdc2_5_ops, .irq = WM8350_IRQ_UV_DC5, .type = REGULATOR_VOLTAGE, - .enable_reg = WM8350_DCDC_LDO_REQUESTED, - .enable_mask = WM8350_DC5_ENA, .owner = THIS_MODULE, }, { @@ -1093,12 +1320,6 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = { .irq = WM8350_IRQ_UV_DC6, .type = REGULATOR_VOLTAGE, .n_voltages = WM8350_DCDC_MAX_VSEL + 1, - .min_uV = 850000, - .uV_step = 25000, - .vsel_reg = WM8350_DCDC6_CONTROL, - .vsel_mask = WM8350_DC6_VSEL_MASK, - .enable_reg = WM8350_DCDC_LDO_REQUESTED, - .enable_mask = WM8350_DC6_ENA, .owner = THIS_MODULE, }, { @@ -1108,10 +1329,6 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = { .irq = WM8350_IRQ_UV_LDO1, .type = REGULATOR_VOLTAGE, .n_voltages = WM8350_LDO1_VSEL_MASK + 1, - .vsel_reg = WM8350_LDO1_CONTROL, - .vsel_mask = WM8350_LDO1_VSEL_MASK, - .enable_reg = WM8350_DCDC_LDO_REQUESTED, - .enable_mask = WM8350_LDO1_ENA, .owner = THIS_MODULE, }, { @@ -1121,10 +1338,6 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = { .irq = WM8350_IRQ_UV_LDO2, .type = REGULATOR_VOLTAGE, .n_voltages = WM8350_LDO2_VSEL_MASK + 1, - .vsel_reg = WM8350_LDO2_CONTROL, - .vsel_mask = WM8350_LDO2_VSEL_MASK, - .enable_reg = WM8350_DCDC_LDO_REQUESTED, - .enable_mask = WM8350_LDO2_ENA, .owner = THIS_MODULE, }, { @@ -1134,10 +1347,6 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = { .irq = WM8350_IRQ_UV_LDO3, .type = REGULATOR_VOLTAGE, .n_voltages = WM8350_LDO3_VSEL_MASK + 1, - .vsel_reg = WM8350_LDO3_CONTROL, - .vsel_mask = WM8350_LDO3_VSEL_MASK, - .enable_reg = WM8350_DCDC_LDO_REQUESTED, - .enable_mask = WM8350_LDO3_ENA, .owner = THIS_MODULE, }, { @@ -1147,10 +1356,6 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = { .irq = WM8350_IRQ_UV_LDO4, .type = REGULATOR_VOLTAGE, .n_voltages = WM8350_LDO4_VSEL_MASK + 1, - .vsel_reg = WM8350_LDO4_CONTROL, - .vsel_mask = WM8350_LDO4_VSEL_MASK, - .enable_reg = WM8350_DCDC_LDO_REQUESTED, - .enable_mask = WM8350_LDO4_ENA, .owner = THIS_MODULE, }, { @@ -1224,7 +1429,6 @@ static int wm8350_regulator_probe(struct platform_device *pdev) config.dev = &pdev->dev; config.init_data = pdev->dev.platform_data; config.driver_data = dev_get_drvdata(&pdev->dev); - config.regmap = wm8350->regmap; /* register regulator */ rdev = regulator_register(&wm8350_reg[pdev->id], &config); diff --git a/trunk/drivers/regulator/wm8400-regulator.c b/trunk/drivers/regulator/wm8400-regulator.c index 9035dd053611..69a2b7ce5e4a 100644 --- a/trunk/drivers/regulator/wm8400-regulator.c +++ b/trunk/drivers/regulator/wm8400-regulator.c @@ -28,26 +28,34 @@ static int wm8400_ldo_list_voltage(struct regulator_dev *dev, if (selector < 15) return 900000 + (selector * 50000); else - return 1700000 + ((selector - 15) * 100000); + return 1600000 + ((selector - 14) * 100000); } static int wm8400_ldo_map_voltage(struct regulator_dev *dev, int min_uV, int max_uV) { u16 val; - int volt; if (min_uV < 900000 || min_uV > 3300000) return -EINVAL; - if (min_uV < 1700000) /* Steps of 50mV from 900mV; */ + if (min_uV < 1700000) { + /* Steps of 50mV from 900mV; */ val = DIV_ROUND_UP(min_uV - 900000, 50000); - else /* Steps of 100mV from 1700mV */ - val = DIV_ROUND_UP(min_uV - 1700000, 100000) + 15; - volt = wm8400_ldo_list_voltage(dev, val); - if (volt < min_uV || volt > max_uV) - return -EINVAL; + if ((val * 50000) + 900000 > max_uV) + return -EINVAL; + BUG_ON((val * 50000) + 900000 < min_uV); + } else { + /* Steps of 100mV from 1700mV */ + val = DIV_ROUND_UP(min_uV - 
1700000, 100000); + + if ((val * 100000) + 1700000 > max_uV) + return -EINVAL; + BUG_ON((val * 100000) + 1700000 < min_uV); + + val += 0xf; + } return val; } @@ -144,7 +152,6 @@ static struct regulator_ops wm8400_dcdc_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .list_voltage = regulator_list_voltage_linear, - .map_voltage = regulator_map_voltage_linear, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_mode = wm8400_dcdc_get_mode, diff --git a/trunk/drivers/regulator/wm8994-regulator.c b/trunk/drivers/regulator/wm8994-regulator.c index 86bb48db149e..9a994316e63c 100644 --- a/trunk/drivers/regulator/wm8994-regulator.c +++ b/trunk/drivers/regulator/wm8994-regulator.c @@ -26,6 +26,8 @@ #include struct wm8994_ldo { + int enable; + bool is_enabled; struct regulator_dev *regulator; struct wm8994 *wm8994; }; @@ -33,9 +35,64 @@ struct wm8994_ldo { #define WM8994_LDO1_MAX_SELECTOR 0x7 #define WM8994_LDO2_MAX_SELECTOR 0x3 +static int wm8994_ldo_enable(struct regulator_dev *rdev) +{ + struct wm8994_ldo *ldo = rdev_get_drvdata(rdev); + + /* If we have no soft control assume that the LDO is always enabled. */ + if (!ldo->enable) + return 0; + + gpio_set_value_cansleep(ldo->enable, 1); + ldo->is_enabled = true; + + return 0; +} + +static int wm8994_ldo_disable(struct regulator_dev *rdev) +{ + struct wm8994_ldo *ldo = rdev_get_drvdata(rdev); + + /* If we have no soft control assume that the LDO is always enabled. */ + if (!ldo->enable) + return -EINVAL; + + gpio_set_value_cansleep(ldo->enable, 0); + ldo->is_enabled = false; + + return 0; +} + +static int wm8994_ldo_is_enabled(struct regulator_dev *rdev) +{ + struct wm8994_ldo *ldo = rdev_get_drvdata(rdev); + + return ldo->is_enabled; +} + +static int wm8994_ldo_enable_time(struct regulator_dev *rdev) +{ + /* 3ms is fairly conservative but this shouldn't be too performance + * critical; can be tweaked per-system if required. 
*/ + return 3000; +} + +static int wm8994_ldo1_list_voltage(struct regulator_dev *rdev, + unsigned int selector) +{ + if (selector > WM8994_LDO1_MAX_SELECTOR) + return -EINVAL; + + return (selector * 100000) + 2400000; +} + static struct regulator_ops wm8994_ldo1_ops = { - .list_voltage = regulator_list_voltage_linear, - .map_voltage = regulator_map_voltage_linear, + .enable = wm8994_ldo_enable, + .disable = wm8994_ldo_disable, + .is_enabled = wm8994_ldo_is_enabled, + .enable_time = wm8994_ldo_enable_time, + + .list_voltage = wm8994_ldo1_list_voltage, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, }; @@ -67,6 +124,11 @@ static int wm8994_ldo2_list_voltage(struct regulator_dev *rdev, } static struct regulator_ops wm8994_ldo2_ops = { + .enable = wm8994_ldo_enable, + .disable = wm8994_ldo_disable, + .is_enabled = wm8994_ldo_is_enabled, + .enable_time = wm8994_ldo_enable_time, + .list_voltage = wm8994_ldo2_list_voltage, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, @@ -81,9 +143,6 @@ static const struct regulator_desc wm8994_ldo_desc[] = { .vsel_reg = WM8994_LDO_1, .vsel_mask = WM8994_LDO1_VSEL_MASK, .ops = &wm8994_ldo1_ops, - .min_uV = 2400000, - .uV_step = 100000, - .enable_time = 3000, .owner = THIS_MODULE, }, { @@ -94,7 +153,6 @@ static const struct regulator_desc wm8994_ldo_desc[] = { .vsel_reg = WM8994_LDO_2, .vsel_mask = WM8994_LDO2_VSEL_MASK, .ops = &wm8994_ldo2_ops, - .enable_time = 3000, .owner = THIS_MODULE, }, }; @@ -118,26 +176,39 @@ static __devinit int wm8994_ldo_probe(struct platform_device *pdev) ldo->wm8994 = wm8994; + if (pdata->ldo[id].enable && gpio_is_valid(pdata->ldo[id].enable)) { + ldo->enable = pdata->ldo[id].enable; + + ret = gpio_request_one(ldo->enable, 0, "WM8994 LDO enable"); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to get enable GPIO: %d\n", + ret); + goto err; + } + } else + ldo->is_enabled = true; + config.dev = wm8994->dev; config.driver_data = ldo; config.regmap = wm8994->regmap; - if (pdata) { + if (pdata) config.init_data = pdata->ldo[id].init_data; - config.ena_gpio = pdata->ldo[id].enable; - } ldo->regulator = regulator_register(&wm8994_ldo_desc[id], &config); if (IS_ERR(ldo->regulator)) { ret = PTR_ERR(ldo->regulator); dev_err(wm8994->dev, "Failed to register LDO%d: %d\n", id + 1, ret); - goto err; + goto err_gpio; } platform_set_drvdata(pdev, ldo); return 0; +err_gpio: + if (gpio_is_valid(ldo->enable)) + gpio_free(ldo->enable); err: return ret; } @@ -149,6 +220,8 @@ static __devexit int wm8994_ldo_remove(struct platform_device *pdev) platform_set_drvdata(pdev, NULL); regulator_unregister(ldo->regulator); + if (gpio_is_valid(ldo->enable)) + gpio_free(ldo->enable); return 0; } diff --git a/trunk/drivers/rpmsg/virtio_rpmsg_bus.c b/trunk/drivers/rpmsg/virtio_rpmsg_bus.c index f56c8ba3a861..39d3aa41adda 100644 --- a/trunk/drivers/rpmsg/virtio_rpmsg_bus.c +++ b/trunk/drivers/rpmsg/virtio_rpmsg_bus.c @@ -1085,7 +1085,7 @@ static int __init rpmsg_init(void) return ret; } -subsys_initcall(rpmsg_init); +module_init(rpmsg_init); static void __exit rpmsg_fini(void) { diff --git a/trunk/drivers/rtc/rtc-cmos.c b/trunk/drivers/rtc/rtc-cmos.c index 132333d75408..4267789ca995 100644 --- a/trunk/drivers/rtc/rtc-cmos.c +++ b/trunk/drivers/rtc/rtc-cmos.c @@ -568,7 +568,6 @@ static irqreturn_t cmos_interrupt(int irq, void *p) hpet_mask_rtc_irq_bit(RTC_AIE); CMOS_READ(RTC_INTR_FLAGS); - pm_wakeup_event(cmos_rtc.dev, 0); } spin_unlock(&rtc_lock); diff 
--git a/trunk/drivers/scsi/qla2xxx/qla_target.c b/trunk/drivers/scsi/qla2xxx/qla_target.c index 5b30132960c7..77759c78cc21 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_target.c +++ b/trunk/drivers/scsi/qla2xxx/qla_target.c @@ -2643,9 +2643,19 @@ static void qlt_do_work(struct work_struct *work) spin_lock_irqsave(&ha->hardware_lock, flags); sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id); - /* Do kref_get() before dropping qla_hw_data->hardware_lock. */ - if (sess) - kref_get(&sess->se_sess->sess_kref); + if (sess) { + if (unlikely(sess->tearing_down)) { + sess = NULL; + spin_unlock_irqrestore(&ha->hardware_lock, flags); + goto out_term; + } else { + /* + * Do the extra kref_get() before dropping + * qla_hw_data->hardware_lock. + */ + kref_get(&sess->se_sess->sess_kref); + } + } spin_unlock_irqrestore(&ha->hardware_lock, flags); if (unlikely(!sess)) { diff --git a/trunk/drivers/scsi/qla2xxx/qla_target.h b/trunk/drivers/scsi/qla2xxx/qla_target.h index 170af1571214..9f9ef1644fd9 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_target.h +++ b/trunk/drivers/scsi/qla2xxx/qla_target.h @@ -639,7 +639,7 @@ struct qla_tgt_func_tmpl { int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *, unsigned char *, uint32_t, int, int, int); - void (*handle_data)(struct qla_tgt_cmd *); + int (*handle_data)(struct qla_tgt_cmd *); int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t, uint32_t); void (*free_cmd)(struct qla_tgt_cmd *); @@ -813,6 +813,7 @@ struct qla_tgt_sess { unsigned int conf_compl_supported:1; unsigned int deleted:1; unsigned int local:1; + unsigned int tearing_down:1; struct se_session *se_sess; struct scsi_qla_host *vha; diff --git a/trunk/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/trunk/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 4752f65a9272..6e64314dbbb3 100644 --- a/trunk/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/trunk/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -38,6 +38,8 @@ #include #include #include +#include +#include #include #include #include @@ -464,7 +466,8 @@ static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess) vha = sess->vha; spin_lock_irqsave(&vha->hw->hardware_lock, flags); - target_sess_cmd_list_set_waiting(se_sess); + sess->tearing_down = 1; + target_splice_sess_cmd_list(se_sess); spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); return 1; @@ -597,15 +600,28 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, return -EINVAL; } - return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0], + target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0], cmd->unpacked_lun, data_length, fcp_task_attr, data_dir, flags); + return 0; } -static void tcm_qla2xxx_handle_data_work(struct work_struct *work) +static void tcm_qla2xxx_do_rsp(struct work_struct *work) { struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); + /* + * Dispatch ->queue_status from workqueue process context + */ + transport_generic_request_failure(&cmd->se_cmd); +} +/* + * Called from qla_target.c:qlt_do_ctio_completion() + */ +static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) +{ + struct se_cmd *se_cmd = &cmd->se_cmd; + unsigned long flags; /* * Ensure that the complete FCP WRITE payload has been received. * Otherwise return an exception via CHECK_CONDITION status. 
@@ -615,26 +631,24 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work) * Check if se_cmd has already been aborted via LUN_RESET, and * waiting upon completion in tcm_qla2xxx_write_pending_status() */ - if (cmd->se_cmd.transport_state & CMD_T_ABORTED) { - complete(&cmd->se_cmd.t_transport_stop_comp); - return; + spin_lock_irqsave(&se_cmd->t_state_lock, flags); + if (se_cmd->transport_state & CMD_T_ABORTED) { + spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); + complete(&se_cmd->t_transport_stop_comp); + return 0; } + spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); - cmd->se_cmd.scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD; - transport_generic_request_failure(&cmd->se_cmd); - return; + se_cmd->scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD; + INIT_WORK(&cmd->work, tcm_qla2xxx_do_rsp); + queue_work(tcm_qla2xxx_free_wq, &cmd->work); + return 0; } - - return target_execute_cmd(&cmd->se_cmd); -} - -/* - * Called from qla_target.c:qlt_do_ctio_completion() - */ -static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) -{ - INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work); - queue_work(tcm_qla2xxx_free_wq, &cmd->work); + /* + * We now tell TCM to queue this WRITE CDB with TRANSPORT_PROCESS_WRITE + * status to the backstore processing thread. + */ + return transport_generic_handle_data(&cmd->se_cmd); } /* @@ -1676,6 +1690,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = { .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, + .new_cmd_map = NULL, .check_stop_free = tcm_qla2xxx_check_stop_free, .release_cmd = tcm_qla2xxx_release_cmd, .put_session = tcm_qla2xxx_put_session, diff --git a/trunk/drivers/scsi/scsi_wait_scan.c b/trunk/drivers/scsi/scsi_wait_scan.c index 072734538876..ae7814874618 100644 --- a/trunk/drivers/scsi/scsi_wait_scan.c +++ b/trunk/drivers/scsi/scsi_wait_scan.c @@ -22,6 +22,11 @@ static int __init wait_scan_init(void) * and might not yet have reached the scsi async scanning */ wait_for_device_probe(); + /* + * and then we wait for the actual asynchronous scsi scan + * to finish. + */ + scsi_complete_async_scans(); return 0; } diff --git a/trunk/drivers/target/Makefile b/trunk/drivers/target/Makefile index 9fdcb561422f..61648d84fbb6 100644 --- a/trunk/drivers/target/Makefile +++ b/trunk/drivers/target/Makefile @@ -9,8 +9,7 @@ target_core_mod-y := target_core_configfs.o \ target_core_tmr.o \ target_core_tpg.o \ target_core_transport.o \ - target_core_sbc.o \ - target_core_spc.o \ + target_core_cdb.o \ target_core_ua.o \ target_core_rd.o \ target_core_stat.o diff --git a/trunk/drivers/target/iscsi/iscsi_target.c b/trunk/drivers/target/iscsi/iscsi_target.c index 97c0f78c3c9c..d57d10cb2e47 100644 --- a/trunk/drivers/target/iscsi/iscsi_target.c +++ b/trunk/drivers/target/iscsi/iscsi_target.c @@ -429,8 +429,19 @@ int iscsit_reset_np_thread( int iscsit_del_np_comm(struct iscsi_np *np) { - if (np->np_socket) - sock_release(np->np_socket); + if (!np->np_socket) + return 0; + + /* + * Some network transports allocate their own struct sock->file, + * see if we need to free any additional allocated resources. 
+ */ + if (np->np_flags & NPF_SCTP_STRUCT_FILE) { + kfree(np->np_socket->file); + np->np_socket->file = NULL; + } + + sock_release(np->np_socket); return 0; } @@ -1402,10 +1413,8 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) spin_unlock_bh(&cmd->istate_lock); iscsit_stop_dataout_timer(cmd); - if (ooo_cmdsn) - return 0; - target_execute_cmd(&cmd->se_cmd); - return 0; + return (!ooo_cmdsn) ? transport_generic_handle_data( + &cmd->se_cmd) : 0; } else /* DATAOUT_CANNOT_RECOVER */ return -1; @@ -2674,7 +2683,7 @@ static int iscsit_send_logout_response( */ logout_conn = iscsit_get_conn_from_cid_rcfr(sess, cmd->logout_cid); - if (logout_conn) { + if ((logout_conn)) { iscsit_connection_reinstatement_rcfr(logout_conn); iscsit_dec_conn_usage_count(logout_conn); } @@ -4068,8 +4077,13 @@ int iscsit_close_connection( kfree(conn->conn_ops); conn->conn_ops = NULL; - if (conn->sock) + if (conn->sock) { + if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) { + kfree(conn->sock->file); + conn->sock->file = NULL; + } sock_release(conn->sock); + } conn->thread_set = NULL; pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); diff --git a/trunk/drivers/target/iscsi/iscsi_target_configfs.c b/trunk/drivers/target/iscsi/iscsi_target_configfs.c index a7b25e783b58..69dc8e35c03a 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_configfs.c +++ b/trunk/drivers/target/iscsi/iscsi_target_configfs.c @@ -47,6 +47,28 @@ struct lio_target_configfs_attribute { ssize_t (*store)(void *, const char *, size_t); }; +struct iscsi_portal_group *lio_get_tpg_from_tpg_item( + struct config_item *item, + struct iscsi_tiqn **tiqn_out) +{ + struct se_portal_group *se_tpg = container_of(to_config_group(item), + struct se_portal_group, tpg_group); + struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; + int ret; + + if (!tpg) { + pr_err("Unable to locate struct iscsi_portal_group " + "pointer\n"); + return NULL; + } + ret = iscsit_get_tpg(tpg); + if (ret < 0) + return NULL; + + *tiqn_out = tpg->tpg_tiqn; + return tpg; +} + /* Start items for lio_target_portal_cit */ static ssize_t lio_target_np_show_sctp( diff --git a/trunk/drivers/target/iscsi/iscsi_target_core.h b/trunk/drivers/target/iscsi/iscsi_target_core.h index 8a908b28d8b2..1c70144cdaf1 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_core.h +++ b/trunk/drivers/target/iscsi/iscsi_target_core.h @@ -224,6 +224,7 @@ enum iscsi_timer_flags_table { /* Used for struct iscsi_np->np_flags */ enum np_flags_table { NPF_IP_NETWORK = 0x00, + NPF_SCTP_STRUCT_FILE = 0x01 /* Bugfix */ }; /* Used for struct iscsi_np->np_thread_state */ @@ -480,7 +481,6 @@ struct iscsi_tmr_req { bool task_reassign:1; u32 ref_cmd_sn; u32 exp_data_sn; - struct iscsi_cmd *ref_cmd; struct iscsi_conn_recovery *conn_recovery; struct se_tmr_req *se_tmr_req; }; @@ -503,6 +503,7 @@ struct iscsi_conn { u16 local_port; int net_size; u32 auth_id; +#define CONNFLAG_SCTP_STRUCT_FILE 0x01 u32 conn_flags; /* Used for iscsi_tx_login_rsp() */ u32 login_itt; diff --git a/trunk/drivers/target/iscsi/iscsi_target_erl1.c b/trunk/drivers/target/iscsi/iscsi_target_erl1.c index 3df8a2cef86f..ecdd46deedda 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_erl1.c +++ b/trunk/drivers/target/iscsi/iscsi_target_erl1.c @@ -965,8 +965,8 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo) if (cmd->immediate_data) { if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) { spin_unlock_bh(&cmd->istate_lock); - target_execute_cmd(&cmd->se_cmd); - return 0; + return transport_generic_handle_data( + &cmd->se_cmd); } 
spin_unlock_bh(&cmd->istate_lock); diff --git a/trunk/drivers/target/iscsi/iscsi_target_login.c b/trunk/drivers/target/iscsi/iscsi_target_login.c index 0694d9b1bce6..a3656c9903a1 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_login.c +++ b/trunk/drivers/target/iscsi/iscsi_target_login.c @@ -518,7 +518,7 @@ int iscsi_login_post_auth_non_zero_tsih( * initiator and release the new connection. */ conn_ptr = iscsit_get_conn_from_cid_rcfr(sess, cid); - if (conn_ptr) { + if ((conn_ptr)) { pr_err("Connection exists with CID %hu for %s," " performing connection reinstatement.\n", conn_ptr->cid, sess->sess_ops->InitiatorName); @@ -539,7 +539,7 @@ int iscsi_login_post_auth_non_zero_tsih( if (sess->sess_ops->ErrorRecoveryLevel == 2) { cr = iscsit_get_inactive_connection_recovery_entry( sess, cid); - if (cr) { + if ((cr)) { pr_debug("Performing implicit logout" " for connection recovery on CID: %hu\n", conn->cid); @@ -794,6 +794,22 @@ int iscsi_target_setup_login_socket( return ret; } np->np_socket = sock; + /* + * The SCTP stack needs struct socket->file. + */ + if ((np->np_network_transport == ISCSI_SCTP_TCP) || + (np->np_network_transport == ISCSI_SCTP_UDP)) { + if (!sock->file) { + sock->file = kzalloc(sizeof(struct file), GFP_KERNEL); + if (!sock->file) { + pr_err("Unable to allocate struct" + " file for SCTP\n"); + ret = -ENOMEM; + goto fail; + } + np->np_flags |= NPF_SCTP_STRUCT_FILE; + } + } /* * Setup the np->np_sockaddr from the passed sockaddr setup * in iscsi_target_configfs.c code.. @@ -853,15 +869,21 @@ int iscsi_target_setup_login_socket( fail: np->np_socket = NULL; - if (sock) + if (sock) { + if (np->np_flags & NPF_SCTP_STRUCT_FILE) { + kfree(sock->file); + sock->file = NULL; + } + sock_release(sock); + } return ret; } static int __iscsi_target_login_thread(struct iscsi_np *np) { u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0; - int err, ret = 0, stop; + int err, ret = 0, set_sctp_conn_flag, stop; struct iscsi_conn *conn = NULL; struct iscsi_login *login; struct iscsi_portal_group *tpg = NULL; @@ -872,6 +894,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) struct sockaddr_in6 sock_in6; flush_signals(current); + set_sctp_conn_flag = 0; sock = np->np_socket; spin_lock_bh(&np->np_thread_lock); @@ -894,12 +917,35 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) spin_unlock_bh(&np->np_thread_lock); goto out; } + /* + * The SCTP stack needs struct socket->file. 
+ */ + if ((np->np_network_transport == ISCSI_SCTP_TCP) || + (np->np_network_transport == ISCSI_SCTP_UDP)) { + if (!new_sock->file) { + new_sock->file = kzalloc( + sizeof(struct file), GFP_KERNEL); + if (!new_sock->file) { + pr_err("Unable to allocate struct" + " file for SCTP\n"); + sock_release(new_sock); + /* Get another socket */ + return 1; + } + set_sctp_conn_flag = 1; + } + } + iscsi_start_login_thread_timer(np); conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL); if (!conn) { pr_err("Could not allocate memory for" " new connection\n"); + if (set_sctp_conn_flag) { + kfree(new_sock->file); + new_sock->file = NULL; + } sock_release(new_sock); /* Get another socket */ return 1; @@ -909,6 +955,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) conn->conn_state = TARG_CONN_STATE_FREE; conn->sock = new_sock; + if (set_sctp_conn_flag) + conn->conn_flags |= CONNFLAG_SCTP_STRUCT_FILE; + pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n"); conn->conn_state = TARG_CONN_STATE_XPT_UP; @@ -1032,7 +1081,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) goto new_sess_out; zero_tsih = (pdu->tsih == 0x0000); - if (zero_tsih) { + if ((zero_tsih)) { /* * This is the leading connection of a new session. * We wait until after authentication to check for @@ -1156,8 +1205,13 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) iscsi_release_param_list(conn->param_list); conn->param_list = NULL; } - if (conn->sock) + if (conn->sock) { + if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) { + kfree(conn->sock->file); + conn->sock->file = NULL; + } sock_release(conn->sock); + } kfree(conn); if (tpg) { diff --git a/trunk/drivers/target/iscsi/iscsi_target_parameters.c b/trunk/drivers/target/iscsi/iscsi_target_parameters.c index 0c4760fabfc0..ed5241e7f12a 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_parameters.c +++ b/trunk/drivers/target/iscsi/iscsi_target_parameters.c @@ -681,7 +681,7 @@ int iscsi_update_param_value(struct iscsi_param *param, char *value) param->value = kzalloc(strlen(value) + 1, GFP_KERNEL); if (!param->value) { pr_err("Unable to allocate memory for value.\n"); - return -ENOMEM; + return -1; } memcpy(param->value, value, strlen(value)); diff --git a/trunk/drivers/target/iscsi/iscsi_target_tmr.c b/trunk/drivers/target/iscsi/iscsi_target_tmr.c index f62fe123d902..f4e640b51fd1 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_tmr.c +++ b/trunk/drivers/target/iscsi/iscsi_target_tmr.c @@ -19,7 +19,6 @@ ******************************************************************************/ #include -#include #include #include #include @@ -62,7 +61,7 @@ u8 iscsit_tmr_abort_task( } se_tmr->ref_task_tag = hdr->rtt; - tmr_req->ref_cmd = ref_cmd; + se_tmr->ref_cmd = &ref_cmd->se_cmd; tmr_req->ref_cmd_sn = hdr->refcmdsn; tmr_req->exp_data_sn = hdr->exp_datasn; @@ -122,7 +121,7 @@ u8 iscsit_tmr_task_reassign( struct iscsi_tmr_req *tmr_req = cmd->tmr_req; struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req; struct iscsi_tm *hdr = (struct iscsi_tm *) buf; - int ret, ref_lun; + int ret; pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x," " RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n", @@ -156,16 +155,9 @@ u8 iscsit_tmr_task_reassign( return ISCSI_TMF_RSP_REJECTED; } - ref_lun = scsilun_to_int(&hdr->lun); - if (ref_lun != ref_cmd->se_cmd.orig_fe_lun) { - pr_err("Unable to perform connection recovery for" - " differing ref_lun: %d ref_cmd orig_fe_lun: %u\n", - ref_lun, ref_cmd->se_cmd.orig_fe_lun); - return ISCSI_TMF_RSP_REJECTED; - } - se_tmr->ref_task_tag = hdr->rtt; - 
tmr_req->ref_cmd = ref_cmd; + se_tmr->ref_cmd = &ref_cmd->se_cmd; + se_tmr->ref_task_lun = get_unaligned_le64(&hdr->lun); tmr_req->ref_cmd_sn = hdr->refcmdsn; tmr_req->exp_data_sn = hdr->exp_datasn; tmr_req->conn_recovery = cr; @@ -199,7 +191,9 @@ static int iscsit_task_reassign_complete_nop_out( struct iscsi_tmr_req *tmr_req, struct iscsi_conn *conn) { - struct iscsi_cmd *cmd = tmr_req->ref_cmd; + struct se_tmr_req *se_tmr = tmr_req->se_tmr_req; + struct se_cmd *se_cmd = se_tmr->ref_cmd; + struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); struct iscsi_conn_recovery *cr; if (!cmd->cr) { @@ -257,8 +251,7 @@ static int iscsit_task_reassign_complete_write( pr_debug("WRITE ITT: 0x%08x: t_state: %d" " never sent to transport\n", cmd->init_task_tag, cmd->se_cmd.t_state); - target_execute_cmd(se_cmd); - return 0; + return transport_generic_handle_data(se_cmd); } cmd->i_state = ISTATE_SEND_STATUS; @@ -367,7 +360,9 @@ static int iscsit_task_reassign_complete_scsi_cmnd( struct iscsi_tmr_req *tmr_req, struct iscsi_conn *conn) { - struct iscsi_cmd *cmd = tmr_req->ref_cmd; + struct se_tmr_req *se_tmr = tmr_req->se_tmr_req; + struct se_cmd *se_cmd = se_tmr->ref_cmd; + struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); struct iscsi_conn_recovery *cr; if (!cmd->cr) { @@ -390,7 +385,7 @@ static int iscsit_task_reassign_complete_scsi_cmnd( list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); spin_unlock_bh(&conn->cmd_lock); - if (cmd->se_cmd.se_cmd_flags & SCF_SENT_CHECK_CONDITION) { + if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { cmd->i_state = ISTATE_SEND_STATUS; iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); return 0; @@ -416,14 +411,17 @@ static int iscsit_task_reassign_complete( struct iscsi_tmr_req *tmr_req, struct iscsi_conn *conn) { + struct se_tmr_req *se_tmr = tmr_req->se_tmr_req; + struct se_cmd *se_cmd; struct iscsi_cmd *cmd; int ret = 0; - if (!tmr_req->ref_cmd) { + if (!se_tmr->ref_cmd) { pr_err("TMR Request is missing a RefCmd struct iscsi_cmd.\n"); return -1; } - cmd = tmr_req->ref_cmd; + se_cmd = se_tmr->ref_cmd; + cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); cmd->conn = conn; @@ -549,7 +547,9 @@ int iscsit_task_reassign_prepare_write( struct iscsi_tmr_req *tmr_req, struct iscsi_conn *conn) { - struct iscsi_cmd *cmd = tmr_req->ref_cmd; + struct se_tmr_req *se_tmr = tmr_req->se_tmr_req; + struct se_cmd *se_cmd = se_tmr->ref_cmd; + struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); struct iscsi_pdu *pdu = NULL; struct iscsi_r2t *r2t = NULL, *r2t_tmp; int first_incomplete_r2t = 1, i = 0; @@ -782,12 +782,14 @@ int iscsit_check_task_reassign_expdatasn( struct iscsi_tmr_req *tmr_req, struct iscsi_conn *conn) { - struct iscsi_cmd *ref_cmd = tmr_req->ref_cmd; + struct se_tmr_req *se_tmr = tmr_req->se_tmr_req; + struct se_cmd *se_cmd = se_tmr->ref_cmd; + struct iscsi_cmd *ref_cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); if (ref_cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) return 0; - if (ref_cmd->se_cmd.se_cmd_flags & SCF_SENT_CHECK_CONDITION) + if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) return 0; if (ref_cmd->data_direction == DMA_NONE) diff --git a/trunk/drivers/target/iscsi/iscsi_target_tpg.c b/trunk/drivers/target/iscsi/iscsi_target_tpg.c index a38a3f8ab0d9..879d8d0fa3fe 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_tpg.c +++ b/trunk/drivers/target/iscsi/iscsi_target_tpg.c @@ -303,7 +303,6 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg) { struct iscsi_param 
*param; struct iscsi_tiqn *tiqn = tpg->tpg_tiqn; - int ret; spin_lock(&tpg->tpg_state_lock); if (tpg->tpg_state == TPG_STATE_ACTIVE) { @@ -320,19 +319,19 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg) param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list); if (!param) { spin_unlock(&tpg->tpg_state_lock); - return -EINVAL; + return -ENOMEM; } if (ISCSI_TPG_ATTRIB(tpg)->authentication) { - if (!strcmp(param->value, NONE)) { - ret = iscsi_update_param_value(param, CHAP); - if (ret) - goto err; + if (!strcmp(param->value, NONE)) + if (iscsi_update_param_value(param, CHAP) < 0) { + spin_unlock(&tpg->tpg_state_lock); + return -ENOMEM; + } + if (iscsit_ta_authentication(tpg, 1) < 0) { + spin_unlock(&tpg->tpg_state_lock); + return -ENOMEM; } - - ret = iscsit_ta_authentication(tpg, 1); - if (ret < 0) - goto err; } tpg->tpg_state = TPG_STATE_ACTIVE; @@ -345,10 +344,6 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg) spin_unlock(&tiqn->tiqn_tpg_lock); return 0; - -err: - spin_unlock(&tpg->tpg_state_lock); - return ret; } int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force) @@ -563,7 +558,7 @@ int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication) if ((authentication != 1) && (authentication != 0)) { pr_err("Illegal value for authentication parameter:" " %u, ignoring request.\n", authentication); - return -EINVAL; + return -1; } memset(buf1, 0, sizeof(buf1)); @@ -598,7 +593,7 @@ int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication) } else { snprintf(buf1, sizeof(buf1), "%s", param->value); none = strstr(buf1, NONE); - if (none) + if ((none)) goto out; strncat(buf1, ",", strlen(",")); strncat(buf1, NONE, strlen(NONE)); diff --git a/trunk/drivers/target/loopback/tcm_loop.c b/trunk/drivers/target/loopback/tcm_loop.c index 5491c632a15e..38dfac2b0a1c 100644 --- a/trunk/drivers/target/loopback/tcm_loop.c +++ b/trunk/drivers/target/loopback/tcm_loop.c @@ -211,11 +211,12 @@ static void tcm_loop_submission_work(struct work_struct *work) /* * Because some userspace code via scsi-generic do not memset their * associated read buffers, go ahead and do that here for type - * non-data CDBs. Also note that this is currently guaranteed to be a - * single SGL for this case by target core in - * target_setup_cmd_from_cdb() -> transport_generic_cmd_sequencer(). + * SCF_SCSI_CONTROL_SG_IO_CDB. Also note that this is currently + * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB + * by target core in target_setup_cmd_from_cdb() -> + * transport_generic_cmd_sequencer(). */ - if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && + if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB && se_cmd->data_direction == DMA_FROM_DEVICE) { struct scatterlist *sg = scsi_sglist(sc); unsigned char *buf = kmap(sg_page(sg)) + sg->offset; @@ -778,7 +779,7 @@ static int tcm_loop_write_pending(struct se_cmd *se_cmd) * We now tell TCM to add this WRITE CDB directly into the TCM storage * object execution queue. 
*/ - target_execute_cmd(se_cmd); + transport_generic_process_write(se_cmd); return 0; } diff --git a/trunk/drivers/target/sbp/sbp_target.c b/trunk/drivers/target/sbp/sbp_target.c index 39ddba584b30..7e6136e2ce81 100644 --- a/trunk/drivers/target/sbp/sbp_target.c +++ b/trunk/drivers/target/sbp/sbp_target.c @@ -1219,14 +1219,28 @@ static void sbp_handle_command(struct sbp_target_request *req) ret = sbp_fetch_command(req); if (ret) { pr_debug("sbp_handle_command: fetch command failed: %d\n", ret); - goto err; + req->status.status |= cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | + STATUS_BLOCK_DEAD(0) | + STATUS_BLOCK_LEN(1) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); + sbp_send_status(req); + sbp_free_request(req); + return; } ret = sbp_fetch_page_table(req); if (ret) { pr_debug("sbp_handle_command: fetch page table failed: %d\n", ret); - goto err; + req->status.status |= cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | + STATUS_BLOCK_DEAD(0) | + STATUS_BLOCK_LEN(1) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); + sbp_send_status(req); + sbp_free_request(req); + return; } unpacked_lun = req->login->lun->unpacked_lun; @@ -1235,21 +1249,9 @@ static void sbp_handle_command(struct sbp_target_request *req) pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n", req->orb_pointer, unpacked_lun, data_length, data_dir); - if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf, - req->sense_buf, unpacked_lun, data_length, - MSG_SIMPLE_TAG, data_dir, 0)) - goto err; - - return; - -err: - req->status.status |= cpu_to_be32( - STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | - STATUS_BLOCK_DEAD(0) | - STATUS_BLOCK_LEN(1) | - STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); - sbp_send_status(req); - sbp_free_request(req); + target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf, + req->sense_buf, unpacked_lun, data_length, + MSG_SIMPLE_TAG, data_dir, 0); } /* @@ -1782,7 +1784,8 @@ static int sbp_write_pending(struct se_cmd *se_cmd) return ret; } - target_execute_cmd(se_cmd); + transport_generic_process_write(se_cmd); + return 0; } diff --git a/trunk/drivers/target/target_core_spc.c b/trunk/drivers/target/target_core_cdb.c similarity index 76% rename from trunk/drivers/target/target_core_spc.c rename to trunk/drivers/target/target_core_cdb.c index 4c861de538c9..9888693a18fe 100644 --- a/trunk/drivers/target/target_core_spc.c +++ b/trunk/drivers/target/target_core_cdb.c @@ -1,5 +1,5 @@ /* - * SCSI Primary Commands (SPC) parsing and emulation. + * CDB emulation for non-READ/WRITE commands. * * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. * Copyright (c) 2005, 2006, 2007 SBE, Inc. 
@@ -26,21 +26,17 @@ #include #include #include - #include -#include #include #include #include #include "target_core_internal.h" -#include "target_core_alua.h" -#include "target_core_pr.h" #include "target_core_ua.h" - -static void spc_fill_alua_data(struct se_port *port, unsigned char *buf) +static void +target_fill_alua_data(struct se_port *port, unsigned char *buf) { struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; @@ -69,7 +65,8 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf) spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); } -static int spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf) +static int +target_emulate_inquiry_std(struct se_cmd *cmd, char *buf) { struct se_lun *lun = cmd->se_lun; struct se_device *dev = cmd->se_dev; @@ -96,7 +93,7 @@ static int spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf) * Enable SCCS and TPGS fields for Emulated ALUA */ if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) - spc_fill_alua_data(lun->lun_sep, buf); + target_fill_alua_data(lun->lun_sep, buf); buf[7] = 0x2; /* CmdQue=1 */ @@ -109,7 +106,8 @@ static int spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf) } /* unit serial number */ -static int spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) +static int +target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) { struct se_device *dev = cmd->se_dev; u16 len = 0; @@ -129,8 +127,8 @@ static int spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) return 0; } -static void spc_parse_naa_6h_vendor_specific(struct se_device *dev, - unsigned char *buf) +static void +target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf) { unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0]; int cnt; @@ -164,7 +162,8 @@ static void spc_parse_naa_6h_vendor_specific(struct se_device *dev, * Device identification VPD, for a complete list of * DESIGNATOR TYPEs see spc4r17 Table 459. */ -static int spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) +static int +target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) { struct se_device *dev = cmd->se_dev; struct se_lun *lun = cmd->se_lun; @@ -221,7 +220,7 @@ static int spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) * VENDOR_SPECIFIC_IDENTIFIER and * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION */ - spc_parse_naa_6h_vendor_specific(dev, &buf[off]); + target_parse_naa_6h_vendor_specific(dev, &buf[off]); len = 20; off = (len + 4); @@ -415,7 +414,8 @@ static int spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) } /* Extended INQUIRY Data VPD Page */ -static int spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) +static int +target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) { buf[3] = 0x3c; /* Set HEADSUP, ORDSUP, SIMPSUP */ @@ -428,14 +428,15 @@ static int spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) } /* Block Limits VPD page */ -static int spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) +static int +target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) { struct se_device *dev = cmd->se_dev; u32 max_sectors; int have_tp = 0; /* - * Following spc3r22 section 6.5.3 Block Limits VPD page, when + * Following sbc3r22 section 6.5.3 Block Limits VPD page, when * emulate_tpu=1 or emulate_tpws=1 we will be expect a * different page length for Thin Provisioning. 
*/ @@ -499,7 +500,8 @@ static int spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) } /* Block Device Characteristics VPD page */ -static int spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf) +static int +target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf) { struct se_device *dev = cmd->se_dev; @@ -511,12 +513,13 @@ static int spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf) } /* Thin Provisioning VPD */ -static int spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) +static int +target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) { struct se_device *dev = cmd->se_dev; /* - * From spc3r22 section 6.5.4 Thin Provisioning VPD page: + * From sbc3r22 section 6.5.4 Thin Provisioning VPD page: * * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to * zero, then the page length shall be set to 0004h. If the DP bit @@ -561,23 +564,25 @@ static int spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) return 0; } -static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf); +static int +target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf); static struct { uint8_t page; int (*emulate)(struct se_cmd *, unsigned char *); } evpd_handlers[] = { - { .page = 0x00, .emulate = spc_emulate_evpd_00 }, - { .page = 0x80, .emulate = spc_emulate_evpd_80 }, - { .page = 0x83, .emulate = spc_emulate_evpd_83 }, - { .page = 0x86, .emulate = spc_emulate_evpd_86 }, - { .page = 0xb0, .emulate = spc_emulate_evpd_b0 }, - { .page = 0xb1, .emulate = spc_emulate_evpd_b1 }, - { .page = 0xb2, .emulate = spc_emulate_evpd_b2 }, + { .page = 0x00, .emulate = target_emulate_evpd_00 }, + { .page = 0x80, .emulate = target_emulate_evpd_80 }, + { .page = 0x83, .emulate = target_emulate_evpd_83 }, + { .page = 0x86, .emulate = target_emulate_evpd_86 }, + { .page = 0xb0, .emulate = target_emulate_evpd_b0 }, + { .page = 0xb1, .emulate = target_emulate_evpd_b1 }, + { .page = 0xb2, .emulate = target_emulate_evpd_b2 }, }; /* supported vital product data pages */ -static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) +static int +target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) { int p; @@ -596,7 +601,7 @@ static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) return 0; } -static int spc_emulate_inquiry(struct se_cmd *cmd) +int target_emulate_inquiry(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg; @@ -638,7 +643,7 @@ static int spc_emulate_inquiry(struct se_cmd *cmd) goto out; } - ret = spc_emulate_inquiry_std(cmd, buf); + ret = target_emulate_inquiry_std(cmd, buf); goto out; } @@ -666,7 +671,70 @@ static int spc_emulate_inquiry(struct se_cmd *cmd) return ret; } -static int spc_modesense_rwrecovery(unsigned char *p) +int target_emulate_readcapacity(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + unsigned char *buf; + unsigned long long blocks_long = dev->transport->get_blocks(dev); + u32 blocks; + + if (blocks_long >= 0x00000000ffffffff) + blocks = 0xffffffff; + else + blocks = (u32)blocks_long; + + buf = transport_kmap_data_sg(cmd); + + buf[0] = (blocks >> 24) & 0xff; + buf[1] = (blocks >> 16) & 0xff; + buf[2] = (blocks >> 8) & 0xff; + buf[3] = blocks & 0xff; + buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff; + buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; + buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; + buf[7] = 
dev->se_sub_dev->se_dev_attrib.block_size & 0xff; + + transport_kunmap_data_sg(cmd); + + target_complete_cmd(cmd, GOOD); + return 0; +} + +int target_emulate_readcapacity_16(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + unsigned char *buf; + unsigned long long blocks = dev->transport->get_blocks(dev); + + buf = transport_kmap_data_sg(cmd); + + buf[0] = (blocks >> 56) & 0xff; + buf[1] = (blocks >> 48) & 0xff; + buf[2] = (blocks >> 40) & 0xff; + buf[3] = (blocks >> 32) & 0xff; + buf[4] = (blocks >> 24) & 0xff; + buf[5] = (blocks >> 16) & 0xff; + buf[6] = (blocks >> 8) & 0xff; + buf[7] = blocks & 0xff; + buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff; + buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; + buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; + buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; + /* + * Set Thin Provisioning Enable bit following sbc3r22 in section + * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. + */ + if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) + buf[14] = 0x80; + + transport_kunmap_data_sg(cmd); + + target_complete_cmd(cmd, GOOD); + return 0; +} + +static int +target_modesense_rwrecovery(unsigned char *p) { p[0] = 0x01; p[1] = 0x0a; @@ -674,7 +742,8 @@ static int spc_modesense_rwrecovery(unsigned char *p) return 12; } -static int spc_modesense_control(struct se_device *dev, unsigned char *p) +static int +target_modesense_control(struct se_device *dev, unsigned char *p) { p[0] = 0x0a; p[1] = 0x0a; @@ -759,7 +828,8 @@ static int spc_modesense_control(struct se_device *dev, unsigned char *p) return 12; } -static int spc_modesense_caching(struct se_device *dev, unsigned char *p) +static int +target_modesense_caching(struct se_device *dev, unsigned char *p) { p[0] = 0x08; p[1] = 0x12; @@ -770,7 +840,8 @@ static int spc_modesense_caching(struct se_device *dev, unsigned char *p) return 20; } -static void spc_modesense_write_protect(unsigned char *buf, int type) +static void +target_modesense_write_protect(unsigned char *buf, int type) { /* * I believe that the WP bit (bit 7) in the mode header is the same for @@ -785,7 +856,8 @@ static void spc_modesense_write_protect(unsigned char *buf, int type) } } -static void spc_modesense_dpofua(unsigned char *buf, int type) +static void +target_modesense_dpofua(unsigned char *buf, int type) { switch (type) { case TYPE_DISK: @@ -796,7 +868,7 @@ static void spc_modesense_dpofua(unsigned char *buf, int type) } } -static int spc_emulate_modesense(struct se_cmd *cmd) +int target_emulate_modesense(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; char *cdb = cmd->t_task_cdb; @@ -811,18 +883,18 @@ static int spc_emulate_modesense(struct se_cmd *cmd) switch (cdb[2] & 0x3f) { case 0x01: - length = spc_modesense_rwrecovery(&buf[offset]); + length = target_modesense_rwrecovery(&buf[offset]); break; case 0x08: - length = spc_modesense_caching(dev, &buf[offset]); + length = target_modesense_caching(dev, &buf[offset]); break; case 0x0a: - length = spc_modesense_control(dev, &buf[offset]); + length = target_modesense_control(dev, &buf[offset]); break; case 0x3f: - length = spc_modesense_rwrecovery(&buf[offset]); - length += spc_modesense_caching(dev, &buf[offset+length]); - length += spc_modesense_control(dev, &buf[offset+length]); + length = target_modesense_rwrecovery(&buf[offset]); + length += target_modesense_caching(dev, &buf[offset+length]); + length += 
target_modesense_control(dev, &buf[offset+length]); break; default: pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n", @@ -840,11 +912,11 @@ static int spc_emulate_modesense(struct se_cmd *cmd) if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || (cmd->se_deve && (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) - spc_modesense_write_protect(&buf[3], type); + target_modesense_write_protect(&buf[3], type); if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) && (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0)) - spc_modesense_dpofua(&buf[3], type); + target_modesense_dpofua(&buf[3], type); if ((offset + 2) > cmd->data_length) offset = cmd->data_length; @@ -856,11 +928,11 @@ static int spc_emulate_modesense(struct se_cmd *cmd) if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || (cmd->se_deve && (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) - spc_modesense_write_protect(&buf[2], type); + target_modesense_write_protect(&buf[2], type); if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) && (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0)) - spc_modesense_dpofua(&buf[2], type); + target_modesense_dpofua(&buf[2], type); if ((offset + 1) > cmd->data_length) offset = cmd->data_length; @@ -874,7 +946,7 @@ static int spc_emulate_modesense(struct se_cmd *cmd) return 0; } -static int spc_emulate_request_sense(struct se_cmd *cmd) +int target_emulate_request_sense(struct se_cmd *cmd) { unsigned char *cdb = cmd->t_task_cdb; unsigned char *buf; @@ -933,172 +1005,126 @@ static int spc_emulate_request_sense(struct se_cmd *cmd) return 0; } -static int spc_emulate_testunitready(struct se_cmd *cmd) +/* + * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support. + * Note this is not used for TCM/pSCSI passthrough + */ +int target_emulate_unmap(struct se_cmd *cmd) { - target_complete_cmd(cmd, GOOD); - return 0; + struct se_device *dev = cmd->se_dev; + unsigned char *buf, *ptr = NULL; + unsigned char *cdb = &cmd->t_task_cdb[0]; + sector_t lba; + unsigned int size = cmd->data_length, range; + int ret = 0, offset; + unsigned short dl, bd_dl; + + if (!dev->transport->do_discard) { + pr_err("UNMAP emulation not supported for: %s\n", + dev->transport->name); + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; + return -ENOSYS; + } + + /* First UNMAP block descriptor starts at 8 byte offset */ + offset = 8; + size -= 8; + dl = get_unaligned_be16(&cdb[0]); + bd_dl = get_unaligned_be16(&cdb[2]); + + buf = transport_kmap_data_sg(cmd); + + ptr = &buf[offset]; + pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu" + " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr); + + while (size) { + lba = get_unaligned_be64(&ptr[0]); + range = get_unaligned_be32(&ptr[8]); + pr_debug("UNMAP: Using lba: %llu and range: %u\n", + (unsigned long long)lba, range); + + ret = dev->transport->do_discard(dev, lba, range); + if (ret < 0) { + pr_err("blkdev_issue_discard() failed: %d\n", + ret); + goto err; + } + + ptr += 16; + size -= 16; + } + +err: + transport_kunmap_data_sg(cmd); + if (!ret) + target_complete_cmd(cmd, GOOD); + return ret; } -int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size) +/* + * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support. 
+ * Note this is not used for TCM/pSCSI passthrough + */ +int target_emulate_write_same(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; - struct se_subsystem_dev *su_dev = dev->se_sub_dev; - unsigned char *cdb = cmd->t_task_cdb; + sector_t range; + sector_t lba = cmd->t_task_lba; + u32 num_blocks; + int ret; + + if (!dev->transport->do_discard) { + pr_err("WRITE_SAME emulation not supported" + " for: %s\n", dev->transport->name); + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; + return -ENOSYS; + } - switch (cdb[0]) { - case MODE_SELECT: - *size = cdb[4]; - break; - case MODE_SELECT_10: - *size = (cdb[7] << 8) + cdb[8]; - break; - case MODE_SENSE: - *size = cdb[4]; - cmd->execute_cmd = spc_emulate_modesense; - break; - case MODE_SENSE_10: - *size = (cdb[7] << 8) + cdb[8]; - cmd->execute_cmd = spc_emulate_modesense; - break; - case LOG_SELECT: - case LOG_SENSE: - *size = (cdb[7] << 8) + cdb[8]; - break; - case PERSISTENT_RESERVE_IN: - if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) - cmd->execute_cmd = target_scsi3_emulate_pr_in; - *size = (cdb[7] << 8) + cdb[8]; - break; - case PERSISTENT_RESERVE_OUT: - if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) - cmd->execute_cmd = target_scsi3_emulate_pr_out; - *size = (cdb[7] << 8) + cdb[8]; - break; - case RELEASE: - case RELEASE_10: - if (cdb[0] == RELEASE_10) - *size = (cdb[7] << 8) | cdb[8]; - else - *size = cmd->data_length; - - if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) - cmd->execute_cmd = target_scsi2_reservation_release; - break; - case RESERVE: - case RESERVE_10: - /* - * The SPC-2 RESERVE does not contain a size in the SCSI CDB. - * Assume the passthrough or $FABRIC_MOD will tell us about it. - */ - if (cdb[0] == RESERVE_10) - *size = (cdb[7] << 8) | cdb[8]; - else - *size = cmd->data_length; + if (cmd->t_task_cdb[0] == WRITE_SAME) + num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]); + else if (cmd->t_task_cdb[0] == WRITE_SAME_16) + num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]); + else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */ + num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]); - /* - * Setup the legacy emulated handler for SPC-2 and - * >= SPC-3 compatible reservation handling (CRH=1) - * Otherwise, we assume the underlying SCSI logic is - * is running in SPC_PASSTHROUGH, and wants reservations - * emulation disabled. - */ - if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) - cmd->execute_cmd = target_scsi2_reservation_reserve; - break; - case REQUEST_SENSE: - *size = cdb[4]; - cmd->execute_cmd = spc_emulate_request_sense; - break; - case INQUIRY: - *size = (cdb[3] << 8) + cdb[4]; + /* + * Use the explicit range when non zero is supplied, otherwise calculate + * the remaining range based on ->get_blocks() - starting LBA. + */ + if (num_blocks != 0) + range = num_blocks; + else + range = (dev->transport->get_blocks(dev) - lba); - /* - * Do implict HEAD_OF_QUEUE processing for INQUIRY. 
- * See spc4r17 section 5.3 - */ - if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) - cmd->sam_task_attr = MSG_HEAD_TAG; - cmd->execute_cmd = spc_emulate_inquiry; - break; - case SECURITY_PROTOCOL_IN: - case SECURITY_PROTOCOL_OUT: - *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; - break; - case EXTENDED_COPY: - case READ_ATTRIBUTE: - case RECEIVE_COPY_RESULTS: - case WRITE_ATTRIBUTE: - *size = (cdb[10] << 24) | (cdb[11] << 16) | - (cdb[12] << 8) | cdb[13]; - break; - case RECEIVE_DIAGNOSTIC: - case SEND_DIAGNOSTIC: - *size = (cdb[3] << 8) | cdb[4]; - break; - case WRITE_BUFFER: - *size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; - break; - case REPORT_LUNS: - cmd->execute_cmd = target_report_luns; - *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; - /* - * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS - * See spc4r17 section 5.3 - */ - if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) - cmd->sam_task_attr = MSG_HEAD_TAG; - break; - case TEST_UNIT_READY: - cmd->execute_cmd = spc_emulate_testunitready; - *size = 0; - break; - case MAINTENANCE_IN: - if (dev->transport->get_device_type(dev) != TYPE_ROM) { - /* - * MAINTENANCE_IN from SCC-2 - * Check for emulated MI_REPORT_TARGET_PGS - */ - if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS && - su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { - cmd->execute_cmd = - target_emulate_report_target_port_groups; - } - *size = get_unaligned_be32(&cdb[6]); - } else { - /* - * GPCMD_SEND_KEY from multi media commands - */ - *size = get_unaligned_be16(&cdb[8]); - } - break; - case MAINTENANCE_OUT: - if (dev->transport->get_device_type(dev) != TYPE_ROM) { - /* - * MAINTENANCE_OUT from SCC-2 - * Check for emulated MO_SET_TARGET_PGS. - */ - if (cdb[1] == MO_SET_TARGET_PGS && - su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { - cmd->execute_cmd = - target_emulate_set_target_port_groups; - } - *size = get_unaligned_be32(&cdb[6]); - } else { - /* - * GPCMD_SEND_KEY from multi media commands - */ - *size = get_unaligned_be16(&cdb[8]); - } - break; - default: - pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode" - " 0x%02x, sending CHECK_CONDITION.\n", - cmd->se_tfo->get_fabric_name(), cdb[0]); - cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n", + (unsigned long long)lba, (unsigned long long)range); + + ret = dev->transport->do_discard(dev, lba, range); + if (ret < 0) { + pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n"); + return ret; + } + + target_complete_cmd(cmd, GOOD); + return 0; +} + +int target_emulate_synchronize_cache(struct se_cmd *cmd) +{ + if (!cmd->se_dev->transport->do_sync_cache) { + pr_err("SYNCHRONIZE_CACHE emulation not supported" + " for: %s\n", cmd->se_dev->transport->name); cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; - return -EINVAL; + return -ENOSYS; } + cmd->se_dev->transport->do_sync_cache(cmd); + return 0; +} + +int target_emulate_noop(struct se_cmd *cmd) +{ + target_complete_cmd(cmd, GOOD); return 0; } -EXPORT_SYMBOL(spc_parse_cdb); diff --git a/trunk/drivers/target/target_core_device.c b/trunk/drivers/target/target_core_device.c index cf2c66f3c116..5ad972856a8d 100644 --- a/trunk/drivers/target/target_core_device.c +++ b/trunk/drivers/target/target_core_device.c @@ -300,8 +300,8 @@ int core_free_device_list_for_node( lun = deve->se_lun; spin_unlock_irq(&nacl->device_list_lock); - core_disable_device_list_for_node(lun, NULL, deve->mapped_lun, - TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg); + 
core_update_device_list_for_node(lun, NULL, deve->mapped_lun, + TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); spin_lock_irq(&nacl->device_list_lock); } spin_unlock_irq(&nacl->device_list_lock); @@ -342,46 +342,72 @@ void core_update_device_list_access( spin_unlock_irq(&nacl->device_list_lock); } -/* core_enable_device_list_for_node(): +/* core_update_device_list_for_node(): * * */ -int core_enable_device_list_for_node( +int core_update_device_list_for_node( struct se_lun *lun, struct se_lun_acl *lun_acl, u32 mapped_lun, u32 lun_access, struct se_node_acl *nacl, - struct se_portal_group *tpg) + struct se_portal_group *tpg, + int enable) { struct se_port *port = lun->lun_sep; - struct se_dev_entry *deve; - - spin_lock_irq(&nacl->device_list_lock); - - deve = nacl->device_list[mapped_lun]; - + struct se_dev_entry *deve = nacl->device_list[mapped_lun]; + int trans = 0; /* - * Check if the call is handling demo mode -> explict LUN ACL - * transition. This transition must be for the same struct se_lun - * + mapped_lun that was setup in demo mode.. + * If the MappedLUN entry is being disabled, the entry in + * port->sep_alua_list must be removed now before clearing the + * struct se_dev_entry pointers below as logic in + * core_alua_do_transition_tg_pt() depends on these being present. */ - if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { - if (deve->se_lun_acl != NULL) { - pr_err("struct se_dev_entry->se_lun_acl" - " already set for demo mode -> explict" - " LUN ACL transition\n"); - spin_unlock_irq(&nacl->device_list_lock); - return -EINVAL; - } - if (deve->se_lun != lun) { - pr_err("struct se_dev_entry->se_lun does" - " match passed struct se_lun for demo mode" - " -> explict LUN ACL transition\n"); - spin_unlock_irq(&nacl->device_list_lock); - return -EINVAL; + if (!enable) { + /* + * deve->se_lun_acl will be NULL for demo-mode created LUNs + * that have not been explicitly concerted to MappedLUNs -> + * struct se_lun_acl, but we remove deve->alua_port_list from + * port->sep_alua_list. This also means that active UAs and + * NodeACL context specific PR metadata for demo-mode + * MappedLUN *deve will be released below.. + */ + spin_lock_bh(&port->sep_alua_lock); + list_del(&deve->alua_port_list); + spin_unlock_bh(&port->sep_alua_lock); + } + + spin_lock_irq(&nacl->device_list_lock); + if (enable) { + /* + * Check if the call is handling demo mode -> explict LUN ACL + * transition. This transition must be for the same struct se_lun + * + mapped_lun that was setup in demo mode.. 
+ */ + if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { + if (deve->se_lun_acl != NULL) { + pr_err("struct se_dev_entry->se_lun_acl" + " already set for demo mode -> explict" + " LUN ACL transition\n"); + spin_unlock_irq(&nacl->device_list_lock); + return -EINVAL; + } + if (deve->se_lun != lun) { + pr_err("struct se_dev_entry->se_lun does" + " match passed struct se_lun for demo mode" + " -> explict LUN ACL transition\n"); + spin_unlock_irq(&nacl->device_list_lock); + return -EINVAL; + } + deve->se_lun_acl = lun_acl; + trans = 1; + } else { + deve->se_lun = lun; + deve->se_lun_acl = lun_acl; + deve->mapped_lun = mapped_lun; + deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS; } - deve->se_lun_acl = lun_acl; if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; @@ -391,72 +417,27 @@ int core_enable_device_list_for_node( deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; } + if (trans) { + spin_unlock_irq(&nacl->device_list_lock); + return 0; + } + deve->creation_time = get_jiffies_64(); + deve->attach_count++; spin_unlock_irq(&nacl->device_list_lock); - return 0; - } - deve->se_lun = lun; - deve->se_lun_acl = lun_acl; - deve->mapped_lun = mapped_lun; - deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS; + spin_lock_bh(&port->sep_alua_lock); + list_add_tail(&deve->alua_port_list, &port->sep_alua_list); + spin_unlock_bh(&port->sep_alua_lock); - if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { - deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; - deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; - } else { - deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; - deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; + return 0; } - - deve->creation_time = get_jiffies_64(); - deve->attach_count++; - spin_unlock_irq(&nacl->device_list_lock); - - spin_lock_bh(&port->sep_alua_lock); - list_add_tail(&deve->alua_port_list, &port->sep_alua_list); - spin_unlock_bh(&port->sep_alua_lock); - - return 0; -} - -/* core_disable_device_list_for_node(): - * - * - */ -int core_disable_device_list_for_node( - struct se_lun *lun, - struct se_lun_acl *lun_acl, - u32 mapped_lun, - u32 lun_access, - struct se_node_acl *nacl, - struct se_portal_group *tpg) -{ - struct se_port *port = lun->lun_sep; - struct se_dev_entry *deve = nacl->device_list[mapped_lun]; - - /* - * If the MappedLUN entry is being disabled, the entry in - * port->sep_alua_list must be removed now before clearing the - * struct se_dev_entry pointers below as logic in - * core_alua_do_transition_tg_pt() depends on these being present. - * - * deve->se_lun_acl will be NULL for demo-mode created LUNs - * that have not been explicitly converted to MappedLUNs -> - * struct se_lun_acl, but we remove deve->alua_port_list from - * port->sep_alua_list. This also means that active UAs and - * NodeACL context specific PR metadata for demo-mode - * MappedLUN *deve will be released below.. - */ - spin_lock_bh(&port->sep_alua_lock); - list_del(&deve->alua_port_list); - spin_unlock_bh(&port->sep_alua_lock); /* * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE * PR operation to complete. 
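 * (the device_list_lock is released just below, before busy-waiting on pr_ref_count)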
*/ + spin_unlock_irq(&nacl->device_list_lock); while (atomic_read(&deve->pr_ref_count) != 0) cpu_relax(); - spin_lock_irq(&nacl->device_list_lock); /* * Disable struct se_dev_entry LUN ACL mapping @@ -494,9 +475,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) continue; spin_unlock_irq(&nacl->device_list_lock); - core_disable_device_list_for_node(lun, NULL, + core_update_device_list_for_node(lun, NULL, deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS, - nacl, tpg); + nacl, tpg, 0); spin_lock_irq(&nacl->device_list_lock); } @@ -734,7 +715,7 @@ void se_release_device_for_hba(struct se_device *dev) se_dev_stop(dev); if (dev->dev_ptr) { - destroy_workqueue(dev->tmr_wq); + kthread_stop(dev->process_thread); if (dev->transport->free_device) dev->transport->free_device(dev->dev_ptr); } @@ -841,7 +822,7 @@ int se_dev_check_shutdown(struct se_device *dev) return ret; } -static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) +u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) { u32 tmp, aligned_max_sectors; /* @@ -1292,6 +1273,7 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size) struct se_lun *core_dev_add_lun( struct se_portal_group *tpg, + struct se_hba *hba, struct se_device *dev, u32 lun) { @@ -1316,7 +1298,7 @@ struct se_lun *core_dev_add_lun( pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun, - tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id); + tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id); /* * Update LUN maps for dynamically added initiators when * generate_node_acl is enabled. @@ -1488,8 +1470,8 @@ int core_dev_add_initiator_node_lun_acl( lacl->se_lun = lun; - if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun, - lun_access, nacl, tpg) < 0) + if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun, + lun_access, nacl, tpg, 1) < 0) return -EINVAL; spin_lock(&lun->lun_acl_lock); @@ -1532,8 +1514,8 @@ int core_dev_del_initiator_node_lun_acl( smp_mb__after_atomic_dec(); spin_unlock(&lun->lun_acl_lock); - core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun, - TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg); + core_update_device_list_for_node(lun, NULL, lacl->mapped_lun, + TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); lacl->se_lun = NULL; diff --git a/trunk/drivers/target/target_core_fabric_configfs.c b/trunk/drivers/target/target_core_fabric_configfs.c index ea479e54f5fd..405cc98eaed6 100644 --- a/trunk/drivers/target/target_core_fabric_configfs.c +++ b/trunk/drivers/target/target_core_fabric_configfs.c @@ -764,7 +764,8 @@ static int target_fabric_port_link( goto out; } - lun_p = core_dev_add_lun(se_tpg, dev, lun->unpacked_lun); + lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev, + lun->unpacked_lun); if (IS_ERR(lun_p)) { pr_err("core_dev_add_lun() failed\n"); ret = PTR_ERR(lun_p); diff --git a/trunk/drivers/target/target_core_file.c b/trunk/drivers/target/target_core_file.c index 9e2100551c78..9f99d0404908 100644 --- a/trunk/drivers/target/target_core_file.c +++ b/trunk/drivers/target/target_core_file.c @@ -331,7 +331,7 @@ static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl, return 1; } -static int fd_execute_sync_cache(struct se_cmd *cmd) +static void fd_emulate_sync_cache(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; struct fd_dev *fd_dev = dev->dev_ptr; @@ -365,7 +365,7 @@ static int fd_execute_sync_cache(struct se_cmd *cmd) 
pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); if (immed) - return 0; + return; if (ret) { cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -373,15 +373,11 @@ static int fd_execute_sync_cache(struct se_cmd *cmd) } else { target_complete_cmd(cmd, SAM_STAT_GOOD); } - - return 0; } -static int fd_execute_rw(struct se_cmd *cmd) +static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, + u32 sgl_nents, enum dma_data_direction data_direction) { - struct scatterlist *sgl = cmd->t_data_sg; - u32 sgl_nents = cmd->t_data_nents; - enum dma_data_direction data_direction = cmd->data_direction; struct se_device *dev = cmd->se_dev; int ret = 0; @@ -554,16 +550,6 @@ static sector_t fd_get_blocks(struct se_device *dev) return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size); } -static struct spc_ops fd_spc_ops = { - .execute_rw = fd_execute_rw, - .execute_sync_cache = fd_execute_sync_cache, -}; - -static int fd_parse_cdb(struct se_cmd *cmd) -{ - return sbc_parse_cdb(cmd, &fd_spc_ops); -} - static struct se_subsystem_api fileio_template = { .name = "fileio", .owner = THIS_MODULE, @@ -575,7 +561,8 @@ static struct se_subsystem_api fileio_template = { .allocate_virtdevice = fd_allocate_virtdevice, .create_virtdevice = fd_create_virtdevice, .free_device = fd_free_device, - .parse_cdb = fd_parse_cdb, + .execute_cmd = fd_execute_cmd, + .do_sync_cache = fd_emulate_sync_cache, .check_configfs_dev_params = fd_check_configfs_dev_params, .set_configfs_dev_params = fd_set_configfs_dev_params, .show_configfs_dev_params = fd_show_configfs_dev_params, diff --git a/trunk/drivers/target/target_core_iblock.c b/trunk/drivers/target/target_core_iblock.c index 76db75e836ed..fd47950727b4 100644 --- a/trunk/drivers/target/target_core_iblock.c +++ b/trunk/drivers/target/target_core_iblock.c @@ -40,7 +40,6 @@ #include #include #include -#include #include #include @@ -97,7 +96,6 @@ static struct se_device *iblock_create_virtdevice( struct request_queue *q; struct queue_limits *limits; u32 dev_flags = 0; - fmode_t mode; int ret = -EINVAL; if (!ib_dev) { @@ -119,11 +117,8 @@ static struct se_device *iblock_create_virtdevice( pr_debug( "IBLOCK: Claiming struct block_device: %s\n", ib_dev->ibd_udev_path); - mode = FMODE_READ|FMODE_EXCL; - if (!ib_dev->ibd_readonly) - mode |= FMODE_WRITE; - - bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev); + bd = blkdev_get_by_path(ib_dev->ibd_udev_path, + FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev); if (IS_ERR(bd)) { ret = PTR_ERR(bd); goto failed; @@ -297,7 +292,7 @@ static void iblock_end_io_flush(struct bio *bio, int err) * Implement SYCHRONIZE CACHE. Note that we can't handle lba ranges and must * always flush the whole cache. 
*/ -static int iblock_execute_sync_cache(struct se_cmd *cmd) +static void iblock_emulate_sync_cache(struct se_cmd *cmd) { struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; int immed = (cmd->t_task_cdb[1] & 0x2); @@ -316,98 +311,23 @@ static int iblock_execute_sync_cache(struct se_cmd *cmd) if (!immed) bio->bi_private = cmd; submit_bio(WRITE_FLUSH, bio); - return 0; } -static int iblock_execute_unmap(struct se_cmd *cmd) +static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range) { - struct se_device *dev = cmd->se_dev; struct iblock_dev *ibd = dev->dev_ptr; - unsigned char *buf, *ptr = NULL; - sector_t lba; - int size = cmd->data_length; - u32 range; - int ret = 0; - int dl, bd_dl; - - buf = transport_kmap_data_sg(cmd); - - dl = get_unaligned_be16(&buf[0]); - bd_dl = get_unaligned_be16(&buf[2]); - - size = min(size - 8, bd_dl); - if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { - cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; - ret = -EINVAL; - goto err; - } - - /* First UNMAP block descriptor starts at 8 byte offset */ - ptr = &buf[8]; - pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u" - " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr); - - while (size >= 16) { - lba = get_unaligned_be64(&ptr[0]); - range = get_unaligned_be32(&ptr[8]); - pr_debug("UNMAP: Using lba: %llu and range: %u\n", - (unsigned long long)lba, range); - - if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) { - cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; - ret = -EINVAL; - goto err; - } - - if (lba + range > dev->transport->get_blocks(dev) + 1) { - cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE; - ret = -EINVAL; - goto err; - } - - ret = blkdev_issue_discard(ibd->ibd_bd, lba, range, - GFP_KERNEL, 0); - if (ret < 0) { - pr_err("blkdev_issue_discard() failed: %d\n", - ret); - goto err; - } - - ptr += 16; - size -= 16; - } - -err: - transport_kunmap_data_sg(cmd); - if (!ret) - target_complete_cmd(cmd, GOOD); - return ret; -} - -static int iblock_execute_write_same(struct se_cmd *cmd) -{ - struct iblock_dev *ibd = cmd->se_dev->dev_ptr; - int ret; - - ret = blkdev_issue_discard(ibd->ibd_bd, cmd->t_task_lba, - spc_get_write_same_sectors(cmd), GFP_KERNEL, - 0); - if (ret < 0) { - pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n"); - return ret; - } + struct block_device *bd = ibd->ibd_bd; + int barrier = 0; - target_complete_cmd(cmd, GOOD); - return 0; + return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier); } enum { - Opt_udev_path, Opt_readonly, Opt_force, Opt_err + Opt_udev_path, Opt_force, Opt_err }; static match_table_t tokens = { {Opt_udev_path, "udev_path=%s"}, - {Opt_readonly, "readonly=%d"}, {Opt_force, "force=%d"}, {Opt_err, NULL} }; @@ -420,7 +340,6 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, char *orig, *ptr, *arg_p, *opts; substring_t args[MAX_OPT_ARGS]; int ret = 0, token; - unsigned long tmp_readonly; opts = kstrdup(page, GFP_KERNEL); if (!opts) @@ -453,22 +372,6 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, ib_dev->ibd_udev_path); ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; break; - case Opt_readonly: - arg_p = match_strdup(&args[0]); - if (!arg_p) { - ret = -ENOMEM; - break; - } - ret = strict_strtoul(arg_p, 0, &tmp_readonly); - kfree(arg_p); - if (ret < 0) { - pr_err("strict_strtoul() failed for" - " readonly=\n"); - goto out; - } - ib_dev->ibd_readonly = tmp_readonly; - pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly); - break; case Opt_force: 
break; default: @@ -508,10 +411,11 @@ static ssize_t iblock_show_configfs_dev_params( if (bd) bl += sprintf(b + bl, "iBlock device: %s", bdevname(bd, buf)); - if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) - bl += sprintf(b + bl, " UDEV PATH: %s", + if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) { + bl += sprintf(b + bl, " UDEV PATH: %s\n", ibd->ibd_udev_path); - bl += sprintf(b + bl, " readonly: %d\n", ibd->ibd_readonly); + } else + bl += sprintf(b + bl, "\n"); bl += sprintf(b + bl, " "); if (bd) { @@ -589,11 +493,9 @@ static void iblock_submit_bios(struct bio_list *list, int rw) blk_finish_plug(&plug); } -static int iblock_execute_rw(struct se_cmd *cmd) +static int iblock_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, + u32 sgl_nents, enum dma_data_direction data_direction) { - struct scatterlist *sgl = cmd->t_data_sg; - u32 sgl_nents = cmd->t_data_nents; - enum dma_data_direction data_direction = cmd->data_direction; struct se_device *dev = cmd->se_dev; struct iblock_req *ibr; struct bio *bio; @@ -740,18 +642,6 @@ static void iblock_bio_done(struct bio *bio, int err) iblock_complete_cmd(cmd); } -static struct spc_ops iblock_spc_ops = { - .execute_rw = iblock_execute_rw, - .execute_sync_cache = iblock_execute_sync_cache, - .execute_write_same = iblock_execute_write_same, - .execute_unmap = iblock_execute_unmap, -}; - -static int iblock_parse_cdb(struct se_cmd *cmd) -{ - return sbc_parse_cdb(cmd, &iblock_spc_ops); -} - static struct se_subsystem_api iblock_template = { .name = "iblock", .owner = THIS_MODULE, @@ -763,7 +653,9 @@ static struct se_subsystem_api iblock_template = { .allocate_virtdevice = iblock_allocate_virtdevice, .create_virtdevice = iblock_create_virtdevice, .free_device = iblock_free_device, - .parse_cdb = iblock_parse_cdb, + .execute_cmd = iblock_execute_cmd, + .do_discard = iblock_do_discard, + .do_sync_cache = iblock_emulate_sync_cache, .check_configfs_dev_params = iblock_check_configfs_dev_params, .set_configfs_dev_params = iblock_set_configfs_dev_params, .show_configfs_dev_params = iblock_show_configfs_dev_params, diff --git a/trunk/drivers/target/target_core_iblock.h b/trunk/drivers/target/target_core_iblock.h index 533627ae79ec..66cf7b9e205e 100644 --- a/trunk/drivers/target/target_core_iblock.h +++ b/trunk/drivers/target/target_core_iblock.h @@ -18,7 +18,6 @@ struct iblock_dev { u32 ibd_flags; struct bio_set *ibd_bio_set; struct block_device *ibd_bd; - bool ibd_readonly; } ____cacheline_aligned; #endif /* TARGET_CORE_IBLOCK_H */ diff --git a/trunk/drivers/target/target_core_internal.h b/trunk/drivers/target/target_core_internal.h index 0fd428225d11..165e82429687 100644 --- a/trunk/drivers/target/target_core_internal.h +++ b/trunk/drivers/target/target_core_internal.h @@ -4,16 +4,25 @@ /* target_core_alua.c */ extern struct t10_alua_lu_gp *default_lu_gp; +/* target_core_cdb.c */ +int target_emulate_inquiry(struct se_cmd *cmd); +int target_emulate_readcapacity(struct se_cmd *cmd); +int target_emulate_readcapacity_16(struct se_cmd *cmd); +int target_emulate_modesense(struct se_cmd *cmd); +int target_emulate_request_sense(struct se_cmd *cmd); +int target_emulate_unmap(struct se_cmd *cmd); +int target_emulate_write_same(struct se_cmd *cmd); +int target_emulate_synchronize_cache(struct se_cmd *cmd); +int target_emulate_noop(struct se_cmd *cmd); + /* target_core_device.c */ struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); int core_free_device_list_for_node(struct se_node_acl *, struct se_portal_group *); void core_dec_lacl_count(struct 
se_node_acl *, struct se_cmd *); void core_update_device_list_access(u32, u32, struct se_node_acl *); -int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *, - u32, u32, struct se_node_acl *, struct se_portal_group *); -int core_disable_device_list_for_node(struct se_lun *, struct se_lun_acl *, - u32, u32, struct se_node_acl *, struct se_portal_group *); +int core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *, + u32, u32, struct se_node_acl *, struct se_portal_group *, int); void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *); int core_dev_export(struct se_device *, struct se_portal_group *, struct se_lun *); @@ -47,7 +56,8 @@ int se_dev_set_max_sectors(struct se_device *, u32); int se_dev_set_fabric_max_sectors(struct se_device *, u32); int se_dev_set_optimal_sectors(struct se_device *, u32); int se_dev_set_block_size(struct se_device *, u32); -struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32); +struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *, + struct se_device *, u32); int core_dev_del_lun(struct se_portal_group *, u32); struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32); struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *, @@ -94,6 +104,7 @@ void release_se_kmem_caches(void); u32 scsi_get_new_index(scsi_index_t); void transport_subsystem_check_init(void); void transport_cmd_finish_abort(struct se_cmd *, int); +void __target_remove_from_execute_list(struct se_cmd *); unsigned char *transport_dump_cmd_direction(struct se_cmd *); void transport_dump_dev_state(struct se_device *, char *, int *); void transport_dump_dev_info(struct se_device *, struct se_lun *, @@ -105,7 +116,6 @@ int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int); bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags); int transport_clear_lun_from_sessions(struct se_lun *); void transport_send_task_abort(struct se_cmd *); -int target_cmd_size_check(struct se_cmd *cmd, unsigned int size); /* target_core_stat.c */ void target_stat_setup_dev_default_groups(struct se_subsystem_dev *); diff --git a/trunk/drivers/target/target_core_pr.c b/trunk/drivers/target/target_core_pr.c index 1e946502c378..85564998500a 100644 --- a/trunk/drivers/target/target_core_pr.c +++ b/trunk/drivers/target/target_core_pr.c @@ -507,7 +507,7 @@ static int core_scsi3_pr_seq_non_holder( * Check if write exclusive initiator ports *NOT* holding the * WRITE_EXCLUSIVE_* reservation. */ - if (we && !registered_nexus) { + if ((we) && !(registered_nexus)) { if (cmd->data_direction == DMA_TO_DEVICE) { /* * Conflict for write exclusive @@ -2031,7 +2031,7 @@ static int __core_scsi3_write_aptpl_to_file( if (IS_ERR(file) || !file || !file->f_dentry) { pr_err("filp_open(%s) for APTPL metadata" " failed\n", path); - return IS_ERR(file) ? PTR_ERR(file) : -ENOENT; + return (PTR_ERR(file) < 0 ? 
PTR_ERR(file) : -ENOENT); } iov[0].iov_base = &buf[0]; @@ -2486,7 +2486,7 @@ static int core_scsi3_pro_reserve( */ spin_lock(&dev->dev_reservation_lock); pr_res_holder = dev->dev_pr_res_holder; - if (pr_res_holder) { + if ((pr_res_holder)) { /* * From spc4r17 Section 5.7.9: Reserving: * @@ -3818,7 +3818,7 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd) " SPC-2 reservation is held, returning" " RESERVATION_CONFLICT\n"); cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; - ret = -EINVAL; + ret = EINVAL; goto out; } @@ -3828,8 +3828,7 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd) */ if (!cmd->se_sess) { cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - ret = -EINVAL; - goto out; + return -EINVAL; } if (cmd->data_length < 24) { @@ -4030,7 +4029,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) spin_lock(&se_dev->dev_reservation_lock); pr_reg = se_dev->dev_pr_res_holder; - if (pr_reg) { + if ((pr_reg)) { /* * Set the hardcoded Additional Length */ diff --git a/trunk/drivers/target/target_core_pscsi.c b/trunk/drivers/target/target_core_pscsi.c index 6e32ff6f2fa0..4ce2cf642fce 100644 --- a/trunk/drivers/target/target_core_pscsi.c +++ b/trunk/drivers/target/target_core_pscsi.c @@ -35,10 +35,8 @@ #include #include #include -#include +#include #include -#include - #include #include #include @@ -48,14 +46,12 @@ #include #include -#include "target_core_alua.h" #include "target_core_pscsi.h" #define ISPRINT(a) ((a >= ' ') && (a <= '~')) static struct se_subsystem_api pscsi_template; -static int pscsi_execute_cmd(struct se_cmd *cmd); static void pscsi_req_done(struct request *, int); /* pscsi_attach_hba(): @@ -1023,79 +1019,9 @@ static int pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, return -ENOMEM; } -/* - * Clear a lun set in the cdb if the initiator talking to use spoke - * and old standards version, as we can't assume the underlying device - * won't choke up on it. - */ -static inline void pscsi_clear_cdb_lun(unsigned char *cdb) -{ - switch (cdb[0]) { - case READ_10: /* SBC - RDProtect */ - case READ_12: /* SBC - RDProtect */ - case READ_16: /* SBC - RDProtect */ - case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ - case VERIFY: /* SBC - VRProtect */ - case VERIFY_16: /* SBC - VRProtect */ - case WRITE_VERIFY: /* SBC - VRProtect */ - case WRITE_VERIFY_12: /* SBC - VRProtect */ - case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */ - break; - default: - cdb[1] &= 0x1f; /* clear logical unit number */ - break; - } -} - -static int pscsi_parse_cdb(struct se_cmd *cmd) -{ - unsigned char *cdb = cmd->t_task_cdb; - unsigned int dummy_size; - int ret; - - if (cmd->se_cmd_flags & SCF_BIDI) { - cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; - return -EINVAL; - } - - pscsi_clear_cdb_lun(cdb); - - /* - * For REPORT LUNS we always need to emulate the response, for everything - * else the default for pSCSI is to pass the command to the underlying - * LLD / physical hardware. 
- */ - switch (cdb[0]) { - case REPORT_LUNS: - ret = spc_parse_cdb(cmd, &dummy_size); - if (ret) - return ret; - break; - case READ_6: - case READ_10: - case READ_12: - case READ_16: - case WRITE_6: - case WRITE_10: - case WRITE_12: - case WRITE_16: - case WRITE_VERIFY: - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - /* FALLTHROUGH*/ - default: - cmd->execute_cmd = pscsi_execute_cmd; - break; - } - - return 0; -} - -static int pscsi_execute_cmd(struct se_cmd *cmd) +static int pscsi_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, + u32 sgl_nents, enum dma_data_direction data_direction) { - struct scatterlist *sgl = cmd->t_data_sg; - u32 sgl_nents = cmd->t_data_nents; - enum dma_data_direction data_direction = cmd->data_direction; struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; struct pscsi_plugin_task *pt; struct request *req; @@ -1116,7 +1042,7 @@ static int pscsi_execute_cmd(struct se_cmd *cmd) memcpy(pt->pscsi_cdb, cmd->t_task_cdb, scsi_command_size(cmd->t_task_cdb)); - if (!sgl) { + if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { req = blk_get_request(pdv->pdv_sd->request_queue, (data_direction == DMA_TO_DEVICE), GFP_KERNEL); @@ -1262,7 +1188,7 @@ static struct se_subsystem_api pscsi_template = { .create_virtdevice = pscsi_create_virtdevice, .free_device = pscsi_free_device, .transport_complete = pscsi_transport_complete, - .parse_cdb = pscsi_parse_cdb, + .execute_cmd = pscsi_execute_cmd, .check_configfs_dev_params = pscsi_check_configfs_dev_params, .set_configfs_dev_params = pscsi_set_configfs_dev_params, .show_configfs_dev_params = pscsi_show_configfs_dev_params, diff --git a/trunk/drivers/target/target_core_rd.c b/trunk/drivers/target/target_core_rd.c index d00bbe33ff8b..d0ceb873c0e5 100644 --- a/trunk/drivers/target/target_core_rd.c +++ b/trunk/drivers/target/target_core_rd.c @@ -284,11 +284,9 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) return NULL; } -static int rd_execute_rw(struct se_cmd *cmd) +static int rd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, + u32 sgl_nents, enum dma_data_direction data_direction) { - struct scatterlist *sgl = cmd->t_data_sg; - u32 sgl_nents = cmd->t_data_nents; - enum dma_data_direction data_direction = cmd->data_direction; struct se_device *se_dev = cmd->se_dev; struct rd_dev *dev = se_dev->dev_ptr; struct rd_dev_sg_table *table; @@ -462,15 +460,6 @@ static sector_t rd_get_blocks(struct se_device *dev) return blocks_long; } -static struct spc_ops rd_spc_ops = { - .execute_rw = rd_execute_rw, -}; - -static int rd_parse_cdb(struct se_cmd *cmd) -{ - return sbc_parse_cdb(cmd, &rd_spc_ops); -} - static struct se_subsystem_api rd_mcp_template = { .name = "rd_mcp", .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, @@ -479,7 +468,7 @@ static struct se_subsystem_api rd_mcp_template = { .allocate_virtdevice = rd_allocate_virtdevice, .create_virtdevice = rd_create_virtdevice, .free_device = rd_free_device, - .parse_cdb = rd_parse_cdb, + .execute_cmd = rd_execute_cmd, .check_configfs_dev_params = rd_check_configfs_dev_params, .set_configfs_dev_params = rd_set_configfs_dev_params, .show_configfs_dev_params = rd_show_configfs_dev_params, diff --git a/trunk/drivers/target/target_core_sbc.c b/trunk/drivers/target/target_core_sbc.c deleted file mode 100644 index a9dd9469e3bd..000000000000 --- a/trunk/drivers/target/target_core_sbc.c +++ /dev/null @@ -1,581 +0,0 @@ -/* - * SCSI Block Commands (SBC) parsing and emulation. - * - * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. 
- * Copyright (c) 2005, 2006, 2007 SBE, Inc. - * Copyright (c) 2007-2010 Rising Tide Systems - * Copyright (c) 2008-2010 Linux-iSCSI.org - * - * Nicholas A. Bellinger - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - */ - -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "target_core_internal.h" -#include "target_core_ua.h" - - -static int sbc_emulate_readcapacity(struct se_cmd *cmd) -{ - struct se_device *dev = cmd->se_dev; - unsigned char *buf; - unsigned long long blocks_long = dev->transport->get_blocks(dev); - u32 blocks; - - if (blocks_long >= 0x00000000ffffffff) - blocks = 0xffffffff; - else - blocks = (u32)blocks_long; - - buf = transport_kmap_data_sg(cmd); - - buf[0] = (blocks >> 24) & 0xff; - buf[1] = (blocks >> 16) & 0xff; - buf[2] = (blocks >> 8) & 0xff; - buf[3] = blocks & 0xff; - buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff; - buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; - buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; - buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; - - transport_kunmap_data_sg(cmd); - - target_complete_cmd(cmd, GOOD); - return 0; -} - -static int sbc_emulate_readcapacity_16(struct se_cmd *cmd) -{ - struct se_device *dev = cmd->se_dev; - unsigned char *buf; - unsigned long long blocks = dev->transport->get_blocks(dev); - - buf = transport_kmap_data_sg(cmd); - - buf[0] = (blocks >> 56) & 0xff; - buf[1] = (blocks >> 48) & 0xff; - buf[2] = (blocks >> 40) & 0xff; - buf[3] = (blocks >> 32) & 0xff; - buf[4] = (blocks >> 24) & 0xff; - buf[5] = (blocks >> 16) & 0xff; - buf[6] = (blocks >> 8) & 0xff; - buf[7] = blocks & 0xff; - buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff; - buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; - buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; - buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; - /* - * Set Thin Provisioning Enable bit following sbc3r22 in section - * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. - */ - if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) - buf[14] = 0x80; - - transport_kunmap_data_sg(cmd); - - target_complete_cmd(cmd, GOOD); - return 0; -} - -int spc_get_write_same_sectors(struct se_cmd *cmd) -{ - u32 num_blocks; - - if (cmd->t_task_cdb[0] == WRITE_SAME) - num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]); - else if (cmd->t_task_cdb[0] == WRITE_SAME_16) - num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]); - else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */ - num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]); - - /* - * Use the explicit range when non zero is supplied, otherwise calculate - * the remaining range based on ->get_blocks() - starting LBA. 
- */ - if (num_blocks) - return num_blocks; - - return cmd->se_dev->transport->get_blocks(cmd->se_dev) - - cmd->t_task_lba + 1; -} -EXPORT_SYMBOL(spc_get_write_same_sectors); - -static int sbc_emulate_verify(struct se_cmd *cmd) -{ - target_complete_cmd(cmd, GOOD); - return 0; -} - -static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) -{ - return cmd->se_dev->se_sub_dev->se_dev_attrib.block_size * sectors; -} - -static int sbc_check_valid_sectors(struct se_cmd *cmd) -{ - struct se_device *dev = cmd->se_dev; - unsigned long long end_lba; - u32 sectors; - - sectors = cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size; - end_lba = dev->transport->get_blocks(dev) + 1; - - if (cmd->t_task_lba + sectors > end_lba) { - pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n", - cmd->t_task_lba, sectors, end_lba); - return -EINVAL; - } - - return 0; -} - -static inline u32 transport_get_sectors_6(unsigned char *cdb) -{ - /* - * Use 8-bit sector value. SBC-3 says: - * - * A TRANSFER LENGTH field set to zero specifies that 256 - * logical blocks shall be written. Any other value - * specifies the number of logical blocks that shall be - * written. - */ - return cdb[4] ? : 256; -} - -static inline u32 transport_get_sectors_10(unsigned char *cdb) -{ - return (u32)(cdb[7] << 8) + cdb[8]; -} - -static inline u32 transport_get_sectors_12(unsigned char *cdb) -{ - return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; -} - -static inline u32 transport_get_sectors_16(unsigned char *cdb) -{ - return (u32)(cdb[10] << 24) + (cdb[11] << 16) + - (cdb[12] << 8) + cdb[13]; -} - -/* - * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants - */ -static inline u32 transport_get_sectors_32(unsigned char *cdb) -{ - return (u32)(cdb[28] << 24) + (cdb[29] << 16) + - (cdb[30] << 8) + cdb[31]; - -} - -static inline u32 transport_lba_21(unsigned char *cdb) -{ - return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; -} - -static inline u32 transport_lba_32(unsigned char *cdb) -{ - return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; -} - -static inline unsigned long long transport_lba_64(unsigned char *cdb) -{ - unsigned int __v1, __v2; - - __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; - __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; - - return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; -} - -/* - * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs - */ -static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) -{ - unsigned int __v1, __v2; - - __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; - __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; - - return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; -} - -static int sbc_write_same_supported(struct se_device *dev, - unsigned char *flags) -{ - if ((flags[0] & 0x04) || (flags[0] & 0x02)) { - pr_err("WRITE_SAME PBDATA and LBDATA" - " bits not supported for Block Discard" - " Emulation\n"); - return -ENOSYS; - } - - /* - * Currently for the emulated case we only accept - * tpws with the UNMAP=1 bit set. 
- */ - if (!(flags[0] & 0x08)) { - pr_err("WRITE_SAME w/o UNMAP bit not" - " supported for Block Discard Emulation\n"); - return -ENOSYS; - } - - return 0; -} - -static void xdreadwrite_callback(struct se_cmd *cmd) -{ - unsigned char *buf, *addr; - struct scatterlist *sg; - unsigned int offset; - int i; - int count; - /* - * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command - * - * 1) read the specified logical block(s); - * 2) transfer logical blocks from the data-out buffer; - * 3) XOR the logical blocks transferred from the data-out buffer with - * the logical blocks read, storing the resulting XOR data in a buffer; - * 4) if the DISABLE WRITE bit is set to zero, then write the logical - * blocks transferred from the data-out buffer; and - * 5) transfer the resulting XOR data to the data-in buffer. - */ - buf = kmalloc(cmd->data_length, GFP_KERNEL); - if (!buf) { - pr_err("Unable to allocate xor_callback buf\n"); - return; - } - /* - * Copy the scatterlist WRITE buffer located at cmd->t_data_sg - * into the locally allocated *buf - */ - sg_copy_to_buffer(cmd->t_data_sg, - cmd->t_data_nents, - buf, - cmd->data_length); - - /* - * Now perform the XOR against the BIDI read memory located at - * cmd->t_mem_bidi_list - */ - - offset = 0; - for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { - addr = kmap_atomic(sg_page(sg)); - if (!addr) - goto out; - - for (i = 0; i < sg->length; i++) - *(addr + sg->offset + i) ^= *(buf + offset + i); - - offset += sg->length; - kunmap_atomic(addr); - } - -out: - kfree(buf); -} - -int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops) -{ - struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; - struct se_device *dev = cmd->se_dev; - unsigned char *cdb = cmd->t_task_cdb; - unsigned int size; - u32 sectors = 0; - int ret; - - switch (cdb[0]) { - case READ_6: - sectors = transport_get_sectors_6(cdb); - cmd->t_task_lba = transport_lba_21(cdb); - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - cmd->execute_cmd = ops->execute_rw; - break; - case READ_10: - sectors = transport_get_sectors_10(cdb); - cmd->t_task_lba = transport_lba_32(cdb); - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - cmd->execute_cmd = ops->execute_rw; - break; - case READ_12: - sectors = transport_get_sectors_12(cdb); - cmd->t_task_lba = transport_lba_32(cdb); - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - cmd->execute_cmd = ops->execute_rw; - break; - case READ_16: - sectors = transport_get_sectors_16(cdb); - cmd->t_task_lba = transport_lba_64(cdb); - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - cmd->execute_cmd = ops->execute_rw; - break; - case WRITE_6: - sectors = transport_get_sectors_6(cdb); - cmd->t_task_lba = transport_lba_21(cdb); - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - cmd->execute_cmd = ops->execute_rw; - break; - case WRITE_10: - case WRITE_VERIFY: - sectors = transport_get_sectors_10(cdb); - cmd->t_task_lba = transport_lba_32(cdb); - if (cdb[1] & 0x8) - cmd->se_cmd_flags |= SCF_FUA; - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - cmd->execute_cmd = ops->execute_rw; - break; - case WRITE_12: - sectors = transport_get_sectors_12(cdb); - cmd->t_task_lba = transport_lba_32(cdb); - if (cdb[1] & 0x8) - cmd->se_cmd_flags |= SCF_FUA; - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - cmd->execute_cmd = ops->execute_rw; - break; - case WRITE_16: - sectors = transport_get_sectors_16(cdb); - cmd->t_task_lba = transport_lba_64(cdb); - if (cdb[1] & 0x8) - cmd->se_cmd_flags |= SCF_FUA; - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - cmd->execute_cmd = ops->execute_rw; - break; - case 
XDWRITEREAD_10: - if ((cmd->data_direction != DMA_TO_DEVICE) || - !(cmd->se_cmd_flags & SCF_BIDI)) - goto out_invalid_cdb_field; - sectors = transport_get_sectors_10(cdb); - - cmd->t_task_lba = transport_lba_32(cdb); - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - - /* - * Setup BIDI XOR callback to be run after I/O completion. - */ - cmd->execute_cmd = ops->execute_rw; - cmd->transport_complete_callback = &xdreadwrite_callback; - if (cdb[1] & 0x8) - cmd->se_cmd_flags |= SCF_FUA; - break; - case VARIABLE_LENGTH_CMD: - { - u16 service_action = get_unaligned_be16(&cdb[8]); - switch (service_action) { - case XDWRITEREAD_32: - sectors = transport_get_sectors_32(cdb); - - /* - * Use WRITE_32 and READ_32 opcodes for the emulated - * XDWRITE_READ_32 logic. - */ - cmd->t_task_lba = transport_lba_64_ext(cdb); - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - - /* - * Setup BIDI XOR callback to be run during after I/O - * completion. - */ - cmd->execute_cmd = ops->execute_rw; - cmd->transport_complete_callback = &xdreadwrite_callback; - if (cdb[1] & 0x8) - cmd->se_cmd_flags |= SCF_FUA; - break; - case WRITE_SAME_32: - if (!ops->execute_write_same) - goto out_unsupported_cdb; - - sectors = transport_get_sectors_32(cdb); - if (!sectors) { - pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" - " supported\n"); - goto out_invalid_cdb_field; - } - - size = sbc_get_size(cmd, 1); - cmd->t_task_lba = get_unaligned_be64(&cdb[12]); - - if (sbc_write_same_supported(dev, &cdb[10]) < 0) - goto out_unsupported_cdb; - cmd->execute_cmd = ops->execute_write_same; - break; - default: - pr_err("VARIABLE_LENGTH_CMD service action" - " 0x%04x not supported\n", service_action); - goto out_unsupported_cdb; - } - break; - } - case READ_CAPACITY: - size = READ_CAP_LEN; - cmd->execute_cmd = sbc_emulate_readcapacity; - break; - case SERVICE_ACTION_IN: - switch (cmd->t_task_cdb[1] & 0x1f) { - case SAI_READ_CAPACITY_16: - cmd->execute_cmd = sbc_emulate_readcapacity_16; - break; - default: - pr_err("Unsupported SA: 0x%02x\n", - cmd->t_task_cdb[1] & 0x1f); - goto out_invalid_cdb_field; - } - size = (cdb[10] << 24) | (cdb[11] << 16) | - (cdb[12] << 8) | cdb[13]; - break; - case SYNCHRONIZE_CACHE: - case SYNCHRONIZE_CACHE_16: - if (!ops->execute_sync_cache) - goto out_unsupported_cdb; - - /* - * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE - */ - if (cdb[0] == SYNCHRONIZE_CACHE) { - sectors = transport_get_sectors_10(cdb); - cmd->t_task_lba = transport_lba_32(cdb); - } else { - sectors = transport_get_sectors_16(cdb); - cmd->t_task_lba = transport_lba_64(cdb); - } - - size = sbc_get_size(cmd, sectors); - - /* - * Check to ensure that LBA + Range does not exceed past end of - * device for IBLOCK and FILEIO ->do_sync_cache() backend calls - */ - if (cmd->t_task_lba || sectors) { - if (sbc_check_valid_sectors(cmd) < 0) - goto out_invalid_cdb_field; - } - cmd->execute_cmd = ops->execute_sync_cache; - break; - case UNMAP: - if (!ops->execute_unmap) - goto out_unsupported_cdb; - - size = get_unaligned_be16(&cdb[7]); - cmd->execute_cmd = ops->execute_unmap; - break; - case WRITE_SAME_16: - if (!ops->execute_write_same) - goto out_unsupported_cdb; - - sectors = transport_get_sectors_16(cdb); - if (!sectors) { - pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); - goto out_invalid_cdb_field; - } - - size = sbc_get_size(cmd, 1); - cmd->t_task_lba = get_unaligned_be64(&cdb[2]); - - if (sbc_write_same_supported(dev, &cdb[1]) < 0) - goto out_unsupported_cdb; - cmd->execute_cmd = ops->execute_write_same; - break; - case WRITE_SAME: - 
if (!ops->execute_write_same) - goto out_unsupported_cdb; - - sectors = transport_get_sectors_10(cdb); - if (!sectors) { - pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); - goto out_invalid_cdb_field; - } - - size = sbc_get_size(cmd, 1); - cmd->t_task_lba = get_unaligned_be32(&cdb[2]); - - /* - * Follow sbcr26 with WRITE_SAME (10) and check for the existence - * of byte 1 bit 3 UNMAP instead of original reserved field - */ - if (sbc_write_same_supported(dev, &cdb[1]) < 0) - goto out_unsupported_cdb; - cmd->execute_cmd = ops->execute_write_same; - break; - case VERIFY: - size = 0; - cmd->execute_cmd = sbc_emulate_verify; - break; - default: - ret = spc_parse_cdb(cmd, &size); - if (ret) - return ret; - } - - /* reject any command that we don't have a handler for */ - if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd) - goto out_unsupported_cdb; - - if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { - unsigned long long end_lba; - - if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) { - printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" - " big sectors %u exceeds fabric_max_sectors:" - " %u\n", cdb[0], sectors, - su_dev->se_dev_attrib.fabric_max_sectors); - goto out_invalid_cdb_field; - } - if (sectors > su_dev->se_dev_attrib.hw_max_sectors) { - printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" - " big sectors %u exceeds backend hw_max_sectors:" - " %u\n", cdb[0], sectors, - su_dev->se_dev_attrib.hw_max_sectors); - goto out_invalid_cdb_field; - } - - end_lba = dev->transport->get_blocks(dev) + 1; - if (cmd->t_task_lba + sectors > end_lba) { - pr_err("cmd exceeds last lba %llu " - "(lba %llu, sectors %u)\n", - end_lba, cmd->t_task_lba, sectors); - goto out_invalid_cdb_field; - } - - size = sbc_get_size(cmd, sectors); - } - - ret = target_cmd_size_check(cmd, size); - if (ret < 0) - return ret; - - return 0; - -out_unsupported_cdb: - cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; - return -EINVAL; -out_invalid_cdb_field: - cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; - return -EINVAL; -} -EXPORT_SYMBOL(sbc_parse_cdb); diff --git a/trunk/drivers/target/target_core_tmr.c b/trunk/drivers/target/target_core_tmr.c index 1c59a3c23b2c..84caf1bed9a3 100644 --- a/trunk/drivers/target/target_core_tmr.c +++ b/trunk/drivers/target/target_core_tmr.c @@ -295,6 +295,9 @@ static void core_tmr_drain_state_list( list_move_tail(&cmd->state_list, &drain_task_list); cmd->state_active = false; + + if (!list_empty(&cmd->execute_list)) + __target_remove_from_execute_list(cmd); } spin_unlock_irqrestore(&dev->execute_task_lock, flags); @@ -351,6 +354,57 @@ static void core_tmr_drain_state_list( } } +static void core_tmr_drain_cmd_list( + struct se_device *dev, + struct se_cmd *prout_cmd, + struct se_node_acl *tmr_nacl, + int tas, + struct list_head *preempt_and_abort_list) +{ + LIST_HEAD(drain_cmd_list); + struct se_queue_obj *qobj = &dev->dev_queue_obj; + struct se_cmd *cmd, *tcmd; + unsigned long flags; + + /* + * Release all commands remaining in the per-device command queue. + * + * This follows the same logic as above for the state list. + */ + spin_lock_irqsave(&qobj->cmd_queue_lock, flags); + list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) { + /* + * For PREEMPT_AND_ABORT usage, only process commands + * with a matching reservation key. 
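+		 * Matching is performed by target_check_cdb_and_preempt() below.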
+ */ + if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd)) + continue; + /* + * Not aborting PROUT PREEMPT_AND_ABORT CDB.. + */ + if (prout_cmd == cmd) + continue; + + cmd->transport_state &= ~CMD_T_QUEUED; + atomic_dec(&qobj->queue_cnt); + list_move_tail(&cmd->se_queue_node, &drain_cmd_list); + } + spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); + + while (!list_empty(&drain_cmd_list)) { + cmd = list_entry(drain_cmd_list.next, struct se_cmd, se_queue_node); + list_del_init(&cmd->se_queue_node); + + pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:" + " %d t_fe_count: %d\n", (preempt_and_abort_list) ? + "Preempt" : "", cmd, cmd->t_state, + atomic_read(&cmd->t_fe_count)); + + core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, + atomic_read(&cmd->t_fe_count)); + } +} + int core_tmr_lun_reset( struct se_device *dev, struct se_tmr_req *tmr, @@ -393,7 +447,8 @@ int core_tmr_lun_reset( core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas, preempt_and_abort_list); - + core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas, + preempt_and_abort_list); /* * Clear any legacy SPC-2 reservation when called during * LOGICAL UNIT RESET diff --git a/trunk/drivers/target/target_core_tpg.c b/trunk/drivers/target/target_core_tpg.c index b8628a5014b9..8bd58e284185 100644 --- a/trunk/drivers/target/target_core_tpg.c +++ b/trunk/drivers/target/target_core_tpg.c @@ -77,8 +77,8 @@ static void core_clear_initiator_node_from_tpg( lun = deve->se_lun; spin_unlock_irq(&nacl->device_list_lock); - core_disable_device_list_for_node(lun, NULL, deve->mapped_lun, - TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg); + core_update_device_list_for_node(lun, NULL, deve->mapped_lun, + TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); spin_lock_irq(&nacl->device_list_lock); } @@ -172,8 +172,8 @@ void core_tpg_add_node_to_devs( (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ? "READ-WRITE" : "READ-ONLY"); - core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun, - lun_access, acl, tpg); + core_update_device_list_for_node(lun, NULL, lun->unpacked_lun, + lun_access, acl, tpg, 1); spin_lock(&tpg->tpg_lun_lock); } spin_unlock(&tpg->tpg_lun_lock); @@ -306,8 +306,10 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( * TPG LUNs if the fabric is not explictly asking for * tpg_check_demo_mode_login_only() == 1. 
*/ - if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) || - (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1)) + if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) && + (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1)) + do { ; } while (0); + else core_tpg_add_node_to_devs(acl, tpg); spin_lock_irq(&tpg->acl_node_lock); diff --git a/trunk/drivers/target/target_core_transport.c b/trunk/drivers/target/target_core_transport.c index 0eaae23d12b5..634d0f31a28c 100644 --- a/trunk/drivers/target/target_core_transport.c +++ b/trunk/drivers/target/target_core_transport.c @@ -66,12 +66,15 @@ struct kmem_cache *t10_alua_lu_gp_mem_cache; struct kmem_cache *t10_alua_tg_pt_gp_cache; struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; +static int transport_generic_write_pending(struct se_cmd *); +static int transport_processing_thread(void *param); +static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *); static void transport_complete_task_attr(struct se_cmd *cmd); static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev); static int transport_generic_get_mem(struct se_cmd *cmd); -static int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool); static void transport_put_cmd(struct se_cmd *cmd); +static void transport_remove_cmd_from_queue(struct se_cmd *cmd); static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); static void target_complete_ok_work(struct work_struct *work); @@ -192,6 +195,14 @@ u32 scsi_get_new_index(scsi_index_t type) return new_index; } +static void transport_init_queue_obj(struct se_queue_obj *qobj) +{ + atomic_set(&qobj->queue_cnt, 0); + INIT_LIST_HEAD(&qobj->qobj_list); + init_waitqueue_head(&qobj->thread_wq); + spin_lock_init(&qobj->cmd_queue_lock); +} + void transport_subsystem_check_init(void) { int ret; @@ -232,6 +243,7 @@ struct se_session *transport_init_session(void) INIT_LIST_HEAD(&se_sess->sess_list); INIT_LIST_HEAD(&se_sess->sess_acl_list); INIT_LIST_HEAD(&se_sess->sess_cmd_list); + INIT_LIST_HEAD(&se_sess->sess_wait_list); spin_lock_init(&se_sess->sess_cmd_lock); kref_init(&se_sess->sess_kref); @@ -456,7 +468,18 @@ static void target_remove_from_state_list(struct se_cmd *cmd) spin_unlock_irqrestore(&dev->execute_task_lock, flags); } -static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists) +/* transport_cmd_check_stop(): + * + * 'transport_off = 1' determines if CMD_T_ACTIVE should be cleared. + * 'transport_off = 2' determines if task_dev_state should be removed. + * + * A non-zero u8 t_state sets cmd->t_state. + * Returns 1 when command is stopped, else 0. + */ +static int transport_cmd_check_stop( + struct se_cmd *cmd, + int transport_off, + u8 t_state) { unsigned long flags; @@ -470,23 +493,13 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists) __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); cmd->transport_state &= ~CMD_T_ACTIVE; - if (remove_from_lists) + if (transport_off == 2) target_remove_from_state_list(cmd); spin_unlock_irqrestore(&cmd->t_state_lock, flags); complete(&cmd->transport_lun_stop_comp); return 1; } - - if (remove_from_lists) { - target_remove_from_state_list(cmd); - - /* - * Clear struct se_cmd->se_lun before the handoff to FE. - */ - cmd->se_lun = NULL; - } - /* * Determine if frontend context caller is requesting the stopping of * this command for frontend exceptions. 
@@ -496,36 +509,58 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists) __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); + if (transport_off == 2) + target_remove_from_state_list(cmd); + + /* + * Clear struct se_cmd->se_lun before the transport_off == 2 handoff + * to FE. + */ + if (transport_off == 2) + cmd->se_lun = NULL; spin_unlock_irqrestore(&cmd->t_state_lock, flags); complete(&cmd->t_transport_stop_comp); return 1; } - - cmd->transport_state &= ~CMD_T_ACTIVE; - if (remove_from_lists) { - /* - * Some fabric modules like tcm_loop can release - * their internally allocated I/O reference now and - * struct se_cmd now. - * - * Fabric modules are expected to return '1' here if the - * se_cmd being passed is released at this point, - * or zero if not being released. - */ - if (cmd->se_tfo->check_stop_free != NULL) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return cmd->se_tfo->check_stop_free(cmd); + if (transport_off) { + cmd->transport_state &= ~CMD_T_ACTIVE; + if (transport_off == 2) { + target_remove_from_state_list(cmd); + /* + * Clear struct se_cmd->se_lun before the transport_off == 2 + * handoff to fabric module. + */ + cmd->se_lun = NULL; + /* + * Some fabric modules like tcm_loop can release + * their internally allocated I/O reference now and + * struct se_cmd now. + * + * Fabric modules are expected to return '1' here if the + * se_cmd being passed is released at this point, + * or zero if not being released. + */ + if (cmd->se_tfo->check_stop_free != NULL) { + spin_unlock_irqrestore( + &cmd->t_state_lock, flags); + + return cmd->se_tfo->check_stop_free(cmd); + } } - } + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + return 0; + } else if (t_state) + cmd->t_state = t_state; spin_unlock_irqrestore(&cmd->t_state_lock, flags); + return 0; } static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) { - return transport_cmd_check_stop(cmd, true); + return transport_cmd_check_stop(cmd, 2, 0); } static void transport_lun_remove_cmd(struct se_cmd *cmd) @@ -556,8 +591,79 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) if (transport_cmd_check_stop_to_fabric(cmd)) return; - if (remove) + if (remove) { + transport_remove_cmd_from_queue(cmd); transport_put_cmd(cmd); + } +} + +static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state, + bool at_head) +{ + struct se_device *dev = cmd->se_dev; + struct se_queue_obj *qobj = &dev->dev_queue_obj; + unsigned long flags; + + if (t_state) { + spin_lock_irqsave(&cmd->t_state_lock, flags); + cmd->t_state = t_state; + cmd->transport_state |= CMD_T_ACTIVE; + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + } + + spin_lock_irqsave(&qobj->cmd_queue_lock, flags); + + /* If the cmd is already on the list, remove it before we add it */ + if (!list_empty(&cmd->se_queue_node)) + list_del(&cmd->se_queue_node); + else + atomic_inc(&qobj->queue_cnt); + + if (at_head) + list_add(&cmd->se_queue_node, &qobj->qobj_list); + else + list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); + cmd->transport_state |= CMD_T_QUEUED; + spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); + + wake_up_interruptible(&qobj->thread_wq); +} + +static struct se_cmd * +transport_get_cmd_from_queue(struct se_queue_obj *qobj) +{ + struct se_cmd *cmd; + unsigned long flags; + + spin_lock_irqsave(&qobj->cmd_queue_lock, flags); + if (list_empty(&qobj->qobj_list)) { + spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); + return NULL; + } + cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, 
se_queue_node); + + cmd->transport_state &= ~CMD_T_QUEUED; + list_del_init(&cmd->se_queue_node); + atomic_dec(&qobj->queue_cnt); + spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); + + return cmd; +} + +static void transport_remove_cmd_from_queue(struct se_cmd *cmd) +{ + struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj; + unsigned long flags; + + spin_lock_irqsave(&qobj->cmd_queue_lock, flags); + if (!(cmd->transport_state & CMD_T_QUEUED)) { + spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); + return; + } + cmd->transport_state &= ~CMD_T_QUEUED; + atomic_dec(&qobj->queue_cnt); + list_del_init(&cmd->se_queue_node); + spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); } static void target_complete_failure_work(struct work_struct *work) @@ -636,11 +742,68 @@ static void target_add_to_state_list(struct se_cmd *cmd) spin_unlock_irqrestore(&dev->execute_task_lock, flags); } +static void __target_add_to_execute_list(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + bool head_of_queue = false; + + if (!list_empty(&cmd->execute_list)) + return; + + if (dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED && + cmd->sam_task_attr == MSG_HEAD_TAG) + head_of_queue = true; + + if (head_of_queue) + list_add(&cmd->execute_list, &dev->execute_list); + else + list_add_tail(&cmd->execute_list, &dev->execute_list); + + atomic_inc(&dev->execute_tasks); + + if (cmd->state_active) + return; + + if (head_of_queue) + list_add(&cmd->state_list, &dev->state_list); + else + list_add_tail(&cmd->state_list, &dev->state_list); + + cmd->state_active = true; +} + +static void target_add_to_execute_list(struct se_cmd *cmd) +{ + unsigned long flags; + struct se_device *dev = cmd->se_dev; + + spin_lock_irqsave(&dev->execute_task_lock, flags); + __target_add_to_execute_list(cmd); + spin_unlock_irqrestore(&dev->execute_task_lock, flags); +} + +void __target_remove_from_execute_list(struct se_cmd *cmd) +{ + list_del_init(&cmd->execute_list); + atomic_dec(&cmd->se_dev->execute_tasks); +} + +static void target_remove_from_execute_list(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + unsigned long flags; + + if (WARN_ON(list_empty(&cmd->execute_list))) + return; + + spin_lock_irqsave(&dev->execute_task_lock, flags); + __target_remove_from_execute_list(cmd); + spin_unlock_irqrestore(&dev->execute_task_lock, flags); +} + /* * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status */ -static void transport_write_pending_qf(struct se_cmd *cmd); -static void transport_complete_qf(struct se_cmd *cmd); static void target_qf_do_work(struct work_struct *work) { @@ -664,10 +827,7 @@ static void target_qf_do_work(struct work_struct *work) (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? 
"WRITE_PENDING" : "UNKNOWN"); - if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) - transport_write_pending_qf(cmd); - else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) - transport_complete_qf(cmd); + transport_add_cmd_to_queue(cmd, cmd->t_state, true); } } @@ -714,7 +874,8 @@ void transport_dump_dev_state( break; } - *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); + *bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d", + atomic_read(&dev->execute_tasks), dev->queue_depth); *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.hw_max_sectors); @@ -1051,6 +1212,7 @@ struct se_device *transport_add_device_to_core_hba( return NULL; } + transport_init_queue_obj(&dev->dev_queue_obj); dev->dev_flags = device_flags; dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; dev->dev_ptr = transport_dev; @@ -1060,6 +1222,7 @@ struct se_device *transport_add_device_to_core_hba( INIT_LIST_HEAD(&dev->dev_list); INIT_LIST_HEAD(&dev->dev_sep_list); INIT_LIST_HEAD(&dev->dev_tmr_list); + INIT_LIST_HEAD(&dev->execute_list); INIT_LIST_HEAD(&dev->delayed_cmd_list); INIT_LIST_HEAD(&dev->state_list); INIT_LIST_HEAD(&dev->qf_cmd_list); @@ -1098,17 +1261,17 @@ struct se_device *transport_add_device_to_core_hba( * Setup the Asymmetric Logical Unit Assignment for struct se_device */ if (core_setup_alua(dev, force_pt) < 0) - goto err_dev_list; + goto out; /* * Startup the struct se_device processing thread */ - dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1, - dev->transport->name); - if (!dev->tmr_wq) { - pr_err("Unable to create tmr workqueue for %s\n", + dev->process_thread = kthread_run(transport_processing_thread, dev, + "LIO_%s", dev->transport->name); + if (IS_ERR(dev->process_thread)) { + pr_err("Unable to create kthread: LIO_%s\n", dev->transport->name); - goto err_dev_list; + goto out; } /* * Setup work_queue for QUEUE_FULL @@ -1126,7 +1289,7 @@ struct se_device *transport_add_device_to_core_hba( if (!inquiry_prod || !inquiry_rev) { pr_err("All non TCM/pSCSI plugins require" " INQUIRY consts\n"); - goto err_wq; + goto out; } strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8); @@ -1136,10 +1299,9 @@ struct se_device *transport_add_device_to_core_hba( scsi_dump_inquiry(dev); return dev; +out: + kthread_stop(dev->process_thread); -err_wq: - destroy_workqueue(dev->tmr_wq); -err_dev_list: spin_lock(&hba->device_lock); list_del(&dev->dev_list); hba->dev_count--; @@ -1153,55 +1315,36 @@ struct se_device *transport_add_device_to_core_hba( } EXPORT_SYMBOL(transport_add_device_to_core_hba); -int target_cmd_size_check(struct se_cmd *cmd, unsigned int size) +/* transport_generic_prepare_cdb(): + * + * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will + * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2. + * The point of this is since we are mapping iSCSI LUNs to + * SCSI Target IDs having a non-zero LUN in the CDB will throw the + * devices and HBAs for a loop. 
+ */ +static inline void transport_generic_prepare_cdb( + unsigned char *cdb) { - struct se_device *dev = cmd->se_dev; - - if (cmd->unknown_data_length) { - cmd->data_length = size; - } else if (size != cmd->data_length) { - pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" - " %u does not match SCSI CDB Length: %u for SAM Opcode:" - " 0x%02x\n", cmd->se_tfo->get_fabric_name(), - cmd->data_length, size, cmd->t_task_cdb[0]); - - cmd->cmd_spdtl = size; - - if (cmd->data_direction == DMA_TO_DEVICE) { - pr_err("Rejecting underflow/overflow" - " WRITE data\n"); - goto out_invalid_cdb_field; - } - /* - * Reject READ_* or WRITE_* with overflow/underflow for - * type SCF_SCSI_DATA_CDB. - */ - if (dev->se_sub_dev->se_dev_attrib.block_size != 512) { - pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" - " CDB on non 512-byte sector setup subsystem" - " plugin: %s\n", dev->transport->name); - /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ - goto out_invalid_cdb_field; - } - - if (size > cmd->data_length) { - cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; - cmd->residual_count = (size - cmd->data_length); - } else { - cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; - cmd->residual_count = (cmd->data_length - size); - } - cmd->data_length = size; + switch (cdb[0]) { + case READ_10: /* SBC - RDProtect */ + case READ_12: /* SBC - RDProtect */ + case READ_16: /* SBC - RDProtect */ + case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ + case VERIFY: /* SBC - VRProtect */ + case VERIFY_16: /* SBC - VRProtect */ + case WRITE_VERIFY: /* SBC - VRProtect */ + case WRITE_VERIFY_12: /* SBC - VRProtect */ + case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */ + break; + default: + cdb[1] &= 0x1f; /* clear logical unit number */ + break; } - - return 0; - -out_invalid_cdb_field: - cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; - return -EINVAL; } +static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); + /* * Used by fabric modules containing a local struct se_cmd within their * fabric dependent per I/O descriptor. @@ -1218,7 +1361,9 @@ void transport_init_se_cmd( INIT_LIST_HEAD(&cmd->se_lun_node); INIT_LIST_HEAD(&cmd->se_delayed_node); INIT_LIST_HEAD(&cmd->se_qf_node); + INIT_LIST_HEAD(&cmd->se_queue_node); INIT_LIST_HEAD(&cmd->se_cmd_list); + INIT_LIST_HEAD(&cmd->execute_list); INIT_LIST_HEAD(&cmd->state_list); init_completion(&cmd->transport_lun_fe_stop_comp); init_completion(&cmd->transport_lun_stop_comp); @@ -1273,12 +1418,9 @@ int target_setup_cmd_from_cdb( struct se_cmd *cmd, unsigned char *cdb) { - struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; - u32 pr_reg_type = 0; - u8 alua_ascq = 0; - unsigned long flags; int ret; + transport_generic_prepare_cdb(cdb); /* * Ensure that the received CDB is less than the max (252 + 8) bytes * for VARIABLE_LENGTH_CMD @@ -1315,66 +1457,15 @@ int target_setup_cmd_from_cdb( * Copy the original CDB into cmd-> */ memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); - - /* - * Check for an existing UNIT ATTENTION condition - */ - if (core_scsi3_ua_check(cmd, cdb) < 0) { - cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; - return -EINVAL; - } - - ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); - if (ret != 0) { - /* - * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; - * The ALUA additional sense code qualifier (ASCQ) is determined - * by the ALUA primary or secondary access state.. 
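/*
 * Minimal sketch (an assumption about the standard fixed-format layout, not
 * code from the patch) of the sense data the NOT_READY path above reports:
 * response code 0x70, sense key NOT READY, ASC 0x04 "logical unit not
 * accessible", ASCQ taken from the ALUA access state.
 */
#include <stdint.h>
#include <string.h>

static void fill_alua_not_ready_sense(uint8_t sense[18], uint8_t alua_ascq)
{
	memset(sense, 0, 18);
	sense[0] = 0x70;		/* current error, fixed format */
	sense[2] = 0x02;		/* sense key: NOT READY */
	sense[7] = 0x0a;		/* additional sense length */
	sense[12] = 0x04;		/* ASC */
	sense[13] = alua_ascq;		/* ASCQ, e.g. 0x0b for standby */
}

int main(void)
{
	uint8_t sense[18];

	fill_alua_not_ready_sense(sense, 0x0b);	/* target port in standby */
	return sense[2] == 0x02 ? 0 : 1;
}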
- */ - if (ret > 0) { - pr_debug("[%s]: ALUA TG Port not available, " - "SenseKey: NOT_READY, ASC/ASCQ: " - "0x04/0x%02x\n", - cmd->se_tfo->get_fabric_name(), alua_ascq); - - transport_set_sense_codes(cmd, 0x04, alua_ascq); - cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; - return -EINVAL; - } - cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; - return -EINVAL; - } - /* - * Check status for SPC-3 Persistent Reservations + * Setup the received CDB based on SCSI defined opcodes and + * perform unit attention, persistent reservations and ALUA + * checks for virtual device backends. The cmd->t_task_cdb + * pointer is expected to be setup before we reach this point. */ - if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type)) { - if (su_dev->t10_pr.pr_ops.t10_seq_non_holder( - cmd, cdb, pr_reg_type) != 0) { - cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; - cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; - cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; - return -EBUSY; - } - /* - * This means the CDB is allowed for the SCSI Initiator port - * when said port is *NOT* holding the legacy SPC-2 or - * SPC-3 Persistent Reservation. - */ - } - - ret = cmd->se_dev->transport->parse_cdb(cmd); + ret = transport_generic_cmd_sequencer(cmd, cdb); if (ret < 0) return ret; - - spin_lock_irqsave(&cmd->t_state_lock, flags); - cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - /* * Check for SAM Task Attribute Emulation */ @@ -1412,9 +1503,10 @@ int transport_handle_cdb_direct( return -EINVAL; } /* - * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that - * outstanding descriptors are handled correctly during shutdown via - * transport_wait_for_tasks() + * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following + * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue() + * in existing usage to ensure that outstanding descriptors are handled + * correctly during shutdown via transport_wait_for_tasks() * * Also, we don't take cmd->t_state_lock here as we only expect * this to be called for initial descriptor submission. @@ -1448,14 +1540,10 @@ EXPORT_SYMBOL(transport_handle_cdb_direct); * @data_dir: DMA data direction * @flags: flags for command submission from target_sc_flags_tables * - * Returns non zero to signal active I/O shutdown failure. All other - * setup exceptions will be returned as a SCSI CHECK_CONDITION response, - * but still return zero here. - * * This may only be called from process context, and also currently * assumes internal allocation of fabric payload buffer by target-core. **/ -int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, +void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, u32 data_length, int task_attr, int data_dir, int flags) { @@ -1481,9 +1569,7 @@ int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, * for fabrics using TARGET_SCF_ACK_KREF that expect a second * kref_put() to happen during fabric packet acknowledgement. 
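/*
 * Sketch of the reference-counting intent when TARGET_SCF_ACK_KREF is set
 * (plain integers stand in for the kref; nothing here is from the patch):
 * one reference for normal completion plus one extra that the fabric drops
 * when the initiator acknowledges the response packet.
 */
#include <assert.h>

int main(void)
{
	int refs = 1;		/* kref_init() in target_get_sess_cmd() */

	refs++;			/* extra get for TARGET_SCF_ACK_KREF */
	refs--;			/* target_put_sess_cmd() at status completion */
	refs--;			/* target_put_sess_cmd() from the fabric ack */

	assert(refs == 0);	/* only now is the descriptor released */
	return 0;
}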
*/ - rc = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); - if (rc) - return rc; + target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); /* * Signal bidirectional data payloads to target-core */ @@ -1496,13 +1582,16 @@ int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, transport_send_check_condition_and_sense(se_cmd, se_cmd->scsi_sense_reason, 0); target_put_sess_cmd(se_sess, se_cmd); - return 0; + return; } - + /* + * Sanitize CDBs via transport_generic_cmd_sequencer() and + * allocate the necessary tasks to complete the received CDB+data + */ rc = target_setup_cmd_from_cdb(se_cmd, cdb); if (rc != 0) { transport_generic_request_failure(se_cmd); - return 0; + return; } /* @@ -1511,8 +1600,14 @@ int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, */ core_alua_check_nonop_delay(se_cmd); + /* + * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend + * for immediate execution of READs, otherwise wait for + * transport_generic_handle_data() to be called for WRITEs + * when fabric has filled the incoming buffer. + */ transport_handle_cdb_direct(se_cmd); - return 0; + return; } EXPORT_SYMBOL(target_submit_cmd); @@ -1567,11 +1662,7 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, se_cmd->se_tmr_req->ref_task_tag = tag; /* See target_submit_cmd for commentary */ - ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); - if (ret) { - core_tmr_release_req(se_cmd->se_tmr_req); - return ret; - } + target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); if (ret) { @@ -1588,6 +1679,67 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, } EXPORT_SYMBOL(target_submit_tmr); +/* + * Used by fabric module frontends defining a TFO->new_cmd_map() caller + * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to + * complete setup in TCM process context w/ TFO->new_cmd_map(). + */ +int transport_generic_handle_cdb_map( + struct se_cmd *cmd) +{ + if (!cmd->se_lun) { + dump_stack(); + pr_err("cmd->se_lun is NULL\n"); + return -EINVAL; + } + + transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false); + return 0; +} +EXPORT_SYMBOL(transport_generic_handle_cdb_map); + +/* transport_generic_handle_data(): + * + * + */ +int transport_generic_handle_data( + struct se_cmd *cmd) +{ + /* + * For the software fabric case, then we assume the nexus is being + * failed/shutdown when signals are pending from the kthread context + * caller, so we return a failure. For the HW target mode case running + * in interrupt code, the signal_pending() check is skipped. + */ + if (!in_interrupt() && signal_pending(current)) + return -EPERM; + /* + * If the received CDB has aleady been ABORTED by the generic + * target engine, we now call transport_check_aborted_status() + * to queue any delated TASK_ABORTED status for the received CDB to the + * fabric module as we are expecting no further incoming DATA OUT + * sequences at this point. 
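/*
 * Rough single-threaded model (not the kernel code) of the queueing scheme
 * this series reintroduces: handle_data()/handle_tmr() push a command with a
 * target state onto the device queue, and the per-device kthread pops
 * commands and dispatches on that state. The enum values are placeholders.
 */
#include <stdio.h>

enum t_state { PROCESS_WRITE, PROCESS_TMR, COMPLETE_QF_WP };

struct fake_cmd {
	enum t_state	t_state;
	struct fake_cmd	*next;
};

static struct fake_cmd *queue_head;

static void add_cmd_to_queue(struct fake_cmd *cmd, enum t_state state)
{
	cmd->t_state = state;
	cmd->next = queue_head;		/* LIFO is fine for the sketch */
	queue_head = cmd;
	/* the real code also wakes dev->dev_queue_obj.thread_wq here */
}

static void processing_thread_pass(void)
{
	struct fake_cmd *cmd;

	while ((cmd = queue_head) != NULL) {
		queue_head = cmd->next;
		switch (cmd->t_state) {
		case PROCESS_WRITE:
			printf("execute tasks for a completed WRITE\n");
			break;
		case PROCESS_TMR:
			printf("run the task management request\n");
			break;
		case COMPLETE_QF_WP:
			printf("retry a queue-full write_pending\n");
			break;
		}
	}
}

int main(void)
{
	struct fake_cmd write_cmd, tmr_cmd;

	add_cmd_to_queue(&write_cmd, PROCESS_WRITE);
	add_cmd_to_queue(&tmr_cmd, PROCESS_TMR);
	processing_thread_pass();
	return 0;
}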
+ */ + if (transport_check_aborted_status(cmd, 1) != 0) + return 0; + + transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false); + return 0; +} +EXPORT_SYMBOL(transport_generic_handle_data); + +/* transport_generic_handle_tmr(): + * + * + */ +int transport_generic_handle_tmr( + struct se_cmd *cmd) +{ + transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false); + return 0; +} +EXPORT_SYMBOL(transport_generic_handle_tmr); + /* * If the cmd is active, request it to be stopped and sleep until it * has completed. @@ -1645,7 +1797,6 @@ void transport_generic_request_failure(struct se_cmd *cmd) case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: case TCM_UNKNOWN_MODE_PAGE: case TCM_WRITE_PROTECTED: - case TCM_ADDRESS_OUT_OF_RANGE: case TCM_CHECK_CONDITION_ABORT_CMD: case TCM_CHECK_CONDITION_UNIT_ATTENTION: case TCM_CHECK_CONDITION_NOT_READY: @@ -1681,7 +1832,13 @@ void transport_generic_request_failure(struct se_cmd *cmd) cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; break; } - + /* + * If a fabric does not define a cmd->se_tfo->new_cmd_map caller, + * make the call to transport_send_check_condition_and_sense() + * directly. Otherwise expect the fabric to make the call to + * transport_send_check_condition_and_sense() after handling + * possible unsoliticied write data payloads. + */ ret = transport_send_check_condition_and_sense(cmd, cmd->scsi_sense_reason, 0); if (ret == -EAGAIN || ret == -ENOMEM) @@ -1699,123 +1856,406 @@ void transport_generic_request_failure(struct se_cmd *cmd) } EXPORT_SYMBOL(transport_generic_request_failure); -static void __target_execute_cmd(struct se_cmd *cmd) +static inline u32 transport_lba_21(unsigned char *cdb) +{ + return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; +} + +static inline u32 transport_lba_32(unsigned char *cdb) +{ + return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; +} + +static inline unsigned long long transport_lba_64(unsigned char *cdb) +{ + unsigned int __v1, __v2; + + __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; + __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; + + return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; +} + +/* + * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs + */ +static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) +{ + unsigned int __v1, __v2; + + __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; + __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; + + return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; +} + +static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) +{ + unsigned long flags; + + spin_lock_irqsave(&se_cmd->t_state_lock, flags); + se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; + spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); +} + +/* + * Called from Fabric Module context from transport_execute_tasks() + * + * The return of this function determins if the tasks from struct se_cmd + * get added to the execution queue in transport_execute_tasks(), + * or are added to the delayed or ordered lists here. + */ +static inline int transport_execute_task_attr(struct se_cmd *cmd) +{ + if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) + return 1; + /* + * Check for the existence of HEAD_OF_QUEUE, and if true return 1 + * to allow the passed struct se_cmd list of tasks to the front of the list. 
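/*
 * Worked example (illustration only) of the big-endian LBA helpers defined
 * above: a READ_10 carries its LBA in bytes 2-5, while a READ_6 squeezes a
 * 21-bit LBA into bytes 1-3.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t lba_21(const uint8_t *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static uint32_t lba_32(const uint8_t *cdb)
{
	return ((uint32_t)cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

int main(void)
{
	uint8_t read6[6]   = { 0x08, 0x01, 0x00, 0x10, 0x08, 0x00 };
	uint8_t read10[10] = { 0x28, 0x00, 0x00, 0x01, 0x00, 0x10,
			       0x00, 0x00, 0x08, 0x00 };

	printf("READ_6  LBA: %u\n", lba_21(read6));	/* 0x010010 = 65552 */
	printf("READ_10 LBA: %u\n", lba_32(read10));	/* 0x00010010 = 65552 */
	return 0;
}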
+ */ + if (cmd->sam_task_attr == MSG_HEAD_TAG) { + pr_debug("Added HEAD_OF_QUEUE for CDB:" + " 0x%02x, se_ordered_id: %u\n", + cmd->t_task_cdb[0], + cmd->se_ordered_id); + return 1; + } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { + atomic_inc(&cmd->se_dev->dev_ordered_sync); + smp_mb__after_atomic_inc(); + + pr_debug("Added ORDERED for CDB: 0x%02x to ordered" + " list, se_ordered_id: %u\n", + cmd->t_task_cdb[0], + cmd->se_ordered_id); + /* + * Add ORDERED command to tail of execution queue if + * no other older commands exist that need to be + * completed first. + */ + if (!atomic_read(&cmd->se_dev->simple_cmds)) + return 1; + } else { + /* + * For SIMPLE and UNTAGGED Task Attribute commands + */ + atomic_inc(&cmd->se_dev->simple_cmds); + smp_mb__after_atomic_inc(); + } + /* + * Otherwise if one or more outstanding ORDERED task attribute exist, + * add the dormant task(s) built for the passed struct se_cmd to the + * execution queue and become in Active state for this struct se_device. + */ + if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) { + /* + * Otherwise, add cmd w/ tasks to delayed cmd queue that + * will be drained upon completion of HEAD_OF_QUEUE task. + */ + spin_lock(&cmd->se_dev->delayed_cmd_lock); + cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; + list_add_tail(&cmd->se_delayed_node, + &cmd->se_dev->delayed_cmd_list); + spin_unlock(&cmd->se_dev->delayed_cmd_lock); + + pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" + " delayed CMD list, se_ordered_id: %u\n", + cmd->t_task_cdb[0], cmd->sam_task_attr, + cmd->se_ordered_id); + /* + * Return zero to let transport_execute_tasks() know + * not to add the delayed tasks to the execution list. + */ + return 0; + } + /* + * Otherwise, no ORDERED task attributes exist.. + */ + return 1; +} + +/* + * Called from fabric module context in transport_generic_new_cmd() and + * transport_generic_process_write() + */ +static void transport_execute_tasks(struct se_cmd *cmd) +{ + int add_tasks; + struct se_device *se_dev = cmd->se_dev; + /* + * Call transport_cmd_check_stop() to see if a fabric exception + * has occurred that prevents execution. 
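/*
 * Simplified model (not kernel code) of the gating decision made in
 * transport_execute_task_attr(): HEAD_OF_QUEUE always runs, ORDERED runs
 * only once no SIMPLE commands are in flight, and anything arriving while an
 * ORDERED command is pending is parked on the delayed list.
 */
#include <stdbool.h>
#include <stdio.h>

enum attr { SIMPLE, ORDERED, HEAD_OF_QUEUE };

static int simple_cmds;		/* stand-ins for the per-device atomics */
static int ordered_sync;

static bool may_execute_now(enum attr attr)
{
	if (attr == HEAD_OF_QUEUE)
		return true;
	if (attr == ORDERED) {
		ordered_sync++;
		if (simple_cmds == 0)
			return true;
	} else {
		simple_cmds++;
	}
	return ordered_sync == 0;	/* otherwise queue on delayed_cmd_list */
}

int main(void)
{
	printf("%d\n", may_execute_now(SIMPLE));	/* 1: runs immediately */
	printf("%d\n", may_execute_now(ORDERED));	/* 0: waits for the SIMPLE cmd */
	printf("%d\n", may_execute_now(SIMPLE));	/* 0: parked behind ORDERED */
	return 0;
}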
+ */ + if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) { + /* + * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE + * attribute for the tasks of the received struct se_cmd CDB + */ + add_tasks = transport_execute_task_attr(cmd); + if (add_tasks) { + __transport_execute_tasks(se_dev, cmd); + return; + } + } + __transport_execute_tasks(se_dev, NULL); +} + +static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd) { - int error = 0; + int error; + struct se_cmd *cmd = NULL; + unsigned long flags; + +check_depth: + spin_lock_irq(&dev->execute_task_lock); + if (new_cmd != NULL) + __target_add_to_execute_list(new_cmd); - spin_lock_irq(&cmd->t_state_lock); - cmd->transport_state |= (CMD_T_BUSY|CMD_T_SENT); - spin_unlock_irq(&cmd->t_state_lock); + if (list_empty(&dev->execute_list)) { + spin_unlock_irq(&dev->execute_task_lock); + return 0; + } + cmd = list_first_entry(&dev->execute_list, struct se_cmd, execute_list); + __target_remove_from_execute_list(cmd); + spin_unlock_irq(&dev->execute_task_lock); + + spin_lock_irqsave(&cmd->t_state_lock, flags); + cmd->transport_state |= CMD_T_BUSY; + cmd->transport_state |= CMD_T_SENT; + + spin_unlock_irqrestore(&cmd->t_state_lock, flags); if (cmd->execute_cmd) error = cmd->execute_cmd(cmd); + else { + error = dev->transport->execute_cmd(cmd, cmd->t_data_sg, + cmd->t_data_nents, cmd->data_direction); + } - if (error) { - spin_lock_irq(&cmd->t_state_lock); - cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); - spin_unlock_irq(&cmd->t_state_lock); + if (error != 0) { + spin_lock_irqsave(&cmd->t_state_lock, flags); + cmd->transport_state &= ~CMD_T_BUSY; + cmd->transport_state &= ~CMD_T_SENT; + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_generic_request_failure(cmd); } + + new_cmd = NULL; + goto check_depth; + + return 0; } -void target_execute_cmd(struct se_cmd *cmd) +static inline u32 transport_get_sectors_6( + unsigned char *cdb, + struct se_cmd *cmd, + int *ret) { struct se_device *dev = cmd->se_dev; /* - * If the received CDB has aleady been aborted stop processing it here. + * Assume TYPE_DISK for non struct se_device objects. + * Use 8-bit sector value. */ - if (transport_check_aborted_status(cmd, 1)) - return; + if (!dev) + goto type_disk; /* - * Determine if IOCTL context caller in requesting the stopping of this - * command for LUN shutdown purposes. + * Use 24-bit allocation length for TYPE_TAPE. */ - spin_lock_irq(&cmd->t_state_lock); - if (cmd->transport_state & CMD_T_LUN_STOP) { - pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n", - __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); + if (dev->transport->get_device_type(dev) == TYPE_TAPE) + return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4]; - cmd->transport_state &= ~CMD_T_ACTIVE; - spin_unlock_irq(&cmd->t_state_lock); - complete(&cmd->transport_lun_stop_comp); - return; + /* + * Everything else assume TYPE_DISK Sector CDB location. + * Use 8-bit sector value. SBC-3 says: + * + * A TRANSFER LENGTH field set to zero specifies that 256 + * logical blocks shall be written. Any other value + * specifies the number of logical blocks that shall be + * written. + */ +type_disk: + return cdb[4] ? : 256; +} + +static inline u32 transport_get_sectors_10( + unsigned char *cdb, + struct se_cmd *cmd, + int *ret) +{ + struct se_device *dev = cmd->se_dev; + + /* + * Assume TYPE_DISK for non struct se_device objects. + * Use 16-bit sector value. 
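/*
 * Worked example (illustration only) of the transfer-length decoding these
 * helpers perform for disk-type devices: READ_6 keeps its length in byte 4
 * with 0 meaning 256 blocks, READ_10 keeps a 16-bit count in bytes 7-8.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t sectors_6(const uint8_t *cdb)
{
	return cdb[4] ? cdb[4] : 256;	/* SBC: 0 means 256 blocks */
}

static uint32_t sectors_10(const uint8_t *cdb)
{
	return ((uint32_t)cdb[7] << 8) + cdb[8];
}

int main(void)
{
	uint8_t read6[6]   = { 0x08, 0x00, 0x00, 0x00, 0x00, 0x00 };
	uint8_t read10[10] = { 0x28, 0x00, 0x00, 0x00, 0x00, 0x00,
			       0x00, 0x00, 0x08, 0x00 };

	printf("READ_6  sectors: %u\n", sectors_6(read6));	/* 256 */
	printf("READ_10 sectors: %u\n", sectors_10(read10));	/* 8 */
	printf("READ_10 bytes (512B blocks): %u\n", sectors_10(read10) * 512);
	return 0;
}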
+ */ + if (!dev) + goto type_disk; + + /* + * XXX_10 is not defined in SSC, throw an exception + */ + if (dev->transport->get_device_type(dev) == TYPE_TAPE) { + *ret = -EINVAL; + return 0; } + /* - * Determine if frontend context caller is requesting the stopping of - * this command for frontend exceptions. + * Everything else assume TYPE_DISK Sector CDB location. + * Use 16-bit sector value. */ - if (cmd->transport_state & CMD_T_STOP) { - pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", - __func__, __LINE__, - cmd->se_tfo->get_task_tag(cmd)); +type_disk: + return (u32)(cdb[7] << 8) + cdb[8]; +} - spin_unlock_irq(&cmd->t_state_lock); - complete(&cmd->t_transport_stop_comp); - return; +static inline u32 transport_get_sectors_12( + unsigned char *cdb, + struct se_cmd *cmd, + int *ret) +{ + struct se_device *dev = cmd->se_dev; + + /* + * Assume TYPE_DISK for non struct se_device objects. + * Use 32-bit sector value. + */ + if (!dev) + goto type_disk; + + /* + * XXX_12 is not defined in SSC, throw an exception + */ + if (dev->transport->get_device_type(dev) == TYPE_TAPE) { + *ret = -EINVAL; + return 0; } - cmd->t_state = TRANSPORT_PROCESSING; - spin_unlock_irq(&cmd->t_state_lock); + /* + * Everything else assume TYPE_DISK Sector CDB location. + * Use 32-bit sector value. + */ +type_disk: + return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; +} + +static inline u32 transport_get_sectors_16( + unsigned char *cdb, + struct se_cmd *cmd, + int *ret) +{ + struct se_device *dev = cmd->se_dev; + + /* + * Assume TYPE_DISK for non struct se_device objects. + * Use 32-bit sector value. + */ + if (!dev) + goto type_disk; - if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) - goto execute; + /* + * Use 24-bit allocation length for TYPE_TAPE. + */ + if (dev->transport->get_device_type(dev) == TYPE_TAPE) + return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14]; +type_disk: + return (u32)(cdb[10] << 24) + (cdb[11] << 16) + + (cdb[12] << 8) + cdb[13]; +} + +/* + * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants + */ +static inline u32 transport_get_sectors_32( + unsigned char *cdb, + struct se_cmd *cmd, + int *ret) +{ /* - * Check for the existence of HEAD_OF_QUEUE, and if true return 1 - * to allow the passed struct se_cmd list of tasks to the front of the list. + * Assume TYPE_DISK for non struct se_device objects. + * Use 32-bit sector value. */ - switch (cmd->sam_task_attr) { - case MSG_HEAD_TAG: - pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, " - "se_ordered_id: %u\n", - cmd->t_task_cdb[0], cmd->se_ordered_id); - goto execute; - case MSG_ORDERED_TAG: - atomic_inc(&dev->dev_ordered_sync); - smp_mb__after_atomic_inc(); + return (u32)(cdb[28] << 24) + (cdb[29] << 16) + + (cdb[30] << 8) + cdb[31]; - pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, " - " se_ordered_id: %u\n", - cmd->t_task_cdb[0], cmd->se_ordered_id); +} - /* - * Execute an ORDERED command if no other older commands - * exist that need to be completed first. 
- */ - if (!atomic_read(&dev->simple_cmds)) - goto execute; - break; - default: - /* - * For SIMPLE and UNTAGGED Task Attribute commands - */ - atomic_inc(&dev->simple_cmds); - smp_mb__after_atomic_inc(); - break; +static inline u32 transport_get_size( + u32 sectors, + unsigned char *cdb, + struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + + if (dev->transport->get_device_type(dev) == TYPE_TAPE) { + if (cdb[1] & 1) { /* sectors */ + return dev->se_sub_dev->se_dev_attrib.block_size * sectors; + } else /* bytes */ + return sectors; } - if (atomic_read(&dev->dev_ordered_sync) != 0) { - spin_lock(&dev->delayed_cmd_lock); - list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list); - spin_unlock(&dev->delayed_cmd_lock); + pr_debug("Returning block_size: %u, sectors: %u == %u for" + " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, + sectors, dev->se_sub_dev->se_dev_attrib.block_size * sectors, + dev->transport->name); - pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" - " delayed CMD list, se_ordered_id: %u\n", - cmd->t_task_cdb[0], cmd->sam_task_attr, - cmd->se_ordered_id); + return dev->se_sub_dev->se_dev_attrib.block_size * sectors; +} + +static void transport_xor_callback(struct se_cmd *cmd) +{ + unsigned char *buf, *addr; + struct scatterlist *sg; + unsigned int offset; + int i; + int count; + /* + * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command + * + * 1) read the specified logical block(s); + * 2) transfer logical blocks from the data-out buffer; + * 3) XOR the logical blocks transferred from the data-out buffer with + * the logical blocks read, storing the resulting XOR data in a buffer; + * 4) if the DISABLE WRITE bit is set to zero, then write the logical + * blocks transferred from the data-out buffer; and + * 5) transfer the resulting XOR data to the data-in buffer. + */ + buf = kmalloc(cmd->data_length, GFP_KERNEL); + if (!buf) { + pr_err("Unable to allocate xor_callback buf\n"); return; } + /* + * Copy the scatterlist WRITE buffer located at cmd->t_data_sg + * into the locally allocated *buf + */ + sg_copy_to_buffer(cmd->t_data_sg, + cmd->t_data_nents, + buf, + cmd->data_length); -execute: /* - * Otherwise, no ORDERED task attributes exist.. 
+ * Now perform the XOR against the BIDI read memory located at + * cmd->t_mem_bidi_list */ - __target_execute_cmd(cmd); + + offset = 0; + for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { + addr = kmap_atomic(sg_page(sg)); + if (!addr) + goto out; + + for (i = 0; i < sg->length; i++) + *(addr + sg->offset + i) ^= *(buf + offset + i); + + offset += sg->length; + kunmap_atomic(addr); + } + +out: + kfree(buf); } -EXPORT_SYMBOL(target_execute_cmd); /* * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd @@ -1829,74 +2269,780 @@ static int transport_get_sense_data(struct se_cmd *cmd) WARN_ON(!cmd->se_lun); - if (!dev) - return 0; + if (!dev) + return 0; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + return 0; + } + + if (!(cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)) + goto out; + + if (!dev->transport->get_sense_buffer) { + pr_err("dev->transport->get_sense_buffer is NULL\n"); + goto out; + } + + sense_buffer = dev->transport->get_sense_buffer(cmd); + if (!sense_buffer) { + pr_err("ITT 0x%08x cmd %p: Unable to locate" + " sense buffer for task with sense\n", + cmd->se_tfo->get_task_tag(cmd), cmd); + goto out; + } + + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER); + + memcpy(&buffer[offset], sense_buffer, TRANSPORT_SENSE_BUFFER); + + /* Automatically padded */ + cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; + + pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x and sense\n", + dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); + return 0; + +out: + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + return -1; +} + +static inline long long transport_dev_end_lba(struct se_device *dev) +{ + return dev->transport->get_blocks(dev) + 1; +} + +static int transport_cmd_get_valid_sectors(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + u32 sectors; + + if (dev->transport->get_device_type(dev) != TYPE_DISK) + return 0; + + sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); + + if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) { + pr_err("LBA: %llu Sectors: %u exceeds" + " transport_dev_end_lba(): %llu\n", + cmd->t_task_lba, sectors, + transport_dev_end_lba(dev)); + return -EINVAL; + } + + return 0; +} + +static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev) +{ + /* + * Determine if the received WRITE_SAME is used to for direct + * passthrough into Linux/SCSI with struct request via TCM/pSCSI + * or we are signaling the use of internal WRITE_SAME + UNMAP=1 + * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK code. + */ + int passthrough = (dev->transport->transport_type == + TRANSPORT_PLUGIN_PHBA_PDEV); + + if (!passthrough) { + if ((flags[0] & 0x04) || (flags[0] & 0x02)) { + pr_err("WRITE_SAME PBDATA and LBDATA" + " bits not supported for Block Discard" + " Emulation\n"); + return -ENOSYS; + } + /* + * Currently for the emulated case we only accept + * tpws with the UNMAP=1 bit set. + */ + if (!(flags[0] & 0x08)) { + pr_err("WRITE_SAME w/o UNMAP bit not" + " supported for Block Discard Emulation\n"); + return -ENOSYS; + } + } + + return 0; +} + +/* transport_generic_cmd_sequencer(): + * + * Generic Command Sequencer that should work for most DAS transport + * drivers. + * + * Called from target_setup_cmd_from_cdb() in the $FABRIC_MOD + * RX Thread. 
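/*
 * Plain-buffer sketch (scatterlists omitted; not the kernel code) of the
 * XDWRITEREAD step list quoted above: the blocks read from the medium are
 * XORed with the data-out payload, and the result is what goes back in the
 * data-in buffer.
 */
#include <stdint.h>
#include <stdio.h>

static void xdwriteread_xor(uint8_t *read_buf, const uint8_t *write_buf,
			    size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		read_buf[i] ^= write_buf[i];	/* result returned to the initiator */
}

int main(void)
{
	uint8_t on_disk[4]  = { 0xff, 0x0f, 0xaa, 0x00 };
	uint8_t data_out[4] = { 0xff, 0xf0, 0xaa, 0x55 };

	xdwriteread_xor(on_disk, data_out, sizeof(on_disk));
	printf("%02x %02x %02x %02x\n",
	       on_disk[0], on_disk[1], on_disk[2], on_disk[3]);	/* 00 ff 00 55 */
	return 0;
}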
+ * + * FIXME: Need to support other SCSI OPCODES where as well. + */ +static int transport_generic_cmd_sequencer( + struct se_cmd *cmd, + unsigned char *cdb) +{ + struct se_device *dev = cmd->se_dev; + struct se_subsystem_dev *su_dev = dev->se_sub_dev; + int ret = 0, sector_ret = 0, passthrough; + u32 sectors = 0, size = 0, pr_reg_type = 0; + u16 service_action; + u8 alua_ascq = 0; + /* + * Check for an existing UNIT ATTENTION condition + */ + if (core_scsi3_ua_check(cmd, cdb) < 0) { + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; + return -EINVAL; + } + /* + * Check status of Asymmetric Logical Unit Assignment port + */ + ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); + if (ret != 0) { + /* + * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; + * The ALUA additional sense code qualifier (ASCQ) is determined + * by the ALUA primary or secondary access state.. + */ + if (ret > 0) { + pr_debug("[%s]: ALUA TG Port not available," + " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", + cmd->se_tfo->get_fabric_name(), alua_ascq); + + transport_set_sense_codes(cmd, 0x04, alua_ascq); + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; + return -EINVAL; + } + goto out_invalid_cdb_field; + } + /* + * Check status for SPC-3 Persistent Reservations + */ + if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) { + if (su_dev->t10_pr.pr_ops.t10_seq_non_holder( + cmd, cdb, pr_reg_type) != 0) { + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; + cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EBUSY; + } + /* + * This means the CDB is allowed for the SCSI Initiator port + * when said port is *NOT* holding the legacy SPC-2 or + * SPC-3 Persistent Reservation. + */ + } + + /* + * If we operate in passthrough mode we skip most CDB emulation and + * instead hand the commands down to the physical SCSI device. 
+ */ + passthrough = + (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV); + + switch (cdb[0]) { + case READ_6: + sectors = transport_get_sectors_6(cdb, cmd, §or_ret); + if (sector_ret) + goto out_unsupported_cdb; + size = transport_get_size(sectors, cdb, cmd); + cmd->t_task_lba = transport_lba_21(cdb); + cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; + break; + case READ_10: + sectors = transport_get_sectors_10(cdb, cmd, §or_ret); + if (sector_ret) + goto out_unsupported_cdb; + size = transport_get_size(sectors, cdb, cmd); + cmd->t_task_lba = transport_lba_32(cdb); + cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; + break; + case READ_12: + sectors = transport_get_sectors_12(cdb, cmd, §or_ret); + if (sector_ret) + goto out_unsupported_cdb; + size = transport_get_size(sectors, cdb, cmd); + cmd->t_task_lba = transport_lba_32(cdb); + cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; + break; + case READ_16: + sectors = transport_get_sectors_16(cdb, cmd, §or_ret); + if (sector_ret) + goto out_unsupported_cdb; + size = transport_get_size(sectors, cdb, cmd); + cmd->t_task_lba = transport_lba_64(cdb); + cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; + break; + case WRITE_6: + sectors = transport_get_sectors_6(cdb, cmd, §or_ret); + if (sector_ret) + goto out_unsupported_cdb; + size = transport_get_size(sectors, cdb, cmd); + cmd->t_task_lba = transport_lba_21(cdb); + cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; + break; + case WRITE_10: + case WRITE_VERIFY: + sectors = transport_get_sectors_10(cdb, cmd, §or_ret); + if (sector_ret) + goto out_unsupported_cdb; + size = transport_get_size(sectors, cdb, cmd); + cmd->t_task_lba = transport_lba_32(cdb); + if (cdb[1] & 0x8) + cmd->se_cmd_flags |= SCF_FUA; + cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; + break; + case WRITE_12: + sectors = transport_get_sectors_12(cdb, cmd, §or_ret); + if (sector_ret) + goto out_unsupported_cdb; + size = transport_get_size(sectors, cdb, cmd); + cmd->t_task_lba = transport_lba_32(cdb); + if (cdb[1] & 0x8) + cmd->se_cmd_flags |= SCF_FUA; + cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; + break; + case WRITE_16: + sectors = transport_get_sectors_16(cdb, cmd, §or_ret); + if (sector_ret) + goto out_unsupported_cdb; + size = transport_get_size(sectors, cdb, cmd); + cmd->t_task_lba = transport_lba_64(cdb); + if (cdb[1] & 0x8) + cmd->se_cmd_flags |= SCF_FUA; + cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; + break; + case XDWRITEREAD_10: + if ((cmd->data_direction != DMA_TO_DEVICE) || + !(cmd->se_cmd_flags & SCF_BIDI)) + goto out_invalid_cdb_field; + sectors = transport_get_sectors_10(cdb, cmd, §or_ret); + if (sector_ret) + goto out_unsupported_cdb; + size = transport_get_size(sectors, cdb, cmd); + cmd->t_task_lba = transport_lba_32(cdb); + cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; + + /* + * Do now allow BIDI commands for passthrough mode. + */ + if (passthrough) + goto out_unsupported_cdb; + + /* + * Setup BIDI XOR callback to be run after I/O completion. + */ + cmd->transport_complete_callback = &transport_xor_callback; + if (cdb[1] & 0x8) + cmd->se_cmd_flags |= SCF_FUA; + break; + case VARIABLE_LENGTH_CMD: + service_action = get_unaligned_be16(&cdb[8]); + switch (service_action) { + case XDWRITEREAD_32: + sectors = transport_get_sectors_32(cdb, cmd, §or_ret); + if (sector_ret) + goto out_unsupported_cdb; + size = transport_get_size(sectors, cdb, cmd); + /* + * Use WRITE_32 and READ_32 opcodes for the emulated + * XDWRITE_READ_32 logic. 
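/*
 * Worked example (illustrative only) of what the WRITE_10 arm of the
 * sequencer derives from a CDB: sector count from bytes 7-8, LBA from
 * bytes 2-5, FUA from bit 3 of byte 1, and the expected size for a
 * 512-byte-block backend.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t cdb[10] = { 0x2a, 0x08, 0x00, 0x00, 0x10, 0x00,
			    0x00, 0x00, 0x10, 0x00 };
	uint32_t sectors = ((uint32_t)cdb[7] << 8) + cdb[8];
	uint32_t lba = ((uint32_t)cdb[2] << 24) | (cdb[3] << 16) |
		       (cdb[4] << 8) | cdb[5];
	bool fua = cdb[1] & 0x8;
	uint32_t size = sectors * 512;	/* transport_get_size() with 512B blocks */

	printf("WRITE_10: lba=%u sectors=%u size=%u fua=%d\n",
	       lba, sectors, size, fua);	/* lba=4096 sectors=16 size=8192 fua=1 */
	return 0;
}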
+ */ + cmd->t_task_lba = transport_lba_64_ext(cdb); + cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; + + /* + * Do now allow BIDI commands for passthrough mode. + */ + if (passthrough) + goto out_unsupported_cdb; + + /* + * Setup BIDI XOR callback to be run during after I/O + * completion. + */ + cmd->transport_complete_callback = &transport_xor_callback; + if (cdb[1] & 0x8) + cmd->se_cmd_flags |= SCF_FUA; + break; + case WRITE_SAME_32: + sectors = transport_get_sectors_32(cdb, cmd, §or_ret); + if (sector_ret) + goto out_unsupported_cdb; + + if (sectors) + size = transport_get_size(1, cdb, cmd); + else { + pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" + " supported\n"); + goto out_invalid_cdb_field; + } + + cmd->t_task_lba = get_unaligned_be64(&cdb[12]); + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + + if (target_check_write_same_discard(&cdb[10], dev) < 0) + goto out_unsupported_cdb; + if (!passthrough) + cmd->execute_cmd = target_emulate_write_same; + break; + default: + pr_err("VARIABLE_LENGTH_CMD service action" + " 0x%04x not supported\n", service_action); + goto out_unsupported_cdb; + } + break; + case MAINTENANCE_IN: + if (dev->transport->get_device_type(dev) != TYPE_ROM) { + /* MAINTENANCE_IN from SCC-2 */ + /* + * Check for emulated MI_REPORT_TARGET_PGS. + */ + if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS && + su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { + cmd->execute_cmd = + target_emulate_report_target_port_groups; + } + size = (cdb[6] << 24) | (cdb[7] << 16) | + (cdb[8] << 8) | cdb[9]; + } else { + /* GPCMD_SEND_KEY from multi media commands */ + size = (cdb[8] << 8) + cdb[9]; + } + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case MODE_SELECT: + size = cdb[4]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case MODE_SELECT_10: + size = (cdb[7] << 8) + cdb[8]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case MODE_SENSE: + size = cdb[4]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + if (!passthrough) + cmd->execute_cmd = target_emulate_modesense; + break; + case MODE_SENSE_10: + size = (cdb[7] << 8) + cdb[8]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + if (!passthrough) + cmd->execute_cmd = target_emulate_modesense; + break; + case GPCMD_READ_BUFFER_CAPACITY: + case GPCMD_SEND_OPC: + case LOG_SELECT: + case LOG_SENSE: + size = (cdb[7] << 8) + cdb[8]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case READ_BLOCK_LIMITS: + size = READ_BLOCK_LEN; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case GPCMD_GET_CONFIGURATION: + case GPCMD_READ_FORMAT_CAPACITIES: + case GPCMD_READ_DISC_INFO: + case GPCMD_READ_TRACK_RZONE_INFO: + size = (cdb[7] << 8) + cdb[8]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case PERSISTENT_RESERVE_IN: + if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) + cmd->execute_cmd = target_scsi3_emulate_pr_in; + size = (cdb[7] << 8) + cdb[8]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case PERSISTENT_RESERVE_OUT: + if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) + cmd->execute_cmd = target_scsi3_emulate_pr_out; + size = (cdb[7] << 8) + cdb[8]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case GPCMD_MECHANISM_STATUS: + case GPCMD_READ_DVD_STRUCTURE: + size = (cdb[8] << 8) + cdb[9]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case READ_POSITION: + size = READ_POSITION_LEN; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case MAINTENANCE_OUT: + if 
(dev->transport->get_device_type(dev) != TYPE_ROM) { + /* MAINTENANCE_OUT from SCC-2 + * + * Check for emulated MO_SET_TARGET_PGS. + */ + if (cdb[1] == MO_SET_TARGET_PGS && + su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { + cmd->execute_cmd = + target_emulate_set_target_port_groups; + } + + size = (cdb[6] << 24) | (cdb[7] << 16) | + (cdb[8] << 8) | cdb[9]; + } else { + /* GPCMD_REPORT_KEY from multi media commands */ + size = (cdb[8] << 8) + cdb[9]; + } + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case INQUIRY: + size = (cdb[3] << 8) + cdb[4]; + /* + * Do implict HEAD_OF_QUEUE processing for INQUIRY. + * See spc4r17 section 5.3 + */ + if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) + cmd->sam_task_attr = MSG_HEAD_TAG; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + if (!passthrough) + cmd->execute_cmd = target_emulate_inquiry; + break; + case READ_BUFFER: + size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case READ_CAPACITY: + size = READ_CAP_LEN; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + if (!passthrough) + cmd->execute_cmd = target_emulate_readcapacity; + break; + case READ_MEDIA_SERIAL_NUMBER: + case SECURITY_PROTOCOL_IN: + case SECURITY_PROTOCOL_OUT: + size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case SERVICE_ACTION_IN: + switch (cmd->t_task_cdb[1] & 0x1f) { + case SAI_READ_CAPACITY_16: + if (!passthrough) + cmd->execute_cmd = + target_emulate_readcapacity_16; + break; + default: + if (passthrough) + break; + + pr_err("Unsupported SA: 0x%02x\n", + cmd->t_task_cdb[1] & 0x1f); + goto out_invalid_cdb_field; + } + /*FALLTHROUGH*/ + case ACCESS_CONTROL_IN: + case ACCESS_CONTROL_OUT: + case EXTENDED_COPY: + case READ_ATTRIBUTE: + case RECEIVE_COPY_RESULTS: + case WRITE_ATTRIBUTE: + size = (cdb[10] << 24) | (cdb[11] << 16) | + (cdb[12] << 8) | cdb[13]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case RECEIVE_DIAGNOSTIC: + case SEND_DIAGNOSTIC: + size = (cdb[3] << 8) | cdb[4]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; +/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */ +#if 0 + case GPCMD_READ_CD: + sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; + size = (2336 * sectors); + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; +#endif + case READ_TOC: + size = cdb[8]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case REQUEST_SENSE: + size = cdb[4]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + if (!passthrough) + cmd->execute_cmd = target_emulate_request_sense; + break; + case READ_ELEMENT_STATUS: + size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case WRITE_BUFFER: + size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case RESERVE: + case RESERVE_10: + /* + * The SPC-2 RESERVE does not contain a size in the SCSI CDB. + * Assume the passthrough or $FABRIC_MOD will tell us about it. + */ + if (cdb[0] == RESERVE_10) + size = (cdb[7] << 8) | cdb[8]; + else + size = cmd->data_length; + + /* + * Setup the legacy emulated handler for SPC-2 and + * >= SPC-3 compatible reservation handling (CRH=1) + * Otherwise, we assume the underlying SCSI logic is + * is running in SPC_PASSTHROUGH, and wants reservations + * emulation disabled. 
+ */ + if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) + cmd->execute_cmd = target_scsi2_reservation_reserve; + cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; + break; + case RELEASE: + case RELEASE_10: + /* + * The SPC-2 RELEASE does not contain a size in the SCSI CDB. + * Assume the passthrough or $FABRIC_MOD will tell us about it. + */ + if (cdb[0] == RELEASE_10) + size = (cdb[7] << 8) | cdb[8]; + else + size = cmd->data_length; + + if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) + cmd->execute_cmd = target_scsi2_reservation_release; + cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; + break; + case SYNCHRONIZE_CACHE: + case SYNCHRONIZE_CACHE_16: + /* + * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE + */ + if (cdb[0] == SYNCHRONIZE_CACHE) { + sectors = transport_get_sectors_10(cdb, cmd, §or_ret); + cmd->t_task_lba = transport_lba_32(cdb); + } else { + sectors = transport_get_sectors_16(cdb, cmd, §or_ret); + cmd->t_task_lba = transport_lba_64(cdb); + } + if (sector_ret) + goto out_unsupported_cdb; + + size = transport_get_size(sectors, cdb, cmd); + cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; + + if (passthrough) + break; + + /* + * Check to ensure that LBA + Range does not exceed past end of + * device for IBLOCK and FILEIO ->do_sync_cache() backend calls + */ + if ((cmd->t_task_lba != 0) || (sectors != 0)) { + if (transport_cmd_get_valid_sectors(cmd) < 0) + goto out_invalid_cdb_field; + } + cmd->execute_cmd = target_emulate_synchronize_cache; + break; + case UNMAP: + size = get_unaligned_be16(&cdb[7]); + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + if (!passthrough) + cmd->execute_cmd = target_emulate_unmap; + break; + case WRITE_SAME_16: + sectors = transport_get_sectors_16(cdb, cmd, §or_ret); + if (sector_ret) + goto out_unsupported_cdb; - spin_lock_irqsave(&cmd->t_state_lock, flags); - if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return 0; - } + if (sectors) + size = transport_get_size(1, cdb, cmd); + else { + pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); + goto out_invalid_cdb_field; + } - if (!(cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)) - goto out; + cmd->t_task_lba = get_unaligned_be64(&cdb[2]); + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; - if (!dev->transport->get_sense_buffer) { - pr_err("dev->transport->get_sense_buffer is NULL\n"); - goto out; - } + if (target_check_write_same_discard(&cdb[1], dev) < 0) + goto out_unsupported_cdb; + if (!passthrough) + cmd->execute_cmd = target_emulate_write_same; + break; + case WRITE_SAME: + sectors = transport_get_sectors_10(cdb, cmd, §or_ret); + if (sector_ret) + goto out_unsupported_cdb; - sense_buffer = dev->transport->get_sense_buffer(cmd); - if (!sense_buffer) { - pr_err("ITT 0x%08x cmd %p: Unable to locate" - " sense buffer for task with sense\n", - cmd->se_tfo->get_task_tag(cmd), cmd); - goto out; - } + if (sectors) + size = transport_get_size(1, cdb, cmd); + else { + pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); + goto out_invalid_cdb_field; + } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); + cmd->t_task_lba = get_unaligned_be32(&cdb[2]); + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + /* + * Follow sbcr26 with WRITE_SAME (10) and check for the existence + * of byte 1 bit 3 UNMAP instead of original reserved field + */ + if (target_check_write_same_discard(&cdb[1], dev) < 0) + goto out_unsupported_cdb; + if (!passthrough) + cmd->execute_cmd = target_emulate_write_same; + break; + case ALLOW_MEDIUM_REMOVAL: 
+ case ERASE: + case REZERO_UNIT: + case SEEK_10: + case SPACE: + case START_STOP: + case TEST_UNIT_READY: + case VERIFY: + case WRITE_FILEMARKS: + cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; + if (!passthrough) + cmd->execute_cmd = target_emulate_noop; + break; + case GPCMD_CLOSE_TRACK: + case INITIALIZE_ELEMENT_STATUS: + case GPCMD_LOAD_UNLOAD: + case GPCMD_SET_SPEED: + case MOVE_MEDIUM: + cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; + break; + case REPORT_LUNS: + cmd->execute_cmd = target_report_luns; + size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; + /* + * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS + * See spc4r17 section 5.3 + */ + if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) + cmd->sam_task_attr = MSG_HEAD_TAG; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case GET_EVENT_STATUS_NOTIFICATION: + size = (cdb[7] << 8) | cdb[8]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case ATA_16: + /* Only support ATA passthrough to pSCSI backends.. */ + if (!passthrough) + goto out_unsupported_cdb; + + /* T_LENGTH */ + switch (cdb[2] & 0x3) { + case 0x0: + sectors = 0; + break; + case 0x1: + sectors = (((cdb[1] & 0x1) ? cdb[3] : 0) << 8) | cdb[4]; + break; + case 0x2: + sectors = (((cdb[1] & 0x1) ? cdb[5] : 0) << 8) | cdb[6]; + break; + case 0x3: + pr_err("T_LENGTH=0x3 not supported for ATA_16\n"); + goto out_invalid_cdb_field; + } - offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER); + /* BYTE_BLOCK */ + if (cdb[2] & 0x4) { + /* BLOCK T_TYPE: 512 or sector */ + size = sectors * ((cdb[2] & 0x10) ? + dev->se_sub_dev->se_dev_attrib.block_size : 512); + } else { + /* BYTE */ + size = sectors; + } + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + default: + pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode" + " 0x%02x, sending CHECK_CONDITION.\n", + cmd->se_tfo->get_fabric_name(), cdb[0]); + goto out_unsupported_cdb; + } - memcpy(&buffer[offset], sense_buffer, TRANSPORT_SENSE_BUFFER); + if (cmd->unknown_data_length) + cmd->data_length = size; - /* Automatically padded */ - cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; + if (size != cmd->data_length) { + pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" + " %u does not match SCSI CDB Length: %u for SAM Opcode:" + " 0x%02x\n", cmd->se_tfo->get_fabric_name(), + cmd->data_length, size, cdb[0]); - pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x and sense\n", - dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); - return 0; + cmd->cmd_spdtl = size; -out: - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return -1; -} + if (cmd->data_direction == DMA_TO_DEVICE) { + pr_err("Rejecting underflow/overflow" + " WRITE data\n"); + goto out_invalid_cdb_field; + } + /* + * Reject READ_* or WRITE_* with overflow/underflow for + * type SCF_SCSI_DATA_SG_IO_CDB. 
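/*
 * Small sketch (not kernel code) of the residual bookkeeping applied below
 * when the CDB-derived size disagrees with the fabric's expected transfer
 * length: the larger CDB size becomes OVERFLOW, the smaller UNDERFLOW, and
 * residual_count records the difference.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cdb_size = 8192;	/* 16 sectors x 512 bytes from the CDB */
	uint32_t data_length = 4096;	/* what the fabric said to expect */
	uint32_t residual;
	const char *flag;

	if (cdb_size > data_length) {
		flag = "SCF_OVERFLOW_BIT";
		residual = cdb_size - data_length;
	} else {
		flag = "SCF_UNDERFLOW_BIT";
		residual = data_length - cdb_size;
	}
	printf("%s, residual_count=%u\n", flag, residual);	/* overflow, 4096 */
	return 0;
}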
+ */ + if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) { + pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" + " CDB on non 512-byte sector setup subsystem" + " plugin: %s\n", dev->transport->name); + /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ + goto out_invalid_cdb_field; + } -/* - * Process all commands up to the last received ORDERED task attribute which - * requires another blocking boundary - */ -static void target_restart_delayed_cmds(struct se_device *dev) -{ - for (;;) { - struct se_cmd *cmd; + if (size > cmd->data_length) { + cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; + cmd->residual_count = (size - cmd->data_length); + } else { + cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; + cmd->residual_count = (cmd->data_length - size); + } + cmd->data_length = size; + } - spin_lock(&dev->delayed_cmd_lock); - if (list_empty(&dev->delayed_cmd_list)) { - spin_unlock(&dev->delayed_cmd_lock); - break; + if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { + if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) { + printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" + " big sectors %u exceeds fabric_max_sectors:" + " %u\n", cdb[0], sectors, + su_dev->se_dev_attrib.fabric_max_sectors); + goto out_invalid_cdb_field; + } + if (sectors > su_dev->se_dev_attrib.hw_max_sectors) { + printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" + " big sectors %u exceeds backend hw_max_sectors:" + " %u\n", cdb[0], sectors, + su_dev->se_dev_attrib.hw_max_sectors); + goto out_invalid_cdb_field; } + } - cmd = list_entry(dev->delayed_cmd_list.next, - struct se_cmd, se_delayed_node); - list_del(&cmd->se_delayed_node); - spin_unlock(&dev->delayed_cmd_lock); + /* reject any command that we don't have a handler for */ + if (!(passthrough || cmd->execute_cmd || + (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) + goto out_unsupported_cdb; - __target_execute_cmd(cmd); + transport_set_supported_SAM_opcode(cmd); + return ret; - if (cmd->sam_task_attr == MSG_ORDERED_TAG) - break; - } +out_unsupported_cdb: + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; + return -EINVAL; +out_invalid_cdb_field: + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + return -EINVAL; } /* @@ -1906,6 +3052,8 @@ static void target_restart_delayed_cmds(struct se_device *dev) static void transport_complete_task_attr(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; + struct se_cmd *cmd_p, *cmd_tmp; + int new_active_tasks = 0; if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { atomic_dec(&dev->simple_cmds); @@ -1927,8 +3075,38 @@ static void transport_complete_task_attr(struct se_cmd *cmd) pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); } + /* + * Process all commands up to the last received + * ORDERED task attribute which requires another blocking + * boundary + */ + spin_lock(&dev->delayed_cmd_lock); + list_for_each_entry_safe(cmd_p, cmd_tmp, + &dev->delayed_cmd_list, se_delayed_node) { - target_restart_delayed_cmds(dev); + list_del(&cmd_p->se_delayed_node); + spin_unlock(&dev->delayed_cmd_lock); + + pr_debug("Calling add_tasks() for" + " cmd_p: 0x%02x Task Attr: 0x%02x" + " Dormant -> Active, se_ordered_id: %u\n", + cmd_p->t_task_cdb[0], + cmd_p->sam_task_attr, cmd_p->se_ordered_id); + + target_add_to_execute_list(cmd_p); + new_active_tasks++; + + spin_lock(&dev->delayed_cmd_lock); + if (cmd_p->sam_task_attr == MSG_ORDERED_TAG) + break; + } + 
spin_unlock(&dev->delayed_cmd_lock); + /* + * If new tasks have become active, wake up the transport thread + * to do the processing of the Active tasks. + */ + if (new_active_tasks != 0) + wake_up_interruptible(&dev->dev_queue_obj.thread_wq); } static void transport_complete_qf(struct se_cmd *cmd) @@ -2187,27 +3365,31 @@ int transport_generic_map_mem_to_cmd( if (!sgl || !sgl_count) return 0; - /* - * Reject SCSI data overflow with map_mem_to_cmd() as incoming - * scatterlists already have been set to follow what the fabric - * passes for the original expected data transfer length. - */ - if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { - pr_warn("Rejecting SCSI DATA overflow for fabric using" - " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); - cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; - return -EINVAL; - } + if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || + (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { + /* + * Reject SCSI data overflow with map_mem_to_cmd() as incoming + * scatterlists already have been set to follow what the fabric + * passes for the original expected data transfer length. + */ + if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { + pr_warn("Rejecting SCSI DATA overflow for fabric using" + " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + return -EINVAL; + } - cmd->t_data_sg = sgl; - cmd->t_data_nents = sgl_count; + cmd->t_data_sg = sgl; + cmd->t_data_nents = sgl_count; - if (sgl_bidi && sgl_bidi_count) { - cmd->t_bidi_data_sg = sgl_bidi; - cmd->t_bidi_data_nents = sgl_bidi_count; + if (sgl_bidi && sgl_bidi_count) { + cmd->t_bidi_data_sg = sgl_bidi; + cmd->t_bidi_data_nents = sgl_bidi_count; + } + cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; } - cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; + return 0; } EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); @@ -2279,7 +3461,7 @@ transport_generic_get_mem(struct se_cmd *cmd) cmd->t_data_nents = nents; sg_init_table(cmd->t_data_sg, nents); - zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_CDB ? 0 : __GFP_ZERO; + zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 0 : __GFP_ZERO; while (length) { u32 page_len = min_t(u32, length, PAGE_SIZE); @@ -2310,6 +3492,7 @@ transport_generic_get_mem(struct se_cmd *cmd) */ int transport_generic_new_cmd(struct se_cmd *cmd) { + struct se_device *dev = cmd->se_dev; int ret = 0; /* @@ -2325,7 +3508,8 @@ int transport_generic_new_cmd(struct se_cmd *cmd) } /* Workaround for handling zero-length control CDBs */ - if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->data_length) { + if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) && + !cmd->data_length) { spin_lock_irq(&cmd->t_state_lock); cmd->t_state = TRANSPORT_COMPLETE; cmd->transport_state |= CMD_T_ACTIVE; @@ -2343,45 +3527,52 @@ int transport_generic_new_cmd(struct se_cmd *cmd) return 0; } + if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { + struct se_dev_attrib *attr = &dev->se_sub_dev->se_dev_attrib; + + if (transport_cmd_get_valid_sectors(cmd) < 0) + return -EINVAL; + + BUG_ON(cmd->data_length % attr->block_size); + BUG_ON(DIV_ROUND_UP(cmd->data_length, attr->block_size) > + attr->hw_max_sectors); + } + atomic_inc(&cmd->t_fe_count); /* - * If this command is not a write we can execute it right here, - * for write buffers we need to notify the fabric driver first - * and let it call back once the write buffers are ready. 
+ * For WRITEs, let the fabric know its buffer is ready. + * + * The command will be added to the execution queue after its write + * data has arrived. */ - target_add_to_state_list(cmd); - if (cmd->data_direction != DMA_TO_DEVICE) { - target_execute_cmd(cmd); - return 0; + if (cmd->data_direction == DMA_TO_DEVICE) { + target_add_to_state_list(cmd); + return transport_generic_write_pending(cmd); } - - spin_lock_irq(&cmd->t_state_lock); - cmd->t_state = TRANSPORT_WRITE_PENDING; - spin_unlock_irq(&cmd->t_state_lock); - - transport_cmd_check_stop(cmd, false); - - ret = cmd->se_tfo->write_pending(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) - goto queue_full; - - if (ret < 0) - return ret; - return 1; + /* + * Everything else but a WRITE, add the command to the execution queue. + */ + transport_execute_tasks(cmd); + return 0; out_fail: cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; return -EINVAL; -queue_full: - pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); - cmd->t_state = TRANSPORT_COMPLETE_QF_WP; - transport_handle_queue_full(cmd, cmd->se_dev); - return 0; } EXPORT_SYMBOL(transport_generic_new_cmd); +/* transport_generic_process_write(): + * + * + */ +void transport_generic_process_write(struct se_cmd *cmd) +{ + transport_execute_tasks(cmd); +} +EXPORT_SYMBOL(transport_generic_process_write); + static void transport_write_pending_qf(struct se_cmd *cmd) { int ret; @@ -2394,6 +3585,43 @@ static void transport_write_pending_qf(struct se_cmd *cmd) } } +static int transport_generic_write_pending(struct se_cmd *cmd) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + cmd->t_state = TRANSPORT_WRITE_PENDING; + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + /* + * Clear the se_cmd for WRITE_PENDING status in order to set + * CMD_T_ACTIVE so that transport_generic_handle_data can be called + * from HW target mode interrupt code. This is safe to be called + * with transport_off=1 before the cmd->se_tfo->write_pending + * because the se_cmd->se_lun pointer is not being cleared. + */ + transport_cmd_check_stop(cmd, 1, 0); + + /* + * Call the fabric write_pending function here to let the + * frontend know that WRITE buffers are ready. 
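/*
 * Control-flow sketch (illustration, not the kernel code) of the WRITE path
 * set up here: the backend is not touched until the fabric has fetched the
 * data-out payload, and a queue-full return from write_pending() simply
 * reschedules the request.
 */
#include <errno.h>
#include <stdio.h>

static int fabric_write_pending(void)
{
	return -EAGAIN;			/* pretend the fabric queue is full */
}

int main(void)
{
	int ret = fabric_write_pending();

	if (ret == -EAGAIN || ret == -ENOMEM) {
		/* TRANSPORT_COMPLETE_QF_WP: retried later by target_qf_do_work() */
		printf("write_pending queue full, will retry\n");
	} else if (ret == 0) {
		/* fabric calls transport_generic_handle_data() once data arrives */
		printf("waiting for data-out\n");
	}
	return 0;
}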
+ */ + ret = cmd->se_tfo->write_pending(cmd); + if (ret == -EAGAIN || ret == -ENOMEM) + goto queue_full; + else if (ret < 0) + return ret; + + return 1; + +queue_full: + pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); + cmd->t_state = TRANSPORT_COMPLETE_QF_WP; + transport_handle_queue_full(cmd, cmd->se_dev); + return 0; +} + void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) { if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { @@ -2420,11 +3648,10 @@ EXPORT_SYMBOL(transport_generic_free_cmd); * @se_cmd: command descriptor to add * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() */ -static int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, - bool ack_kref) +void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, + bool ack_kref) { unsigned long flags; - int ret = 0; kref_init(&se_cmd->cmd_kref); /* @@ -2438,17 +3665,11 @@ static int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd } spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); - if (se_sess->sess_tearing_down) { - ret = -ESHUTDOWN; - goto out; - } list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); se_cmd->check_release = 1; - -out: spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); - return ret; } +EXPORT_SYMBOL(target_get_sess_cmd); static void target_release_cmd_kref(struct kref *kref) { @@ -2483,27 +3704,28 @@ int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) } EXPORT_SYMBOL(target_put_sess_cmd); -/* target_sess_cmd_list_set_waiting - Flag all commands in - * sess_cmd_list to complete cmd_wait_comp. Set - * sess_tearing_down so no more commands are queued. - * @se_sess: session to flag +/* target_splice_sess_cmd_list - Split active cmds into sess_wait_list + * @se_sess: session to split */ -void target_sess_cmd_list_set_waiting(struct se_session *se_sess) +void target_splice_sess_cmd_list(struct se_session *se_sess) { struct se_cmd *se_cmd; unsigned long flags; - spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); + WARN_ON(!list_empty(&se_sess->sess_wait_list)); + INIT_LIST_HEAD(&se_sess->sess_wait_list); - WARN_ON(se_sess->sess_tearing_down); + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); se_sess->sess_tearing_down = 1; - list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) + list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); + + list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) se_cmd->cmd_wait_set = 1; spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); } -EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); +EXPORT_SYMBOL(target_splice_sess_cmd_list); /* target_wait_for_sess_cmds - Wait for outstanding descriptors * @se_sess: session to wait for active I/O @@ -2517,7 +3739,7 @@ void target_wait_for_sess_cmds( bool rc = false; list_for_each_entry_safe(se_cmd, tmp_cmd, - &se_sess->sess_cmd_list, se_cmd_list) { + &se_sess->sess_wait_list, se_cmd_list) { list_del(&se_cmd->se_cmd_list); pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" @@ -2569,20 +3791,26 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n", cmd->se_tfo->get_task_tag(cmd)); spin_unlock_irqrestore(&cmd->t_state_lock, flags); - transport_cmd_check_stop(cmd, false); + transport_cmd_check_stop(cmd, 1, 0); return -EPERM; } cmd->transport_state |= CMD_T_LUN_FE_STOP; spin_unlock_irqrestore(&cmd->t_state_lock, flags); + 
wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); + // XXX: audit task_flags checks. spin_lock_irqsave(&cmd->t_state_lock, flags); if ((cmd->transport_state & CMD_T_BUSY) && (cmd->transport_state & CMD_T_SENT)) { if (!target_stop_cmd(cmd, &flags)) ret++; + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + } else { + spin_unlock_irqrestore(&cmd->t_state_lock, + flags); + target_remove_from_execute_list(cmd); } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); pr_debug("ConfigFS: cmd: %p stop tasks ret:" " %d\n", cmd, ret); @@ -2593,6 +3821,7 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", cmd->se_tfo->get_task_tag(cmd)); } + transport_remove_cmd_from_queue(cmd); return 0; } @@ -2611,6 +3840,11 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) struct se_cmd, se_lun_node); list_del_init(&cmd->se_lun_node); + /* + * This will notify iscsi_target_transport.c: + * transport_cmd_check_stop() that a LUN shutdown is in + * progress for the iscsi_cmd_t. + */ spin_lock(&cmd->t_state_lock); pr_debug("SE_LUN[%d] - Setting cmd->transport" "_lun_stop for ITT: 0x%08x\n", @@ -2677,7 +3911,7 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); - transport_cmd_check_stop(cmd, false); + transport_cmd_check_stop(cmd, 1, 0); complete(&cmd->transport_lun_fe_stop_comp); spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); continue; @@ -2733,7 +3967,10 @@ bool transport_wait_for_tasks(struct se_cmd *cmd) spin_unlock_irqrestore(&cmd->t_state_lock, flags); return false; } - + /* + * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE + * has been set in transport_set_supported_SAM_opcode(). 
+ */ if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -2791,6 +4028,8 @@ bool transport_wait_for_tasks(struct se_cmd *cmd) spin_unlock_irqrestore(&cmd->t_state_lock, flags); + wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); + wait_for_completion(&cmd->t_transport_stop_comp); spin_lock_irqsave(&cmd->t_state_lock, flags); @@ -2973,15 +4212,6 @@ int transport_send_check_condition_and_sense( /* WRITE PROTECTED */ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27; break; - case TCM_ADDRESS_OUT_OF_RANGE: - /* CURRENT ERROR */ - buffer[offset] = 0x70; - buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; - /* ILLEGAL REQUEST */ - buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; - /* LOGICAL BLOCK ADDRESS OUT OF RANGE */ - buffer[offset+SPC_ASC_KEY_OFFSET] = 0x21; - break; case TCM_CHECK_CONDITION_UNIT_ATTENTION: /* CURRENT ERROR */ buffer[offset] = 0x70; @@ -3082,9 +4312,8 @@ void transport_send_task_abort(struct se_cmd *cmd) cmd->se_tfo->queue_status(cmd); } -static void target_tmr_work(struct work_struct *work) +static int transport_generic_do_tmr(struct se_cmd *cmd) { - struct se_cmd *cmd = container_of(work, struct se_cmd, work); struct se_device *dev = cmd->se_dev; struct se_tmr_req *tmr = cmd->se_tmr_req; int ret; @@ -3120,13 +4349,80 @@ static void target_tmr_work(struct work_struct *work) cmd->se_tfo->queue_tm_rsp(cmd); transport_cmd_check_stop_to_fabric(cmd); + return 0; } -int transport_generic_handle_tmr( - struct se_cmd *cmd) +/* transport_processing_thread(): + * + * + */ +static int transport_processing_thread(void *param) { - INIT_WORK(&cmd->work, target_tmr_work); - queue_work(cmd->se_dev->tmr_wq, &cmd->work); + int ret; + struct se_cmd *cmd; + struct se_device *dev = param; + + while (!kthread_should_stop()) { + ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq, + atomic_read(&dev->dev_queue_obj.queue_cnt) || + kthread_should_stop()); + if (ret < 0) + goto out; + +get_cmd: + cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj); + if (!cmd) + continue; + + switch (cmd->t_state) { + case TRANSPORT_NEW_CMD: + BUG(); + break; + case TRANSPORT_NEW_CMD_MAP: + if (!cmd->se_tfo->new_cmd_map) { + pr_err("cmd->se_tfo->new_cmd_map is" + " NULL for TRANSPORT_NEW_CMD_MAP\n"); + BUG(); + } + ret = cmd->se_tfo->new_cmd_map(cmd); + if (ret < 0) { + transport_generic_request_failure(cmd); + break; + } + ret = transport_generic_new_cmd(cmd); + if (ret < 0) { + transport_generic_request_failure(cmd); + break; + } + break; + case TRANSPORT_PROCESS_WRITE: + transport_generic_process_write(cmd); + break; + case TRANSPORT_PROCESS_TMR: + transport_generic_do_tmr(cmd); + break; + case TRANSPORT_COMPLETE_QF_WP: + transport_write_pending_qf(cmd); + break; + case TRANSPORT_COMPLETE_QF_OK: + transport_complete_qf(cmd); + break; + default: + pr_err("Unknown t_state: %d for ITT: 0x%08x " + "i_state: %d on SE LUN: %u\n", + cmd->t_state, + cmd->se_tfo->get_task_tag(cmd), + cmd->se_tfo->get_cmd_state(cmd), + cmd->se_lun->unpacked_lun); + BUG(); + } + + goto get_cmd; + } + +out: + WARN_ON(!list_empty(&dev->state_list)); + WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list)); + dev->process_thread = NULL; return 0; } -EXPORT_SYMBOL(transport_generic_handle_tmr); diff --git a/trunk/drivers/target/tcm_fc/tfc_cmd.c b/trunk/drivers/target/tcm_fc/tfc_cmd.c index b9cb5006177e..f03fb9730f5b 100644 --- a/trunk/drivers/target/tcm_fc/tfc_cmd.c +++ b/trunk/drivers/target/tcm_fc/tfc_cmd.c @@ -215,7 +215,7 @@ 
int ft_write_pending(struct se_cmd *se_cmd) */ if ((ep->xid <= lport->lro_xid) && (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) { - if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && + if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) && lport->tt.ddp_target(lport, ep->xid, se_cmd->t_data_sg, se_cmd->t_data_nents)) @@ -230,8 +230,6 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd) { struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); - if (cmd->aborted) - return ~0; return fc_seq_exch(cmd->seq)->rxid; } @@ -543,11 +541,9 @@ static void ft_send_work(struct work_struct *work) * Use a single se_cmd->cmd_kref as we expect to release se_cmd * directly from ft_check_stop_free callback in response path. */ - if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb, - &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun), - ntohl(fcp->fc_dl), task_attr, data_dir, 0)) - goto err; - + target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb, + &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun), + ntohl(fcp->fc_dl), task_attr, data_dir, 0); pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl); return; diff --git a/trunk/drivers/target/tcm_fc/tfc_io.c b/trunk/drivers/target/tcm_fc/tfc_io.c index ad36ede1a1ea..071a505f98fc 100644 --- a/trunk/drivers/target/tcm_fc/tfc_io.c +++ b/trunk/drivers/target/tcm_fc/tfc_io.c @@ -183,13 +183,6 @@ int ft_queue_data_in(struct se_cmd *se_cmd) return ft_queue_status(se_cmd); } -static void ft_execute_work(struct work_struct *work) -{ - struct ft_cmd *cmd = container_of(work, struct ft_cmd, work); - - target_execute_cmd(&cmd->se_cmd); -} - /* * Receive write data frame. */ @@ -314,10 +307,8 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) cmd->write_data_len += tlen; } last_frame: - if (cmd->write_data_len == se_cmd->data_length) { - INIT_WORK(&cmd->work, ft_execute_work); - queue_work(cmd->sess->tport->tpg->workqueue, &cmd->work); - } + if (cmd->write_data_len == se_cmd->data_length) + transport_generic_handle_data(se_cmd); drop: fc_frame_free(fp); } diff --git a/trunk/drivers/usb/gadget/tcm_usb_gadget.c b/trunk/drivers/usb/gadget/tcm_usb_gadget.c index 5444866e13ef..c46439c8dd74 100644 --- a/trunk/drivers/usb/gadget/tcm_usb_gadget.c +++ b/trunk/drivers/usb/gadget/tcm_usb_gadget.c @@ -294,7 +294,7 @@ static int bot_send_write_request(struct usbg_cmd *cmd) pr_err("%s(%d)\n", __func__, __LINE__); wait_for_completion(&cmd->write_complete); - target_execute_cmd(se_cmd); + transport_generic_process_write(se_cmd); cleanup: return ret; } @@ -725,7 +725,7 @@ static int uasp_send_write_request(struct usbg_cmd *cmd) } wait_for_completion(&cmd->write_complete); - target_execute_cmd(se_cmd); + transport_generic_process_write(se_cmd); cleanup: return ret; } @@ -1065,20 +1065,16 @@ static void usbg_cmd_work(struct work_struct *work) tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo, tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE, cmd->prio_attr, cmd->sense_iu.sense); - goto out; + + transport_send_check_condition_and_sense(se_cmd, + TCM_UNSUPPORTED_SCSI_OPCODE, 1); + usbg_cleanup_cmd(cmd); + return; } - if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, + target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun, - 0, cmd->prio_attr, dir, TARGET_SCF_UNKNOWN_SIZE) < 0) - goto out; - - return; - -out: - transport_send_check_condition_and_sense(se_cmd, - TCM_UNSUPPORTED_SCSI_OPCODE, 1); - usbg_cleanup_cmd(cmd); + 0, cmd->prio_attr, dir, TARGET_SCF_UNKNOWN_SIZE); } static int 
usbg_submit_command(struct f_uas *fu, @@ -1181,20 +1177,16 @@ static void bot_cmd_work(struct work_struct *work) tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo, tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE, cmd->prio_attr, cmd->sense_iu.sense); - goto out; + + transport_send_check_condition_and_sense(se_cmd, + TCM_UNSUPPORTED_SCSI_OPCODE, 1); + usbg_cleanup_cmd(cmd); + return; } - if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, + target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun, - cmd->data_len, cmd->prio_attr, dir, 0) < 0) - goto out; - - return; - -out: - transport_send_check_condition_and_sense(se_cmd, - TCM_UNSUPPORTED_SCSI_OPCODE, 1); - usbg_cleanup_cmd(cmd); + cmd->data_len, cmd->prio_attr, dir, 0); } static int bot_submit_command(struct f_uas *fu, @@ -1408,6 +1400,19 @@ static u32 usbg_tpg_get_inst_index(struct se_portal_group *se_tpg) return 1; } +static int usbg_new_cmd(struct se_cmd *se_cmd) +{ + struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd, + se_cmd); + int ret; + + ret = target_setup_cmd_from_cdb(se_cmd, cmd->cmd_buf); + if (ret) + return ret; + + return transport_generic_map_mem_to_cmd(se_cmd, NULL, 0, NULL, 0); +} + static void usbg_cmd_release(struct kref *ref) { struct usbg_cmd *cmd = container_of(ref, struct usbg_cmd, @@ -1897,6 +1902,7 @@ static struct target_core_fabric_ops usbg_ops = { .tpg_alloc_fabric_acl = usbg_alloc_fabric_acl, .tpg_release_fabric_acl = usbg_release_fabric_acl, .tpg_get_inst_index = usbg_tpg_get_inst_index, + .new_cmd_map = usbg_new_cmd, .release_cmd = usbg_release_cmd, .shutdown_session = usbg_shutdown_session, .close_session = usbg_close_session, diff --git a/trunk/fs/9p/v9fs.h b/trunk/fs/9p/v9fs.h index 34c59f14a1c9..e78956cbd702 100644 --- a/trunk/fs/9p/v9fs.h +++ b/trunk/fs/9p/v9fs.h @@ -144,7 +144,7 @@ extern void v9fs_session_close(struct v9fs_session_info *v9ses); extern void v9fs_session_cancel(struct v9fs_session_info *v9ses); extern void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses); extern struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags); + struct nameidata *nameidata); extern int v9fs_vfs_unlink(struct inode *i, struct dentry *d); extern int v9fs_vfs_rmdir(struct inode *i, struct dentry *d); extern int v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry, diff --git a/trunk/fs/9p/vfs_dentry.c b/trunk/fs/9p/vfs_dentry.c index 64600b5d0522..d529437ff442 100644 --- a/trunk/fs/9p/vfs_dentry.c +++ b/trunk/fs/9p/vfs_dentry.c @@ -100,13 +100,13 @@ static void v9fs_dentry_release(struct dentry *dentry) } } -static int v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags) +static int v9fs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd) { struct p9_fid *fid; struct inode *inode; struct v9fs_inode *v9inode; - if (flags & LOOKUP_RCU) + if (nd->flags & LOOKUP_RCU) return -ECHILD; inode = dentry->d_inode; diff --git a/trunk/fs/9p/vfs_inode.c b/trunk/fs/9p/vfs_inode.c index cbf9dbb1b2a2..57ccb7537dae 100644 --- a/trunk/fs/9p/vfs_inode.c +++ b/trunk/fs/9p/vfs_inode.c @@ -712,34 +712,88 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir, } /** - * v9fs_vfs_create - VFS hook to create a regular file - * - * open(.., O_CREAT) is handled in v9fs_vfs_atomic_open(). This is only called - * for mknod(2). 
- * + * v9fs_vfs_create - VFS hook to create files * @dir: directory inode that is being created * @dentry: dentry that is being deleted * @mode: create permissions + * @nd: path information * */ static int v9fs_vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool excl) + struct nameidata *nd) { - struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir); - u32 perm = unixmode2p9mode(v9ses, mode); - struct p9_fid *fid; + int err; + u32 perm; + int flags; + struct file *filp; + struct v9fs_inode *v9inode; + struct v9fs_session_info *v9ses; + struct p9_fid *fid, *inode_fid; - /* P9_OEXCL? */ - fid = v9fs_create(v9ses, dir, dentry, NULL, perm, P9_ORDWR); - if (IS_ERR(fid)) - return PTR_ERR(fid); + err = 0; + fid = NULL; + v9ses = v9fs_inode2v9ses(dir); + perm = unixmode2p9mode(v9ses, mode); + if (nd) + flags = nd->intent.open.flags; + else + flags = O_RDWR; + + fid = v9fs_create(v9ses, dir, dentry, NULL, perm, + v9fs_uflags2omode(flags, + v9fs_proto_dotu(v9ses))); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + fid = NULL; + goto error; + } v9fs_invalidate_inode_attr(dir); - p9_client_clunk(fid); + /* if we are opening a file, assign the open fid to the file */ + if (nd) { + v9inode = V9FS_I(dentry->d_inode); + mutex_lock(&v9inode->v_mutex); + if (v9ses->cache && !v9inode->writeback_fid && + ((flags & O_ACCMODE) != O_RDONLY)) { + /* + * clone a fid and add it to writeback_fid + * we do it during open time instead of + * page dirty time via write_begin/page_mkwrite + * because we want write after unlink usecase + * to work. + */ + inode_fid = v9fs_writeback_fid(dentry); + if (IS_ERR(inode_fid)) { + err = PTR_ERR(inode_fid); + mutex_unlock(&v9inode->v_mutex); + goto error; + } + v9inode->writeback_fid = (void *) inode_fid; + } + mutex_unlock(&v9inode->v_mutex); + filp = lookup_instantiate_filp(nd, dentry, generic_file_open); + if (IS_ERR(filp)) { + err = PTR_ERR(filp); + goto error; + } + + filp->private_data = fid; +#ifdef CONFIG_9P_FSCACHE + if (v9ses->cache) + v9fs_cache_inode_set_cookie(dentry->d_inode, filp); +#endif + } else + p9_client_clunk(fid); return 0; + +error: + if (fid) + p9_client_clunk(fid); + + return err; } /** @@ -785,7 +839,7 @@ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode */ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nameidata) { struct dentry *res; struct super_block *sb; @@ -795,8 +849,8 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, char *name; int result = 0; - p9_debug(P9_DEBUG_VFS, "dir: %p dentry: (%s) %p flags: %x\n", - dir, dentry->d_name.name, dentry, flags); + p9_debug(P9_DEBUG_VFS, "dir: %p dentry: (%s) %p nameidata: %p\n", + dir, dentry->d_name.name, dentry, nameidata); if (dentry->d_name.len > NAME_MAX) return ERR_PTR(-ENAMETOOLONG); @@ -856,86 +910,6 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, return ERR_PTR(result); } -static int -v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry, - struct file *file, unsigned flags, umode_t mode, - int *opened) -{ - int err; - u32 perm; - struct v9fs_inode *v9inode; - struct v9fs_session_info *v9ses; - struct p9_fid *fid, *inode_fid; - struct dentry *res = NULL; - - if (d_unhashed(dentry)) { - res = v9fs_vfs_lookup(dir, dentry, 0); - if (IS_ERR(res)) - return PTR_ERR(res); - - if (res) - dentry = res; - } - - /* Only creates */ - if (!(flags & O_CREAT) || dentry->d_inode) - return finish_no_open(file, res); - - err = 0; - fid = 
NULL; - v9ses = v9fs_inode2v9ses(dir); - perm = unixmode2p9mode(v9ses, mode); - fid = v9fs_create(v9ses, dir, dentry, NULL, perm, - v9fs_uflags2omode(flags, - v9fs_proto_dotu(v9ses))); - if (IS_ERR(fid)) { - err = PTR_ERR(fid); - fid = NULL; - goto error; - } - - v9fs_invalidate_inode_attr(dir); - v9inode = V9FS_I(dentry->d_inode); - mutex_lock(&v9inode->v_mutex); - if (v9ses->cache && !v9inode->writeback_fid && - ((flags & O_ACCMODE) != O_RDONLY)) { - /* - * clone a fid and add it to writeback_fid - * we do it during open time instead of - * page dirty time via write_begin/page_mkwrite - * because we want write after unlink usecase - * to work. - */ - inode_fid = v9fs_writeback_fid(dentry); - if (IS_ERR(inode_fid)) { - err = PTR_ERR(inode_fid); - mutex_unlock(&v9inode->v_mutex); - goto error; - } - v9inode->writeback_fid = (void *) inode_fid; - } - mutex_unlock(&v9inode->v_mutex); - err = finish_open(file, dentry, generic_file_open, opened); - if (err) - goto error; - - file->private_data = fid; -#ifdef CONFIG_9P_FSCACHE - if (v9ses->cache) - v9fs_cache_inode_set_cookie(dentry->d_inode, file); -#endif - - *opened |= FILE_CREATED; -out: - dput(res); - return err; - -error: - if (fid) - p9_client_clunk(fid); - goto out; -} - /** * v9fs_vfs_unlink - VFS unlink hook to delete an inode * @i: inode that is being unlinked @@ -1514,7 +1488,6 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode) static const struct inode_operations v9fs_dir_inode_operations_dotu = { .create = v9fs_vfs_create, .lookup = v9fs_vfs_lookup, - .atomic_open = v9fs_vfs_atomic_open, .symlink = v9fs_vfs_symlink, .link = v9fs_vfs_link, .unlink = v9fs_vfs_unlink, @@ -1529,7 +1502,6 @@ static const struct inode_operations v9fs_dir_inode_operations_dotu = { static const struct inode_operations v9fs_dir_inode_operations = { .create = v9fs_vfs_create, .lookup = v9fs_vfs_lookup, - .atomic_open = v9fs_vfs_atomic_open, .unlink = v9fs_vfs_unlink, .mkdir = v9fs_vfs_mkdir, .rmdir = v9fs_vfs_rmdir, diff --git a/trunk/fs/9p/vfs_inode_dotl.c b/trunk/fs/9p/vfs_inode_dotl.c index 40895546e103..e3dd2a1e2bfc 100644 --- a/trunk/fs/9p/vfs_inode_dotl.c +++ b/trunk/fs/9p/vfs_inode_dotl.c @@ -230,25 +230,20 @@ int v9fs_open_to_dotl_flags(int flags) * @dir: directory inode that is being created * @dentry: dentry that is being deleted * @mode: create permissions + * @nd: path information * */ static int v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, umode_t omode, - bool excl) -{ - return v9fs_vfs_mknod_dotl(dir, dentry, omode, 0); -} - -static int -v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry, - struct file *file, unsigned flags, umode_t omode, - int *opened) + struct nameidata *nd) { int err = 0; gid_t gid; + int flags; umode_t mode; char *name = NULL; + struct file *filp; struct p9_qid qid; struct inode *inode; struct p9_fid *fid = NULL; @@ -256,22 +251,18 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry, struct p9_fid *dfid, *ofid, *inode_fid; struct v9fs_session_info *v9ses; struct posix_acl *pacl = NULL, *dacl = NULL; - struct dentry *res = NULL; - - if (d_unhashed(dentry)) { - res = v9fs_vfs_lookup(dir, dentry, 0); - if (IS_ERR(res)) - return PTR_ERR(res); - - if (res) - dentry = res; - } - - /* Only creates */ - if (!(flags & O_CREAT) || dentry->d_inode) - return finish_no_open(file, res); v9ses = v9fs_inode2v9ses(dir); + if (nd) + flags = nd->intent.open.flags; + else { + /* + * create call without LOOKUP_OPEN is due + * to mknod of regular files. 
So use mknod + * operation. + */ + return v9fs_vfs_mknod_dotl(dir, dentry, omode, 0); + } name = (char *) dentry->d_name.name; p9_debug(P9_DEBUG_VFS, "name:%s flags:0x%x mode:0x%hx\n", @@ -281,7 +272,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry, if (IS_ERR(dfid)) { err = PTR_ERR(dfid); p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err); - goto out; + return err; } /* clone a fid to use for creation */ @@ -289,7 +280,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry, if (IS_ERR(ofid)) { err = PTR_ERR(ofid); p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err); - goto out; + return err; } gid = v9fs_get_fsgid_for_create(dir); @@ -354,18 +345,17 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry, } mutex_unlock(&v9inode->v_mutex); /* Since we are opening a file, assign the open fid to the file */ - err = finish_open(file, dentry, generic_file_open, opened); - if (err) + filp = lookup_instantiate_filp(nd, dentry, generic_file_open); + if (IS_ERR(filp)) { + err = PTR_ERR(filp); goto err_clunk_old_fid; - file->private_data = ofid; + } + filp->private_data = ofid; #ifdef CONFIG_9P_FSCACHE if (v9ses->cache) - v9fs_cache_inode_set_cookie(inode, file); + v9fs_cache_inode_set_cookie(inode, filp); #endif - *opened |= FILE_CREATED; -out: - dput(res); - return err; + return 0; error: if (fid) @@ -374,7 +364,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry, if (ofid) p9_client_clunk(ofid); v9fs_set_create_acl(NULL, &dacl, &pacl); - goto out; + return err; } /** @@ -992,7 +982,6 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode) const struct inode_operations v9fs_dir_inode_operations_dotl = { .create = v9fs_vfs_create_dotl, - .atomic_open = v9fs_vfs_atomic_open_dotl, .lookup = v9fs_vfs_lookup, .link = v9fs_vfs_link_dotl, .symlink = v9fs_vfs_symlink_dotl, diff --git a/trunk/fs/9p/vfs_super.c b/trunk/fs/9p/vfs_super.c index 137d50396898..8c92a9ba8330 100644 --- a/trunk/fs/9p/vfs_super.c +++ b/trunk/fs/9p/vfs_super.c @@ -89,7 +89,7 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses, if (v9ses->cache) sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_CACHE_SIZE; - sb->s_flags |= MS_ACTIVE | MS_DIRSYNC | MS_NOATIME; + sb->s_flags = flags | MS_ACTIVE | MS_DIRSYNC | MS_NOATIME; if (!v9ses->cache) sb->s_flags |= MS_SYNCHRONOUS; @@ -137,7 +137,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags, goto close_session; } - sb = sget(fs_type, NULL, v9fs_set_super, flags, v9ses); + sb = sget(fs_type, NULL, v9fs_set_super, v9ses); if (IS_ERR(sb)) { retval = PTR_ERR(sb); goto clunk_fid; diff --git a/trunk/fs/adfs/dir.c b/trunk/fs/adfs/dir.c index b3be2e7c5643..3d83075aaa2e 100644 --- a/trunk/fs/adfs/dir.c +++ b/trunk/fs/adfs/dir.c @@ -266,7 +266,7 @@ const struct dentry_operations adfs_dentry_operations = { }; static struct dentry * -adfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) +adfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct inode *inode = NULL; struct object_info obj; diff --git a/trunk/fs/adfs/super.c b/trunk/fs/adfs/super.c index bdaec92353c2..06fdcc9382c4 100644 --- a/trunk/fs/adfs/super.c +++ b/trunk/fs/adfs/super.c @@ -246,6 +246,7 @@ static struct inode *adfs_alloc_inode(struct super_block *sb) static void adfs_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); + INIT_LIST_HEAD(&inode->i_dentry); 
kmem_cache_free(adfs_inode_cachep, ADFS_I(inode)); } diff --git a/trunk/fs/affs/affs.h b/trunk/fs/affs/affs.h index 6e216419f340..1fceb320d2f2 100644 --- a/trunk/fs/affs/affs.h +++ b/trunk/fs/affs/affs.h @@ -3,7 +3,6 @@ #include #include #include -#include /* AmigaOS allows file names with up to 30 characters length. * Names longer than that will be silently truncated. If you @@ -101,10 +100,6 @@ struct affs_sb_info { char *s_prefix; /* Prefix for volumes and assigns. */ char s_volume[32]; /* Volume prefix for absolute symlinks. */ spinlock_t symlink_lock; /* protects the previous two */ - struct super_block *sb; /* the VFS superblock object */ - int work_queued; /* non-zero delayed work is queued */ - struct delayed_work sb_work; /* superblock flush delayed work */ - spinlock_t work_lock; /* protects sb_work and work_queued */ }; #define SF_INTL 0x0001 /* International filesystem. */ @@ -125,8 +120,6 @@ static inline struct affs_sb_info *AFFS_SB(struct super_block *sb) return sb->s_fs_info; } -void affs_mark_sb_dirty(struct super_block *sb); - /* amigaffs.c */ extern int affs_insert_hash(struct inode *inode, struct buffer_head *bh); @@ -153,9 +146,9 @@ extern void affs_free_bitmap(struct super_block *sb); /* namei.c */ extern int affs_hash_name(struct super_block *sb, const u8 *name, unsigned int len); -extern struct dentry *affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int); +extern struct dentry *affs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *); extern int affs_unlink(struct inode *dir, struct dentry *dentry); -extern int affs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool); +extern int affs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *); extern int affs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode); extern int affs_rmdir(struct inode *dir, struct dentry *dentry); extern int affs_link(struct dentry *olddentry, struct inode *dir, diff --git a/trunk/fs/affs/amigaffs.c b/trunk/fs/affs/amigaffs.c index eb82ee53ee0b..52a6407682e6 100644 --- a/trunk/fs/affs/amigaffs.c +++ b/trunk/fs/affs/amigaffs.c @@ -122,16 +122,22 @@ affs_remove_hash(struct inode *dir, struct buffer_head *rem_bh) } static void -affs_fix_dcache(struct inode *inode, u32 entry_ino) +affs_fix_dcache(struct dentry *dentry, u32 entry_ino) { - struct dentry *dentry; - struct hlist_node *p; + struct inode *inode = dentry->d_inode; + void *data = dentry->d_fsdata; + struct list_head *head, *next; + spin_lock(&inode->i_lock); - hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) { + head = &inode->i_dentry; + next = head->next; + while (next != head) { + dentry = list_entry(next, struct dentry, d_alias); if (entry_ino == (u32)(long)dentry->d_fsdata) { - dentry->d_fsdata = (void *)inode->i_ino; + dentry->d_fsdata = data; break; } + next = next->next; } spin_unlock(&inode->i_lock); } @@ -171,11 +177,7 @@ affs_remove_link(struct dentry *dentry) } affs_lock_dir(dir); - /* - * if there's a dentry for that block, make it - * refer to inode itself. 
- */ - affs_fix_dcache(inode, link_ino); + affs_fix_dcache(dentry, link_ino); retval = affs_remove_hash(dir, link_bh); if (retval) { affs_unlock_dir(dir); diff --git a/trunk/fs/affs/bitmap.c b/trunk/fs/affs/bitmap.c index 6e0be43ef6ef..3e262711ae06 100644 --- a/trunk/fs/affs/bitmap.c +++ b/trunk/fs/affs/bitmap.c @@ -103,7 +103,7 @@ affs_free_block(struct super_block *sb, u32 block) *(__be32 *)bh->b_data = cpu_to_be32(tmp - mask); mark_buffer_dirty(bh); - affs_mark_sb_dirty(sb); + sb->s_dirt = 1; bm->bm_free++; mutex_unlock(&sbi->s_bmlock); @@ -248,7 +248,7 @@ affs_alloc_block(struct inode *inode, u32 goal) *(__be32 *)bh->b_data = cpu_to_be32(tmp + mask); mark_buffer_dirty(bh); - affs_mark_sb_dirty(sb); + sb->s_dirt = 1; mutex_unlock(&sbi->s_bmlock); diff --git a/trunk/fs/affs/namei.c b/trunk/fs/affs/namei.c index ff65884a7839..47806940aac0 100644 --- a/trunk/fs/affs/namei.c +++ b/trunk/fs/affs/namei.c @@ -211,7 +211,7 @@ affs_find_entry(struct inode *dir, struct dentry *dentry) } struct dentry * -affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) +affs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct super_block *sb = dir->i_sb; struct buffer_head *bh; @@ -255,7 +255,7 @@ affs_unlink(struct inode *dir, struct dentry *dentry) } int -affs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) +affs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd) { struct super_block *sb = dir->i_sb; struct inode *inode; diff --git a/trunk/fs/affs/super.c b/trunk/fs/affs/super.c index c70f1e5fc024..0782653a05a2 100644 --- a/trunk/fs/affs/super.c +++ b/trunk/fs/affs/super.c @@ -17,7 +17,6 @@ #include #include #include -#include #include "affs.h" extern struct timezone sys_tz; @@ -26,17 +25,15 @@ static int affs_statfs(struct dentry *dentry, struct kstatfs *buf); static int affs_remount (struct super_block *sb, int *flags, char *data); static void -affs_commit_super(struct super_block *sb, int wait) +affs_commit_super(struct super_block *sb, int wait, int clean) { struct affs_sb_info *sbi = AFFS_SB(sb); struct buffer_head *bh = sbi->s_root_bh; struct affs_root_tail *tail = AFFS_ROOT_TAIL(sb, bh); - lock_buffer(bh); + tail->bm_flag = cpu_to_be32(clean); secs_to_datestamp(get_seconds(), &tail->disk_change); affs_fix_checksum(sb, bh); - unlock_buffer(bh); - mark_buffer_dirty(bh); if (wait) sync_dirty_buffer(bh); @@ -48,7 +45,9 @@ affs_put_super(struct super_block *sb) struct affs_sb_info *sbi = AFFS_SB(sb); pr_debug("AFFS: put_super()\n"); - cancel_delayed_work_sync(&sbi->sb_work); + if (!(sb->s_flags & MS_RDONLY) && sb->s_dirt) + affs_commit_super(sb, 1, 1); + kfree(sbi->s_prefix); affs_free_bitmap(sb); affs_brelse(sbi->s_root_bh); @@ -56,43 +55,26 @@ affs_put_super(struct super_block *sb) sb->s_fs_info = NULL; } -static int -affs_sync_fs(struct super_block *sb, int wait) -{ - affs_commit_super(sb, wait); - return 0; -} - -static void flush_superblock(struct work_struct *work) +static void +affs_write_super(struct super_block *sb) { - struct affs_sb_info *sbi; - struct super_block *sb; - - sbi = container_of(work, struct affs_sb_info, sb_work.work); - sb = sbi->sb; + lock_super(sb); + if (!(sb->s_flags & MS_RDONLY)) + affs_commit_super(sb, 1, 2); + sb->s_dirt = 0; + unlock_super(sb); - spin_lock(&sbi->work_lock); - sbi->work_queued = 0; - spin_unlock(&sbi->work_lock); - - affs_commit_super(sb, 1); + pr_debug("AFFS: write_super() at %lu, clean=2\n", get_seconds()); } -void affs_mark_sb_dirty(struct 
super_block *sb) +static int +affs_sync_fs(struct super_block *sb, int wait) { - struct affs_sb_info *sbi = AFFS_SB(sb); - unsigned long delay; - - if (sb->s_flags & MS_RDONLY) - return; - - spin_lock(&sbi->work_lock); - if (!sbi->work_queued) { - delay = msecs_to_jiffies(dirty_writeback_interval * 10); - queue_delayed_work(system_long_wq, &sbi->sb_work, delay); - sbi->work_queued = 1; - } - spin_unlock(&sbi->work_lock); + lock_super(sb); + affs_commit_super(sb, wait, 2); + sb->s_dirt = 0; + unlock_super(sb); + return 0; } static struct kmem_cache * affs_inode_cachep; @@ -156,6 +138,7 @@ static const struct super_operations affs_sops = { .write_inode = affs_write_inode, .evict_inode = affs_evict_inode, .put_super = affs_put_super, + .write_super = affs_write_super, .sync_fs = affs_sync_fs, .statfs = affs_statfs, .remount_fs = affs_remount, @@ -322,11 +305,8 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent) return -ENOMEM; sb->s_fs_info = sbi; - sbi->sb = sb; mutex_init(&sbi->s_bmlock); spin_lock_init(&sbi->symlink_lock); - spin_lock_init(&sbi->work_lock); - INIT_DELAYED_WORK(&sbi->sb_work, flush_superblock); if (!parse_options(data,&uid,&gid,&i,&reserved,&root_block, &blocksize,&sbi->s_prefix, @@ -551,7 +531,6 @@ affs_remount(struct super_block *sb, int *flags, char *data) return -EINVAL; } - flush_delayed_work_sync(&sbi->sb_work); replace_mount_options(sb, new_opts); sbi->s_flags = mount_flags; @@ -570,9 +549,10 @@ affs_remount(struct super_block *sb, int *flags, char *data) if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) return 0; - if (*flags & MS_RDONLY) + if (*flags & MS_RDONLY) { + affs_write_super(sb); affs_free_bitmap(sb); - else + } else res = affs_init_bitmap(sb, flags); return res; diff --git a/trunk/fs/afs/dir.c b/trunk/fs/afs/dir.c index db477906ba4f..e22dc4b4a503 100644 --- a/trunk/fs/afs/dir.c +++ b/trunk/fs/afs/dir.c @@ -20,16 +20,16 @@ #include "internal.h" static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags); + struct nameidata *nd); static int afs_dir_open(struct inode *inode, struct file *file); static int afs_readdir(struct file *file, void *dirent, filldir_t filldir); -static int afs_d_revalidate(struct dentry *dentry, unsigned int flags); +static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd); static int afs_d_delete(const struct dentry *dentry); static void afs_d_release(struct dentry *dentry); static int afs_lookup_filldir(void *_cookie, const char *name, int nlen, loff_t fpos, u64 ino, unsigned dtype); static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool excl); + struct nameidata *nd); static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode); static int afs_rmdir(struct inode *dir, struct dentry *dentry); static int afs_unlink(struct inode *dir, struct dentry *dentry); @@ -516,7 +516,7 @@ static struct inode *afs_try_auto_mntpt( * look up an entry in a directory */ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { struct afs_vnode *vnode; struct afs_fid fid; @@ -598,7 +598,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, * - NOTE! 
the hit can be a negative hit too, so we can't assume we have an * inode */ -static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) +static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd) { struct afs_vnode *vnode, *dir; struct afs_fid uninitialized_var(fid); @@ -607,7 +607,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) void *dir_version; int ret; - if (flags & LOOKUP_RCU) + if (nd->flags & LOOKUP_RCU) return -ECHILD; vnode = AFS_FS_I(dentry->d_inode); @@ -949,7 +949,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry) * create a regular file on an AFS filesystem */ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool excl) + struct nameidata *nd) { struct afs_file_status status; struct afs_callback cb; diff --git a/trunk/fs/afs/mntpt.c b/trunk/fs/afs/mntpt.c index 9682c33d5daf..298cf8919ec7 100644 --- a/trunk/fs/afs/mntpt.c +++ b/trunk/fs/afs/mntpt.c @@ -22,7 +22,7 @@ static struct dentry *afs_mntpt_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags); + struct nameidata *nd); static int afs_mntpt_open(struct inode *inode, struct file *file); static void afs_mntpt_expiry_timed_out(struct work_struct *work); @@ -104,7 +104,7 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key) */ static struct dentry *afs_mntpt_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { _enter("%p,%p{%p{%s},%s}", dir, diff --git a/trunk/fs/afs/super.c b/trunk/fs/afs/super.c index df8c6047c2a1..f02b31e7e648 100644 --- a/trunk/fs/afs/super.c +++ b/trunk/fs/afs/super.c @@ -395,7 +395,7 @@ static struct dentry *afs_mount(struct file_system_type *fs_type, as->volume = vol; /* allocate a deviceless superblock */ - sb = sget(fs_type, afs_test_super, afs_set_super, flags, as); + sb = sget(fs_type, afs_test_super, afs_set_super, as); if (IS_ERR(sb)) { ret = PTR_ERR(sb); afs_put_volume(vol); @@ -406,6 +406,7 @@ static struct dentry *afs_mount(struct file_system_type *fs_type, if (!sb->s_root) { /* initial superblock/root creation */ _debug("create"); + sb->s_flags = flags; ret = afs_fill_super(sb, ¶ms); if (ret < 0) { deactivate_locked_super(sb); diff --git a/trunk/fs/aio.c b/trunk/fs/aio.c index 71f613cf4a85..55c4c7656053 100644 --- a/trunk/fs/aio.c +++ b/trunk/fs/aio.c @@ -56,6 +56,13 @@ static struct kmem_cache *kioctx_cachep; static struct workqueue_struct *aio_wq; +/* Used for rare fput completion. */ +static void aio_fput_routine(struct work_struct *); +static DECLARE_WORK(fput_work, aio_fput_routine); + +static DEFINE_SPINLOCK(fput_lock); +static LIST_HEAD(fput_head); + static void aio_kick_handler(struct work_struct *); static void aio_queue_work(struct kioctx *); @@ -472,6 +479,7 @@ static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch) { unsigned short allocated, to_alloc; long avail; + bool called_fput = false; struct kiocb *req, *n; struct aio_ring *ring; @@ -487,11 +495,28 @@ static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch) if (allocated == 0) goto out; +retry: spin_lock_irq(&ctx->ctx_lock); ring = kmap_atomic(ctx->ring_info.ring_pages[0]); avail = aio_ring_avail(&ctx->ring_info, ring) - ctx->reqs_active; BUG_ON(avail < 0); + if (avail == 0 && !called_fput) { + /* + * Handle a potential starvation case. It is possible that + * we hold the last reference on a struct file, causing us + * to delay the final fput to non-irq context. 
In this case, + * ctx->reqs_active is artificially high. Calling the fput + * routine here may free up a slot in the event completion + * ring, allowing this allocation to succeed. + */ + kunmap_atomic(ring); + spin_unlock_irq(&ctx->ctx_lock); + aio_fput_routine(NULL); + called_fput = true; + goto retry; + } + if (avail < allocated) { /* Trim back the number of requests. */ list_for_each_entry_safe(req, n, &batch->head, ki_batch) { @@ -545,6 +570,36 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req) wake_up_all(&ctx->wait); } +static void aio_fput_routine(struct work_struct *data) +{ + spin_lock_irq(&fput_lock); + while (likely(!list_empty(&fput_head))) { + struct kiocb *req = list_kiocb(fput_head.next); + struct kioctx *ctx = req->ki_ctx; + + list_del(&req->ki_list); + spin_unlock_irq(&fput_lock); + + /* Complete the fput(s) */ + if (req->ki_filp != NULL) + fput(req->ki_filp); + + /* Link the iocb into the context's free list */ + rcu_read_lock(); + spin_lock_irq(&ctx->ctx_lock); + really_put_req(ctx, req); + /* + * at that point ctx might've been killed, but actual + * freeing is RCU'd + */ + spin_unlock_irq(&ctx->ctx_lock); + rcu_read_unlock(); + + spin_lock_irq(&fput_lock); + } + spin_unlock_irq(&fput_lock); +} + /* __aio_put_req * Returns true if this put was the last user of the request. */ @@ -563,9 +618,21 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req) req->ki_cancel = NULL; req->ki_retry = NULL; - fput(req->ki_filp); - req->ki_filp = NULL; - really_put_req(ctx, req); + /* + * Try to optimize the aio and eventfd file* puts, by avoiding to + * schedule work in case it is not final fput() time. In normal cases, + * we would not be holding the last reference to the file*, so + * this function will be executed w/out any aio kthread wakeup. + */ + if (unlikely(!fput_atomic(req->ki_filp))) { + spin_lock(&fput_lock); + list_add(&req->ki_list, &fput_head); + spin_unlock(&fput_lock); + schedule_work(&fput_work); + } else { + req->ki_filp = NULL; + really_put_req(ctx, req); + } return 1; } diff --git a/trunk/fs/attr.c b/trunk/fs/attr.c index 29e38a1f7f77..0da90951d277 100644 --- a/trunk/fs/attr.c +++ b/trunk/fs/attr.c @@ -171,8 +171,6 @@ int notify_change(struct dentry * dentry, struct iattr * attr) struct timespec now; unsigned int ia_valid = attr->ia_valid; - WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex)); - if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_TIMES_SET)) { if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) return -EPERM; @@ -252,4 +250,5 @@ int notify_change(struct dentry * dentry, struct iattr * attr) return error; } + EXPORT_SYMBOL(notify_change); diff --git a/trunk/fs/autofs4/dev-ioctl.c b/trunk/fs/autofs4/dev-ioctl.c index abf645c1703b..aa9103f8f01b 100644 --- a/trunk/fs/autofs4/dev-ioctl.c +++ b/trunk/fs/autofs4/dev-ioctl.c @@ -257,8 +257,8 @@ static int autofs_dev_ioctl_open_mountpoint(const char *name, dev_t devid) * corresponding to the autofs fs we want to open. 
*/ - filp = dentry_open(&path, O_RDONLY, current_cred()); - path_put(&path); + filp = dentry_open(path.dentry, path.mnt, O_RDONLY, + current_cred()); if (IS_ERR(filp)) { err = PTR_ERR(filp); goto out; diff --git a/trunk/fs/autofs4/root.c b/trunk/fs/autofs4/root.c index e7396cfdb109..75e5f1c8e028 100644 --- a/trunk/fs/autofs4/root.c +++ b/trunk/fs/autofs4/root.c @@ -32,7 +32,7 @@ static long autofs4_root_ioctl(struct file *,unsigned int,unsigned long); static long autofs4_root_compat_ioctl(struct file *,unsigned int,unsigned long); #endif static int autofs4_dir_open(struct inode *inode, struct file *file); -static struct dentry *autofs4_lookup(struct inode *,struct dentry *, unsigned int); +static struct dentry *autofs4_lookup(struct inode *,struct dentry *, struct nameidata *); static struct vfsmount *autofs4_d_automount(struct path *); static int autofs4_d_manage(struct dentry *, bool); static void autofs4_dentry_release(struct dentry *); @@ -458,7 +458,7 @@ int autofs4_d_manage(struct dentry *dentry, bool rcu_walk) } /* Lookups in the root directory */ -static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) +static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct autofs_sb_info *sbi; struct autofs_info *ino; diff --git a/trunk/fs/bad_inode.c b/trunk/fs/bad_inode.c index b1342ffb3cf6..1b35d6bd06b0 100644 --- a/trunk/fs/bad_inode.c +++ b/trunk/fs/bad_inode.c @@ -173,13 +173,13 @@ static const struct file_operations bad_file_ops = }; static int bad_inode_create (struct inode *dir, struct dentry *dentry, - umode_t mode, bool excl) + umode_t mode, struct nameidata *nd) { return -EIO; } static struct dentry *bad_inode_lookup(struct inode *dir, - struct dentry *dentry, unsigned int flags) + struct dentry *dentry, struct nameidata *nd) { return ERR_PTR(-EIO); } diff --git a/trunk/fs/befs/linuxvfs.c b/trunk/fs/befs/linuxvfs.c index cf7f3c67c8b7..e18da23d42b5 100644 --- a/trunk/fs/befs/linuxvfs.c +++ b/trunk/fs/befs/linuxvfs.c @@ -34,7 +34,7 @@ static int befs_readdir(struct file *, void *, filldir_t); static int befs_get_block(struct inode *, sector_t, struct buffer_head *, int); static int befs_readpage(struct file *file, struct page *page); static sector_t befs_bmap(struct address_space *mapping, sector_t block); -static struct dentry *befs_lookup(struct inode *, struct dentry *, unsigned int); +static struct dentry *befs_lookup(struct inode *, struct dentry *, struct nameidata *); static struct inode *befs_iget(struct super_block *, unsigned long); static struct inode *befs_alloc_inode(struct super_block *sb); static void befs_destroy_inode(struct inode *inode); @@ -159,7 +159,7 @@ befs_get_block(struct inode *inode, sector_t block, } static struct dentry * -befs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) +befs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct inode *inode = NULL; struct super_block *sb = dir->i_sb; diff --git a/trunk/fs/bfs/dir.c b/trunk/fs/bfs/dir.c index 2785ef91191a..d12c7966db27 100644 --- a/trunk/fs/bfs/dir.c +++ b/trunk/fs/bfs/dir.c @@ -85,7 +85,7 @@ const struct file_operations bfs_dir_operations = { extern void dump_imap(const char *, struct super_block *); static int bfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool excl) + struct nameidata *nd) { int err; struct inode *inode; @@ -133,7 +133,7 @@ static int bfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, } static struct 
dentry *bfs_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { struct inode *inode = NULL; struct buffer_head *bh; diff --git a/trunk/fs/block_dev.c b/trunk/fs/block_dev.c index 1e519195d45b..c2bbe1fb1326 100644 --- a/trunk/fs/block_dev.c +++ b/trunk/fs/block_dev.c @@ -1710,39 +1710,3 @@ int __invalidate_device(struct block_device *bdev, bool kill_dirty) return res; } EXPORT_SYMBOL(__invalidate_device); - -void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg) -{ - struct inode *inode, *old_inode = NULL; - - spin_lock(&inode_sb_list_lock); - list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) { - struct address_space *mapping = inode->i_mapping; - - spin_lock(&inode->i_lock); - if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) || - mapping->nrpages == 0) { - spin_unlock(&inode->i_lock); - continue; - } - __iget(inode); - spin_unlock(&inode->i_lock); - spin_unlock(&inode_sb_list_lock); - /* - * We hold a reference to 'inode' so it couldn't have been - * removed from s_inodes list while we dropped the - * inode_sb_list_lock. We cannot iput the inode now as we can - * be holding the last reference and we cannot iput it under - * inode_sb_list_lock. So we keep the reference and iput it - * later. - */ - iput(old_inode); - old_inode = inode; - - func(I_BDEV(inode), arg); - - spin_lock(&inode_sb_list_lock); - } - spin_unlock(&inode_sb_list_lock); - iput(old_inode); -} diff --git a/trunk/fs/btrfs/inode.c b/trunk/fs/btrfs/inode.c index fb8d671d00e6..a7d1921ac76b 100644 --- a/trunk/fs/btrfs/inode.c +++ b/trunk/fs/btrfs/inode.c @@ -4247,7 +4247,7 @@ static void btrfs_dentry_release(struct dentry *dentry) } static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { struct dentry *ret; @@ -4893,7 +4893,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, } static int btrfs_create(struct inode *dir, struct dentry *dentry, - umode_t mode, bool excl) + umode_t mode, struct nameidata *nd) { struct btrfs_trans_handle *trans; struct btrfs_root *root = BTRFS_I(dir)->root; @@ -6987,7 +6987,7 @@ void btrfs_destroy_inode(struct inode *inode) struct btrfs_ordered_extent *ordered; struct btrfs_root *root = BTRFS_I(inode)->root; - WARN_ON(!hlist_empty(&inode->i_dentry)); + WARN_ON(!list_empty(&inode->i_dentry)); WARN_ON(inode->i_data.nrpages); WARN_ON(BTRFS_I(inode)->outstanding_extents); WARN_ON(BTRFS_I(inode)->reserved_extents); diff --git a/trunk/fs/btrfs/ioctl.c b/trunk/fs/btrfs/ioctl.c index 1e9f6c019ad0..0e92e5763005 100644 --- a/trunk/fs/btrfs/ioctl.c +++ b/trunk/fs/btrfs/ioctl.c @@ -3268,7 +3268,7 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg) if (fs_info->sb->s_flags & MS_RDONLY) return -EROFS; - ret = mnt_want_write_file(file); + ret = mnt_want_write(file->f_path.mnt); if (ret) return ret; @@ -3338,7 +3338,7 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg) out: mutex_unlock(&fs_info->balance_mutex); mutex_unlock(&fs_info->volume_mutex); - mnt_drop_write_file(file); + mnt_drop_write(file->f_path.mnt); return ret; } diff --git a/trunk/fs/btrfs/super.c b/trunk/fs/btrfs/super.c index b19d75567728..e23991574fdf 100644 --- a/trunk/fs/btrfs/super.c +++ b/trunk/fs/btrfs/super.c @@ -1068,8 +1068,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, } bdev = fs_devices->latest_bdev; - s = sget(fs_type, btrfs_test_super, btrfs_set_super, flags | MS_NOSEC, - fs_info); + s = 
sget(fs_type, btrfs_test_super, btrfs_set_super, fs_info); if (IS_ERR(s)) { error = PTR_ERR(s); goto error_close_devices; @@ -1083,6 +1082,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, } else { char b[BDEVNAME_SIZE]; + s->s_flags = flags | MS_NOSEC; strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id)); btrfs_sb(s)->bdev_holder = fs_type; error = btrfs_fill_super(s, fs_devices, data, diff --git a/trunk/fs/cachefiles/namei.c b/trunk/fs/cachefiles/namei.c index b0b5f7cdfffa..7f0771d3894e 100644 --- a/trunk/fs/cachefiles/namei.c +++ b/trunk/fs/cachefiles/namei.c @@ -567,7 +567,7 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent, if (ret < 0) goto create_error; start = jiffies; - ret = vfs_create(dir->d_inode, next, S_IFREG, true); + ret = vfs_create(dir->d_inode, next, S_IFREG, NULL); cachefiles_hist(cachefiles_create_histogram, start); if (ret < 0) goto create_error; diff --git a/trunk/fs/cachefiles/rdwr.c b/trunk/fs/cachefiles/rdwr.c index c0353dfac51f..0e3c0924cc3a 100644 --- a/trunk/fs/cachefiles/rdwr.c +++ b/trunk/fs/cachefiles/rdwr.c @@ -891,7 +891,6 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page) struct cachefiles_cache *cache; mm_segment_t old_fs; struct file *file; - struct path path; loff_t pos, eof; size_t len; void *data; @@ -917,9 +916,10 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page) /* write the page to the backing filesystem and let it store it in its * own time */ - path.mnt = cache->mnt; - path.dentry = object->backer; - file = dentry_open(&path, O_RDWR, cache->cache_cred); + dget(object->backer); + mntget(cache->mnt); + file = dentry_open(object->backer, cache->mnt, O_RDWR, + cache->cache_cred); if (IS_ERR(file)) { ret = PTR_ERR(file); } else { diff --git a/trunk/fs/ceph/dir.c b/trunk/fs/ceph/dir.c index 00894ff9246c..3e8094be4604 100644 --- a/trunk/fs/ceph/dir.c +++ b/trunk/fs/ceph/dir.c @@ -576,7 +576,7 @@ static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry) * the MDS so that it gets our 'caps wanted' value in a single op. */ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); struct ceph_mds_client *mdsc = fsc->mdsc; @@ -594,6 +594,14 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, if (err < 0) return ERR_PTR(err); + /* open (but not create!) intent? */ + if (nd && + (nd->flags & LOOKUP_OPEN) && + !(nd->intent.open.flags & O_CREAT)) { + int mode = nd->intent.open.create_mode & ~current->fs->umask; + return ceph_lookup_open(dir, dentry, nd, mode, 1); + } + /* can we conclude ENOENT locally? 
*/ if (dentry->d_inode == NULL) { struct ceph_inode_info *ci = ceph_inode(dir); @@ -634,51 +642,13 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, return dentry; } -int ceph_atomic_open(struct inode *dir, struct dentry *dentry, - struct file *file, unsigned flags, umode_t mode, - int *opened) -{ - int err; - struct dentry *res = NULL; - - if (!(flags & O_CREAT)) { - if (dentry->d_name.len > NAME_MAX) - return -ENAMETOOLONG; - - err = ceph_init_dentry(dentry); - if (err < 0) - return err; - - return ceph_lookup_open(dir, dentry, file, flags, mode, opened); - } - - if (d_unhashed(dentry)) { - res = ceph_lookup(dir, dentry, 0); - if (IS_ERR(res)) - return PTR_ERR(res); - - if (res) - dentry = res; - } - - /* We don't deal with positive dentries here */ - if (dentry->d_inode) - return finish_no_open(file, res); - - *opened |= FILE_CREATED; - err = ceph_lookup_open(dir, dentry, file, flags, mode, opened); - dput(res); - - return err; -} - /* * If we do a create but get no trace back from the MDS, follow up with * a lookup (the VFS expects us to link up the provided dentry). */ int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry) { - struct dentry *result = ceph_lookup(dir, dentry, 0); + struct dentry *result = ceph_lookup(dir, dentry, NULL); if (result && !IS_ERR(result)) { /* @@ -730,9 +700,25 @@ static int ceph_mknod(struct inode *dir, struct dentry *dentry, } static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool excl) + struct nameidata *nd) { - return ceph_mknod(dir, dentry, mode, 0); + dout("create in dir %p dentry %p name '%.*s'\n", + dir, dentry, dentry->d_name.len, dentry->d_name.name); + + if (ceph_snap(dir) != CEPH_NOSNAP) + return -EROFS; + + if (nd) { + BUG_ON((nd->flags & LOOKUP_OPEN) == 0); + dentry = ceph_lookup_open(dir, dentry, nd, mode, 0); + /* hrm, what should i do here if we get aliased? */ + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + return 0; + } + + /* fall back to mknod */ + return ceph_mknod(dir, dentry, (mode & ~S_IFMT) | S_IFREG, 0); } static int ceph_symlink(struct inode *dir, struct dentry *dentry, @@ -1042,12 +1028,12 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry) /* * Check if cached dentry can be trusted. */ -static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) +static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd) { int valid = 0; struct inode *dir; - if (flags & LOOKUP_RCU) + if (nd && nd->flags & LOOKUP_RCU) return -ECHILD; dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry, @@ -1094,7 +1080,7 @@ static void ceph_d_release(struct dentry *dentry) } static int ceph_snapdir_d_revalidate(struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { /* * Eventually, we'll want to revalidate snapped metadata @@ -1371,7 +1357,6 @@ const struct inode_operations ceph_dir_iops = { .rmdir = ceph_unlink, .rename = ceph_rename, .create = ceph_create, - .atomic_open = ceph_atomic_open, }; const struct dentry_operations ceph_dentry_ops = { diff --git a/trunk/fs/ceph/file.c b/trunk/fs/ceph/file.c index 1b81d6c31878..988d4f302e48 100644 --- a/trunk/fs/ceph/file.c +++ b/trunk/fs/ceph/file.c @@ -213,15 +213,22 @@ int ceph_open(struct inode *inode, struct file *file) * may_open() fails, the struct *file gets cleaned up (i.e. * ceph_release gets called). So fear not! 
*/ -int ceph_lookup_open(struct inode *dir, struct dentry *dentry, - struct file *file, unsigned flags, umode_t mode, - int *opened) +/* + * flags + * path_lookup_open -> LOOKUP_OPEN + * path_lookup_create -> LOOKUP_OPEN|LOOKUP_CREATE + */ +struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry, + struct nameidata *nd, int mode, + int locked_dir) { struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); struct ceph_mds_client *mdsc = fsc->mdsc; + struct file *file; struct ceph_mds_request *req; struct dentry *ret; int err; + int flags = nd->intent.open.flags; dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n", dentry, dentry->d_name.len, dentry->d_name.name, flags, mode); @@ -229,7 +236,7 @@ int ceph_lookup_open(struct inode *dir, struct dentry *dentry, /* do the open */ req = prepare_open_request(dir->i_sb, flags, mode); if (IS_ERR(req)) - return PTR_ERR(req); + return ERR_CAST(req); req->r_dentry = dget(dentry); req->r_num_caps = 2; if (flags & O_CREAT) { @@ -247,17 +254,14 @@ int ceph_lookup_open(struct inode *dir, struct dentry *dentry, err = ceph_handle_notrace_create(dir, dentry); if (err) goto out; - err = finish_open(file, req->r_dentry, ceph_open, opened); + file = lookup_instantiate_filp(nd, req->r_dentry, ceph_open); + if (IS_ERR(file)) + err = PTR_ERR(file); out: ret = ceph_finish_lookup(req, dentry, err); ceph_mdsc_put_request(req); dout("ceph_lookup_open result=%p\n", ret); - - if (IS_ERR(ret)) - return PTR_ERR(ret); - - dput(ret); - return err; + return ret; } int ceph_release(struct inode *inode, struct file *file) diff --git a/trunk/fs/ceph/super.c b/trunk/fs/ceph/super.c index 7076109f014d..1e67dd7305a4 100644 --- a/trunk/fs/ceph/super.c +++ b/trunk/fs/ceph/super.c @@ -871,7 +871,7 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type, if (ceph_test_opt(fsc->client, NOSHARE)) compare_super = NULL; - sb = sget(fs_type, compare_super, ceph_set_super, flags, fsc); + sb = sget(fs_type, compare_super, ceph_set_super, fsc); if (IS_ERR(sb)) { res = ERR_CAST(sb); goto out; diff --git a/trunk/fs/ceph/super.h b/trunk/fs/ceph/super.h index f4d5522cb619..fc35036d258d 100644 --- a/trunk/fs/ceph/super.h +++ b/trunk/fs/ceph/super.h @@ -806,9 +806,9 @@ extern int ceph_copy_from_page_vector(struct page **pages, loff_t off, size_t len); extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags); extern int ceph_open(struct inode *inode, struct file *file); -extern int ceph_lookup_open(struct inode *dir, struct dentry *dentry, - struct file *od, unsigned flags, - umode_t mode, int *opened); +extern struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry, + struct nameidata *nd, int mode, + int locked_dir); extern int ceph_release(struct inode *inode, struct file *filp); /* dir.c */ diff --git a/trunk/fs/cifs/cifsfs.c b/trunk/fs/cifs/cifsfs.c index a7610cfedf0a..8b6e344eb0ba 100644 --- a/trunk/fs/cifs/cifsfs.c +++ b/trunk/fs/cifs/cifsfs.c @@ -257,6 +257,7 @@ cifs_alloc_inode(struct super_block *sb) static void cifs_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); + INIT_LIST_HEAD(&inode->i_dentry); kmem_cache_free(cifs_inode_cachep, CIFS_I(inode)); } @@ -637,10 +638,7 @@ cifs_do_mount(struct file_system_type *fs_type, mnt_data.cifs_sb = cifs_sb; mnt_data.flags = flags; - /* BB should we make this contingent on mount parm? 
*/ - flags |= MS_NODIRATIME | MS_NOATIME; - - sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data); + sb = sget(fs_type, cifs_match_super, cifs_set_super, &mnt_data); if (IS_ERR(sb)) { root = ERR_CAST(sb); cifs_umount(cifs_sb); @@ -651,6 +649,10 @@ cifs_do_mount(struct file_system_type *fs_type, cFYI(1, "Use existing superblock"); cifs_umount(cifs_sb); } else { + sb->s_flags = flags; + /* BB should we make this contingent on mount parm? */ + sb->s_flags |= MS_NODIRATIME | MS_NOATIME; + rc = cifs_read_super(sb); if (rc) { root = ERR_PTR(rc); @@ -776,7 +778,6 @@ struct file_system_type cifs_fs_type = { }; const struct inode_operations cifs_dir_inode_ops = { .create = cifs_create, - .atomic_open = cifs_atomic_open, .lookup = cifs_lookup, .getattr = cifs_getattr, .unlink = cifs_unlink, diff --git a/trunk/fs/cifs/cifsfs.h b/trunk/fs/cifs/cifsfs.h index 1c49c5a9b27a..65365358c976 100644 --- a/trunk/fs/cifs/cifsfs.h +++ b/trunk/fs/cifs/cifsfs.h @@ -45,12 +45,9 @@ extern const struct address_space_operations cifs_addr_ops_smallbuf; extern const struct inode_operations cifs_dir_inode_ops; extern struct inode *cifs_root_iget(struct super_block *); extern int cifs_create(struct inode *, struct dentry *, umode_t, - bool excl); -extern int cifs_atomic_open(struct inode *, struct dentry *, - struct file *, unsigned, umode_t, - int *); + struct nameidata *); extern struct dentry *cifs_lookup(struct inode *, struct dentry *, - unsigned int); + struct nameidata *); extern int cifs_unlink(struct inode *dir, struct dentry *dentry); extern int cifs_hardlink(struct dentry *, struct inode *, struct dentry *); extern int cifs_mknod(struct inode *, struct dentry *, umode_t, dev_t); diff --git a/trunk/fs/cifs/cifssmb.c b/trunk/fs/cifs/cifssmb.c index 4ee522b3f66f..5b400730c213 100644 --- a/trunk/fs/cifs/cifssmb.c +++ b/trunk/fs/cifs/cifssmb.c @@ -86,31 +86,7 @@ static struct { #endif /* CONFIG_CIFS_WEAK_PW_HASH */ #endif /* CIFS_POSIX */ -#ifdef CONFIG_HIGHMEM -/* - * On arches that have high memory, kmap address space is limited. By - * serializing the kmap operations on those arches, we ensure that we don't - * end up with a bunch of threads in writeback with partially mapped page - * arrays, stuck waiting for kmap to come back. That situation prevents - * progress and can deadlock. - */ -static DEFINE_MUTEX(cifs_kmap_mutex); - -static inline void -cifs_kmap_lock(void) -{ - mutex_lock(&cifs_kmap_mutex); -} - -static inline void -cifs_kmap_unlock(void) -{ - mutex_unlock(&cifs_kmap_mutex); -} -#else /* !CONFIG_HIGHMEM */ -#define cifs_kmap_lock() do { ; } while(0) -#define cifs_kmap_unlock() do { ; } while(0) -#endif /* CONFIG_HIGHMEM */ +/* Forward declarations */ /* Mark as invalid, all open files on tree connections since they were closed when session to server was lost */ @@ -1527,9 +1503,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) } /* marshal up the page array */ - cifs_kmap_lock(); len = rdata->marshal_iov(rdata, data_len); - cifs_kmap_unlock(); data_len -= len; /* issue the read if we have any iovecs left to fill */ @@ -2095,9 +2069,7 @@ cifs_async_writev(struct cifs_writedata *wdata) * and set the iov_len properly for each one. It may also set * wdata->bytes too. 
*/ - cifs_kmap_lock(); wdata->marshal_iov(iov, wdata); - cifs_kmap_unlock(); cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes); diff --git a/trunk/fs/cifs/connect.c b/trunk/fs/cifs/connect.c index 94b7788c3189..0ae86ddf2213 100644 --- a/trunk/fs/cifs/connect.c +++ b/trunk/fs/cifs/connect.c @@ -3445,18 +3445,6 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, #define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024) #define CIFS_DEFAULT_NON_POSIX_WSIZE (65536) -/* - * On hosts with high memory, we can't currently support wsize/rsize that are - * larger than we can kmap at once. Cap the rsize/wsize at - * LAST_PKMAP * PAGE_SIZE. We'll never be able to fill a read or write request - * larger than that anyway. - */ -#ifdef CONFIG_HIGHMEM -#define CIFS_KMAP_SIZE_LIMIT (LAST_PKMAP * PAGE_CACHE_SIZE) -#else /* CONFIG_HIGHMEM */ -#define CIFS_KMAP_SIZE_LIMIT (1<<24) -#endif /* CONFIG_HIGHMEM */ - static unsigned int cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info) { @@ -3487,9 +3475,6 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info) wsize = min_t(unsigned int, wsize, server->maxBuf - sizeof(WRITE_REQ) + 4); - /* limit to the amount that we can kmap at once */ - wsize = min_t(unsigned int, wsize, CIFS_KMAP_SIZE_LIMIT); - /* hard limit of CIFS_MAX_WSIZE */ wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE); @@ -3531,9 +3516,6 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info) if (!(server->capabilities & CAP_LARGE_READ_X)) rsize = min_t(unsigned int, CIFSMaxBufSize, rsize); - /* limit to the amount that we can kmap at once */ - rsize = min_t(unsigned int, rsize, CIFS_KMAP_SIZE_LIMIT); - /* hard limit of CIFS_MAX_RSIZE */ rsize = min_t(unsigned int, rsize, CIFS_MAX_RSIZE); diff --git a/trunk/fs/cifs/dir.c b/trunk/fs/cifs/dir.c index a180265a10b5..ec4e9a2a12f8 100644 --- a/trunk/fs/cifs/dir.c +++ b/trunk/fs/cifs/dir.c @@ -133,133 +133,100 @@ build_path_from_dentry(struct dentry *direntry) return full_path; } -/* - * Don't allow the separator character in a path component. - * The VFS will not allow "/", but "\" is allowed by posix. - */ -static int -check_name(struct dentry *direntry) -{ - struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); - int i; - - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) { - for (i = 0; i < direntry->d_name.len; i++) { - if (direntry->d_name.name[i] == '\\') { - cFYI(1, "Invalid file name"); - return -EINVAL; - } - } - } - return 0; -} - - /* Inode operations in similar order to how they appear in Linux file fs.h */ -static int cifs_do_create(struct inode *inode, struct dentry *direntry, - int xid, struct tcon_link *tlink, unsigned oflags, - umode_t mode, __u32 *oplock, __u16 *fileHandle, - int *created) +int +cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode, + struct nameidata *nd) { int rc = -ENOENT; + int xid; int create_options = CREATE_NOT_DIR; - int desiredAccess; - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); - struct cifs_tcon *tcon = tlink_tcon(tlink); + __u32 oplock = 0; + int oflags; + /* + * BB below access is probably too much for mknod to request + * but we have to do query and setpathinfo so requesting + * less could fail (unless we want to request getatr and setatr + * permissions (only). At least for POSIX we do not have to + * request so much. 
+ */ + int desiredAccess = GENERIC_READ | GENERIC_WRITE; + __u16 fileHandle; + struct cifs_sb_info *cifs_sb; + struct tcon_link *tlink; + struct cifs_tcon *tcon; char *full_path = NULL; FILE_ALL_INFO *buf = NULL; struct inode *newinode = NULL; - int disposition; + int disposition = FILE_OVERWRITE_IF; + + xid = GetXid(); + + cifs_sb = CIFS_SB(inode->i_sb); + tlink = cifs_sb_tlink(cifs_sb); + if (IS_ERR(tlink)) { + FreeXid(xid); + return PTR_ERR(tlink); + } + tcon = tlink_tcon(tlink); - *oplock = 0; if (tcon->ses->server->oplocks) - *oplock = REQ_OPLOCK; + oplock = REQ_OPLOCK; + + if (nd) + oflags = nd->intent.open.file->f_flags; + else + oflags = O_RDONLY | O_CREAT; full_path = build_path_from_dentry(direntry); if (full_path == NULL) { rc = -ENOMEM; - goto out; + goto cifs_create_out; } if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) && - !tcon->broken_posix_open && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { rc = cifs_posix_open(full_path, &newinode, - inode->i_sb, mode, oflags, oplock, fileHandle, xid); - switch (rc) { - case 0: - if (newinode == NULL) { - /* query inode info */ + inode->i_sb, mode, oflags, &oplock, &fileHandle, xid); + /* EIO could indicate that (posix open) operation is not + supported, despite what server claimed in capability + negotiation. EREMOTE indicates DFS junction, which is not + handled in posix open */ + + if (rc == 0) { + if (newinode == NULL) /* query inode info */ goto cifs_create_get_file_info; - } - - if (!S_ISREG(newinode->i_mode)) { - /* - * The server may allow us to open things like - * FIFOs, but the client isn't set up to deal - * with that. If it's not a regular file, just - * close it and proceed as if it were a normal - * lookup. - */ - CIFSSMBClose(xid, tcon, *fileHandle); - goto cifs_create_get_file_info; - } - /* success, no need to query */ - goto cifs_create_set_dentry; - - case -ENOENT: - goto cifs_create_get_file_info; - - case -EIO: - case -EINVAL: - /* - * EIO could indicate that (posix open) operation is not - * supported, despite what server claimed in capability - * negotiation. - * - * POSIX open in samba versions 3.3.1 and earlier could - * incorrectly fail with invalid parameter. - */ - tcon->broken_posix_open = true; - break; - - case -EREMOTE: - case -EOPNOTSUPP: - /* - * EREMOTE indicates DFS junction, which is not handled - * in posix open. If either that or op not supported - * returned, follow the normal lookup. - */ - break; - - default: - goto out; - } - /* - * fallthrough to retry, using older open call, this is case - * where server does not support this SMB level, and falsely - * claims capability (also get here for DFS case which should be - * rare for path not covered on files) - */ + else /* success, no need to query */ + goto cifs_create_set_dentry; + } else if ((rc != -EIO) && (rc != -EREMOTE) && + (rc != -EOPNOTSUPP) && (rc != -EINVAL)) + goto cifs_create_out; + /* else fallthrough to retry, using older open call, this is + case where server does not support this SMB level, and + falsely claims capability (also get here for DFS case + which should be rare for path not covered on files) */ } - desiredAccess = 0; - if (OPEN_FMODE(oflags) & FMODE_READ) - desiredAccess |= GENERIC_READ; /* is this too little? 
*/ - if (OPEN_FMODE(oflags) & FMODE_WRITE) - desiredAccess |= GENERIC_WRITE; - - disposition = FILE_OVERWRITE_IF; - if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) - disposition = FILE_CREATE; - else if ((oflags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC)) - disposition = FILE_OVERWRITE_IF; - else if ((oflags & O_CREAT) == O_CREAT) - disposition = FILE_OPEN_IF; - else - cFYI(1, "Create flag not set in create function"); + if (nd) { + /* if the file is going to stay open, then we + need to set the desired access properly */ + desiredAccess = 0; + if (OPEN_FMODE(oflags) & FMODE_READ) + desiredAccess |= GENERIC_READ; /* is this too little? */ + if (OPEN_FMODE(oflags) & FMODE_WRITE) + desiredAccess |= GENERIC_WRITE; + + if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) + disposition = FILE_CREATE; + else if ((oflags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC)) + disposition = FILE_OVERWRITE_IF; + else if ((oflags & O_CREAT) == O_CREAT) + disposition = FILE_OPEN_IF; + else + cFYI(1, "Create flag not set in create function"); + } /* BB add processing to set equivalent of mode - e.g. via CreateX with ACLs */ @@ -267,7 +234,7 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); if (buf == NULL) { rc = -ENOMEM; - goto out; + goto cifs_create_out; } /* @@ -283,7 +250,7 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, if (tcon->ses->capabilities & CAP_NT_SMBS) rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess, create_options, - fileHandle, oplock, buf, cifs_sb->local_nls, + &fileHandle, &oplock, buf, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); else rc = -EIO; /* no NT SMB support fall into legacy open below */ @@ -292,17 +259,17 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, /* old server, retry the open legacy style */ rc = SMBLegacyOpen(xid, tcon, full_path, disposition, desiredAccess, create_options, - fileHandle, oplock, buf, cifs_sb->local_nls, + &fileHandle, &oplock, buf, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); } if (rc) { cFYI(1, "cifs_create returned 0x%x", rc); - goto out; + goto cifs_create_out; } /* If Open reported that we actually created a file then we now have to set the mode if possible */ - if ((tcon->unix_ext) && (*oplock & CIFS_CREATE_ACTION)) { + if ((tcon->unix_ext) && (oplock & CIFS_CREATE_ACTION)) { struct cifs_unix_set_info_args args = { .mode = mode, .ctime = NO_CHANGE_64, @@ -311,7 +278,6 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, .device = 0, }; - *created |= FILE_CREATED; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { args.uid = (__u64) current_fsuid(); if (inode->i_mode & S_ISGID) @@ -322,7 +288,7 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, args.uid = NO_CHANGE_64; args.gid = NO_CHANGE_64; } - CIFSSMBUnixSetFileInfo(xid, tcon, &args, *fileHandle, + CIFSSMBUnixSetFileInfo(xid, tcon, &args, fileHandle, current->tgid); } else { /* BB implement mode setting via Windows security @@ -339,11 +305,11 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, inode->i_sb, xid); else { rc = cifs_get_inode_info(&newinode, full_path, buf, - inode->i_sb, xid, fileHandle); + inode->i_sb, xid, &fileHandle); if (newinode) { if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) newinode->i_mode = mode; - if ((*oplock & CIFS_CREATE_ACTION) && + if ((oplock & CIFS_CREATE_ACTION) && 
(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)) { newinode->i_uid = current_fsuid(); if (inode->i_mode & S_ISGID) @@ -355,136 +321,37 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, } cifs_create_set_dentry: - if (rc != 0) { + if (rc == 0) + d_instantiate(direntry, newinode); + else cFYI(1, "Create worked, get_inode_info failed rc = %d", rc); - goto out; - } - d_drop(direntry); - d_add(direntry, newinode); - /* ENOENT for create? How weird... */ - rc = -ENOENT; - if (!newinode) { - CIFSSMBClose(xid, tcon, *fileHandle); - goto out; - } - rc = 0; + if (newinode && nd) { + struct cifsFileInfo *pfile_info; + struct file *filp; -out: - kfree(buf); - kfree(full_path); - return rc; -} - -int -cifs_atomic_open(struct inode *inode, struct dentry *direntry, - struct file *file, unsigned oflags, umode_t mode, - int *opened) -{ - int rc; - int xid; - struct tcon_link *tlink; - struct cifs_tcon *tcon; - __u16 fileHandle; - __u32 oplock; - struct file *filp; - struct cifsFileInfo *pfile_info; - - /* Posix open is only called (at lookup time) for file create now. For - * opens (rather than creates), because we do not know if it is a file - * or directory yet, and current Samba no longer allows us to do posix - * open on dirs, we could end up wasting an open call on what turns out - * to be a dir. For file opens, we wait to call posix open till - * cifs_open. It could be added to atomic_open in the future but the - * performance tradeoff of the extra network request when EISDIR or - * EACCES is returned would have to be weighed against the 50% reduction - * in network traffic in the other paths. - */ - if (!(oflags & O_CREAT)) { - struct dentry *res = cifs_lookup(inode, direntry, 0); - if (IS_ERR(res)) - return PTR_ERR(res); - - return finish_no_open(file, res); - } - - rc = check_name(direntry); - if (rc) - return rc; - - xid = GetXid(); - - cFYI(1, "parent inode = 0x%p name is: %s and dentry = 0x%p", - inode, direntry->d_name.name, direntry); - - tlink = cifs_sb_tlink(CIFS_SB(inode->i_sb)); - filp = ERR_CAST(tlink); - if (IS_ERR(tlink)) - goto free_xid; - - tcon = tlink_tcon(tlink); - - rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode, - &oplock, &fileHandle, opened); - - if (rc) - goto out; - - rc = finish_open(file, direntry, generic_file_open, opened); - if (rc) { - CIFSSMBClose(xid, tcon, fileHandle); - goto out; - } + filp = lookup_instantiate_filp(nd, direntry, generic_file_open); + if (IS_ERR(filp)) { + rc = PTR_ERR(filp); + CIFSSMBClose(xid, tcon, fileHandle); + goto cifs_create_out; + } - pfile_info = cifs_new_fileinfo(fileHandle, filp, tlink, oplock); - if (pfile_info == NULL) { + pfile_info = cifs_new_fileinfo(fileHandle, filp, tlink, oplock); + if (pfile_info == NULL) { + fput(filp); + CIFSSMBClose(xid, tcon, fileHandle); + rc = -ENOMEM; + } + } else { CIFSSMBClose(xid, tcon, fileHandle); - fput(filp); - rc = -ENOMEM; } -out: - cifs_put_tlink(tlink); -free_xid: - FreeXid(xid); - return rc; -} - -int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode, - bool excl) -{ - int rc; - int xid = GetXid(); - /* - * BB below access is probably too much for mknod to request - * but we have to do query and setpathinfo so requesting - * less could fail (unless we want to request getatr and setatr - * permissions (only). At least for POSIX we do not have to - * request so much. 
- */ - unsigned oflags = O_EXCL | O_CREAT | O_RDWR; - struct tcon_link *tlink; - __u16 fileHandle; - __u32 oplock; - int created = FILE_CREATED; - - cFYI(1, "cifs_create parent inode = 0x%p name is: %s and dentry = 0x%p", - inode, direntry->d_name.name, direntry); - - tlink = cifs_sb_tlink(CIFS_SB(inode->i_sb)); - rc = PTR_ERR(tlink); - if (IS_ERR(tlink)) - goto free_xid; - - rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode, - &oplock, &fileHandle, &created); - if (!rc) - CIFSSMBClose(xid, tlink_tcon(tlink), fileHandle); - +cifs_create_out: + kfree(buf); + kfree(full_path); cifs_put_tlink(tlink); -free_xid: FreeXid(xid); - return rc; } @@ -621,15 +488,20 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode, struct dentry * cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, - unsigned int flags) + struct nameidata *nd) { int xid; int rc = 0; /* to get around spurious gcc warning, set to zero here */ + __u32 oplock; + __u16 fileHandle = 0; + bool posix_open = false; struct cifs_sb_info *cifs_sb; struct tcon_link *tlink; struct cifs_tcon *pTcon; + struct cifsFileInfo *cfile; struct inode *newInode = NULL; char *full_path = NULL; + struct file *filp; xid = GetXid(); @@ -646,9 +518,31 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, } pTcon = tlink_tcon(tlink); - rc = check_name(direntry); - if (rc) + oplock = pTcon->ses->server->oplocks ? REQ_OPLOCK : 0; + + /* + * Don't allow the separator character in a path component. + * The VFS will not allow "/", but "\" is allowed by posix. + */ + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) { + int i; + for (i = 0; i < direntry->d_name.len; i++) + if (direntry->d_name.name[i] == '\\') { + cFYI(1, "Invalid file name"); + rc = -EINVAL; + goto lookup_out; + } + } + + /* + * O_EXCL: optimize away the lookup, but don't hash the dentry. Let + * the VFS handle the create. + */ + if (nd && (nd->flags & LOOKUP_EXCL)) { + d_instantiate(direntry, NULL); + rc = 0; goto lookup_out; + } /* can not grab the rename sem here since it would deadlock in the cases (beginning of sys_rename itself) @@ -666,16 +560,80 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, } cFYI(1, "Full path: %s inode = 0x%p", full_path, direntry->d_inode); + /* Posix open is only called (at lookup time) for file create now. + * For opens (rather than creates), because we do not know if it + * is a file or directory yet, and current Samba no longer allows + * us to do posix open on dirs, we could end up wasting an open call + * on what turns out to be a dir. For file opens, we wait to call posix + * open till cifs_open. It could be added here (lookup) in the future + * but the performance tradeoff of the extra network request when EISDIR + * or EACCES is returned would have to be weighed against the 50% + * reduction in network traffic in the other paths. + */ if (pTcon->unix_ext) { - rc = cifs_get_inode_info_unix(&newInode, full_path, - parent_dir_inode->i_sb, xid); - } else { + if (nd && !(nd->flags & LOOKUP_DIRECTORY) && + (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open && + (nd->intent.open.file->f_flags & O_CREAT)) { + rc = cifs_posix_open(full_path, &newInode, + parent_dir_inode->i_sb, + nd->intent.open.create_mode, + nd->intent.open.file->f_flags, &oplock, + &fileHandle, xid); + /* + * The check below works around a bug in POSIX + * open in samba versions 3.3.1 and earlier where + * open could incorrectly fail with invalid parameter. 
+ * If either that or op not supported returned, follow + * the normal lookup. + */ + switch (rc) { + case 0: + /* + * The server may allow us to open things like + * FIFOs, but the client isn't set up to deal + * with that. If it's not a regular file, just + * close it and proceed as if it were a normal + * lookup. + */ + if (newInode && !S_ISREG(newInode->i_mode)) { + CIFSSMBClose(xid, pTcon, fileHandle); + break; + } + case -ENOENT: + posix_open = true; + case -EOPNOTSUPP: + break; + default: + pTcon->broken_posix_open = true; + } + } + if (!posix_open) + rc = cifs_get_inode_info_unix(&newInode, full_path, + parent_dir_inode->i_sb, xid); + } else rc = cifs_get_inode_info(&newInode, full_path, NULL, parent_dir_inode->i_sb, xid, NULL); - } if ((rc == 0) && (newInode != NULL)) { d_add(direntry, newInode); + if (posix_open) { + filp = lookup_instantiate_filp(nd, direntry, + generic_file_open); + if (IS_ERR(filp)) { + rc = PTR_ERR(filp); + CIFSSMBClose(xid, pTcon, fileHandle); + goto lookup_out; + } + + cfile = cifs_new_fileinfo(fileHandle, filp, tlink, + oplock); + if (cfile == NULL) { + fput(filp); + CIFSSMBClose(xid, pTcon, fileHandle); + rc = -ENOMEM; + goto lookup_out; + } + } /* since paths are not looked up by component - the parent directories are presumed to be good here */ renew_parental_timestamps(direntry); @@ -700,9 +658,9 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, } static int -cifs_d_revalidate(struct dentry *direntry, unsigned int flags) +cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd) { - if (flags & LOOKUP_RCU) + if (nd && (nd->flags & LOOKUP_RCU)) return -ECHILD; if (direntry->d_inode) { @@ -731,7 +689,7 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags) * This may be nfsd (or something), anyway, we can't see the * intent of this. So, since this can be for creation, drop it. */ - if (!flags) + if (!nd) return 0; /* @@ -739,7 +697,7 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags) * case sensitive name which is specified by user if this is * for creation. 
*/ - if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) + if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) return 0; if (time_after(jiffies, direntry->d_time + HZ) || !lookupCacheEnabled) diff --git a/trunk/fs/cifs/inode.c b/trunk/fs/cifs/inode.c index 8e8bb49112ff..745da3d0653e 100644 --- a/trunk/fs/cifs/inode.c +++ b/trunk/fs/cifs/inode.c @@ -800,7 +800,7 @@ cifs_find_inode(struct inode *inode, void *opaque) return 0; /* if it's not a directory or has no dentries, then flag it */ - if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) + if (S_ISDIR(inode->i_mode) && !list_empty(&inode->i_dentry)) fattr->cf_flags |= CIFS_FATTR_INO_COLLISION; return 1; @@ -825,10 +825,9 @@ static bool inode_has_hashed_dentries(struct inode *inode) { struct dentry *dentry; - struct hlist_node *p; spin_lock(&inode->i_lock); - hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) { + list_for_each_entry(dentry, &inode->i_dentry, d_alias) { if (!d_unhashed(dentry) || IS_ROOT(dentry)) { spin_unlock(&inode->i_lock); return true; diff --git a/trunk/fs/cifs/readdir.c b/trunk/fs/cifs/readdir.c index a4217f02fab2..0a8224d1c4c5 100644 --- a/trunk/fs/cifs/readdir.c +++ b/trunk/fs/cifs/readdir.c @@ -86,12 +86,9 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name, dentry = d_lookup(parent, name); if (dentry) { - inode = dentry->d_inode; - /* update inode in place if i_ino didn't change */ - if (inode && CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) { - cifs_fattr_to_inode(inode, fattr); + /* FIXME: check for inode number changes? */ + if (dentry->d_inode != NULL) return dentry; - } d_drop(dentry); dput(dentry); } diff --git a/trunk/fs/cifs/transport.c b/trunk/fs/cifs/transport.c index f25d4ea14be4..3097ee58fd7d 100644 --- a/trunk/fs/cifs/transport.c +++ b/trunk/fs/cifs/transport.c @@ -365,14 +365,16 @@ cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov, if (mid == NULL) return -ENOMEM; - rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number); - if (rc) { - DeleteMidQEntry(mid); - return rc; - } + /* put it on the pending_mid_q */ + spin_lock(&GlobalMid_Lock); + list_add_tail(&mid->qhead, &server->pending_mid_q); + spin_unlock(&GlobalMid_Lock); + rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number); + if (rc) + delete_mid(mid); *ret_mid = mid; - return 0; + return rc; } /* @@ -405,21 +407,17 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov, mid->callback_data = cbdata; mid->mid_state = MID_REQUEST_SUBMITTED; - /* put it on the pending_mid_q */ - spin_lock(&GlobalMid_Lock); - list_add_tail(&mid->qhead, &server->pending_mid_q); - spin_unlock(&GlobalMid_Lock); - - cifs_in_send_inc(server); rc = smb_sendv(server, iov, nvec); cifs_in_send_dec(server); cifs_save_when_sent(mid); mutex_unlock(&server->srv_mutex); - if (rc == 0) - return 0; + if (rc) + goto out_err; + return rc; +out_err: delete_mid(mid); add_credits(server, 1); wake_up(&server->request_q); diff --git a/trunk/fs/coda/cache.c b/trunk/fs/coda/cache.c index 958ae0e0ff8c..690157876184 100644 --- a/trunk/fs/coda/cache.c +++ b/trunk/fs/coda/cache.c @@ -89,13 +89,17 @@ int coda_cache_check(struct inode *inode, int mask) /* this won't do any harm: just flag all children */ static void coda_flag_children(struct dentry *parent, int flag) { + struct list_head *child; struct dentry *de; spin_lock(&parent->d_lock); - list_for_each_entry(de, &parent->d_subdirs, d_u.d_child) { + list_for_each(child, &parent->d_subdirs) + { + de = list_entry(child, struct dentry, d_u.d_child); /* don't 
know what to do with negative dentries */ - if (de->d_inode ) - coda_flag_inode(de->d_inode, flag); + if ( ! de->d_inode ) + continue; + coda_flag_inode(de->d_inode, flag); } spin_unlock(&parent->d_lock); return; diff --git a/trunk/fs/coda/dir.c b/trunk/fs/coda/dir.c index 49fe52d25600..177515829062 100644 --- a/trunk/fs/coda/dir.c +++ b/trunk/fs/coda/dir.c @@ -30,8 +30,8 @@ #include "coda_int.h" /* dir inode-ops */ -static int coda_create(struct inode *dir, struct dentry *new, umode_t mode, bool excl); -static struct dentry *coda_lookup(struct inode *dir, struct dentry *target, unsigned int flags); +static int coda_create(struct inode *dir, struct dentry *new, umode_t mode, struct nameidata *nd); +static struct dentry *coda_lookup(struct inode *dir, struct dentry *target, struct nameidata *nd); static int coda_link(struct dentry *old_dentry, struct inode *dir_inode, struct dentry *entry); static int coda_unlink(struct inode *dir_inode, struct dentry *entry); @@ -46,7 +46,7 @@ static int coda_rename(struct inode *old_inode, struct dentry *old_dentry, static int coda_readdir(struct file *file, void *buf, filldir_t filldir); /* dentry ops */ -static int coda_dentry_revalidate(struct dentry *de, unsigned int flags); +static int coda_dentry_revalidate(struct dentry *de, struct nameidata *nd); static int coda_dentry_delete(const struct dentry *); /* support routines */ @@ -94,7 +94,7 @@ const struct file_operations coda_dir_operations = { /* inode operations for directories */ /* access routines: lookup, readlink, permission */ -static struct dentry *coda_lookup(struct inode *dir, struct dentry *entry, unsigned int flags) +static struct dentry *coda_lookup(struct inode *dir, struct dentry *entry, struct nameidata *nd) { struct super_block *sb = dir->i_sb; const char *name = entry->d_name.name; @@ -188,7 +188,7 @@ static inline void coda_dir_drop_nlink(struct inode *dir) } /* creation routines: create, mknod, mkdir, link, symlink */ -static int coda_create(struct inode *dir, struct dentry *de, umode_t mode, bool excl) +static int coda_create(struct inode *dir, struct dentry *de, umode_t mode, struct nameidata *nd) { int error; const char *name=de->d_name.name; @@ -536,12 +536,12 @@ static int coda_venus_readdir(struct file *coda_file, void *buf, } /* called when a cache lookup succeeds */ -static int coda_dentry_revalidate(struct dentry *de, unsigned int flags) +static int coda_dentry_revalidate(struct dentry *de, struct nameidata *nd) { struct inode *inode; struct coda_inode_info *cii; - if (flags & LOOKUP_RCU) + if (nd->flags & LOOKUP_RCU) return -ECHILD; inode = de->d_inode; diff --git a/trunk/fs/configfs/dir.c b/trunk/fs/configfs/dir.c index 7414ae24a79b..7e6c52d8a207 100644 --- a/trunk/fs/configfs/dir.c +++ b/trunk/fs/configfs/dir.c @@ -442,7 +442,7 @@ static int configfs_attach_attr(struct configfs_dirent * sd, struct dentry * den static struct dentry * configfs_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { struct configfs_dirent * parent_sd = dentry->d_parent->d_fsdata; struct configfs_dirent * sd; diff --git a/trunk/fs/cramfs/inode.c b/trunk/fs/cramfs/inode.c index 28cca01ca9c9..d013c46402ed 100644 --- a/trunk/fs/cramfs/inode.c +++ b/trunk/fs/cramfs/inode.c @@ -417,7 +417,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir) /* * Lookup and fill in the inode data.. 
*/ -static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) +static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { unsigned int offset = 0; struct inode *inode = NULL; diff --git a/trunk/fs/dcache.c b/trunk/fs/dcache.c index 8086636bf796..40469044088d 100644 --- a/trunk/fs/dcache.c +++ b/trunk/fs/dcache.c @@ -218,7 +218,7 @@ static void __d_free(struct rcu_head *head) { struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu); - WARN_ON(!hlist_unhashed(&dentry->d_alias)); + WARN_ON(!list_empty(&dentry->d_alias)); if (dname_external(dentry)) kfree(dentry->d_name.name); kmem_cache_free(dentry_cache, dentry); @@ -267,7 +267,7 @@ static void dentry_iput(struct dentry * dentry) struct inode *inode = dentry->d_inode; if (inode) { dentry->d_inode = NULL; - hlist_del_init(&dentry->d_alias); + list_del_init(&dentry->d_alias); spin_unlock(&dentry->d_lock); spin_unlock(&inode->i_lock); if (!inode->i_nlink) @@ -291,7 +291,7 @@ static void dentry_unlink_inode(struct dentry * dentry) { struct inode *inode = dentry->d_inode; dentry->d_inode = NULL; - hlist_del_init(&dentry->d_alias); + list_del_init(&dentry->d_alias); dentry_rcuwalk_barrier(dentry); spin_unlock(&dentry->d_lock); spin_unlock(&inode->i_lock); @@ -699,11 +699,10 @@ EXPORT_SYMBOL(dget_parent); static struct dentry *__d_find_alias(struct inode *inode, int want_discon) { struct dentry *alias, *discon_alias; - struct hlist_node *p; again: discon_alias = NULL; - hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) { + list_for_each_entry(alias, &inode->i_dentry, d_alias) { spin_lock(&alias->d_lock); if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { if (IS_ROOT(alias) && @@ -738,7 +737,7 @@ struct dentry *d_find_alias(struct inode *inode) { struct dentry *de = NULL; - if (!hlist_empty(&inode->i_dentry)) { + if (!list_empty(&inode->i_dentry)) { spin_lock(&inode->i_lock); de = __d_find_alias(inode, 0); spin_unlock(&inode->i_lock); @@ -754,10 +753,9 @@ EXPORT_SYMBOL(d_find_alias); void d_prune_aliases(struct inode *inode) { struct dentry *dentry; - struct hlist_node *p; restart: spin_lock(&inode->i_lock); - hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) { + list_for_each_entry(dentry, &inode->i_dentry, d_alias) { spin_lock(&dentry->d_lock); if (!dentry->d_count) { __dget_dlock(dentry); @@ -979,7 +977,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry) inode = dentry->d_inode; if (inode) { dentry->d_inode = NULL; - hlist_del_init(&dentry->d_alias); + list_del_init(&dentry->d_alias); if (dentry->d_op && dentry->d_op->d_iput) dentry->d_op->d_iput(dentry, inode); else @@ -1314,7 +1312,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) INIT_HLIST_BL_NODE(&dentry->d_hash); INIT_LIST_HEAD(&dentry->d_lru); INIT_LIST_HEAD(&dentry->d_subdirs); - INIT_HLIST_NODE(&dentry->d_alias); + INIT_LIST_HEAD(&dentry->d_alias); INIT_LIST_HEAD(&dentry->d_u.d_child); d_set_d_op(dentry, dentry->d_sb->s_d_op); @@ -1402,7 +1400,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode) if (inode) { if (unlikely(IS_AUTOMOUNT(inode))) dentry->d_flags |= DCACHE_NEED_AUTOMOUNT; - hlist_add_head(&dentry->d_alias, &inode->i_dentry); + list_add(&dentry->d_alias, &inode->i_dentry); } dentry->d_inode = inode; dentry_rcuwalk_barrier(dentry); @@ -1427,7 +1425,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode) void d_instantiate(struct dentry *entry, struct inode * inode) { - 
BUG_ON(!hlist_unhashed(&entry->d_alias)); + BUG_ON(!list_empty(&entry->d_alias)); if (inode) spin_lock(&inode->i_lock); __d_instantiate(entry, inode); @@ -1460,14 +1458,13 @@ static struct dentry *__d_instantiate_unique(struct dentry *entry, int len = entry->d_name.len; const char *name = entry->d_name.name; unsigned int hash = entry->d_name.hash; - struct hlist_node *p; if (!inode) { __d_instantiate(entry, NULL); return NULL; } - hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) { + list_for_each_entry(alias, &inode->i_dentry, d_alias) { /* * Don't need alias->d_lock here, because aliases with * d_parent == entry->d_parent are not subject to name or @@ -1493,7 +1490,7 @@ struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode) { struct dentry *result; - BUG_ON(!hlist_unhashed(&entry->d_alias)); + BUG_ON(!list_empty(&entry->d_alias)); if (inode) spin_lock(&inode->i_lock); @@ -1534,9 +1531,9 @@ static struct dentry * __d_find_any_alias(struct inode *inode) { struct dentry *alias; - if (hlist_empty(&inode->i_dentry)) + if (list_empty(&inode->i_dentry)) return NULL; - alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias); + alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias); __dget(alias); return alias; } @@ -1610,7 +1607,7 @@ struct dentry *d_obtain_alias(struct inode *inode) spin_lock(&tmp->d_lock); tmp->d_inode = inode; tmp->d_flags |= DCACHE_DISCONNECTED; - hlist_add_head(&tmp->d_alias, &inode->i_dentry); + list_add(&tmp->d_alias, &inode->i_dentry); hlist_bl_lock(&tmp->d_sb->s_anon); hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon); hlist_bl_unlock(&tmp->d_sb->s_anon); @@ -2387,13 +2384,14 @@ static struct dentry *__d_unalias(struct inode *inode, struct dentry *dentry, struct dentry *alias) { struct mutex *m1 = NULL, *m2 = NULL; - struct dentry *ret = ERR_PTR(-EBUSY); + struct dentry *ret; /* If alias and dentry share a parent, then no extra locks required */ if (alias->d_parent == dentry->d_parent) goto out_unalias; /* See lock_rename() */ + ret = ERR_PTR(-EBUSY); if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex)) goto out_err; m1 = &dentry->d_sb->s_vfs_rename_mutex; @@ -2401,10 +2399,8 @@ static struct dentry *__d_unalias(struct inode *inode, goto out_err; m2 = &alias->d_parent->d_inode->i_mutex; out_unalias: - if (likely(!d_mountpoint(alias))) { - __d_move(alias, dentry); - ret = alias; - } + __d_move(alias, dentry); + ret = alias; out_err: spin_unlock(&inode->i_lock); if (m2) @@ -2626,7 +2622,7 @@ static int prepend_path(const struct path *path, if (!slash) error = prepend(buffer, buflen, "/", 1); if (!error) - error = is_mounted(vfsmnt) ? 1 : 2; + error = real_mount(vfsmnt)->mnt_ns ? 1 : 2; goto out; } diff --git a/trunk/fs/debugfs/inode.c b/trunk/fs/debugfs/inode.c index d17c20fd74e6..b80bc846a15a 100644 --- a/trunk/fs/debugfs/inode.c +++ b/trunk/fs/debugfs/inode.c @@ -54,12 +54,13 @@ static struct inode *debugfs_get_inode(struct super_block *sb, umode_t mode, dev break; case S_IFLNK: inode->i_op = &debugfs_link_operations; + inode->i_fop = fops; inode->i_private = data; break; case S_IFDIR: inode->i_op = &simple_dir_inode_operations; - inode->i_fop = &simple_dir_operations; - inode->i_private = NULL; + inode->i_fop = fops ? fops : &simple_dir_operations; + inode->i_private = data; /* directory inodes start off with i_nlink == 2 * (for "." 
entry) */ @@ -90,12 +91,13 @@ static int debugfs_mknod(struct inode *dir, struct dentry *dentry, return error; } -static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) +static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode, + void *data, const struct file_operations *fops) { int res; mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR; - res = debugfs_mknod(dir, dentry, mode, 0, NULL, NULL); + res = debugfs_mknod(dir, dentry, mode, 0, data, fops); if (!res) { inc_nlink(dir); fsnotify_mkdir(dir, dentry); @@ -104,10 +106,10 @@ static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) } static int debugfs_link(struct inode *dir, struct dentry *dentry, umode_t mode, - void *data) + void *data, const struct file_operations *fops) { mode = (mode & S_IALLUGO) | S_IFLNK; - return debugfs_mknod(dir, dentry, mode, 0, data, NULL); + return debugfs_mknod(dir, dentry, mode, 0, data, fops); } static int debugfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, @@ -291,19 +293,13 @@ static struct file_system_type debug_fs_type = { .kill_sb = kill_litter_super, }; -struct dentry *__create_file(const char *name, umode_t mode, - struct dentry *parent, void *data, - const struct file_operations *fops) +static int debugfs_create_by_name(const char *name, umode_t mode, + struct dentry *parent, + struct dentry **dentry, + void *data, + const struct file_operations *fops) { - struct dentry *dentry = NULL; - int error; - - pr_debug("debugfs: creating file '%s'\n",name); - - error = simple_pin_fs(&debug_fs_type, &debugfs_mount, - &debugfs_mount_count); - if (error) - goto exit; + int error = 0; /* If the parent is not specified, we create it in the root. * We need the root dentry to do this, which is in the super @@ -313,35 +309,30 @@ struct dentry *__create_file(const char *name, umode_t mode, if (!parent) parent = debugfs_mount->mnt_root; - dentry = NULL; + *dentry = NULL; mutex_lock(&parent->d_inode->i_mutex); - dentry = lookup_one_len(name, parent, strlen(name)); - if (!IS_ERR(dentry)) { + *dentry = lookup_one_len(name, parent, strlen(name)); + if (!IS_ERR(*dentry)) { switch (mode & S_IFMT) { case S_IFDIR: - error = debugfs_mkdir(parent->d_inode, dentry, mode); - + error = debugfs_mkdir(parent->d_inode, *dentry, mode, + data, fops); break; case S_IFLNK: - error = debugfs_link(parent->d_inode, dentry, mode, - data); + error = debugfs_link(parent->d_inode, *dentry, mode, + data, fops); break; default: - error = debugfs_create(parent->d_inode, dentry, mode, + error = debugfs_create(parent->d_inode, *dentry, mode, data, fops); break; } - dput(dentry); + dput(*dentry); } else - error = PTR_ERR(dentry); + error = PTR_ERR(*dentry); mutex_unlock(&parent->d_inode->i_mutex); - if (error) { - dentry = NULL; - simple_release_fs(&debugfs_mount, &debugfs_mount_count); - } -exit: - return dentry; + return error; } /** @@ -374,15 +365,25 @@ struct dentry *debugfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) { - switch (mode & S_IFMT) { - case S_IFREG: - case 0: - break; - default: - BUG(); - } + struct dentry *dentry = NULL; + int error; - return __create_file(name, mode, parent, data, fops); + pr_debug("debugfs: creating file '%s'\n",name); + + error = simple_pin_fs(&debug_fs_type, &debugfs_mount, + &debugfs_mount_count); + if (error) + goto exit; + + error = debugfs_create_by_name(name, mode, parent, &dentry, + data, fops); + if (error) { + dentry = NULL; + 
simple_release_fs(&debugfs_mount, &debugfs_mount_count); + goto exit; + } +exit: + return dentry; } EXPORT_SYMBOL_GPL(debugfs_create_file); @@ -406,7 +407,8 @@ EXPORT_SYMBOL_GPL(debugfs_create_file); */ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) { - return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO, + return debugfs_create_file(name, + S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO, parent, NULL, NULL); } EXPORT_SYMBOL_GPL(debugfs_create_dir); @@ -444,7 +446,8 @@ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, if (!link) return NULL; - result = __create_file(name, S_IFLNK | S_IRWXUGO, parent, link, NULL); + result = debugfs_create_file(name, S_IFLNK | S_IRWXUGO, parent, link, + NULL); if (!result) kfree(link); return result; diff --git a/trunk/fs/devpts/inode.c b/trunk/fs/devpts/inode.c index 14afbabe6546..979c1e309c73 100644 --- a/trunk/fs/devpts/inode.c +++ b/trunk/fs/devpts/inode.c @@ -439,15 +439,15 @@ static struct dentry *devpts_mount(struct file_system_type *fs_type, return ERR_PTR(error); if (opts.newinstance) - s = sget(fs_type, NULL, set_anon_super, flags, NULL); + s = sget(fs_type, NULL, set_anon_super, NULL); else - s = sget(fs_type, compare_init_pts_sb, set_anon_super, flags, - NULL); + s = sget(fs_type, compare_init_pts_sb, set_anon_super, NULL); if (IS_ERR(s)) return ERR_CAST(s); if (!s->s_root) { + s->s_flags = flags; error = devpts_fill_super(s, data, flags & MS_SILENT ? 1 : 0); if (error) goto out_undo_sget; diff --git a/trunk/fs/direct-io.c b/trunk/fs/direct-io.c index 1faf4cb56f39..0c85fae37666 100644 --- a/trunk/fs/direct-io.c +++ b/trunk/fs/direct-io.c @@ -1258,7 +1258,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, */ BUG_ON(retval == -EIOCBQUEUED); if (dio->is_async && retval == 0 && dio->result && - ((rw == READ) || (dio->result == sdio.size))) + ((rw & READ) || (dio->result == sdio.size))) retval = -EIOCBQUEUED; if (retval != -EIOCBQUEUED) diff --git a/trunk/fs/ecryptfs/dentry.c b/trunk/fs/ecryptfs/dentry.c index 1b5d9af937df..534c1d46e69e 100644 --- a/trunk/fs/ecryptfs/dentry.c +++ b/trunk/fs/ecryptfs/dentry.c @@ -32,7 +32,7 @@ /** * ecryptfs_d_revalidate - revalidate an ecryptfs dentry * @dentry: The ecryptfs dentry - * @flags: lookup flags + * @nd: The associated nameidata * * Called when the VFS needs to revalidate a dentry. This * is called whenever a name lookup finds a dentry in the @@ -42,20 +42,32 @@ * Returns 1 if valid, 0 otherwise. 
* */ -static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags) +static int ecryptfs_d_revalidate(struct dentry *dentry, struct nameidata *nd) { struct dentry *lower_dentry; struct vfsmount *lower_mnt; + struct dentry *dentry_save = NULL; + struct vfsmount *vfsmount_save = NULL; int rc = 1; - if (flags & LOOKUP_RCU) + if (nd && nd->flags & LOOKUP_RCU) return -ECHILD; lower_dentry = ecryptfs_dentry_to_lower(dentry); lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry); if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate) goto out; - rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags); + if (nd) { + dentry_save = nd->path.dentry; + vfsmount_save = nd->path.mnt; + nd->path.dentry = lower_dentry; + nd->path.mnt = lower_mnt; + } + rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd); + if (nd) { + nd->path.dentry = dentry_save; + nd->path.mnt = vfsmount_save; + } if (dentry->d_inode) { struct inode *lower_inode = ecryptfs_inode_to_lower(dentry->d_inode); diff --git a/trunk/fs/ecryptfs/ecryptfs_kernel.h b/trunk/fs/ecryptfs/ecryptfs_kernel.h index 989e034f02bd..867b64c5d84f 100644 --- a/trunk/fs/ecryptfs/ecryptfs_kernel.h +++ b/trunk/fs/ecryptfs/ecryptfs_kernel.h @@ -550,6 +550,20 @@ extern struct kmem_cache *ecryptfs_key_record_cache; extern struct kmem_cache *ecryptfs_key_sig_cache; extern struct kmem_cache *ecryptfs_global_auth_tok_cache; extern struct kmem_cache *ecryptfs_key_tfm_cache; +extern struct kmem_cache *ecryptfs_open_req_cache; + +struct ecryptfs_open_req { +#define ECRYPTFS_REQ_PROCESSED 0x00000001 +#define ECRYPTFS_REQ_DROPPED 0x00000002 +#define ECRYPTFS_REQ_ZOMBIE 0x00000004 + u32 flags; + struct file **lower_file; + struct dentry *lower_dentry; + struct vfsmount *lower_mnt; + wait_queue_head_t wait; + struct mutex mux; + struct list_head kthread_ctl_list; +}; struct inode *ecryptfs_get_inode(struct inode *lower_inode, struct super_block *sb); diff --git a/trunk/fs/ecryptfs/inode.c b/trunk/fs/ecryptfs/inode.c index ffa2be57804d..a07441a0a878 100644 --- a/trunk/fs/ecryptfs/inode.c +++ b/trunk/fs/ecryptfs/inode.c @@ -173,7 +173,7 @@ ecryptfs_do_create(struct inode *directory_inode, inode = ERR_CAST(lower_dir_dentry); goto out; } - rc = vfs_create(lower_dir_dentry->d_inode, lower_dentry, mode, true); + rc = vfs_create(lower_dir_dentry->d_inode, lower_dentry, mode, NULL); if (rc) { printk(KERN_ERR "%s: Failure to create dentry in lower fs; " "rc = [%d]\n", __func__, rc); @@ -240,6 +240,7 @@ static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry, * @dir: The inode of the directory in which to create the file. * @dentry: The eCryptfs dentry * @mode: The mode of the new file. + * @nd: nameidata * * Creates a new file. 
* @@ -247,7 +248,7 @@ static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry, */ static int ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry, - umode_t mode, bool excl) + umode_t mode, struct nameidata *nd) { struct inode *ecryptfs_inode; int rc; @@ -269,8 +270,8 @@ ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry, iput(ecryptfs_inode); goto out; } - unlock_new_inode(ecryptfs_inode); d_instantiate(ecryptfs_dentry, ecryptfs_inode); + unlock_new_inode(ecryptfs_inode); out: return rc; } @@ -373,7 +374,7 @@ static int ecryptfs_lookup_interpose(struct dentry *dentry, */ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, struct dentry *ecryptfs_dentry, - unsigned int flags) + struct nameidata *ecryptfs_nd) { char *encrypted_and_encoded_name = NULL; size_t encrypted_and_encoded_name_size; diff --git a/trunk/fs/ecryptfs/kthread.c b/trunk/fs/ecryptfs/kthread.c index 809e67d05ca3..0dbe58a8b172 100644 --- a/trunk/fs/ecryptfs/kthread.c +++ b/trunk/fs/ecryptfs/kthread.c @@ -27,12 +27,7 @@ #include #include "ecryptfs_kernel.h" -struct ecryptfs_open_req { - struct file **lower_file; - struct path path; - struct completion done; - struct list_head kthread_ctl_list; -}; +struct kmem_cache *ecryptfs_open_req_cache; static struct ecryptfs_kthread_ctl { #define ECRYPTFS_KTHREAD_ZOMBIE 0x00000001 @@ -72,10 +67,18 @@ static int ecryptfs_threadfn(void *ignored) req = list_first_entry(&ecryptfs_kthread_ctl.req_list, struct ecryptfs_open_req, kthread_ctl_list); + mutex_lock(&req->mux); list_del(&req->kthread_ctl_list); - *req->lower_file = dentry_open(&req->path, - (O_RDWR | O_LARGEFILE), current_cred()); - complete(&req->done); + if (!(req->flags & ECRYPTFS_REQ_ZOMBIE)) { + dget(req->lower_dentry); + mntget(req->lower_mnt); + (*req->lower_file) = dentry_open( + req->lower_dentry, req->lower_mnt, + (O_RDWR | O_LARGEFILE), current_cred()); + req->flags |= ECRYPTFS_REQ_PROCESSED; + } + wake_up(&req->wait); + mutex_unlock(&req->mux); } mutex_unlock(&ecryptfs_kthread_ctl.mux); } @@ -108,9 +111,10 @@ void ecryptfs_destroy_kthread(void) ecryptfs_kthread_ctl.flags |= ECRYPTFS_KTHREAD_ZOMBIE; list_for_each_entry(req, &ecryptfs_kthread_ctl.req_list, kthread_ctl_list) { - list_del(&req->kthread_ctl_list); - *req->lower_file = ERR_PTR(-EIO); - complete(&req->done); + mutex_lock(&req->mux); + req->flags |= ECRYPTFS_REQ_ZOMBIE; + wake_up(&req->wait); + mutex_unlock(&req->mux); } mutex_unlock(&ecryptfs_kthread_ctl.mux); kthread_stop(ecryptfs_kthread); @@ -132,26 +136,34 @@ int ecryptfs_privileged_open(struct file **lower_file, struct vfsmount *lower_mnt, const struct cred *cred) { - struct ecryptfs_open_req req; + struct ecryptfs_open_req *req; int flags = O_LARGEFILE; int rc = 0; - init_completion(&req.done); - req.lower_file = lower_file; - req.path.dentry = lower_dentry; - req.path.mnt = lower_mnt; - /* Corresponding dput() and mntput() are done when the * lower file is fput() when all eCryptfs files for the inode are * released. */ + dget(lower_dentry); + mntget(lower_mnt); flags |= IS_RDONLY(lower_dentry->d_inode) ? 
O_RDONLY : O_RDWR; - (*lower_file) = dentry_open(&req.path, flags, cred); + (*lower_file) = dentry_open(lower_dentry, lower_mnt, flags, cred); if (!IS_ERR(*lower_file)) goto out; if ((flags & O_ACCMODE) == O_RDONLY) { rc = PTR_ERR((*lower_file)); goto out; } + req = kmem_cache_alloc(ecryptfs_open_req_cache, GFP_KERNEL); + if (!req) { + rc = -ENOMEM; + goto out; + } + mutex_init(&req->mux); + req->lower_file = lower_file; + req->lower_dentry = lower_dentry; + req->lower_mnt = lower_mnt; + init_waitqueue_head(&req->wait); + req->flags = 0; mutex_lock(&ecryptfs_kthread_ctl.mux); if (ecryptfs_kthread_ctl.flags & ECRYPTFS_KTHREAD_ZOMBIE) { rc = -EIO; @@ -159,14 +171,27 @@ int ecryptfs_privileged_open(struct file **lower_file, printk(KERN_ERR "%s: We are in the middle of shutting down; " "aborting privileged request to open lower file\n", __func__); - goto out; + goto out_free; } - list_add_tail(&req.kthread_ctl_list, &ecryptfs_kthread_ctl.req_list); + list_add_tail(&req->kthread_ctl_list, &ecryptfs_kthread_ctl.req_list); mutex_unlock(&ecryptfs_kthread_ctl.mux); wake_up(&ecryptfs_kthread_ctl.wait); - wait_for_completion(&req.done); - if (IS_ERR(*lower_file)) - rc = PTR_ERR(*lower_file); + wait_event(req->wait, (req->flags != 0)); + mutex_lock(&req->mux); + BUG_ON(req->flags == 0); + if (req->flags & ECRYPTFS_REQ_DROPPED + || req->flags & ECRYPTFS_REQ_ZOMBIE) { + rc = -EIO; + printk(KERN_WARNING "%s: Privileged open request dropped\n", + __func__); + goto out_unlock; + } + if (IS_ERR(*req->lower_file)) + rc = PTR_ERR(*req->lower_file); +out_unlock: + mutex_unlock(&req->mux); +out_free: + kmem_cache_free(ecryptfs_open_req_cache, req); out: return rc; } diff --git a/trunk/fs/ecryptfs/main.c b/trunk/fs/ecryptfs/main.c index 1c0b3b6b75c6..68954937a071 100644 --- a/trunk/fs/ecryptfs/main.c +++ b/trunk/fs/ecryptfs/main.c @@ -499,12 +499,13 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags goto out; } - s = sget(fs_type, NULL, set_anon_super, flags, NULL); + s = sget(fs_type, NULL, set_anon_super, NULL); if (IS_ERR(s)) { rc = PTR_ERR(s); goto out; } + s->s_flags = flags; rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs", BDI_CAP_MAP_COPY); if (rc) goto out1; @@ -681,6 +682,11 @@ static struct ecryptfs_cache_info { .name = "ecryptfs_key_tfm_cache", .size = sizeof(struct ecryptfs_key_tfm), }, + { + .cache = &ecryptfs_open_req_cache, + .name = "ecryptfs_open_req_cache", + .size = sizeof(struct ecryptfs_open_req), + }, }; static void ecryptfs_free_kmem_caches(void) diff --git a/trunk/fs/efs/efs.h b/trunk/fs/efs/efs.h index 5528926ac7f6..d8305b582ab0 100644 --- a/trunk/fs/efs/efs.h +++ b/trunk/fs/efs/efs.h @@ -129,7 +129,7 @@ extern struct inode *efs_iget(struct super_block *, unsigned long); extern efs_block_t efs_map_block(struct inode *, efs_block_t); extern int efs_get_block(struct inode *, sector_t, struct buffer_head *, int); -extern struct dentry *efs_lookup(struct inode *, struct dentry *, unsigned int); +extern struct dentry *efs_lookup(struct inode *, struct dentry *, struct nameidata *); extern struct dentry *efs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type); extern struct dentry *efs_fh_to_parent(struct super_block *sb, struct fid *fid, diff --git a/trunk/fs/efs/namei.c b/trunk/fs/efs/namei.c index 96f66d213a19..832b10ded82f 100644 --- a/trunk/fs/efs/namei.c +++ b/trunk/fs/efs/namei.c @@ -58,8 +58,7 @@ static efs_ino_t efs_find_entry(struct inode *inode, const char *name, int len) return(0); } -struct dentry *efs_lookup(struct 
inode *dir, struct dentry *dentry, unsigned int flags) -{ +struct dentry *efs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { efs_ino_t inodenum; struct inode *inode = NULL; diff --git a/trunk/fs/eventpoll.c b/trunk/fs/eventpoll.c index 1c8b55670804..74598f67efeb 100644 --- a/trunk/fs/eventpoll.c +++ b/trunk/fs/eventpoll.c @@ -1710,7 +1710,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, goto error_tgt_fput; /* Check if EPOLLWAKEUP is allowed */ - if ((epds.events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND)) + if ((epds.events & EPOLLWAKEUP) && !capable(CAP_EPOLLWAKEUP)) epds.events &= ~EPOLLWAKEUP; /* diff --git a/trunk/fs/exofs/namei.c b/trunk/fs/exofs/namei.c index 4731fd991efe..fc7161d6bf6b 100644 --- a/trunk/fs/exofs/namei.c +++ b/trunk/fs/exofs/namei.c @@ -46,7 +46,7 @@ static inline int exofs_add_nondir(struct dentry *dentry, struct inode *inode) } static struct dentry *exofs_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { struct inode *inode; ino_t ino; @@ -60,7 +60,7 @@ static struct dentry *exofs_lookup(struct inode *dir, struct dentry *dentry, } static int exofs_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool excl) + struct nameidata *nd) { struct inode *inode = exofs_new_inode(dir, mode); int err = PTR_ERR(inode); diff --git a/trunk/fs/exofs/ore.c b/trunk/fs/exofs/ore.c index 24a49d47e935..49cf230554a2 100644 --- a/trunk/fs/exofs/ore.c +++ b/trunk/fs/exofs/ore.c @@ -735,7 +735,13 @@ static int _prepare_for_striping(struct ore_io_state *ios) out: ios->numdevs = devs_in_group; ios->pages_consumed = cur_pg; - return ret; + if (unlikely(ret)) { + if (length == ios->length) + return ret; + else + ios->length -= length; + } + return 0; } int ore_create(struct ore_io_state *ios) diff --git a/trunk/fs/exofs/ore_raid.c b/trunk/fs/exofs/ore_raid.c index 5f376d14fdcc..d222c77cfa1b 100644 --- a/trunk/fs/exofs/ore_raid.c +++ b/trunk/fs/exofs/ore_raid.c @@ -144,25 +144,25 @@ static void _sp2d_reset(struct __stripe_pages_2d *sp2d, { unsigned data_devs = sp2d->data_devs; unsigned group_width = data_devs + sp2d->parity; - int p, c; + unsigned p; if (!sp2d->needed) return; - for (c = data_devs - 1; c >= 0; --c) - for (p = sp2d->pages_in_unit - 1; p >= 0; --p) { - struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p]; + for (p = 0; p < sp2d->pages_in_unit; p++) { + struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p]; - if (_1ps->page_is_read[c]) { - struct page *page = _1ps->pages[c]; + if (_1ps->write_count < group_width) { + unsigned c; - r4w->put_page(priv, page); - _1ps->page_is_read[c] = false; - } - } + for (c = 0; c < data_devs; c++) + if (_1ps->page_is_read[c]) { + struct page *page = _1ps->pages[c]; - for (p = 0; p < sp2d->pages_in_unit; p++) { - struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p]; + r4w->put_page(priv, page); + _1ps->page_is_read[c] = false; + } + } memset(_1ps->pages, 0, group_width * sizeof(*_1ps->pages)); _1ps->write_count = 0; @@ -461,12 +461,16 @@ static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret) * ios->sp2d[p][*], xor is calculated the same way. 
These pages are * allocated/freed and don't go through cache */ -static int _read_4_write_first_stripe(struct ore_io_state *ios) +static int _read_4_write(struct ore_io_state *ios) { + struct ore_io_state *ios_read; struct ore_striping_info read_si; struct __stripe_pages_2d *sp2d = ios->sp2d; u64 offset = ios->si.first_stripe_start; - unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1; + u64 last_stripe_end; + unsigned bytes_in_stripe = ios->si.bytes_in_stripe; + unsigned i, c, p, min_p = sp2d->pages_in_unit, max_p = -1; + int ret; if (offset == ios->offset) /* Go to start collect $200 */ goto read_last_stripe; @@ -474,9 +478,6 @@ static int _read_4_write_first_stripe(struct ore_io_state *ios) min_p = _sp2d_min_pg(sp2d); max_p = _sp2d_max_pg(sp2d); - ORE_DBGMSG("stripe_start=0x%llx ios->offset=0x%llx min_p=%d max_p=%d\n", - offset, ios->offset, min_p, max_p); - for (c = 0; ; c++) { ore_calc_stripe_info(ios->layout, offset, 0, &read_si); read_si.obj_offset += min_p * PAGE_SIZE; @@ -511,18 +512,6 @@ static int _read_4_write_first_stripe(struct ore_io_state *ios) } read_last_stripe: - return 0; -} - -static int _read_4_write_last_stripe(struct ore_io_state *ios) -{ - struct ore_striping_info read_si; - struct __stripe_pages_2d *sp2d = ios->sp2d; - u64 offset; - u64 last_stripe_end; - unsigned bytes_in_stripe = ios->si.bytes_in_stripe; - unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1; - offset = ios->offset + ios->length; if (offset % PAGE_SIZE) _add_to_r4w_last_page(ios, &offset); @@ -538,15 +527,15 @@ static int _read_4_write_last_stripe(struct ore_io_state *ios) c = _dev_order(ios->layout->group_width * ios->layout->mirrors_p1, ios->layout->mirrors_p1, read_si.par_dev, read_si.dev); + BUG_ON(ios->si.first_stripe_start + bytes_in_stripe != last_stripe_end); + /* unaligned IO must be within a single stripe */ + if (min_p == sp2d->pages_in_unit) { /* Didn't do it yet */ min_p = _sp2d_min_pg(sp2d); max_p = _sp2d_max_pg(sp2d); } - ORE_DBGMSG("offset=0x%llx stripe_end=0x%llx min_p=%d max_p=%d\n", - offset, last_stripe_end, min_p, max_p); - while (offset < last_stripe_end) { struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p]; @@ -579,15 +568,6 @@ static int _read_4_write_last_stripe(struct ore_io_state *ios) } read_it: - return 0; -} - -static int _read_4_write_execute(struct ore_io_state *ios) -{ - struct ore_io_state *ios_read; - unsigned i; - int ret; - ios_read = ios->ios_read_4_write; if (!ios_read) return 0; @@ -611,8 +591,6 @@ static int _read_4_write_execute(struct ore_io_state *ios) } _mark_read4write_pages_uptodate(ios_read, ret); - ore_put_io_state(ios_read); - ios->ios_read_4_write = NULL; /* Might need a reuse at last stripe */ return 0; } @@ -648,11 +626,8 @@ int _ore_add_parity_unit(struct ore_io_state *ios, /* If first stripe, Read in all read4write pages * (if needed) before we calculate the first parity. 
*/ - _read_4_write_first_stripe(ios); + _read_4_write(ios); } - if (!cur_len) /* If last stripe r4w pages of last stripe */ - _read_4_write_last_stripe(ios); - _read_4_write_execute(ios); for (i = 0; i < num_pages; i++) { pages[i] = _raid_page_alloc(); @@ -679,14 +654,34 @@ int _ore_add_parity_unit(struct ore_io_state *ios, int _ore_post_alloc_raid_stuff(struct ore_io_state *ios) { + struct ore_layout *layout = ios->layout; + if (ios->parity_pages) { - struct ore_layout *layout = ios->layout; unsigned pages_in_unit = layout->stripe_unit / PAGE_SIZE; + unsigned stripe_size = ios->si.bytes_in_stripe; + u64 last_stripe, first_stripe; if (_sp2d_alloc(pages_in_unit, layout->group_width, layout->parity, &ios->sp2d)) { return -ENOMEM; } + + /* Round io down to last full strip */ + first_stripe = div_u64(ios->offset, stripe_size); + last_stripe = div_u64(ios->offset + ios->length, stripe_size); + + /* If an IO spans more then a single stripe it must end at + * a stripe boundary. The reminder at the end is pushed into the + * next IO. + */ + if (last_stripe != first_stripe) { + ios->length = last_stripe * stripe_size - ios->offset; + + BUG_ON(!ios->length); + ios->nr_pages = (ios->length + PAGE_SIZE - 1) / + PAGE_SIZE; + ios->si.length = ios->length; /*make it consistent */ + } } return 0; } diff --git a/trunk/fs/exportfs/expfs.c b/trunk/fs/exportfs/expfs.c index 29ab099e3e08..b0201ca6e9c6 100644 --- a/trunk/fs/exportfs/expfs.c +++ b/trunk/fs/exportfs/expfs.c @@ -19,19 +19,19 @@ #define dprintk(fmt, args...) do{}while(0) -static int get_name(const struct path *path, char *name, struct dentry *child); +static int get_name(struct vfsmount *mnt, struct dentry *dentry, char *name, + struct dentry *child); static int exportfs_get_name(struct vfsmount *mnt, struct dentry *dir, char *name, struct dentry *child) { const struct export_operations *nop = dir->d_sb->s_export_op; - struct path path = {.mnt = mnt, .dentry = dir}; if (nop->get_name) return nop->get_name(dir, name, child); else - return get_name(&path, name, child); + return get_name(mnt, dir, name, child); } /* @@ -44,14 +44,13 @@ find_acceptable_alias(struct dentry *result, { struct dentry *dentry, *toput = NULL; struct inode *inode; - struct hlist_node *p; if (acceptable(context, result)) return result; inode = result->d_inode; spin_lock(&inode->i_lock); - hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) { + list_for_each_entry(dentry, &inode->i_dentry, d_alias) { dget(dentry); spin_unlock(&inode->i_lock); if (toput) @@ -249,10 +248,11 @@ static int filldir_one(void * __buf, const char * name, int len, * calls readdir on the parent until it finds an entry with * the same inode number as the child, and returns that. */ -static int get_name(const struct path *path, char *name, struct dentry *child) +static int get_name(struct vfsmount *mnt, struct dentry *dentry, + char *name, struct dentry *child) { const struct cred *cred = current_cred(); - struct inode *dir = path->dentry->d_inode; + struct inode *dir = dentry->d_inode; int error; struct file *file; struct getdents_callback buffer; @@ -266,7 +266,7 @@ static int get_name(const struct path *path, char *name, struct dentry *child) /* * Open the directory ... 
*/ - file = dentry_open(path, O_RDONLY, cred); + file = dentry_open(dget(dentry), mntget(mnt), O_RDONLY, cred); error = PTR_ERR(file); if (IS_ERR(file)) goto out; diff --git a/trunk/fs/ext2/namei.c b/trunk/fs/ext2/namei.c index 73b0d9519836..f663a67d7bf0 100644 --- a/trunk/fs/ext2/namei.c +++ b/trunk/fs/ext2/namei.c @@ -41,8 +41,8 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode) { int err = ext2_add_link(dentry, inode); if (!err) { - unlock_new_inode(inode); d_instantiate(dentry, inode); + unlock_new_inode(inode); return 0; } inode_dec_link_count(inode); @@ -55,7 +55,7 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode) * Methods themselves. */ -static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, unsigned int flags) +static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd) { struct inode * inode; ino_t ino; @@ -94,7 +94,7 @@ struct dentry *ext2_get_parent(struct dentry *child) * If the create succeeds, we fill in the inode information * with d_instantiate(). */ -static int ext2_create (struct inode * dir, struct dentry * dentry, umode_t mode, bool excl) +static int ext2_create (struct inode * dir, struct dentry * dentry, umode_t mode, struct nameidata *nd) { struct inode *inode; @@ -242,8 +242,8 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode) if (err) goto out_fail; - unlock_new_inode(inode); d_instantiate(dentry, inode); + unlock_new_inode(inode); out: return err; diff --git a/trunk/fs/ext2/super.c b/trunk/fs/ext2/super.c index 5df3d2d8169c..b3621cb7ea31 100644 --- a/trunk/fs/ext2/super.c +++ b/trunk/fs/ext2/super.c @@ -1184,12 +1184,6 @@ static int ext2_sync_fs(struct super_block *sb, int wait) struct ext2_sb_info *sbi = EXT2_SB(sb); struct ext2_super_block *es = EXT2_SB(sb)->s_es; - /* - * Write quota structures to quota file, sync_blockdev() will write - * them to disk later - */ - dquot_writeback_dquots(sb, -1); - spin_lock(&sbi->s_lock); if (es->s_state & cpu_to_le16(EXT2_VALID_FS)) { ext2_debug("setting valid to 0\n"); diff --git a/trunk/fs/ext3/dir.c b/trunk/fs/ext3/dir.c index c8fff930790d..92490e9f85ca 100644 --- a/trunk/fs/ext3/dir.c +++ b/trunk/fs/ext3/dir.c @@ -300,11 +300,10 @@ loff_t ext3_dir_llseek(struct file *file, loff_t offset, int origin) { struct inode *inode = file->f_mapping->host; int dx_dir = is_dx_dir(inode); - loff_t htree_max = ext3_get_htree_eof(file); if (likely(dx_dir)) return generic_file_llseek_size(file, offset, origin, - htree_max, htree_max); + ext3_get_htree_eof(file)); else return generic_file_llseek(file, offset, origin); } diff --git a/trunk/fs/ext3/namei.c b/trunk/fs/ext3/namei.c index 8f4fddac01a6..eeb63dfc5d20 100644 --- a/trunk/fs/ext3/namei.c +++ b/trunk/fs/ext3/namei.c @@ -1011,7 +1011,7 @@ static struct buffer_head * ext3_dx_find_entry(struct inode *dir, return NULL; } -static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry, unsigned int flags) +static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd) { struct inode * inode; struct ext3_dir_entry_2 * de; @@ -1671,8 +1671,8 @@ static int ext3_add_nondir(handle_t *handle, int err = ext3_add_entry(handle, dentry, inode); if (!err) { ext3_mark_inode_dirty(handle, inode); - unlock_new_inode(inode); d_instantiate(dentry, inode); + unlock_new_inode(inode); return 0; } drop_nlink(inode); @@ -1690,7 +1690,7 @@ static int ext3_add_nondir(handle_t *handle, * with d_instantiate(). 
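/*
 * Several hunks in this diff (ext2, ext3, and later ext4/jffs2/jfs) swap
 * the order of d_instantiate() and unlock_new_inode() when wiring up a
 * freshly created inode, i.e. they change at which point the new object
 * becomes reachable by other lookups.  What follows is only a loose
 * user-space analogy of the general publish-after-initialise concern,
 * not the kernel mechanism itself; struct record, publish_record and the
 * other names are made up:
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct record {
	int value;		/* must be fully set up before readers see it */
};

static struct record *shared;	/* where readers look, akin to the dcache */
static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;

/* Fully initialise the object first, then make it reachable under the lock. */
static void publish_record(int value)
{
	struct record *r = malloc(sizeof(*r));

	if (!r)
		return;
	r->value = value;		/* initialise before publication */

	pthread_mutex_lock(&shared_lock);
	shared = r;			/* now other threads may find it */
	pthread_mutex_unlock(&shared_lock);
}

static void *reader(void *arg)
{
	pthread_mutex_lock(&shared_lock);
	if (shared)
		printf("reader sees value %d\n", shared->value);
	else
		printf("reader sees nothing yet\n");
	pthread_mutex_unlock(&shared_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reader, NULL);
	publish_record(42);
	pthread_join(t, NULL);
	free(shared);
	return 0;
}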
*/ static int ext3_create (struct inode * dir, struct dentry * dentry, umode_t mode, - bool excl) + struct nameidata *nd) { handle_t *handle; struct inode * inode; @@ -1836,8 +1836,8 @@ static int ext3_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode) if (err) goto out_clear_inode; - unlock_new_inode(inode); d_instantiate(dentry, inode); + unlock_new_inode(inode); out_stop: brelse(dir_block); ext3_journal_stop(handle); diff --git a/trunk/fs/ext3/super.c b/trunk/fs/ext3/super.c index 4ac304c55c53..8c3a44b7c375 100644 --- a/trunk/fs/ext3/super.c +++ b/trunk/fs/ext3/super.c @@ -2526,11 +2526,6 @@ static int ext3_sync_fs(struct super_block *sb, int wait) tid_t target; trace_ext3_sync_fs(sb, wait); - /* - * Writeback quota in non-journalled quota case - journalled quota has - * no dirty dquots - */ - dquot_writeback_dquots(sb, -1); if (journal_start_commit(EXT3_SB(sb)->s_journal, &target)) { if (wait) log_wait_commit(EXT3_SB(sb)->s_journal, target); diff --git a/trunk/fs/ext4/dir.c b/trunk/fs/ext4/dir.c index 8e07d2a5a139..aa39e600d159 100644 --- a/trunk/fs/ext4/dir.c +++ b/trunk/fs/ext4/dir.c @@ -324,27 +324,74 @@ static inline loff_t ext4_get_htree_eof(struct file *filp) /* - * ext4_dir_llseek() calls generic_file_llseek_size to handle htree - * directories, where the "offset" is in terms of the filename hash - * value instead of the byte offset. + * ext4_dir_llseek() based on generic_file_llseek() to handle both + * non-htree and htree directories, where the "offset" is in terms + * of the filename hash value instead of the byte offset. * - * Because we may return a 64-bit hash that is well beyond offset limits, - * we need to pass the max hash as the maximum allowable offset in - * the htree directory case. - * - * For non-htree, ext4_llseek already chooses the proper max offset. + * NOTE: offsets obtained *before* ext4_set_inode_flag(dir, EXT4_INODE_INDEX) + * will be invalid once the directory was converted into a dx directory */ loff_t ext4_dir_llseek(struct file *file, loff_t offset, int origin) { struct inode *inode = file->f_mapping->host; + loff_t ret = -EINVAL; int dx_dir = is_dx_dir(inode); - loff_t htree_max = ext4_get_htree_eof(file); - if (likely(dx_dir)) - return generic_file_llseek_size(file, offset, origin, - htree_max, htree_max); - else - return ext4_llseek(file, offset, origin); + mutex_lock(&inode->i_mutex); + + /* NOTE: relative offsets with dx directories might not work + * as expected, as it is difficult to figure out the + * correct offset between dx hashes */ + + switch (origin) { + case SEEK_END: + if (unlikely(offset > 0)) + goto out_err; /* not supported for directories */ + + /* so only negative offsets are left, does that have a + * meaning for directories at all? */ + if (dx_dir) + offset += ext4_get_htree_eof(file); + else + offset += inode->i_size; + break; + case SEEK_CUR: + /* + * Here we special-case the lseek(fd, 0, SEEK_CUR) + * position-querying operation. Avoid rewriting the "same" + * f_pos value back to the file because a concurrent read(), + * write() or lseek() might have altered it + */ + if (offset == 0) { + offset = file->f_pos; + goto out_ok; + } + + offset += file->f_pos; + break; + } + + if (unlikely(offset < 0)) + goto out_err; + + if (!dx_dir) { + if (offset > inode->i_sb->s_maxbytes) + goto out_err; + } else if (offset > ext4_get_htree_eof(file)) + goto out_err; + + /* Special lock needed here? 
*/ + if (offset != file->f_pos) { + file->f_pos = offset; + file->f_version = 0; + } + +out_ok: + ret = offset; +out_err: + mutex_unlock(&inode->i_mutex); + + return ret; } /* diff --git a/trunk/fs/ext4/file.c b/trunk/fs/ext4/file.c index 782eecb57e43..8c7642a00054 100644 --- a/trunk/fs/ext4/file.c +++ b/trunk/fs/ext4/file.c @@ -211,9 +211,9 @@ static int ext4_file_open(struct inode * inode, struct file * filp) } /* - * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values - * by calling generic_file_llseek_size() with the appropriate maxbytes - * value for each. + * ext4_llseek() copied from generic_file_llseek() to handle both + * block-mapped and extent-mapped maxbytes values. This should + * otherwise be identical with generic_file_llseek(). */ loff_t ext4_llseek(struct file *file, loff_t offset, int origin) { @@ -225,8 +225,7 @@ loff_t ext4_llseek(struct file *file, loff_t offset, int origin) else maxbytes = inode->i_sb->s_maxbytes; - return generic_file_llseek_size(file, offset, origin, - maxbytes, i_size_read(inode)); + return generic_file_llseek_size(file, offset, origin, maxbytes); } const struct file_operations ext4_file_operations = { diff --git a/trunk/fs/ext4/fsync.c b/trunk/fs/ext4/fsync.c index 2a1dcea4f12e..bb6c7d811313 100644 --- a/trunk/fs/ext4/fsync.c +++ b/trunk/fs/ext4/fsync.c @@ -135,7 +135,14 @@ static int ext4_sync_parent(struct inode *inode) inode = igrab(inode); while (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) { ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY); - dentry = d_find_any_alias(inode); + dentry = NULL; + spin_lock(&inode->i_lock); + if (!list_empty(&inode->i_dentry)) { + dentry = list_first_entry(&inode->i_dentry, + struct dentry, d_alias); + dget(dentry); + } + spin_unlock(&inode->i_lock); if (!dentry) break; next = igrab(dentry->d_parent->d_inode); @@ -225,7 +232,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) if (!journal) { ret = __sync_inode(inode, datasync); - if (!ret && !hlist_empty(&inode->i_dentry)) + if (!ret && !list_empty(&inode->i_dentry)) ret = ext4_sync_parent(inode); goto out; } diff --git a/trunk/fs/ext4/ioctl.c b/trunk/fs/ext4/ioctl.c index 7f7dad787603..e34deac3f366 100644 --- a/trunk/fs/ext4/ioctl.c +++ b/trunk/fs/ext4/ioctl.c @@ -268,6 +268,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) err = ext4_move_extents(filp, donor_filp, me.orig_start, me.donor_start, me.len, &me.moved_len); mnt_drop_write_file(filp); + mnt_drop_write(filp->f_path.mnt); if (copy_to_user((struct move_extent __user *)arg, &me, sizeof(me))) @@ -389,7 +390,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (err) return err; - err = mnt_want_write_file(filp); + err = mnt_want_write(filp->f_path.mnt); if (err) goto resizefs_out; @@ -401,7 +402,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } if (err == 0) err = err2; - mnt_drop_write_file(filp); + mnt_drop_write(filp->f_path.mnt); resizefs_out: ext4_resize_end(sb); return err; diff --git a/trunk/fs/ext4/namei.c b/trunk/fs/ext4/namei.c index d0d3f0e87f99..5845cd97bf8b 100644 --- a/trunk/fs/ext4/namei.c +++ b/trunk/fs/ext4/namei.c @@ -1312,7 +1312,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q return NULL; } -static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) +static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct inode *inode; 
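/*
 * The ext4_dir_llseek() hunk above special-cases lseek(fd, 0, SEEK_CUR)
 * as a pure position query that must not rewrite f_pos.  From user space
 * the same call is the standard way to read the current offset without
 * moving it; a small, self-contained demonstration on a regular file
 * (the file name is illustrative):
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("demo.txt", O_CREAT | O_RDWR, 0644);
	off_t pos;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "hello", 5) != 5)
		perror("write");

	pos = lseek(fd, 0, SEEK_CUR);	/* query only, offset unchanged */
	printf("offset after write: %lld\n", (long long)pos);

	pos = lseek(fd, 0, SEEK_END);	/* now actually move to EOF */
	printf("offset at EOF:      %lld\n", (long long)pos);

	close(fd);
	return 0;
}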
struct ext4_dir_entry_2 *de; @@ -2072,8 +2072,8 @@ static int ext4_add_nondir(handle_t *handle, int err = ext4_add_entry(handle, dentry, inode); if (!err) { ext4_mark_inode_dirty(handle, inode); - unlock_new_inode(inode); d_instantiate(dentry, inode); + unlock_new_inode(inode); return 0; } drop_nlink(inode); @@ -2091,7 +2091,7 @@ static int ext4_add_nondir(handle_t *handle, * with d_instantiate(). */ static int ext4_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool excl) + struct nameidata *nd) { handle_t *handle; struct inode *inode; @@ -2249,8 +2249,8 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) err = ext4_mark_inode_dirty(handle, dir); if (err) goto out_clear_inode; - unlock_new_inode(inode); d_instantiate(dentry, inode); + unlock_new_inode(inode); out_stop: brelse(dir_block); ext4_journal_stop(handle); diff --git a/trunk/fs/ext4/super.c b/trunk/fs/ext4/super.c index d8759401ecae..eb7aa3e4ef05 100644 --- a/trunk/fs/ext4/super.c +++ b/trunk/fs/ext4/super.c @@ -4325,11 +4325,6 @@ static int ext4_sync_fs(struct super_block *sb, int wait) trace_ext4_sync_fs(sb, wait); flush_workqueue(sbi->dio_unwritten_wq); - /* - * Writeback quota in non-journalled quota case - journalled quota has - * no dirty dquots - */ - dquot_writeback_dquots(sb, -1); if (jbd2_journal_start_commit(sbi->s_journal, &target)) { if (wait) jbd2_log_wait_commit(sbi->s_journal, target); diff --git a/trunk/fs/fat/namei_msdos.c b/trunk/fs/fat/namei_msdos.c index 70d993a93805..c5938c9084b9 100644 --- a/trunk/fs/fat/namei_msdos.c +++ b/trunk/fs/fat/namei_msdos.c @@ -201,7 +201,7 @@ static const struct dentry_operations msdos_dentry_operations = { /***** Get inode using directory and name */ static struct dentry *msdos_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { struct super_block *sb = dir->i_sb; struct fat_slot_info sinfo; @@ -265,7 +265,7 @@ static int msdos_add_entry(struct inode *dir, const unsigned char *name, /***** Create a file */ static int msdos_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool excl) + struct nameidata *nd) { struct super_block *sb = dir->i_sb; struct inode *inode = NULL; diff --git a/trunk/fs/fat/namei_vfat.c b/trunk/fs/fat/namei_vfat.c index 6cc480652433..98ae804f5273 100644 --- a/trunk/fs/fat/namei_vfat.c +++ b/trunk/fs/fat/namei_vfat.c @@ -41,9 +41,9 @@ static int vfat_revalidate_shortname(struct dentry *dentry) return ret; } -static int vfat_revalidate(struct dentry *dentry, unsigned int flags) +static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd) { - if (flags & LOOKUP_RCU) + if (nd && nd->flags & LOOKUP_RCU) return -ECHILD; /* This is not negative dentry. Always valid. */ @@ -52,9 +52,9 @@ static int vfat_revalidate(struct dentry *dentry, unsigned int flags) return vfat_revalidate_shortname(dentry); } -static int vfat_revalidate_ci(struct dentry *dentry, unsigned int flags) +static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd) { - if (flags & LOOKUP_RCU) + if (nd && nd->flags & LOOKUP_RCU) return -ECHILD; /* @@ -74,7 +74,7 @@ static int vfat_revalidate_ci(struct dentry *dentry, unsigned int flags) * This may be nfsd (or something), anyway, we can't see the * intent of this. So, since this can be for creation, drop it. */ - if (!flags) + if (!nd) return 0; /* @@ -82,7 +82,7 @@ static int vfat_revalidate_ci(struct dentry *dentry, unsigned int flags) * case sensitive name which is specified by user if this is * for creation. 
*/ - if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) + if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) return 0; return vfat_revalidate_shortname(dentry); @@ -714,7 +714,7 @@ static int vfat_d_anon_disconn(struct dentry *dentry) } static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { struct super_block *sb = dir->i_sb; struct fat_slot_info sinfo; @@ -772,7 +772,7 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry, } static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool excl) + struct nameidata *nd) { struct super_block *sb = dir->i_sb; struct inode *inode; diff --git a/trunk/fs/fifo.c b/trunk/fs/fifo.c index cf6f4345ceb0..b1a524d798e7 100644 --- a/trunk/fs/fifo.c +++ b/trunk/fs/fifo.c @@ -14,7 +14,7 @@ #include #include -static int wait_for_partner(struct inode* inode, unsigned int *cnt) +static void wait_for_partner(struct inode* inode, unsigned int *cnt) { int cur = *cnt; @@ -23,7 +23,6 @@ static int wait_for_partner(struct inode* inode, unsigned int *cnt) if (signal_pending(current)) break; } - return cur == *cnt ? -ERESTARTSYS : 0; } static void wake_up_partner(struct inode* inode) @@ -68,7 +67,8 @@ static int fifo_open(struct inode *inode, struct file *filp) * seen a writer */ filp->f_version = pipe->w_counter; } else { - if (wait_for_partner(inode, &pipe->w_counter)) + wait_for_partner(inode, &pipe->w_counter); + if(signal_pending(current)) goto err_rd; } } @@ -90,7 +90,8 @@ static int fifo_open(struct inode *inode, struct file *filp) wake_up_partner(inode); if (!pipe->readers) { - if (wait_for_partner(inode, &pipe->r_counter)) + wait_for_partner(inode, &pipe->r_counter); + if (signal_pending(current)) goto err_wr; } break; diff --git a/trunk/fs/file_table.c b/trunk/fs/file_table.c index b3fc4d67a26b..a305d9e2d1b2 100644 --- a/trunk/fs/file_table.c +++ b/trunk/fs/file_table.c @@ -23,8 +23,6 @@ #include #include #include -#include -#include #include #include @@ -253,6 +251,7 @@ static void __fput(struct file *file) } fops_put(file->f_op); put_pid(file->f_owner.pid); + file_sb_list_del(file); if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) i_readcount_dec(inode); if (file->f_mode & FMODE_WRITE) @@ -264,77 +263,10 @@ static void __fput(struct file *file) mntput(mnt); } -static DEFINE_SPINLOCK(delayed_fput_lock); -static LIST_HEAD(delayed_fput_list); -static void delayed_fput(struct work_struct *unused) -{ - LIST_HEAD(head); - spin_lock_irq(&delayed_fput_lock); - list_splice_init(&delayed_fput_list, &head); - spin_unlock_irq(&delayed_fput_lock); - while (!list_empty(&head)) { - struct file *f = list_first_entry(&head, struct file, f_u.fu_list); - list_del_init(&f->f_u.fu_list); - __fput(f); - } -} - -static void ____fput(struct callback_head *work) -{ - __fput(container_of(work, struct file, f_u.fu_rcuhead)); -} - -/* - * If kernel thread really needs to have the final fput() it has done - * to complete, call this. The only user right now is the boot - we - * *do* need to make sure our writes to binaries on initramfs has - * not left us with opened struct file waiting for __fput() - execve() - * won't work without that. Please, don't add more callers without - * very good reasons; in particular, never call that with locks - * held and never call that from a thread that might need to do - * some work on any kind of umount. 
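/*
 * The fs/fifo.c hunk changes how fifo_open() reports an interrupted wait
 * for the other end of the pipe.  The user-visible behaviour behind it is
 * that a blocking open() of a FIFO waits until a partner opens the other
 * end.  A small demonstration of that wait (the FIFO path is illustrative):
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/tmp/demo_fifo";
	pid_t pid;

	unlink(path);
	if (mkfifo(path, 0600) != 0) {
		perror("mkfifo");
		return 1;
	}

	pid = fork();
	if (pid == 0) {
		/* child: become the writer after a short delay */
		int wfd;

		sleep(1);
		wfd = open(path, O_WRONLY);	/* unblocks the parent's open() */
		if (wfd >= 0)
			close(wfd);
		_exit(0);
	}

	/* parent: this open() blocks until the child opens for writing */
	int rfd = open(path, O_RDONLY);
	printf("reader open() returned %d after a writer appeared\n", rfd);

	if (rfd >= 0)
		close(rfd);
	waitpid(pid, NULL, 0);
	unlink(path);
	return 0;
}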
- */ -void flush_delayed_fput(void) -{ - delayed_fput(NULL); -} - -static DECLARE_WORK(delayed_fput_work, delayed_fput); - void fput(struct file *file) { - if (atomic_long_dec_and_test(&file->f_count)) { - struct task_struct *task = current; - file_sb_list_del(file); - if (unlikely(in_interrupt() || task->flags & PF_KTHREAD)) { - unsigned long flags; - spin_lock_irqsave(&delayed_fput_lock, flags); - list_add(&file->f_u.fu_list, &delayed_fput_list); - schedule_work(&delayed_fput_work); - spin_unlock_irqrestore(&delayed_fput_lock, flags); - return; - } - init_task_work(&file->f_u.fu_rcuhead, ____fput); - task_work_add(task, &file->f_u.fu_rcuhead, true); - } -} - -/* - * synchronous analog of fput(); for kernel threads that might be needed - * in some umount() (and thus can't use flush_delayed_fput() without - * risking deadlocks), need to wait for completion of __fput() and know - * for this specific struct file it won't involve anything that would - * need them. Use only if you really need it - at the very least, - * don't blindly convert fput() by kernel thread to that. - */ -void __fput_sync(struct file *file) -{ - if (atomic_long_dec_and_test(&file->f_count)) { - struct task_struct *task = current; - file_sb_list_del(file); - BUG_ON(!(task->flags & PF_KTHREAD)); + if (atomic_long_dec_and_test(&file->f_count)) __fput(file); - } } EXPORT_SYMBOL(fput); @@ -551,8 +483,10 @@ void mark_files_ro(struct super_block *sb) { struct file *f; +retry: lg_global_lock(&files_lglock); do_file_list_for_each_entry(sb, f) { + struct vfsmount *mnt; if (!S_ISREG(f->f_path.dentry->d_inode->i_mode)) continue; if (!file_count(f)) @@ -565,7 +499,12 @@ void mark_files_ro(struct super_block *sb) if (file_check_writeable(f) != 0) continue; file_release_write(f); - mnt_drop_write_file(f); + mnt = mntget(f->f_path.mnt); + /* This can sleep, so we can't hold the spinlock. */ + lg_global_unlock(&files_lglock); + mnt_drop_write(mnt); + mntput(mnt); + goto retry; } while_file_list_for_each_entry; lg_global_unlock(&files_lglock); } diff --git a/trunk/fs/freevxfs/vxfs_lookup.c b/trunk/fs/freevxfs/vxfs_lookup.c index bd447e88f208..3360f1e678ad 100644 --- a/trunk/fs/freevxfs/vxfs_lookup.c +++ b/trunk/fs/freevxfs/vxfs_lookup.c @@ -48,7 +48,7 @@ #define VXFS_BLOCK_PER_PAGE(sbp) ((PAGE_CACHE_SIZE / (sbp)->s_blocksize)) -static struct dentry * vxfs_lookup(struct inode *, struct dentry *, unsigned int); +static struct dentry * vxfs_lookup(struct inode *, struct dentry *, struct nameidata *); static int vxfs_readdir(struct file *, void *, filldir_t); const struct inode_operations vxfs_dir_inode_ops = { @@ -203,7 +203,7 @@ vxfs_inode_by_name(struct inode *dip, struct dentry *dp) * in the return pointer. */ static struct dentry * -vxfs_lookup(struct inode *dip, struct dentry *dp, unsigned int flags) +vxfs_lookup(struct inode *dip, struct dentry *dp, struct nameidata *nd) { struct inode *ip = NULL; ino_t ino; diff --git a/trunk/fs/fs-writeback.c b/trunk/fs/fs-writeback.c index 8f660dd6137a..41a3ccff18d8 100644 --- a/trunk/fs/fs-writeback.c +++ b/trunk/fs/fs-writeback.c @@ -1315,8 +1315,6 @@ void writeback_inodes_sb_nr(struct super_block *sb, .reason = reason, }; - if (sb->s_bdi == &noop_backing_dev_info) - return; WARN_ON(!rwsem_is_locked(&sb->s_umount)); bdi_queue_work(sb->s_bdi, &work); wait_for_completion(&done); @@ -1400,9 +1398,6 @@ void sync_inodes_sb(struct super_block *sb) .reason = WB_REASON_SYNC, }; - /* Nothing to do? 
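/*
 * In the mark_files_ro() hunk above, the restored code must drop the
 * global file-list lock before calling a function that may sleep, and
 * then restart the whole scan.  A condensed user-space sketch of that
 * "claim the item, unlock, do the blocking work, retry from the top"
 * shape; needs_work[], process_one() and scan_all() are made-up names:
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define NITEMS 3

static int needs_work[NITEMS] = { 1, 1, 1 };
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stands in for an operation that is not allowed under list_lock. */
static void process_one(int i)
{
	usleep(1000);			/* pretend this can sleep */
	printf("processed item %d\n", i);
}

static void scan_all(void)
{
	int i;

retry:
	pthread_mutex_lock(&list_lock);
	for (i = 0; i < NITEMS; i++) {
		if (!needs_work[i])
			continue;
		needs_work[i] = 0;	/* claim it while still locked */
		pthread_mutex_unlock(&list_lock);
		process_one(i);		/* blocking work outside the lock */
		goto retry;		/* list may have changed; rescan */
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	scan_all();
	return 0;
}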
*/ - if (sb->s_bdi == &noop_backing_dev_info) - return; WARN_ON(!rwsem_is_locked(&sb->s_umount)); bdi_queue_work(sb->s_bdi, &work); diff --git a/trunk/fs/fs_struct.c b/trunk/fs/fs_struct.c index 5df4775fea03..e159e682ad4c 100644 --- a/trunk/fs/fs_struct.c +++ b/trunk/fs/fs_struct.c @@ -6,6 +6,18 @@ #include #include "internal.h" +static inline void path_get_longterm(struct path *path) +{ + path_get(path); + mnt_make_longterm(path->mnt); +} + +static inline void path_put_longterm(struct path *path) +{ + mnt_make_shortterm(path->mnt); + path_put(path); +} + /* * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values. * It can block. @@ -14,7 +26,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path) { struct path old_root; - path_get(path); + path_get_longterm(path); spin_lock(&fs->lock); write_seqcount_begin(&fs->seq); old_root = fs->root; @@ -22,7 +34,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path) write_seqcount_end(&fs->seq); spin_unlock(&fs->lock); if (old_root.dentry) - path_put(&old_root); + path_put_longterm(&old_root); } /* @@ -33,7 +45,7 @@ void set_fs_pwd(struct fs_struct *fs, struct path *path) { struct path old_pwd; - path_get(path); + path_get_longterm(path); spin_lock(&fs->lock); write_seqcount_begin(&fs->seq); old_pwd = fs->pwd; @@ -42,7 +54,7 @@ void set_fs_pwd(struct fs_struct *fs, struct path *path) spin_unlock(&fs->lock); if (old_pwd.dentry) - path_put(&old_pwd); + path_put_longterm(&old_pwd); } static inline int replace_path(struct path *p, const struct path *old, const struct path *new) @@ -72,7 +84,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root) write_seqcount_end(&fs->seq); while (hits--) { count++; - path_get(new_root); + path_get_longterm(new_root); } spin_unlock(&fs->lock); } @@ -80,13 +92,13 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root) } while_each_thread(g, p); read_unlock(&tasklist_lock); while (count--) - path_put(old_root); + path_put_longterm(old_root); } void free_fs_struct(struct fs_struct *fs) { - path_put(&fs->root); - path_put(&fs->pwd); + path_put_longterm(&fs->root); + path_put_longterm(&fs->pwd); kmem_cache_free(fs_cachep, fs); } @@ -120,9 +132,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old) spin_lock(&old->lock); fs->root = old->root; - path_get(&fs->root); + path_get_longterm(&fs->root); fs->pwd = old->pwd; - path_get(&fs->pwd); + path_get_longterm(&fs->pwd); spin_unlock(&old->lock); } return fs; diff --git a/trunk/fs/fuse/dir.c b/trunk/fs/fuse/dir.c index 8964cf3999b2..334e0b18a014 100644 --- a/trunk/fs/fuse/dir.c +++ b/trunk/fs/fuse/dir.c @@ -154,7 +154,7 @@ u64 fuse_get_attr_version(struct fuse_conn *fc) * the lookup once more. If the lookup results in the same inode, * then refresh the attributes, timeouts and mark the dentry valid. 
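/*
 * The fs_struct.c hunks above switch fs->root and fs->pwd over to
 * "longterm" path references.  The structure they protect is simply the
 * per-process root and working directory, i.e. the state behind
 * chroot(2) and chdir(2).  A small user-space illustration of that state
 * changing:
 */
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[PATH_MAX];

	if (getcwd(buf, sizeof(buf)))
		printf("cwd before: %s\n", buf);

	if (chdir("/tmp") != 0) {	/* updates this task's fs->pwd */
		perror("chdir");
		return 1;
	}

	if (getcwd(buf, sizeof(buf)))
		printf("cwd after:  %s\n", buf);

	return 0;
}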
*/ -static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags) +static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd) { struct inode *inode; @@ -174,7 +174,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags) if (!inode) return 0; - if (flags & LOOKUP_RCU) + if (nd && (nd->flags & LOOKUP_RCU)) return -ECHILD; fc = get_fuse_conn(inode); @@ -249,7 +249,7 @@ static struct dentry *fuse_d_add_directory(struct dentry *entry, /* This tries to shrink the subtree below alias */ fuse_invalidate_entry(alias); dput(alias); - if (!hlist_empty(&inode->i_dentry)) + if (!list_empty(&inode->i_dentry)) return ERR_PTR(-EBUSY); } else { dput(alias); @@ -316,7 +316,7 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name, } static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry, - unsigned int flags) + struct nameidata *nd) { int err; struct fuse_entry_out outarg; @@ -370,8 +370,7 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry, * 'mknod' + 'open' requests. */ static int fuse_create_open(struct inode *dir, struct dentry *entry, - struct file *file, unsigned flags, - umode_t mode, int *opened) + umode_t mode, struct nameidata *nd) { int err; struct inode *inode; @@ -382,11 +381,15 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, struct fuse_open_out outopen; struct fuse_entry_out outentry; struct fuse_file *ff; + struct file *file; + int flags = nd->intent.open.flags; + + if (fc->no_create) + return -ENOSYS; forget = fuse_alloc_forget(); - err = -ENOMEM; if (!forget) - goto out_err; + return -ENOMEM; req = fuse_get_req(fc); err = PTR_ERR(req); @@ -425,8 +428,11 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, req->out.args[1].value = &outopen; fuse_request_send(fc, req); err = req->out.h.error; - if (err) + if (err) { + if (err == -ENOSYS) + fc->no_create = 1; goto out_free_ff; + } err = -EIO; if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid)) @@ -442,74 +448,28 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, flags &= ~(O_CREAT | O_EXCL | O_TRUNC); fuse_sync_release(ff, flags); fuse_queue_forget(fc, forget, outentry.nodeid, 1); - err = -ENOMEM; - goto out_err; + return -ENOMEM; } kfree(forget); d_instantiate(entry, inode); fuse_change_entry_timeout(entry, &outentry); fuse_invalidate_attr(dir); - err = finish_open(file, entry, generic_file_open, opened); - if (err) { + file = lookup_instantiate_filp(nd, entry, generic_file_open); + if (IS_ERR(file)) { fuse_sync_release(ff, flags); - } else { - file->private_data = fuse_file_get(ff); - fuse_finish_open(inode, file); + return PTR_ERR(file); } - return err; + file->private_data = fuse_file_get(ff); + fuse_finish_open(inode, file); + return 0; -out_free_ff: + out_free_ff: fuse_file_free(ff); -out_put_request: + out_put_request: fuse_put_request(fc, req); -out_put_forget_req: + out_put_forget_req: kfree(forget); -out_err: - return err; -} - -static int fuse_mknod(struct inode *, struct dentry *, umode_t, dev_t); -static int fuse_atomic_open(struct inode *dir, struct dentry *entry, - struct file *file, unsigned flags, - umode_t mode, int *opened) -{ - int err; - struct fuse_conn *fc = get_fuse_conn(dir); - struct dentry *res = NULL; - - if (d_unhashed(entry)) { - res = fuse_lookup(dir, entry, 0); - if (IS_ERR(res)) - return PTR_ERR(res); - - if (res) - entry = res; - } - - if (!(flags & O_CREAT) || entry->d_inode) - goto no_open; - - /* Only 
creates */ - *opened |= FILE_CREATED; - - if (fc->no_create) - goto mknod; - - err = fuse_create_open(dir, entry, file, flags, mode, opened); - if (err == -ENOSYS) { - fc->no_create = 1; - goto mknod; - } -out_dput: - dput(res); return err; - -mknod: - err = fuse_mknod(dir, entry, mode, 0); - if (err) - goto out_dput; -no_open: - return finish_no_open(file, res); } /* @@ -611,8 +571,14 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode, } static int fuse_create(struct inode *dir, struct dentry *entry, umode_t mode, - bool excl) + struct nameidata *nd) { + if (nd) { + int err = fuse_create_open(dir, entry, mode, nd); + if (err != -ENOSYS) + return err; + /* Fall back on mknod */ + } return fuse_mknod(dir, entry, mode, 0); } @@ -1680,7 +1646,6 @@ static const struct inode_operations fuse_dir_inode_operations = { .link = fuse_link, .setattr = fuse_setattr, .create = fuse_create, - .atomic_open = fuse_atomic_open, .mknod = fuse_mknod, .permission = fuse_permission, .getattr = fuse_getattr, diff --git a/trunk/fs/gfs2/dentry.c b/trunk/fs/gfs2/dentry.c index 4fddb3c22d25..0da8da2c991d 100644 --- a/trunk/fs/gfs2/dentry.c +++ b/trunk/fs/gfs2/dentry.c @@ -25,7 +25,7 @@ /** * gfs2_drevalidate - Check directory lookup consistency * @dentry: the mapping to check - * @flags: lookup flags + * @nd: * * Check to make sure the lookup necessary to arrive at this inode from its * parent is still good. @@ -33,7 +33,7 @@ * Returns: 1 if the dentry is ok, 0 if it isn't */ -static int gfs2_drevalidate(struct dentry *dentry, unsigned int flags) +static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd) { struct dentry *parent; struct gfs2_sbd *sdp; @@ -44,7 +44,7 @@ static int gfs2_drevalidate(struct dentry *dentry, unsigned int flags) int error; int had_lock = 0; - if (flags & LOOKUP_RCU) + if (nd && nd->flags & LOOKUP_RCU) return -ECHILD; parent = dget_parent(dentry); diff --git a/trunk/fs/gfs2/inode.c b/trunk/fs/gfs2/inode.c index 867674785fcf..a9ba2444e077 100644 --- a/trunk/fs/gfs2/inode.c +++ b/trunk/fs/gfs2/inode.c @@ -755,8 +755,11 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, */ static int gfs2_create(struct inode *dir, struct dentry *dentry, - umode_t mode, bool excl) + umode_t mode, struct nameidata *nd) { + int excl = 0; + if (nd && (nd->flags & LOOKUP_EXCL)) + excl = 1; return gfs2_create_inode(dir, dentry, S_IFREG | mode, 0, NULL, 0, excl); } @@ -772,7 +775,7 @@ static int gfs2_create(struct inode *dir, struct dentry *dentry, */ static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { struct inode *inode = gfs2_lookupi(dir, &dentry->d_name, 0); if (inode && !IS_ERR(inode)) { diff --git a/trunk/fs/gfs2/ops_fstype.c b/trunk/fs/gfs2/ops_fstype.c index 6c906078f657..b8c250fc4922 100644 --- a/trunk/fs/gfs2/ops_fstype.c +++ b/trunk/fs/gfs2/ops_fstype.c @@ -1286,7 +1286,7 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags, error = -EBUSY; goto error_bdev; } - s = sget(fs_type, test_gfs2_super, set_gfs2_super, flags, bdev); + s = sget(fs_type, test_gfs2_super, set_gfs2_super, bdev); mutex_unlock(&bdev->bd_fsfreeze_mutex); error = PTR_ERR(s); if (IS_ERR(s)) @@ -1316,6 +1316,7 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags, } else { char b[BDEVNAME_SIZE]; + s->s_flags = flags; s->s_mode = mode; strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id)); sb_set_blocksize(s, block_size(bdev)); @@ -1359,7 +1360,7 @@ static 
struct dentry *gfs2_mount_meta(struct file_system_type *fs_type, dev_name, error); return ERR_PTR(error); } - s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super, flags, + s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super, path.dentry->d_inode->i_sb->s_bdev); path_put(&path); if (IS_ERR(s)) { diff --git a/trunk/fs/gfs2/quota.c b/trunk/fs/gfs2/quota.c index 27b5cc7d6881..b97178e7d397 100644 --- a/trunk/fs/gfs2/quota.c +++ b/trunk/fs/gfs2/quota.c @@ -1108,7 +1108,7 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change, } } -int gfs2_quota_sync(struct super_block *sb, int type) +int gfs2_quota_sync(struct super_block *sb, int type, int wait) { struct gfs2_sbd *sdp = sb->s_fs_info; struct gfs2_quota_data **qda; @@ -1154,7 +1154,7 @@ int gfs2_quota_sync(struct super_block *sb, int type) static int gfs2_quota_sync_timeo(struct super_block *sb, int type) { - return gfs2_quota_sync(sb, type); + return gfs2_quota_sync(sb, type, 0); } int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id) diff --git a/trunk/fs/gfs2/quota.h b/trunk/fs/gfs2/quota.h index f25d98b87904..90bf1c302a98 100644 --- a/trunk/fs/gfs2/quota.h +++ b/trunk/fs/gfs2/quota.h @@ -26,7 +26,7 @@ extern int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid); extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change, u32 uid, u32 gid); -extern int gfs2_quota_sync(struct super_block *sb, int type); +extern int gfs2_quota_sync(struct super_block *sb, int type, int wait); extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id); extern int gfs2_quota_init(struct gfs2_sbd *sdp); diff --git a/trunk/fs/gfs2/super.c b/trunk/fs/gfs2/super.c index f3d6bbfb32c5..713e621c240b 100644 --- a/trunk/fs/gfs2/super.c +++ b/trunk/fs/gfs2/super.c @@ -838,7 +838,7 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp) int error; flush_workqueue(gfs2_delete_workqueue); - gfs2_quota_sync(sdp->sd_vfs, 0); + gfs2_quota_sync(sdp->sd_vfs, 0, 1); gfs2_statfs_sync(sdp->sd_vfs, 0); error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE, @@ -952,8 +952,6 @@ static void gfs2_put_super(struct super_block *sb) static int gfs2_sync_fs(struct super_block *sb, int wait) { struct gfs2_sbd *sdp = sb->s_fs_info; - - gfs2_quota_sync(sb, -1); if (wait && sdp) gfs2_log_flush(sdp, NULL); return 0; diff --git a/trunk/fs/gfs2/sys.c b/trunk/fs/gfs2/sys.c index 73ecc34c4342..9c2592b1d5ff 100644 --- a/trunk/fs/gfs2/sys.c +++ b/trunk/fs/gfs2/sys.c @@ -168,7 +168,7 @@ static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf, if (simple_strtol(buf, NULL, 0) != 1) return -EINVAL; - gfs2_quota_sync(sdp->sd_vfs, 0); + gfs2_quota_sync(sdp->sd_vfs, 0, 1); return len; } diff --git a/trunk/fs/hfs/dir.c b/trunk/fs/hfs/dir.c index 422dde2ec0a1..62fc14ea4b73 100644 --- a/trunk/fs/hfs/dir.c +++ b/trunk/fs/hfs/dir.c @@ -18,7 +18,7 @@ * hfs_lookup() */ static struct dentry *hfs_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { hfs_cat_rec rec; struct hfs_find_data fd; @@ -187,7 +187,7 @@ static int hfs_dir_release(struct inode *inode, struct file *file) * the directory and the name (and its length) of the new file. 
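/*
 * Several hunks in this region touch the ->sync_fs() path (gfs2 here, and
 * the earlier ext2/ext3/ext4 quota writeback removals).  From user space
 * the usual entry points into that path are sync(2) for every filesystem
 * and syncfs(2) for the filesystem containing one open descriptor; a
 * minimal illustration (the path "." is just an example):
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open(".", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	sync();				/* flush all filesystems */

	if (syncfs(fd) != 0)		/* flush only this filesystem */
		perror("syncfs");
	else
		printf("syncfs on the current directory's filesystem done\n");

	close(fd);
	return 0;
}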
*/ static int hfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool excl) + struct nameidata *nd) { struct inode *inode; int res; diff --git a/trunk/fs/hfs/extent.c b/trunk/fs/hfs/extent.c index a67955a0c36f..2c16316d2917 100644 --- a/trunk/fs/hfs/extent.c +++ b/trunk/fs/hfs/extent.c @@ -432,7 +432,7 @@ int hfs_extend_file(struct inode *inode) if (inode->i_ino < HFS_FIRSTUSER_CNID) set_bit(HFS_FLG_ALT_MDB_DIRTY, &HFS_SB(sb)->flags); set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags); - hfs_mark_mdb_dirty(sb); + sb->s_dirt = 1; } return res; diff --git a/trunk/fs/hfs/hfs_fs.h b/trunk/fs/hfs/hfs_fs.h index 8275175acf6e..1bf967c6bfdc 100644 --- a/trunk/fs/hfs/hfs_fs.h +++ b/trunk/fs/hfs/hfs_fs.h @@ -14,7 +14,6 @@ #include #include #include -#include #include #include @@ -138,15 +137,16 @@ struct hfs_sb_info { gid_t s_gid; /* The gid of all files */ int session, part; + struct nls_table *nls_io, *nls_disk; + struct mutex bitmap_lock; + unsigned long flags; + u16 blockoffset; + int fs_div; - struct super_block *sb; - int work_queued; /* non-zero delayed work is queued */ - struct delayed_work mdb_work; /* MDB flush delayed work */ - spinlock_t work_lock; /* protects mdb_work and work_queued */ }; #define HFS_FLG_BITMAP_DIRTY 0 @@ -226,9 +226,6 @@ extern int hfs_compare_dentry(const struct dentry *parent, extern void hfs_asc2mac(struct super_block *, struct hfs_name *, struct qstr *); extern int hfs_mac2asc(struct super_block *, char *, const struct hfs_name *); -/* super.c */ -extern void hfs_mark_mdb_dirty(struct super_block *sb); - extern struct timezone sys_tz; /* @@ -256,7 +253,7 @@ static inline const char *hfs_mdb_name(struct super_block *sb) static inline void hfs_bitmap_dirty(struct super_block *sb) { set_bit(HFS_FLG_BITMAP_DIRTY, &HFS_SB(sb)->flags); - hfs_mark_mdb_dirty(sb); + sb->s_dirt = 1; } #define sb_bread512(sb, sec, data) ({ \ diff --git a/trunk/fs/hfs/inode.c b/trunk/fs/hfs/inode.c index ee1bc55677f1..761ec06354b4 100644 --- a/trunk/fs/hfs/inode.c +++ b/trunk/fs/hfs/inode.c @@ -220,7 +220,7 @@ struct inode *hfs_new_inode(struct inode *dir, struct qstr *name, umode_t mode) insert_inode_hash(inode); mark_inode_dirty(inode); set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags); - hfs_mark_mdb_dirty(sb); + sb->s_dirt = 1; return inode; } @@ -235,7 +235,7 @@ void hfs_delete_inode(struct inode *inode) if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID)) HFS_SB(sb)->root_dirs--; set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags); - hfs_mark_mdb_dirty(sb); + sb->s_dirt = 1; return; } HFS_SB(sb)->file_count--; @@ -248,7 +248,7 @@ void hfs_delete_inode(struct inode *inode) } } set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags); - hfs_mark_mdb_dirty(sb); + sb->s_dirt = 1; } void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext, @@ -489,7 +489,7 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc) } static struct dentry *hfs_file_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { struct inode *inode = NULL; hfs_cat_rec rec; @@ -644,7 +644,13 @@ static int hfs_file_fsync(struct file *filp, loff_t start, loff_t end, /* sync the superblock to buffers */ sb = inode->i_sb; - flush_delayed_work_sync(&HFS_SB(sb)->mdb_work); + if (sb->s_dirt) { + lock_super(sb); + sb->s_dirt = 0; + if (!(sb->s_flags & MS_RDONLY)) + hfs_mdb_commit(sb); + unlock_super(sb); + } /* .. 
finally sync the buffers to disk */ err = sync_blockdev(sb->s_bdev); if (!ret) diff --git a/trunk/fs/hfs/mdb.c b/trunk/fs/hfs/mdb.c index 5fd51a5833ff..1563d5ce5764 100644 --- a/trunk/fs/hfs/mdb.c +++ b/trunk/fs/hfs/mdb.c @@ -260,10 +260,6 @@ void hfs_mdb_commit(struct super_block *sb) { struct hfs_mdb *mdb = HFS_SB(sb)->mdb; - if (sb->s_flags & MS_RDONLY) - return; - - lock_buffer(HFS_SB(sb)->mdb_bh); if (test_and_clear_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags)) { /* These parameters may have been modified, so write them back */ mdb->drLsMod = hfs_mtime(); @@ -287,13 +283,9 @@ void hfs_mdb_commit(struct super_block *sb) &mdb->drXTFlSize, NULL); hfs_inode_write_fork(HFS_SB(sb)->cat_tree->inode, mdb->drCTExtRec, &mdb->drCTFlSize, NULL); - - lock_buffer(HFS_SB(sb)->alt_mdb_bh); memcpy(HFS_SB(sb)->alt_mdb, HFS_SB(sb)->mdb, HFS_SECTOR_SIZE); HFS_SB(sb)->alt_mdb->drAtrb |= cpu_to_be16(HFS_SB_ATTRIB_UNMNT); HFS_SB(sb)->alt_mdb->drAtrb &= cpu_to_be16(~HFS_SB_ATTRIB_INCNSTNT); - unlock_buffer(HFS_SB(sb)->alt_mdb_bh); - mark_buffer_dirty(HFS_SB(sb)->alt_mdb_bh); sync_dirty_buffer(HFS_SB(sb)->alt_mdb_bh); } @@ -316,11 +308,7 @@ void hfs_mdb_commit(struct super_block *sb) break; } len = min((int)sb->s_blocksize - off, size); - - lock_buffer(bh); memcpy(bh->b_data + off, ptr, len); - unlock_buffer(bh); - mark_buffer_dirty(bh); brelse(bh); block++; @@ -329,7 +317,6 @@ void hfs_mdb_commit(struct super_block *sb) size -= len; } } - unlock_buffer(HFS_SB(sb)->mdb_bh); } void hfs_mdb_close(struct super_block *sb) diff --git a/trunk/fs/hfs/super.c b/trunk/fs/hfs/super.c index 4eb873e0c07b..7b4c537d6e13 100644 --- a/trunk/fs/hfs/super.c +++ b/trunk/fs/hfs/super.c @@ -29,9 +29,43 @@ static struct kmem_cache *hfs_inode_cachep; MODULE_LICENSE("GPL"); +/* + * hfs_write_super() + * + * Description: + * This function is called by the VFS only. When the filesystem + * is mounted r/w it updates the MDB on disk. + * Input Variable(s): + * struct super_block *sb: Pointer to the hfs superblock + * Output Variable(s): + * NONE + * Returns: + * void + * Preconditions: + * 'sb' points to a "valid" (struct super_block). + * Postconditions: + * The MDB is marked 'unsuccessfully unmounted' by clearing bit 8 of drAtrb + * (hfs_put_super() must set this flag!). Some MDB fields are updated + * and the MDB buffer is written to disk by calling hfs_mdb_commit(). 
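/*
 * The hfs hunks in this area (and the hfsplus ones further down) trade a
 * delayed-work based flush, where marking the MDB dirty queues one
 * deferred commit, for the older s_dirt/write_super scheme.  A rough
 * user-space sketch of the "coalesce many dirty marks into one deferred
 * flush" idea, assuming a single thread; mark_dirty() and flush_if_due()
 * are illustrative names:
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define FLUSH_DELAY_SEC 5

static bool dirty;
static time_t first_dirty;

/* Remember that a flush is needed; only the first call arms the timer. */
static void mark_dirty(void)
{
	if (!dirty) {
		dirty = true;
		first_dirty = time(NULL);
	}
}

/* Flush once the delay has elapsed, no matter how many marks happened. */
static void flush_if_due(void)
{
	if (dirty && time(NULL) - first_dirty >= FLUSH_DELAY_SEC) {
		printf("flushing coalesced metadata update\n");
		dirty = false;
	}
}

int main(void)
{
	mark_dirty();
	mark_dirty();			/* coalesced with the first mark */
	flush_if_due();			/* too early, nothing happens */
	printf("dirty=%d, waiting for the delay to pass...\n", dirty);
	sleep(FLUSH_DELAY_SEC);
	flush_if_due();			/* now the single flush runs */
	return 0;
}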
+ */ +static void hfs_write_super(struct super_block *sb) +{ + lock_super(sb); + sb->s_dirt = 0; + + /* sync everything to the buffers */ + if (!(sb->s_flags & MS_RDONLY)) + hfs_mdb_commit(sb); + unlock_super(sb); +} + static int hfs_sync_fs(struct super_block *sb, int wait) { + lock_super(sb); hfs_mdb_commit(sb); + sb->s_dirt = 0; + unlock_super(sb); + return 0; } @@ -44,44 +78,13 @@ static int hfs_sync_fs(struct super_block *sb, int wait) */ static void hfs_put_super(struct super_block *sb) { - cancel_delayed_work_sync(&HFS_SB(sb)->mdb_work); + if (sb->s_dirt) + hfs_write_super(sb); hfs_mdb_close(sb); /* release the MDB's resources */ hfs_mdb_put(sb); } -static void flush_mdb(struct work_struct *work) -{ - struct hfs_sb_info *sbi; - struct super_block *sb; - - sbi = container_of(work, struct hfs_sb_info, mdb_work.work); - sb = sbi->sb; - - spin_lock(&sbi->work_lock); - sbi->work_queued = 0; - spin_unlock(&sbi->work_lock); - - hfs_mdb_commit(sb); -} - -void hfs_mark_mdb_dirty(struct super_block *sb) -{ - struct hfs_sb_info *sbi = HFS_SB(sb); - unsigned long delay; - - if (sb->s_flags & MS_RDONLY) - return; - - spin_lock(&sbi->work_lock); - if (!sbi->work_queued) { - delay = msecs_to_jiffies(dirty_writeback_interval * 10); - queue_delayed_work(system_long_wq, &sbi->mdb_work, delay); - sbi->work_queued = 1; - } - spin_unlock(&sbi->work_lock); -} - /* * hfs_statfs() * @@ -181,6 +184,7 @@ static const struct super_operations hfs_super_operations = { .write_inode = hfs_write_inode, .evict_inode = hfs_evict_inode, .put_super = hfs_put_super, + .write_super = hfs_write_super, .sync_fs = hfs_sync_fs, .statfs = hfs_statfs, .remount_fs = hfs_remount, @@ -383,10 +387,7 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent) if (!sbi) return -ENOMEM; - sbi->sb = sb; sb->s_fs_info = sbi; - spin_lock_init(&sbi->work_lock); - INIT_DELAYED_WORK(&sbi->mdb_work, flush_mdb); res = -EINVAL; if (!parse_options((char *)data, sbi)) { diff --git a/trunk/fs/hfs/sysdep.c b/trunk/fs/hfs/sysdep.c index 91b91fd3a901..19cf291eb91f 100644 --- a/trunk/fs/hfs/sysdep.c +++ b/trunk/fs/hfs/sysdep.c @@ -13,12 +13,12 @@ /* dentry case-handling: just lowercase everything */ -static int hfs_revalidate_dentry(struct dentry *dentry, unsigned int flags) +static int hfs_revalidate_dentry(struct dentry *dentry, struct nameidata *nd) { struct inode *inode; int diff; - if (flags & LOOKUP_RCU) + if (nd->flags & LOOKUP_RCU) return -ECHILD; inode = dentry->d_inode; diff --git a/trunk/fs/hfsplus/bitmap.c b/trunk/fs/hfsplus/bitmap.c index 4cfbe2edd296..1cad80c789cb 100644 --- a/trunk/fs/hfsplus/bitmap.c +++ b/trunk/fs/hfsplus/bitmap.c @@ -153,7 +153,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, kunmap(page); *max = offset + (curr - pptr) * 32 + i - start; sbi->free_blocks -= *max; - hfsplus_mark_mdb_dirty(sb); + sb->s_dirt = 1; dprint(DBG_BITMAP, "-> %u,%u\n", start, *max); out: mutex_unlock(&sbi->alloc_mutex); @@ -228,7 +228,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count) set_page_dirty(page); kunmap(page); sbi->free_blocks += len; - hfsplus_mark_mdb_dirty(sb); + sb->s_dirt = 1; mutex_unlock(&sbi->alloc_mutex); return 0; diff --git a/trunk/fs/hfsplus/dir.c b/trunk/fs/hfsplus/dir.c index 6b9f921ef2fa..26b53fb09f68 100644 --- a/trunk/fs/hfsplus/dir.c +++ b/trunk/fs/hfsplus/dir.c @@ -25,7 +25,7 @@ static inline void hfsplus_instantiate(struct dentry *dentry, /* Find the entry inside dir named dentry->d_name */ static struct dentry *hfsplus_lookup(struct inode *dir, struct 
dentry *dentry, - unsigned int flags) + struct nameidata *nd) { struct inode *inode = NULL; struct hfs_find_data fd; @@ -316,7 +316,7 @@ static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir, inode->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(inode); sbi->file_count++; - hfsplus_mark_mdb_dirty(dst_dir->i_sb); + dst_dir->i_sb->s_dirt = 1; out: mutex_unlock(&sbi->vh_mutex); return res; @@ -465,7 +465,7 @@ static int hfsplus_mknod(struct inode *dir, struct dentry *dentry, } static int hfsplus_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool excl) + struct nameidata *nd) { return hfsplus_mknod(dir, dentry, mode, 0); } diff --git a/trunk/fs/hfsplus/hfsplus_fs.h b/trunk/fs/hfsplus/hfsplus_fs.h index 558dbb463a4e..4e75ac646fea 100644 --- a/trunk/fs/hfsplus/hfsplus_fs.h +++ b/trunk/fs/hfsplus/hfsplus_fs.h @@ -153,11 +153,8 @@ struct hfsplus_sb_info { gid_t gid; int part, session; - unsigned long flags; - int work_queued; /* non-zero delayed work is queued */ - struct delayed_work sync_work; /* FS sync delayed work */ - spinlock_t work_lock; /* protects sync_work and work_queued */ + unsigned long flags; }; #define HFSPLUS_SB_WRITEBACKUP 0 @@ -431,7 +428,7 @@ int hfsplus_show_options(struct seq_file *, struct dentry *); /* super.c */ struct inode *hfsplus_iget(struct super_block *, unsigned long); -void hfsplus_mark_mdb_dirty(struct super_block *sb); +int hfsplus_sync_fs(struct super_block *sb, int wait); /* tables.c */ extern u16 hfsplus_case_fold_table[]; diff --git a/trunk/fs/hfsplus/inode.c b/trunk/fs/hfsplus/inode.c index 3d8b4a675ba0..82b69ee4dacc 100644 --- a/trunk/fs/hfsplus/inode.c +++ b/trunk/fs/hfsplus/inode.c @@ -168,7 +168,7 @@ const struct dentry_operations hfsplus_dentry_operations = { }; static struct dentry *hfsplus_file_lookup(struct inode *dir, - struct dentry *dentry, unsigned int flags) + struct dentry *dentry, struct nameidata *nd) { struct hfs_find_data fd; struct super_block *sb = dir->i_sb; @@ -431,7 +431,7 @@ struct inode *hfsplus_new_inode(struct super_block *sb, umode_t mode) sbi->file_count++; insert_inode_hash(inode); mark_inode_dirty(inode); - hfsplus_mark_mdb_dirty(sb); + sb->s_dirt = 1; return inode; } @@ -442,7 +442,7 @@ void hfsplus_delete_inode(struct inode *inode) if (S_ISDIR(inode->i_mode)) { HFSPLUS_SB(sb)->folder_count--; - hfsplus_mark_mdb_dirty(sb); + sb->s_dirt = 1; return; } HFSPLUS_SB(sb)->file_count--; @@ -455,7 +455,7 @@ void hfsplus_delete_inode(struct inode *inode) inode->i_size = 0; hfsplus_file_truncate(inode); } - hfsplus_mark_mdb_dirty(sb); + sb->s_dirt = 1; } void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork) diff --git a/trunk/fs/hfsplus/super.c b/trunk/fs/hfsplus/super.c index 473332098013..a9bca4b8768b 100644 --- a/trunk/fs/hfsplus/super.c +++ b/trunk/fs/hfsplus/super.c @@ -124,7 +124,7 @@ static int hfsplus_system_write_inode(struct inode *inode) if (fork->total_size != cpu_to_be64(inode->i_size)) { set_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags); - hfsplus_mark_mdb_dirty(inode->i_sb); + inode->i_sb->s_dirt = 1; } hfsplus_inode_write_fork(inode, fork); if (tree) @@ -161,7 +161,7 @@ static void hfsplus_evict_inode(struct inode *inode) } } -static int hfsplus_sync_fs(struct super_block *sb, int wait) +int hfsplus_sync_fs(struct super_block *sb, int wait) { struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); struct hfsplus_vh *vhdr = sbi->s_vhdr; @@ -171,7 +171,9 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait) if (!wait) return 0; - dprint(DBG_SUPER, 
"hfsplus_sync_fs\n"); + dprint(DBG_SUPER, "hfsplus_write_super\n"); + + sb->s_dirt = 0; /* * Explicitly write out the special metadata inodes. @@ -224,34 +226,12 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait) return error; } -static void delayed_sync_fs(struct work_struct *work) -{ - struct hfsplus_sb_info *sbi; - - sbi = container_of(work, struct hfsplus_sb_info, sync_work.work); - - spin_lock(&sbi->work_lock); - sbi->work_queued = 0; - spin_unlock(&sbi->work_lock); - - hfsplus_sync_fs(sbi->alloc_file->i_sb, 1); -} - -void hfsplus_mark_mdb_dirty(struct super_block *sb) +static void hfsplus_write_super(struct super_block *sb) { - struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); - unsigned long delay; - - if (sb->s_flags & MS_RDONLY) - return; - - spin_lock(&sbi->work_lock); - if (!sbi->work_queued) { - delay = msecs_to_jiffies(dirty_writeback_interval * 10); - queue_delayed_work(system_long_wq, &sbi->sync_work, delay); - sbi->work_queued = 1; - } - spin_unlock(&sbi->work_lock); + if (!(sb->s_flags & MS_RDONLY)) + hfsplus_sync_fs(sb, 1); + else + sb->s_dirt = 0; } static void hfsplus_put_super(struct super_block *sb) @@ -260,7 +240,8 @@ static void hfsplus_put_super(struct super_block *sb) dprint(DBG_SUPER, "hfsplus_put_super\n"); - cancel_delayed_work_sync(&sbi->sync_work); + if (!sb->s_fs_info) + return; if (!(sb->s_flags & MS_RDONLY) && sbi->s_vhdr) { struct hfsplus_vh *vhdr = sbi->s_vhdr; @@ -347,6 +328,7 @@ static const struct super_operations hfsplus_sops = { .write_inode = hfsplus_write_inode, .evict_inode = hfsplus_evict_inode, .put_super = hfsplus_put_super, + .write_super = hfsplus_write_super, .sync_fs = hfsplus_sync_fs, .statfs = hfsplus_statfs, .remount_fs = hfsplus_remount, @@ -373,8 +355,6 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) sb->s_fs_info = sbi; mutex_init(&sbi->alloc_mutex); mutex_init(&sbi->vh_mutex); - spin_lock_init(&sbi->work_lock); - INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs); hfsplus_fill_defaults(sbi); err = -EINVAL; diff --git a/trunk/fs/hostfs/hostfs_kern.c b/trunk/fs/hostfs/hostfs_kern.c index 124146543aa7..2afa5bbccf9b 100644 --- a/trunk/fs/hostfs/hostfs_kern.c +++ b/trunk/fs/hostfs/hostfs_kern.c @@ -553,7 +553,7 @@ static int read_name(struct inode *ino, char *name) } int hostfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool excl) + struct nameidata *nd) { struct inode *inode; char *name; @@ -595,7 +595,7 @@ int hostfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, } struct dentry *hostfs_lookup(struct inode *ino, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { struct inode *inode; char *name; diff --git a/trunk/fs/hpfs/dir.c b/trunk/fs/hpfs/dir.c index 78e12b2e0ea2..b8472f803f4e 100644 --- a/trunk/fs/hpfs/dir.c +++ b/trunk/fs/hpfs/dir.c @@ -189,7 +189,7 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir) * to tell read_inode to read fnode or not. 
*/ -struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) +struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { const unsigned char *name = dentry->d_name.name; unsigned len = dentry->d_name.len; diff --git a/trunk/fs/hpfs/hpfs_fn.h b/trunk/fs/hpfs/hpfs_fn.h index ac1ead194db5..c07ef1f1ced6 100644 --- a/trunk/fs/hpfs/hpfs_fn.h +++ b/trunk/fs/hpfs/hpfs_fn.h @@ -220,7 +220,7 @@ extern const struct dentry_operations hpfs_dentry_operations; /* dir.c */ -struct dentry *hpfs_lookup(struct inode *, struct dentry *, unsigned int); +struct dentry *hpfs_lookup(struct inode *, struct dentry *, struct nameidata *); extern const struct file_operations hpfs_dir_ops; /* dnode.c */ diff --git a/trunk/fs/hpfs/namei.c b/trunk/fs/hpfs/namei.c index bc9082482f68..9083ef8af58c 100644 --- a/trunk/fs/hpfs/namei.c +++ b/trunk/fs/hpfs/namei.c @@ -115,7 +115,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) return err; } -static int hpfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) +static int hpfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd) { const unsigned char *name = dentry->d_name.name; unsigned len = dentry->d_name.len; diff --git a/trunk/fs/hppfs/hppfs.c b/trunk/fs/hppfs/hppfs.c index c1dffe47fde2..d4f93b52cec5 100644 --- a/trunk/fs/hppfs/hppfs.c +++ b/trunk/fs/hppfs/hppfs.c @@ -138,7 +138,7 @@ static int file_removed(struct dentry *dentry, const char *file) } static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { struct dentry *proc_dentry, *parent; struct qstr *name = &dentry->d_name; @@ -420,7 +420,8 @@ static int hppfs_open(struct inode *inode, struct file *file) { const struct cred *cred = file->f_cred; struct hppfs_private *data; - struct path path; + struct vfsmount *proc_mnt; + struct dentry *proc_dentry; char *host_file; int err, fd, type, filter; @@ -433,11 +434,12 @@ static int hppfs_open(struct inode *inode, struct file *file) if (host_file == NULL) goto out_free2; - path.mnt = inode->i_sb->s_fs_info; - path.dentry = HPPFS_I(inode)->proc_dentry; + proc_dentry = HPPFS_I(inode)->proc_dentry; + proc_mnt = inode->i_sb->s_fs_info; /* XXX This isn't closed anywhere */ - data->proc_file = dentry_open(&path, file_mode(file->f_mode), cred); + data->proc_file = dentry_open(dget(proc_dentry), mntget(proc_mnt), + file_mode(file->f_mode), cred); err = PTR_ERR(data->proc_file); if (IS_ERR(data->proc_file)) goto out_free1; @@ -482,7 +484,8 @@ static int hppfs_dir_open(struct inode *inode, struct file *file) { const struct cred *cred = file->f_cred; struct hppfs_private *data; - struct path path; + struct vfsmount *proc_mnt; + struct dentry *proc_dentry; int err; err = -ENOMEM; @@ -490,9 +493,10 @@ static int hppfs_dir_open(struct inode *inode, struct file *file) if (data == NULL) goto out; - path.mnt = inode->i_sb->s_fs_info; - path.dentry = HPPFS_I(inode)->proc_dentry; - data->proc_file = dentry_open(&path, file_mode(file->f_mode), cred); + proc_dentry = HPPFS_I(inode)->proc_dentry; + proc_mnt = inode->i_sb->s_fs_info; + data->proc_file = dentry_open(dget(proc_dentry), mntget(proc_mnt), + file_mode(file->f_mode), cred); err = PTR_ERR(data->proc_file); if (IS_ERR(data->proc_file)) goto out_free; diff --git a/trunk/fs/hugetlbfs/inode.c b/trunk/fs/hugetlbfs/inode.c index e13e9bdb0bf5..cc9281b6c628 100644 --- a/trunk/fs/hugetlbfs/inode.c +++ b/trunk/fs/hugetlbfs/inode.c 
@@ -565,7 +565,7 @@ static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mod return retval; } -static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) +static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd) { return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0); } diff --git a/trunk/fs/inode.c b/trunk/fs/inode.c index 775cbabd4fa5..c99163b1b310 100644 --- a/trunk/fs/inode.c +++ b/trunk/fs/inode.c @@ -182,7 +182,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode) } inode->i_private = NULL; inode->i_mapping = mapping; - INIT_HLIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */ + INIT_LIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */ #ifdef CONFIG_FS_POSIX_ACL inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED; #endif diff --git a/trunk/fs/internal.h b/trunk/fs/internal.h index a6fd56c68b11..18bc216ea09d 100644 --- a/trunk/fs/internal.h +++ b/trunk/fs/internal.h @@ -41,11 +41,6 @@ static inline int __sync_blockdev(struct block_device *bdev, int wait) */ extern void __init chrdev_init(void); -/* - * namei.c - */ -extern int __inode_permission(struct inode *, int); - /* * namespace.c */ @@ -55,6 +50,8 @@ extern int copy_mount_string(const void __user *, char **); extern struct vfsmount *lookup_mnt(struct path *); extern int finish_automount(struct vfsmount *, struct path *); +extern void mnt_make_longterm(struct vfsmount *); +extern void mnt_make_shortterm(struct vfsmount *); extern int sb_prepare_remount_readonly(struct super_block *); extern void __init mnt_init(void); @@ -87,6 +84,9 @@ extern struct super_block *user_get_super(dev_t); /* * open.c */ +struct nameidata; +extern struct file *nameidata_to_filp(struct nameidata *); +extern void release_open_intent(struct nameidata *); struct open_flags { int open_flag; umode_t mode; diff --git a/trunk/fs/isofs/isofs.h b/trunk/fs/isofs/isofs.h index 3620ad1ea9bc..0e73f63d9274 100644 --- a/trunk/fs/isofs/isofs.h +++ b/trunk/fs/isofs/isofs.h @@ -114,7 +114,7 @@ extern int isofs_name_translate(struct iso_directory_record *, char *, struct in int get_joliet_filename(struct iso_directory_record *, unsigned char *, struct inode *); int get_acorn_filename(struct iso_directory_record *, char *, struct inode *); -extern struct dentry *isofs_lookup(struct inode *, struct dentry *, unsigned int flags); +extern struct dentry *isofs_lookup(struct inode *, struct dentry *, struct nameidata *); extern struct buffer_head *isofs_bread(struct inode *, sector_t); extern int isofs_get_blocks(struct inode *, sector_t, struct buffer_head **, unsigned long); diff --git a/trunk/fs/isofs/namei.c b/trunk/fs/isofs/namei.c index c167028844ed..1e2946f2a69e 100644 --- a/trunk/fs/isofs/namei.c +++ b/trunk/fs/isofs/namei.c @@ -163,7 +163,7 @@ isofs_find_entry(struct inode *dir, struct dentry *dentry, return 0; } -struct dentry *isofs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) +struct dentry *isofs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { int found; unsigned long uninitialized_var(block); diff --git a/trunk/fs/jffs2/dir.c b/trunk/fs/jffs2/dir.c index ad7774d32095..b56018896d5e 100644 --- a/trunk/fs/jffs2/dir.c +++ b/trunk/fs/jffs2/dir.c @@ -25,9 +25,9 @@ static int jffs2_readdir (struct file *, void *, filldir_t); static int jffs2_create (struct inode *,struct dentry *,umode_t, - bool); + struct nameidata *); static struct dentry *jffs2_lookup (struct inode *,struct 
dentry *, - unsigned int); + struct nameidata *); static int jffs2_link (struct dentry *,struct inode *,struct dentry *); static int jffs2_unlink (struct inode *,struct dentry *); static int jffs2_symlink (struct inode *,struct dentry *,const char *); @@ -74,7 +74,7 @@ const struct inode_operations jffs2_dir_inode_operations = nice and simple */ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target, - unsigned int flags) + struct nameidata *nd) { struct jffs2_inode_info *dir_f; struct jffs2_full_dirent *fd = NULL, *fd_list; @@ -175,7 +175,7 @@ static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir) static int jffs2_create(struct inode *dir_i, struct dentry *dentry, - umode_t mode, bool excl) + umode_t mode, struct nameidata *nd) { struct jffs2_raw_inode *ri; struct jffs2_inode_info *f, *dir_f; @@ -226,8 +226,8 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, __func__, inode->i_ino, inode->i_mode, inode->i_nlink, f->inocache->pino_nlink, inode->i_mapping->nrpages); - unlock_new_inode(inode); d_instantiate(dentry, inode); + unlock_new_inode(inode); return 0; fail: @@ -446,8 +446,8 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char mutex_unlock(&dir_f->sem); jffs2_complete_reservation(c); - unlock_new_inode(inode); d_instantiate(dentry, inode); + unlock_new_inode(inode); return 0; fail: @@ -591,8 +591,8 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, umode_t mode mutex_unlock(&dir_f->sem); jffs2_complete_reservation(c); - unlock_new_inode(inode); d_instantiate(dentry, inode); + unlock_new_inode(inode); return 0; fail: @@ -766,8 +766,8 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode mutex_unlock(&dir_f->sem); jffs2_complete_reservation(c); - unlock_new_inode(inode); d_instantiate(dentry, inode); + unlock_new_inode(inode); return 0; fail: diff --git a/trunk/fs/jfs/namei.c b/trunk/fs/jfs/namei.c index 3b91a7ad6086..07c91ca6017d 100644 --- a/trunk/fs/jfs/namei.c +++ b/trunk/fs/jfs/namei.c @@ -73,7 +73,7 @@ static inline void free_ea_wmap(struct inode *inode) * */ static int jfs_create(struct inode *dip, struct dentry *dentry, umode_t mode, - bool excl) + struct nameidata *nd) { int rc = 0; tid_t tid; /* transaction id */ @@ -176,8 +176,8 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, umode_t mode, unlock_new_inode(ip); iput(ip); } else { - unlock_new_inode(ip); d_instantiate(dentry, ip); + unlock_new_inode(ip); } out2: @@ -309,8 +309,8 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode) unlock_new_inode(ip); iput(ip); } else { - unlock_new_inode(ip); d_instantiate(dentry, ip); + unlock_new_inode(ip); } out2: @@ -1043,8 +1043,8 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry, unlock_new_inode(ip); iput(ip); } else { - unlock_new_inode(ip); d_instantiate(dentry, ip); + unlock_new_inode(ip); } out2: @@ -1424,8 +1424,8 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry, unlock_new_inode(ip); iput(ip); } else { - unlock_new_inode(ip); d_instantiate(dentry, ip); + unlock_new_inode(ip); } out1: @@ -1436,7 +1436,7 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry, return rc; } -static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, unsigned int flags) +static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, struct nameidata *nd) { struct btstack btstack; ino_t inum; @@ -1570,7 +1570,7 @@ static int 
jfs_ci_compare(const struct dentry *parent, return result; } -static int jfs_ci_revalidate(struct dentry *dentry, unsigned int flags) +static int jfs_ci_revalidate(struct dentry *dentry, struct nameidata *nd) { /* * This is not negative dentry. Always valid. @@ -1589,7 +1589,7 @@ static int jfs_ci_revalidate(struct dentry *dentry, unsigned int flags) * This may be nfsd (or something), anyway, we can't see the * intent of this. So, since this can be for creation, drop it. */ - if (!flags) + if (!nd) return 0; /* @@ -1597,7 +1597,7 @@ static int jfs_ci_revalidate(struct dentry *dentry, unsigned int flags) * case sensitive name which is specified by user if this is * for creation. */ - if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) + if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) return 0; return 1; } diff --git a/trunk/fs/jfs/super.c b/trunk/fs/jfs/super.c index c55c7452d285..4a82950f412f 100644 --- a/trunk/fs/jfs/super.c +++ b/trunk/fs/jfs/super.c @@ -601,11 +601,6 @@ static int jfs_sync_fs(struct super_block *sb, int wait) /* log == NULL indicates read-only mount */ if (log) { - /* - * Write quota structures to quota file, sync_blockdev() will - * write them to disk later - */ - dquot_writeback_dquots(sb, -1); jfs_flush_journal(log, wait); jfs_syncpt(log, 0); } diff --git a/trunk/fs/libfs.c b/trunk/fs/libfs.c index a74cb1725ac6..f86ec27a4230 100644 --- a/trunk/fs/libfs.c +++ b/trunk/fs/libfs.c @@ -53,7 +53,7 @@ static int simple_delete_dentry(const struct dentry *dentry) * Lookup the data. This is trivial - if the dentry didn't already * exist, we know it is negative. Set d_op to delete negative dentries. */ -struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) +struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { static const struct dentry_operations simple_dentry_operations = { .d_delete = simple_delete_dentry, @@ -222,15 +222,15 @@ struct dentry *mount_pseudo(struct file_system_type *fs_type, char *name, const struct super_operations *ops, const struct dentry_operations *dops, unsigned long magic) { - struct super_block *s; + struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL); struct dentry *dentry; struct inode *root; struct qstr d_name = QSTR_INIT(name, strlen(name)); - s = sget(fs_type, NULL, set_anon_super, MS_NOUSER, NULL); if (IS_ERR(s)) return ERR_CAST(s); + s->s_flags = MS_NOUSER; s->s_maxbytes = MAX_LFS_FILESIZE; s->s_blocksize = PAGE_SIZE; s->s_blocksize_bits = PAGE_SHIFT; diff --git a/trunk/fs/locks.c b/trunk/fs/locks.c index 82c353304f9e..fce6238d52c1 100644 --- a/trunk/fs/locks.c +++ b/trunk/fs/locks.c @@ -308,7 +308,7 @@ static int flock_make_lock(struct file *filp, struct file_lock **lock, return 0; } -static int assign_type(struct file_lock *fl, long type) +static int assign_type(struct file_lock *fl, int type) { switch (type) { case F_RDLCK: @@ -445,7 +445,7 @@ static const struct lock_manager_operations lease_manager_ops = { /* * Initialize a lease, use the default lock manager operations */ -static int lease_init(struct file *filp, long type, struct file_lock *fl) +static int lease_init(struct file *filp, int type, struct file_lock *fl) { if (assign_type(fl, type) != 0) return -EINVAL; @@ -463,7 +463,7 @@ static int lease_init(struct file *filp, long type, struct file_lock *fl) } /* Allocate a file_lock initialised to this type of lease */ -static struct file_lock *lease_alloc(struct file *filp, long type) +static struct file_lock *lease_alloc(struct file 
*filp, int type) { struct file_lock *fl = locks_alloc_lock(); int error = -ENOMEM; diff --git a/trunk/fs/logfs/dir.c b/trunk/fs/logfs/dir.c index 26e4a941532f..bea5d1b9954b 100644 --- a/trunk/fs/logfs/dir.c +++ b/trunk/fs/logfs/dir.c @@ -349,7 +349,7 @@ static void logfs_set_name(struct logfs_disk_dentry *dd, struct qstr *name) } static struct dentry *logfs_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { struct page *page; struct logfs_disk_dentry *dd; @@ -502,7 +502,7 @@ static int logfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) } static int logfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool excl) + struct nameidata *nd) { struct inode *inode; diff --git a/trunk/fs/logfs/super.c b/trunk/fs/logfs/super.c index 345c24b8a6f8..97bca623d893 100644 --- a/trunk/fs/logfs/super.c +++ b/trunk/fs/logfs/super.c @@ -519,7 +519,7 @@ static struct dentry *logfs_get_sb_device(struct logfs_super *super, log_super("LogFS: Start mount %x\n", mount_count++); err = -EINVAL; - sb = sget(type, logfs_sb_test, logfs_sb_set, flags | MS_NOATIME, super); + sb = sget(type, logfs_sb_test, logfs_sb_set, super); if (IS_ERR(sb)) { super->s_devops->put_device(super); kfree(super); @@ -542,6 +542,7 @@ static struct dentry *logfs_get_sb_device(struct logfs_super *super, sb->s_maxbytes = (1ull << 43) - 1; sb->s_max_links = LOGFS_LINK_MAX; sb->s_op = &logfs_super_operations; + sb->s_flags = flags | MS_NOATIME; err = logfs_read_sb(sb, sb->s_flags & MS_RDONLY); if (err) diff --git a/trunk/fs/minix/namei.c b/trunk/fs/minix/namei.c index 0db73d9dd668..2d0ee1786305 100644 --- a/trunk/fs/minix/namei.c +++ b/trunk/fs/minix/namei.c @@ -18,7 +18,7 @@ static int add_nondir(struct dentry *dentry, struct inode *inode) return err; } -static struct dentry *minix_lookup(struct inode * dir, struct dentry *dentry, unsigned int flags) +static struct dentry *minix_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd) { struct inode * inode = NULL; ino_t ino; @@ -55,7 +55,7 @@ static int minix_mknod(struct inode * dir, struct dentry *dentry, umode_t mode, } static int minix_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool excl) + struct nameidata *nd) { return minix_mknod(dir, dentry, mode, 0); } diff --git a/trunk/fs/mount.h b/trunk/fs/mount.h index 4f291f9de641..4ef36d93e5a2 100644 --- a/trunk/fs/mount.h +++ b/trunk/fs/mount.h @@ -22,6 +22,7 @@ struct mount { struct vfsmount mnt; #ifdef CONFIG_SMP struct mnt_pcp __percpu *mnt_pcp; + atomic_t mnt_longterm; /* how many of the refs are longterm */ #else int mnt_count; int mnt_writers; @@ -48,8 +49,6 @@ struct mount { int mnt_ghosts; }; -#define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */ - static inline struct mount *real_mount(struct vfsmount *mnt) { return container_of(mnt, struct mount, mnt); @@ -60,12 +59,6 @@ static inline int mnt_has_parent(struct mount *mnt) return mnt != mnt->mnt_parent; } -static inline int is_mounted(struct vfsmount *mnt) -{ - /* neither detached nor internal? 
*/ - return !IS_ERR_OR_NULL(real_mount(mnt)); -} - extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *, int); static inline void get_mnt_ns(struct mnt_namespace *ns) @@ -74,12 +67,10 @@ static inline void get_mnt_ns(struct mnt_namespace *ns) } struct proc_mounts { - struct seq_file m; + struct seq_file m; /* must be the first element */ struct mnt_namespace *ns; struct path root; int (*show)(struct seq_file *, struct vfsmount *); }; -#define proc_mounts(p) (container_of((p), struct proc_mounts, m)) - extern const struct seq_operations mounts_op; diff --git a/trunk/fs/namei.c b/trunk/fs/namei.c index 2ccc35c4dc24..7d694194024a 100644 --- a/trunk/fs/namei.c +++ b/trunk/fs/namei.c @@ -315,22 +315,31 @@ static inline int do_inode_permission(struct inode *inode, int mask) } /** - * __inode_permission - Check for access rights to a given inode - * @inode: Inode to check permission on - * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) + * inode_permission - check for access rights to a given inode + * @inode: inode to check permission on + * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...) * - * Check for read/write/execute permissions on an inode. + * Used to check for read/write/execute permissions on an inode. + * We use "fsuid" for this, letting us set arbitrary permissions + * for filesystem access without changing the "normal" uids which + * are used for other things. * * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask. - * - * This does not check for a read-only file system. You probably want - * inode_permission(). */ -int __inode_permission(struct inode *inode, int mask) +int inode_permission(struct inode *inode, int mask) { int retval; if (unlikely(mask & MAY_WRITE)) { + umode_t mode = inode->i_mode; + + /* + * Nobody gets write access to a read-only fs. + */ + if (IS_RDONLY(inode) && + (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) + return -EROFS; + /* * Nobody gets write access to an immutable file. */ @@ -349,47 +358,6 @@ int __inode_permission(struct inode *inode, int mask) return security_inode_permission(inode, mask); } -/** - * sb_permission - Check superblock-level permissions - * @sb: Superblock of inode to check permission on - * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) - * - * Separate out file-system wide checks from inode-specific permission checks. - */ -static int sb_permission(struct super_block *sb, struct inode *inode, int mask) -{ - if (unlikely(mask & MAY_WRITE)) { - umode_t mode = inode->i_mode; - - /* Nobody gets write access to a read-only fs. */ - if ((sb->s_flags & MS_RDONLY) && - (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) - return -EROFS; - } - return 0; -} - -/** - * inode_permission - Check for access rights to a given inode - * @inode: Inode to check permission on - * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) - * - * Check for read/write/execute permissions on an inode. We use fs[ug]id for - * this, letting us set arbitrary permissions for filesystem access without - * changing the "normal" UIDs which are used for other things. - * - * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask. 
- */ -int inode_permission(struct inode *inode, int mask) -{ - int retval; - - retval = sb_permission(inode->i_sb, inode, mask); - if (retval) - return retval; - return __inode_permission(inode, mask); -} - /** * path_get - get a reference to a path * @path: path to get the reference to @@ -427,18 +395,6 @@ EXPORT_SYMBOL(path_put); * to restart the path walk from the beginning in ref-walk mode. */ -static inline void lock_rcu_walk(void) -{ - br_read_lock(&vfsmount_lock); - rcu_read_lock(); -} - -static inline void unlock_rcu_walk(void) -{ - rcu_read_unlock(); - br_read_unlock(&vfsmount_lock); -} - /** * unlazy_walk - try to switch to ref-walk mode. * @nd: nameidata pathwalk data @@ -492,7 +448,8 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry) } mntget(nd->path.mnt); - unlock_rcu_walk(); + rcu_read_unlock(); + br_read_unlock(&vfsmount_lock); nd->flags &= ~LOOKUP_RCU; return 0; @@ -506,9 +463,25 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry) return -ECHILD; } -static inline int d_revalidate(struct dentry *dentry, unsigned int flags) +/** + * release_open_intent - free up open intent resources + * @nd: pointer to nameidata + */ +void release_open_intent(struct nameidata *nd) { - return dentry->d_op->d_revalidate(dentry, flags); + struct file *file = nd->intent.open.file; + + if (file && !IS_ERR(file)) { + if (file->f_path.dentry == NULL) + put_filp(file); + else + fput(file); + } +} + +static inline int d_revalidate(struct dentry *dentry, struct nameidata *nd) +{ + return dentry->d_op->d_revalidate(dentry, nd); } /** @@ -533,13 +506,15 @@ static int complete_walk(struct nameidata *nd) spin_lock(&dentry->d_lock); if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) { spin_unlock(&dentry->d_lock); - unlock_rcu_walk(); + rcu_read_unlock(); + br_read_unlock(&vfsmount_lock); return -ECHILD; } BUG_ON(nd->inode != dentry->d_inode); spin_unlock(&dentry->d_lock); mntget(nd->path.mnt); - unlock_rcu_walk(); + rcu_read_unlock(); + br_read_unlock(&vfsmount_lock); } if (likely(!(nd->flags & LOOKUP_JUMPED))) @@ -552,7 +527,7 @@ static int complete_walk(struct nameidata *nd) return 0; /* Note: we do not d_invalidate() */ - status = d_revalidate(dentry, nd->flags); + status = d_revalidate(dentry, nd); if (status > 0) return 0; @@ -627,25 +602,10 @@ static inline void path_to_nameidata(const struct path *path, nd->path.dentry = path->dentry; } -/* - * Helper to directly jump to a known parsed path from ->follow_link, - * caller must have taken a reference to path beforehand. 
- */ -void nd_jump_link(struct nameidata *nd, struct path *path) -{ - path_put(&nd->path); - - nd->path = *path; - nd->inode = nd->path.dentry->d_inode; - nd->flags |= LOOKUP_JUMPED; - - BUG_ON(nd->inode->i_op->follow_link); -} - static inline void put_link(struct nameidata *nd, struct path *link, void *cookie) { struct inode *inode = link->dentry->d_inode; - if (inode->i_op->put_link) + if (!IS_ERR(cookie) && inode->i_op->put_link) inode->i_op->put_link(link->dentry, nd, cookie); path_put(link); } @@ -653,19 +613,19 @@ static inline void put_link(struct nameidata *nd, struct path *link, void *cooki static __always_inline int follow_link(struct path *link, struct nameidata *nd, void **p) { - struct dentry *dentry = link->dentry; int error; - char *s; + struct dentry *dentry = link->dentry; BUG_ON(nd->flags & LOOKUP_RCU); if (link->mnt == nd->path.mnt) mntget(link->mnt); - error = -ELOOP; - if (unlikely(current->total_link_count >= 40)) - goto out_put_nd_path; - + if (unlikely(current->total_link_count >= 40)) { + *p = ERR_PTR(-ELOOP); /* no ->put_link(), please */ + path_put(&nd->path); + return -ELOOP; + } cond_resched(); current->total_link_count++; @@ -673,28 +633,30 @@ follow_link(struct path *link, struct nameidata *nd, void **p) nd_set_link(nd, NULL); error = security_inode_follow_link(link->dentry, nd); - if (error) - goto out_put_nd_path; + if (error) { + *p = ERR_PTR(error); /* no ->put_link(), please */ + path_put(&nd->path); + return error; + } nd->last_type = LAST_BIND; *p = dentry->d_inode->i_op->follow_link(dentry, nd); error = PTR_ERR(*p); - if (IS_ERR(*p)) - goto out_put_nd_path; - - error = 0; - s = nd_get_link(nd); - if (s) { - error = __vfs_follow_link(nd, s); - if (unlikely(error)) - put_link(nd, link, *p); + if (!IS_ERR(*p)) { + char *s = nd_get_link(nd); + error = 0; + if (s) + error = __vfs_follow_link(nd, s); + else if (nd->last_type == LAST_BIND) { + nd->flags |= LOOKUP_JUMPED; + nd->inode = nd->path.dentry->d_inode; + if (nd->inode->i_op->follow_link) { + /* stepped on a _really_ weird one */ + path_put(&nd->path); + error = -ELOOP; + } + } } - - return error; - -out_put_nd_path: - path_put(&nd->path); - path_put(link); return error; } @@ -713,16 +675,6 @@ static int follow_up_rcu(struct path *path) return 1; } -/* - * follow_up - Find the mountpoint of path's vfsmount - * - * Given a path, find the mountpoint of its source file system. - * Replace @path with the path of the mountpoint in the parent mount. - * Up is towards /. - * - * Return 1 if we went up a level and 0 if we were already at the - * root. 
- */ int follow_up(struct path *path) { struct mount *mnt = real_mount(path->mnt); @@ -731,7 +683,7 @@ int follow_up(struct path *path) br_read_lock(&vfsmount_lock); parent = mnt->mnt_parent; - if (parent == mnt) { + if (&parent->mnt == path->mnt) { br_read_unlock(&vfsmount_lock); return 0; } @@ -994,7 +946,8 @@ static int follow_dotdot_rcu(struct nameidata *nd) nd->flags &= ~LOOKUP_RCU; if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; - unlock_rcu_walk(); + rcu_read_unlock(); + br_read_unlock(&vfsmount_lock); return -ECHILD; } @@ -1095,7 +1048,7 @@ static void follow_dotdot(struct nameidata *nd) * dir->d_inode->i_mutex must be held */ static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir, - unsigned int flags, bool *need_lookup) + struct nameidata *nd, bool *need_lookup) { struct dentry *dentry; int error; @@ -1106,7 +1059,7 @@ static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir, if (d_need_lookup(dentry)) { *need_lookup = true; } else if (dentry->d_flags & DCACHE_OP_REVALIDATE) { - error = d_revalidate(dentry, flags); + error = d_revalidate(dentry, nd); if (unlikely(error <= 0)) { if (error < 0) { dput(dentry); @@ -1136,7 +1089,7 @@ static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir, * dir->d_inode->i_mutex must be held */ static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { struct dentry *old; @@ -1146,7 +1099,7 @@ static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry, return ERR_PTR(-ENOENT); } - old = dir->i_op->lookup(dir, dentry, flags); + old = dir->i_op->lookup(dir, dentry, nd); if (unlikely(old)) { dput(dentry); dentry = old; @@ -1155,16 +1108,16 @@ static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry, } static struct dentry *__lookup_hash(struct qstr *name, - struct dentry *base, unsigned int flags) + struct dentry *base, struct nameidata *nd) { bool need_lookup; struct dentry *dentry; - dentry = lookup_dcache(name, base, flags, &need_lookup); + dentry = lookup_dcache(name, base, nd, &need_lookup); if (!need_lookup) return dentry; - return lookup_real(base->d_inode, dentry, flags); + return lookup_real(base->d_inode, dentry, nd); } /* @@ -1214,7 +1167,7 @@ static int lookup_fast(struct nameidata *nd, struct qstr *name, if (unlikely(d_need_lookup(dentry))) goto unlazy; if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) { - status = d_revalidate(dentry, nd->flags); + status = d_revalidate(dentry, nd); if (unlikely(status <= 0)) { if (status != -ECHILD) need_reval = 0; @@ -1244,7 +1197,7 @@ static int lookup_fast(struct nameidata *nd, struct qstr *name, } if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval) - status = d_revalidate(dentry, nd->flags); + status = d_revalidate(dentry, nd); if (unlikely(status <= 0)) { if (status < 0) { dput(dentry); @@ -1283,7 +1236,7 @@ static int lookup_slow(struct nameidata *nd, struct qstr *name, BUG_ON(nd->inode != parent->d_inode); mutex_lock(&parent->d_inode->i_mutex); - dentry = __lookup_hash(name, parent, nd->flags); + dentry = __lookup_hash(name, parent, nd); mutex_unlock(&parent->d_inode->i_mutex); if (IS_ERR(dentry)) return PTR_ERR(dentry); @@ -1331,7 +1284,8 @@ static void terminate_walk(struct nameidata *nd) nd->flags &= ~LOOKUP_RCU; if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; - unlock_rcu_walk(); + rcu_read_unlock(); + br_read_unlock(&vfsmount_lock); } } @@ -1429,10 +1383,9 @@ static inline int nested_symlink(struct path *path, 
struct nameidata *nd) void *cookie; res = follow_link(&link, nd, &cookie); - if (res) - break; - res = walk_component(nd, path, &nd->last, - nd->last_type, LOOKUP_FOLLOW); + if (!res) + res = walk_component(nd, path, &nd->last, + nd->last_type, LOOKUP_FOLLOW); put_link(nd, &link, cookie); } while (res > 0); @@ -1698,7 +1651,8 @@ static int path_init(int dfd, const char *name, unsigned int flags, nd->path = nd->root; nd->inode = inode; if (flags & LOOKUP_RCU) { - lock_rcu_walk(); + br_read_lock(&vfsmount_lock); + rcu_read_lock(); nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); } else { path_get(&nd->path); @@ -1710,7 +1664,8 @@ static int path_init(int dfd, const char *name, unsigned int flags, if (*name=='/') { if (flags & LOOKUP_RCU) { - lock_rcu_walk(); + br_read_lock(&vfsmount_lock); + rcu_read_lock(); set_root_rcu(nd); } else { set_root(nd); @@ -1722,7 +1677,8 @@ static int path_init(int dfd, const char *name, unsigned int flags, struct fs_struct *fs = current->fs; unsigned seq; - lock_rcu_walk(); + br_read_lock(&vfsmount_lock); + rcu_read_lock(); do { seq = read_seqcount_begin(&fs->seq); @@ -1757,7 +1713,8 @@ static int path_init(int dfd, const char *name, unsigned int flags, if (fput_needed) *fp = file; nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); - lock_rcu_walk(); + br_read_lock(&vfsmount_lock); + rcu_read_lock(); } else { path_get(&file->f_path); fput_light(file, fput_needed); @@ -1820,9 +1777,8 @@ static int path_lookupat(int dfd, const char *name, struct path link = path; nd->flags |= LOOKUP_PARENT; err = follow_link(&link, nd, &cookie); - if (err) - break; - err = lookup_last(nd, &path); + if (!err) + err = lookup_last(nd, &path); put_link(nd, &link, cookie); } } @@ -1865,27 +1821,9 @@ static int do_path_lookup(int dfd, const char *name, return retval; } -/* does lookup, returns the object with parent locked */ -struct dentry *kern_path_locked(const char *name, struct path *path) +int kern_path_parent(const char *name, struct nameidata *nd) { - struct nameidata nd; - struct dentry *d; - int err = do_path_lookup(AT_FDCWD, name, LOOKUP_PARENT, &nd); - if (err) - return ERR_PTR(err); - if (nd.last_type != LAST_NORM) { - path_put(&nd.path); - return ERR_PTR(-EINVAL); - } - mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); - d = __lookup_hash(&nd.last, nd.path.dentry, 0); - if (IS_ERR(d)) { - mutex_unlock(&nd.path.dentry->d_inode->i_mutex); - path_put(&nd.path); - return d; - } - *path = nd.path; - return d; + return do_path_lookup(AT_FDCWD, name, LOOKUP_PARENT, nd); } int kern_path(const char *name, unsigned int flags, struct path *path) @@ -1928,7 +1866,7 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt, */ static struct dentry *lookup_hash(struct nameidata *nd) { - return __lookup_hash(&nd->last, nd->path.dentry, nd->flags); + return __lookup_hash(&nd->last, nd->path.dentry, nd); } /** @@ -1975,7 +1913,7 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len) if (err) return ERR_PTR(err); - return __lookup_hash(&this, base, 0); + return __lookup_hash(&this, base, NULL); } int user_path_at_empty(int dfd, const char __user *name, unsigned flags, @@ -2148,9 +2086,10 @@ void unlock_rename(struct dentry *p1, struct dentry *p2) } int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool want_excl) + struct nameidata *nd) { int error = may_create(dir, dentry); + if (error) return error; @@ -2161,7 +2100,7 @@ int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, 
error = security_inode_create(dir, dentry, mode); if (error) return error; - error = dir->i_op->create(dir, dentry, mode, want_excl); + error = dir->i_op->create(dir, dentry, mode, nd); if (!error) fsnotify_create(dir, dentry); return error; @@ -2248,275 +2187,21 @@ static inline int open_to_namei_flags(int flag) return flag; } -static int may_o_create(struct path *dir, struct dentry *dentry, umode_t mode) -{ - int error = security_path_mknod(dir, dentry, mode, 0); - if (error) - return error; - - error = inode_permission(dir->dentry->d_inode, MAY_WRITE | MAY_EXEC); - if (error) - return error; - - return security_inode_create(dir->dentry->d_inode, dentry, mode); -} - -/* - * Attempt to atomically look up, create and open a file from a negative - * dentry. - * - * Returns 0 if successful. The file will have been created and attached to - * @file by the filesystem calling finish_open(). - * - * Returns 1 if the file was looked up only or didn't need creating. The - * caller will need to perform the open themselves. @path will have been - * updated to point to the new dentry. This may be negative. - * - * Returns an error code otherwise. - */ -static int atomic_open(struct nameidata *nd, struct dentry *dentry, - struct path *path, struct file *file, - const struct open_flags *op, - bool *want_write, bool need_lookup, - int *opened) -{ - struct inode *dir = nd->path.dentry->d_inode; - unsigned open_flag = open_to_namei_flags(op->open_flag); - umode_t mode; - int error; - int acc_mode; - int create_error = 0; - struct dentry *const DENTRY_NOT_SET = (void *) -1UL; - - BUG_ON(dentry->d_inode); - - /* Don't create child dentry for a dead directory. */ - if (unlikely(IS_DEADDIR(dir))) { - error = -ENOENT; - goto out; - } - - mode = op->mode & S_IALLUGO; - if ((open_flag & O_CREAT) && !IS_POSIXACL(dir)) - mode &= ~current_umask(); - - if (open_flag & O_EXCL) { - open_flag &= ~O_TRUNC; - *opened |= FILE_CREATED; - } - - /* - * Checking write permission is tricky, bacuse we don't know if we are - * going to actually need it: O_CREAT opens should work as long as the - * file exists. But checking existence breaks atomicity. The trick is - * to check access and if not granted clear O_CREAT from the flags. - * - * Another problem is returing the "right" error value (e.g. for an - * O_EXCL open we want to return EEXIST not EROFS). 
- */ - if ((open_flag & (O_CREAT | O_TRUNC)) || - (open_flag & O_ACCMODE) != O_RDONLY) { - error = mnt_want_write(nd->path.mnt); - if (!error) { - *want_write = true; - } else if (!(open_flag & O_CREAT)) { - /* - * No O_CREATE -> atomicity not a requirement -> fall - * back to lookup + open - */ - goto no_open; - } else if (open_flag & (O_EXCL | O_TRUNC)) { - /* Fall back and fail with the right error */ - create_error = error; - goto no_open; - } else { - /* No side effects, safe to clear O_CREAT */ - create_error = error; - open_flag &= ~O_CREAT; - } - } - - if (open_flag & O_CREAT) { - error = may_o_create(&nd->path, dentry, op->mode); - if (error) { - create_error = error; - if (open_flag & O_EXCL) - goto no_open; - open_flag &= ~O_CREAT; - } - } - - if (nd->flags & LOOKUP_DIRECTORY) - open_flag |= O_DIRECTORY; - - file->f_path.dentry = DENTRY_NOT_SET; - file->f_path.mnt = nd->path.mnt; - error = dir->i_op->atomic_open(dir, dentry, file, open_flag, mode, - opened); - if (error < 0) { - if (create_error && error == -ENOENT) - error = create_error; - goto out; - } - - acc_mode = op->acc_mode; - if (*opened & FILE_CREATED) { - fsnotify_create(dir, dentry); - acc_mode = MAY_OPEN; - } - - if (error) { /* returned 1, that is */ - if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) { - error = -EIO; - goto out; - } - if (file->f_path.dentry) { - dput(dentry); - dentry = file->f_path.dentry; - } - goto looked_up; - } - - /* - * We didn't have the inode before the open, so check open permission - * here. - */ - error = may_open(&file->f_path, acc_mode, open_flag); - if (error) - fput(file); - -out: - dput(dentry); - return error; - -no_open: - if (need_lookup) { - dentry = lookup_real(dir, dentry, nd->flags); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); - - if (create_error) { - int open_flag = op->open_flag; - - error = create_error; - if ((open_flag & O_EXCL)) { - if (!dentry->d_inode) - goto out; - } else if (!dentry->d_inode) { - goto out; - } else if ((open_flag & O_TRUNC) && - S_ISREG(dentry->d_inode->i_mode)) { - goto out; - } - /* will fail later, go on to get the right error */ - } - } -looked_up: - path->dentry = dentry; - path->mnt = nd->path.mnt; - return 1; -} - -/* - * Look up and maybe create and open the last component. - * - * Must be called with i_mutex held on parent. - * - * Returns 0 if the file was successfully atomically created (if necessary) and - * opened. In this case the file will be returned attached to @file. - * - * Returns 1 if the file was not completely opened at this time, though lookups - * and creations will have been performed and the dentry returned in @path will - * be positive upon return if O_CREAT was specified. If O_CREAT wasn't - * specified then a negative dentry may be returned. - * - * An error code is returned otherwise. - * - * FILE_CREATE will be set in @*opened if the dentry was created and will be - * cleared otherwise prior to returning. 
- */ -static int lookup_open(struct nameidata *nd, struct path *path, - struct file *file, - const struct open_flags *op, - bool *want_write, int *opened) -{ - struct dentry *dir = nd->path.dentry; - struct inode *dir_inode = dir->d_inode; - struct dentry *dentry; - int error; - bool need_lookup; - - *opened &= ~FILE_CREATED; - dentry = lookup_dcache(&nd->last, dir, nd->flags, &need_lookup); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); - - /* Cached positive dentry: will open in f_op->open */ - if (!need_lookup && dentry->d_inode) - goto out_no_open; - - if ((nd->flags & LOOKUP_OPEN) && dir_inode->i_op->atomic_open) { - return atomic_open(nd, dentry, path, file, op, want_write, - need_lookup, opened); - } - - if (need_lookup) { - BUG_ON(dentry->d_inode); - - dentry = lookup_real(dir_inode, dentry, nd->flags); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); - } - - /* Negative dentry, just create the file */ - if (!dentry->d_inode && (op->open_flag & O_CREAT)) { - umode_t mode = op->mode; - if (!IS_POSIXACL(dir->d_inode)) - mode &= ~current_umask(); - /* - * This write is needed to ensure that a - * rw->ro transition does not occur between - * the time when the file is created and when - * a permanent write count is taken through - * the 'struct file' in finish_open(). - */ - error = mnt_want_write(nd->path.mnt); - if (error) - goto out_dput; - *want_write = true; - *opened |= FILE_CREATED; - error = security_path_mknod(&nd->path, dentry, mode, 0); - if (error) - goto out_dput; - error = vfs_create(dir->d_inode, dentry, mode, - nd->flags & LOOKUP_EXCL); - if (error) - goto out_dput; - } -out_no_open: - path->dentry = dentry; - path->mnt = nd->path.mnt; - return 1; - -out_dput: - dput(dentry); - return error; -} - /* * Handle the last step of open() */ -static int do_last(struct nameidata *nd, struct path *path, - struct file *file, const struct open_flags *op, - int *opened, const char *pathname) +static struct file *do_last(struct nameidata *nd, struct path *path, + const struct open_flags *op, const char *pathname) { struct dentry *dir = nd->path.dentry; + struct dentry *dentry; int open_flag = op->open_flag; - bool will_truncate = (open_flag & O_TRUNC) != 0; - bool want_write = false; + int will_truncate = open_flag & O_TRUNC; + int want_write = 0; int acc_mode = op->acc_mode; + struct file *filp; struct inode *inode; - bool symlink_ok = false; + int symlink_ok = 0; struct path save_parent = { .dentry = NULL, .mnt = NULL }; bool retried = false; int error; @@ -2529,99 +2214,112 @@ static int do_last(struct nameidata *nd, struct path *path, case LAST_DOT: error = handle_dots(nd, nd->last_type); if (error) - return error; + return ERR_PTR(error); /* fallthrough */ case LAST_ROOT: error = complete_walk(nd); if (error) - return error; + return ERR_PTR(error); audit_inode(pathname, nd->path.dentry); if (open_flag & O_CREAT) { error = -EISDIR; - goto out; + goto exit; } - goto finish_open; + goto ok; case LAST_BIND: error = complete_walk(nd); if (error) - return error; + return ERR_PTR(error); audit_inode(pathname, dir); - goto finish_open; + goto ok; } if (!(open_flag & O_CREAT)) { if (nd->last.name[nd->last.len]) nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY; if (open_flag & O_PATH && !(nd->flags & LOOKUP_FOLLOW)) - symlink_ok = true; + symlink_ok = 1; /* we _can_ be in RCU mode here */ error = lookup_fast(nd, &nd->last, path, &inode); - if (likely(!error)) - goto finish_lookup; - - if (error < 0) - goto out; + if (unlikely(error)) { + if (error < 0) + goto exit; - BUG_ON(nd->inode != 
dir->d_inode); - } else { - /* create side of things */ - /* - * This will *only* deal with leaving RCU mode - LOOKUP_JUMPED - * has been cleared when we got to the last component we are - * about to look up - */ - error = complete_walk(nd); - if (error) - return error; + error = lookup_slow(nd, &nd->last, path); + if (error < 0) + goto exit; - audit_inode(pathname, dir); - error = -EISDIR; - /* trailing slashes? */ - if (nd->last.name[nd->last.len]) - goto out; + inode = path->dentry->d_inode; + } + goto finish_lookup; } -retry_lookup: - mutex_lock(&dir->d_inode->i_mutex); - error = lookup_open(nd, path, file, op, &want_write, opened); - mutex_unlock(&dir->d_inode->i_mutex); + /* create side of things */ + /* + * This will *only* deal with leaving RCU mode - LOOKUP_JUMPED has been + * cleared when we got to the last component we are about to look up + */ + error = complete_walk(nd); + if (error) + return ERR_PTR(error); - if (error <= 0) { - if (error) - goto out; + audit_inode(pathname, dir); + error = -EISDIR; + /* trailing slashes? */ + if (nd->last.name[nd->last.len]) + goto exit; - if ((*opened & FILE_CREATED) || - !S_ISREG(file->f_path.dentry->d_inode->i_mode)) - will_truncate = false; +retry_lookup: + mutex_lock(&dir->d_inode->i_mutex); - audit_inode(pathname, file->f_path.dentry); - goto opened; + dentry = lookup_hash(nd); + error = PTR_ERR(dentry); + if (IS_ERR(dentry)) { + mutex_unlock(&dir->d_inode->i_mutex); + goto exit; } - if (*opened & FILE_CREATED) { + path->dentry = dentry; + path->mnt = nd->path.mnt; + + /* Negative dentry, just create the file */ + if (!dentry->d_inode) { + umode_t mode = op->mode; + if (!IS_POSIXACL(dir->d_inode)) + mode &= ~current_umask(); + /* + * This write is needed to ensure that a + * rw->ro transition does not occur between + * the time when the file is created and when + * a permanent write count is taken through + * the 'struct file' in nameidata_to_filp(). + */ + error = mnt_want_write(nd->path.mnt); + if (error) + goto exit_mutex_unlock; + want_write = 1; /* Don't check for write permission, don't truncate */ open_flag &= ~O_TRUNC; - will_truncate = false; + will_truncate = 0; acc_mode = MAY_OPEN; - path_to_nameidata(path, nd); - goto finish_open_created; + error = security_path_mknod(&nd->path, dentry, mode, 0); + if (error) + goto exit_mutex_unlock; + error = vfs_create(dir->d_inode, dentry, mode, nd); + if (error) + goto exit_mutex_unlock; + mutex_unlock(&dir->d_inode->i_mutex); + dput(nd->path.dentry); + nd->path.dentry = dentry; + goto common; } /* * It already exists. */ + mutex_unlock(&dir->d_inode->i_mutex); audit_inode(pathname, path->dentry); - /* - * If atomic_open() acquired write access it is dropped now due to - * possible mount and symlink following (this might be optimized away if - * necessary...) 
- */ - if (want_write) { - mnt_drop_write(nd->path.mnt); - want_write = false; - } - error = -EEXIST; if (open_flag & O_EXCL) goto exit_dput; @@ -2640,18 +2338,18 @@ static int do_last(struct nameidata *nd, struct path *path, error = -ENOENT; if (!inode) { path_to_nameidata(path, nd); - goto out; + goto exit; } if (should_follow_link(inode, !symlink_ok)) { if (nd->flags & LOOKUP_RCU) { if (unlikely(unlazy_walk(nd, path->dentry))) { error = -ECHILD; - goto out; + goto exit; } } BUG_ON(inode != path->dentry->d_inode); - return 1; + return NULL; } if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path->mnt) { @@ -2667,122 +2365,119 @@ static int do_last(struct nameidata *nd, struct path *path, error = complete_walk(nd); if (error) { path_put(&save_parent); - return error; + return ERR_PTR(error); } error = -EISDIR; if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode)) - goto out; + goto exit; error = -ENOTDIR; if ((nd->flags & LOOKUP_DIRECTORY) && !nd->inode->i_op->lookup) - goto out; + goto exit; audit_inode(pathname, nd->path.dentry); -finish_open: +ok: if (!S_ISREG(nd->inode->i_mode)) - will_truncate = false; + will_truncate = 0; if (will_truncate) { error = mnt_want_write(nd->path.mnt); if (error) - goto out; - want_write = true; + goto exit; + want_write = 1; } -finish_open_created: +common: error = may_open(&nd->path, acc_mode, open_flag); if (error) - goto out; - file->f_path.mnt = nd->path.mnt; - error = finish_open(file, nd->path.dentry, NULL, opened); - if (error) { - if (error == -EOPENSTALE) - goto stale_open; - goto out; + goto exit; + filp = nameidata_to_filp(nd); + if (filp == ERR_PTR(-EOPENSTALE) && save_parent.dentry && !retried) { + BUG_ON(save_parent.dentry != dir); + path_put(&nd->path); + nd->path = save_parent; + nd->inode = dir->d_inode; + save_parent.mnt = NULL; + save_parent.dentry = NULL; + if (want_write) { + mnt_drop_write(nd->path.mnt); + want_write = 0; + } + retried = true; + goto retry_lookup; } -opened: - error = open_check_o_direct(file); - if (error) - goto exit_fput; - error = ima_file_check(file, op->acc_mode); - if (error) - goto exit_fput; - - if (will_truncate) { - error = handle_truncate(file); - if (error) - goto exit_fput; + if (!IS_ERR(filp)) { + error = ima_file_check(filp, op->acc_mode); + if (error) { + fput(filp); + filp = ERR_PTR(error); + } + } + if (!IS_ERR(filp)) { + if (will_truncate) { + error = handle_truncate(filp); + if (error) { + fput(filp); + filp = ERR_PTR(error); + } + } } out: if (want_write) mnt_drop_write(nd->path.mnt); path_put(&save_parent); terminate_walk(nd); - return error; + return filp; +exit_mutex_unlock: + mutex_unlock(&dir->d_inode->i_mutex); exit_dput: path_put_conditional(path, nd); +exit: + filp = ERR_PTR(error); goto out; -exit_fput: - fput(file); - goto out; - -stale_open: - /* If no saved parent or already retried then can't retry */ - if (!save_parent.dentry || retried) - goto out; - - BUG_ON(save_parent.dentry != dir); - path_put(&nd->path); - nd->path = save_parent; - nd->inode = dir->d_inode; - save_parent.mnt = NULL; - save_parent.dentry = NULL; - if (want_write) { - mnt_drop_write(nd->path.mnt); - want_write = false; - } - retried = true; - goto retry_lookup; } static struct file *path_openat(int dfd, const char *pathname, struct nameidata *nd, const struct open_flags *op, int flags) { struct file *base = NULL; - struct file *file; + struct file *filp; struct path path; - int opened = 0; int error; - file = get_empty_filp(); - if (!file) + filp = get_empty_filp(); + if (!filp) return ERR_PTR(-ENFILE); - 
file->f_flags = op->open_flag; + filp->f_flags = op->open_flag; + nd->intent.open.file = filp; + nd->intent.open.flags = open_to_namei_flags(op->open_flag); + nd->intent.open.create_mode = op->mode; error = path_init(dfd, pathname, flags | LOOKUP_PARENT, nd, &base); if (unlikely(error)) - goto out; + goto out_filp; current->total_link_count = 0; error = link_path_walk(pathname, nd); if (unlikely(error)) - goto out; + goto out_filp; - error = do_last(nd, &path, file, op, &opened, pathname); - while (unlikely(error > 0)) { /* trailing symlink */ + filp = do_last(nd, &path, op, pathname); + while (unlikely(!filp)) { /* trailing symlink */ struct path link = path; void *cookie; if (!(nd->flags & LOOKUP_FOLLOW)) { path_put_conditional(&path, nd); path_put(&nd->path); - error = -ELOOP; + filp = ERR_PTR(-ELOOP); break; } nd->flags |= LOOKUP_PARENT; nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL); error = follow_link(&link, nd, &cookie); if (unlikely(error)) - break; - error = do_last(nd, &path, file, op, &opened, pathname); + filp = ERR_PTR(error); + else + filp = do_last(nd, &path, op, pathname); put_link(nd, &link, cookie); } out: @@ -2790,20 +2485,18 @@ static struct file *path_openat(int dfd, const char *pathname, path_put(&nd->root); if (base) fput(base); - if (!(opened & FILE_OPENED)) { - BUG_ON(!error); - put_filp(file); - } - if (unlikely(error)) { - if (error == -EOPENSTALE) { - if (flags & LOOKUP_RCU) - error = -ECHILD; - else - error = -ESTALE; - } - file = ERR_PTR(error); + release_open_intent(nd); + if (filp == ERR_PTR(-EOPENSTALE)) { + if (flags & LOOKUP_RCU) + filp = ERR_PTR(-ECHILD); + else + filp = ERR_PTR(-ESTALE); } - return file; + return filp; + +out_filp: + filp = ERR_PTR(error); + goto out; } struct file *do_filp_open(int dfd, const char *pathname, @@ -2858,6 +2551,7 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path goto out; nd.flags &= ~LOOKUP_PARENT; nd.flags |= LOOKUP_CREATE | LOOKUP_EXCL; + nd.intent.open.flags = O_EXCL; /* * Do the final lookup. @@ -2976,7 +2670,7 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode, goto out_drop_write; switch (mode & S_IFMT) { case 0: case S_IFREG: - error = vfs_create(path.dentry->d_inode,dentry,mode,true); + error = vfs_create(path.dentry->d_inode,dentry,mode,NULL); break; case S_IFCHR: case S_IFBLK: error = vfs_mknod(path.dentry->d_inode,dentry,mode, diff --git a/trunk/fs/namespace.c b/trunk/fs/namespace.c index c53d3381b0d0..1e4a5fe3d7b7 100644 --- a/trunk/fs/namespace.c +++ b/trunk/fs/namespace.c @@ -515,20 +515,8 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry, } /* - * lookup_mnt - Return the first child mount mounted at path - * - * "First" means first mounted chronologically. If you create the - * following mounts: - * - * mount /dev/sda1 /mnt - * mount /dev/sda2 /mnt - * mount /dev/sda3 /mnt - * - * Then lookup_mnt() on the base /mnt dentry in the root mount will - * return successively the root dentry and vfsmount of /dev/sda1, then - * /dev/sda2, then /dev/sda3, then NULL. - * - * lookup_mnt takes a reference to the found vfsmount. + * lookup_mnt increments the ref count before returning + * the vfsmount struct. 
*/ struct vfsmount *lookup_mnt(struct path *path) { @@ -633,6 +621,21 @@ static void attach_mnt(struct mount *mnt, struct path *path) list_add_tail(&mnt->mnt_child, &real_mount(path->mnt)->mnt_mounts); } +static inline void __mnt_make_longterm(struct mount *mnt) +{ +#ifdef CONFIG_SMP + atomic_inc(&mnt->mnt_longterm); +#endif +} + +/* needs vfsmount lock for write */ +static inline void __mnt_make_shortterm(struct mount *mnt) +{ +#ifdef CONFIG_SMP + atomic_dec(&mnt->mnt_longterm); +#endif +} + /* * vfsmount lock must be held for write */ @@ -646,8 +649,10 @@ static void commit_tree(struct mount *mnt) BUG_ON(parent == mnt); list_add_tail(&head, &mnt->mnt_list); - list_for_each_entry(m, &head, mnt_list) + list_for_each_entry(m, &head, mnt_list) { m->mnt_ns = n; + __mnt_make_longterm(m); + } list_splice(&head, n->list.prev); @@ -720,60 +725,56 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root, int flag) { struct super_block *sb = old->mnt.mnt_sb; - struct mount *mnt; - int err; + struct mount *mnt = alloc_vfsmnt(old->mnt_devname); - mnt = alloc_vfsmnt(old->mnt_devname); - if (!mnt) - return ERR_PTR(-ENOMEM); - - if (flag & (CL_SLAVE | CL_PRIVATE)) - mnt->mnt_group_id = 0; /* not a peer of original */ - else - mnt->mnt_group_id = old->mnt_group_id; - - if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) { - err = mnt_alloc_group_id(mnt); - if (err) - goto out_free; - } - - mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD; - atomic_inc(&sb->s_active); - mnt->mnt.mnt_sb = sb; - mnt->mnt.mnt_root = dget(root); - mnt->mnt_mountpoint = mnt->mnt.mnt_root; - mnt->mnt_parent = mnt; - br_write_lock(&vfsmount_lock); - list_add_tail(&mnt->mnt_instance, &sb->s_mounts); - br_write_unlock(&vfsmount_lock); + if (mnt) { + if (flag & (CL_SLAVE | CL_PRIVATE)) + mnt->mnt_group_id = 0; /* not a peer of original */ + else + mnt->mnt_group_id = old->mnt_group_id; + + if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) { + int err = mnt_alloc_group_id(mnt); + if (err) + goto out_free; + } - if (flag & CL_SLAVE) { - list_add(&mnt->mnt_slave, &old->mnt_slave_list); - mnt->mnt_master = old; - CLEAR_MNT_SHARED(mnt); - } else if (!(flag & CL_PRIVATE)) { - if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old)) - list_add(&mnt->mnt_share, &old->mnt_share); - if (IS_MNT_SLAVE(old)) - list_add(&mnt->mnt_slave, &old->mnt_slave); - mnt->mnt_master = old->mnt_master; - } - if (flag & CL_MAKE_SHARED) - set_mnt_shared(mnt); + mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD; + atomic_inc(&sb->s_active); + mnt->mnt.mnt_sb = sb; + mnt->mnt.mnt_root = dget(root); + mnt->mnt_mountpoint = mnt->mnt.mnt_root; + mnt->mnt_parent = mnt; + br_write_lock(&vfsmount_lock); + list_add_tail(&mnt->mnt_instance, &sb->s_mounts); + br_write_unlock(&vfsmount_lock); - /* stick the duplicate mount on the same expiry list - * as the original if that was on one */ - if (flag & CL_EXPIRE) { - if (!list_empty(&old->mnt_expire)) - list_add(&mnt->mnt_expire, &old->mnt_expire); + if (flag & CL_SLAVE) { + list_add(&mnt->mnt_slave, &old->mnt_slave_list); + mnt->mnt_master = old; + CLEAR_MNT_SHARED(mnt); + } else if (!(flag & CL_PRIVATE)) { + if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old)) + list_add(&mnt->mnt_share, &old->mnt_share); + if (IS_MNT_SLAVE(old)) + list_add(&mnt->mnt_slave, &old->mnt_slave); + mnt->mnt_master = old->mnt_master; + } + if (flag & CL_MAKE_SHARED) + set_mnt_shared(mnt); + + /* stick the duplicate mount on the same expiry list + * as the original if that was on one */ + if (flag & CL_EXPIRE) { + if 
(!list_empty(&old->mnt_expire)) + list_add(&mnt->mnt_expire, &old->mnt_expire); + } } - return mnt; out_free: free_vfsmnt(mnt); - return ERR_PTR(err); + return NULL; } static inline void mntfree(struct mount *mnt) @@ -803,8 +804,7 @@ static void mntput_no_expire(struct mount *mnt) put_again: #ifdef CONFIG_SMP br_read_lock(&vfsmount_lock); - if (likely(mnt->mnt_ns)) { - /* shouldn't be the last one */ + if (likely(atomic_read(&mnt->mnt_longterm))) { mnt_add_count(mnt, -1); br_read_unlock(&vfsmount_lock); return; @@ -939,7 +939,7 @@ EXPORT_SYMBOL(replace_mount_options); /* iterator; we want it to have access to namespace_sem, thus here... */ static void *m_start(struct seq_file *m, loff_t *pos) { - struct proc_mounts *p = proc_mounts(m); + struct proc_mounts *p = container_of(m, struct proc_mounts, m); down_read(&namespace_sem); return seq_list_start(&p->ns->list, *pos); @@ -947,7 +947,7 @@ static void *m_start(struct seq_file *m, loff_t *pos) static void *m_next(struct seq_file *m, void *v, loff_t *pos) { - struct proc_mounts *p = proc_mounts(m); + struct proc_mounts *p = container_of(m, struct proc_mounts, m); return seq_list_next(v, &p->ns->list, pos); } @@ -959,7 +959,7 @@ static void m_stop(struct seq_file *m, void *v) static int m_show(struct seq_file *m, void *v) { - struct proc_mounts *p = proc_mounts(m); + struct proc_mounts *p = container_of(m, struct proc_mounts, m); struct mount *r = list_entry(v, struct mount, mnt_list); return p->show(m, &r->mnt); } @@ -1074,6 +1074,8 @@ void umount_tree(struct mount *mnt, int propagate, struct list_head *kill) list_del_init(&p->mnt_expire); list_del_init(&p->mnt_list); __touch_mnt_namespace(p->mnt_ns); + if (p->mnt_ns) + __mnt_make_shortterm(p); p->mnt_ns = NULL; list_del_init(&p->mnt_child); if (mnt_has_parent(p)) { @@ -1258,12 +1260,11 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry, struct path path; if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt)) - return ERR_PTR(-EINVAL); + return NULL; res = q = clone_mnt(mnt, dentry, flag); - if (IS_ERR(q)) - return q; - + if (!q) + goto Enomem; q->mnt_mountpoint = mnt->mnt_mountpoint; p = mnt; @@ -1285,8 +1286,8 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry, path.mnt = &q->mnt; path.dentry = p->mnt_mountpoint; q = clone_mnt(p, p->mnt.mnt_root, flag); - if (IS_ERR(q)) - goto out; + if (!q) + goto Enomem; br_write_lock(&vfsmount_lock); list_add_tail(&q->mnt_list, &res->mnt_list); attach_mnt(q, &path); @@ -1294,7 +1295,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry, } } return res; -out: +Enomem: if (res) { LIST_HEAD(umount_list); br_write_lock(&vfsmount_lock); @@ -1302,11 +1303,9 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry, br_write_unlock(&vfsmount_lock); release_mounts(&umount_list); } - return q; + return NULL; } -/* Caller should check returned pointer for errors */ - struct vfsmount *collect_mounts(struct path *path) { struct mount *tree; @@ -1314,9 +1313,7 @@ struct vfsmount *collect_mounts(struct path *path) tree = copy_tree(real_mount(path->mnt), path->dentry, CL_COPY_ALL | CL_PRIVATE); up_write(&namespace_sem); - if (IS_ERR(tree)) - return NULL; - return &tree->mnt; + return tree ? 
&tree->mnt : NULL; } void drop_collected_mounts(struct vfsmount *mnt) @@ -1611,15 +1608,14 @@ static int do_loopback(struct path *path, char *old_name, if (!check_mnt(real_mount(path->mnt)) || !check_mnt(old)) goto out2; + err = -ENOMEM; if (recurse) mnt = copy_tree(old, old_path.dentry, 0); else mnt = clone_mnt(old, old_path.dentry, 0); - if (IS_ERR(mnt)) { - err = PTR_ERR(mnt); - goto out; - } + if (!mnt) + goto out2; err = graft_tree(mnt, path); if (err) { @@ -2213,6 +2209,23 @@ static struct mnt_namespace *alloc_mnt_ns(void) return new_ns; } +void mnt_make_longterm(struct vfsmount *mnt) +{ + __mnt_make_longterm(real_mount(mnt)); +} + +void mnt_make_shortterm(struct vfsmount *m) +{ +#ifdef CONFIG_SMP + struct mount *mnt = real_mount(m); + if (atomic_add_unless(&mnt->mnt_longterm, -1, 1)) + return; + br_write_lock(&vfsmount_lock); + atomic_dec(&mnt->mnt_longterm); + br_write_unlock(&vfsmount_lock); +#endif +} + /* * Allocate a new namespace structure and populate it with contents * copied from the namespace of the passed in task structure. @@ -2233,10 +2246,10 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns, down_write(&namespace_sem); /* First pass: copy the tree topology */ new = copy_tree(old, old->mnt.mnt_root, CL_COPY_ALL | CL_EXPIRE); - if (IS_ERR(new)) { + if (!new) { up_write(&namespace_sem); kfree(new_ns); - return ERR_CAST(new); + return ERR_PTR(-ENOMEM); } new_ns->root = new; br_write_lock(&vfsmount_lock); @@ -2252,13 +2265,18 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns, q = new; while (p) { q->mnt_ns = new_ns; + __mnt_make_longterm(q); if (fs) { if (&p->mnt == fs->root.mnt) { fs->root.mnt = mntget(&q->mnt); + __mnt_make_longterm(q); + mnt_make_shortterm(&p->mnt); rootmnt = &p->mnt; } if (&p->mnt == fs->pwd.mnt) { fs->pwd.mnt = mntget(&q->mnt); + __mnt_make_longterm(q); + mnt_make_shortterm(&p->mnt); pwdmnt = &p->mnt; } } @@ -2302,6 +2320,7 @@ static struct mnt_namespace *create_mnt_ns(struct vfsmount *m) if (!IS_ERR(new_ns)) { struct mount *mnt = real_mount(m); mnt->mnt_ns = new_ns; + __mnt_make_longterm(mnt); new_ns->root = mnt; list_add(&new_ns->list, &mnt->mnt_list); } else { @@ -2596,7 +2615,7 @@ struct vfsmount *kern_mount_data(struct file_system_type *type, void *data) * it is a longterm mount, don't release mnt until * we unmount before file sys is unregistered */ - real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL; + mnt_make_longterm(mnt); } return mnt; } @@ -2606,9 +2625,7 @@ void kern_unmount(struct vfsmount *mnt) { /* release long term mount so mount point can be released */ if (!IS_ERR_OR_NULL(mnt)) { - br_write_lock(&vfsmount_lock); - real_mount(mnt)->mnt_ns = NULL; - br_write_unlock(&vfsmount_lock); + mnt_make_shortterm(mnt); mntput(mnt); } } diff --git a/trunk/fs/ncpfs/dir.c b/trunk/fs/ncpfs/dir.c index 4117e7b377bb..aeed93a6bde0 100644 --- a/trunk/fs/ncpfs/dir.c +++ b/trunk/fs/ncpfs/dir.c @@ -30,8 +30,8 @@ static void ncp_do_readdir(struct file *, void *, filldir_t, static int ncp_readdir(struct file *, void *, filldir_t); -static int ncp_create(struct inode *, struct dentry *, umode_t, bool); -static struct dentry *ncp_lookup(struct inode *, struct dentry *, unsigned int); +static int ncp_create(struct inode *, struct dentry *, umode_t, struct nameidata *); +static struct dentry *ncp_lookup(struct inode *, struct dentry *, struct nameidata *); static int ncp_unlink(struct inode *, struct dentry *); static int ncp_mkdir(struct inode *, struct dentry *, umode_t); static int ncp_rmdir(struct inode *, struct dentry *); 
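The fs/namespace.c hunks above reintroduce an explicit mnt_longterm counter so that mntput_no_expire() can take a fast path whenever a mount is still pinned long-term (by a namespace, fs_struct root/pwd, or kern_mount); the real code additionally holds vfsmount_lock around that check. A toy userspace sketch of the same long-term/short-term reference idea, using C11 atomics and invented names, could read:

#include <stdatomic.h>
#include <stdio.h>

struct obj {
	atomic_int count;     /* total references                      */
	atomic_int longterm;  /* how many of those are long-term pins  */
};

static void obj_get(struct obj *o)   { atomic_fetch_add(&o->count, 1); }
static void obj_pin(struct obj *o)   { atomic_fetch_add(&o->longterm, 1); }
static void obj_unpin(struct obj *o) { atomic_fetch_sub(&o->longterm, 1); }

static void obj_put(struct obj *o)
{
	if (atomic_load(&o->longterm) > 0) {
		/* fast path: a long-term pin exists, so this cannot be the last ref */
		atomic_fetch_sub(&o->count, 1);
		return;
	}
	/* slow path: this put may be the last one, do the full teardown check */
	if (atomic_fetch_sub(&o->count, 1) == 1)
		printf("last reference gone, object can be freed\n");
}

int main(void)
{
	struct obj o = { .count = 1, .longterm = 0 };

	obj_pin(&o);               /* e.g. the mount joins a namespace          */
	obj_get(&o); obj_put(&o);  /* a transient user takes the fast path      */
	obj_unpin(&o);             /* the namespace lets go                     */
	obj_put(&o);               /* final put walks the slow path and frees   */
	return 0;
}

Unlike the kernel code, this sketch has no lock closing the window between the longterm check and the decrement; it only shows why a pinned object lets ordinary puts skip the expensive last-reference path.
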
@@ -72,7 +72,7 @@ const struct inode_operations ncp_dir_inode_operations = /* * Dentry operations routines */ -static int ncp_lookup_validate(struct dentry *, unsigned int); +static int ncp_lookup_validate(struct dentry *, struct nameidata *); static int ncp_hash_dentry(const struct dentry *, const struct inode *, struct qstr *); static int ncp_compare_dentry(const struct dentry *, const struct inode *, @@ -290,7 +290,7 @@ leave_me:; static int -ncp_lookup_validate(struct dentry *dentry, unsigned int flags) +ncp_lookup_validate(struct dentry *dentry, struct nameidata *nd) { struct ncp_server *server; struct dentry *parent; @@ -302,7 +302,7 @@ ncp_lookup_validate(struct dentry *dentry, unsigned int flags) if (dentry == dentry->d_sb->s_root) return 1; - if (flags & LOOKUP_RCU) + if (nd->flags & LOOKUP_RCU) return -ECHILD; parent = dget_parent(dentry); @@ -836,7 +836,7 @@ int ncp_conn_logged_in(struct super_block *sb) return result; } -static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) +static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct ncp_server *server = NCP_SERVER(dir); struct inode *inode = NULL; @@ -980,7 +980,7 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, umode_t mode, } static int ncp_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool excl) + struct nameidata *nd) { return ncp_create_new(dir, dentry, mode, 0, 0); } diff --git a/trunk/fs/nfs/dir.c b/trunk/fs/nfs/dir.c index a6b1c7fb8232..f430057ff3b3 100644 --- a/trunk/fs/nfs/dir.c +++ b/trunk/fs/nfs/dir.c @@ -46,8 +46,8 @@ static int nfs_opendir(struct inode *, struct file *); static int nfs_closedir(struct inode *, struct file *); static int nfs_readdir(struct file *, void *, filldir_t); -static struct dentry *nfs_lookup(struct inode *, struct dentry *, unsigned int); -static int nfs_create(struct inode *, struct dentry *, umode_t, bool); +static struct dentry *nfs_lookup(struct inode *, struct dentry *, struct nameidata *); +static int nfs_create(struct inode *, struct dentry *, umode_t, struct nameidata *); static int nfs_mkdir(struct inode *, struct dentry *, umode_t); static int nfs_rmdir(struct inode *, struct dentry *); static int nfs_unlink(struct inode *, struct dentry *); @@ -111,13 +111,11 @@ const struct inode_operations nfs3_dir_inode_operations = { #ifdef CONFIG_NFS_V4 -static int nfs_atomic_open(struct inode *, struct dentry *, - struct file *, unsigned, umode_t, - int *); +static struct dentry *nfs_atomic_lookup(struct inode *, struct dentry *, struct nameidata *); +static int nfs_open_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd); const struct inode_operations nfs4_dir_inode_operations = { - .create = nfs_create, - .lookup = nfs_lookup, - .atomic_open = nfs_atomic_open, + .create = nfs_open_create, + .lookup = nfs_atomic_lookup, .link = nfs_link, .unlink = nfs_unlink, .symlink = nfs_symlink, @@ -1030,15 +1028,28 @@ static int nfs_check_verifier(struct inode *dir, struct dentry *dentry) return 1; } +/* + * Return the intent data that applies to this particular path component + * + * Note that the current set of intents only apply to the very last + * component of the path and none of them is set before that last + * component. 
+ */ +static inline unsigned int nfs_lookup_check_intent(struct nameidata *nd, + unsigned int mask) +{ + return nd->flags & mask; +} + /* * Use intent information to check whether or not we're going to do * an O_EXCL create using this path component. */ -static int nfs_is_exclusive_create(struct inode *dir, unsigned int flags) +static int nfs_is_exclusive_create(struct inode *dir, struct nameidata *nd) { if (NFS_PROTO(dir)->version == 2) return 0; - return flags & LOOKUP_EXCL; + return nd && nfs_lookup_check_intent(nd, LOOKUP_EXCL); } /* @@ -1050,20 +1061,25 @@ static int nfs_is_exclusive_create(struct inode *dir, unsigned int flags) * */ static inline -int nfs_lookup_verify_inode(struct inode *inode, unsigned int flags) +int nfs_lookup_verify_inode(struct inode *inode, struct nameidata *nd) { struct nfs_server *server = NFS_SERVER(inode); if (IS_AUTOMOUNT(inode)) return 0; - /* VFS wants an on-the-wire revalidation */ - if (flags & LOOKUP_REVAL) - goto out_force; - /* This is an open(2) */ - if ((flags & LOOKUP_OPEN) && !(server->flags & NFS_MOUNT_NOCTO) && - (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) - goto out_force; - return 0; + if (nd != NULL) { + /* VFS wants an on-the-wire revalidation */ + if (nd->flags & LOOKUP_REVAL) + goto out_force; + /* This is an open(2) */ + if (nfs_lookup_check_intent(nd, LOOKUP_OPEN) != 0 && + !(server->flags & NFS_MOUNT_NOCTO) && + (S_ISREG(inode->i_mode) || + S_ISDIR(inode->i_mode))) + goto out_force; + return 0; + } + return nfs_revalidate_inode(server, inode); out_force: return __nfs_revalidate_inode(server, inode); } @@ -1077,10 +1093,10 @@ int nfs_lookup_verify_inode(struct inode *inode, unsigned int flags) */ static inline int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { /* Don't revalidate a negative dentry if we're creating a new file */ - if (flags & LOOKUP_CREATE) + if (nd != NULL && nfs_lookup_check_intent(nd, LOOKUP_CREATE) != 0) return 0; if (NFS_SERVER(dir)->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG) return 1; @@ -1098,7 +1114,7 @@ int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry, * If the parent directory is seen to have changed, we throw out the * cached dentry and do a new lookup. 
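nfs_lookup_check_intent() above simply masks nd->flags, and the callers restored by this hunk (nfs_is_exclusive_create(), nfs_lookup_verify_inode(), nfs_neg_need_reval()) all have to tolerate a NULL nameidata, since some lookups are issued without one (several hunks below call the revalidate helpers with a literal NULL). A minimal sketch of that guard pattern; the helper name is hypothetical, the LOOKUP_* flags are the real ones:

    #include <linux/namei.h>

    /* Sketch only: my_wants_excl_create() is hypothetical. */
    static int my_wants_excl_create(struct nameidata *nd)
    {
            if (nd == NULL)                 /* internal lookup, no intent data */
                    return 0;
            /* intent flags only describe the last path component */
            return (nd->flags & (LOOKUP_CREATE | LOOKUP_EXCL)) ==
                   (LOOKUP_CREATE | LOOKUP_EXCL);
    }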
*/ -static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags) +static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd) { struct inode *dir; struct inode *inode; @@ -1107,7 +1123,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags) struct nfs_fattr *fattr = NULL; int error; - if (flags & LOOKUP_RCU) + if (nd->flags & LOOKUP_RCU) return -ECHILD; parent = dget_parent(dentry); @@ -1116,7 +1132,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags) inode = dentry->d_inode; if (!inode) { - if (nfs_neg_need_reval(dir, dentry, flags)) + if (nfs_neg_need_reval(dir, dentry, nd)) goto out_bad; goto out_valid_noent; } @@ -1132,8 +1148,8 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags) goto out_set_verifier; /* Force a full look up iff the parent directory has changed */ - if (!nfs_is_exclusive_create(dir, flags) && nfs_check_verifier(dir, dentry)) { - if (nfs_lookup_verify_inode(inode, flags)) + if (!nfs_is_exclusive_create(dir, nd) && nfs_check_verifier(dir, dentry)) { + if (nfs_lookup_verify_inode(inode, nd)) goto out_zap_parent; goto out_valid; } @@ -1270,7 +1286,7 @@ const struct dentry_operations nfs_dentry_operations = { .d_release = nfs_d_release, }; -static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags) +static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd) { struct dentry *res; struct dentry *parent; @@ -1291,7 +1307,7 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsi * If we're doing an exclusive create, optimize away the lookup * but don't hash the dentry. */ - if (nfs_is_exclusive_create(dir, flags)) { + if (nfs_is_exclusive_create(dir, nd)) { d_instantiate(dentry, NULL); res = NULL; goto out; @@ -1338,7 +1354,7 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsi } #ifdef CONFIG_NFS_V4 -static int nfs4_lookup_revalidate(struct dentry *, unsigned int); +static int nfs4_lookup_revalidate(struct dentry *, struct nameidata *); const struct dentry_operations nfs4_dentry_operations = { .d_revalidate = nfs4_lookup_revalidate, @@ -1348,6 +1364,24 @@ const struct dentry_operations nfs4_dentry_operations = { .d_release = nfs_d_release, }; +/* + * Use intent information to determine whether we need to substitute + * the NFSv4-style stateful OPEN for the LOOKUP call + */ +static int is_atomic_open(struct nameidata *nd) +{ + if (nd == NULL || nfs_lookup_check_intent(nd, LOOKUP_OPEN) == 0) + return 0; + /* NFS does not (yet) have a stateful open for directories */ + if (nd->flags & LOOKUP_DIRECTORY) + return 0; + /* Are we trying to write to a read only partition? 
*/ + if (__mnt_is_readonly(nd->path.mnt) && + (nd->intent.open.flags & (O_CREAT|O_TRUNC|O_ACCMODE))) + return 0; + return 1; +} + static fmode_t flags_to_mode(int flags) { fmode_t res = (__force fmode_t)flags & FMODE_EXEC; @@ -1369,143 +1403,136 @@ static int do_open(struct inode *inode, struct file *filp) return 0; } -static int nfs_finish_open(struct nfs_open_context *ctx, - struct dentry *dentry, - struct file *file, unsigned open_flags, - int *opened) +static int nfs_intent_set_file(struct nameidata *nd, struct nfs_open_context *ctx) { - int err; - - if (ctx->dentry != dentry) { - dput(ctx->dentry); - ctx->dentry = dget(dentry); - } + struct file *filp; + int ret = 0; /* If the open_intent is for execute, we have an extra check to make */ if (ctx->mode & FMODE_EXEC) { - err = nfs_may_open(dentry->d_inode, ctx->cred, open_flags); - if (err < 0) + ret = nfs_may_open(ctx->dentry->d_inode, + ctx->cred, + nd->intent.open.flags); + if (ret < 0) goto out; } - - err = finish_open(file, dentry, do_open, opened); - if (err) - goto out; - nfs_file_set_open_context(file, ctx); - + filp = lookup_instantiate_filp(nd, ctx->dentry, do_open); + if (IS_ERR(filp)) + ret = PTR_ERR(filp); + else + nfs_file_set_open_context(filp, ctx); out: put_nfs_open_context(ctx); - return err; + return ret; } -static int nfs_atomic_open(struct inode *dir, struct dentry *dentry, - struct file *file, unsigned open_flags, - umode_t mode, int *opened) +static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct nfs_open_context *ctx; - struct dentry *res; - struct iattr attr = { .ia_valid = ATTR_OPEN }; + struct iattr attr; + struct dentry *res = NULL; struct inode *inode; + int open_flags; int err; - /* Expect a negative dentry */ - BUG_ON(dentry->d_inode); - - dfprintk(VFS, "NFS: atomic_open(%s/%ld), %s\n", + dfprintk(VFS, "NFS: atomic_lookup(%s/%ld), %s\n", dir->i_sb->s_id, dir->i_ino, dentry->d_name.name); - /* NFS only supports OPEN on regular files */ - if ((open_flags & O_DIRECTORY)) { - if (!d_unhashed(dentry)) { - /* - * Hashed negative dentry with O_DIRECTORY: dentry was - * revalidated and is fine, no need to perform lookup - * again - */ - return -ENOENT; - } + /* Check that we are indeed trying to open this file */ + if (!is_atomic_open(nd)) goto no_open; + + if (dentry->d_name.len > NFS_SERVER(dir)->namelen) { + res = ERR_PTR(-ENAMETOOLONG); + goto out; } - if (dentry->d_name.len > NFS_SERVER(dir)->namelen) - return -ENAMETOOLONG; + /* Let vfs_create() deal with O_EXCL. Instantiate, but don't hash + * the dentry. 
*/ + if (nd->flags & LOOKUP_EXCL) { + d_instantiate(dentry, NULL); + goto out; + } + + open_flags = nd->intent.open.flags; + attr.ia_valid = ATTR_OPEN; + + ctx = create_nfs_open_context(dentry, open_flags); + res = ERR_CAST(ctx); + if (IS_ERR(ctx)) + goto out; - if (open_flags & O_CREAT) { + if (nd->flags & LOOKUP_CREATE) { + attr.ia_mode = nd->intent.open.create_mode; attr.ia_valid |= ATTR_MODE; - attr.ia_mode = mode & ~current_umask(); - } + attr.ia_mode &= ~current_umask(); + } else + open_flags &= ~(O_EXCL | O_CREAT); + if (open_flags & O_TRUNC) { attr.ia_valid |= ATTR_SIZE; attr.ia_size = 0; } - ctx = create_nfs_open_context(dentry, open_flags); - err = PTR_ERR(ctx); - if (IS_ERR(ctx)) - goto out; - + /* Open the file on the server */ nfs_block_sillyrename(dentry->d_parent); inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr); - d_drop(dentry); if (IS_ERR(inode)) { nfs_unblock_sillyrename(dentry->d_parent); put_nfs_open_context(ctx); - err = PTR_ERR(inode); - switch (err) { - case -ENOENT: - d_add(dentry, NULL); - break; - case -EISDIR: - case -ENOTDIR: - goto no_open; - case -ELOOP: - if (!(open_flags & O_NOFOLLOW)) + switch (PTR_ERR(inode)) { + /* Make a negative dentry */ + case -ENOENT: + d_add(dentry, NULL); + res = NULL; + goto out; + /* This turned out not to be a regular file */ + case -EISDIR: + case -ENOTDIR: goto no_open; - break; + case -ELOOP: + if (!(nd->intent.open.flags & O_NOFOLLOW)) + goto no_open; /* case -EINVAL: */ - default: - break; + default: + res = ERR_CAST(inode); + goto out; } - goto out; } res = d_add_unique(dentry, inode); - if (res != NULL) - dentry = res; - nfs_unblock_sillyrename(dentry->d_parent); - nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); - - err = nfs_finish_open(ctx, dentry, file, open_flags, opened); - - dput(res); + if (res != NULL) { + dput(ctx->dentry); + ctx->dentry = dget(res); + dentry = res; + } + err = nfs_intent_set_file(nd, ctx); + if (err < 0) { + if (res != NULL) + dput(res); + return ERR_PTR(err); + } out: - return err; - + nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); + return res; no_open: - res = nfs_lookup(dir, dentry, 0); - err = PTR_ERR(res); - if (IS_ERR(res)) - goto out; - - return finish_no_open(file, res); + return nfs_lookup(dir, dentry, nd); } -static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags) +static int nfs4_lookup_revalidate(struct dentry *dentry, struct nameidata *nd) { struct dentry *parent = NULL; struct inode *inode; struct inode *dir; - int ret = 0; + int openflags, ret = 0; - if (flags & LOOKUP_RCU) + if (nd->flags & LOOKUP_RCU) return -ECHILD; - if (!(flags & LOOKUP_OPEN) || (flags & LOOKUP_DIRECTORY)) - goto no_open; - if (d_mountpoint(dentry)) + inode = dentry->d_inode; + if (!is_atomic_open(nd) || d_mountpoint(dentry)) goto no_open; - inode = dentry->d_inode; parent = dget_parent(dentry); dir = parent->d_inode; @@ -1513,7 +1540,7 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags) * optimize away revalidation of negative dentries. 
*/ if (inode == NULL) { - if (!nfs_neg_need_reval(dir, dentry, flags)) + if (!nfs_neg_need_reval(dir, dentry, nd)) ret = 1; goto out; } @@ -1521,8 +1548,9 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags) /* NFS only supports OPEN on regular files */ if (!S_ISREG(inode->i_mode)) goto no_open_dput; + openflags = nd->intent.open.flags; /* We cannot do exclusive creation on a positive dentry */ - if (flags & LOOKUP_EXCL) + if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL)) goto no_open_dput; /* Let f_op->open() actually open (and revalidate) the file */ @@ -1535,7 +1563,48 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags) no_open_dput: dput(parent); no_open: - return nfs_lookup_revalidate(dentry, flags); + return nfs_lookup_revalidate(dentry, nd); +} + +static int nfs_open_create(struct inode *dir, struct dentry *dentry, + umode_t mode, struct nameidata *nd) +{ + struct nfs_open_context *ctx = NULL; + struct iattr attr; + int error; + int open_flags = O_CREAT|O_EXCL; + + dfprintk(VFS, "NFS: create(%s/%ld), %s\n", + dir->i_sb->s_id, dir->i_ino, dentry->d_name.name); + + attr.ia_mode = mode; + attr.ia_valid = ATTR_MODE; + + if (nd) + open_flags = nd->intent.open.flags; + + ctx = create_nfs_open_context(dentry, open_flags); + error = PTR_ERR(ctx); + if (IS_ERR(ctx)) + goto out_err_drop; + + error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, ctx); + if (error != 0) + goto out_put_ctx; + if (nd) { + error = nfs_intent_set_file(nd, ctx); + if (error < 0) + goto out_err; + } else { + put_nfs_open_context(ctx); + } + return 0; +out_put_ctx: + put_nfs_open_context(ctx); +out_err_drop: + d_drop(dentry); +out_err: + return error; } #endif /* CONFIG_NFSV4 */ @@ -1589,11 +1658,11 @@ int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle, * reply path made it appear to have failed. */ static int nfs_create(struct inode *dir, struct dentry *dentry, - umode_t mode, bool excl) + umode_t mode, struct nameidata *nd) { struct iattr attr; - int open_flags = excl ? 
O_CREAT | O_EXCL : O_CREAT; int error; + int open_flags = O_CREAT|O_EXCL; dfprintk(VFS, "NFS: create(%s/%ld), %s\n", dir->i_sb->s_id, dir->i_ino, dentry->d_name.name); @@ -1601,7 +1670,10 @@ static int nfs_create(struct inode *dir, struct dentry *dentry, attr.ia_mode = mode; attr.ia_valid = ATTR_MODE; - error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags); + if (nd) + open_flags = nd->intent.open.flags; + + error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, NULL); if (error != 0) goto out_err; return 0; diff --git a/trunk/fs/nfs/getroot.c b/trunk/fs/nfs/getroot.c index a67990f90bd7..8abfb19bd3aa 100644 --- a/trunk/fs/nfs/getroot.c +++ b/trunk/fs/nfs/getroot.c @@ -62,7 +62,7 @@ static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *i */ spin_lock(&sb->s_root->d_inode->i_lock); spin_lock(&sb->s_root->d_lock); - hlist_del_init(&sb->s_root->d_alias); + list_del_init(&sb->s_root->d_alias); spin_unlock(&sb->s_root->d_lock); spin_unlock(&sb->s_root->d_inode->i_lock); } diff --git a/trunk/fs/nfs/nfs3proc.c b/trunk/fs/nfs/nfs3proc.c index 3187e24e8f78..2292a0fd2bff 100644 --- a/trunk/fs/nfs/nfs3proc.c +++ b/trunk/fs/nfs/nfs3proc.c @@ -314,7 +314,7 @@ static void nfs3_free_createdata(struct nfs3_createdata *data) */ static int nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, - int flags) + int flags, struct nfs_open_context *ctx) { struct nfs3_createdata *data; umode_t mode = sattr->ia_mode; diff --git a/trunk/fs/nfs/nfs4proc.c b/trunk/fs/nfs/nfs4proc.c index c157b2089b47..15fc7e4664ed 100644 --- a/trunk/fs/nfs/nfs4proc.c +++ b/trunk/fs/nfs/nfs4proc.c @@ -2806,22 +2806,37 @@ static int nfs4_proc_readlink(struct inode *inode, struct page *page, } /* - * This is just for mknod. open(O_CREAT) will always do ->open_context(). + * Got race? + * We will need to arrange for the VFS layer to provide an atomic open. + * Until then, this create/open method is prone to inefficiency and race + * conditions due to the lookup, create, and open VFS calls from sys_open() + * placed on the wire. + * + * Given the above sorry state of affairs, I'm simply sending an OPEN. + * The file will be opened again in the subsequent VFS open call + * (nfs4_proc_file_open). + * + * The open for read will just hang around to be used by any process that + * opens the file O_RDONLY. This will all be resolved with the VFS changes. 
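The comment above explains that, in this pre-atomic_open scheme, ->create already puts an NFSv4 OPEN on the wire and the later f_op->open repeats it. A hedged sketch of the two ways the five-argument ->create shown in this patch gets called; the wrapper names are hypothetical, NFS_PROTO()->create() is the operation whose prototype the patch changes:

    #include <linux/fs.h>
    #include <linux/nfs_fs.h>

    /* Sketch only: these wrappers are hypothetical. */
    static int create_with_open_intent(struct inode *dir, struct dentry *dentry,
                                       struct iattr *attr, int open_flags,
                                       struct nfs_open_context *ctx)
    {
            /* ctx != NULL: the OPEN state is stashed in ctx->state and the
             * context is later attached to the struct file. */
            return NFS_PROTO(dir)->create(dir, dentry, attr, open_flags, ctx);
    }

    static int create_without_intent(struct inode *dir, struct dentry *dentry,
                                     struct iattr *attr)
    {
            /* ctx == NULL: the v4 code opens with fmode 0 and closes the
             * state again before returning (nfs4_close_sync() below). */
            return NFS_PROTO(dir)->create(dir, dentry, attr,
                                          O_CREAT | O_EXCL, NULL);
    }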
*/ + static int nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, - int flags) + int flags, struct nfs_open_context *ctx) { - struct nfs_open_context *ctx; + struct dentry *de = dentry; struct nfs4_state *state; + struct rpc_cred *cred = NULL; + fmode_t fmode = 0; int status = 0; - ctx = alloc_nfs_open_context(dentry, FMODE_READ); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); - + if (ctx != NULL) { + cred = ctx->cred; + de = ctx->dentry; + fmode = ctx->mode; + } sattr->ia_mode &= ~current_umask(); - state = nfs4_do_open(dir, dentry, ctx->mode, flags, sattr, ctx->cred, NULL); + state = nfs4_do_open(dir, de, fmode, flags, sattr, cred, NULL); d_drop(dentry); if (IS_ERR(state)) { status = PTR_ERR(state); @@ -2829,9 +2844,11 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, } d_add(dentry, igrab(state->inode)); nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); - ctx->state = state; + if (ctx != NULL) + ctx->state = state; + else + nfs4_close_sync(state, fmode); out: - put_nfs_open_context(ctx); return status; } diff --git a/trunk/fs/nfs/objlayout/objio_osd.c b/trunk/fs/nfs/objlayout/objio_osd.c index f50d3e8d6f22..b47277baebab 100644 --- a/trunk/fs/nfs/objlayout/objio_osd.c +++ b/trunk/fs/nfs/objlayout/objio_osd.c @@ -454,10 +454,7 @@ int objio_read_pagelist(struct nfs_read_data *rdata) objios->ios->done = _read_done; dprintk("%s: offset=0x%llx length=0x%x\n", __func__, rdata->args.offset, rdata->args.count); - ret = ore_read(objios->ios); - if (unlikely(ret)) - objio_free_result(&objios->oir); - return ret; + return ore_read(objios->ios); } /* @@ -489,16 +486,8 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate) struct nfs_write_data *wdata = objios->oir.rpcdata; struct address_space *mapping = wdata->header->inode->i_mapping; pgoff_t index = offset / PAGE_SIZE; - struct page *page; - loff_t i_size = i_size_read(wdata->header->inode); - - if (offset >= i_size) { - *uptodate = true; - dprintk("%s: g_zero_page index=0x%lx\n", __func__, index); - return ZERO_PAGE(0); - } + struct page *page = find_get_page(mapping, index); - page = find_get_page(mapping, index); if (!page) { page = find_or_create_page(mapping, index, GFP_NOFS); if (unlikely(!page)) { @@ -518,10 +507,8 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate) static void __r4w_put_page(void *priv, struct page *page) { - dprintk("%s: index=0x%lx\n", __func__, - (page == ZERO_PAGE(0)) ? 
-1UL : page->index); - if (ZERO_PAGE(0) != page) - page_cache_release(page); + dprintk("%s: index=0x%lx\n", __func__, page->index); + page_cache_release(page); return; } @@ -552,10 +539,8 @@ int objio_write_pagelist(struct nfs_write_data *wdata, int how) dprintk("%s: offset=0x%llx length=0x%x\n", __func__, wdata->args.offset, wdata->args.count); ret = ore_write(objios->ios); - if (unlikely(ret)) { - objio_free_result(&objios->oir); + if (unlikely(ret)) return ret; - } if (objios->sync) _write_done(objios->ios, objios); diff --git a/trunk/fs/nfs/proc.c b/trunk/fs/nfs/proc.c index 4433806e116f..617c7419a08e 100644 --- a/trunk/fs/nfs/proc.c +++ b/trunk/fs/nfs/proc.c @@ -259,7 +259,7 @@ static void nfs_free_createdata(const struct nfs_createdata *data) static int nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, - int flags) + int flags, struct nfs_open_context *ctx) { struct nfs_createdata *data; struct rpc_message msg = { diff --git a/trunk/fs/nfs/super.c b/trunk/fs/nfs/super.c index 8b2a2977b720..06228192f64e 100644 --- a/trunk/fs/nfs/super.c +++ b/trunk/fs/nfs/super.c @@ -2419,7 +2419,7 @@ static struct dentry *nfs_fs_mount_common(struct file_system_type *fs_type, sb_mntdata.mntflags |= MS_SYNCHRONOUS; /* Get a superblock - note that we may end up sharing one that already exists */ - s = sget(fs_type, compare_super, nfs_set_super, flags, &sb_mntdata); + s = sget(fs_type, compare_super, nfs_set_super, &sb_mntdata); if (IS_ERR(s)) { mntroot = ERR_CAST(s); goto out_err_nosb; diff --git a/trunk/fs/nfsd/vfs.c b/trunk/fs/nfsd/vfs.c index 4700a0a929d7..c8bd9c3be7f7 100644 --- a/trunk/fs/nfsd/vfs.c +++ b/trunk/fs/nfsd/vfs.c @@ -745,7 +745,7 @@ __be32 nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int may_flags, struct file **filp) { - struct path path; + struct dentry *dentry; struct inode *inode; int flags = O_RDONLY|O_LARGEFILE; __be32 err; @@ -762,9 +762,8 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, if (err) goto out; - path.mnt = fhp->fh_export->ex_path.mnt; - path.dentry = fhp->fh_dentry; - inode = path.dentry->d_inode; + dentry = fhp->fh_dentry; + inode = dentry->d_inode; /* Disallow write access to files with the append-only bit set * or any access when mandatory locking enabled @@ -793,7 +792,8 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, else flags = O_WRONLY|O_LARGEFILE; } - *filp = dentry_open(&path, flags, current_cred()); + *filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt), + flags, current_cred()); if (IS_ERR(*filp)) host_err = PTR_ERR(*filp); else { @@ -1329,7 +1329,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, err = 0; switch (type) { case S_IFREG: - host_err = vfs_create(dirp, dchild, iap->ia_mode, true); + host_err = vfs_create(dirp, dchild, iap->ia_mode, NULL); if (!host_err) nfsd_check_ignore_resizing(iap); break; @@ -1492,7 +1492,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, goto out; } - host_err = vfs_create(dirp, dchild, iap->ia_mode, true); + host_err = vfs_create(dirp, dchild, iap->ia_mode, NULL); if (host_err < 0) { fh_drop_write(fhp); goto out_nfserr; diff --git a/trunk/fs/nilfs2/namei.c b/trunk/fs/nilfs2/namei.c index 1d0c0b84c5a3..b72847988b78 100644 --- a/trunk/fs/nilfs2/namei.c +++ b/trunk/fs/nilfs2/namei.c @@ -63,7 +63,7 @@ static inline int nilfs_add_nondir(struct dentry *dentry, struct inode *inode) */ static struct dentry * -nilfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) 
+nilfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct inode *inode; ino_t ino; @@ -85,7 +85,7 @@ nilfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) * with d_instantiate(). */ static int nilfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool excl) + struct nameidata *nd) { struct inode *inode; struct nilfs_transaction_info ti; diff --git a/trunk/fs/nilfs2/super.c b/trunk/fs/nilfs2/super.c index d57c42f974ea..1099a76cee59 100644 --- a/trunk/fs/nilfs2/super.c +++ b/trunk/fs/nilfs2/super.c @@ -1288,8 +1288,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags, err = -EBUSY; goto failed; } - s = sget(fs_type, nilfs_test_bdev_super, nilfs_set_bdev_super, flags, - sd.bdev); + s = sget(fs_type, nilfs_test_bdev_super, nilfs_set_bdev_super, sd.bdev); mutex_unlock(&sd.bdev->bd_fsfreeze_mutex); if (IS_ERR(s)) { err = PTR_ERR(s); @@ -1302,6 +1301,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags, s_new = true; /* New superblock instance created */ + s->s_flags = flags; s->s_mode = mode; strlcpy(s->s_id, bdevname(sd.bdev, b), sizeof(s->s_id)); sb_set_blocksize(s, block_size(sd.bdev)); diff --git a/trunk/fs/notify/fanotify/fanotify_user.c b/trunk/fs/notify/fanotify/fanotify_user.c index d43803669739..3568c8a8b138 100644 --- a/trunk/fs/notify/fanotify/fanotify_user.c +++ b/trunk/fs/notify/fanotify/fanotify_user.c @@ -61,6 +61,8 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group, static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event) { int client_fd; + struct dentry *dentry; + struct vfsmount *mnt; struct file *new_file; pr_debug("%s: group=%p event=%p\n", __func__, group, event); @@ -79,10 +81,12 @@ static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event) * we need a new file handle for the userspace program so it can read even if it was * originally opened O_WRONLY. */ + dentry = dget(event->path.dentry); + mnt = mntget(event->path.mnt); /* it's possible this event was an overflow event. in that case dentry and mnt * are NULL; That's fine, just don't call dentry open */ - if (event->path.dentry && event->path.mnt) - new_file = dentry_open(&event->path, + if (dentry && mnt) + new_file = dentry_open(dentry, mnt, group->fanotify_data.f_flags | FMODE_NONOTIFY, current_cred()); else diff --git a/trunk/fs/notify/fsnotify.c b/trunk/fs/notify/fsnotify.c index 6baadb5a8430..b39c5c161adb 100644 --- a/trunk/fs/notify/fsnotify.c +++ b/trunk/fs/notify/fsnotify.c @@ -52,7 +52,6 @@ void __fsnotify_vfsmount_delete(struct vfsmount *mnt) void __fsnotify_update_child_dentry_flags(struct inode *inode) { struct dentry *alias; - struct hlist_node *p; int watched; if (!S_ISDIR(inode->i_mode)) @@ -64,7 +63,7 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode) spin_lock(&inode->i_lock); /* run all of the dentries associated with this inode. Since this is a * directory, there damn well better only be one item on this list */ - hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) { + list_for_each_entry(alias, &inode->i_dentry, d_alias) { struct dentry *child; /* run all of the children of the original inode and fix their diff --git a/trunk/fs/ntfs/namei.c b/trunk/fs/ntfs/namei.c index 436f36037e09..358273e59ade 100644 --- a/trunk/fs/ntfs/namei.c +++ b/trunk/fs/ntfs/namei.c @@ -101,7 +101,7 @@ * Locking: Caller must hold i_mutex on the directory. 
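The nilfs_mount() hunk above (and the proc_mount() and mount_ns() hunks further on) restore the four-argument sget(): the mount flags no longer reach alloc_super(), so a caller that receives a brand-new superblock sets s->s_flags itself before filling it. A minimal sketch of that calling pattern, with hypothetical my_test/my_set/my_fill_super callbacks:

    #include <linux/err.h>
    #include <linux/fs.h>

    static int my_test(struct super_block *s, void *data);           /* hypothetical */
    static int my_set(struct super_block *s, void *data);            /* hypothetical */
    static int my_fill_super(struct super_block *s, void *data, int silent);

    static struct dentry *my_mount(struct file_system_type *fs_type, int flags,
                                   const char *dev_name, void *data)
    {
            struct super_block *s;
            int err;

            s = sget(fs_type, my_test, my_set, data);   /* no flags argument */
            if (IS_ERR(s))
                    return ERR_CAST(s);

            if (!s->s_root) {
                    s->s_flags = flags;                 /* caller's job now */
                    err = my_fill_super(s, data, flags & MS_SILENT ? 1 : 0);
                    if (err) {
                            deactivate_locked_super(s);
                            return ERR_PTR(err);
                    }
                    s->s_flags |= MS_ACTIVE;
            }
            return dget(s->s_root);
    }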
*/ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent, - unsigned int flags) + struct nameidata *nd) { ntfs_volume *vol = NTFS_SB(dir_ino->i_sb); struct inode *dent_inode; diff --git a/trunk/fs/ocfs2/dcache.c b/trunk/fs/ocfs2/dcache.c index 8db4b58b2e4b..e5ba34818332 100644 --- a/trunk/fs/ocfs2/dcache.c +++ b/trunk/fs/ocfs2/dcache.c @@ -49,13 +49,14 @@ void ocfs2_dentry_attach_gen(struct dentry *dentry) } -static int ocfs2_dentry_revalidate(struct dentry *dentry, unsigned int flags) +static int ocfs2_dentry_revalidate(struct dentry *dentry, + struct nameidata *nd) { struct inode *inode; int ret = 0; /* if all else fails, just return false */ struct ocfs2_super *osb; - if (flags & LOOKUP_RCU) + if (nd && nd->flags & LOOKUP_RCU) return -ECHILD; inode = dentry->d_inode; @@ -169,11 +170,13 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode, u64 parent_blkno, int skip_unhashed) { - struct hlist_node *p; - struct dentry *dentry; + struct list_head *p; + struct dentry *dentry = NULL; spin_lock(&inode->i_lock); - hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) { + list_for_each(p, &inode->i_dentry) { + dentry = list_entry(p, struct dentry, d_alias); + spin_lock(&dentry->d_lock); if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) { trace_ocfs2_find_local_alias(dentry->d_name.len, @@ -181,13 +184,16 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode, dget_dlock(dentry); spin_unlock(&dentry->d_lock); - spin_unlock(&inode->i_lock); - return dentry; + break; } spin_unlock(&dentry->d_lock); + + dentry = NULL; } + spin_unlock(&inode->i_lock); - return NULL; + + return dentry; } DEFINE_SPINLOCK(dentry_attach_lock); diff --git a/trunk/fs/ocfs2/dlmfs/dlmfs.c b/trunk/fs/ocfs2/dlmfs/dlmfs.c index 83b6f98e0665..e31d6ae013ab 100644 --- a/trunk/fs/ocfs2/dlmfs/dlmfs.c +++ b/trunk/fs/ocfs2/dlmfs/dlmfs.c @@ -526,7 +526,7 @@ static int dlmfs_mkdir(struct inode * dir, static int dlmfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool excl) + struct nameidata *nd) { int status = 0; struct inode *inode; diff --git a/trunk/fs/ocfs2/namei.c b/trunk/fs/ocfs2/namei.c index f1fd0741162b..9f39c640cddf 100644 --- a/trunk/fs/ocfs2/namei.c +++ b/trunk/fs/ocfs2/namei.c @@ -98,7 +98,7 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb, #define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64))) static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { int status; u64 blkno; @@ -618,7 +618,7 @@ static int ocfs2_mkdir(struct inode *dir, static int ocfs2_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool excl) + struct nameidata *nd) { int ret; diff --git a/trunk/fs/omfs/dir.c b/trunk/fs/omfs/dir.c index fb5b3ff79dc6..f00576ec320f 100644 --- a/trunk/fs/omfs/dir.c +++ b/trunk/fs/omfs/dir.c @@ -285,13 +285,13 @@ static int omfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) } static int omfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool excl) + struct nameidata *nd) { return omfs_add_node(dir, dentry, mode | S_IFREG); } static struct dentry *omfs_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { struct buffer_head *bh; struct inode *inode = NULL; diff --git a/trunk/fs/open.c b/trunk/fs/open.c index 1e914b397e12..1540632d8387 100644 --- a/trunk/fs/open.c +++ b/trunk/fs/open.c @@ -537,6 +537,25 @@ static int chown_common(struct path *path, uid_t user, gid_t group) return 
error; } +SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group) +{ + struct path path; + int error; + + error = user_path(filename, &path); + if (error) + goto out; + error = mnt_want_write(path.mnt); + if (error) + goto out_release; + error = chown_common(&path, user, group); + mnt_drop_write(path.mnt); +out_release: + path_put(&path); +out: + return error; +} + SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user, gid_t, group, int, flag) { @@ -564,15 +583,23 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user, return error; } -SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group) -{ - return sys_fchownat(AT_FDCWD, filename, user, group, 0); -} - SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group) { - return sys_fchownat(AT_FDCWD, filename, user, group, - AT_SYMLINK_NOFOLLOW); + struct path path; + int error; + + error = user_lpath(filename, &path); + if (error) + goto out; + error = mnt_want_write(path.mnt); + if (error) + goto out_release; + error = chown_common(&path, user, group); + mnt_drop_write(path.mnt); +out_release: + path_put(&path); +out: + return error; } SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group) @@ -640,9 +667,10 @@ int open_check_o_direct(struct file *f) return 0; } -static int do_dentry_open(struct file *f, - int (*open)(struct inode *, struct file *), - const struct cred *cred) +static struct file *do_dentry_open(struct dentry *dentry, struct vfsmount *mnt, + struct file *f, + int (*open)(struct inode *, struct file *), + const struct cred *cred) { static const struct file_operations empty_fops = {}; struct inode *inode; @@ -654,9 +682,9 @@ static int do_dentry_open(struct file *f, if (unlikely(f->f_flags & O_PATH)) f->f_mode = FMODE_PATH; - inode = f->f_path.dentry->d_inode; + inode = dentry->d_inode; if (f->f_mode & FMODE_WRITE) { - error = __get_file_write_access(inode, f->f_path.mnt); + error = __get_file_write_access(inode, mnt); if (error) goto cleanup_file; if (!special_file(inode->i_mode)) @@ -664,12 +692,14 @@ static int do_dentry_open(struct file *f, } f->f_mapping = inode->i_mapping; + f->f_path.dentry = dentry; + f->f_path.mnt = mnt; f->f_pos = 0; file_sb_list_add(f, inode->i_sb); if (unlikely(f->f_mode & FMODE_PATH)) { f->f_op = &empty_fops; - return 0; + return f; } f->f_op = fops_get(inode->i_fop); @@ -696,11 +726,10 @@ static int do_dentry_open(struct file *f, file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping); - return 0; + return f; cleanup_all: fops_put(f->f_op); - file_sb_list_del(f); if (f->f_mode & FMODE_WRITE) { put_write_access(inode); if (!special_file(inode->i_mode)) { @@ -711,62 +740,124 @@ static int do_dentry_open(struct file *f, * here, so just reset the state. 
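This fs/open.c hunk moves the dentry and vfsmount references into do_dentry_open() itself, and, as the comment added to dentry_open() further down notes, dentry_open() consumes the references it is given even when it fails. The nfsd_open() and fanotify hunks earlier follow the matching caller convention; a minimal sketch, with my_open_positive_dentry() purely illustrative:

    #include <linux/cred.h>
    #include <linux/dcache.h>
    #include <linux/fs.h>
    #include <linux/mount.h>

    /* Sketch only: open an already-looked-up dentry for reading. */
    static struct file *my_open_positive_dentry(struct dentry *dentry,
                                                struct vfsmount *mnt)
    {
            /* Take references for dentry_open(); it drops them itself on
             * failure, so there is no dput()/mntput() error path here. */
            return dentry_open(dget(dentry), mntget(mnt),
                               O_RDONLY | O_LARGEFILE, current_cred());
    }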
*/ file_reset_write(f); - mnt_drop_write(f->f_path.mnt); + mnt_drop_write(mnt); } } -cleanup_file: - path_put(&f->f_path); - f->f_path.mnt = NULL; + file_sb_list_del(f); f->f_path.dentry = NULL; - return error; + f->f_path.mnt = NULL; +cleanup_file: + dput(dentry); + mntput(mnt); + return ERR_PTR(error); +} + +static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt, + struct file *f, + int (*open)(struct inode *, struct file *), + const struct cred *cred) +{ + struct file *res = do_dentry_open(dentry, mnt, f, open, cred); + if (!IS_ERR(res)) { + int error = open_check_o_direct(f); + if (error) { + fput(res); + res = ERR_PTR(error); + } + } else { + put_filp(f); + } + return res; } /** - * finish_open - finish opening a file - * @od: opaque open data + * lookup_instantiate_filp - instantiates the open intent filp + * @nd: pointer to nameidata * @dentry: pointer to dentry * @open: open callback * - * This can be used to finish opening a file passed to i_op->atomic_open(). - * + * Helper for filesystems that want to use lookup open intents and pass back + * a fully instantiated struct file to the caller. + * This function is meant to be called from within a filesystem's + * lookup method. + * Beware of calling it for non-regular files! Those ->open methods might block + * (e.g. in fifo_open), leaving you with parent locked (and in case of fifo, + * leading to a deadlock, as nobody can open that fifo anymore, because + * another process to open fifo will block on locked parent when doing lookup). + * Note that in case of error, nd->intent.open.file is destroyed, but the + * path information remains valid. * If the open callback is set to NULL, then the standard f_op->open() * filesystem callback is substituted. */ -int finish_open(struct file *file, struct dentry *dentry, - int (*open)(struct inode *, struct file *), - int *opened) +struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry, + int (*open)(struct inode *, struct file *)) { - int error; - BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */ + const struct cred *cred = current_cred(); - mntget(file->f_path.mnt); - file->f_path.dentry = dget(dentry); - - error = do_dentry_open(file, open, current_cred()); - if (!error) - *opened |= FILE_OPENED; - - return error; + if (IS_ERR(nd->intent.open.file)) + goto out; + if (IS_ERR(dentry)) + goto out_err; + nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->path.mnt), + nd->intent.open.file, + open, cred); +out: + return nd->intent.open.file; +out_err: + release_open_intent(nd); + nd->intent.open.file = ERR_CAST(dentry); + goto out; } -EXPORT_SYMBOL(finish_open); +EXPORT_SYMBOL_GPL(lookup_instantiate_filp); /** - * finish_no_open - finish ->atomic_open() without opening the file - * - * @od: opaque open data - * @dentry: dentry or NULL (as returned from ->lookup()) + * nameidata_to_filp - convert a nameidata to an open filp. + * @nd: pointer to nameidata + * @flags: open flags * - * This can be used to set the result of a successful lookup in ->atomic_open(). - * The filesystem's atomic_open() method shall return NULL after calling this. 
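The kerneldoc restored above positions lookup_instantiate_filp() as the way a filesystem's ->lookup hands a fully opened file back through the open intent. A sketch of such a lookup, assuming hypothetical my_find_inode()/my_open() helpers and the helper signature shown in this hunk:

    #include <linux/err.h>
    #include <linux/fs.h>
    #include <linux/namei.h>

    static struct inode *my_find_inode(struct inode *dir, struct qstr *name); /* hypothetical */

    static int my_open(struct inode *inode, struct file *filp)
    {
            return 0;                       /* nothing special for this sketch */
    }

    static struct dentry *my_lookup(struct inode *dir, struct dentry *dentry,
                                    struct nameidata *nd)
    {
            struct inode *inode = my_find_inode(dir, &dentry->d_name);

            if (IS_ERR(inode))
                    return ERR_CAST(inode);
            d_add(dentry, inode);           /* NULL inode => negative dentry */

            /* Only regular files, and only when an open intent is present;
             * the kerneldoc above warns against non-regular files here. */
            if (nd && inode && S_ISREG(inode->i_mode) &&
                (nd->flags & LOOKUP_OPEN)) {
                    struct file *filp;

                    filp = lookup_instantiate_filp(nd, dentry, my_open);
                    if (IS_ERR(filp))
                            return ERR_CAST(filp);
            }
            return NULL;
    }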
+ * Note that this function destroys the original nameidata */ -int finish_no_open(struct file *file, struct dentry *dentry) +struct file *nameidata_to_filp(struct nameidata *nd) { - file->f_path.dentry = dentry; - return 1; + const struct cred *cred = current_cred(); + struct file *filp; + + /* Pick up the filp from the open intent */ + filp = nd->intent.open.file; + + /* Has the filesystem initialised the file for us? */ + if (filp->f_path.dentry != NULL) { + nd->intent.open.file = NULL; + } else { + struct file *res; + + path_get(&nd->path); + res = do_dentry_open(nd->path.dentry, nd->path.mnt, + filp, NULL, cred); + if (!IS_ERR(res)) { + int error; + + nd->intent.open.file = NULL; + BUG_ON(res != filp); + + error = open_check_o_direct(filp); + if (error) { + fput(filp); + filp = ERR_PTR(error); + } + } else { + /* Allow nd->intent.open.file to be recycled */ + filp = res; + } + } + return filp; } -EXPORT_SYMBOL(finish_no_open); -struct file *dentry_open(const struct path *path, int flags, +/* + * dentry_open() will have done dput(dentry) and mntput(mnt) if it returns an + * error. + */ +struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags, const struct cred *cred) { int error; @@ -775,28 +866,18 @@ struct file *dentry_open(const struct path *path, int flags, validate_creds(cred); /* We must always pass in a valid mount pointer. */ - BUG_ON(!path->mnt); + BUG_ON(!mnt); error = -ENFILE; f = get_empty_filp(); - if (f == NULL) + if (f == NULL) { + dput(dentry); + mntput(mnt); return ERR_PTR(error); + } f->f_flags = flags; - f->f_path = *path; - path_get(&f->f_path); - error = do_dentry_open(f, NULL, cred); - if (!error) { - error = open_check_o_direct(f); - if (error) { - fput(f); - f = ERR_PTR(error); - } - } else { - put_filp(f); - f = ERR_PTR(error); - } - return f; + return __dentry_open(dentry, mnt, f, NULL, cred); } EXPORT_SYMBOL(dentry_open); diff --git a/trunk/fs/openpromfs/inode.c b/trunk/fs/openpromfs/inode.c index 4a3477949bca..bc49c975d501 100644 --- a/trunk/fs/openpromfs/inode.c +++ b/trunk/fs/openpromfs/inode.c @@ -170,13 +170,13 @@ static const struct file_operations openprom_operations = { .llseek = generic_file_llseek, }; -static struct dentry *openpromfs_lookup(struct inode *, struct dentry *, unsigned int); +static struct dentry *openpromfs_lookup(struct inode *, struct dentry *, struct nameidata *); static const struct inode_operations openprom_inode_operations = { .lookup = openpromfs_lookup, }; -static struct dentry *openpromfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) +static struct dentry *openpromfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct op_inode_info *ent_oi, *oi = OP_I(dir); struct device_node *dp, *child; diff --git a/trunk/fs/pnode.c b/trunk/fs/pnode.c index 3e000a51ac0d..bed378db0758 100644 --- a/trunk/fs/pnode.c +++ b/trunk/fs/pnode.c @@ -237,9 +237,8 @@ int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry, source = get_source(m, prev_dest_mnt, prev_src_mnt, &type); - child = copy_tree(source, source->mnt.mnt_root, type); - if (IS_ERR(child)) { - ret = PTR_ERR(child); + if (!(child = copy_tree(source, source->mnt.mnt_root, type))) { + ret = -ENOMEM; list_splice(tree_list, tmp_list.prev); goto out; } diff --git a/trunk/fs/proc/base.c b/trunk/fs/proc/base.c index 2772208338f8..437195f204e1 100644 --- a/trunk/fs/proc/base.c +++ b/trunk/fs/proc/base.c @@ -1427,19 +1427,16 @@ static int proc_exe_link(struct dentry *dentry, struct path *exe_path) static 
void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd) { struct inode *inode = dentry->d_inode; - struct path path; int error = -EACCES; + /* We don't need a base pointer in the /proc filesystem */ + path_put(&nd->path); + /* Are we allowed to snoop on the tasks file descriptors? */ if (!proc_fd_access_allowed(inode)) goto out; - error = PROC_I(inode)->op.proc_get_link(dentry, &path); - if (error) - goto out; - - nd_jump_link(nd, &path); - return NULL; + error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path); out: return ERR_PTR(error); } @@ -1604,13 +1601,13 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) * made this apply to all per process world readable and executable * directories. */ -int pid_revalidate(struct dentry *dentry, unsigned int flags) +int pid_revalidate(struct dentry *dentry, struct nameidata *nd) { struct inode *inode; struct task_struct *task; const struct cred *cred; - if (flags & LOOKUP_RCU) + if (nd && nd->flags & LOOKUP_RCU) return -ECHILD; inode = dentry->d_inode; @@ -1784,7 +1781,7 @@ static int proc_fd_link(struct dentry *dentry, struct path *path) return proc_fd_info(dentry->d_inode, path, NULL); } -static int tid_fd_revalidate(struct dentry *dentry, unsigned int flags) +static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) { struct inode *inode; struct task_struct *task; @@ -1792,7 +1789,7 @@ static int tid_fd_revalidate(struct dentry *dentry, unsigned int flags) struct files_struct *files; const struct cred *cred; - if (flags & LOOKUP_RCU) + if (nd && nd->flags & LOOKUP_RCU) return -ECHILD; inode = dentry->d_inode; @@ -1871,7 +1868,7 @@ static struct dentry *proc_fd_instantiate(struct inode *dir, d_set_d_op(dentry, &tid_fd_dentry_operations); d_add(dentry, inode); /* Close the race of the process dying before we return the dentry */ - if (tid_fd_revalidate(dentry, 0)) + if (tid_fd_revalidate(dentry, NULL)) error = NULL; out: @@ -1959,7 +1956,7 @@ static int proc_readfd_common(struct file * filp, void * dirent, } static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { return proc_lookupfd_common(dir, dentry, proc_fd_instantiate); } @@ -2006,7 +2003,7 @@ static int dname_to_vma_addr(struct dentry *dentry, return 0; } -static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags) +static int map_files_d_revalidate(struct dentry *dentry, struct nameidata *nd) { unsigned long vm_start, vm_end; bool exact_vma_exists = false; @@ -2016,7 +2013,7 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags) struct inode *inode; int status = 0; - if (flags & LOOKUP_RCU) + if (nd && nd->flags & LOOKUP_RCU) return -ECHILD; if (!capable(CAP_SYS_ADMIN)) { @@ -2148,7 +2145,7 @@ proc_map_files_instantiate(struct inode *dir, struct dentry *dentry, } static struct dentry *proc_map_files_lookup(struct inode *dir, - struct dentry *dentry, unsigned int flags) + struct dentry *dentry, struct nameidata *nd) { unsigned long vm_start, vm_end; struct vm_area_struct *vma; @@ -2374,7 +2371,7 @@ static struct dentry *proc_fdinfo_instantiate(struct inode *dir, d_set_d_op(dentry, &tid_fd_dentry_operations); d_add(dentry, inode); /* Close the race of the process dying before we return the dentry */ - if (tid_fd_revalidate(dentry, 0)) + if (tid_fd_revalidate(dentry, NULL)) error = NULL; out: @@ -2383,7 +2380,7 @@ static struct dentry *proc_fdinfo_instantiate(struct inode *dir, static struct dentry 
*proc_lookupfdinfo(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { return proc_lookupfd_common(dir, dentry, proc_fdinfo_instantiate); } @@ -2433,7 +2430,7 @@ static struct dentry *proc_pident_instantiate(struct inode *dir, d_set_d_op(dentry, &pid_dentry_operations); d_add(dentry, inode); /* Close the race of the process dying before we return the dentry */ - if (pid_revalidate(dentry, 0)) + if (pid_revalidate(dentry, NULL)) error = NULL; out: return error; @@ -2633,7 +2630,7 @@ static const struct file_operations proc_attr_dir_operations = { }; static struct dentry *proc_attr_dir_lookup(struct inode *dir, - struct dentry *dentry, unsigned int flags) + struct dentry *dentry, struct nameidata *nd) { return proc_pident_lookup(dir, dentry, attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff)); @@ -3117,8 +3114,7 @@ static const struct file_operations proc_tgid_base_operations = { .llseek = default_llseek, }; -static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) -{ +static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd){ return proc_pident_lookup(dir, dentry, tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff)); } @@ -3241,13 +3237,13 @@ static struct dentry *proc_pid_instantiate(struct inode *dir, d_add(dentry, inode); /* Close the race of the process dying before we return the dentry */ - if (pid_revalidate(dentry, 0)) + if (pid_revalidate(dentry, NULL)) error = NULL; out: return error; } -struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags) +struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd) { struct dentry *result; struct task_struct *task; @@ -3474,8 +3470,7 @@ static int proc_tid_base_readdir(struct file * filp, tid_base_stuff,ARRAY_SIZE(tid_base_stuff)); } -static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) -{ +static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd){ return proc_pident_lookup(dir, dentry, tid_base_stuff, ARRAY_SIZE(tid_base_stuff)); } @@ -3513,13 +3508,13 @@ static struct dentry *proc_task_instantiate(struct inode *dir, d_add(dentry, inode); /* Close the race of the process dying before we return the dentry */ - if (pid_revalidate(dentry, 0)) + if (pid_revalidate(dentry, NULL)) error = NULL; out: return error; } -static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags) +static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd) { struct dentry *result = ERR_PTR(-ENOENT); struct task_struct *task; diff --git a/trunk/fs/proc/generic.c b/trunk/fs/proc/generic.c index b3647fe6a608..2edf34f2eb61 100644 --- a/trunk/fs/proc/generic.c +++ b/trunk/fs/proc/generic.c @@ -446,7 +446,7 @@ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir, } struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { return proc_lookup_de(PDE(dir), dir, dentry); } diff --git a/trunk/fs/proc/internal.h b/trunk/fs/proc/internal.h index e1167a1c9126..eca4aca5b6e2 100644 --- a/trunk/fs/proc/internal.h +++ b/trunk/fs/proc/internal.h @@ -106,7 +106,7 @@ void pde_users_dec(struct proc_dir_entry *pde); extern spinlock_t proc_subdir_lock; -struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsigned 
int); +struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *); int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir); unsigned long task_vsize(struct mm_struct *); unsigned long task_statm(struct mm_struct *, @@ -132,7 +132,7 @@ int proc_remount(struct super_block *sb, int *flags, char *data); * of the /proc/ subdirectories. */ int proc_readdir(struct file *, void *, filldir_t); -struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int); +struct dentry *proc_lookup(struct inode *, struct dentry *, struct nameidata *); @@ -142,7 +142,7 @@ typedef struct dentry *instantiate_t(struct inode *, struct dentry *, int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir, const char *name, int len, instantiate_t instantiate, struct task_struct *task, const void *ptr); -int pid_revalidate(struct dentry *dentry, unsigned int flags); +int pid_revalidate(struct dentry *dentry, struct nameidata *nd); struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task); extern const struct dentry_operations pid_dentry_operations; int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat); diff --git a/trunk/fs/proc/namespaces.c b/trunk/fs/proc/namespaces.c index b178ed733c36..0d9e23a39e49 100644 --- a/trunk/fs/proc/namespaces.c +++ b/trunk/fs/proc/namespaces.c @@ -56,7 +56,7 @@ static struct dentry *proc_ns_instantiate(struct inode *dir, d_set_d_op(dentry, &pid_dentry_operations); d_add(dentry, inode); /* Close the race of the process dying before we return the dentry */ - if (pid_revalidate(dentry, 0)) + if (pid_revalidate(dentry, NULL)) error = NULL; out: return error; @@ -140,7 +140,7 @@ const struct file_operations proc_ns_dir_operations = { }; static struct dentry *proc_ns_dir_lookup(struct inode *dir, - struct dentry *dentry, unsigned int flags) + struct dentry *dentry, struct nameidata *nd) { struct dentry *error; struct task_struct *task = get_proc_task(dir); diff --git a/trunk/fs/proc/proc_net.c b/trunk/fs/proc/proc_net.c index fe72cd073dea..06e1cc17caf6 100644 --- a/trunk/fs/proc/proc_net.c +++ b/trunk/fs/proc/proc_net.c @@ -119,7 +119,7 @@ static struct net *get_proc_task_net(struct inode *dir) } static struct dentry *proc_tgid_net_lookup(struct inode *dir, - struct dentry *dentry, unsigned int flags) + struct dentry *dentry, struct nameidata *nd) { struct dentry *de; struct net *net; diff --git a/trunk/fs/proc/proc_sysctl.c b/trunk/fs/proc/proc_sysctl.c index dfafeb2b05a0..3476bca8f7af 100644 --- a/trunk/fs/proc/proc_sysctl.c +++ b/trunk/fs/proc/proc_sysctl.c @@ -433,7 +433,7 @@ static struct ctl_table_header *grab_header(struct inode *inode) } static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { struct ctl_table_header *head = grab_header(dir); struct ctl_table_header *h = NULL; @@ -794,9 +794,9 @@ static const struct inode_operations proc_sys_dir_operations = { .getattr = proc_sys_getattr, }; -static int proc_sys_revalidate(struct dentry *dentry, unsigned int flags) +static int proc_sys_revalidate(struct dentry *dentry, struct nameidata *nd) { - if (flags & LOOKUP_RCU) + if (nd->flags & LOOKUP_RCU) return -ECHILD; return !PROC_I(dentry->d_inode)->sysctl->unregistering; } diff --git a/trunk/fs/proc/root.c b/trunk/fs/proc/root.c index 9a2d9fd7cadd..7c30fce037c0 100644 --- a/trunk/fs/proc/root.c +++ b/trunk/fs/proc/root.c @@ -111,7 +111,7 @@ static struct dentry *proc_mount(struct 
file_system_type *fs_type, options = data; } - sb = sget(fs_type, proc_test_super, proc_set_super, flags, ns); + sb = sget(fs_type, proc_test_super, proc_set_super, ns); if (IS_ERR(sb)) return ERR_CAST(sb); @@ -121,6 +121,7 @@ static struct dentry *proc_mount(struct file_system_type *fs_type, } if (!sb->s_root) { + sb->s_flags = flags; err = proc_fill_super(sb); if (err) { deactivate_locked_super(sb); @@ -199,12 +200,13 @@ static int proc_root_getattr(struct vfsmount *mnt, struct dentry *dentry, struct return 0; } -static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry, unsigned int flags) +static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry, struct nameidata *nd) { - if (!proc_lookup(dir, dentry, flags)) + if (!proc_lookup(dir, dentry, nd)) { return NULL; + } - return proc_pid_lookup(dir, dentry, flags); + return proc_pid_lookup(dir, dentry, nd); } static int proc_root_readdir(struct file * filp, diff --git a/trunk/fs/proc_namespace.c b/trunk/fs/proc_namespace.c index 5fe34c355e85..5e289a7cbad1 100644 --- a/trunk/fs/proc_namespace.c +++ b/trunk/fs/proc_namespace.c @@ -17,7 +17,7 @@ static unsigned mounts_poll(struct file *file, poll_table *wait) { - struct proc_mounts *p = proc_mounts(file->private_data); + struct proc_mounts *p = file->private_data; struct mnt_namespace *ns = p->ns; unsigned res = POLLIN | POLLRDNORM; @@ -121,7 +121,7 @@ static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt) static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt) { - struct proc_mounts *p = proc_mounts(m); + struct proc_mounts *p = m->private; struct mount *r = real_mount(mnt); struct super_block *sb = mnt->mnt_sb; struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; @@ -268,6 +268,7 @@ static int mounts_open_common(struct inode *inode, struct file *file, if (ret) goto err_free; + p->m.private = p; p->ns = ns; p->root = root; p->m.poll_event = ns->event; @@ -287,7 +288,7 @@ static int mounts_open_common(struct inode *inode, struct file *file, static int mounts_release(struct inode *inode, struct file *file) { - struct proc_mounts *p = proc_mounts(file->private_data); + struct proc_mounts *p = file->private_data; path_put(&p->root); put_mnt_ns(p->ns); return seq_release(inode, file); diff --git a/trunk/fs/qnx4/namei.c b/trunk/fs/qnx4/namei.c index d024505ba007..a512c0b30e8e 100644 --- a/trunk/fs/qnx4/namei.c +++ b/trunk/fs/qnx4/namei.c @@ -95,7 +95,7 @@ static struct buffer_head *qnx4_find_entry(int len, struct inode *dir, return NULL; } -struct dentry * qnx4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) +struct dentry * qnx4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { int ino; struct qnx4_inode_entry *de; diff --git a/trunk/fs/qnx4/qnx4.h b/trunk/fs/qnx4/qnx4.h index 34e2d329c97e..244d4620189b 100644 --- a/trunk/fs/qnx4/qnx4.h +++ b/trunk/fs/qnx4/qnx4.h @@ -23,7 +23,7 @@ struct qnx4_inode_info { }; extern struct inode *qnx4_iget(struct super_block *, unsigned long); -extern struct dentry *qnx4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags); +extern struct dentry *qnx4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd); extern unsigned long qnx4_count_free_blocks(struct super_block *sb); extern unsigned long qnx4_block_map(struct inode *inode, long iblock); diff --git a/trunk/fs/qnx6/inode.c b/trunk/fs/qnx6/inode.c index 2049c814bda4..e44012dc5645 100644 --- a/trunk/fs/qnx6/inode.c +++ b/trunk/fs/qnx6/inode.c @@ -622,6 
+622,7 @@ static struct inode *qnx6_alloc_inode(struct super_block *sb) static void qnx6_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); + INIT_LIST_HEAD(&inode->i_dentry); kmem_cache_free(qnx6_inode_cachep, QNX6_I(inode)); } diff --git a/trunk/fs/qnx6/namei.c b/trunk/fs/qnx6/namei.c index 0561326a94f5..8a97289e04ad 100644 --- a/trunk/fs/qnx6/namei.c +++ b/trunk/fs/qnx6/namei.c @@ -13,7 +13,7 @@ #include "qnx6.h" struct dentry *qnx6_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { unsigned ino; struct page *page; diff --git a/trunk/fs/qnx6/qnx6.h b/trunk/fs/qnx6/qnx6.h index b00fcc960d37..6c5e02a0b6a8 100644 --- a/trunk/fs/qnx6/qnx6.h +++ b/trunk/fs/qnx6/qnx6.h @@ -45,7 +45,7 @@ struct qnx6_inode_info { extern struct inode *qnx6_iget(struct super_block *sb, unsigned ino); extern struct dentry *qnx6_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags); + struct nameidata *nd); #ifdef CONFIG_QNX6FS_DEBUG extern void qnx6_superblock_debug(struct qnx6_super_block *, diff --git a/trunk/fs/quota/dquot.c b/trunk/fs/quota/dquot.c index d679fc48ef27..10cbe841cb7e 100644 --- a/trunk/fs/quota/dquot.c +++ b/trunk/fs/quota/dquot.c @@ -595,14 +595,12 @@ int dquot_scan_active(struct super_block *sb, } EXPORT_SYMBOL(dquot_scan_active); -/* Write all dquot structures to quota files */ -int dquot_writeback_dquots(struct super_block *sb, int type) +int dquot_quota_sync(struct super_block *sb, int type, int wait) { struct list_head *dirty; struct dquot *dquot; struct quota_info *dqopt = sb_dqopt(sb); int cnt; - int err, ret = 0; mutex_lock(&dqopt->dqonoff_mutex); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { @@ -626,9 +624,7 @@ int dquot_writeback_dquots(struct super_block *sb, int type) atomic_inc(&dquot->dq_count); spin_unlock(&dq_list_lock); dqstats_inc(DQST_LOOKUPS); - err = sb->dq_op->write_dquot(dquot); - if (!ret && err) - err = ret; + sb->dq_op->write_dquot(dquot); dqput(dquot); spin_lock(&dq_list_lock); } @@ -642,21 +638,7 @@ int dquot_writeback_dquots(struct super_block *sb, int type) dqstats_inc(DQST_SYNCS); mutex_unlock(&dqopt->dqonoff_mutex); - return ret; -} -EXPORT_SYMBOL(dquot_writeback_dquots); - -/* Write all dquot structures to disk and make them visible from userspace */ -int dquot_quota_sync(struct super_block *sb, int type) -{ - struct quota_info *dqopt = sb_dqopt(sb); - int cnt; - int ret; - - ret = dquot_writeback_dquots(sb, type); - if (ret) - return ret; - if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) + if (!wait || (dqopt->flags & DQUOT_QUOTA_SYS_FILE)) return 0; /* This is not very clever (and fast) but currently I don't know about diff --git a/trunk/fs/quota/quota.c b/trunk/fs/quota/quota.c index c659f92298d3..9a391204ca27 100644 --- a/trunk/fs/quota/quota.c +++ b/trunk/fs/quota/quota.c @@ -47,7 +47,7 @@ static int check_quotactl_permission(struct super_block *sb, int type, int cmd, static void quota_sync_one(struct super_block *sb, void *arg) { if (sb->s_qcop && sb->s_qcop->quota_sync) - sb->s_qcop->quota_sync(sb, *(int *)arg); + sb->s_qcop->quota_sync(sb, *(int *)arg, 1); } static int quota_sync_all(int type) @@ -270,7 +270,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, case Q_SYNC: if (!sb->s_qcop->quota_sync) return -ENOSYS; - return sb->s_qcop->quota_sync(sb, type); + return sb->s_qcop->quota_sync(sb, type, 1); case Q_XQUOTAON: case Q_XQUOTAOFF: case Q_XQUOTARM: diff --git a/trunk/fs/ramfs/inode.c b/trunk/fs/ramfs/inode.c index 
eab8c09d3801..a1fdabe21dec 100644 --- a/trunk/fs/ramfs/inode.c +++ b/trunk/fs/ramfs/inode.c @@ -114,7 +114,7 @@ static int ramfs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode) return retval; } -static int ramfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) +static int ramfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd) { return ramfs_mknod(dir, dentry, mode | S_IFREG, 0); } diff --git a/trunk/fs/read_write.c b/trunk/fs/read_write.c index 1adfb691e4f1..c20614f86c01 100644 --- a/trunk/fs/read_write.c +++ b/trunk/fs/read_write.c @@ -55,11 +55,10 @@ static loff_t lseek_execute(struct file *file, struct inode *inode, * @file: file structure to seek on * @offset: file offset to seek to * @origin: type of seek - * @size: max size of this file in file system - * @eof: offset used for SEEK_END position + * @size: max size of file system * * This is a variant of generic_file_llseek that allows passing in a custom - * maximum file size and a custom EOF position, for e.g. hashed directories + * file size. * * Synchronization: * SEEK_SET and SEEK_END are unsynchronized (but atomic on 64bit platforms) @@ -68,13 +67,13 @@ static loff_t lseek_execute(struct file *file, struct inode *inode, */ loff_t generic_file_llseek_size(struct file *file, loff_t offset, int origin, - loff_t maxsize, loff_t eof) + loff_t maxsize) { struct inode *inode = file->f_mapping->host; switch (origin) { case SEEK_END: - offset += eof; + offset += i_size_read(inode); break; case SEEK_CUR: /* @@ -100,7 +99,7 @@ generic_file_llseek_size(struct file *file, loff_t offset, int origin, * In the generic case the entire file is data, so as long as * offset isn't at the end of the file then the offset is data. */ - if (offset >= eof) + if (offset >= i_size_read(inode)) return -ENXIO; break; case SEEK_HOLE: @@ -108,9 +107,9 @@ generic_file_llseek_size(struct file *file, loff_t offset, int origin, * There is a virtual hole at the end of the file, so as long as * offset isn't i_size or larger, return i_size. 
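With the @eof parameter removed in the hunk above, generic_file_llseek_size() derives SEEK_END, SEEK_DATA and SEEK_HOLE positions from i_size_read() itself, so a filesystem that only needs a non-default upper bound calls the four-argument form. A minimal sketch; MY_MAX_FILESIZE and my_file_llseek() are hypothetical:

    #include <linux/fs.h>

    #define MY_MAX_FILESIZE  0x7fffffffffffffffLL   /* hypothetical limit */

    static loff_t my_file_llseek(struct file *file, loff_t offset, int origin)
    {
            /* Only the upper bound is filesystem specific now; EOF handling
             * comes from i_size_read() inside the helper. */
            return generic_file_llseek_size(file, offset, origin,
                                            MY_MAX_FILESIZE);
    }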
*/ - if (offset >= eof) + if (offset >= i_size_read(inode)) return -ENXIO; - offset = eof; + offset = i_size_read(inode); break; } @@ -133,8 +132,7 @@ loff_t generic_file_llseek(struct file *file, loff_t offset, int origin) struct inode *inode = file->f_mapping->host; return generic_file_llseek_size(file, offset, origin, - inode->i_sb->s_maxbytes, - i_size_read(inode)); + inode->i_sb->s_maxbytes); } EXPORT_SYMBOL(generic_file_llseek); diff --git a/trunk/fs/reiserfs/namei.c b/trunk/fs/reiserfs/namei.c index 8567fb847601..84e8a69cee9d 100644 --- a/trunk/fs/reiserfs/namei.c +++ b/trunk/fs/reiserfs/namei.c @@ -322,7 +322,7 @@ static int reiserfs_find_entry(struct inode *dir, const char *name, int namelen, } static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { int retval; int lock_depth; @@ -573,7 +573,7 @@ static int new_inode_init(struct inode *inode, struct inode *dir, umode_t mode) } static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool excl) + struct nameidata *nd) { int retval; struct inode *inode; @@ -634,8 +634,8 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mod reiserfs_update_inode_transaction(inode); reiserfs_update_inode_transaction(dir); - unlock_new_inode(inode); d_instantiate(dentry, inode); + unlock_new_inode(inode); retval = journal_end(&th, dir->i_sb, jbegin_count); out_failed: @@ -712,8 +712,8 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode goto out_failed; } - unlock_new_inode(inode); d_instantiate(dentry, inode); + unlock_new_inode(inode); retval = journal_end(&th, dir->i_sb, jbegin_count); out_failed: @@ -800,8 +800,8 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode // the above add_entry did not update dir's stat data reiserfs_update_sd(&th, dir); - unlock_new_inode(inode); d_instantiate(dentry, inode); + unlock_new_inode(inode); retval = journal_end(&th, dir->i_sb, jbegin_count); out_failed: reiserfs_write_unlock_once(dir->i_sb, lock_depth); @@ -1096,8 +1096,8 @@ static int reiserfs_symlink(struct inode *parent_dir, goto out_failed; } - unlock_new_inode(inode); d_instantiate(dentry, inode); + unlock_new_inode(inode); retval = journal_end(&th, parent_dir->i_sb, jbegin_count); out_failed: reiserfs_write_unlock(parent_dir->i_sb); diff --git a/trunk/fs/reiserfs/procfs.c b/trunk/fs/reiserfs/procfs.c index e60e87035bb3..2c1ade692cc8 100644 --- a/trunk/fs/reiserfs/procfs.c +++ b/trunk/fs/reiserfs/procfs.c @@ -403,7 +403,7 @@ static void *r_start(struct seq_file *m, loff_t * pos) if (l) return NULL; - if (IS_ERR(sget(&reiserfs_fs_type, test_sb, set_sb, 0, s))) + if (IS_ERR(sget(&reiserfs_fs_type, test_sb, set_sb, s))) return NULL; up_write(&s->s_umount); diff --git a/trunk/fs/reiserfs/super.c b/trunk/fs/reiserfs/super.c index 7a37dabf5a96..651ce767b55d 100644 --- a/trunk/fs/reiserfs/super.c +++ b/trunk/fs/reiserfs/super.c @@ -68,11 +68,6 @@ static int reiserfs_sync_fs(struct super_block *s, int wait) { struct reiserfs_transaction_handle th; - /* - * Writeback quota in non-journalled quota case - journalled quota has - * no dirty dquots - */ - dquot_writeback_dquots(s, -1); reiserfs_write_lock(s); if (!journal_begin(&th, s, 1)) if (!journal_end_sync(&th, s, 1)) diff --git a/trunk/fs/reiserfs/xattr.c b/trunk/fs/reiserfs/xattr.c index d319963aeb11..46fc1c20a6b1 100644 --- a/trunk/fs/reiserfs/xattr.c +++ b/trunk/fs/reiserfs/xattr.c @@ -62,7 +62,7 @@ static int 
xattr_create(struct inode *dir, struct dentry *dentry, int mode) { BUG_ON(!mutex_is_locked(&dir->i_mutex)); - return dir->i_op->create(dir, dentry, mode, true); + return dir->i_op->create(dir, dentry, mode, NULL); } #endif @@ -942,7 +942,7 @@ int reiserfs_permission(struct inode *inode, int mask) return generic_permission(inode, mask); } -static int xattr_hide_revalidate(struct dentry *dentry, unsigned int flags) +static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd) { return -EPERM; } diff --git a/trunk/fs/romfs/super.c b/trunk/fs/romfs/super.c index 77c5f2173983..e64f6b5f7ae5 100644 --- a/trunk/fs/romfs/super.c +++ b/trunk/fs/romfs/super.c @@ -210,7 +210,7 @@ static int romfs_readdir(struct file *filp, void *dirent, filldir_t filldir) * look up an entry in a directory */ static struct dentry *romfs_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { unsigned long offset, maxoff; struct inode *inode; diff --git a/trunk/fs/squashfs/namei.c b/trunk/fs/squashfs/namei.c index 7834a517f7f4..abcc58f3c152 100644 --- a/trunk/fs/squashfs/namei.c +++ b/trunk/fs/squashfs/namei.c @@ -134,7 +134,7 @@ static int get_dir_index_using_name(struct super_block *sb, static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { const unsigned char *name = dentry->d_name.name; int len = dentry->d_name.len; diff --git a/trunk/fs/super.c b/trunk/fs/super.c index c743fb3be4b8..cf001775617f 100644 --- a/trunk/fs/super.c +++ b/trunk/fs/super.c @@ -105,12 +105,11 @@ static int prune_super(struct shrinker *shrink, struct shrink_control *sc) /** * alloc_super - create new superblock * @type: filesystem type superblock should belong to - * @flags: the mount flags * * Allocates and initializes a new &struct super_block. alloc_super() * returns a pointer new superblock or %NULL if allocation had failed. */ -static struct super_block *alloc_super(struct file_system_type *type, int flags) +static struct super_block *alloc_super(struct file_system_type *type) { struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER); static const struct super_operations default_op; @@ -137,7 +136,6 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags) #else INIT_LIST_HEAD(&s->s_files); #endif - s->s_flags = flags; s->s_bdi = &default_backing_dev_info; INIT_HLIST_NODE(&s->s_instances); INIT_HLIST_BL_HEAD(&s->s_anon); @@ -417,13 +415,11 @@ EXPORT_SYMBOL(generic_shutdown_super); * @type: filesystem type superblock should belong to * @test: comparison callback * @set: setup callback - * @flags: mount flags * @data: argument to each of them */ struct super_block *sget(struct file_system_type *type, int (*test)(struct super_block *,void *), int (*set)(struct super_block *,void *), - int flags, void *data) { struct super_block *s = NULL; @@ -454,7 +450,7 @@ struct super_block *sget(struct file_system_type *type, } if (!s) { spin_unlock(&sb_lock); - s = alloc_super(type, flags); + s = alloc_super(type); if (!s) return ERR_PTR(-ENOMEM); goto retry; @@ -929,12 +925,13 @@ struct dentry *mount_ns(struct file_system_type *fs_type, int flags, { struct super_block *sb; - sb = sget(fs_type, ns_test_super, ns_set_super, flags, data); + sb = sget(fs_type, ns_test_super, ns_set_super, data); if (IS_ERR(sb)) return ERR_CAST(sb); if (!sb->s_root) { int err; + sb->s_flags = flags; err = fill_super(sb, data, flags & MS_SILENT ? 
1 : 0); if (err) { deactivate_locked_super(sb); @@ -995,8 +992,7 @@ struct dentry *mount_bdev(struct file_system_type *fs_type, error = -EBUSY; goto error_bdev; } - s = sget(fs_type, test_bdev_super, set_bdev_super, flags | MS_NOSEC, - bdev); + s = sget(fs_type, test_bdev_super, set_bdev_super, bdev); mutex_unlock(&bdev->bd_fsfreeze_mutex); if (IS_ERR(s)) goto error_s; @@ -1021,6 +1017,7 @@ struct dentry *mount_bdev(struct file_system_type *fs_type, } else { char b[BDEVNAME_SIZE]; + s->s_flags = flags | MS_NOSEC; s->s_mode = mode; strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id)); sb_set_blocksize(s, block_size(bdev)); @@ -1065,11 +1062,13 @@ struct dentry *mount_nodev(struct file_system_type *fs_type, int (*fill_super)(struct super_block *, void *, int)) { int error; - struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL); + struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL); if (IS_ERR(s)) return ERR_CAST(s); + s->s_flags = flags; + error = fill_super(s, data, flags & MS_SILENT ? 1 : 0); if (error) { deactivate_locked_super(s); @@ -1092,10 +1091,11 @@ struct dentry *mount_single(struct file_system_type *fs_type, struct super_block *s; int error; - s = sget(fs_type, compare_single, set_anon_super, flags, NULL); + s = sget(fs_type, compare_single, set_anon_super, NULL); if (IS_ERR(s)) return ERR_CAST(s); if (!s->s_root) { + s->s_flags = flags; error = fill_super(s, data, flags & MS_SILENT ? 1 : 0); if (error) { deactivate_locked_super(s); diff --git a/trunk/fs/sync.c b/trunk/fs/sync.c index eb8722dc556f..11e3d1c44901 100644 --- a/trunk/fs/sync.c +++ b/trunk/fs/sync.c @@ -29,6 +29,16 @@ */ static int __sync_filesystem(struct super_block *sb, int wait) { + /* + * This should be safe, as we require bdi backing to actually + * write out data in the first place + */ + if (sb->s_bdi == &noop_backing_dev_info) + return 0; + + if (sb->s_qcop && sb->s_qcop->quota_sync) + sb->s_qcop->quota_sync(sb, -1, wait); + if (wait) sync_inodes_sb(sb); else @@ -67,48 +77,29 @@ int sync_filesystem(struct super_block *sb) } EXPORT_SYMBOL_GPL(sync_filesystem); -static void sync_inodes_one_sb(struct super_block *sb, void *arg) +static void sync_one_sb(struct super_block *sb, void *arg) { if (!(sb->s_flags & MS_RDONLY)) - sync_inodes_sb(sb); -} - -static void sync_fs_one_sb(struct super_block *sb, void *arg) -{ - if (!(sb->s_flags & MS_RDONLY) && sb->s_op->sync_fs) - sb->s_op->sync_fs(sb, *(int *)arg); + __sync_filesystem(sb, *(int *)arg); } - -static void fdatawrite_one_bdev(struct block_device *bdev, void *arg) -{ - filemap_fdatawrite(bdev->bd_inode->i_mapping); -} - -static void fdatawait_one_bdev(struct block_device *bdev, void *arg) +/* + * Sync all the data for all the filesystems (called by sys_sync() and + * emergency sync) + */ +static void sync_filesystems(int wait) { - filemap_fdatawait(bdev->bd_inode->i_mapping); + iterate_supers(sync_one_sb, &wait); } /* - * Sync everything. We start by waking flusher threads so that most of - * writeback runs on all devices in parallel. Then we sync all inodes reliably - * which effectively also waits for all flusher threads to finish doing - * writeback. At this point all data is on disk so metadata should be stable - * and we tell filesystems to sync their metadata via ->sync_fs() calls. - * Finally, we writeout all block devices because some filesystems (e.g. ext2) - * just write metadata (such as inodes or bitmaps) to block device page cache - * and do not sync it on their own in ->sync_fs(). + * sync everything. 
Start out by waking pdflush, because that writes back + * all queues in parallel. */ SYSCALL_DEFINE0(sync) { - int nowait = 0, wait = 1; - wakeup_flusher_threads(0, WB_REASON_SYNC); - iterate_supers(sync_inodes_one_sb, NULL); - iterate_supers(sync_fs_one_sb, &nowait); - iterate_supers(sync_fs_one_sb, &wait); - iterate_bdevs(fdatawrite_one_bdev, NULL); - iterate_bdevs(fdatawait_one_bdev, NULL); + sync_filesystems(0); + sync_filesystems(1); if (unlikely(laptop_mode)) laptop_sync_completion(); return 0; @@ -116,18 +107,12 @@ SYSCALL_DEFINE0(sync) static void do_sync_work(struct work_struct *work) { - int nowait = 0; - /* * Sync twice to reduce the possibility we skipped some inodes / pages * because they were temporarily locked */ - iterate_supers(sync_inodes_one_sb, &nowait); - iterate_supers(sync_fs_one_sb, &nowait); - iterate_bdevs(fdatawrite_one_bdev, NULL); - iterate_supers(sync_inodes_one_sb, &nowait); - iterate_supers(sync_fs_one_sb, &nowait); - iterate_bdevs(fdatawrite_one_bdev, NULL); + sync_filesystems(0); + sync_filesystems(0); printk("Emergency Sync complete\n"); kfree(work); } diff --git a/trunk/fs/sysfs/dir.c b/trunk/fs/sysfs/dir.c index a5cf784f9cc2..e6bb9b2a4cbe 100644 --- a/trunk/fs/sysfs/dir.c +++ b/trunk/fs/sysfs/dir.c @@ -300,15 +300,15 @@ void release_sysfs_dirent(struct sysfs_dirent * sd) static int sysfs_dentry_delete(const struct dentry *dentry) { struct sysfs_dirent *sd = dentry->d_fsdata; - return !(sd && !(sd->s_flags & SYSFS_FLAG_REMOVED)); + return !!(sd->s_flags & SYSFS_FLAG_REMOVED); } -static int sysfs_dentry_revalidate(struct dentry *dentry, unsigned int flags) +static int sysfs_dentry_revalidate(struct dentry *dentry, struct nameidata *nd) { struct sysfs_dirent *sd; int is_dir; - if (flags & LOOKUP_RCU) + if (nd->flags & LOOKUP_RCU) return -ECHILD; sd = dentry->d_fsdata; @@ -355,15 +355,18 @@ static int sysfs_dentry_revalidate(struct dentry *dentry, unsigned int flags) return 0; } -static void sysfs_dentry_release(struct dentry *dentry) +static void sysfs_dentry_iput(struct dentry *dentry, struct inode *inode) { - sysfs_put(dentry->d_fsdata); + struct sysfs_dirent * sd = dentry->d_fsdata; + + sysfs_put(sd); + iput(inode); } -const struct dentry_operations sysfs_dentry_ops = { +static const struct dentry_operations sysfs_dentry_ops = { .d_revalidate = sysfs_dentry_revalidate, .d_delete = sysfs_dentry_delete, - .d_release = sysfs_dentry_release, + .d_iput = sysfs_dentry_iput, }; struct sysfs_dirent *sysfs_new_dirent(const char *name, umode_t mode, int type) @@ -761,7 +764,7 @@ int sysfs_create_dir(struct kobject * kobj) } static struct dentry * sysfs_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { struct dentry *ret = NULL; struct dentry *parent = dentry->d_parent; @@ -783,7 +786,6 @@ static struct dentry * sysfs_lookup(struct inode *dir, struct dentry *dentry, ret = ERR_PTR(-ENOENT); goto out_unlock; } - dentry->d_fsdata = sysfs_get(sd); /* attach dentry and inode */ inode = sysfs_get_inode(dir->i_sb, sd); @@ -793,7 +795,16 @@ static struct dentry * sysfs_lookup(struct inode *dir, struct dentry *dentry, } /* instantiate and hash dentry */ - ret = d_materialise_unique(dentry, inode); + ret = d_find_alias(inode); + if (!ret) { + d_set_d_op(dentry, &sysfs_dentry_ops); + dentry->d_fsdata = sysfs_get(sd); + d_add(dentry, inode); + } else { + d_move(ret, dentry); + iput(inode); + } + out_unlock: mutex_unlock(&sysfs_mutex); return ret; diff --git a/trunk/fs/sysfs/mount.c b/trunk/fs/sysfs/mount.c index 
71eb7e253927..52c3bdb66a84 100644 --- a/trunk/fs/sysfs/mount.c +++ b/trunk/fs/sysfs/mount.c @@ -68,7 +68,6 @@ static int sysfs_fill_super(struct super_block *sb, void *data, int silent) } root->d_fsdata = &sysfs_root; sb->s_root = root; - sb->s_d_op = &sysfs_dentry_ops; return 0; } @@ -118,12 +117,13 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type, for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++) info->ns[type] = kobj_ns_grab_current(type); - sb = sget(fs_type, sysfs_test_super, sysfs_set_super, flags, info); + sb = sget(fs_type, sysfs_test_super, sysfs_set_super, info); if (IS_ERR(sb) || sb->s_fs_info != info) free_sysfs_super_info(info); if (IS_ERR(sb)) return ERR_CAST(sb); if (!sb->s_root) { + sb->s_flags = flags; error = sysfs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0); if (error) { deactivate_locked_super(sb); diff --git a/trunk/fs/sysfs/sysfs.h b/trunk/fs/sysfs/sysfs.h index d73c0932bbd6..661a9639570b 100644 --- a/trunk/fs/sysfs/sysfs.h +++ b/trunk/fs/sysfs/sysfs.h @@ -157,7 +157,6 @@ extern struct kmem_cache *sysfs_dir_cachep; */ extern struct mutex sysfs_mutex; extern spinlock_t sysfs_assoc_lock; -extern const struct dentry_operations sysfs_dentry_ops; extern const struct file_operations sysfs_dir_operations; extern const struct inode_operations sysfs_dir_inode_operations; diff --git a/trunk/fs/sysv/inode.c b/trunk/fs/sysv/inode.c index 80e1e2b18df1..08d0b2568cd3 100644 --- a/trunk/fs/sysv/inode.c +++ b/trunk/fs/sysv/inode.c @@ -43,6 +43,7 @@ static int sysv_sync_fs(struct super_block *sb, int wait) * then attach current time stamp. * But if the filesystem was marked clean, keep it clean. */ + sb->s_dirt = 0; old_time = fs32_to_cpu(sbi, *sbi->s_sb_time); if (sbi->s_type == FSTYPE_SYSV4) { if (*sbi->s_sb_state == cpu_to_fs32(sbi, 0x7c269d38 - old_time)) @@ -56,12 +57,23 @@ static int sysv_sync_fs(struct super_block *sb, int wait) return 0; } +static void sysv_write_super(struct super_block *sb) +{ + if (!(sb->s_flags & MS_RDONLY)) + sysv_sync_fs(sb, 1); + else + sb->s_dirt = 0; +} + static int sysv_remount(struct super_block *sb, int *flags, char *data) { struct sysv_sb_info *sbi = SYSV_SB(sb); - + lock_super(sb); if (sbi->s_forced_ro) *flags |= MS_RDONLY; + if (*flags & MS_RDONLY) + sysv_write_super(sb); + unlock_super(sb); return 0; } @@ -69,6 +81,9 @@ static void sysv_put_super(struct super_block *sb) { struct sysv_sb_info *sbi = SYSV_SB(sb); + if (sb->s_dirt) + sysv_write_super(sb); + if (!(sb->s_flags & MS_RDONLY)) { /* XXX ext2 also updates the state here */ mark_buffer_dirty(sbi->s_bh1); @@ -342,6 +357,7 @@ const struct super_operations sysv_sops = { .write_inode = sysv_write_inode, .evict_inode = sysv_evict_inode, .put_super = sysv_put_super, + .write_super = sysv_write_super, .sync_fs = sysv_sync_fs, .remount_fs = sysv_remount, .statfs = sysv_statfs, diff --git a/trunk/fs/sysv/namei.c b/trunk/fs/sysv/namei.c index 1c0d5f264767..d7466e293614 100644 --- a/trunk/fs/sysv/namei.c +++ b/trunk/fs/sysv/namei.c @@ -43,7 +43,7 @@ const struct dentry_operations sysv_dentry_operations = { .d_hash = sysv_hash, }; -static struct dentry *sysv_lookup(struct inode * dir, struct dentry * dentry, unsigned int flags) +static struct dentry *sysv_lookup(struct inode * dir, struct dentry * dentry, struct nameidata *nd) { struct inode * inode = NULL; ino_t ino; @@ -80,7 +80,7 @@ static int sysv_mknod(struct inode * dir, struct dentry * dentry, umode_t mode, return err; } -static int sysv_create(struct inode * dir, struct dentry * dentry, umode_t mode, bool 
excl) +static int sysv_create(struct inode * dir, struct dentry * dentry, umode_t mode, struct nameidata *nd) { return sysv_mknod(dir, dentry, mode, 0); } diff --git a/trunk/fs/sysv/sysv.h b/trunk/fs/sysv/sysv.h index 0bc35fdc58e2..11b07672f6c5 100644 --- a/trunk/fs/sysv/sysv.h +++ b/trunk/fs/sysv/sysv.h @@ -117,6 +117,7 @@ static inline void dirty_sb(struct super_block *sb) mark_buffer_dirty(sbi->s_bh1); if (sbi->s_bh1 != sbi->s_bh2) mark_buffer_dirty(sbi->s_bh2); + sb->s_dirt = 1; } diff --git a/trunk/fs/ubifs/debug.c b/trunk/fs/ubifs/debug.c index bb3167257aab..92df3b081539 100644 --- a/trunk/fs/ubifs/debug.c +++ b/trunk/fs/ubifs/debug.c @@ -2802,8 +2802,6 @@ static ssize_t dfs_file_read(struct file *file, char __user *u, size_t count, val = d->chk_fs; else if (dent == d->dfs_tst_rcvry) val = d->tst_rcvry; - else if (dent == d->dfs_ro_error) - val = c->ro_error; else return -EINVAL; @@ -2887,8 +2885,6 @@ static ssize_t dfs_file_write(struct file *file, const char __user *u, d->chk_fs = val; else if (dent == d->dfs_tst_rcvry) d->tst_rcvry = val; - else if (dent == d->dfs_ro_error) - c->ro_error = !!val; else return -EINVAL; @@ -3000,13 +2996,6 @@ int dbg_debugfs_init_fs(struct ubifs_info *c) goto out_remove; d->dfs_tst_rcvry = dent; - fname = "ro_error"; - dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, d->dfs_dir, c, - &dfs_fops); - if (IS_ERR_OR_NULL(dent)) - goto out_remove; - d->dfs_ro_error = dent; - return 0; out_remove: diff --git a/trunk/fs/ubifs/debug.h b/trunk/fs/ubifs/debug.h index 8b8cc4e945f4..486a8e024fb6 100644 --- a/trunk/fs/ubifs/debug.h +++ b/trunk/fs/ubifs/debug.h @@ -79,10 +79,6 @@ typedef int (*dbg_znode_callback)(struct ubifs_info *c, * @dfs_chk_lprops: debugfs knob to enable UBIFS LEP properties extra checks * @dfs_chk_fs: debugfs knob to enable UBIFS contents extra checks * @dfs_tst_rcvry: debugfs knob to enable UBIFS recovery testing - * @dfs_ro_error: debugfs knob to switch UBIFS to R/O mode (different to - * re-mounting to R/O mode because it does not flush any buffers - * and UBIFS just starts returning -EROFS on all write - * operations) */ struct ubifs_debug_info { struct ubifs_zbranch old_zroot; @@ -126,7 +122,6 @@ struct ubifs_debug_info { struct dentry *dfs_chk_lprops; struct dentry *dfs_chk_fs; struct dentry *dfs_tst_rcvry; - struct dentry *dfs_ro_error; }; /** diff --git a/trunk/fs/ubifs/dir.c b/trunk/fs/ubifs/dir.c index c95681cf1b71..a6d42efc76d2 100644 --- a/trunk/fs/ubifs/dir.c +++ b/trunk/fs/ubifs/dir.c @@ -184,7 +184,7 @@ static int dbg_check_name(const struct ubifs_info *c, } static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { int err; union ubifs_key key; @@ -246,7 +246,7 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry, } static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool excl) + struct nameidata *nd) { struct inode *inode; struct ubifs_info *c = dir->i_sb->s_fs_info; @@ -969,7 +969,7 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry, struct ubifs_budget_req ino_req = { .dirtied_ino = 1, .dirtied_ino_d = ALIGN(old_inode_ui->data_len, 8) }; struct timespec time; - unsigned int uninitialized_var(saved_nlink); + unsigned int saved_nlink; /* * Budget request settings: deletion direntry, new direntry, removing diff --git a/trunk/fs/ubifs/orphan.c b/trunk/fs/ubifs/orphan.c index cebf17ea0458..b02734db187c 100644 --- a/trunk/fs/ubifs/orphan.c +++ b/trunk/fs/ubifs/orphan.c @@ -176,7 
+176,7 @@ int ubifs_orphan_start_commit(struct ubifs_info *c) *last = orphan; last = &orphan->cnext; } - *last = NULL; + *last = orphan->cnext; c->cmt_orphans = c->new_orphans; c->new_orphans = 0; dbg_cmt("%d orphans to commit", c->cmt_orphans); @@ -382,7 +382,7 @@ static int consolidate(struct ubifs_info *c) last = &orphan->cnext; cnt += 1; } - *last = NULL; + *last = orphan->cnext; ubifs_assert(cnt == c->tot_orphans - c->new_orphans); c->cmt_orphans = cnt; c->ohead_lnum = c->orph_first; diff --git a/trunk/fs/ubifs/replay.c b/trunk/fs/ubifs/replay.c index eba46d4a7619..3a2da7e476e5 100644 --- a/trunk/fs/ubifs/replay.c +++ b/trunk/fs/ubifs/replay.c @@ -1007,7 +1007,7 @@ static int take_ihead(struct ubifs_info *c) */ int ubifs_replay_journal(struct ubifs_info *c) { - int err, lnum, free; + int err, i, lnum, offs, free; BUILD_BUG_ON(UBIFS_TRUN_KEY > 5); @@ -1025,17 +1025,25 @@ int ubifs_replay_journal(struct ubifs_info *c) dbg_mnt("start replaying the journal"); c->replaying = 1; lnum = c->ltail_lnum = c->lhead_lnum; + offs = c->lhead_offs; - lnum = UBIFS_LOG_LNUM; - do { - err = replay_log_leb(c, lnum, 0, c->sbuf); + for (i = 0; i < c->log_lebs; i++, lnum++) { + if (lnum >= UBIFS_LOG_LNUM + c->log_lebs) { + /* + * The log is logically circular, we reached the last + * LEB, switch to the first one. + */ + lnum = UBIFS_LOG_LNUM; + offs = 0; + } + err = replay_log_leb(c, lnum, offs, c->sbuf); if (err == 1) /* We hit the end of the log */ break; if (err) goto out; - lnum = ubifs_next_log_lnum(c, lnum); - } while (lnum != UBIFS_LOG_LNUM); + offs = 0; + } err = replay_buds(c); if (err) diff --git a/trunk/fs/ubifs/sb.c b/trunk/fs/ubifs/sb.c index 15e2fc5aa60b..ef3d1ba6d992 100644 --- a/trunk/fs/ubifs/sb.c +++ b/trunk/fs/ubifs/sb.c @@ -718,12 +718,8 @@ static int fixup_free_space(struct ubifs_info *c) lnum = ubifs_next_log_lnum(c, lnum); } - /* - * Fixup the log head which contains the only a CS node at the - * beginning. - */ - err = fixup_leb(c, c->lhead_lnum, - ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size)); + /* Fixup the current log head */ + err = fixup_leb(c, c->lhead_lnum, c->lhead_offs); if (err) goto out; diff --git a/trunk/fs/ubifs/super.c b/trunk/fs/ubifs/super.c index 1c766c39c038..5862dd9d2784 100644 --- a/trunk/fs/ubifs/super.c +++ b/trunk/fs/ubifs/super.c @@ -2136,7 +2136,7 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags, dbg_gen("opened ubi%d_%d", c->vi.ubi_num, c->vi.vol_id); - sb = sget(fs_type, sb_test, sb_set, flags, c); + sb = sget(fs_type, sb_test, sb_set, c); if (IS_ERR(sb)) { err = PTR_ERR(sb); kfree(c); @@ -2153,6 +2153,7 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags, goto out_deact; } } else { + sb->s_flags = flags; err = ubifs_fill_super(sb, data, flags & MS_SILENT ? 
1 : 0); if (err) goto out_deact; diff --git a/trunk/fs/udf/namei.c b/trunk/fs/udf/namei.c index 544b2799a911..18024178ac4c 100644 --- a/trunk/fs/udf/namei.c +++ b/trunk/fs/udf/namei.c @@ -251,7 +251,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir, } static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { struct inode *inode = NULL; struct fileIdentDesc cfi; @@ -551,7 +551,7 @@ static int udf_delete_entry(struct inode *inode, struct fileIdentDesc *fi, } static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool excl) + struct nameidata *nd) { struct udf_fileident_bh fibh; struct inode *inode; diff --git a/trunk/fs/ufs/balloc.c b/trunk/fs/ufs/balloc.c index 1b3e410bf334..42694e11c23d 100644 --- a/trunk/fs/ufs/balloc.c +++ b/trunk/fs/ufs/balloc.c @@ -116,7 +116,7 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count) ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); if (sb->s_flags & MS_SYNCHRONOUS) ubh_sync_block(UCPI_UBH(ucpi)); - ufs_mark_sb_dirty(sb); + sb->s_dirt = 1; unlock_super (sb); UFSD("EXIT\n"); @@ -214,7 +214,7 @@ void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count) goto do_more; } - ufs_mark_sb_dirty(sb); + sb->s_dirt = 1; unlock_super (sb); UFSD("EXIT\n"); return; @@ -557,7 +557,7 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment, ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); if (sb->s_flags & MS_SYNCHRONOUS) ubh_sync_block(UCPI_UBH(ucpi)); - ufs_mark_sb_dirty(sb); + sb->s_dirt = 1; UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment); @@ -677,7 +677,7 @@ static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno, ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); if (sb->s_flags & MS_SYNCHRONOUS) ubh_sync_block(UCPI_UBH(ucpi)); - ufs_mark_sb_dirty(sb); + sb->s_dirt = 1; result += cgno * uspi->s_fpg; UFSD("EXIT3, result %llu\n", (unsigned long long)result); diff --git a/trunk/fs/ufs/ialloc.c b/trunk/fs/ufs/ialloc.c index e84cbe21b986..4ec5c1085a87 100644 --- a/trunk/fs/ufs/ialloc.c +++ b/trunk/fs/ufs/ialloc.c @@ -116,7 +116,7 @@ void ufs_free_inode (struct inode * inode) if (sb->s_flags & MS_SYNCHRONOUS) ubh_sync_block(UCPI_UBH(ucpi)); - ufs_mark_sb_dirty(sb); + sb->s_dirt = 1; unlock_super (sb); UFSD("EXIT\n"); } @@ -288,7 +288,7 @@ struct inode *ufs_new_inode(struct inode *dir, umode_t mode) ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); if (sb->s_flags & MS_SYNCHRONOUS) ubh_sync_block(UCPI_UBH(ucpi)); - ufs_mark_sb_dirty(sb); + sb->s_dirt = 1; inode->i_ino = cg * uspi->s_ipg + bit; inode_init_owner(inode, dir, mode); diff --git a/trunk/fs/ufs/namei.c b/trunk/fs/ufs/namei.c index 90d74b8f8eba..a2281cadefa1 100644 --- a/trunk/fs/ufs/namei.c +++ b/trunk/fs/ufs/namei.c @@ -46,7 +46,7 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode) return err; } -static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, unsigned int flags) +static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd) { struct inode * inode = NULL; ino_t ino; @@ -71,7 +71,7 @@ static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, unsi * with d_instantiate(). 
*/ static int ufs_create (struct inode * dir, struct dentry * dentry, umode_t mode, - bool excl) + struct nameidata *nd) { struct inode *inode; int err; diff --git a/trunk/fs/ufs/super.c b/trunk/fs/ufs/super.c index 444927e5706b..302f340d0071 100644 --- a/trunk/fs/ufs/super.c +++ b/trunk/fs/ufs/super.c @@ -302,7 +302,7 @@ void ufs_error (struct super_block * sb, const char * function, if (!(sb->s_flags & MS_RDONLY)) { usb1->fs_clean = UFS_FSBAD; ubh_mark_buffer_dirty(USPI_UBH(uspi)); - ufs_mark_sb_dirty(sb); + sb->s_dirt = 1; sb->s_flags |= MS_RDONLY; } va_start (args, fmt); @@ -334,7 +334,7 @@ void ufs_panic (struct super_block * sb, const char * function, if (!(sb->s_flags & MS_RDONLY)) { usb1->fs_clean = UFS_FSBAD; ubh_mark_buffer_dirty(USPI_UBH(uspi)); - ufs_mark_sb_dirty(sb); + sb->s_dirt = 1; } va_start (args, fmt); vsnprintf (error_buf, sizeof(error_buf), fmt, args); @@ -691,83 +691,6 @@ static void ufs_put_super_internal(struct super_block *sb) UFSD("EXIT\n"); } -static int ufs_sync_fs(struct super_block *sb, int wait) -{ - struct ufs_sb_private_info * uspi; - struct ufs_super_block_first * usb1; - struct ufs_super_block_third * usb3; - unsigned flags; - - lock_ufs(sb); - lock_super(sb); - - UFSD("ENTER\n"); - - flags = UFS_SB(sb)->s_flags; - uspi = UFS_SB(sb)->s_uspi; - usb1 = ubh_get_usb_first(uspi); - usb3 = ubh_get_usb_third(uspi); - - usb1->fs_time = cpu_to_fs32(sb, get_seconds()); - if ((flags & UFS_ST_MASK) == UFS_ST_SUN || - (flags & UFS_ST_MASK) == UFS_ST_SUNOS || - (flags & UFS_ST_MASK) == UFS_ST_SUNx86) - ufs_set_fs_state(sb, usb1, usb3, - UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time)); - ufs_put_cstotal(sb); - - UFSD("EXIT\n"); - unlock_super(sb); - unlock_ufs(sb); - - return 0; -} - -static void delayed_sync_fs(struct work_struct *work) -{ - struct ufs_sb_info *sbi; - - sbi = container_of(work, struct ufs_sb_info, sync_work.work); - - spin_lock(&sbi->work_lock); - sbi->work_queued = 0; - spin_unlock(&sbi->work_lock); - - ufs_sync_fs(sbi->sb, 1); -} - -void ufs_mark_sb_dirty(struct super_block *sb) -{ - struct ufs_sb_info *sbi = UFS_SB(sb); - unsigned long delay; - - spin_lock(&sbi->work_lock); - if (!sbi->work_queued) { - delay = msecs_to_jiffies(dirty_writeback_interval * 10); - queue_delayed_work(system_long_wq, &sbi->sync_work, delay); - sbi->work_queued = 1; - } - spin_unlock(&sbi->work_lock); -} - -static void ufs_put_super(struct super_block *sb) -{ - struct ufs_sb_info * sbi = UFS_SB(sb); - - UFSD("ENTER\n"); - - if (!(sb->s_flags & MS_RDONLY)) - ufs_put_super_internal(sb); - cancel_delayed_work_sync(&sbi->sync_work); - - ubh_brelse_uspi (sbi->s_uspi); - kfree (sbi->s_uspi); - kfree (sbi); - sb->s_fs_info = NULL; - UFSD("EXIT\n"); - return; -} - static int ufs_fill_super(struct super_block *sb, void *data, int silent) { struct ufs_sb_info * sbi; @@ -793,7 +716,6 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent) if (!sbi) goto failed_nomem; sb->s_fs_info = sbi; - sbi->sb = sb; UFSD("flag %u\n", (int)(sb->s_flags & MS_RDONLY)); @@ -805,8 +727,6 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent) } #endif mutex_init(&sbi->mutex); - spin_lock_init(&sbi->work_lock); - INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs); /* * Set default mount options * Parse mount options @@ -1271,6 +1191,68 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent) return -ENOMEM; } +static int ufs_sync_fs(struct super_block *sb, int wait) +{ + struct ufs_sb_private_info * uspi; + struct ufs_super_block_first * usb1; 
+ struct ufs_super_block_third * usb3; + unsigned flags; + + lock_ufs(sb); + lock_super(sb); + + UFSD("ENTER\n"); + + flags = UFS_SB(sb)->s_flags; + uspi = UFS_SB(sb)->s_uspi; + usb1 = ubh_get_usb_first(uspi); + usb3 = ubh_get_usb_third(uspi); + + usb1->fs_time = cpu_to_fs32(sb, get_seconds()); + if ((flags & UFS_ST_MASK) == UFS_ST_SUN || + (flags & UFS_ST_MASK) == UFS_ST_SUNOS || + (flags & UFS_ST_MASK) == UFS_ST_SUNx86) + ufs_set_fs_state(sb, usb1, usb3, + UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time)); + ufs_put_cstotal(sb); + sb->s_dirt = 0; + + UFSD("EXIT\n"); + unlock_super(sb); + unlock_ufs(sb); + + return 0; +} + +static void ufs_write_super(struct super_block *sb) +{ + if (!(sb->s_flags & MS_RDONLY)) + ufs_sync_fs(sb, 1); + else + sb->s_dirt = 0; +} + +static void ufs_put_super(struct super_block *sb) +{ + struct ufs_sb_info * sbi = UFS_SB(sb); + + UFSD("ENTER\n"); + + if (sb->s_dirt) + ufs_write_super(sb); + + if (!(sb->s_flags & MS_RDONLY)) + ufs_put_super_internal(sb); + + ubh_brelse_uspi (sbi->s_uspi); + kfree (sbi->s_uspi); + kfree (sbi); + sb->s_fs_info = NULL; + UFSD("EXIT\n"); + return; +} + + static int ufs_remount (struct super_block *sb, int *mount_flags, char *data) { struct ufs_sb_private_info * uspi; @@ -1326,6 +1308,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data) ufs_set_fs_state(sb, usb1, usb3, UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time)); ubh_mark_buffer_dirty (USPI_UBH(uspi)); + sb->s_dirt = 0; sb->s_flags |= MS_RDONLY; } else { /* @@ -1475,6 +1458,7 @@ static const struct super_operations ufs_super_ops = { .write_inode = ufs_write_inode, .evict_inode = ufs_evict_inode, .put_super = ufs_put_super, + .write_super = ufs_write_super, .sync_fs = ufs_sync_fs, .statfs = ufs_statfs, .remount_fs = ufs_remount, diff --git a/trunk/fs/ufs/ufs.h b/trunk/fs/ufs/ufs.h index 343e6fc571e5..528750b7e701 100644 --- a/trunk/fs/ufs/ufs.h +++ b/trunk/fs/ufs/ufs.h @@ -20,10 +20,6 @@ struct ufs_sb_info { unsigned s_mount_opt; struct mutex mutex; struct task_struct *mutex_owner; - struct super_block *sb; - int work_queued; /* non-zero if the delayed work is queued */ - struct delayed_work sync_work; /* FS sync delayed work */ - spinlock_t work_lock; /* protects sync_work and work_queued */ }; struct ufs_inode_info { @@ -127,7 +123,6 @@ extern __printf(3, 4) void ufs_error(struct super_block *, const char *, const char *, ...); extern __printf(3, 4) void ufs_panic(struct super_block *, const char *, const char *, ...); -void ufs_mark_sb_dirty(struct super_block *sb); /* symlink.c */ extern const struct inode_operations ufs_fast_symlink_inode_operations; diff --git a/trunk/fs/ufs/ufs_fs.h b/trunk/fs/ufs/ufs_fs.h index 0cbd5d340b67..8aba544f9fad 100644 --- a/trunk/fs/ufs/ufs_fs.h +++ b/trunk/fs/ufs/ufs_fs.h @@ -34,7 +34,6 @@ #include #include #include -#include #include typedef __u64 __bitwise __fs64; diff --git a/trunk/fs/xfs/xfs_alloc.c b/trunk/fs/xfs/xfs_alloc.c index 4f33c32affe3..9d1aeb7e2734 100644 --- a/trunk/fs/xfs/xfs_alloc.c +++ b/trunk/fs/xfs/xfs_alloc.c @@ -1074,13 +1074,13 @@ xfs_alloc_ag_vextent_near( * If we couldn't get anything, give up. 
*/ if (bno_cur_lt == NULL && bno_cur_gt == NULL) { - xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); - if (!forced++) { trace_xfs_alloc_near_busy(args); xfs_log_force(args->mp, XFS_LOG_SYNC); goto restart; } + + xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); trace_xfs_alloc_size_neither(args); args->agbno = NULLAGBLOCK; return 0; @@ -2434,22 +2434,13 @@ xfs_alloc_vextent_worker( current_restore_flags_nested(&pflags, PF_FSTRANS); } -/* - * Data allocation requests often come in with little stack to work on. Push - * them off to a worker thread so there is lots of stack to use. Metadata - * requests, OTOH, are generally from low stack usage paths, so avoid the - * context switch overhead here. - */ -int + +int /* error */ xfs_alloc_vextent( - struct xfs_alloc_arg *args) + xfs_alloc_arg_t *args) /* allocation argument structure */ { DECLARE_COMPLETION_ONSTACK(done); - if (!args->userdata) - return __xfs_alloc_vextent(args); - - args->done = &done; INIT_WORK_ONSTACK(&args->work, xfs_alloc_vextent_worker); queue_work(xfs_alloc_wq, &args->work); diff --git a/trunk/fs/xfs/xfs_buf.c b/trunk/fs/xfs/xfs_buf.c index 269b35c084da..a4beb421018a 100644 --- a/trunk/fs/xfs/xfs_buf.c +++ b/trunk/fs/xfs/xfs_buf.c @@ -989,6 +989,27 @@ xfs_buf_ioerror_alert( (__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length); } +int +xfs_bwrite( + struct xfs_buf *bp) +{ + int error; + + ASSERT(xfs_buf_islocked(bp)); + + bp->b_flags |= XBF_WRITE; + bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q); + + xfs_bdstrat_cb(bp); + + error = xfs_buf_iowait(bp); + if (error) { + xfs_force_shutdown(bp->b_target->bt_mount, + SHUTDOWN_META_IO_ERROR); + } + return error; +} + /* * Called when we want to stop a buffer from getting written or read. * We attach the EIO error, muck with its flags, and call xfs_buf_ioend @@ -1058,7 +1079,14 @@ xfs_bioerror_relse( return EIO; } -STATIC int + +/* + * All xfs metadata buffers except log state machine buffers + * get this attached as their b_bdstrat callback function. + * This is so that we can catch a buffer + * after prematurely unpinning it to forcibly shutdown the filesystem. + */ +int xfs_bdstrat_cb( struct xfs_buf *bp) { @@ -1079,27 +1107,6 @@ xfs_bdstrat_cb( return 0; } -int -xfs_bwrite( - struct xfs_buf *bp) -{ - int error; - - ASSERT(xfs_buf_islocked(bp)); - - bp->b_flags |= XBF_WRITE; - bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q); - - xfs_bdstrat_cb(bp); - - error = xfs_buf_iowait(bp); - if (error) { - xfs_force_shutdown(bp->b_target->bt_mount, - SHUTDOWN_META_IO_ERROR); - } - return error; -} - /* * Wrapper around bdstrat so that we can stop data from going to disk in case * we are shutting down the filesystem. 
Typically user data goes thru this @@ -1236,7 +1243,7 @@ xfs_buf_iorequest( */ atomic_set(&bp->b_io_remaining, 1); _xfs_buf_ioapply(bp); - _xfs_buf_ioend(bp, 1); + _xfs_buf_ioend(bp, 0); xfs_buf_rele(bp); } diff --git a/trunk/fs/xfs/xfs_buf.h b/trunk/fs/xfs/xfs_buf.h index 79344c48008e..7f1d1392ce37 100644 --- a/trunk/fs/xfs/xfs_buf.h +++ b/trunk/fs/xfs/xfs_buf.h @@ -180,6 +180,7 @@ extern void xfs_buf_unlock(xfs_buf_t *); extern int xfs_bwrite(struct xfs_buf *bp); extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *); +extern int xfs_bdstrat_cb(struct xfs_buf *); extern void xfs_buf_ioend(xfs_buf_t *, int); extern void xfs_buf_ioerror(xfs_buf_t *, int); diff --git a/trunk/fs/xfs/xfs_buf_item.c b/trunk/fs/xfs/xfs_buf_item.c index d9e451115f98..45df2b857d48 100644 --- a/trunk/fs/xfs/xfs_buf_item.c +++ b/trunk/fs/xfs/xfs_buf_item.c @@ -954,7 +954,7 @@ xfs_buf_iodone_callbacks( if (!XFS_BUF_ISSTALE(bp)) { bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE; - xfs_buf_iorequest(bp); + xfs_bdstrat_cb(bp); } else { xfs_buf_relse(bp); } diff --git a/trunk/fs/xfs/xfs_ioctl.c b/trunk/fs/xfs/xfs_ioctl.c index 1f1535d25a9b..3a05a41b5d76 100644 --- a/trunk/fs/xfs/xfs_ioctl.c +++ b/trunk/fs/xfs/xfs_ioctl.c @@ -208,7 +208,6 @@ xfs_open_by_handle( struct inode *inode; struct dentry *dentry; fmode_t fmode; - struct path path; if (!capable(CAP_SYS_ADMIN)) return -XFS_ERROR(EPERM); @@ -253,10 +252,8 @@ xfs_open_by_handle( goto out_dput; } - path.mnt = parfilp->f_path.mnt; - path.dentry = dentry; - filp = dentry_open(&path, hreq->oflags, cred); - dput(dentry); + filp = dentry_open(dentry, mntget(parfilp->f_path.mnt), + hreq->oflags, cred); if (IS_ERR(filp)) { put_unused_fd(fd); return PTR_ERR(filp); diff --git a/trunk/fs/xfs/xfs_iops.c b/trunk/fs/xfs/xfs_iops.c index 9c4340f5c3e0..1a25fd802798 100644 --- a/trunk/fs/xfs/xfs_iops.c +++ b/trunk/fs/xfs/xfs_iops.c @@ -179,7 +179,7 @@ xfs_vn_create( struct inode *dir, struct dentry *dentry, umode_t mode, - bool flags) + struct nameidata *nd) { return xfs_vn_mknod(dir, dentry, mode, 0); } @@ -197,7 +197,7 @@ STATIC struct dentry * xfs_vn_lookup( struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { struct xfs_inode *cip; struct xfs_name name; @@ -222,7 +222,7 @@ STATIC struct dentry * xfs_vn_ci_lookup( struct inode *dir, struct dentry *dentry, - unsigned int flags) + struct nameidata *nd) { struct xfs_inode *ip; struct xfs_name xname; diff --git a/trunk/include/acpi/acpi_bus.h b/trunk/include/acpi/acpi_bus.h index 18fd41033e03..9e6e1c6eb60a 100644 --- a/trunk/include/acpi/acpi_bus.h +++ b/trunk/include/acpi/acpi_bus.h @@ -117,6 +117,9 @@ struct acpi_device; typedef int (*acpi_op_add) (struct acpi_device * device); typedef int (*acpi_op_remove) (struct acpi_device * device, int type); typedef int (*acpi_op_start) (struct acpi_device * device); +typedef int (*acpi_op_suspend) (struct acpi_device * device, + pm_message_t state); +typedef int (*acpi_op_resume) (struct acpi_device * device); typedef int (*acpi_op_bind) (struct acpi_device * device); typedef int (*acpi_op_unbind) (struct acpi_device * device); typedef void (*acpi_op_notify) (struct acpi_device * device, u32 event); @@ -130,6 +133,8 @@ struct acpi_device_ops { acpi_op_add add; acpi_op_remove remove; acpi_op_start start; + acpi_op_suspend suspend; + acpi_op_resume resume; acpi_op_bind bind; acpi_op_unbind unbind; acpi_op_notify notify; diff --git a/trunk/include/acpi/processor.h b/trunk/include/acpi/processor.h index 64ec644808bc..9d650476d5dc 100644 --- 
a/trunk/include/acpi/processor.h +++ b/trunk/include/acpi/processor.h @@ -59,7 +59,10 @@ struct acpi_processor_cx { u8 entry_method; u8 index; u32 latency; + u32 latency_ticks; u32 power; + u32 usage; + u64 time; u8 bm_sts_skip; char desc[ACPI_CX_DESC_LEN]; }; @@ -331,8 +334,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr); int acpi_processor_hotplug(struct acpi_processor *pr); int acpi_processor_power_exit(struct acpi_processor *pr, struct acpi_device *device); -int acpi_processor_suspend(struct device *dev); -int acpi_processor_resume(struct device *dev); +int acpi_processor_suspend(struct acpi_device * device, pm_message_t state); +int acpi_processor_resume(struct acpi_device * device); extern struct cpuidle_driver acpi_idle_driver; /* in processor_thermal.c */ diff --git a/trunk/include/asm-generic/dma-contiguous.h b/trunk/include/asm-generic/dma-contiguous.h index 294b1e755ab2..c544356b374b 100644 --- a/trunk/include/asm-generic/dma-contiguous.h +++ b/trunk/include/asm-generic/dma-contiguous.h @@ -18,7 +18,7 @@ static inline void dev_set_cma_area(struct device *dev, struct cma *cma) { if (dev) dev->cma_area = cma; - if (!dev && !dma_contiguous_default_area) + if (!dev || !dma_contiguous_default_area) dma_contiguous_default_area = cma; } diff --git a/trunk/include/linux/capability.h b/trunk/include/linux/capability.h index d10b7ed595b1..68d56effc328 100644 --- a/trunk/include/linux/capability.h +++ b/trunk/include/linux/capability.h @@ -360,11 +360,11 @@ struct cpu_vfs_cap_data { #define CAP_WAKE_ALARM 35 -/* Allow preventing system suspends */ +/* Allow preventing system suspends while epoll events are pending */ -#define CAP_BLOCK_SUSPEND 36 +#define CAP_EPOLLWAKEUP 36 -#define CAP_LAST_CAP CAP_BLOCK_SUSPEND +#define CAP_LAST_CAP CAP_EPOLLWAKEUP #define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP) diff --git a/trunk/include/linux/ceph/messenger.h b/trunk/include/linux/ceph/messenger.h index 44c87e731e9d..2521a95fa6d9 100644 --- a/trunk/include/linux/ceph/messenger.h +++ b/trunk/include/linux/ceph/messenger.h @@ -163,8 +163,16 @@ struct ceph_connection { /* connection negotiation temps */ char in_banner[CEPH_BANNER_MAX_LEN]; - struct ceph_msg_connect out_connect; - struct ceph_msg_connect_reply in_reply; + union { + struct { /* outgoing connection */ + struct ceph_msg_connect out_connect; + struct ceph_msg_connect_reply in_reply; + }; + struct { /* incoming */ + struct ceph_msg_connect in_connect; + struct ceph_msg_connect_reply out_reply; + }; + }; struct ceph_entity_addr actual_peer_addr; /* message out temps */ diff --git a/trunk/include/linux/cpuidle.h b/trunk/include/linux/cpuidle.h index 89dcd30ac8ea..6c26a3da0e03 100644 --- a/trunk/include/linux/cpuidle.h +++ b/trunk/include/linux/cpuidle.h @@ -34,7 +34,6 @@ struct cpuidle_driver; struct cpuidle_state_usage { void *driver_data; - unsigned long long disable; unsigned long long usage; unsigned long long time; /* in US */ }; @@ -47,7 +46,7 @@ struct cpuidle_state { unsigned int exit_latency; /* in US */ int power_usage; /* in mW */ unsigned int target_residency; /* in US */ - bool disabled; /* disabled on all CPUs */ + unsigned int disable; int (*enter) (struct cpuidle_device *dev, struct cpuidle_driver *drv, @@ -137,17 +136,13 @@ struct cpuidle_driver { extern void disable_cpuidle(void); extern int cpuidle_idle_call(void); extern int cpuidle_register_driver(struct cpuidle_driver *drv); -extern struct cpuidle_driver *cpuidle_get_driver(void); -extern struct cpuidle_driver *cpuidle_driver_ref(void); -extern void 
cpuidle_driver_unref(void); +struct cpuidle_driver *cpuidle_get_driver(void); extern void cpuidle_unregister_driver(struct cpuidle_driver *drv); extern int cpuidle_register_device(struct cpuidle_device *dev); extern void cpuidle_unregister_device(struct cpuidle_device *dev); extern void cpuidle_pause_and_lock(void); extern void cpuidle_resume_and_unlock(void); -extern void cpuidle_pause(void); -extern void cpuidle_resume(void); extern int cpuidle_enable_device(struct cpuidle_device *dev); extern void cpuidle_disable_device(struct cpuidle_device *dev); extern int cpuidle_wrap_enter(struct cpuidle_device *dev, @@ -162,8 +157,6 @@ static inline int cpuidle_idle_call(void) { return -ENODEV; } static inline int cpuidle_register_driver(struct cpuidle_driver *drv) {return -ENODEV; } static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; } -static inline struct cpuidle_driver *cpuidle_driver_ref(void) {return NULL; } -static inline void cpuidle_driver_unref(void) {} static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { } static inline int cpuidle_register_device(struct cpuidle_device *dev) {return -ENODEV; } @@ -171,8 +164,6 @@ static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { } static inline void cpuidle_pause_and_lock(void) { } static inline void cpuidle_resume_and_unlock(void) { } -static inline void cpuidle_pause(void) { } -static inline void cpuidle_resume(void) { } static inline int cpuidle_enable_device(struct cpuidle_device *dev) {return -ENODEV; } static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } @@ -211,7 +202,14 @@ struct cpuidle_governor { extern int cpuidle_register_governor(struct cpuidle_governor *gov); extern void cpuidle_unregister_governor(struct cpuidle_governor *gov); +#ifdef CONFIG_INTEL_IDLE +extern int intel_idle_cpu_init(int cpu); #else +static inline int intel_idle_cpu_init(int cpu) { return -1; } +#endif + +#else +static inline int intel_idle_cpu_init(int cpu) { return -1; } static inline int cpuidle_register_governor(struct cpuidle_governor *gov) {return 0;} diff --git a/trunk/include/linux/dcache.h b/trunk/include/linux/dcache.h index caa34e50537e..094789ff3e9f 100644 --- a/trunk/include/linux/dcache.h +++ b/trunk/include/linux/dcache.h @@ -128,7 +128,7 @@ struct dentry { struct rcu_head d_rcu; } d_u; struct list_head d_subdirs; /* our children */ - struct hlist_node d_alias; /* inode alias list */ + struct list_head d_alias; /* inode alias list */ }; /* @@ -144,7 +144,7 @@ enum dentry_d_lock_class }; struct dentry_operations { - int (*d_revalidate)(struct dentry *, unsigned int); + int (*d_revalidate)(struct dentry *, struct nameidata *); int (*d_hash)(const struct dentry *, const struct inode *, struct qstr *); int (*d_compare)(const struct dentry *, const struct inode *, diff --git a/trunk/include/linux/device.h b/trunk/include/linux/device.h index 6de94151ff6f..161d96241b1b 100644 --- a/trunk/include/linux/device.h +++ b/trunk/include/linux/device.h @@ -865,6 +865,8 @@ extern int (*platform_notify_remove)(struct device *dev); extern struct device *get_device(struct device *dev); extern void put_device(struct device *dev); +extern void wait_for_device_probe(void); + #ifdef CONFIG_DEVTMPFS extern int devtmpfs_create_node(struct device *dev); extern int devtmpfs_delete_node(struct device *dev); diff --git a/trunk/include/linux/eventpoll.h b/trunk/include/linux/eventpoll.h index f4bb378ccf6a..6f8be328770a 100644 --- a/trunk/include/linux/eventpoll.h +++ 
b/trunk/include/linux/eventpoll.h @@ -34,7 +34,7 @@ * re-allowed until epoll_wait is called again after consuming the wakeup * event(s). * - * Requires CAP_BLOCK_SUSPEND + * Requires CAP_EPOLLWAKEUP */ #define EPOLLWAKEUP (1 << 29) diff --git a/trunk/include/linux/file.h b/trunk/include/linux/file.h index a22408bac0d0..58bf158c53d9 100644 --- a/trunk/include/linux/file.h +++ b/trunk/include/linux/file.h @@ -39,7 +39,4 @@ extern void put_unused_fd(unsigned int fd); extern void fd_install(unsigned int fd, struct file *file); -extern void flush_delayed_fput(void); -extern void __fput_sync(struct file *); - #endif /* __LINUX_FILE_H */ diff --git a/trunk/include/linux/fs.h b/trunk/include/linux/fs.h index 8fabb037a48d..17fd887c798f 100644 --- a/trunk/include/linux/fs.h +++ b/trunk/include/linux/fs.h @@ -826,7 +826,7 @@ struct inode { struct list_head i_lru; /* inode LRU list */ struct list_head i_sb_list; union { - struct hlist_head i_dentry; + struct list_head i_dentry; struct rcu_head i_rcu; }; u64 i_version; @@ -1571,7 +1571,7 @@ extern void unlock_super(struct super_block *); /* * VFS helper functions.. */ -extern int vfs_create(struct inode *, struct dentry *, umode_t, bool); +extern int vfs_create(struct inode *, struct dentry *, umode_t, struct nameidata *); extern int vfs_mkdir(struct inode *, struct dentry *, umode_t); extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t); extern int vfs_symlink(struct inode *, struct dentry *, const char *); @@ -1666,7 +1666,7 @@ struct file_operations { }; struct inode_operations { - struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int); + struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *); void * (*follow_link) (struct dentry *, struct nameidata *); int (*permission) (struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); @@ -1674,7 +1674,7 @@ struct inode_operations { int (*readlink) (struct dentry *, char __user *,int); void (*put_link) (struct dentry *, struct nameidata *, void *); - int (*create) (struct inode *,struct dentry *, umode_t, bool); + int (*create) (struct inode *,struct dentry *,umode_t,struct nameidata *); int (*link) (struct dentry *,struct inode *,struct dentry *); int (*unlink) (struct inode *,struct dentry *); int (*symlink) (struct inode *,struct dentry *,const char *); @@ -1693,9 +1693,6 @@ struct inode_operations { int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len); int (*update_time)(struct inode *, struct timespec *, int); - int (*atomic_open)(struct inode *, struct dentry *, - struct file *, unsigned open_flag, - umode_t create_mode, int *opened); } ____cacheline_aligned; struct seq_file; @@ -1914,7 +1911,7 @@ void free_anon_bdev(dev_t); struct super_block *sget(struct file_system_type *type, int (*test)(struct super_block *,void *), int (*set)(struct super_block *,void *), - int flags, void *data); + void *data); extern struct dentry *mount_pseudo(struct file_system_type *, char *, const struct super_operations *ops, const struct dentry_operations *dops, @@ -2060,17 +2057,10 @@ extern long do_sys_open(int dfd, const char __user *filename, int flags, extern struct file *filp_open(const char *, int, umode_t); extern struct file *file_open_root(struct dentry *, struct vfsmount *, const char *, int); -extern struct file * dentry_open(const struct path *, int, const struct cred *); +extern struct file * dentry_open(struct dentry *, struct vfsmount *, int, + const struct cred *); extern int filp_close(struct file *, 
fl_owner_t id); extern char * getname(const char __user *); -enum { - FILE_CREATED = 1, - FILE_OPENED = 2 -}; -extern int finish_open(struct file *file, struct dentry *dentry, - int (*open)(struct inode *, struct file *), - int *opened); -extern int finish_no_open(struct file *file, struct dentry *dentry); /* fs/ioctl.c */ @@ -2101,7 +2091,6 @@ extern sector_t blkdev_max_block(struct block_device *bdev); extern void bd_forget(struct inode *inode); extern void bdput(struct block_device *); extern void invalidate_bdev(struct block_device *); -extern void iterate_bdevs(void (*)(struct block_device *, void *), void *); extern int sync_blockdev(struct block_device *bdev); extern void kill_bdev(struct block_device *); extern struct super_block *freeze_bdev(struct block_device *); @@ -2123,10 +2112,6 @@ static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb) { return 0; } - -static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg) -{ -} #endif extern int sync_filesystem(struct super_block *); extern const struct file_operations def_blk_fops; @@ -2453,7 +2438,7 @@ extern loff_t noop_llseek(struct file *file, loff_t offset, int origin); extern loff_t no_llseek(struct file *file, loff_t offset, int origin); extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin); extern loff_t generic_file_llseek_size(struct file *file, loff_t offset, - int origin, loff_t maxsize, loff_t eof); + int origin, loff_t maxsize); extern int generic_file_open(struct inode * inode, struct file * filp); extern int nonseekable_open(struct inode * inode, struct file * filp); @@ -2575,7 +2560,7 @@ extern int simple_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata); -extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags); +extern struct dentry *simple_lookup(struct inode *, struct dentry *, struct nameidata *); extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *); extern const struct file_operations simple_dir_operations; extern const struct inode_operations simple_dir_inode_operations; diff --git a/trunk/include/linux/ftrace_event.h b/trunk/include/linux/ftrace_event.h index af961d6f7ab1..176a939d1547 100644 --- a/trunk/include/linux/ftrace_event.h +++ b/trunk/include/linux/ftrace_event.h @@ -65,7 +65,7 @@ struct trace_iterator { void *private; int cpu_file; struct mutex mutex; - struct ring_buffer_iter **buffer_iter; + struct ring_buffer_iter *buffer_iter[NR_CPUS]; unsigned long iter_flags; /* trace_seq for __print_flags() and __print_symbolic() etc. */ @@ -207,9 +207,6 @@ struct ftrace_event_call { * bit 1: enabled * bit 2: filter_active * bit 3: enabled cmd record - * bit 4: allow trace by non root (cap any) - * bit 5: failed to apply filter - * bit 6: ftrace internal event (do not enable) * * Changes to flags must hold the event_mutex. 
* diff --git a/trunk/include/linux/init_task.h b/trunk/include/linux/init_task.h index 8a7476186990..9e65eff6af3b 100644 --- a/trunk/include/linux/init_task.h +++ b/trunk/include/linux/init_task.h @@ -168,8 +168,8 @@ extern struct cred init_cred; .children = LIST_HEAD_INIT(tsk.children), \ .sibling = LIST_HEAD_INIT(tsk.sibling), \ .group_leader = &tsk, \ - RCU_POINTER_INITIALIZER(real_cred, &init_cred), \ - RCU_POINTER_INITIALIZER(cred, &init_cred), \ + RCU_INIT_POINTER(.real_cred, &init_cred), \ + RCU_INIT_POINTER(.cred, &init_cred), \ .comm = INIT_TASK_COMM, \ .thread = INIT_THREAD, \ .fs = &init_fs, \ diff --git a/trunk/include/linux/intel-iommu.h b/trunk/include/linux/intel-iommu.h index 78e2ada50cd5..e6ca56de9936 100644 --- a/trunk/include/linux/intel-iommu.h +++ b/trunk/include/linux/intel-iommu.h @@ -308,8 +308,6 @@ enum { struct intel_iommu { void __iomem *reg; /* Pointer to hardware regs, virtual addr */ - u64 reg_phys; /* physical address of hw register set */ - u64 reg_size; /* size of hw register set */ u64 cap; u64 ecap; u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */ diff --git a/trunk/include/linux/irq.h b/trunk/include/linux/irq.h index 553fb66da130..a5261e3d2e3c 100644 --- a/trunk/include/linux/irq.h +++ b/trunk/include/linux/irq.h @@ -150,7 +150,9 @@ struct irq_data { void *handler_data; void *chip_data; struct msi_desc *msi_desc; +#ifdef CONFIG_SMP cpumask_var_t affinity; +#endif }; /* diff --git a/trunk/include/linux/jump_label.h b/trunk/include/linux/jump_label.h index 0976fc46d1e0..c513a40510f5 100644 --- a/trunk/include/linux/jump_label.h +++ b/trunk/include/linux/jump_label.h @@ -42,7 +42,8 @@ * allowed. * * Not initializing the key (static data is initialized to 0s anyway) is the - * same as using STATIC_KEY_INIT_FALSE. + * same as using STATIC_KEY_INIT_FALSE and static_key_false() is + * equivalent with static_branch(). * */ @@ -106,6 +107,12 @@ static __always_inline bool static_key_true(struct static_key *key) return !static_key_false(key); } +/* Deprecated. Please use 'static_key_false() instead. */ +static __always_inline bool static_branch(struct static_key *key) +{ + return arch_static_branch(key); +} + extern struct jump_entry __start___jump_table[]; extern struct jump_entry __stop___jump_table[]; @@ -159,6 +166,14 @@ static __always_inline bool static_key_true(struct static_key *key) return false; } +/* Deprecated. Please use 'static_key_false() instead. 
*/ +static __always_inline bool static_branch(struct static_key *key) +{ + if (unlikely(atomic_read(&key->enabled)) > 0) + return true; + return false; +} + static inline void static_key_slow_inc(struct static_key *key) { atomic_inc(&key->enabled); diff --git a/trunk/include/linux/kernel.h b/trunk/include/linux/kernel.h index 604382143bcf..e07f5e0c5df4 100644 --- a/trunk/include/linux/kernel.h +++ b/trunk/include/linux/kernel.h @@ -377,6 +377,7 @@ extern enum system_states { SYSTEM_HALT, SYSTEM_POWER_OFF, SYSTEM_RESTART, + SYSTEM_SUSPEND_DISK, } system_state; #define TAINT_PROPRIETARY_MODULE 0 diff --git a/trunk/include/linux/key.h b/trunk/include/linux/key.h index cef3b315ba7c..4cd22ed627ef 100644 --- a/trunk/include/linux/key.h +++ b/trunk/include/linux/key.h @@ -303,9 +303,7 @@ static inline bool key_is_instantiated(const struct key *key) rwsem_is_locked(&((struct key *)(KEY))->sem))) #define rcu_assign_keypointer(KEY, PAYLOAD) \ -do { \ - rcu_assign_pointer((KEY)->payload.rcudata, (PAYLOAD)); \ -} while (0) + (rcu_assign_pointer((KEY)->payload.rcudata, PAYLOAD)) #ifdef CONFIG_SYSCTL extern ctl_table key_sysctls[]; diff --git a/trunk/include/linux/kmsg_dump.h b/trunk/include/linux/kmsg_dump.h index 2e7a1e032c71..d6bd50110ec2 100644 --- a/trunk/include/linux/kmsg_dump.h +++ b/trunk/include/linux/kmsg_dump.h @@ -55,17 +55,12 @@ struct kmsg_dumper { #ifdef CONFIG_PRINTK void kmsg_dump(enum kmsg_dump_reason reason); -bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, - char *line, size_t size, size_t *len); - bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog, char *line, size_t size, size_t *len); bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, char *buf, size_t size, size_t *len); -void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper); - void kmsg_dump_rewind(struct kmsg_dumper *dumper); int kmsg_dump_register(struct kmsg_dumper *dumper); @@ -76,13 +71,6 @@ static inline void kmsg_dump(enum kmsg_dump_reason reason) { } -static inline bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, - bool syslog, const char *line, - size_t size, size_t *len) -{ - return false; -} - static inline bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog, const char *line, size_t size, size_t *len) { @@ -95,10 +83,6 @@ static inline bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, return false; } -static inline void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper) -{ -} - static inline void kmsg_dump_rewind(struct kmsg_dumper *dumper) { } diff --git a/trunk/include/linux/mfd/s5m87xx/s5m-core.h b/trunk/include/linux/mfd/s5m87xx/s5m-core.h index 0b2e0ed309f5..21603b42f22f 100644 --- a/trunk/include/linux/mfd/s5m87xx/s5m-core.h +++ b/trunk/include/linux/mfd/s5m87xx/s5m-core.h @@ -347,7 +347,6 @@ struct s5m_platform_data { bool buck_voltage_lock; int buck_gpios[3]; - int buck_ds[3]; int buck2_voltage[8]; bool buck2_gpiodvs; int buck3_voltage[8]; @@ -370,10 +369,6 @@ struct s5m_platform_data { bool buck2_ramp_enable; bool buck3_ramp_enable; bool buck4_ramp_enable; - - int buck2_init; - int buck3_init; - int buck4_init; }; #endif /* __LINUX_MFD_S5M_CORE_H */ diff --git a/trunk/include/linux/mfd/tmio.h b/trunk/include/linux/mfd/tmio.h index d83af39815ab..f5171dbf8850 100644 --- a/trunk/include/linux/mfd/tmio.h +++ b/trunk/include/linux/mfd/tmio.h @@ -101,7 +101,6 @@ struct tmio_mmc_host; struct tmio_mmc_data { unsigned int hclk; unsigned long capabilities; - unsigned long capabilities2; unsigned long flags; u32 ocr_mask; 
/* available voltages */ struct tmio_mmc_dma *dma; @@ -111,9 +110,6 @@ struct tmio_mmc_data { void (*set_clk_div)(struct platform_device *host, int state); int (*get_cd)(struct platform_device *host); int (*write16_hook)(struct tmio_mmc_host *host, int addr); - /* clock management callbacks */ - int (*clk_enable)(struct platform_device *pdev, unsigned int *f); - void (*clk_disable)(struct platform_device *pdev); }; /* diff --git a/trunk/include/linux/mfd/tps65217.h b/trunk/include/linux/mfd/tps65217.h index 12c06870829a..e030ef9a64ee 100644 --- a/trunk/include/linux/mfd/tps65217.h +++ b/trunk/include/linux/mfd/tps65217.h @@ -217,8 +217,7 @@ enum tps65217_regulator_id { * Board data may be used to initialize regulator. */ struct tps65217_board { - struct regulator_init_data *tps65217_init_data[TPS65217_NUM_REGULATOR]; - struct device_node *of_node[TPS65217_NUM_REGULATOR]; + struct regulator_init_data *tps65217_init_data; }; /** @@ -228,6 +227,11 @@ struct tps65217_board { * @max_uV: minimum micro volts * @vsel_to_uv: Function pointer to get voltage from selector * @uv_to_vsel: Function pointer to get selector from voltage + * @table: Table for non-uniform voltage step-size + * @table_len: Length of the voltage table + * @enable_mask: Regulator enable mask bits + * @set_vout_reg: Regulator output voltage set register + * @set_vout_mask: Regulator output voltage set mask * * This data is used to check the regualtor voltage limits while setting. */ @@ -237,6 +241,11 @@ struct tps_info { int max_uV; int (*vsel_to_uv)(unsigned int vsel); int (*uv_to_vsel)(int uV, unsigned int *vsel); + const int *table; + unsigned int table_len; + unsigned int enable_mask; + unsigned int set_vout_reg; + unsigned int set_vout_mask; }; /** diff --git a/trunk/include/linux/mfd/tps65910.h b/trunk/include/linux/mfd/tps65910.h index 6c4c478e21a4..dd8dc0a6c462 100644 --- a/trunk/include/linux/mfd/tps65910.h +++ b/trunk/include/linux/mfd/tps65910.h @@ -880,10 +880,4 @@ static inline int tps65910_reg_clear_bits(struct tps65910 *tps65910, u8 reg, return regmap_update_bits(tps65910->regmap, reg, mask, 0); } -static inline int tps65910_reg_update_bits(struct tps65910 *tps65910, u8 reg, - u8 mask, u8 val) -{ - return regmap_update_bits(tps65910->regmap, reg, mask, val); -} - #endif /* __LINUX_MFD_TPS65910_H */ diff --git a/trunk/include/linux/mm.h b/trunk/include/linux/mm.h index f9f279cf5b1b..b36d08ce5c57 100644 --- a/trunk/include/linux/mm.h +++ b/trunk/include/linux/mm.h @@ -1591,7 +1591,6 @@ void vmemmap_populate_print_last(void); enum mf_flags { MF_COUNT_INCREASED = 1 << 0, MF_ACTION_REQUIRED = 1 << 1, - MF_MUST_KILL = 1 << 2, }; extern int memory_failure(unsigned long pfn, int trapno, int flags); extern void memory_failure_queue(unsigned long pfn, int trapno, int flags); diff --git a/trunk/include/linux/mmc/card.h b/trunk/include/linux/mmc/card.h index 111aca5e97f3..d76513b5b263 100644 --- a/trunk/include/linux/mmc/card.h +++ b/trunk/include/linux/mmc/card.h @@ -149,7 +149,6 @@ struct sd_switch_caps { #define SD_SET_CURRENT_LIMIT_400 1 #define SD_SET_CURRENT_LIMIT_600 2 #define SD_SET_CURRENT_LIMIT_800 3 -#define SD_SET_CURRENT_NO_CHANGE (-1) #define SD_MAX_CURRENT_200 (1 << SD_SET_CURRENT_LIMIT_200) #define SD_MAX_CURRENT_400 (1 << SD_SET_CURRENT_LIMIT_400) diff --git a/trunk/include/linux/mmc/cd-gpio.h b/trunk/include/linux/mmc/cd-gpio.h new file mode 100644 index 000000000000..cefaba038ccb --- /dev/null +++ b/trunk/include/linux/mmc/cd-gpio.h @@ -0,0 +1,18 @@ +/* + * Generic GPIO card-detect helper header + * + * 
Copyright (C) 2011, Guennadi Liakhovetski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef MMC_CD_GPIO_H +#define MMC_CD_GPIO_H + +struct mmc_host; +int mmc_cd_gpio_request(struct mmc_host *host, unsigned int gpio); +void mmc_cd_gpio_free(struct mmc_host *host); + +#endif diff --git a/trunk/include/linux/mmc/host.h b/trunk/include/linux/mmc/host.h index f578a71d82a6..0707d228d7f1 100644 --- a/trunk/include/linux/mmc/host.h +++ b/trunk/include/linux/mmc/host.h @@ -11,7 +11,6 @@ #define LINUX_MMC_HOST_H #include -#include #include #include #include @@ -151,31 +150,11 @@ struct mmc_async_req { int (*err_check) (struct mmc_card *, struct mmc_async_req *); }; -/** - * struct mmc_slot - MMC slot functions - * - * @cd_irq: MMC/SD-card slot hotplug detection IRQ or -EINVAL - * @lock: protect the @handler_priv pointer - * @handler_priv: MMC/SD-card slot context - * - * Some MMC/SD host controllers implement slot-functions like card and - * write-protect detection natively. However, a large number of controllers - * leave these functions to the CPU. This struct provides a hook to attach - * such slot-function drivers. - */ -struct mmc_slot { - int cd_irq; - struct mutex lock; +struct mmc_hotplug { + unsigned int irq; void *handler_priv; }; -struct regulator; - -struct mmc_supply { - struct regulator *vmmc; /* Card power supply */ - struct regulator *vqmmc; /* Optional Vccq supply */ -}; - struct mmc_host { struct device *parent; struct device class_dev; @@ -189,9 +168,6 @@ struct mmc_host { u32 ocr_avail_sd; /* SD-specific OCR */ u32 ocr_avail_mmc; /* MMC-specific OCR */ struct notifier_block pm_notify; - u32 max_current_330; - u32 max_current_300; - u32 max_current_180; #define MMC_VDD_165_195 0x00000080 /* VDD voltage 1.65 - 1.95 */ #define MMC_VDD_20_21 0x00000100 /* VDD voltage 2.0 ~ 2.1 */ @@ -235,9 +211,16 @@ struct mmc_host { #define MMC_CAP_UHS_SDR50 (1 << 17) /* Host supports UHS SDR50 mode */ #define MMC_CAP_UHS_SDR104 (1 << 18) /* Host supports UHS SDR104 mode */ #define MMC_CAP_UHS_DDR50 (1 << 19) /* Host supports UHS DDR50 mode */ +#define MMC_CAP_SET_XPC_330 (1 << 20) /* Host supports >150mA current at 3.3V */ +#define MMC_CAP_SET_XPC_300 (1 << 21) /* Host supports >150mA current at 3.0V */ +#define MMC_CAP_SET_XPC_180 (1 << 22) /* Host supports >150mA current at 1.8V */ #define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ #define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ #define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ +#define MMC_CAP_MAX_CURRENT_200 (1 << 26) /* Host max current limit is 200mA */ +#define MMC_CAP_MAX_CURRENT_400 (1 << 27) /* Host max current limit is 400mA */ +#define MMC_CAP_MAX_CURRENT_600 (1 << 28) /* Host max current limit is 600mA */ +#define MMC_CAP_MAX_CURRENT_800 (1 << 29) /* Host max current limit is 800mA */ #define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. 
*/ #define MMC_CAP_HW_RESET (1 << 31) /* Hardware reset */ @@ -255,8 +238,6 @@ struct mmc_host { #define MMC_CAP2_BROKEN_VOLTAGE (1 << 7) /* Use the broken voltage */ #define MMC_CAP2_DETECT_ON_ERR (1 << 8) /* On I/O err check card removal */ #define MMC_CAP2_HC_ERASE_SZ (1 << 9) /* High-capacity erase size */ -#define MMC_CAP2_CD_ACTIVE_HIGH (1 << 10) /* Card-detect signal active high */ -#define MMC_CAP2_RO_ACTIVE_HIGH (1 << 11) /* Write-protect signal active high */ mmc_pm_flag_t pm_caps; /* supported pm features */ unsigned int power_notify_type; @@ -309,7 +290,7 @@ struct mmc_host { struct delayed_work detect; int detect_change; /* card detect flag */ - struct mmc_slot slot; + struct mmc_hotplug hotplug; const struct mmc_bus_ops *bus_ops; /* current bus driver */ unsigned int bus_refs; /* reference counter */ @@ -328,7 +309,6 @@ struct mmc_host { #ifdef CONFIG_REGULATOR bool regulator_enabled; /* regulator state */ #endif - struct mmc_supply supply; struct dentry *debugfs_root; @@ -377,12 +357,13 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host) wake_up_process(host->sdio_irq_thread); } +struct regulator; + #ifdef CONFIG_REGULATOR int mmc_regulator_get_ocrmask(struct regulator *supply); int mmc_regulator_set_ocr(struct mmc_host *mmc, struct regulator *supply, unsigned short vdd_bit); -int mmc_regulator_get_supply(struct mmc_host *mmc); #else static inline int mmc_regulator_get_ocrmask(struct regulator *supply) { @@ -395,11 +376,6 @@ static inline int mmc_regulator_set_ocr(struct mmc_host *mmc, { return 0; } - -static inline int mmc_regulator_get_supply(struct mmc_host *mmc) -{ - return 0; -} #endif int mmc_card_awake(struct mmc_host *host); diff --git a/trunk/include/linux/mmc/sdhci.h b/trunk/include/linux/mmc/sdhci.h index ac83b105bedd..e9051e1cb1ce 100644 --- a/trunk/include/linux/mmc/sdhci.h +++ b/trunk/include/linux/mmc/sdhci.h @@ -122,7 +122,6 @@ struct sdhci_host { #define SDHCI_PV_ENABLED (1<<8) /* Preset value enabled */ #define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */ #define SDHCI_HS200_NEEDS_TUNING (1<<10) /* HS200 needs tuning */ -#define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */ unsigned int version; /* SDHCI spec. version */ @@ -156,8 +155,7 @@ struct sdhci_host { struct timer_list timer; /* Timer for timeouts */ - unsigned int caps; /* Alternative CAPABILITY_0 */ - unsigned int caps1; /* Alternative CAPABILITY_1 */ + unsigned int caps; /* Alternative capabilities */ unsigned int ocr_avail_sdio; /* OCR bit masks */ unsigned int ocr_avail_sd; diff --git a/trunk/include/linux/mmc/sh_mmcif.h b/trunk/include/linux/mmc/sh_mmcif.h index c2f73cbb4d5c..05f0e3db1c12 100644 --- a/trunk/include/linux/mmc/sh_mmcif.h +++ b/trunk/include/linux/mmc/sh_mmcif.h @@ -44,8 +44,6 @@ struct sh_mmcif_plat_data { struct sh_mmcif_dma *dma; /* Deprecated. 
Instead */ unsigned int slave_id_tx; /* use embedded slave_id_[tr]x */ unsigned int slave_id_rx; - bool use_cd_gpio : 1; - unsigned int cd_gpio; u8 sup_pclk; /* 1 :SH7757, 0: SH7724/SH7372 */ unsigned long caps; u32 ocr; diff --git a/trunk/include/linux/mmc/sh_mobile_sdhi.h b/trunk/include/linux/mmc/sh_mobile_sdhi.h index b65679ffa880..e94e620aeddc 100644 --- a/trunk/include/linux/mmc/sh_mobile_sdhi.h +++ b/trunk/include/linux/mmc/sh_mobile_sdhi.h @@ -23,7 +23,6 @@ struct sh_mobile_sdhi_info { int dma_slave_rx; unsigned long tmio_flags; unsigned long tmio_caps; - unsigned long tmio_caps2; u32 tmio_ocr_mask; /* available MMC voltages */ unsigned int cd_gpio; struct tmio_mmc_data *pdata; diff --git a/trunk/include/linux/mmc/slot-gpio.h b/trunk/include/linux/mmc/slot-gpio.h deleted file mode 100644 index 7d88d27bfafa..000000000000 --- a/trunk/include/linux/mmc/slot-gpio.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Generic GPIO card-detect helper header - * - * Copyright (C) 2011, Guennadi Liakhovetski - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef MMC_SLOT_GPIO_H -#define MMC_SLOT_GPIO_H - -struct mmc_host; - -int mmc_gpio_get_ro(struct mmc_host *host); -int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio); -void mmc_gpio_free_ro(struct mmc_host *host); - -int mmc_gpio_get_cd(struct mmc_host *host); -int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio); -void mmc_gpio_free_cd(struct mmc_host *host); - -#endif diff --git a/trunk/include/linux/namei.h b/trunk/include/linux/namei.h index d2ef8b34b967..ffc02135c483 100644 --- a/trunk/include/linux/namei.h +++ b/trunk/include/linux/namei.h @@ -7,6 +7,12 @@ struct vfsmount; +struct open_intent { + int flags; + int create_mode; + struct file *file; +}; + enum { MAX_NESTED_LINKS = 8 }; struct nameidata { @@ -19,6 +25,11 @@ struct nameidata { int last_type; unsigned depth; char *saved_names[MAX_NESTED_LINKS + 1]; + + /* Intent data */ + union { + struct open_intent open; + } intent; }; /* @@ -67,10 +78,13 @@ extern int kern_path(const char *, unsigned, struct path *); extern struct dentry *kern_path_create(int, const char *, struct path *, int); extern struct dentry *user_path_create(int, const char __user *, struct path *, int); -extern struct dentry *kern_path_locked(const char *, struct path *); +extern int kern_path_parent(const char *, struct nameidata *); extern int vfs_path_lookup(struct dentry *, struct vfsmount *, const char *, unsigned int, struct path *); +extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry, + int (*open)(struct inode *, struct file *)); + extern struct dentry *lookup_one_len(const char *, struct dentry *, int); extern int follow_down_one(struct path *); @@ -80,8 +94,6 @@ extern int follow_up(struct path *); extern struct dentry *lock_rename(struct dentry *, struct dentry *); extern void unlock_rename(struct dentry *, struct dentry *); -extern void nd_jump_link(struct nameidata *nd, struct path *path); - static inline void nd_set_link(struct nameidata *nd, char *path) { nd->saved_names[nd->depth] = path; diff --git a/trunk/include/linux/nfs_xdr.h b/trunk/include/linux/nfs_xdr.h index d3b7c18b18f4..8aadd90b808a 100644 --- a/trunk/include/linux/nfs_xdr.h +++ b/trunk/include/linux/nfs_xdr.h @@ -1374,7 +1374,7 @@ struct nfs_rpc_ops { int (*readlink)(struct inode *, struct page *, unsigned int, unsigned int); 
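For the generic GPIO card-detect helper reintroduced in cd-gpio.h a few hunks above, a rough usage sketch from a host driver's point of view; example_host_probe()/example_host_remove() and the cd_gpio parameter are hypothetical, only mmc_cd_gpio_request() and mmc_cd_gpio_free() come from the header shown in this patch:

#include <linux/mmc/host.h>
#include <linux/mmc/cd-gpio.h>

/* Hypothetical probe fragment: hand the card-detect GPIO to the core. */
static int example_host_probe(struct mmc_host *mmc, unsigned int cd_gpio)
{
	int ret;

	ret = mmc_cd_gpio_request(mmc, cd_gpio);
	if (ret)
		return ret;	/* caller may fall back to polling instead */

	return 0;
}

/* Hypothetical remove fragment: release the card-detect GPIO again. */
static void example_host_remove(struct mmc_host *mmc)
{
	mmc_cd_gpio_free(mmc);
}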
int (*create) (struct inode *, struct dentry *, - struct iattr *, int); + struct iattr *, int, struct nfs_open_context *); int (*remove) (struct inode *, struct qstr *); void (*unlink_setup) (struct rpc_message *, struct inode *dir); void (*unlink_rpc_prepare) (struct rpc_task *, struct nfs_unlinkdata *); diff --git a/trunk/include/linux/pci_ids.h b/trunk/include/linux/pci_ids.h index fc3526077348..ab741b0d0074 100644 --- a/trunk/include/linux/pci_ids.h +++ b/trunk/include/linux/pci_ids.h @@ -517,7 +517,6 @@ #define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302 #define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303 #define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304 -#define PCI_DEVICE_ID_AMD_15H_M10H_F3 0x1403 #define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600 #define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601 #define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602 @@ -2756,17 +2755,6 @@ #define PCI_DEVICE_ID_INTEL_IOAT_SNB7 0x3c27 #define PCI_DEVICE_ID_INTEL_IOAT_SNB8 0x3c2e #define PCI_DEVICE_ID_INTEL_IOAT_SNB9 0x3c2f -#define PCI_DEVICE_ID_INTEL_UNC_HA 0x3c46 -#define PCI_DEVICE_ID_INTEL_UNC_IMC0 0x3cb0 -#define PCI_DEVICE_ID_INTEL_UNC_IMC1 0x3cb1 -#define PCI_DEVICE_ID_INTEL_UNC_IMC2 0x3cb4 -#define PCI_DEVICE_ID_INTEL_UNC_IMC3 0x3cb5 -#define PCI_DEVICE_ID_INTEL_UNC_QPI0 0x3c41 -#define PCI_DEVICE_ID_INTEL_UNC_QPI1 0x3c42 -#define PCI_DEVICE_ID_INTEL_UNC_R2PCIE 0x3c43 -#define PCI_DEVICE_ID_INTEL_UNC_R3QPI0 0x3c44 -#define PCI_DEVICE_ID_INTEL_UNC_R3QPI1 0x3c45 -#define PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX 0x3ce0 #define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f #define PCI_DEVICE_ID_INTEL_5100_16 0x65f0 #define PCI_DEVICE_ID_INTEL_5100_21 0x65f5 diff --git a/trunk/include/linux/perf_event.h b/trunk/include/linux/perf_event.h index 76c5c8b724a7..45db49f64bb4 100644 --- a/trunk/include/linux/perf_event.h +++ b/trunk/include/linux/perf_event.h @@ -677,7 +677,6 @@ struct hw_perf_event { u64 last_tag; unsigned long config_base; unsigned long event_base; - int event_base_rdpmc; int idx; int last_cpu; @@ -1107,8 +1106,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, struct task_struct *task, perf_overflow_handler_t callback, void *context); -extern void perf_pmu_migrate_context(struct pmu *pmu, - int src_cpu, int dst_cpu); extern u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running); diff --git a/trunk/include/linux/pm_domain.h b/trunk/include/linux/pm_domain.h index a7d6172922d4..30f794eb3826 100644 --- a/trunk/include/linux/pm_domain.h +++ b/trunk/include/linux/pm_domain.h @@ -15,7 +15,6 @@ #include #include #include -#include enum gpd_status { GPD_STATE_ACTIVE = 0, /* PM domain is active */ @@ -46,11 +45,6 @@ struct gpd_dev_ops { bool (*active_wakeup)(struct device *dev); }; -struct gpd_cpu_data { - unsigned int saved_exit_latency; - struct cpuidle_state *idle_state; -}; - struct generic_pm_domain { struct dev_pm_domain domain; /* PM domain operations */ struct list_head gpd_list_node; /* Node in the global PM domains list */ @@ -81,7 +75,6 @@ struct generic_pm_domain { bool max_off_time_changed; bool cached_power_down_ok; struct device_node *of_node; /* Node in device tree */ - struct gpd_cpu_data *cpu_data; }; static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) @@ -112,7 +105,6 @@ struct generic_pm_domain_data { struct gpd_timing_data td; struct notifier_block nb; struct mutex lock; - unsigned int refcount; bool need_restore; bool always_on; }; @@ -163,8 +155,6 @@ extern int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops, struct gpd_timing_data *td); extern 
int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td); -extern int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state); -extern int genpd_detach_cpuidle(struct generic_pm_domain *genpd); extern void pm_genpd_init(struct generic_pm_domain *genpd, struct dev_power_governor *gov, bool is_off); @@ -221,14 +211,6 @@ static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) { return -ENOSYS; } -static inline int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st) -{ - return -ENOSYS; -} -static inline int genpd_detach_cpuidle(struct generic_pm_domain *genpd) -{ - return -ENOSYS; -} static inline void pm_genpd_init(struct generic_pm_domain *genpd, struct dev_power_governor *gov, bool is_off) { diff --git a/trunk/include/linux/pm_qos.h b/trunk/include/linux/pm_qos.h index 9924ea1f22e0..233149cb19f4 100644 --- a/trunk/include/linux/pm_qos.h +++ b/trunk/include/linux/pm_qos.h @@ -66,7 +66,7 @@ enum pm_qos_req_action { static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req) { - return req->dev != NULL; + return req->dev != 0; } int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, diff --git a/trunk/include/linux/quota.h b/trunk/include/linux/quota.h index 524ede8a160a..c09fa042b5ea 100644 --- a/trunk/include/linux/quota.h +++ b/trunk/include/linux/quota.h @@ -333,7 +333,7 @@ struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, struct path *); int (*quota_on_meta)(struct super_block *, int, int); int (*quota_off)(struct super_block *, int); - int (*quota_sync)(struct super_block *, int); + int (*quota_sync)(struct super_block *, int, int); int (*get_info)(struct super_block *, int, struct if_dqinfo *); int (*set_info)(struct super_block *, int, struct if_dqinfo *); int (*get_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *); diff --git a/trunk/include/linux/quotaops.h b/trunk/include/linux/quotaops.h index ec6b65feaaba..17b977304a09 100644 --- a/trunk/include/linux/quotaops.h +++ b/trunk/include/linux/quotaops.h @@ -83,8 +83,7 @@ int dquot_quota_on(struct super_block *sb, int type, int format_id, int dquot_quota_on_mount(struct super_block *sb, char *qf_name, int format_id, int type); int dquot_quota_off(struct super_block *sb, int type); -int dquot_writeback_dquots(struct super_block *sb, int type); -int dquot_quota_sync(struct super_block *sb, int type); +int dquot_quota_sync(struct super_block *sb, int type, int wait); int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); int dquot_get_dqblk(struct super_block *sb, int type, qid_t id, @@ -256,11 +255,6 @@ static inline int dquot_resume(struct super_block *sb, int type) #define dquot_file_open generic_file_open -static inline int dquot_writeback_dquots(struct super_block *sb, int type) -{ - return 0; -} - #endif /* CONFIG_QUOTA */ static inline int dquot_alloc_space_nodirty(struct inode *inode, qsize_t nr) diff --git a/trunk/include/linux/rcupdate.h b/trunk/include/linux/rcupdate.h index 115ead2b5155..9cac722b169c 100644 --- a/trunk/include/linux/rcupdate.h +++ b/trunk/include/linux/rcupdate.h @@ -147,7 +147,6 @@ extern void synchronize_sched(void); extern void __rcu_read_lock(void); extern void __rcu_read_unlock(void); -extern void rcu_read_unlock_special(struct task_struct *t); void synchronize_rcu(void); /* @@ -256,10 +255,6 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head) } #endif /* 
#else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ -#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) -extern int rcu_is_cpu_idle(void); -#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) */ - #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) bool rcu_lockdep_current_cpu_online(void); #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ @@ -271,6 +266,15 @@ static inline bool rcu_lockdep_current_cpu_online(void) #ifdef CONFIG_DEBUG_LOCK_ALLOC +#ifdef CONFIG_PROVE_RCU +extern int rcu_is_cpu_idle(void); +#else /* !CONFIG_PROVE_RCU */ +static inline int rcu_is_cpu_idle(void) +{ + return 0; +} +#endif /* else !CONFIG_PROVE_RCU */ + static inline void rcu_lock_acquire(struct lockdep_map *map) { lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_); @@ -427,7 +431,8 @@ extern int rcu_my_thread_group_empty(void); static inline void rcu_preempt_sleep_check(void) { rcu_lockdep_assert(!lock_is_held(&rcu_lock_map), - "Illegal context switch in RCU read-side critical section"); + "Illegal context switch in RCU read-side " + "critical section"); } #else /* #ifdef CONFIG_PROVE_RCU */ static inline void rcu_preempt_sleep_check(void) @@ -508,10 +513,10 @@ static inline void rcu_preempt_sleep_check(void) (_________p1); \ }) #define __rcu_assign_pointer(p, v, space) \ - do { \ + ({ \ smp_wmb(); \ (p) = (typeof(*v) __force space *)(v); \ - } while (0) + }) /** @@ -846,7 +851,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) * * Assigns the specified value to the specified RCU-protected * pointer, ensuring that any concurrent RCU readers will see - * any prior initialization. + * any prior initialization. Returns the value assigned. * * Inserts memory barriers on architectures that require them * (which is most of them), and also prevents the compiler from @@ -898,17 +903,25 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) * the reader-accessible portions of the linked structure. */ #define RCU_INIT_POINTER(p, v) \ - do { \ - p = (typeof(*v) __force __rcu *)(v); \ - } while (0) + p = (typeof(*v) __force __rcu *)(v) -/** - * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer - * - * GCC-style initialization for an RCU-protected pointer in a structure field. - */ -#define RCU_POINTER_INITIALIZER(p, v) \ - .p = (typeof(*v) __force __rcu *)(v) +static __always_inline bool __is_kfree_rcu_offset(unsigned long offset) +{ + return offset < 4096; +} + +static __always_inline +void __kfree_rcu(struct rcu_head *head, unsigned long offset) +{ + typedef void (*rcu_callback)(struct rcu_head *); + + BUILD_BUG_ON(!__builtin_constant_p(offset)); + + /* See the kfree_rcu() header comment. 
*/ + BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); + + kfree_call_rcu(head, (rcu_callback)offset); +} /* * Does the specified offset indicate that the corresponding rcu_head @@ -922,7 +935,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) #define __kfree_rcu(head, offset) \ do { \ BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \ - kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \ + call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \ } while (0) /** diff --git a/trunk/include/linux/regmap.h b/trunk/include/linux/regmap.h index 7f7e00df3adf..56af22ec9aba 100644 --- a/trunk/include/linux/regmap.h +++ b/trunk/include/linux/regmap.h @@ -14,14 +14,12 @@ */ #include -#include struct module; struct device; struct i2c_client; struct spi_device; struct regmap; -struct regmap_range_cfg; /* An enum of all the supported cache types */ enum regcache_type { @@ -45,14 +43,6 @@ struct reg_default { #ifdef CONFIG_REGMAP -enum regmap_endian { - /* Unspecified -> 0 -> Backwards compatible default */ - REGMAP_ENDIAN_DEFAULT = 0, - REGMAP_ENDIAN_BIG, - REGMAP_ENDIAN_LITTLE, - REGMAP_ENDIAN_NATIVE, -}; - /** * Configuration for the register map of a device. * @@ -94,15 +84,6 @@ enum regmap_endian { * @reg_defaults_raw: Power on reset values for registers (for use with * register cache support). * @num_reg_defaults_raw: Number of elements in reg_defaults_raw. - * @reg_format_endian: Endianness for formatted register addresses. If this is - * DEFAULT, the @reg_format_endian_default value from the - * regmap bus is used. - * @val_format_endian: Endianness for formatted register values. If this is - * DEFAULT, the @reg_format_endian_default value from the - * regmap bus is used. - * - * @ranges: Array of configuration entries for virtual address ranges. - * @num_ranges: Number of range configuration entries. */ struct regmap_config { const char *name; @@ -128,43 +109,6 @@ struct regmap_config { u8 write_flag_mask; bool use_single_rw; - - enum regmap_endian reg_format_endian; - enum regmap_endian val_format_endian; - - const struct regmap_range_cfg *ranges; - unsigned int n_ranges; -}; - -/** - * Configuration for indirectly accessed or paged registers. - * Registers, mapped to this virtual range, are accessed in two steps: - * 1. page selector register update; - * 2. access through data window registers. - * - * @range_min: Address of the lowest register address in virtual range. - * @range_max: Address of the highest register in virtual range. - * - * @page_sel_reg: Register with selector field. - * @page_sel_mask: Bit shift for selector value. - * @page_sel_shift: Bit mask for selector value. - * - * @window_start: Address of first (lowest) register in data window. - * @window_len: Number of registers in data window. - */ -struct regmap_range_cfg { - /* Registers of virtual address range */ - unsigned int range_min; - unsigned int range_max; - - /* Page selector for indirect addressing */ - unsigned int selector_reg; - unsigned int selector_mask; - int selector_shift; - - /* Data window (per each page) */ - unsigned int window_start; - unsigned int window_len; }; typedef int (*regmap_hw_write)(void *context, const void *data, @@ -189,12 +133,6 @@ typedef void (*regmap_hw_free_context)(void *context); * data. * @read_flag_mask: Mask to be set in the top byte of the register when doing * a read. - * @reg_format_endian_default: Default endianness for formatted register - * addresses. Used when the regmap_config specifies DEFAULT. 
If this is - * DEFAULT, BIG is assumed. - * @val_format_endian_default: Default endianness for formatted register - * values. Used when the regmap_config specifies DEFAULT. If this is - * DEFAULT, BIG is assumed. */ struct regmap_bus { bool fast_io; @@ -203,8 +141,6 @@ struct regmap_bus { regmap_hw_read read; regmap_hw_free_context free_context; u8 read_flag_mask; - enum regmap_endian reg_format_endian_default; - enum regmap_endian val_format_endian_default; }; struct regmap *regmap_init(struct device *dev, @@ -283,7 +219,6 @@ struct regmap_irq { * @status_base: Base status register address. * @mask_base: Base mask register address. * @ack_base: Base ack address. If zero then the chip is clear on read. - * @wake_base: Base address for wake enables. If zero unsupported. * @irq_reg_stride: Stride to use for chips where registers are not contiguous. * * @num_regs: Number of registers in each control bank. @@ -297,7 +232,6 @@ struct regmap_irq_chip { unsigned int status_base; unsigned int mask_base; unsigned int ack_base; - unsigned int wake_base; unsigned int irq_reg_stride; int num_regs; @@ -309,7 +243,7 @@ struct regmap_irq_chip { struct regmap_irq_chip_data; int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, - int irq_base, const struct regmap_irq_chip *chip, + int irq_base, struct regmap_irq_chip *chip, struct regmap_irq_chip_data **data); void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data); int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data); @@ -427,6 +361,7 @@ static inline int regmap_register_patch(struct regmap *map, static inline struct regmap *dev_get_regmap(struct device *dev, const char *name) { + WARN_ONCE(1, "regmap API is disabled"); return NULL; } diff --git a/trunk/include/linux/regulator/consumer.h b/trunk/include/linux/regulator/consumer.h index da339fd8c755..4ed1b30ac5fc 100644 --- a/trunk/include/linux/regulator/consumer.h +++ b/trunk/include/linux/regulator/consumer.h @@ -290,12 +290,6 @@ static inline int regulator_set_voltage(struct regulator *regulator, } static inline int regulator_get_voltage(struct regulator *regulator) -{ - return -EINVAL; -} - -static inline int regulator_is_supported_voltage(struct regulator *regulator, - int min_uV, int max_uV) { return 0; } diff --git a/trunk/include/linux/regulator/driver.h b/trunk/include/linux/regulator/driver.h index bac4c871f3bd..b0432cc2b169 100644 --- a/trunk/include/linux/regulator/driver.h +++ b/trunk/include/linux/regulator/driver.h @@ -32,8 +32,6 @@ enum regulator_status { REGULATOR_STATUS_NORMAL, REGULATOR_STATUS_IDLE, REGULATOR_STATUS_STANDBY, - /* in case that any other status doesn't apply */ - REGULATOR_STATUS_UNDEFINED, }; /** @@ -69,8 +67,6 @@ enum regulator_status { * * @enable_time: Time taken for the regulator voltage output voltage to * stabilise after being enabled, in microseconds. - * @set_ramp_delay: Set the ramp delay for the regulator. The driver should - * select ramp delay equal to or less than(closest) ramp_delay. * @set_voltage_time_sel: Time taken for the regulator voltage output voltage * to stabilise after being set to a new value, in microseconds. 
* The function provides the from and to voltage selector, the @@ -117,7 +113,6 @@ struct regulator_ops { /* Time taken to enable or set voltage on the regulator */ int (*enable_time) (struct regulator_dev *); - int (*set_ramp_delay) (struct regulator_dev *, int ramp_delay); int (*set_voltage_time_sel) (struct regulator_dev *, unsigned int old_selector, unsigned int new_selector); @@ -175,15 +170,11 @@ enum regulator_type { * * @min_uV: Voltage given by the lowest selector (if linear mapping) * @uV_step: Voltage increase with each selector (if linear mapping) - * @ramp_delay: Time to settle down after voltage change (unit: uV/us) - * @volt_table: Voltage mapping table (if table based mapping) * * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_ * @vsel_mask: Mask for register bitfield used for selector * @enable_reg: Register for control when using regmap enable/disable ops * @enable_mask: Mask for control when using regmap enable/disable ops - * - * @enable_time: Time taken for initial enable of regulator (in uS). */ struct regulator_desc { const char *name; @@ -197,16 +188,11 @@ struct regulator_desc { unsigned int min_uV; unsigned int uV_step; - unsigned int ramp_delay; - - const unsigned int *volt_table; unsigned int vsel_reg; unsigned int vsel_mask; unsigned int enable_reg; unsigned int enable_mask; - - unsigned int enable_time; }; /** @@ -222,9 +208,6 @@ struct regulator_desc { * @of_node: OpenFirmware node to parse for device tree bindings (may be * NULL). * @regmap: regmap to use for core regmap helpers - * @ena_gpio: GPIO controlling regulator enable. - * @ena_gpio_invert: Sense for GPIO enable control. - * @ena_gpio_flags: Flags to use when calling gpio_request_one() */ struct regulator_config { struct device *dev; @@ -232,10 +215,6 @@ struct regulator_config { void *driver_data; struct device_node *of_node; struct regmap *regmap; - - int ena_gpio; - unsigned int ena_gpio_invert:1; - unsigned int ena_gpio_flags; }; /* @@ -274,10 +253,6 @@ struct regulator_dev { void *reg_data; /* regulator_dev data */ struct dentry *debugfs; - - int ena_gpio; - unsigned int ena_gpio_invert:1; - unsigned int ena_gpio_state:1; }; struct regulator_dev * @@ -296,8 +271,6 @@ int regulator_mode_to_status(unsigned int); int regulator_list_voltage_linear(struct regulator_dev *rdev, unsigned int selector); -int regulator_list_voltage_table(struct regulator_dev *rdev, - unsigned int selector); int regulator_map_voltage_linear(struct regulator_dev *rdev, int min_uV, int max_uV); int regulator_map_voltage_iterate(struct regulator_dev *rdev, @@ -307,9 +280,6 @@ int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel); int regulator_is_enabled_regmap(struct regulator_dev *rdev); int regulator_enable_regmap(struct regulator_dev *rdev); int regulator_disable_regmap(struct regulator_dev *rdev); -int regulator_set_voltage_time_sel(struct regulator_dev *rdev, - unsigned int old_selector, - unsigned int new_selector); void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data); diff --git a/trunk/include/linux/regulator/fixed.h b/trunk/include/linux/regulator/fixed.h index 48918be649d4..f83f7440b488 100644 --- a/trunk/include/linux/regulator/fixed.h +++ b/trunk/include/linux/regulator/fixed.h @@ -22,7 +22,6 @@ struct regulator_init_data; /** * struct fixed_voltage_config - fixed_voltage_config structure * @supply_name: Name of the regulator supply - * @input_supply: Name of the input regulator supply * @microvolts: Output voltage of regulator * @gpio: GPIO 
to use for enable control * set to -EINVAL if not used @@ -47,7 +46,6 @@ struct regulator_init_data; */ struct fixed_voltage_config { const char *supply_name; - const char *input_supply; int microvolts; int gpio; unsigned startup_delay; @@ -60,17 +58,14 @@ struct fixed_voltage_config { struct regulator_consumer_supply; #if IS_ENABLED(CONFIG_REGULATOR) -struct platform_device *regulator_register_always_on(int id, const char *name, - struct regulator_consumer_supply *supplies, int num_supplies, int uv); +struct platform_device *regulator_register_fixed(int id, + struct regulator_consumer_supply *supplies, int num_supplies); #else -static inline struct platform_device *regulator_register_always_on(int id, const char *name, - struct regulator_consumer_supply *supplies, int num_supplies, int uv) +static inline struct platform_device *regulator_register_fixed(int id, + struct regulator_consumer_supply *supplies, int num_supplies) { return NULL; } #endif -#define regulator_register_fixed(id, s, ns) regulator_register_always_on(id, \ - "fixed-dummy", s, ns, 0) - #endif diff --git a/trunk/include/linux/regulator/lp872x.h b/trunk/include/linux/regulator/lp872x.h deleted file mode 100644 index 132e05c46661..000000000000 --- a/trunk/include/linux/regulator/lp872x.h +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright 2012 Texas Instruments - * - * Author: Milo(Woogyom) Kim - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - */ - -#ifndef __LP872X_REGULATOR_H__ -#define __LP872X_REGULATOR_H__ - -#include -#include -#include - -#define LP872X_MAX_REGULATORS 9 - -enum lp872x_regulator_id { - LP8720_ID_BASE, - LP8720_ID_LDO1 = LP8720_ID_BASE, - LP8720_ID_LDO2, - LP8720_ID_LDO3, - LP8720_ID_LDO4, - LP8720_ID_LDO5, - LP8720_ID_BUCK, - - LP8725_ID_BASE, - LP8725_ID_LDO1 = LP8725_ID_BASE, - LP8725_ID_LDO2, - LP8725_ID_LDO3, - LP8725_ID_LDO4, - LP8725_ID_LDO5, - LP8725_ID_LILO1, - LP8725_ID_LILO2, - LP8725_ID_BUCK1, - LP8725_ID_BUCK2, - - LP872X_ID_MAX, -}; - -enum lp872x_dvs_state { - DVS_LOW = GPIOF_OUT_INIT_LOW, - DVS_HIGH = GPIOF_OUT_INIT_HIGH, -}; - -enum lp872x_dvs_sel { - SEL_V1, - SEL_V2, -}; - -/** - * lp872x_dvs - * @gpio : gpio pin number for dvs control - * @vsel : dvs selector for buck v1 or buck v2 register - * @init_state : initial dvs pin state - */ -struct lp872x_dvs { - int gpio; - enum lp872x_dvs_sel vsel; - enum lp872x_dvs_state init_state; -}; - -/** - * lp872x_regdata - * @id : regulator id - * @init_data : init data for each regulator - */ -struct lp872x_regulator_data { - enum lp872x_regulator_id id; - struct regulator_init_data *init_data; -}; - -/** - * lp872x_platform_data - * @general_config : the value of LP872X_GENERAL_CFG register - * @update_config : if LP872X_GENERAL_CFG register is updated, set true - * @regulator_data : platform regulator id and init data - * @dvs : dvs data for buck voltage control - */ -struct lp872x_platform_data { - u8 general_config; - bool update_config; - struct lp872x_regulator_data regulator_data[LP872X_MAX_REGULATORS]; - struct lp872x_dvs *dvs; -}; - -#endif diff --git a/trunk/include/linux/regulator/machine.h b/trunk/include/linux/regulator/machine.h index 40dd0a394cfa..b02108446be7 100644 --- a/trunk/include/linux/regulator/machine.h +++ b/trunk/include/linux/regulator/machine.h @@ -92,7 +92,6 @@ struct regulator_state { * mode. * @initial_state: Suspend state to set by default. 
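The fixed.h hunk above restores the simpler regulator_register_fixed() helper. A sketch of how board code might call it, assuming the signature shown in this patch; the consumer names and board_regulators_init() are illustrative only:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>

/* Hypothetical consumers to tie to the always-on dummy supply. */
static struct regulator_consumer_supply board_fixed_consumers[] = {
	REGULATOR_SUPPLY("vmmc", "sdhci.0"),
	REGULATOR_SUPPLY("vdd", "1-001a"),
};

static int __init board_regulators_init(void)
{
	/* NULL is returned on failure (or when the regulator API is disabled). */
	if (!regulator_register_fixed(0, board_fixed_consumers,
				      ARRAY_SIZE(board_fixed_consumers)))
		return -ENODEV;
	return 0;
}
device_initcall(board_regulators_init);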
* @initial_mode: Mode to set at startup. - * @ramp_delay: Time to settle down after voltage change (unit: uV/us) */ struct regulation_constraints { @@ -126,8 +125,6 @@ struct regulation_constraints { /* mode to set on startup */ unsigned int initial_mode; - unsigned int ramp_delay; - /* constraint flags */ unsigned always_on:1; /* regulator never off when system is on */ unsigned boot_on:1; /* bootloader/firmware enabled regulator */ diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h index 1a2ebd39b800..4a1f493e0fef 100644 --- a/trunk/include/linux/sched.h +++ b/trunk/include/linux/sched.h @@ -1405,7 +1405,7 @@ struct task_struct { int (*notifier)(void *priv); void *notifier_data; sigset_t *notifier_mask; - struct callback_head *task_works; + struct hlist_head task_works; struct audit_context *audit_context; #ifdef CONFIG_AUDITSYSCALL @@ -1546,6 +1546,7 @@ struct task_struct { unsigned long timer_slack_ns; unsigned long default_timer_slack_ns; + struct list_head *scm_work_list; #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* Index of current stored address in ret_stack */ int curr_ret_stack; @@ -1580,6 +1581,7 @@ struct task_struct { #endif #ifdef CONFIG_UPROBES struct uprobe_task *utask; + int uprobe_srcu_id; #endif }; diff --git a/trunk/include/linux/smp.h b/trunk/include/linux/smp.h index dd6f06be3c9f..717fb746c9a8 100644 --- a/trunk/include/linux/smp.h +++ b/trunk/include/linux/smp.h @@ -90,6 +90,10 @@ void kick_all_cpus_sync(void); void __init call_function_init(void); void generic_smp_call_function_single_interrupt(void); void generic_smp_call_function_interrupt(void); +void ipi_call_lock(void); +void ipi_call_unlock(void); +void ipi_call_lock_irq(void); +void ipi_call_unlock_irq(void); #else static inline void call_function_init(void) { } #endif @@ -177,6 +181,7 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info) } while (0) static inline void smp_send_reschedule(int cpu) { } +#define num_booting_cpus() 1 #define smp_prepare_boot_cpu() do {} while (0) #define smp_call_function_many(mask, func, info, wait) \ (up_smp_call_function(func, info)) diff --git a/trunk/include/linux/suspend.h b/trunk/include/linux/suspend.h index 0c808d7fa579..cd83059fb592 100644 --- a/trunk/include/linux/suspend.h +++ b/trunk/include/linux/suspend.h @@ -408,12 +408,6 @@ static inline void unlock_system_sleep(void) {} #endif /* !CONFIG_PM_SLEEP */ -#ifdef CONFIG_PM_SLEEP_DEBUG -extern bool pm_print_times_enabled; -#else -#define pm_print_times_enabled (false) -#endif - #ifdef CONFIG_PM_AUTOSLEEP /* kernel/power/autosleep.c */ diff --git a/trunk/include/linux/task_work.h b/trunk/include/linux/task_work.h index fb46b03b1852..294d5d5e90b1 100644 --- a/trunk/include/linux/task_work.h +++ b/trunk/include/linux/task_work.h @@ -4,21 +4,29 @@ #include #include -typedef void (*task_work_func_t)(struct callback_head *); +struct task_work; +typedef void (*task_work_func_t)(struct task_work *); + +struct task_work { + struct hlist_node hlist; + task_work_func_t func; + void *data; +}; static inline void -init_task_work(struct callback_head *twork, task_work_func_t func) +init_task_work(struct task_work *twork, task_work_func_t func, void *data) { twork->func = func; + twork->data = data; } -int task_work_add(struct task_struct *task, struct callback_head *twork, bool); -struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t); +int task_work_add(struct task_struct *task, struct task_work *twork, bool); +struct task_work *task_work_cancel(struct task_struct *, 
task_work_func_t); void task_work_run(void); static inline void exit_task_work(struct task_struct *task) { - if (unlikely(task->task_works)) + if (unlikely(!hlist_empty(&task->task_works))) task_work_run(); } diff --git a/trunk/include/linux/tick.h b/trunk/include/linux/tick.h index f37fceb69b73..ab8be90b5cc9 100644 --- a/trunk/include/linux/tick.h +++ b/trunk/include/linux/tick.h @@ -31,10 +31,10 @@ enum tick_nohz_mode { * struct tick_sched - sched tick emulation and no idle tick control/stats * @sched_timer: hrtimer to schedule the periodic tick in high * resolution mode - * @last_tick: Store the last tick expiry time when the tick - * timer is modified for nohz sleeps. This is necessary + * @idle_tick: Store the last idle tick expiry time when the tick + * timer is modified for idle sleeps. This is necessary * to resume the tick timer operation in the timeline - * when the CPU returns from nohz sleep. + * when the CPU returns from idle * @tick_stopped: Indicator that the idle tick has been stopped * @idle_jiffies: jiffies at the entry to idle for idle time accounting * @idle_calls: Total number of idle calls @@ -51,7 +51,7 @@ struct tick_sched { struct hrtimer sched_timer; unsigned long check_clocks; enum tick_nohz_mode nohz_mode; - ktime_t last_tick; + ktime_t idle_tick; int inidle; int tick_stopped; unsigned long idle_jiffies; diff --git a/trunk/include/linux/tracehook.h b/trunk/include/linux/tracehook.h index 1e98b5530425..6a4d82bedb03 100644 --- a/trunk/include/linux/tracehook.h +++ b/trunk/include/linux/tracehook.h @@ -192,7 +192,7 @@ static inline void tracehook_notify_resume(struct pt_regs *regs) * hlist_add_head(task->task_works); */ smp_mb__after_clear_bit(); - if (unlikely(current->task_works)) + if (unlikely(!hlist_empty(¤t->task_works))) task_work_run(); } diff --git a/trunk/include/linux/tracepoint.h b/trunk/include/linux/tracepoint.h index 802de56c41e8..bd96ecd0e05c 100644 --- a/trunk/include/linux/tracepoint.h +++ b/trunk/include/linux/tracepoint.h @@ -153,7 +153,7 @@ static inline void tracepoint_synchronize_unregister(void) } \ static inline void trace_##name##_rcuidle(proto) \ { \ - if (static_key_false(&__tracepoint_##name.key)) \ + if (static_branch(&__tracepoint_##name.key)) \ __DO_TRACE(&__tracepoint_##name, \ TP_PROTO(data_proto), \ TP_ARGS(data_args), \ diff --git a/trunk/include/linux/types.h b/trunk/include/linux/types.h index bf0dd7524b2a..9c1bd539ea70 100644 --- a/trunk/include/linux/types.h +++ b/trunk/include/linux/types.h @@ -246,15 +246,14 @@ struct ustat { }; /** - * struct callback_head - callback structure for use with RCU and task_work + * struct rcu_head - callback structure for use with RCU * @next: next update requests in a list * @func: actual update function to call after the grace period. 
*/ -struct callback_head { - struct callback_head *next; - void (*func)(struct callback_head *head); +struct rcu_head { + struct rcu_head *next; + void (*func)(struct rcu_head *head); }; -#define rcu_head callback_head #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ diff --git a/trunk/include/net/ip_vs.h b/trunk/include/net/ip_vs.h index 95374d1696a1..d6146b4811c2 100644 --- a/trunk/include/net/ip_vs.h +++ b/trunk/include/net/ip_vs.h @@ -1425,7 +1425,7 @@ static inline void ip_vs_notrack(struct sk_buff *skb) struct nf_conn *ct = nf_ct_get(skb, &ctinfo); if (!ct || !nf_ct_is_untracked(ct)) { - nf_conntrack_put(skb->nfct); + nf_reset(skb); skb->nfct = &nf_ct_untracked_get()->ct_general; skb->nfctinfo = IP_CT_NEW; nf_conntrack_get(skb->nfct); diff --git a/trunk/include/net/netfilter/nf_conntrack_ecache.h b/trunk/include/net/netfilter/nf_conntrack_ecache.h index e1ce1048fe5f..a88fb6939387 100644 --- a/trunk/include/net/netfilter/nf_conntrack_ecache.h +++ b/trunk/include/net/netfilter/nf_conntrack_ecache.h @@ -78,7 +78,7 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct) struct net *net = nf_ct_net(ct); struct nf_conntrack_ecache *e; - if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb)) + if (net->ct.nf_conntrack_event_cb == NULL) return; e = nf_ct_ecache_find(ct); diff --git a/trunk/include/net/scm.h b/trunk/include/net/scm.h index 079d7887dac1..d456f4c71a32 100644 --- a/trunk/include/net/scm.h +++ b/trunk/include/net/scm.h @@ -13,6 +13,7 @@ #define SCM_MAX_FD 253 struct scm_fp_list { + struct list_head list; short count; short max; struct file *fp[SCM_MAX_FD]; diff --git a/trunk/include/target/target_core_backend.h b/trunk/include/target/target_core_backend.h index f1405d335a96..2d7db85e93ae 100644 --- a/trunk/include/target/target_core_backend.h +++ b/trunk/include/target/target_core_backend.h @@ -24,8 +24,10 @@ struct se_subsystem_api { struct se_subsystem_dev *, void *); void (*free_device)(void *); int (*transport_complete)(struct se_cmd *cmd, struct scatterlist *); - - int (*parse_cdb)(struct se_cmd *cmd); + int (*execute_cmd)(struct se_cmd *, struct scatterlist *, u32, + enum dma_data_direction); + int (*do_discard)(struct se_device *, sector_t, u32); + void (*do_sync_cache)(struct se_cmd *); ssize_t (*check_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *); ssize_t (*set_configfs_dev_params)(struct se_hba *, @@ -38,13 +40,6 @@ struct se_subsystem_api { unsigned char *(*get_sense_buffer)(struct se_cmd *); }; -struct spc_ops { - int (*execute_rw)(struct se_cmd *cmd); - int (*execute_sync_cache)(struct se_cmd *cmd); - int (*execute_write_same)(struct se_cmd *cmd); - int (*execute_unmap)(struct se_cmd *cmd); -}; - int transport_subsystem_register(struct se_subsystem_api *); void transport_subsystem_release(struct se_subsystem_api *); @@ -54,10 +49,6 @@ struct se_device *transport_add_device_to_core_hba(struct se_hba *, void target_complete_cmd(struct se_cmd *, u8); -int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops); -int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size); -int spc_get_write_same_sectors(struct se_cmd *cmd); - void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *); int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *); int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *); diff --git a/trunk/include/target/target_core_base.h b/trunk/include/target/target_core_base.h index 128ce46fa48a..dc35d8660aa6 100644 --- a/trunk/include/target/target_core_base.h +++ 
b/trunk/include/target/target_core_base.h @@ -145,9 +145,12 @@ enum transport_state_table { TRANSPORT_NO_STATE = 0, TRANSPORT_NEW_CMD = 1, TRANSPORT_WRITE_PENDING = 3, + TRANSPORT_PROCESS_WRITE = 4, TRANSPORT_PROCESSING = 5, TRANSPORT_COMPLETE = 6, + TRANSPORT_PROCESS_TMR = 9, TRANSPORT_ISTATE_PROCESSING = 11, + TRANSPORT_NEW_CMD_MAP = 16, TRANSPORT_COMPLETE_QF_WP = 18, TRANSPORT_COMPLETE_QF_OK = 19, }; @@ -157,20 +160,25 @@ enum se_cmd_flags_table { SCF_SUPPORTED_SAM_OPCODE = 0x00000001, SCF_TRANSPORT_TASK_SENSE = 0x00000002, SCF_EMULATED_TASK_SENSE = 0x00000004, - SCF_SCSI_DATA_CDB = 0x00000008, - SCF_SCSI_TMR_CDB = 0x00000010, - SCF_SCSI_CDB_EXCEPTION = 0x00000020, - SCF_SCSI_RESERVATION_CONFLICT = 0x00000040, - SCF_FUA = 0x00000080, - SCF_SE_LUN_CMD = 0x00000100, - SCF_BIDI = 0x00000400, - SCF_SENT_CHECK_CONDITION = 0x00000800, - SCF_OVERFLOW_BIT = 0x00001000, - SCF_UNDERFLOW_BIT = 0x00002000, - SCF_SENT_DELAYED_TAS = 0x00004000, - SCF_ALUA_NON_OPTIMIZED = 0x00008000, - SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000, - SCF_ACK_KREF = 0x00040000, + SCF_SCSI_DATA_SG_IO_CDB = 0x00000008, + SCF_SCSI_CONTROL_SG_IO_CDB = 0x00000010, + SCF_SCSI_NON_DATA_CDB = 0x00000020, + SCF_SCSI_TMR_CDB = 0x00000040, + SCF_SCSI_CDB_EXCEPTION = 0x00000080, + SCF_SCSI_RESERVATION_CONFLICT = 0x00000100, + SCF_FUA = 0x00000200, + SCF_SE_LUN_CMD = 0x00000800, + SCF_SE_ALLOW_EOO = 0x00001000, + SCF_BIDI = 0x00002000, + SCF_SENT_CHECK_CONDITION = 0x00004000, + SCF_OVERFLOW_BIT = 0x00008000, + SCF_UNDERFLOW_BIT = 0x00010000, + SCF_SENT_DELAYED_TAS = 0x00020000, + SCF_ALUA_NON_OPTIMIZED = 0x00040000, + SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000, + SCF_UNUSED = 0x00100000, + SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00200000, + SCF_ACK_KREF = 0x00400000, }; /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ @@ -212,7 +220,6 @@ enum tcm_sense_reason_table { TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e, TCM_CHECK_CONDITION_NOT_READY = 0x0f, TCM_RESERVATION_CONFLICT = 0x10, - TCM_ADDRESS_OUT_OF_RANGE = 0x11, }; enum target_sc_flags_table { @@ -464,6 +471,13 @@ struct t10_reservation { struct t10_reservation_ops pr_ops; }; +struct se_queue_obj { + atomic_t queue_cnt; + spinlock_t cmd_queue_lock; + struct list_head qobj_list; + wait_queue_head_t thread_wq; +}; + struct se_tmr_req { /* Task Management function to be performed */ u8 function; @@ -472,8 +486,11 @@ struct se_tmr_req { int call_transport; /* Reference to ITT that Task Mgmt should be performed */ u32 ref_task_tag; + /* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */ + u64 ref_task_lun; void *fabric_tmr_ptr; struct se_cmd *task_cmd; + struct se_cmd *ref_cmd; struct se_device *tmr_dev; struct se_lun *tmr_lun; struct list_head tmr_list; @@ -520,6 +537,7 @@ struct se_cmd { /* Only used for internal passthrough and legacy TCM fabric modules */ struct se_session *se_sess; struct se_tmr_req *se_tmr_req; + struct list_head se_queue_node; struct list_head se_cmd_list; struct completion cmd_wait_comp; struct kref cmd_kref; @@ -557,6 +575,7 @@ struct se_cmd { struct scatterlist *t_bidi_data_sg; unsigned int t_bidi_data_nents; + struct list_head execute_list; struct list_head state_list; bool state_active; @@ -614,6 +633,7 @@ struct se_session { struct list_head sess_list; struct list_head sess_acl_list; struct list_head sess_cmd_list; + struct list_head sess_wait_list; spinlock_t sess_cmd_lock; struct kref sess_kref; }; @@ -760,11 +780,13 @@ struct se_device { /* Active commands on this virtual SE device */ atomic_t simple_cmds; atomic_t dev_ordered_id; + 
atomic_t execute_tasks; atomic_t dev_ordered_sync; atomic_t dev_qf_count; struct se_obj dev_obj; struct se_obj dev_access_obj; struct se_obj dev_export_obj; + struct se_queue_obj dev_queue_obj; spinlock_t delayed_cmd_lock; spinlock_t execute_task_lock; spinlock_t dev_reservation_lock; @@ -780,9 +802,11 @@ struct se_device { struct t10_pr_registration *dev_pr_res_holder; struct list_head dev_sep_list; struct list_head dev_tmr_list; - struct workqueue_struct *tmr_wq; + /* Pointer to descriptor for processing thread */ + struct task_struct *process_thread; struct work_struct qf_work_queue; struct list_head delayed_cmd_list; + struct list_head execute_list; struct list_head state_list; struct list_head qf_cmd_list; /* Pointer to associated SE HBA */ diff --git a/trunk/include/target/target_core_fabric.h b/trunk/include/target/target_core_fabric.h index 69fb3cfd02d7..c78a23333c4f 100644 --- a/trunk/include/target/target_core_fabric.h +++ b/trunk/include/target/target_core_fabric.h @@ -32,6 +32,12 @@ struct target_core_fabric_ops { void (*tpg_release_fabric_acl)(struct se_portal_group *, struct se_node_acl *); u32 (*tpg_get_inst_index)(struct se_portal_group *); + /* + * Optional function pointer for TCM to perform command map + * from TCM processing thread context, for those struct se_cmd + * initially allocated in interrupt context. + */ + int (*new_cmd_map)(struct se_cmd *); /* * Optional to release struct se_cmd and fabric dependent allocated * I/O descriptor in transport_cmd_check_stop(). @@ -102,18 +108,20 @@ void transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *, struct se_session *, u32, int, int, unsigned char *); int transport_lookup_cmd_lun(struct se_cmd *, u32); int target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *); -int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *, +void target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *, unsigned char *, u32, u32, int, int, int); int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, unsigned char *sense, u32 unpacked_lun, void *fabric_tmr_ptr, unsigned char tm_type, gfp_t, unsigned int, int); int transport_handle_cdb_direct(struct se_cmd *); +int transport_generic_handle_cdb_map(struct se_cmd *); +int transport_generic_handle_data(struct se_cmd *); int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32, struct scatterlist *, u32); int transport_generic_new_cmd(struct se_cmd *); -void target_execute_cmd(struct se_cmd *cmd); +void transport_generic_process_write(struct se_cmd *); void transport_generic_free_cmd(struct se_cmd *, int); @@ -121,8 +129,9 @@ bool transport_wait_for_tasks(struct se_cmd *); int transport_check_aborted_status(struct se_cmd *, int); int transport_send_check_condition_and_sense(struct se_cmd *, u8, int); +void target_get_sess_cmd(struct se_session *, struct se_cmd *, bool); int target_put_sess_cmd(struct se_session *, struct se_cmd *); -void target_sess_cmd_list_set_waiting(struct se_session *); +void target_splice_sess_cmd_list(struct se_session *); void target_wait_for_sess_cmds(struct se_session *, int); int core_alua_check_nonop_delay(struct se_cmd *); diff --git a/trunk/include/trace/events/rcu.h b/trunk/include/trace/events/rcu.h index 5bde94d8585b..d274734b2aa4 100644 --- a/trunk/include/trace/events/rcu.h +++ b/trunk/include/trace/events/rcu.h @@ -541,50 +541,6 @@ TRACE_EVENT(rcu_torture_read, __entry->rcutorturename, __entry->rhp) ); -/* - * Tracepoint for _rcu_barrier() execution. 
The string "s" describes - * the _rcu_barrier phase: - * "Begin": rcu_barrier_callback() started. - * "Check": rcu_barrier_callback() checking for piggybacking. - * "EarlyExit": rcu_barrier_callback() piggybacked, thus early exit. - * "Inc1": rcu_barrier_callback() piggyback check counter incremented. - * "Offline": rcu_barrier_callback() found offline CPU - * "OnlineQ": rcu_barrier_callback() found online CPU with callbacks. - * "OnlineNQ": rcu_barrier_callback() found online CPU, no callbacks. - * "IRQ": An rcu_barrier_callback() callback posted on remote CPU. - * "CB": An rcu_barrier_callback() invoked a callback, not the last. - * "LastCB": An rcu_barrier_callback() invoked the last callback. - * "Inc2": rcu_barrier_callback() piggyback check counter incremented. - * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument - * is the count of remaining callbacks, and "done" is the piggybacking count. - */ -TRACE_EVENT(rcu_barrier, - - TP_PROTO(char *rcuname, char *s, int cpu, int cnt, unsigned long done), - - TP_ARGS(rcuname, s, cpu, cnt, done), - - TP_STRUCT__entry( - __field(char *, rcuname) - __field(char *, s) - __field(int, cpu) - __field(int, cnt) - __field(unsigned long, done) - ), - - TP_fast_assign( - __entry->rcuname = rcuname; - __entry->s = s; - __entry->cpu = cpu; - __entry->cnt = cnt; - __entry->done = done; - ), - - TP_printk("%s %s cpu %d remaining %d # %lu", - __entry->rcuname, __entry->s, __entry->cpu, __entry->cnt, - __entry->done) -); - #else /* #ifdef CONFIG_RCU_TRACE */ #define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0) @@ -608,7 +564,6 @@ TRACE_EVENT(rcu_barrier, #define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \ do { } while (0) #define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0) -#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0) #endif /* #else #ifdef CONFIG_RCU_TRACE */ diff --git a/trunk/include/trace/ftrace.h b/trunk/include/trace/ftrace.h index c6bc2faaf261..769724944fc6 100644 --- a/trunk/include/trace/ftrace.h +++ b/trunk/include/trace/ftrace.h @@ -571,7 +571,6 @@ static inline void ftrace_test_probe_##call(void) \ #undef __print_flags #undef __print_symbolic -#undef __print_hex #undef __get_dynamic_array #undef __get_str diff --git a/trunk/init/main.c b/trunk/init/main.c index 3f151f6c6da7..b5cc0a7c4708 100644 --- a/trunk/init/main.c +++ b/trunk/init/main.c @@ -68,7 +68,6 @@ #include #include #include -#include #include #include @@ -805,8 +804,8 @@ static noinline int init_post(void) system_state = SYSTEM_RUNNING; numa_default_policy(); + current->signal->flags |= SIGNAL_UNKILLABLE; - flush_delayed_fput(); if (ramdisk_execute_command) { run_init_process(ramdisk_execute_command); diff --git a/trunk/ipc/mqueue.c b/trunk/ipc/mqueue.c index f8e54f5b9080..8ce57691e7b6 100644 --- a/trunk/ipc/mqueue.c +++ b/trunk/ipc/mqueue.c @@ -413,7 +413,7 @@ static void mqueue_evict_inode(struct inode *inode) } static int mqueue_create(struct inode *dir, struct dentry *dentry, - umode_t mode, bool excl) + umode_t mode, struct nameidata *nd) { struct inode *inode; struct mq_attr *attr = dentry->d_fsdata; @@ -721,8 +721,8 @@ static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr) /* * Invoked when creating a new queue via sys_mq_open */ -static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir, - struct path *path, int oflag, umode_t mode, +static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir, + struct dentry 
*dentry, int oflag, umode_t mode, struct mq_attr *attr) { const struct cred *cred = current_cred(); @@ -732,9 +732,9 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir, if (attr) { ret = mq_attr_ok(ipc_ns, attr); if (ret) - return ERR_PTR(ret); + goto out; /* store for use during create */ - path->dentry->d_fsdata = attr; + dentry->d_fsdata = attr; } else { struct mq_attr def_attr; @@ -744,51 +744,71 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir, ipc_ns->mq_msgsize_default); ret = mq_attr_ok(ipc_ns, &def_attr); if (ret) - return ERR_PTR(ret); + goto out; } mode &= ~current_umask(); - ret = mnt_want_write(path->mnt); + ret = mnt_want_write(ipc_ns->mq_mnt); if (ret) - return ERR_PTR(ret); - ret = vfs_create(dir, path->dentry, mode, true); - path->dentry->d_fsdata = NULL; - if (!ret) - result = dentry_open(path, oflag, cred); - else - result = ERR_PTR(ret); + goto out; + ret = vfs_create(dir->d_inode, dentry, mode, NULL); + dentry->d_fsdata = NULL; + if (ret) + goto out_drop_write; + + result = dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred); /* * dentry_open() took a persistent mnt_want_write(), * so we can now drop this one. */ - mnt_drop_write(path->mnt); + mnt_drop_write(ipc_ns->mq_mnt); return result; + +out_drop_write: + mnt_drop_write(ipc_ns->mq_mnt); +out: + dput(dentry); + mntput(ipc_ns->mq_mnt); + return ERR_PTR(ret); } /* Opens existing queue */ -static struct file *do_open(struct path *path, int oflag) +static struct file *do_open(struct ipc_namespace *ipc_ns, + struct dentry *dentry, int oflag) { + int ret; + const struct cred *cred = current_cred(); + static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE, MAY_READ | MAY_WRITE }; - int acc; - if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) - return ERR_PTR(-EINVAL); - acc = oflag2acc[oflag & O_ACCMODE]; - if (inode_permission(path->dentry->d_inode, acc)) - return ERR_PTR(-EACCES); - return dentry_open(path, oflag, current_cred()); + + if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) { + ret = -EINVAL; + goto err; + } + + if (inode_permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE])) { + ret = -EACCES; + goto err; + } + + return dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred); + +err: + dput(dentry); + mntput(ipc_ns->mq_mnt); + return ERR_PTR(ret); } SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode, struct mq_attr __user *, u_attr) { - struct path path; + struct dentry *dentry; struct file *filp; char *name; struct mq_attr attr; int fd, error; struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; - struct dentry *root = ipc_ns->mq_mnt->mnt_root; if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr))) return -EFAULT; @@ -802,49 +822,52 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode, if (fd < 0) goto out_putname; - error = 0; - mutex_lock(&root->d_inode->i_mutex); - path.dentry = lookup_one_len(name, root, strlen(name)); - if (IS_ERR(path.dentry)) { - error = PTR_ERR(path.dentry); + mutex_lock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex); + dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name)); + if (IS_ERR(dentry)) { + error = PTR_ERR(dentry); goto out_putfd; } - path.mnt = mntget(ipc_ns->mq_mnt); + mntget(ipc_ns->mq_mnt); if (oflag & O_CREAT) { - if (path.dentry->d_inode) { /* entry already exists */ - audit_inode(name, path.dentry); + if (dentry->d_inode) { /* entry already exists */ + audit_inode(name, dentry); if (oflag & O_EXCL) { error = -EEXIST; 
goto out; } - filp = do_open(&path, oflag); + filp = do_open(ipc_ns, dentry, oflag); } else { - filp = do_create(ipc_ns, root->d_inode, - &path, oflag, mode, + filp = do_create(ipc_ns, ipc_ns->mq_mnt->mnt_root, + dentry, oflag, mode, u_attr ? &attr : NULL); } } else { - if (!path.dentry->d_inode) { + if (!dentry->d_inode) { error = -ENOENT; goto out; } - audit_inode(name, path.dentry); - filp = do_open(&path, oflag); + audit_inode(name, dentry); + filp = do_open(ipc_ns, dentry, oflag); } - if (!IS_ERR(filp)) - fd_install(fd, filp); - else + if (IS_ERR(filp)) { error = PTR_ERR(filp); + goto out_putfd; + } + + fd_install(fd, filp); + goto out_upsem; + out: - path_put(&path); + dput(dentry); + mntput(ipc_ns->mq_mnt); out_putfd: - if (error) { - put_unused_fd(fd); - fd = error; - } - mutex_unlock(&root->d_inode->i_mutex); + put_unused_fd(fd); + fd = error; +out_upsem: + mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex); out_putname: putname(name); return fd; diff --git a/trunk/kernel/audit_tree.c b/trunk/kernel/audit_tree.c index 3a5ca582ba1e..5bf0790497e7 100644 --- a/trunk/kernel/audit_tree.c +++ b/trunk/kernel/audit_tree.c @@ -595,7 +595,7 @@ void audit_trim_trees(void) root_mnt = collect_mounts(&path); path_put(&path); - if (IS_ERR(root_mnt)) + if (!root_mnt) goto skip_it; spin_lock(&hash_lock); @@ -669,8 +669,8 @@ int audit_add_tree_rule(struct audit_krule *rule) goto Err; mnt = collect_mounts(&path); path_put(&path); - if (IS_ERR(mnt)) { - err = PTR_ERR(mnt); + if (!mnt) { + err = -ENOMEM; goto Err; } @@ -719,8 +719,8 @@ int audit_tag_tree(char *old, char *new) return err; tagged = collect_mounts(&path2); path_put(&path2); - if (IS_ERR(tagged)) - return PTR_ERR(tagged); + if (!tagged) + return -ENOMEM; err = kern_path(old, 0, &path1); if (err) { diff --git a/trunk/kernel/audit_watch.c b/trunk/kernel/audit_watch.c index 3823281401b5..e683869365d9 100644 --- a/trunk/kernel/audit_watch.c +++ b/trunk/kernel/audit_watch.c @@ -355,15 +355,34 @@ static void audit_remove_parent_watches(struct audit_parent *parent) /* Get path information necessary for adding watches. 
*/ static int audit_get_nd(struct audit_watch *watch, struct path *parent) { - struct dentry *d = kern_path_locked(watch->path, parent); - if (IS_ERR(d)) + struct nameidata nd; + struct dentry *d; + int err; + + err = kern_path_parent(watch->path, &nd); + if (err) + return err; + + if (nd.last_type != LAST_NORM) { + path_put(&nd.path); + return -EINVAL; + } + + mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); + d = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len); + if (IS_ERR(d)) { + mutex_unlock(&nd.path.dentry->d_inode->i_mutex); + path_put(&nd.path); return PTR_ERR(d); - mutex_unlock(&parent->dentry->d_inode->i_mutex); + } if (d->d_inode) { /* update watch filter fields */ watch->dev = d->d_inode->i_sb->s_dev; watch->ino = d->d_inode->i_ino; } + mutex_unlock(&nd.path.dentry->d_inode->i_mutex); + + *parent = nd.path; dput(d); return 0; } diff --git a/trunk/kernel/cgroup.c b/trunk/kernel/cgroup.c index af2b5641fc8b..b303dfc7dce0 100644 --- a/trunk/kernel/cgroup.c +++ b/trunk/kernel/cgroup.c @@ -822,7 +822,7 @@ EXPORT_SYMBOL_GPL(cgroup_unlock); */ static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode); -static struct dentry *cgroup_lookup(struct inode *, struct dentry *, unsigned int); +static struct dentry *cgroup_lookup(struct inode *, struct dentry *, struct nameidata *); static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry); static int cgroup_populate_dir(struct cgroup *cgrp); static const struct inode_operations cgroup_dir_inode_operations; @@ -1587,7 +1587,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, opts.new_root = new_root; /* Locate an existing or new sb for this hierarchy */ - sb = sget(fs_type, cgroup_test_super, cgroup_set_super, 0, &opts); + sb = sget(fs_type, cgroup_test_super, cgroup_set_super, &opts); if (IS_ERR(sb)) { ret = PTR_ERR(sb); cgroup_drop_root(opts.new_root); @@ -2570,7 +2570,7 @@ static const struct inode_operations cgroup_dir_inode_operations = { .rename = cgroup_rename, }; -static struct dentry *cgroup_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) +static struct dentry *cgroup_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { if (dentry->d_name.len > NAME_MAX) return ERR_PTR(-ENAMETOOLONG); diff --git a/trunk/kernel/debug/kdb/kdb_main.c b/trunk/kernel/debug/kdb/kdb_main.c index 1f91413edb87..67b847dfa2bb 100644 --- a/trunk/kernel/debug/kdb/kdb_main.c +++ b/trunk/kernel/debug/kdb/kdb_main.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -2041,15 +2040,8 @@ static int kdb_env(int argc, const char **argv) */ static int kdb_dmesg(int argc, const char **argv) { - int diag; - int logging; - int lines = 0; - int adjust = 0; - int n = 0; - int skip = 0; - struct kmsg_dumper dumper = { .active = 1 }; - size_t len; - char buf[201]; + char *syslog_data[4], *start, *end, c = '\0', *p; + int diag, logging, logsize, lines = 0, adjust = 0, n; if (argc > 2) return KDB_ARGCOUNT; @@ -2072,10 +2064,22 @@ static int kdb_dmesg(int argc, const char **argv) kdb_set(2, setargs); } - kmsg_dump_rewind_nolock(&dumper); - while (kmsg_dump_get_line_nolock(&dumper, 1, NULL, 0, NULL)) - n++; - + /* syslog_data[0,1] physical start, end+1. syslog_data[2,3] + * logical start, end+1. 
*/ + kdb_syslog_data(syslog_data); + if (syslog_data[2] == syslog_data[3]) + return 0; + logsize = syslog_data[1] - syslog_data[0]; + start = syslog_data[2]; + end = syslog_data[3]; +#define KDB_WRAP(p) (((p - syslog_data[0]) % logsize) + syslog_data[0]) + for (n = 0, p = start; p < end; ++p) { + c = *KDB_WRAP(p); + if (c == '\n') + ++n; + } + if (c != '\n') + ++n; if (lines < 0) { if (adjust >= n) kdb_printf("buffer only contains %d lines, nothing " @@ -2083,11 +2087,21 @@ static int kdb_dmesg(int argc, const char **argv) else if (adjust - lines >= n) kdb_printf("buffer only contains %d lines, last %d " "lines printed\n", n, n - adjust); - skip = adjust; - lines = abs(lines); + if (adjust) { + for (; start < end && adjust; ++start) { + if (*KDB_WRAP(start) == '\n') + --adjust; + } + if (start < end) + ++start; + } + for (p = start; p < end && lines; ++p) { + if (*KDB_WRAP(p) == '\n') + ++lines; + } + end = p; } else if (lines > 0) { - skip = n - lines - adjust; - lines = abs(lines); + int skip = n - (adjust + lines); if (adjust >= n) { kdb_printf("buffer only contains %d lines, " "nothing printed\n", n); @@ -2098,24 +2112,35 @@ static int kdb_dmesg(int argc, const char **argv) kdb_printf("buffer only contains %d lines, first " "%d lines printed\n", n, lines); } - } else { - lines = n; + for (; start < end && skip; ++start) { + if (*KDB_WRAP(start) == '\n') + --skip; + } + for (p = start; p < end && lines; ++p) { + if (*KDB_WRAP(p) == '\n') + --lines; + } + end = p; } - - if (skip >= n || skip < 0) - return 0; - - kmsg_dump_rewind_nolock(&dumper); - while (kmsg_dump_get_line_nolock(&dumper, 1, buf, sizeof(buf), &len)) { - if (skip) { - skip--; - continue; + /* Do a line at a time (max 200 chars) to reduce protocol overhead */ + c = '\n'; + while (start != end) { + char buf[201]; + p = buf; + if (KDB_FLAG(CMD_INTERRUPT)) + return 0; + while (start < end && (c = *KDB_WRAP(start)) && + (p - buf) < sizeof(buf)-1) { + ++start; + *p++ = c; + if (c == '\n') + break; } - if (!lines--) - break; - - kdb_printf("%.*s\n", (int)len - 1, buf); + *p = '\0'; + kdb_printf("%s", buf); } + if (c != '\n') + kdb_printf("\n"); return 0; } diff --git a/trunk/kernel/debug/kdb/kdb_private.h b/trunk/kernel/debug/kdb/kdb_private.h index 392ec6a25844..47c4e56e513b 100644 --- a/trunk/kernel/debug/kdb/kdb_private.h +++ b/trunk/kernel/debug/kdb/kdb_private.h @@ -205,6 +205,7 @@ extern char kdb_grep_string[]; extern int kdb_grep_leading; extern int kdb_grep_trailing; extern char *kdb_cmds[]; +extern void kdb_syslog_data(char *syslog_data[]); extern unsigned long kdb_task_state_string(const char *); extern char kdb_task_state_char (const struct task_struct *); extern unsigned long kdb_task_state(const struct task_struct *p, diff --git a/trunk/kernel/events/core.c b/trunk/kernel/events/core.c index f1cf0edeb39a..d7d71d6ec972 100644 --- a/trunk/kernel/events/core.c +++ b/trunk/kernel/events/core.c @@ -1645,8 +1645,6 @@ perf_install_in_context(struct perf_event_context *ctx, lockdep_assert_held(&ctx->mutex); event->ctx = ctx; - if (event->cpu != -1) - event->cpu = cpu; if (!task) { /* @@ -6254,8 +6252,6 @@ SYSCALL_DEFINE5(perf_event_open, } } - get_online_cpus(); - event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL, NULL); if (IS_ERR(event)) { @@ -6308,7 +6304,7 @@ SYSCALL_DEFINE5(perf_event_open, /* * Get the target context (task or percpu): */ - ctx = find_get_context(pmu, task, event->cpu); + ctx = find_get_context(pmu, task, cpu); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto err_alloc; @@ -6381,23 
+6377,20 @@ SYSCALL_DEFINE5(perf_event_open, mutex_lock(&ctx->mutex); if (move_group) { - synchronize_rcu(); - perf_install_in_context(ctx, group_leader, event->cpu); + perf_install_in_context(ctx, group_leader, cpu); get_ctx(ctx); list_for_each_entry(sibling, &group_leader->sibling_list, group_entry) { - perf_install_in_context(ctx, sibling, event->cpu); + perf_install_in_context(ctx, sibling, cpu); get_ctx(ctx); } } - perf_install_in_context(ctx, event, event->cpu); + perf_install_in_context(ctx, event, cpu); ++ctx->generation; perf_unpin_context(ctx); mutex_unlock(&ctx->mutex); - put_online_cpus(); - event->owner = current; mutex_lock(¤t->perf_event_mutex); @@ -6426,7 +6419,6 @@ SYSCALL_DEFINE5(perf_event_open, err_alloc: free_event(event); err_task: - put_online_cpus(); if (task) put_task_struct(task); err_group_fd: @@ -6487,39 +6479,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, } EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); -void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) -{ - struct perf_event_context *src_ctx; - struct perf_event_context *dst_ctx; - struct perf_event *event, *tmp; - LIST_HEAD(events); - - src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; - dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; - - mutex_lock(&src_ctx->mutex); - list_for_each_entry_safe(event, tmp, &src_ctx->event_list, - event_entry) { - perf_remove_from_context(event); - put_ctx(src_ctx); - list_add(&event->event_entry, &events); - } - mutex_unlock(&src_ctx->mutex); - - synchronize_rcu(); - - mutex_lock(&dst_ctx->mutex); - list_for_each_entry_safe(event, tmp, &events, event_entry) { - list_del(&event->event_entry); - if (event->state >= PERF_EVENT_STATE_OFF) - event->state = PERF_EVENT_STATE_INACTIVE; - perf_install_in_context(dst_ctx, event, dst_cpu); - get_ctx(dst_ctx); - } - mutex_unlock(&dst_ctx->mutex); -} -EXPORT_SYMBOL_GPL(perf_pmu_migrate_context); - static void sync_child_event(struct perf_event *child_event, struct task_struct *child) { diff --git a/trunk/kernel/events/uprobes.c b/trunk/kernel/events/uprobes.c index f93532748bca..985be4d80fe8 100644 --- a/trunk/kernel/events/uprobes.c +++ b/trunk/kernel/events/uprobes.c @@ -38,29 +38,13 @@ #define UINSNS_PER_PAGE (PAGE_SIZE/UPROBE_XOL_SLOT_BYTES) #define MAX_UPROBE_XOL_SLOTS UINSNS_PER_PAGE +static struct srcu_struct uprobes_srcu; static struct rb_root uprobes_tree = RB_ROOT; static DEFINE_SPINLOCK(uprobes_treelock); /* serialize rbtree access */ #define UPROBES_HASH_SZ 13 -/* - * We need separate register/unregister and mmap/munmap lock hashes because - * of mmap_sem nesting. - * - * uprobe_register() needs to install probes on (potentially) all processes - * and thus needs to acquire multiple mmap_sems (consequtively, not - * concurrently), whereas uprobe_mmap() is called while holding mmap_sem - * for the particular process doing the mmap. - * - * uprobe_register()->register_for_each_vma() needs to drop/acquire mmap_sem - * because of lock order against i_mmap_mutex. This means there's a hole in - * the register vma iteration where a mmap() can happen. - * - * Thus uprobe_register() can race with uprobe_mmap() and we can try and - * install a probe where one is already installed. 
- */ - /* serialize (un)register */ static struct mutex uprobes_mutex[UPROBES_HASH_SZ]; @@ -77,6 +61,17 @@ static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ]; */ static atomic_t uprobe_events = ATOMIC_INIT(0); +/* + * Maintain a temporary per vma info that can be used to search if a vma + * has already been handled. This structure is introduced since extending + * vm_area_struct wasnt recommended. + */ +struct vma_info { + struct list_head probe_list; + struct mm_struct *mm; + loff_t vaddr; +}; + struct uprobe { struct rb_node rb_node; /* node in the rb tree */ atomic_t ref; @@ -105,8 +100,7 @@ static bool valid_vma(struct vm_area_struct *vma, bool is_register) if (!is_register) return true; - if ((vma->vm_flags & (VM_HUGETLB|VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)) - == (VM_READ|VM_EXEC)) + if ((vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)) == (VM_READ|VM_EXEC)) return true; return false; @@ -135,17 +129,33 @@ static loff_t vma_address(struct vm_area_struct *vma, loff_t offset) static int __replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage) { struct mm_struct *mm = vma->vm_mm; - unsigned long addr; - spinlock_t *ptl; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; pte_t *ptep; + spinlock_t *ptl; + unsigned long addr; + int err = -EFAULT; addr = page_address_in_vma(page, vma); if (addr == -EFAULT) - return -EFAULT; + goto out; + + pgd = pgd_offset(mm, addr); + if (!pgd_present(*pgd)) + goto out; + + pud = pud_offset(pgd, addr); + if (!pud_present(*pud)) + goto out; + + pmd = pmd_offset(pud, addr); + if (!pmd_present(*pmd)) + goto out; - ptep = page_check_address(page, mm, addr, &ptl, 0); + ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); if (!ptep) - return -EAGAIN; + goto out; get_page(kpage); page_add_new_anon_rmap(kpage, vma, addr); @@ -164,8 +174,10 @@ static int __replace_page(struct vm_area_struct *vma, struct page *page, struct try_to_free_swap(page); put_page(page); pte_unmap_unlock(ptep, ptl); + err = 0; - return 0; +out: + return err; } /** @@ -210,8 +222,9 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, void *vaddr_old, *vaddr_new; struct vm_area_struct *vma; struct uprobe *uprobe; + loff_t addr; int ret; -retry: + /* Read the page with vaddr into memory */ ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma); if (ret <= 0) @@ -233,6 +246,10 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, if (mapping != vma->vm_file->f_mapping) goto put_out; + addr = vma_address(vma, uprobe->offset); + if (vaddr != (unsigned long)addr) + goto put_out; + ret = -ENOMEM; new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr); if (!new_page) @@ -250,7 +267,11 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, vaddr_new = kmap_atomic(new_page); memcpy(vaddr_new, vaddr_old, PAGE_SIZE); - memcpy(vaddr_new + (vaddr & ~PAGE_MASK), &opcode, UPROBE_SWBP_INSN_SIZE); + + /* poke the new insn in, ASSUMES we don't cross page boundary */ + vaddr &= ~PAGE_MASK; + BUG_ON(vaddr + UPROBE_SWBP_INSN_SIZE > PAGE_SIZE); + memcpy(vaddr_new + vaddr, &opcode, UPROBE_SWBP_INSN_SIZE); kunmap_atomic(vaddr_new); kunmap_atomic(vaddr_old); @@ -270,8 +291,6 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, put_out: put_page(old_page); - if (unlikely(ret == -EAGAIN)) - goto retry; return ret; } @@ -293,7 +312,7 @@ static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_ void *vaddr_new; int ret; - ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, 
&page, NULL); + ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &page, NULL); if (ret <= 0) return ret; @@ -314,20 +333,10 @@ static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr) uprobe_opcode_t opcode; int result; - if (current->mm == mm) { - pagefault_disable(); - result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr, - sizeof(opcode)); - pagefault_enable(); - - if (likely(result == 0)) - goto out; - } - result = read_opcode(mm, vaddr, &opcode); if (result) return result; -out: + if (is_swbp_insn(&opcode)) return 1; @@ -346,9 +355,7 @@ static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr) int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr) { int result; - /* - * See the comment near uprobes_hash(). - */ + result = is_swbp_at_addr(mm, vaddr); if (result == 1) return -EEXIST; @@ -513,6 +520,7 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset) uprobe->inode = igrab(inode); uprobe->offset = offset; init_rwsem(&uprobe->consumer_rwsem); + INIT_LIST_HEAD(&uprobe->pending_list); /* add to uprobes_tree, sorted on inode:offset */ cur_uprobe = insert_uprobe(uprobe); @@ -580,22 +588,20 @@ static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc) } static int -__copy_insn(struct address_space *mapping, struct file *filp, char *insn, - unsigned long nbytes, loff_t offset) +__copy_insn(struct address_space *mapping, struct vm_area_struct *vma, char *insn, + unsigned long nbytes, unsigned long offset) { + struct file *filp = vma->vm_file; struct page *page; void *vaddr; - unsigned long off; - pgoff_t idx; + unsigned long off1; + unsigned long idx; if (!filp) return -EINVAL; - if (!mapping->a_ops->readpage) - return -EIO; - - idx = offset >> PAGE_CACHE_SHIFT; - off = offset & ~PAGE_MASK; + idx = (unsigned long)(offset >> PAGE_CACHE_SHIFT); + off1 = offset &= ~PAGE_MASK; /* * Ensure that the page that has the original instruction is @@ -606,20 +612,22 @@ __copy_insn(struct address_space *mapping, struct file *filp, char *insn, return PTR_ERR(page); vaddr = kmap_atomic(page); - memcpy(insn, vaddr + off, nbytes); + memcpy(insn, vaddr + off1, nbytes); kunmap_atomic(vaddr); page_cache_release(page); return 0; } -static int copy_insn(struct uprobe *uprobe, struct file *filp) +static int +copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr) { struct address_space *mapping; unsigned long nbytes; int bytes; - nbytes = PAGE_SIZE - (uprobe->offset & ~PAGE_MASK); + addr &= ~PAGE_MASK; + nbytes = PAGE_SIZE - addr; mapping = uprobe->inode->i_mapping; /* Instruction at end of binary; copy only available bytes */ @@ -630,13 +638,13 @@ static int copy_insn(struct uprobe *uprobe, struct file *filp) /* Instruction at the page-boundary; copy bytes in second page */ if (nbytes < bytes) { - int err = __copy_insn(mapping, filp, uprobe->arch.insn + nbytes, - bytes - nbytes, uprobe->offset + nbytes); - if (err) - return err; + if (__copy_insn(mapping, vma, uprobe->arch.insn + nbytes, + bytes - nbytes, uprobe->offset + nbytes)) + return -ENOMEM; + bytes = nbytes; } - return __copy_insn(mapping, filp, uprobe->arch.insn, bytes, uprobe->offset); + return __copy_insn(mapping, vma, uprobe->arch.insn, bytes, uprobe->offset); } /* @@ -664,8 +672,9 @@ static int copy_insn(struct uprobe *uprobe, struct file *filp) */ static int install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, - struct vm_area_struct *vma, unsigned long vaddr) + struct vm_area_struct *vma, loff_t vaddr) { + 
unsigned long addr; int ret; /* @@ -678,22 +687,20 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, if (!uprobe->consumers) return -EEXIST; + addr = (unsigned long)vaddr; + if (!(uprobe->flags & UPROBE_COPY_INSN)) { - ret = copy_insn(uprobe, vma->vm_file); + ret = copy_insn(uprobe, vma, addr); if (ret) return ret; if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn)) - return -ENOTSUPP; + return -EEXIST; - ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr); + ret = arch_uprobe_analyze_insn(&uprobe->arch, mm); if (ret) return ret; - /* write_opcode() assumes we don't cross page boundary */ - BUG_ON((uprobe->offset & ~PAGE_MASK) + - UPROBE_SWBP_INSN_SIZE > PAGE_SIZE); - uprobe->flags |= UPROBE_COPY_INSN; } @@ -706,7 +713,7 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, * Hence increment before and decrement on failure. */ atomic_inc(&mm->uprobes_state.count); - ret = set_swbp(&uprobe->arch, mm, vaddr); + ret = set_swbp(&uprobe->arch, mm, addr); if (ret) atomic_dec(&mm->uprobes_state.count); @@ -714,21 +721,27 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, } static void -remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr) +remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, loff_t vaddr) { - if (!set_orig_insn(&uprobe->arch, mm, vaddr, true)) + if (!set_orig_insn(&uprobe->arch, mm, (unsigned long)vaddr, true)) atomic_dec(&mm->uprobes_state.count); } /* - * There could be threads that have already hit the breakpoint. They - * will recheck the current insn and restart if find_uprobe() fails. - * See find_active_uprobe(). + * There could be threads that have hit the breakpoint and are entering the + * notifier code and trying to acquire the uprobes_treelock. The thread + * calling delete_uprobe() that is removing the uprobe from the rb_tree can + * race with these threads and might acquire the uprobes_treelock compared + * to some of the breakpoint hit threads. In such a case, the breakpoint + * hit threads will not find the uprobe. The current unregistering thread + * waits till all other threads have hit a breakpoint, to acquire the + * uprobes_treelock before the uprobe is removed from the rbtree. 
*/ static void delete_uprobe(struct uprobe *uprobe) { unsigned long flags; + synchronize_srcu(&uprobes_srcu); spin_lock_irqsave(&uprobes_treelock, flags); rb_erase(&uprobe->rb_node, &uprobes_tree); spin_unlock_irqrestore(&uprobes_treelock, flags); @@ -737,135 +750,139 @@ static void delete_uprobe(struct uprobe *uprobe) atomic_dec(&uprobe_events); } -struct map_info { - struct map_info *next; - struct mm_struct *mm; - unsigned long vaddr; -}; - -static inline struct map_info *free_map_info(struct map_info *info) -{ - struct map_info *next = info->next; - kfree(info); - return next; -} - -static struct map_info * -build_map_info(struct address_space *mapping, loff_t offset, bool is_register) +static struct vma_info * +__find_next_vma_info(struct address_space *mapping, struct list_head *head, + struct vma_info *vi, loff_t offset, bool is_register) { - unsigned long pgoff = offset >> PAGE_SHIFT; struct prio_tree_iter iter; struct vm_area_struct *vma; - struct map_info *curr = NULL; - struct map_info *prev = NULL; - struct map_info *info; - int more = 0; + struct vma_info *tmpvi; + unsigned long pgoff; + int existing_vma; + loff_t vaddr; + + pgoff = offset >> PAGE_SHIFT; - again: - mutex_lock(&mapping->i_mmap_mutex); vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { if (!valid_vma(vma, is_register)) continue; - if (!prev && !more) { - /* - * Needs GFP_NOWAIT to avoid i_mmap_mutex recursion through - * reclaim. This is optimistic, no harm done if it fails. - */ - prev = kmalloc(sizeof(struct map_info), - GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN); - if (prev) - prev->next = NULL; + existing_vma = 0; + vaddr = vma_address(vma, offset); + + list_for_each_entry(tmpvi, head, probe_list) { + if (tmpvi->mm == vma->vm_mm && tmpvi->vaddr == vaddr) { + existing_vma = 1; + break; + } } - if (!prev) { - more++; - continue; + + /* + * Another vma needs a probe to be installed. However skip + * installing the probe if the vma is about to be unlinked. + */ + if (!existing_vma && atomic_inc_not_zero(&vma->vm_mm->mm_users)) { + vi->mm = vma->vm_mm; + vi->vaddr = vaddr; + list_add(&vi->probe_list, head); + + return vi; } + } - if (!atomic_inc_not_zero(&vma->vm_mm->mm_users)) - continue; + return NULL; +} - info = prev; - prev = prev->next; - info->next = curr; - curr = info; +/* + * Iterate in the rmap prio tree and find a vma where a probe has not + * yet been inserted. 
+ */ +static struct vma_info * +find_next_vma_info(struct address_space *mapping, struct list_head *head, + loff_t offset, bool is_register) +{ + struct vma_info *vi, *retvi; - info->mm = vma->vm_mm; - info->vaddr = vma_address(vma, offset); - } - mutex_unlock(&mapping->i_mmap_mutex); + vi = kzalloc(sizeof(struct vma_info), GFP_KERNEL); + if (!vi) + return ERR_PTR(-ENOMEM); - if (!more) - goto out; + mutex_lock(&mapping->i_mmap_mutex); + retvi = __find_next_vma_info(mapping, head, vi, offset, is_register); + mutex_unlock(&mapping->i_mmap_mutex); - prev = curr; - while (curr) { - mmput(curr->mm); - curr = curr->next; - } + if (!retvi) + kfree(vi); - do { - info = kmalloc(sizeof(struct map_info), GFP_KERNEL); - if (!info) { - curr = ERR_PTR(-ENOMEM); - goto out; - } - info->next = prev; - prev = info; - } while (--more); - - goto again; - out: - while (prev) - prev = free_map_info(prev); - return curr; + return retvi; } static int register_for_each_vma(struct uprobe *uprobe, bool is_register) { - struct map_info *info; - int err = 0; + struct list_head try_list; + struct vm_area_struct *vma; + struct address_space *mapping; + struct vma_info *vi, *tmpvi; + struct mm_struct *mm; + loff_t vaddr; + int ret; - info = build_map_info(uprobe->inode->i_mapping, - uprobe->offset, is_register); - if (IS_ERR(info)) - return PTR_ERR(info); + mapping = uprobe->inode->i_mapping; + INIT_LIST_HEAD(&try_list); - while (info) { - struct mm_struct *mm = info->mm; - struct vm_area_struct *vma; + ret = 0; - if (err) - goto free; + for (;;) { + vi = find_next_vma_info(mapping, &try_list, uprobe->offset, is_register); + if (!vi) + break; - down_write(&mm->mmap_sem); - vma = find_vma(mm, (unsigned long)info->vaddr); - if (!vma || !valid_vma(vma, is_register)) - goto unlock; + if (IS_ERR(vi)) { + ret = PTR_ERR(vi); + break; + } + mm = vi->mm; + down_read(&mm->mmap_sem); + vma = find_vma(mm, (unsigned long)vi->vaddr); + if (!vma || !valid_vma(vma, is_register)) { + list_del(&vi->probe_list); + kfree(vi); + up_read(&mm->mmap_sem); + mmput(mm); + continue; + } + vaddr = vma_address(vma, uprobe->offset); if (vma->vm_file->f_mapping->host != uprobe->inode || - vma_address(vma, uprobe->offset) != info->vaddr) - goto unlock; + vaddr != vi->vaddr) { + list_del(&vi->probe_list); + kfree(vi); + up_read(&mm->mmap_sem); + mmput(mm); + continue; + } + + if (is_register) + ret = install_breakpoint(uprobe, mm, vma, vi->vaddr); + else + remove_breakpoint(uprobe, mm, vi->vaddr); + up_read(&mm->mmap_sem); + mmput(mm); if (is_register) { - err = install_breakpoint(uprobe, mm, vma, info->vaddr); - /* - * We can race against uprobe_mmap(), see the - * comment near uprobe_hash(). 
- */ - if (err == -EEXIST) - err = 0; - } else { - remove_breakpoint(uprobe, mm, info->vaddr); + if (ret && ret == -EEXIST) + ret = 0; + if (ret) + break; } - unlock: - up_write(&mm->mmap_sem); - free: - mmput(mm); - info = free_map_info(info); } - return err; + list_for_each_entry_safe(vi, tmpvi, &try_list, probe_list) { + list_del(&vi->probe_list); + kfree(vi); + } + + return ret; } static int __uprobe_register(struct uprobe *uprobe) @@ -1031,7 +1048,7 @@ static void build_probe_list(struct inode *inode, struct list_head *head) int uprobe_mmap(struct vm_area_struct *vma) { struct list_head tmp_list; - struct uprobe *uprobe; + struct uprobe *uprobe, *u; struct inode *inode; int ret, count; @@ -1049,9 +1066,12 @@ int uprobe_mmap(struct vm_area_struct *vma) ret = 0; count = 0; - list_for_each_entry(uprobe, &tmp_list, pending_list) { + list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { + loff_t vaddr; + + list_del(&uprobe->pending_list); if (!ret) { - loff_t vaddr = vma_address(vma, uprobe->offset); + vaddr = vma_address(vma, uprobe->offset); if (vaddr < vma->vm_start || vaddr >= vma->vm_end) { put_uprobe(uprobe); @@ -1059,10 +1079,8 @@ int uprobe_mmap(struct vm_area_struct *vma) } ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); - /* - * We can race against uprobe_register(), see the - * comment near uprobe_hash(). - */ + + /* Ignore double add: */ if (ret == -EEXIST) { ret = 0; @@ -1097,7 +1115,7 @@ int uprobe_mmap(struct vm_area_struct *vma) void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct list_head tmp_list; - struct uprobe *uprobe; + struct uprobe *uprobe, *u; struct inode *inode; if (!atomic_read(&uprobe_events) || !valid_vma(vma, false)) @@ -1114,8 +1132,11 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon mutex_lock(uprobes_mmap_hash(inode)); build_probe_list(inode, &tmp_list); - list_for_each_entry(uprobe, &tmp_list, pending_list) { - loff_t vaddr = vma_address(vma, uprobe->offset); + list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { + loff_t vaddr; + + list_del(&uprobe->pending_list); + vaddr = vma_address(vma, uprobe->offset); if (vaddr >= start && vaddr < end) { /* @@ -1357,6 +1378,9 @@ void uprobe_free_utask(struct task_struct *t) { struct uprobe_task *utask = t->utask; + if (t->uprobe_srcu_id != -1) + srcu_read_unlock_raw(&uprobes_srcu, t->uprobe_srcu_id); + if (!utask) return; @@ -1374,6 +1398,7 @@ void uprobe_free_utask(struct task_struct *t) void uprobe_copy_process(struct task_struct *t) { t->utask = NULL; + t->uprobe_srcu_id = -1; } /* @@ -1392,6 +1417,7 @@ static struct uprobe_task *add_utask(void) if (unlikely(!utask)) return NULL; + utask->active_uprobe = NULL; current->utask = utask; return utask; } @@ -1453,64 +1479,41 @@ static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs) return false; } -static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp) -{ - struct mm_struct *mm = current->mm; - struct uprobe *uprobe = NULL; - struct vm_area_struct *vma; - - down_read(&mm->mmap_sem); - vma = find_vma(mm, bp_vaddr); - if (vma && vma->vm_start <= bp_vaddr) { - if (valid_vma(vma, false)) { - struct inode *inode; - loff_t offset; - - inode = vma->vm_file->f_mapping->host; - offset = bp_vaddr - vma->vm_start; - offset += (vma->vm_pgoff << PAGE_SHIFT); - uprobe = find_uprobe(inode, offset); - } - - if (!uprobe) - *is_swbp = is_swbp_at_addr(mm, bp_vaddr); - } else { - *is_swbp = -EFAULT; - } - up_read(&mm->mmap_sem); - - return 
uprobe; -} - /* * Run handler and ask thread to singlestep. * Ensure all non-fatal signals cannot interrupt thread while it singlesteps. */ static void handle_swbp(struct pt_regs *regs) { + struct vm_area_struct *vma; struct uprobe_task *utask; struct uprobe *uprobe; + struct mm_struct *mm; unsigned long bp_vaddr; - int uninitialized_var(is_swbp); + uprobe = NULL; bp_vaddr = uprobe_get_swbp_addr(regs); - uprobe = find_active_uprobe(bp_vaddr, &is_swbp); + mm = current->mm; + down_read(&mm->mmap_sem); + vma = find_vma(mm, bp_vaddr); + + if (vma && vma->vm_start <= bp_vaddr && valid_vma(vma, false)) { + struct inode *inode; + loff_t offset; + + inode = vma->vm_file->f_mapping->host; + offset = bp_vaddr - vma->vm_start; + offset += (vma->vm_pgoff << PAGE_SHIFT); + uprobe = find_uprobe(inode, offset); + } + + srcu_read_unlock_raw(&uprobes_srcu, current->uprobe_srcu_id); + current->uprobe_srcu_id = -1; + up_read(&mm->mmap_sem); if (!uprobe) { - if (is_swbp > 0) { - /* No matching uprobe; signal SIGTRAP. */ - send_sig(SIGTRAP, current, 0); - } else { - /* - * Either we raced with uprobe_unregister() or we can't - * access this memory. The latter is only possible if - * another thread plays with our ->mm. In both cases - * we can simply restart. If this vma was unmapped we - * can pretend this insn was not executed yet and get - * the (correct) SIGSEGV after restart. - */ - instruction_pointer_set(regs, bp_vaddr); - } + /* No matching uprobe; signal SIGTRAP. */ + send_sig(SIGTRAP, current, 0); return; } @@ -1617,6 +1620,7 @@ int uprobe_pre_sstep_notifier(struct pt_regs *regs) utask->state = UTASK_BP_HIT; set_thread_flag(TIF_UPROBE); + current->uprobe_srcu_id = srcu_read_lock_raw(&uprobes_srcu); return 1; } @@ -1651,6 +1655,7 @@ static int __init init_uprobes(void) mutex_init(&uprobes_mutex[i]); mutex_init(&uprobes_mmap_mutex[i]); } + init_srcu_struct(&uprobes_srcu); return register_die_notifier(&uprobe_exception_nb); } diff --git a/trunk/kernel/exit.c b/trunk/kernel/exit.c index d17f6c4ddfa9..2f59cc334516 100644 --- a/trunk/kernel/exit.c +++ b/trunk/kernel/exit.c @@ -953,11 +953,14 @@ void do_exit(long code) exit_signals(tsk); /* sets PF_EXITING */ /* * tsk->flags are checked in the futex code to protect against - * an exiting task cleaning up the robust pi futexes. + * an exiting task cleaning up the robust pi futexes, and in + * task_work_add() to avoid the race with exit_task_work(). */ smp_mb(); raw_spin_unlock_wait(&tsk->pi_lock); + exit_task_work(tsk); + if (unlikely(in_atomic())) printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n", current->comm, task_pid_nr(current), @@ -992,7 +995,6 @@ void do_exit(long code) exit_shm(tsk); exit_files(tsk); exit_fs(tsk); - exit_task_work(tsk); check_stack_usage(); exit_thread(); diff --git a/trunk/kernel/fork.c b/trunk/kernel/fork.c index ff1cad3b7bdc..f00e319d8376 100644 --- a/trunk/kernel/fork.c +++ b/trunk/kernel/fork.c @@ -1420,7 +1420,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, */ p->group_leader = p; INIT_LIST_HEAD(&p->thread_group); - p->task_works = NULL; + INIT_HLIST_HEAD(&p->task_works); /* Now that the task is set up, run cgroup callbacks if * necessary. 
We need to run them before the task is visible diff --git a/trunk/kernel/irq/manage.c b/trunk/kernel/irq/manage.c index 814c9ef6bba1..8c548232ba39 100644 --- a/trunk/kernel/irq/manage.c +++ b/trunk/kernel/irq/manage.c @@ -781,7 +781,7 @@ static void wake_threads_waitq(struct irq_desc *desc) wake_up(&desc->wait_for_threads); } -static void irq_thread_dtor(struct callback_head *unused) +static void irq_thread_dtor(struct task_work *unused) { struct task_struct *tsk = current; struct irq_desc *desc; @@ -813,7 +813,7 @@ static void irq_thread_dtor(struct callback_head *unused) */ static int irq_thread(void *data) { - struct callback_head on_exit_work; + struct task_work on_exit_work; static const struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, }; @@ -830,7 +830,7 @@ static int irq_thread(void *data) sched_setscheduler(current, SCHED_FIFO, ¶m); - init_task_work(&on_exit_work, irq_thread_dtor); + init_task_work(&on_exit_work, irq_thread_dtor, NULL); task_work_add(current, &on_exit_work, false); while (!irq_wait_for_interrupt(action)) { diff --git a/trunk/kernel/power/Kconfig b/trunk/kernel/power/Kconfig index a70518c9d82f..8f9b4eb974e0 100644 --- a/trunk/kernel/power/Kconfig +++ b/trunk/kernel/power/Kconfig @@ -175,7 +175,7 @@ config PM_TEST_SUSPEND You probably want to have your system's RTC driver statically linked, ensuring that it's available when this test runs. -config PM_SLEEP_DEBUG +config CAN_PM_TRACE def_bool y depends on PM_DEBUG && PM_SLEEP @@ -196,7 +196,7 @@ config PM_TRACE config PM_TRACE_RTC bool "Suspend/resume event tracing" - depends on PM_SLEEP_DEBUG + depends on CAN_PM_TRACE depends on X86 select PM_TRACE ---help--- diff --git a/trunk/kernel/power/hibernate.c b/trunk/kernel/power/hibernate.c index b26f5f1e773e..8b53db38a279 100644 --- a/trunk/kernel/power/hibernate.c +++ b/trunk/kernel/power/hibernate.c @@ -5,7 +5,6 @@ * Copyright (c) 2003 Open Source Development Lab * Copyright (c) 2004 Pavel Machek * Copyright (c) 2009 Rafael J. Wysocki, Novell Inc. - * Copyright (C) 2012 Bojan Smojver * * This file is released under the GPLv2. 
*/ @@ -28,6 +27,7 @@ #include #include #include +#include #include "power.h" @@ -46,9 +46,6 @@ enum { HIBERNATION_PLATFORM, HIBERNATION_SHUTDOWN, HIBERNATION_REBOOT, -#ifdef CONFIG_SUSPEND - HIBERNATION_SUSPEND, -#endif /* keep last */ __HIBERNATION_AFTER_LAST }; @@ -357,7 +354,6 @@ int hibernation_snapshot(int platform_mode) } suspend_console(); - ftrace_stop(); pm_restrict_gfp_mask(); error = dpm_suspend(PMSG_FREEZE); @@ -383,7 +379,6 @@ int hibernation_snapshot(int platform_mode) if (error || !in_suspend) pm_restore_gfp_mask(); - ftrace_start(); resume_console(); dpm_complete(msg); @@ -486,7 +481,6 @@ int hibernation_restore(int platform_mode) pm_prepare_console(); suspend_console(); - ftrace_stop(); pm_restrict_gfp_mask(); error = dpm_suspend_start(PMSG_QUIESCE); if (!error) { @@ -494,7 +488,6 @@ int hibernation_restore(int platform_mode) dpm_resume_end(PMSG_RECOVER); } pm_restore_gfp_mask(); - ftrace_start(); resume_console(); pm_restore_console(); return error; @@ -521,7 +514,6 @@ int hibernation_platform_enter(void) entering_platform_hibernation = true; suspend_console(); - ftrace_stop(); error = dpm_suspend_start(PMSG_HIBERNATE); if (error) { if (hibernation_ops->recover) @@ -565,7 +557,6 @@ int hibernation_platform_enter(void) Resume_devices: entering_platform_hibernation = false; dpm_resume_end(PMSG_RESTORE); - ftrace_start(); resume_console(); Close: @@ -583,10 +574,6 @@ int hibernation_platform_enter(void) */ static void power_down(void) { -#ifdef CONFIG_SUSPEND - int error; -#endif - switch (hibernation_mode) { case HIBERNATION_REBOOT: kernel_restart(NULL); @@ -596,25 +583,6 @@ static void power_down(void) case HIBERNATION_SHUTDOWN: kernel_power_off(); break; -#ifdef CONFIG_SUSPEND - case HIBERNATION_SUSPEND: - error = suspend_devices_and_enter(PM_SUSPEND_MEM); - if (error) { - if (hibernation_ops) - hibernation_mode = HIBERNATION_PLATFORM; - else - hibernation_mode = HIBERNATION_SHUTDOWN; - power_down(); - } - /* - * Restore swap signature. - */ - error = swsusp_unmark(); - if (error) - printk(KERN_ERR "PM: Swap will be unusable! " - "Try swapon -a.\n"); - return; -#endif } kernel_halt(); /* @@ -780,6 +748,13 @@ static int software_resume(void) async_synchronize_full(); } + /* + * We can't depend on SCSI devices being available after loading + * one of their modules until scsi_complete_async_scans() is + * called and the resume device usually is a SCSI one. 
+ */ + scsi_complete_async_scans(); + swsusp_resume_device = name_to_dev_t(resume_file); if (!swsusp_resume_device) { error = -ENODEV; @@ -852,9 +827,6 @@ static const char * const hibernation_modes[] = { [HIBERNATION_PLATFORM] = "platform", [HIBERNATION_SHUTDOWN] = "shutdown", [HIBERNATION_REBOOT] = "reboot", -#ifdef CONFIG_SUSPEND - [HIBERNATION_SUSPEND] = "suspend", -#endif }; /* @@ -895,9 +867,6 @@ static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr, switch (i) { case HIBERNATION_SHUTDOWN: case HIBERNATION_REBOOT: -#ifdef CONFIG_SUSPEND - case HIBERNATION_SUSPEND: -#endif break; case HIBERNATION_PLATFORM: if (hibernation_ops) @@ -938,9 +907,6 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr, switch (mode) { case HIBERNATION_SHUTDOWN: case HIBERNATION_REBOOT: -#ifdef CONFIG_SUSPEND - case HIBERNATION_SUSPEND: -#endif hibernation_mode = mode; break; case HIBERNATION_PLATFORM: diff --git a/trunk/kernel/power/main.c b/trunk/kernel/power/main.c index f458238109cc..428f8a034e96 100644 --- a/trunk/kernel/power/main.c +++ b/trunk/kernel/power/main.c @@ -235,47 +235,6 @@ late_initcall(pm_debugfs_init); #endif /* CONFIG_PM_SLEEP */ -#ifdef CONFIG_PM_SLEEP_DEBUG -/* - * pm_print_times: print time taken by devices to suspend and resume. - * - * show() returns whether printing of suspend and resume times is enabled. - * store() accepts 0 or 1. 0 disables printing and 1 enables it. - */ -bool pm_print_times_enabled; - -static ssize_t pm_print_times_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", pm_print_times_enabled); -} - -static ssize_t pm_print_times_store(struct kobject *kobj, - struct kobj_attribute *attr, - const char *buf, size_t n) -{ - unsigned long val; - - if (kstrtoul(buf, 10, &val)) - return -EINVAL; - - if (val > 1) - return -EINVAL; - - pm_print_times_enabled = !!val; - return n; -} - -power_attr(pm_print_times); - -static inline void pm_print_times_init(void) -{ - pm_print_times_enabled = !!initcall_debug; -} -#else /* !CONFIG_PP_SLEEP_DEBUG */ -static inline void pm_print_times_init(void) {} -#endif /* CONFIG_PM_SLEEP_DEBUG */ - struct kobject *power_kobj; /** @@ -572,9 +531,6 @@ static struct attribute * g[] = { #ifdef CONFIG_PM_DEBUG &pm_test_attr.attr, #endif -#ifdef CONFIG_PM_SLEEP_DEBUG - &pm_print_times_attr.attr, -#endif #endif NULL, }; @@ -610,7 +566,6 @@ static int __init pm_init(void) error = sysfs_create_group(power_kobj, &attr_group); if (error) return error; - pm_print_times_init(); return pm_autosleep_init(); } diff --git a/trunk/kernel/power/power.h b/trunk/kernel/power/power.h index 7d4b7ffb3c1d..b0bd4beaebfe 100644 --- a/trunk/kernel/power/power.h +++ b/trunk/kernel/power/power.h @@ -156,9 +156,6 @@ extern void swsusp_free(void); extern int swsusp_read(unsigned int *flags_p); extern int swsusp_write(unsigned int flags); extern void swsusp_close(fmode_t); -#ifdef CONFIG_SUSPEND -extern int swsusp_unmark(void); -#endif /* kernel/power/block_io.c */ extern struct block_device *hib_resume_bdev; diff --git a/trunk/kernel/power/suspend.c b/trunk/kernel/power/suspend.c index c8b7446b27df..396d262b8fd0 100644 --- a/trunk/kernel/power/suspend.c +++ b/trunk/kernel/power/suspend.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include "power.h" @@ -213,7 +212,6 @@ int suspend_devices_and_enter(suspend_state_t state) goto Close; } suspend_console(); - ftrace_stop(); suspend_test_start(); error = dpm_suspend_start(PMSG_SUSPEND); if (error) { @@ -233,7 
+231,6 @@ int suspend_devices_and_enter(suspend_state_t state) suspend_test_start(); dpm_resume_end(PMSG_RESUME); suspend_test_finish("resume devices"); - ftrace_start(); resume_console(); Close: if (suspend_ops->end) diff --git a/trunk/kernel/power/swap.c b/trunk/kernel/power/swap.c index 3c9d764eb0d8..11e22c068e8b 100644 --- a/trunk/kernel/power/swap.c +++ b/trunk/kernel/power/swap.c @@ -448,9 +448,9 @@ static int save_image(struct swap_map_handle *handle, struct timeval start; struct timeval stop; - printk(KERN_INFO "PM: Saving image data pages (%u pages)...\n", + printk(KERN_INFO "PM: Saving image data pages (%u pages) ... ", nr_to_write); - m = nr_to_write / 10; + m = nr_to_write / 100; if (!m) m = 1; nr_pages = 0; @@ -464,8 +464,7 @@ static int save_image(struct swap_map_handle *handle, if (ret) break; if (!(nr_pages % m)) - printk(KERN_INFO "PM: Image saving progress: %3d%%\n", - nr_pages / m * 10); + printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m); nr_pages++; } err2 = hib_wait_on_bio_chain(&bio); @@ -473,7 +472,9 @@ static int save_image(struct swap_map_handle *handle, if (!ret) ret = err2; if (!ret) - printk(KERN_INFO "PM: Image saving done.\n"); + printk(KERN_CONT "\b\b\b\bdone\n"); + else + printk(KERN_CONT "\n"); swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); return ret; } @@ -667,9 +668,9 @@ static int save_image_lzo(struct swap_map_handle *handle, printk(KERN_INFO "PM: Using %u thread(s) for compression.\n" - "PM: Compressing and saving image data (%u pages)...\n", + "PM: Compressing and saving image data (%u pages) ... ", nr_threads, nr_to_write); - m = nr_to_write / 10; + m = nr_to_write / 100; if (!m) m = 1; nr_pages = 0; @@ -689,10 +690,8 @@ static int save_image_lzo(struct swap_map_handle *handle, data_of(*snapshot), PAGE_SIZE); if (!(nr_pages % m)) - printk(KERN_INFO - "PM: Image saving progress: " - "%3d%%\n", - nr_pages / m * 10); + printk(KERN_CONT "\b\b\b\b%3d%%", + nr_pages / m); nr_pages++; } if (!off) @@ -762,8 +761,11 @@ static int save_image_lzo(struct swap_map_handle *handle, do_gettimeofday(&stop); if (!ret) ret = err2; - if (!ret) - printk(KERN_INFO "PM: Image saving done.\n"); + if (!ret) { + printk(KERN_CONT "\b\b\b\bdone\n"); + } else { + printk(KERN_CONT "\n"); + } swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); out_clean: if (crc) { @@ -971,9 +973,9 @@ static int load_image(struct swap_map_handle *handle, int err2; unsigned nr_pages; - printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n", + printk(KERN_INFO "PM: Loading image data pages (%u pages) ... 
", nr_to_read); - m = nr_to_read / 10; + m = nr_to_read / 100; if (!m) m = 1; nr_pages = 0; @@ -991,8 +993,7 @@ static int load_image(struct swap_map_handle *handle, if (ret) break; if (!(nr_pages % m)) - printk(KERN_INFO "PM: Image loading progress: %3d%%\n", - nr_pages / m * 10); + printk("\b\b\b\b%3d%%", nr_pages / m); nr_pages++; } err2 = hib_wait_on_bio_chain(&bio); @@ -1000,11 +1001,12 @@ static int load_image(struct swap_map_handle *handle, if (!ret) ret = err2; if (!ret) { - printk(KERN_INFO "PM: Image loading done.\n"); + printk("\b\b\b\bdone\n"); snapshot_write_finalize(snapshot); if (!snapshot_image_loaded(snapshot)) ret = -ENODATA; - } + } else + printk("\n"); swsusp_show_speed(&start, &stop, nr_to_read, "Read"); return ret; } @@ -1183,9 +1185,9 @@ static int load_image_lzo(struct swap_map_handle *handle, printk(KERN_INFO "PM: Using %u thread(s) for decompression.\n" - "PM: Loading and decompressing image data (%u pages)...\n", + "PM: Loading and decompressing image data (%u pages) ... ", nr_threads, nr_to_read); - m = nr_to_read / 10; + m = nr_to_read / 100; if (!m) m = 1; nr_pages = 0; @@ -1317,10 +1319,7 @@ static int load_image_lzo(struct swap_map_handle *handle, data[thr].unc + off, PAGE_SIZE); if (!(nr_pages % m)) - printk(KERN_INFO - "PM: Image loading progress: " - "%3d%%\n", - nr_pages / m * 10); + printk("\b\b\b\b%3d%%", nr_pages / m); nr_pages++; ret = snapshot_write_next(snapshot); @@ -1345,7 +1344,7 @@ static int load_image_lzo(struct swap_map_handle *handle, } do_gettimeofday(&stop); if (!ret) { - printk(KERN_INFO "PM: Image loading done.\n"); + printk("\b\b\b\bdone\n"); snapshot_write_finalize(snapshot); if (!snapshot_image_loaded(snapshot)) ret = -ENODATA; @@ -1358,7 +1357,8 @@ static int load_image_lzo(struct swap_map_handle *handle, } } } - } + } else + printk("\n"); swsusp_show_speed(&start, &stop, nr_to_read, "Read"); out_clean: for (i = 0; i < ring_size; i++) @@ -1472,34 +1472,6 @@ void swsusp_close(fmode_t mode) blkdev_put(hib_resume_bdev, mode); } -/** - * swsusp_unmark - Unmark swsusp signature in the resume device - */ - -#ifdef CONFIG_SUSPEND -int swsusp_unmark(void) -{ - int error; - - hib_bio_read_page(swsusp_resume_block, swsusp_header, NULL); - if (!memcmp(HIBERNATE_SIG,swsusp_header->sig, 10)) { - memcpy(swsusp_header->sig,swsusp_header->orig_sig, 10); - error = hib_bio_write_page(swsusp_resume_block, - swsusp_header, NULL); - } else { - printk(KERN_ERR "PM: Cannot find swsusp signature!\n"); - error = -ENODEV; - } - - /* - * We just returned from suspend, we don't need the image any more. - */ - free_all_swap_pages(root_swap); - - return error; -} -#endif - static int swsusp_header_init(void) { swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL); diff --git a/trunk/kernel/power/user.c b/trunk/kernel/power/user.c index 4ed81e74f86f..91b0fd021a95 100644 --- a/trunk/kernel/power/user.c +++ b/trunk/kernel/power/user.c @@ -24,6 +24,7 @@ #include #include #include +#include #include @@ -83,6 +84,7 @@ static int snapshot_open(struct inode *inode, struct file *filp) * appear. */ wait_for_device_probe(); + scsi_complete_async_scans(); data->swap = -1; data->mode = O_WRONLY; diff --git a/trunk/kernel/power/wakelock.c b/trunk/kernel/power/wakelock.c index 8f50de394d22..c8fba3380076 100644 --- a/trunk/kernel/power/wakelock.c +++ b/trunk/kernel/power/wakelock.c @@ -9,7 +9,6 @@ * manipulate wakelocks on Android. 
*/ -#include #include #include #include @@ -189,9 +188,6 @@ int pm_wake_lock(const char *buf) size_t len; int ret = 0; - if (!capable(CAP_BLOCK_SUSPEND)) - return -EPERM; - while (*str && !isspace(*str)) str++; @@ -235,9 +231,6 @@ int pm_wake_unlock(const char *buf) size_t len; int ret = 0; - if (!capable(CAP_BLOCK_SUSPEND)) - return -EPERM; - len = strlen(buf); if (!len) return -EINVAL; diff --git a/trunk/kernel/printk.c b/trunk/kernel/printk.c index ac4bc9e79465..177fa49357a5 100644 --- a/trunk/kernel/printk.c +++ b/trunk/kernel/printk.c @@ -1192,6 +1192,21 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) return do_syslog(type, buf, len, SYSLOG_FROM_CALL); } +#ifdef CONFIG_KGDB_KDB +/* kdb dmesg command needs access to the syslog buffer. do_syslog() + * uses locks so it cannot be used during debugging. Just tell kdb + * where the start and end of the physical and logical logs are. This + * is equivalent to do_syslog(3). + */ +void kdb_syslog_data(char *syslog_data[4]) +{ + syslog_data[0] = log_buf; + syslog_data[1] = log_buf + log_buf_len; + syslog_data[2] = log_buf + log_first_idx; + syslog_data[3] = log_buf + log_next_idx; +} +#endif /* CONFIG_KGDB_KDB */ + static bool __read_mostly ignore_loglevel; static int __init ignore_loglevel_setup(char *str) @@ -2510,7 +2525,7 @@ void kmsg_dump(enum kmsg_dump_reason reason) } /** - * kmsg_dump_get_line_nolock - retrieve one kmsg log line (unlocked version) + * kmsg_dump_get_line - retrieve one kmsg log line * @dumper: registered kmsg dumper * @syslog: include the "<4>" prefixes * @line: buffer to copy the line to @@ -2525,12 +2540,11 @@ void kmsg_dump(enum kmsg_dump_reason reason) * * A return value of FALSE indicates that there are no more records to * read. - * - * The function is similar to kmsg_dump_get_line(), but grabs no locks. */ -bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, - char *line, size_t size, size_t *len) +bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog, + char *line, size_t size, size_t *len) { + unsigned long flags; struct log *msg; size_t l = 0; bool ret = false; @@ -2538,6 +2552,7 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, if (!dumper->active) goto out; + raw_spin_lock_irqsave(&logbuf_lock, flags); if (dumper->cur_seq < log_first_seq) { /* messages are gone, move to first available one */ dumper->cur_seq = log_first_seq; @@ -2545,8 +2560,10 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, } /* last entry */ - if (dumper->cur_seq >= log_next_seq) + if (dumper->cur_seq >= log_next_seq) { + raw_spin_unlock_irqrestore(&logbuf_lock, flags); goto out; + } msg = log_from_idx(dumper->cur_idx); l = msg_print_text(msg, 0, syslog, line, size); @@ -2554,41 +2571,12 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, dumper->cur_idx = log_next(dumper->cur_idx); dumper->cur_seq++; ret = true; + raw_spin_unlock_irqrestore(&logbuf_lock, flags); out: if (len) *len = l; return ret; } - -/** - * kmsg_dump_get_line - retrieve one kmsg log line - * @dumper: registered kmsg dumper - * @syslog: include the "<4>" prefixes - * @line: buffer to copy the line to - * @size: maximum size of the buffer - * @len: length of line placed into buffer - * - * Start at the beginning of the kmsg buffer, with the oldest kmsg - * record, and copy one record into the provided buffer. - * - * Consecutive calls will return the next available record moving - * towards the end of the buffer with the youngest messages. 
- * - * A return value of FALSE indicates that there are no more records to - * read. - */ -bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog, - char *line, size_t size, size_t *len) -{ - unsigned long flags; - bool ret; - - raw_spin_lock_irqsave(&logbuf_lock, flags); - ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len); - raw_spin_unlock_irqrestore(&logbuf_lock, flags); - - return ret; -} EXPORT_SYMBOL_GPL(kmsg_dump_get_line); /** @@ -2690,24 +2678,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, } EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer); -/** - * kmsg_dump_rewind_nolock - reset the interator (unlocked version) - * @dumper: registered kmsg dumper - * - * Reset the dumper's iterator so that kmsg_dump_get_line() and - * kmsg_dump_get_buffer() can be called again and used multiple - * times within the same dumper.dump() callback. - * - * The function is similar to kmsg_dump_rewind(), but grabs no locks. - */ -void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper) -{ - dumper->cur_seq = clear_seq; - dumper->cur_idx = clear_idx; - dumper->next_seq = log_next_seq; - dumper->next_idx = log_next_idx; -} - /** * kmsg_dump_rewind - reset the interator * @dumper: registered kmsg dumper @@ -2721,7 +2691,10 @@ void kmsg_dump_rewind(struct kmsg_dumper *dumper) unsigned long flags; raw_spin_lock_irqsave(&logbuf_lock, flags); - kmsg_dump_rewind_nolock(dumper); + dumper->cur_seq = clear_seq; + dumper->cur_idx = clear_idx; + dumper->next_seq = log_next_seq; + dumper->next_idx = log_next_idx; raw_spin_unlock_irqrestore(&logbuf_lock, flags); } EXPORT_SYMBOL_GPL(kmsg_dump_rewind); diff --git a/trunk/kernel/rcupdate.c b/trunk/kernel/rcupdate.c index 4e6a61b15e86..95cba41ce1e9 100644 --- a/trunk/kernel/rcupdate.c +++ b/trunk/kernel/rcupdate.c @@ -53,50 +53,6 @@ #ifdef CONFIG_PREEMPT_RCU -/* - * Preemptible RCU implementation for rcu_read_lock(). - * Just increment ->rcu_read_lock_nesting, shared state will be updated - * if we block. - */ -void __rcu_read_lock(void) -{ - current->rcu_read_lock_nesting++; - barrier(); /* critical section after entry code. */ -} -EXPORT_SYMBOL_GPL(__rcu_read_lock); - -/* - * Preemptible RCU implementation for rcu_read_unlock(). - * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost - * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then - * invoke rcu_read_unlock_special() to clean up after a context switch - * in an RCU read-side critical section and other special cases. - */ -void __rcu_read_unlock(void) -{ - struct task_struct *t = current; - - if (t->rcu_read_lock_nesting != 1) { - --t->rcu_read_lock_nesting; - } else { - barrier(); /* critical section before exit code. */ - t->rcu_read_lock_nesting = INT_MIN; - barrier(); /* assign before ->rcu_read_unlock_special load */ - if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) - rcu_read_unlock_special(t); - barrier(); /* ->rcu_read_unlock_special load before assign */ - t->rcu_read_lock_nesting = 0; - } -#ifdef CONFIG_PROVE_LOCKING - { - int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting); - - WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2); - } -#endif /* #ifdef CONFIG_PROVE_LOCKING */ -} -EXPORT_SYMBOL_GPL(__rcu_read_unlock); - /* * Check for a task exiting while in a preemptible-RCU read-side * critical section, clean up if so. 
No need to issue warnings, diff --git a/trunk/kernel/rcutiny.c b/trunk/kernel/rcutiny.c index 547b1fe5b052..37a5444204d2 100644 --- a/trunk/kernel/rcutiny.c +++ b/trunk/kernel/rcutiny.c @@ -172,7 +172,7 @@ void rcu_irq_enter(void) local_irq_restore(flags); } -#ifdef CONFIG_DEBUG_LOCK_ALLOC +#ifdef CONFIG_PROVE_RCU /* * Test whether RCU thinks that the current CPU is idle. @@ -183,7 +183,7 @@ int rcu_is_cpu_idle(void) } EXPORT_SYMBOL(rcu_is_cpu_idle); -#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ +#endif /* #ifdef CONFIG_PROVE_RCU */ /* * Test whether the current CPU was interrupted from idle. Nested diff --git a/trunk/kernel/rcutiny_plugin.h b/trunk/kernel/rcutiny_plugin.h index 918fd1e8509c..fc31a2d65100 100644 --- a/trunk/kernel/rcutiny_plugin.h +++ b/trunk/kernel/rcutiny_plugin.h @@ -132,6 +132,7 @@ static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = { RCU_TRACE(.rcb.name = "rcu_preempt") }; +static void rcu_read_unlock_special(struct task_struct *t); static int rcu_preempted_readers_exp(void); static void rcu_report_exp_done(void); @@ -350,9 +351,8 @@ static int rcu_initiate_boost(void) rcu_preempt_ctrlblk.boost_tasks = rcu_preempt_ctrlblk.gp_tasks; invoke_rcu_callbacks(); - } else { + } else RCU_TRACE(rcu_initiate_boost_trace()); - } return 1; } @@ -526,12 +526,24 @@ void rcu_preempt_note_context_switch(void) local_irq_restore(flags); } +/* + * Tiny-preemptible RCU implementation for rcu_read_lock(). + * Just increment ->rcu_read_lock_nesting, shared state will be updated + * if we block. + */ +void __rcu_read_lock(void) +{ + current->rcu_read_lock_nesting++; + barrier(); /* needed if we ever invoke rcu_read_lock in rcutiny.c */ +} +EXPORT_SYMBOL_GPL(__rcu_read_lock); + /* * Handle special cases during rcu_read_unlock(), such as needing to * notify RCU core processing or task having blocked during the RCU * read-side critical section. */ -void rcu_read_unlock_special(struct task_struct *t) +static noinline void rcu_read_unlock_special(struct task_struct *t) { int empty; int empty_exp; @@ -614,6 +626,38 @@ void rcu_read_unlock_special(struct task_struct *t) local_irq_restore(flags); } +/* + * Tiny-preemptible RCU implementation for rcu_read_unlock(). + * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost + * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then + * invoke rcu_read_unlock_special() to clean up after a context switch + * in an RCU read-side critical section and other special cases. + */ +void __rcu_read_unlock(void) +{ + struct task_struct *t = current; + + barrier(); /* needed if we ever invoke rcu_read_unlock in rcutiny.c */ + if (t->rcu_read_lock_nesting != 1) + --t->rcu_read_lock_nesting; + else { + t->rcu_read_lock_nesting = INT_MIN; + barrier(); /* assign before ->rcu_read_unlock_special load */ + if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) + rcu_read_unlock_special(t); + barrier(); /* ->rcu_read_unlock_special load before assign */ + t->rcu_read_lock_nesting = 0; + } +#ifdef CONFIG_PROVE_LOCKING + { + int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting); + + WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2); + } +#endif /* #ifdef CONFIG_PROVE_LOCKING */ +} +EXPORT_SYMBOL_GPL(__rcu_read_unlock); + /* * Check for a quiescent state from the current CPU. When a task blocks, * the task is recorded in the rcu_preempt_ctrlblk structure, which is @@ -779,9 +823,9 @@ void synchronize_rcu_expedited(void) rpcp->exp_tasks = NULL; /* Wait for tail of ->blkd_tasks list to drain. 
*/ - if (!rcu_preempted_readers_exp()) { + if (!rcu_preempted_readers_exp()) local_irq_restore(flags); - } else { + else { rcu_initiate_boost(); local_irq_restore(flags); wait_event(sync_rcu_preempt_exp_wq, @@ -802,6 +846,8 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); */ int rcu_preempt_needs_cpu(void) { + if (!rcu_preempt_running_reader()) + rcu_preempt_cpu_qs(); return rcu_preempt_ctrlblk.rcb.rcucblist != NULL; } diff --git a/trunk/kernel/rcutorture.c b/trunk/kernel/rcutorture.c index 25b15033c61f..e66b34ab7555 100644 --- a/trunk/kernel/rcutorture.c +++ b/trunk/kernel/rcutorture.c @@ -49,7 +49,8 @@ #include MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Paul E. McKenney and Josh Triplett "); +MODULE_AUTHOR("Paul E. McKenney and " + "Josh Triplett "); static int nreaders = -1; /* # reader threads, defaults to 2*ncpus */ static int nfakewriters = 4; /* # fake writer threads */ @@ -205,7 +206,6 @@ static unsigned long boost_starttime; /* jiffies of next boost test start. */ DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */ /* and boost task create/destroy. */ static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */ -static bool barrier_phase; /* Test phase. */ static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */ static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */ static DECLARE_WAIT_QUEUE_HEAD(barrier_wq); @@ -407,9 +407,8 @@ rcu_torture_cb(struct rcu_head *p) if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { rp->rtort_mbtest = 0; rcu_torture_free(rp); - } else { + } else cur_ops->deferred_free(rp); - } } static int rcu_no_completed(void) @@ -636,17 +635,6 @@ static void srcu_torture_synchronize(void) synchronize_srcu(&srcu_ctl); } -static void srcu_torture_call(struct rcu_head *head, - void (*func)(struct rcu_head *head)) -{ - call_srcu(&srcu_ctl, head, func); -} - -static void srcu_torture_barrier(void) -{ - srcu_barrier(&srcu_ctl); -} - static int srcu_torture_stats(char *page) { int cnt = 0; @@ -673,8 +661,8 @@ static struct rcu_torture_ops srcu_ops = { .completed = srcu_torture_completed, .deferred_free = srcu_torture_deferred_free, .sync = srcu_torture_synchronize, - .call = srcu_torture_call, - .cb_barrier = srcu_torture_barrier, + .call = NULL, + .cb_barrier = NULL, .stats = srcu_torture_stats, .name = "srcu" }; @@ -1025,11 +1013,7 @@ rcu_torture_fakewriter(void *arg) do { schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10); udelay(rcu_random(&rand) & 0x3ff); - if (cur_ops->cb_barrier != NULL && - rcu_random(&rand) % (nfakewriters * 8) == 0) - cur_ops->cb_barrier(); - else - cur_ops->sync(); + cur_ops->sync(); rcu_stutter_wait("rcu_torture_fakewriter"); } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); @@ -1199,27 +1183,27 @@ rcu_torture_printk(char *page) } cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG); cnt += sprintf(&page[cnt], - "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ", + "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d " + "rtmbe: %d rtbke: %ld rtbre: %ld " + "rtbf: %ld rtb: %ld nt: %ld " + "onoff: %ld/%ld:%ld/%ld " + "barrier: %ld/%ld:%ld", rcu_torture_current, rcu_torture_current_version, list_empty(&rcu_torture_freelist), atomic_read(&n_rcu_torture_alloc), atomic_read(&n_rcu_torture_alloc_fail), - atomic_read(&n_rcu_torture_free)); - cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ", + atomic_read(&n_rcu_torture_free), atomic_read(&n_rcu_torture_mberror), n_rcu_torture_boost_ktrerror, - n_rcu_torture_boost_rterror); - cnt += 
sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ", + n_rcu_torture_boost_rterror, n_rcu_torture_boost_failure, n_rcu_torture_boosts, - n_rcu_torture_timers); - cnt += sprintf(&page[cnt], "onoff: %ld/%ld:%ld/%ld ", + n_rcu_torture_timers, n_online_successes, n_online_attempts, n_offline_successes, - n_offline_attempts); - cnt += sprintf(&page[cnt], "barrier: %ld/%ld:%ld", + n_offline_attempts, n_barrier_successes, n_barrier_attempts, n_rcu_torture_barrier_error); @@ -1461,7 +1445,8 @@ rcu_torture_shutdown(void *arg) delta = shutdown_time - jiffies_snap; if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG - "rcu_torture_shutdown task: %lu jiffies remaining\n", + "rcu_torture_shutdown task: %lu " + "jiffies remaining\n", torture_type, delta); schedule_timeout_interruptible(delta); jiffies_snap = ACCESS_ONCE(jiffies); @@ -1513,7 +1498,8 @@ rcu_torture_onoff(void *arg) if (cpu_down(cpu) == 0) { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG - "rcu_torture_onoff task: offlined %d\n", + "rcu_torture_onoff task: " + "offlined %d\n", torture_type, cpu); n_offline_successes++; } @@ -1526,7 +1512,8 @@ rcu_torture_onoff(void *arg) if (cpu_up(cpu) == 0) { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG - "rcu_torture_onoff task: onlined %d\n", + "rcu_torture_onoff task: " + "onlined %d\n", torture_type, cpu); n_online_successes++; } @@ -1644,7 +1631,6 @@ void rcu_torture_barrier_cbf(struct rcu_head *rcu) static int rcu_torture_barrier_cbs(void *arg) { long myid = (long)arg; - bool lastphase = 0; struct rcu_head rcu; init_rcu_head_on_stack(&rcu); @@ -1652,11 +1638,9 @@ static int rcu_torture_barrier_cbs(void *arg) set_user_nice(current, 19); do { wait_event(barrier_cbs_wq[myid], - barrier_phase != lastphase || + atomic_read(&barrier_cbs_count) == n_barrier_cbs || kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP); - lastphase = barrier_phase; - smp_mb(); /* ensure barrier_phase load before ->call(). */ if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP) break; cur_ops->call(&rcu, rcu_torture_barrier_cbf); @@ -1681,8 +1665,7 @@ static int rcu_torture_barrier(void *arg) do { atomic_set(&barrier_cbs_invoked, 0); atomic_set(&barrier_cbs_count, n_barrier_cbs); - smp_mb(); /* Ensure barrier_phase after prior assignments. */ - barrier_phase = !barrier_phase; + /* wake_up() path contains the required barriers. 
*/ for (i = 0; i < n_barrier_cbs; i++) wake_up(&barrier_cbs_wq[i]); wait_event(barrier_wq, @@ -1701,7 +1684,7 @@ static int rcu_torture_barrier(void *arg) schedule_timeout_interruptible(HZ / 10); } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); VERBOSE_PRINTK_STRING("rcu_torture_barrier task stopping"); - rcutorture_shutdown_absorb("rcu_torture_barrier"); + rcutorture_shutdown_absorb("rcu_torture_barrier_cbs"); while (!kthread_should_stop()) schedule_timeout_interruptible(1); return 0; @@ -1925,8 +1908,8 @@ rcu_torture_init(void) static struct rcu_torture_ops *torture_ops[] = { &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops, &rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops, - &srcu_ops, &srcu_sync_ops, &srcu_expedited_ops, - &srcu_raw_ops, &srcu_raw_sync_ops, + &srcu_ops, &srcu_sync_ops, &srcu_raw_ops, + &srcu_raw_sync_ops, &srcu_expedited_ops, &sched_ops, &sched_sync_ops, &sched_expedited_ops, }; mutex_lock(&fullstop_mutex); @@ -1948,7 +1931,8 @@ rcu_torture_init(void) return -EINVAL; } if (cur_ops->fqs == NULL && fqs_duration != 0) { - printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); + printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero " + "fqs_duration, fqs disabled.\n"); fqs_duration = 0; } if (cur_ops->init) diff --git a/trunk/kernel/rcutree.c b/trunk/kernel/rcutree.c index f280e542e3e9..4b97bba7396e 100644 --- a/trunk/kernel/rcutree.c +++ b/trunk/kernel/rcutree.c @@ -60,44 +60,36 @@ /* Data structures. */ -static struct lock_class_key rcu_node_class[RCU_NUM_LVLS]; - -#define RCU_STATE_INITIALIZER(sname, cr) { \ - .level = { &sname##_state.node[0] }, \ - .call = cr, \ +static struct lock_class_key rcu_node_class[NUM_RCU_LVLS]; + +#define RCU_STATE_INITIALIZER(structname) { \ + .level = { &structname##_state.node[0] }, \ + .levelcnt = { \ + NUM_RCU_LVL_0, /* root of hierarchy. */ \ + NUM_RCU_LVL_1, \ + NUM_RCU_LVL_2, \ + NUM_RCU_LVL_3, \ + NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \ + }, \ .fqs_state = RCU_GP_IDLE, \ .gpnum = -300, \ .completed = -300, \ - .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.onofflock), \ - .orphan_nxttail = &sname##_state.orphan_nxtlist, \ - .orphan_donetail = &sname##_state.orphan_donelist, \ - .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \ - .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.fqslock), \ - .name = #sname, \ + .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.onofflock), \ + .orphan_nxttail = &structname##_state.orphan_nxtlist, \ + .orphan_donetail = &structname##_state.orphan_donelist, \ + .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.fqslock), \ + .n_force_qs = 0, \ + .n_force_qs_ngp = 0, \ + .name = #structname, \ } -struct rcu_state rcu_sched_state = - RCU_STATE_INITIALIZER(rcu_sched, call_rcu_sched); +struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched); DEFINE_PER_CPU(struct rcu_data, rcu_sched_data); -struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh, call_rcu_bh); +struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh); DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); static struct rcu_state *rcu_state; -LIST_HEAD(rcu_struct_flavors); - -/* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */ -static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF; -module_param(rcu_fanout_leaf, int, 0); -int rcu_num_lvls __read_mostly = RCU_NUM_LVLS; -static int num_rcu_lvl[] = { /* Number of rcu_nodes at specified level. 
*/ - NUM_RCU_LVL_0, - NUM_RCU_LVL_1, - NUM_RCU_LVL_2, - NUM_RCU_LVL_3, - NUM_RCU_LVL_4, -}; -int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */ /* * The rcu_scheduler_active variable transitions from zero to one just @@ -155,6 +147,13 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); unsigned long rcutorture_testseq; unsigned long rcutorture_vernum; +/* State information for rcu_barrier() and friends. */ + +static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; +static atomic_t rcu_barrier_cpu_count; +static DEFINE_MUTEX(rcu_barrier_mutex); +static struct completion rcu_barrier_completion; + /* * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s * permit this function to be invoked without holding the root rcu_node @@ -359,7 +358,7 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval) struct task_struct *idle = idle_task(smp_processor_id()); trace_rcu_dyntick("Error on entry: not idle task", oldval, 0); - ftrace_dump(DUMP_ORIG); + ftrace_dump(DUMP_ALL); WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", current->pid, current->comm, idle->pid, idle->comm); /* must be idle task! */ @@ -469,7 +468,7 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval) trace_rcu_dyntick("Error on exit: not idle task", oldval, rdtp->dynticks_nesting); - ftrace_dump(DUMP_ORIG); + ftrace_dump(DUMP_ALL); WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", current->pid, current->comm, idle->pid, idle->comm); /* must be idle task! */ @@ -586,6 +585,8 @@ void rcu_nmi_exit(void) WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1); } +#ifdef CONFIG_PROVE_RCU + /** * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle * @@ -603,7 +604,7 @@ int rcu_is_cpu_idle(void) } EXPORT_SYMBOL(rcu_is_cpu_idle); -#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) +#ifdef CONFIG_HOTPLUG_CPU /* * Is the current CPU online? Disable preemption to avoid false positives @@ -644,7 +645,9 @@ bool rcu_lockdep_current_cpu_online(void) } EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online); -#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */ +#endif /* #ifdef CONFIG_HOTPLUG_CPU */ + +#endif /* #ifdef CONFIG_PROVE_RCU */ /** * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle @@ -730,7 +733,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp) int cpu; long delta; unsigned long flags; - int ndetected = 0; + int ndetected; struct rcu_node *rnp = rcu_get_root(rsp); /* Only let one CPU complain about others per time interval. 
*/ @@ -771,7 +774,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp) */ rnp = rcu_get_root(rsp); raw_spin_lock_irqsave(&rnp->lock, flags); - ndetected += rcu_print_task_stall(rnp); + ndetected = rcu_print_task_stall(rnp); raw_spin_unlock_irqrestore(&rnp->lock, flags); print_cpu_stall_info_end(); @@ -857,10 +860,9 @@ static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr) */ void rcu_cpu_stall_reset(void) { - struct rcu_state *rsp; - - for_each_rcu_flavor(rsp) - rsp->jiffies_stall = jiffies + ULONG_MAX / 2; + rcu_sched_state.jiffies_stall = jiffies + ULONG_MAX / 2; + rcu_bh_state.jiffies_stall = jiffies + ULONG_MAX / 2; + rcu_preempt_stall_reset(); } static struct notifier_block rcu_panic_block = { @@ -892,9 +894,8 @@ static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct if (rnp->qsmask & rdp->grpmask) { rdp->qs_pending = 1; rdp->passed_quiesce = 0; - } else { + } else rdp->qs_pending = 0; - } zero_cpu_stall_ticks(rdp); } } @@ -935,18 +936,6 @@ check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp) return ret; } -/* - * Initialize the specified rcu_data structure's callback list to empty. - */ -static void init_callback_list(struct rcu_data *rdp) -{ - int i; - - rdp->nxtlist = NULL; - for (i = 0; i < RCU_NEXT_SIZE; i++) - rdp->nxttail[i] = &rdp->nxtlist; -} - /* * Advance this CPU's callbacks, but only if the current grace period * has ended. This may be called only from the CPU to whom the rdp @@ -1339,6 +1328,8 @@ static void rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) { + int i; + /* * Orphan the callbacks. First adjust the counts. This is safe * because ->onofflock excludes _rcu_barrier()'s adoption of @@ -1349,7 +1340,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp, rsp->qlen += rdp->qlen; rdp->n_cbs_orphaned += rdp->qlen; rdp->qlen_lazy = 0; - ACCESS_ONCE(rdp->qlen) = 0; + rdp->qlen = 0; } /* @@ -1378,7 +1369,9 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp, } /* Finally, initialize the rcu_data structure's list to empty. */ - init_callback_list(rdp); + rdp->nxtlist = NULL; + for (i = 0; i < RCU_NEXT_SIZE; i++) + rdp->nxttail[i] = &rdp->nxtlist; } /* @@ -1512,9 +1505,6 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) raw_spin_unlock_irqrestore(&rnp->lock, flags); if (need_report & RCU_OFL_TASKS_EXP_GP) rcu_report_exp_rnp(rsp, rnp, true); - WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL, - "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n", - cpu, rdp->qlen, rdp->nxtlist); } #else /* #ifdef CONFIG_HOTPLUG_CPU */ @@ -1602,7 +1592,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) } smp_mb(); /* List handling before counting for rcu_barrier(). */ rdp->qlen_lazy -= count_lazy; - ACCESS_ONCE(rdp->qlen) -= count; + rdp->qlen -= count; rdp->n_cbs_invoked += count; /* Reinstate batch limit if we have worked down the excess. */ @@ -1615,7 +1605,6 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) rdp->n_force_qs_snap = rsp->n_force_qs; } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark) rdp->qlen_last_fqs_check = rdp->qlen; - WARN_ON_ONCE((rdp->nxtlist == NULL) != (rdp->qlen == 0)); local_irq_restore(flags); @@ -1756,6 +1745,8 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) break; /* grace period idle or initializing, ignore. 
*/ case RCU_SAVE_DYNTICK: + if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK) + break; /* So gcc recognizes the dead code. */ raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ @@ -1797,10 +1788,9 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) * whom the rdp belongs. */ static void -__rcu_process_callbacks(struct rcu_state *rsp) +__rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) { unsigned long flags; - struct rcu_data *rdp = __this_cpu_ptr(rsp->rda); WARN_ON_ONCE(rdp->beenonline == 0); @@ -1836,11 +1826,11 @@ __rcu_process_callbacks(struct rcu_state *rsp) */ static void rcu_process_callbacks(struct softirq_action *unused) { - struct rcu_state *rsp; - trace_rcu_utilization("Start RCU core"); - for_each_rcu_flavor(rsp) - __rcu_process_callbacks(rsp); + __rcu_process_callbacks(&rcu_sched_state, + &__get_cpu_var(rcu_sched_data)); + __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data)); + rcu_preempt_process_callbacks(); trace_rcu_utilization("End RCU core"); } @@ -1867,56 +1857,6 @@ static void invoke_rcu_core(void) raise_softirq(RCU_SOFTIRQ); } -/* - * Handle any core-RCU processing required by a call_rcu() invocation. - */ -static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp, - struct rcu_head *head, unsigned long flags) -{ - /* - * If called from an extended quiescent state, invoke the RCU - * core in order to force a re-evaluation of RCU's idleness. - */ - if (rcu_is_cpu_idle() && cpu_online(smp_processor_id())) - invoke_rcu_core(); - - /* If interrupts were disabled or CPU offline, don't invoke RCU core. */ - if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id())) - return; - - /* - * Force the grace period if too many callbacks or too long waiting. - * Enforce hysteresis, and don't invoke force_quiescent_state() - * if some other CPU has recently done so. Also, don't bother - * invoking force_quiescent_state() if the newly enqueued callback - * is the only one waiting for a grace period to complete. - */ - if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) { - - /* Are we ignoring a completed grace period? */ - rcu_process_gp_end(rsp, rdp); - check_for_new_grace_period(rsp, rdp); - - /* Start a new grace period if one not already started. */ - if (!rcu_gp_in_progress(rsp)) { - unsigned long nestflag; - struct rcu_node *rnp_root = rcu_get_root(rsp); - - raw_spin_lock_irqsave(&rnp_root->lock, nestflag); - rcu_start_gp(rsp, nestflag); /* rlses rnp_root->lock */ - } else { - /* Give the grace period a kick. */ - rdp->blimit = LONG_MAX; - if (rsp->n_force_qs == rdp->n_force_qs_snap && - *rdp->nxttail[RCU_DONE_TAIL] != head) - force_quiescent_state(rsp, 0); - rdp->n_force_qs_snap = rsp->n_force_qs; - rdp->qlen_last_fqs_check = rdp->qlen; - } - } else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) - force_quiescent_state(rsp, 1); -} - static void __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), struct rcu_state *rsp, bool lazy) @@ -1941,7 +1881,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), rdp = this_cpu_ptr(rsp->rda); /* Add the callback to our list. */ - ACCESS_ONCE(rdp->qlen)++; + rdp->qlen++; if (lazy) rdp->qlen_lazy++; else @@ -1956,8 +1896,43 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), else trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen); - /* Go handle any RCU core processing required. 
*/ - __call_rcu_core(rsp, rdp, head, flags); + /* If interrupts were disabled, don't dive into RCU core. */ + if (irqs_disabled_flags(flags)) { + local_irq_restore(flags); + return; + } + + /* + * Force the grace period if too many callbacks or too long waiting. + * Enforce hysteresis, and don't invoke force_quiescent_state() + * if some other CPU has recently done so. Also, don't bother + * invoking force_quiescent_state() if the newly enqueued callback + * is the only one waiting for a grace period to complete. + */ + if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) { + + /* Are we ignoring a completed grace period? */ + rcu_process_gp_end(rsp, rdp); + check_for_new_grace_period(rsp, rdp); + + /* Start a new grace period if one not already started. */ + if (!rcu_gp_in_progress(rsp)) { + unsigned long nestflag; + struct rcu_node *rnp_root = rcu_get_root(rsp); + + raw_spin_lock_irqsave(&rnp_root->lock, nestflag); + rcu_start_gp(rsp, nestflag); /* rlses rnp_root->lock */ + } else { + /* Give the grace period a kick. */ + rdp->blimit = LONG_MAX; + if (rsp->n_force_qs == rdp->n_force_qs_snap && + *rdp->nxttail[RCU_DONE_TAIL] != head) + force_quiescent_state(rsp, 0); + rdp->n_force_qs_snap = rsp->n_force_qs; + rdp->qlen_last_fqs_check = rdp->qlen; + } + } else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) + force_quiescent_state(rsp, 1); local_irq_restore(flags); } @@ -1987,16 +1962,28 @@ EXPORT_SYMBOL_GPL(call_rcu_bh); * occasionally incorrectly indicate that there are multiple CPUs online * when there was in fact only one the whole time, as this just adds * some overhead: RCU still operates correctly. + * + * Of course, sampling num_online_cpus() with preemption enabled can + * give erroneous results if there are concurrent CPU-hotplug operations. + * For example, given a demonic sequence of preemptions in num_online_cpus() + * and CPU-hotplug operations, there could be two or more CPUs online at + * all times, but num_online_cpus() might well return one (or even zero). + * + * However, all such demonic sequences require at least one CPU-offline + * operation. Furthermore, rcu_blocking_is_gp() giving the wrong answer + * is only a problem if there is an RCU read-side critical section executing + * throughout. But RCU-sched and RCU-bh read-side critical sections + * disable either preemption or bh, which prevents a CPU from going offline. + * Therefore, the only way that rcu_blocking_is_gp() can incorrectly return + * that there is only one CPU when in fact there was more than one throughout + * is when there were no RCU readers in the system. If there are no + * RCU readers, the grace period by definition can be of zero length, + * regardless of the number of online CPUs. */ static inline int rcu_blocking_is_gp(void) { - int ret; - might_sleep(); /* Check for RCU read-side critical section. */ - preempt_disable(); - ret = num_online_cpus() <= 1; - preempt_enable(); - return ret; + return num_online_cpus() <= 1; } /** @@ -2131,9 +2118,9 @@ void synchronize_sched_expedited(void) put_online_cpus(); /* No joy, try again later. Or just synchronize_sched(). 
*/ - if (trycount++ < 10) { + if (trycount++ < 10) udelay(trycount * num_online_cpus()); - } else { + else { synchronize_sched(); return; } @@ -2254,12 +2241,9 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) */ static int rcu_pending(int cpu) { - struct rcu_state *rsp; - - for_each_rcu_flavor(rsp) - if (__rcu_pending(rsp, per_cpu_ptr(rsp->rda, cpu))) - return 1; - return 0; + return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) || + __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) || + rcu_preempt_pending(cpu); } /* @@ -2269,41 +2253,20 @@ static int rcu_pending(int cpu) */ static int rcu_cpu_has_callbacks(int cpu) { - struct rcu_state *rsp; - /* RCU callbacks either ready or pending? */ - for_each_rcu_flavor(rsp) - if (per_cpu_ptr(rsp->rda, cpu)->nxtlist) - return 1; - return 0; -} - -/* - * Helper function for _rcu_barrier() tracing. If tracing is disabled, - * the compiler is expected to optimize this away. - */ -static void _rcu_barrier_trace(struct rcu_state *rsp, char *s, - int cpu, unsigned long done) -{ - trace_rcu_barrier(rsp->name, s, cpu, - atomic_read(&rsp->barrier_cpu_count), done); + return per_cpu(rcu_sched_data, cpu).nxtlist || + per_cpu(rcu_bh_data, cpu).nxtlist || + rcu_preempt_cpu_has_callbacks(cpu); } /* * RCU callback function for _rcu_barrier(). If we are last, wake * up the task executing _rcu_barrier(). */ -static void rcu_barrier_callback(struct rcu_head *rhp) +static void rcu_barrier_callback(struct rcu_head *notused) { - struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head); - struct rcu_state *rsp = rdp->rsp; - - if (atomic_dec_and_test(&rsp->barrier_cpu_count)) { - _rcu_barrier_trace(rsp, "LastCB", -1, rsp->n_barrier_done); - complete(&rsp->barrier_completion); - } else { - _rcu_barrier_trace(rsp, "CB", -1, rsp->n_barrier_done); - } + if (atomic_dec_and_test(&rcu_barrier_cpu_count)) + complete(&rcu_barrier_completion); } /* @@ -2311,63 +2274,35 @@ static void rcu_barrier_callback(struct rcu_head *rhp) */ static void rcu_barrier_func(void *type) { - struct rcu_state *rsp = type; - struct rcu_data *rdp = __this_cpu_ptr(rsp->rda); + int cpu = smp_processor_id(); + struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu); + void (*call_rcu_func)(struct rcu_head *head, + void (*func)(struct rcu_head *head)); - _rcu_barrier_trace(rsp, "IRQ", -1, rsp->n_barrier_done); - atomic_inc(&rsp->barrier_cpu_count); - rsp->call(&rdp->barrier_head, rcu_barrier_callback); + atomic_inc(&rcu_barrier_cpu_count); + call_rcu_func = type; + call_rcu_func(head, rcu_barrier_callback); } /* * Orchestrate the specified type of RCU barrier, waiting for all * RCU callbacks of the specified type to complete. */ -static void _rcu_barrier(struct rcu_state *rsp) +static void _rcu_barrier(struct rcu_state *rsp, + void (*call_rcu_func)(struct rcu_head *head, + void (*func)(struct rcu_head *head))) { int cpu; unsigned long flags; struct rcu_data *rdp; - struct rcu_data rd; - unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done); - unsigned long snap_done; + struct rcu_head rh; - init_rcu_head_on_stack(&rd.barrier_head); - _rcu_barrier_trace(rsp, "Begin", -1, snap); + init_rcu_head_on_stack(&rh); /* Take mutex to serialize concurrent rcu_barrier() requests. */ - mutex_lock(&rsp->barrier_mutex); - - /* - * Ensure that all prior references, including to ->n_barrier_done, - * are ordered before the _rcu_barrier() machinery. - */ - smp_mb(); /* See above block comment. 
*/ - - /* - * Recheck ->n_barrier_done to see if others did our work for us. - * This means checking ->n_barrier_done for an even-to-odd-to-even - * transition. The "if" expression below therefore rounds the old - * value up to the next even number and adds two before comparing. - */ - snap_done = ACCESS_ONCE(rsp->n_barrier_done); - _rcu_barrier_trace(rsp, "Check", -1, snap_done); - if (ULONG_CMP_GE(snap_done, ((snap + 1) & ~0x1) + 2)) { - _rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done); - smp_mb(); /* caller's subsequent code after above check. */ - mutex_unlock(&rsp->barrier_mutex); - return; - } + mutex_lock(&rcu_barrier_mutex); - /* - * Increment ->n_barrier_done to avoid duplicate work. Use - * ACCESS_ONCE() to prevent the compiler from speculating - * the increment to precede the early-exit check. - */ - ACCESS_ONCE(rsp->n_barrier_done)++; - WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1); - _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done); - smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */ + smp_mb(); /* Prevent any prior operations from leaking in. */ /* * Initialize the count to one rather than to zero in order to @@ -2386,8 +2321,8 @@ static void _rcu_barrier(struct rcu_state *rsp) * 6. Both rcu_barrier_callback() callbacks are invoked, awakening * us -- but before CPU 1's orphaned callbacks are invoked!!! */ - init_completion(&rsp->barrier_completion); - atomic_set(&rsp->barrier_cpu_count, 1); + init_completion(&rcu_barrier_completion); + atomic_set(&rcu_barrier_cpu_count, 1); raw_spin_lock_irqsave(&rsp->onofflock, flags); rsp->rcu_barrier_in_progress = current; raw_spin_unlock_irqrestore(&rsp->onofflock, flags); @@ -2403,19 +2338,14 @@ static void _rcu_barrier(struct rcu_state *rsp) preempt_disable(); rdp = per_cpu_ptr(rsp->rda, cpu); if (cpu_is_offline(cpu)) { - _rcu_barrier_trace(rsp, "Offline", cpu, - rsp->n_barrier_done); preempt_enable(); while (cpu_is_offline(cpu) && ACCESS_ONCE(rdp->qlen)) schedule_timeout_interruptible(1); } else if (ACCESS_ONCE(rdp->qlen)) { - _rcu_barrier_trace(rsp, "OnlineQ", cpu, - rsp->n_barrier_done); - smp_call_function_single(cpu, rcu_barrier_func, rsp, 1); + smp_call_function_single(cpu, rcu_barrier_func, + (void *)call_rcu_func, 1); preempt_enable(); } else { - _rcu_barrier_trace(rsp, "OnlineNQ", cpu, - rsp->n_barrier_done); preempt_enable(); } } @@ -2432,32 +2362,24 @@ static void _rcu_barrier(struct rcu_state *rsp) rcu_adopt_orphan_cbs(rsp); rsp->rcu_barrier_in_progress = NULL; raw_spin_unlock_irqrestore(&rsp->onofflock, flags); - atomic_inc(&rsp->barrier_cpu_count); + atomic_inc(&rcu_barrier_cpu_count); smp_mb__after_atomic_inc(); /* Ensure atomic_inc() before callback. */ - rd.rsp = rsp; - rsp->call(&rd.barrier_head, rcu_barrier_callback); + call_rcu_func(&rh, rcu_barrier_callback); /* * Now that we have an rcu_barrier_callback() callback on each * CPU, and thus each counted, remove the initial count. */ - if (atomic_dec_and_test(&rsp->barrier_cpu_count)) - complete(&rsp->barrier_completion); - - /* Increment ->n_barrier_done to prevent duplicate work. */ - smp_mb(); /* Keep increment after above mechanism. */ - ACCESS_ONCE(rsp->n_barrier_done)++; - WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0); - _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done); - smp_mb(); /* Keep increment before caller's subsequent code. */ + if (atomic_dec_and_test(&rcu_barrier_cpu_count)) + complete(&rcu_barrier_completion); /* Wait for all rcu_barrier_callback() callbacks to be invoked. 
*/ - wait_for_completion(&rsp->barrier_completion); + wait_for_completion(&rcu_barrier_completion); /* Other rcu_barrier() invocations can now safely proceed. */ - mutex_unlock(&rsp->barrier_mutex); + mutex_unlock(&rcu_barrier_mutex); - destroy_rcu_head_on_stack(&rd.barrier_head); + destroy_rcu_head_on_stack(&rh); } /** @@ -2465,7 +2387,7 @@ static void _rcu_barrier(struct rcu_state *rsp) */ void rcu_barrier_bh(void) { - _rcu_barrier(&rcu_bh_state); + _rcu_barrier(&rcu_bh_state, call_rcu_bh); } EXPORT_SYMBOL_GPL(rcu_barrier_bh); @@ -2474,7 +2396,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_bh); */ void rcu_barrier_sched(void) { - _rcu_barrier(&rcu_sched_state); + _rcu_barrier(&rcu_sched_state, call_rcu_sched); } EXPORT_SYMBOL_GPL(rcu_barrier_sched); @@ -2485,15 +2407,18 @@ static void __init rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) { unsigned long flags; + int i; struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); struct rcu_node *rnp = rcu_get_root(rsp); /* Set up local state, ensuring consistent view of global state. */ raw_spin_lock_irqsave(&rnp->lock, flags); rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo); - init_callback_list(rdp); + rdp->nxtlist = NULL; + for (i = 0; i < RCU_NEXT_SIZE; i++) + rdp->nxttail[i] = &rdp->nxtlist; rdp->qlen_lazy = 0; - ACCESS_ONCE(rdp->qlen) = 0; + rdp->qlen = 0; rdp->dynticks = &per_cpu(rcu_dynticks, cpu); WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE); WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1); @@ -2567,11 +2492,9 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible) static void __cpuinit rcu_prepare_cpu(int cpu) { - struct rcu_state *rsp; - - for_each_rcu_flavor(rsp) - rcu_init_percpu_data(cpu, rsp, - strcmp(rsp->name, "rcu_preempt") == 0); + rcu_init_percpu_data(cpu, &rcu_sched_state, 0); + rcu_init_percpu_data(cpu, &rcu_bh_state, 0); + rcu_preempt_init_percpu_data(cpu); } /* @@ -2583,7 +2506,6 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self, long cpu = (long)hcpu; struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); struct rcu_node *rnp = rdp->mynode; - struct rcu_state *rsp; trace_rcu_utilization("Start CPU hotplug"); switch (action) { @@ -2608,16 +2530,18 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self, * touch any data without introducing corruption. We send the * dying CPU's callbacks to an arbitrarily chosen online CPU. 
*/ - for_each_rcu_flavor(rsp) - rcu_cleanup_dying_cpu(rsp); + rcu_cleanup_dying_cpu(&rcu_bh_state); + rcu_cleanup_dying_cpu(&rcu_sched_state); + rcu_preempt_cleanup_dying_cpu(); rcu_cleanup_after_idle(cpu); break; case CPU_DEAD: case CPU_DEAD_FROZEN: case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: - for_each_rcu_flavor(rsp) - rcu_cleanup_dead_cpu(cpu, rsp); + rcu_cleanup_dead_cpu(cpu, &rcu_bh_state); + rcu_cleanup_dead_cpu(cpu, &rcu_sched_state); + rcu_preempt_cleanup_dead_cpu(cpu); break; default: break; @@ -2650,9 +2574,9 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp) { int i; - for (i = rcu_num_lvls - 1; i > 0; i--) + for (i = NUM_RCU_LVLS - 1; i > 0; i--) rsp->levelspread[i] = CONFIG_RCU_FANOUT; - rsp->levelspread[0] = rcu_fanout_leaf; + rsp->levelspread[0] = CONFIG_RCU_FANOUT_LEAF; } #else /* #ifdef CONFIG_RCU_FANOUT_EXACT */ static void __init rcu_init_levelspread(struct rcu_state *rsp) @@ -2662,7 +2586,7 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp) int i; cprv = NR_CPUS; - for (i = rcu_num_lvls - 1; i >= 0; i--) { + for (i = NUM_RCU_LVLS - 1; i >= 0; i--) { ccur = rsp->levelcnt[i]; rsp->levelspread[i] = (cprv + ccur - 1) / ccur; cprv = ccur; @@ -2689,15 +2613,13 @@ static void __init rcu_init_one(struct rcu_state *rsp, /* Initialize the level-tracking arrays. */ - for (i = 0; i < rcu_num_lvls; i++) - rsp->levelcnt[i] = num_rcu_lvl[i]; - for (i = 1; i < rcu_num_lvls; i++) + for (i = 1; i < NUM_RCU_LVLS; i++) rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1]; rcu_init_levelspread(rsp); /* Initialize the elements themselves, starting from the leaves. */ - for (i = rcu_num_lvls - 1; i >= 0; i--) { + for (i = NUM_RCU_LVLS - 1; i >= 0; i--) { cpustride *= rsp->levelspread[i]; rnp = rsp->level[i]; for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { @@ -2727,74 +2649,13 @@ static void __init rcu_init_one(struct rcu_state *rsp, } rsp->rda = rda; - rnp = rsp->level[rcu_num_lvls - 1]; + rnp = rsp->level[NUM_RCU_LVLS - 1]; for_each_possible_cpu(i) { while (i > rnp->grphi) rnp++; per_cpu_ptr(rsp->rda, i)->mynode = rnp; rcu_boot_init_percpu_data(i, rsp); } - list_add(&rsp->flavors, &rcu_struct_flavors); -} - -/* - * Compute the rcu_node tree geometry from kernel parameters. This cannot - * replace the definitions in rcutree.h because those are needed to size - * the ->node array in the rcu_state structure. - */ -static void __init rcu_init_geometry(void) -{ - int i; - int j; - int n = nr_cpu_ids; - int rcu_capacity[MAX_RCU_LVLS + 1]; - - /* If the compile-time values are accurate, just leave. */ - if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF) - return; - - /* - * Compute number of nodes that can be handled an rcu_node tree - * with the given number of levels. Setting rcu_capacity[0] makes - * some of the arithmetic easier. - */ - rcu_capacity[0] = 1; - rcu_capacity[1] = rcu_fanout_leaf; - for (i = 2; i <= MAX_RCU_LVLS; i++) - rcu_capacity[i] = rcu_capacity[i - 1] * CONFIG_RCU_FANOUT; - - /* - * The boot-time rcu_fanout_leaf parameter is only permitted - * to increase the leaf-level fanout, not decrease it. Of course, - * the leaf-level fanout cannot exceed the number of bits in - * the rcu_node masks. Finally, the tree must be able to accommodate - * the configured number of CPUs. Complain and fall back to the - * compile-time values if these limits are exceeded. 
- */ - if (rcu_fanout_leaf < CONFIG_RCU_FANOUT_LEAF || - rcu_fanout_leaf > sizeof(unsigned long) * 8 || - n > rcu_capacity[MAX_RCU_LVLS]) { - WARN_ON(1); - return; - } - - /* Calculate the number of rcu_nodes at each level of the tree. */ - for (i = 1; i <= MAX_RCU_LVLS; i++) - if (n <= rcu_capacity[i]) { - for (j = 0; j <= i; j++) - num_rcu_lvl[j] = - DIV_ROUND_UP(n, rcu_capacity[i - j]); - rcu_num_lvls = i; - for (j = i + 1; j <= MAX_RCU_LVLS; j++) - num_rcu_lvl[j] = 0; - break; - } - - /* Calculate the total number of rcu_node structures. */ - rcu_num_nodes = 0; - for (i = 0; i <= MAX_RCU_LVLS; i++) - rcu_num_nodes += num_rcu_lvl[i]; - rcu_num_nodes -= n; } void __init rcu_init(void) @@ -2802,7 +2663,6 @@ void __init rcu_init(void) int cpu; rcu_bootup_announce(); - rcu_init_geometry(); rcu_init_one(&rcu_sched_state, &rcu_sched_data); rcu_init_one(&rcu_bh_state, &rcu_bh_data); __rcu_init_preempt(); diff --git a/trunk/kernel/rcutree.h b/trunk/kernel/rcutree.h index 4d29169f2124..19b61ac1079f 100644 --- a/trunk/kernel/rcutree.h +++ b/trunk/kernel/rcutree.h @@ -42,28 +42,28 @@ #define RCU_FANOUT_4 (RCU_FANOUT_3 * CONFIG_RCU_FANOUT) #if NR_CPUS <= RCU_FANOUT_1 -# define RCU_NUM_LVLS 1 +# define NUM_RCU_LVLS 1 # define NUM_RCU_LVL_0 1 # define NUM_RCU_LVL_1 (NR_CPUS) # define NUM_RCU_LVL_2 0 # define NUM_RCU_LVL_3 0 # define NUM_RCU_LVL_4 0 #elif NR_CPUS <= RCU_FANOUT_2 -# define RCU_NUM_LVLS 2 +# define NUM_RCU_LVLS 2 # define NUM_RCU_LVL_0 1 # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1) # define NUM_RCU_LVL_2 (NR_CPUS) # define NUM_RCU_LVL_3 0 # define NUM_RCU_LVL_4 0 #elif NR_CPUS <= RCU_FANOUT_3 -# define RCU_NUM_LVLS 3 +# define NUM_RCU_LVLS 3 # define NUM_RCU_LVL_0 1 # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2) # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1) # define NUM_RCU_LVL_3 (NR_CPUS) # define NUM_RCU_LVL_4 0 #elif NR_CPUS <= RCU_FANOUT_4 -# define RCU_NUM_LVLS 4 +# define NUM_RCU_LVLS 4 # define NUM_RCU_LVL_0 1 # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3) # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2) @@ -76,9 +76,6 @@ #define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4) #define NUM_RCU_NODES (RCU_SUM - NR_CPUS) -extern int rcu_num_lvls; -extern int rcu_num_nodes; - /* * Dynticks per-CPU state. */ @@ -100,7 +97,6 @@ struct rcu_dynticks { /* # times non-lazy CBs posted to CPU. */ unsigned long nonlazy_posted_snap; /* idle-period nonlazy_posted snapshot. */ - int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */ #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ }; @@ -210,7 +206,7 @@ struct rcu_node { */ #define rcu_for_each_node_breadth_first(rsp, rnp) \ for ((rnp) = &(rsp)->node[0]; \ - (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++) + (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++) /* * Do a breadth-first scan of the non-leaf rcu_node structures for the @@ -219,7 +215,7 @@ struct rcu_node { */ #define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \ for ((rnp) = &(rsp)->node[0]; \ - (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++) + (rnp) < (rsp)->level[NUM_RCU_LVLS - 1]; (rnp)++) /* * Scan the leaves of the rcu_node hierarchy for the specified rcu_state @@ -228,8 +224,8 @@ struct rcu_node { * It is still a leaf node, even if it is also the root node. 
*/ #define rcu_for_each_leaf_node(rsp, rnp) \ - for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \ - (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++) + for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \ + (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++) /* Index values for nxttail array in struct rcu_data. */ #define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */ @@ -315,9 +311,6 @@ struct rcu_data { unsigned long n_rp_need_fqs; unsigned long n_rp_need_nothing; - /* 6) _rcu_barrier() callback. */ - struct rcu_head barrier_head; - int cpu; struct rcu_state *rsp; }; @@ -364,12 +357,10 @@ do { \ */ struct rcu_state { struct rcu_node node[NUM_RCU_NODES]; /* Hierarchy. */ - struct rcu_node *level[RCU_NUM_LVLS]; /* Hierarchy levels. */ + struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */ u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */ - u8 levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */ + u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */ struct rcu_data __percpu *rda; /* pointer of percu rcu_data. */ - void (*call)(struct rcu_head *head, /* call_rcu() flavor. */ - void (*func)(struct rcu_head *head)); /* The following fields are guarded by the root rcu_node's lock. */ @@ -401,11 +392,6 @@ struct rcu_state { struct task_struct *rcu_barrier_in_progress; /* Task doing rcu_barrier(), */ /* or NULL if no barrier. */ - struct mutex barrier_mutex; /* Guards barrier fields. */ - atomic_t barrier_cpu_count; /* # CPUs waiting on. */ - struct completion barrier_completion; /* Wake at barrier end. */ - unsigned long n_barrier_done; /* ++ at start and end of */ - /* _rcu_barrier(). */ raw_spinlock_t fqslock; /* Only one task forcing */ /* quiescent states. */ unsigned long jiffies_force_qs; /* Time at which to invoke */ @@ -423,13 +409,8 @@ struct rcu_state { unsigned long gp_max; /* Maximum GP duration in */ /* jiffies. */ char *name; /* Name of structure. */ - struct list_head flavors; /* List of RCU flavors. */ }; -extern struct list_head rcu_struct_flavors; -#define for_each_rcu_flavor(rsp) \ - list_for_each_entry((rsp), &rcu_struct_flavors, flavors) - /* Return values for rcu_preempt_offline_tasks(). 
*/ #define RCU_OFL_TASKS_NORM_GP 0x1 /* Tasks blocking normal */ @@ -472,18 +453,25 @@ static void rcu_stop_cpu_kthread(int cpu); #endif /* #ifdef CONFIG_HOTPLUG_CPU */ static void rcu_print_detail_task_stall(struct rcu_state *rsp); static int rcu_print_task_stall(struct rcu_node *rnp); +static void rcu_preempt_stall_reset(void); static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); #ifdef CONFIG_HOTPLUG_CPU static int rcu_preempt_offline_tasks(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp); #endif /* #ifdef CONFIG_HOTPLUG_CPU */ +static void rcu_preempt_cleanup_dead_cpu(int cpu); static void rcu_preempt_check_callbacks(int cpu); +static void rcu_preempt_process_callbacks(void); void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, bool wake); #endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */ +static int rcu_preempt_pending(int cpu); +static int rcu_preempt_cpu_has_callbacks(int cpu); +static void __cpuinit rcu_preempt_init_percpu_data(int cpu); +static void rcu_preempt_cleanup_dying_cpu(void); static void __init __rcu_init_preempt(void); static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); diff --git a/trunk/kernel/rcutree_plugin.h b/trunk/kernel/rcutree_plugin.h index 7f3244c0df01..3e4899459f3d 100644 --- a/trunk/kernel/rcutree_plugin.h +++ b/trunk/kernel/rcutree_plugin.h @@ -68,21 +68,17 @@ static void __init rcu_bootup_announce_oddness(void) printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n"); #endif #if NUM_RCU_LVL_4 != 0 - printk(KERN_INFO "\tFour-level hierarchy is enabled.\n"); + printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n"); #endif - if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF) - printk(KERN_INFO "\tExperimental boot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf); - if (nr_cpu_ids != NR_CPUS) - printk(KERN_INFO "\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids); } #ifdef CONFIG_TREE_PREEMPT_RCU -struct rcu_state rcu_preempt_state = - RCU_STATE_INITIALIZER(rcu_preempt, call_rcu); +struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt); DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data); static struct rcu_state *rcu_state = &rcu_preempt_state; +static void rcu_read_unlock_special(struct task_struct *t); static int rcu_preempted_readers_exp(struct rcu_node *rnp); /* @@ -236,6 +232,18 @@ static void rcu_preempt_note_context_switch(int cpu) local_irq_restore(flags); } +/* + * Tree-preemptible RCU implementation for rcu_read_lock(). + * Just increment ->rcu_read_lock_nesting, shared state will be updated + * if we block. + */ +void __rcu_read_lock(void) +{ + current->rcu_read_lock_nesting++; + barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */ +} +EXPORT_SYMBOL_GPL(__rcu_read_lock); + /* * Check for preempted RCU readers blocking the current grace period * for the specified rcu_node structure. If the caller needs a reliable @@ -302,7 +310,7 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t, * notify RCU core processing or task having blocked during the RCU * read-side critical section. 
*/ -void rcu_read_unlock_special(struct task_struct *t) +static noinline void rcu_read_unlock_special(struct task_struct *t) { int empty; int empty_exp; @@ -390,9 +398,8 @@ void rcu_read_unlock_special(struct task_struct *t) rnp->grphi, !!rnp->gp_tasks); rcu_report_unblock_qs_rnp(rnp, flags); - } else { + } else raw_spin_unlock_irqrestore(&rnp->lock, flags); - } #ifdef CONFIG_RCU_BOOST /* Unboost if we were boosted. */ @@ -411,6 +418,38 @@ void rcu_read_unlock_special(struct task_struct *t) } } +/* + * Tree-preemptible RCU implementation for rcu_read_unlock(). + * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost + * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then + * invoke rcu_read_unlock_special() to clean up after a context switch + * in an RCU read-side critical section and other special cases. + */ +void __rcu_read_unlock(void) +{ + struct task_struct *t = current; + + if (t->rcu_read_lock_nesting != 1) + --t->rcu_read_lock_nesting; + else { + barrier(); /* critical section before exit code. */ + t->rcu_read_lock_nesting = INT_MIN; + barrier(); /* assign before ->rcu_read_unlock_special load */ + if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) + rcu_read_unlock_special(t); + barrier(); /* ->rcu_read_unlock_special load before assign */ + t->rcu_read_lock_nesting = 0; + } +#ifdef CONFIG_PROVE_LOCKING + { + int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting); + + WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2); + } +#endif /* #ifdef CONFIG_PROVE_LOCKING */ +} +EXPORT_SYMBOL_GPL(__rcu_read_unlock); + #ifdef CONFIG_RCU_CPU_STALL_VERBOSE /* @@ -500,6 +539,16 @@ static int rcu_print_task_stall(struct rcu_node *rnp) return ndetected; } +/* + * Suppress preemptible RCU's CPU stall warnings by pushing the + * time of the next stall-warning message comfortably far into the + * future. + */ +static void rcu_preempt_stall_reset(void) +{ + rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2; +} + /* * Check that the list of blocked tasks for the newly completed grace * period is in fact empty. It is a serious bug to complete a grace @@ -600,6 +649,14 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp, #endif /* #ifdef CONFIG_HOTPLUG_CPU */ +/* + * Do CPU-offline processing for preemptible RCU. + */ +static void rcu_preempt_cleanup_dead_cpu(int cpu) +{ + rcu_cleanup_dead_cpu(cpu, &rcu_preempt_state); +} + /* * Check for a quiescent state from the current CPU. When a task blocks, * the task is recorded in the corresponding CPU's rcu_node structure, @@ -620,6 +677,15 @@ static void rcu_preempt_check_callbacks(int cpu) t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; } +/* + * Process callbacks for preemptible RCU. + */ +static void rcu_preempt_process_callbacks(void) +{ + __rcu_process_callbacks(&rcu_preempt_state, + &__get_cpu_var(rcu_preempt_data)); +} + #ifdef CONFIG_RCU_BOOST static void rcu_preempt_do_callbacks(void) @@ -758,9 +824,9 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp) int must_wait = 0; raw_spin_lock_irqsave(&rnp->lock, flags); - if (list_empty(&rnp->blkd_tasks)) { + if (list_empty(&rnp->blkd_tasks)) raw_spin_unlock_irqrestore(&rnp->lock, flags); - } else { + else { rnp->exp_tasks = rnp->blkd_tasks.next; rcu_initiate_boost(rnp, flags); /* releases rnp->lock */ must_wait = 1; @@ -804,9 +870,9 @@ void synchronize_rcu_expedited(void) * expedited grace period for us, just leave. 
*/ while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) { - if (trycount++ < 10) { + if (trycount++ < 10) udelay(trycount * num_online_cpus()); - } else { + else { synchronize_rcu(); return; } @@ -851,15 +917,50 @@ void synchronize_rcu_expedited(void) } EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); +/* + * Check to see if there is any immediate preemptible-RCU-related work + * to be done. + */ +static int rcu_preempt_pending(int cpu) +{ + return __rcu_pending(&rcu_preempt_state, + &per_cpu(rcu_preempt_data, cpu)); +} + +/* + * Does preemptible RCU have callbacks on this CPU? + */ +static int rcu_preempt_cpu_has_callbacks(int cpu) +{ + return !!per_cpu(rcu_preempt_data, cpu).nxtlist; +} + /** * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete. */ void rcu_barrier(void) { - _rcu_barrier(&rcu_preempt_state); + _rcu_barrier(&rcu_preempt_state, call_rcu); } EXPORT_SYMBOL_GPL(rcu_barrier); +/* + * Initialize preemptible RCU's per-CPU data. + */ +static void __cpuinit rcu_preempt_init_percpu_data(int cpu) +{ + rcu_init_percpu_data(cpu, &rcu_preempt_state, 1); +} + +/* + * Move preemptible RCU's callbacks from dying CPU to other online CPU + * and record a quiescent state. + */ +static void rcu_preempt_cleanup_dying_cpu(void) +{ + rcu_cleanup_dying_cpu(&rcu_preempt_state); +} + /* * Initialize preemptible RCU's state structures. */ @@ -944,6 +1045,14 @@ static int rcu_print_task_stall(struct rcu_node *rnp) return 0; } +/* + * Because preemptible RCU does not exist, there is no need to suppress + * its CPU stall warnings. + */ +static void rcu_preempt_stall_reset(void) +{ +} + /* * Because there is no preemptible RCU, there can be no readers blocked, * so there is no need to check for blocked tasks. So check only for @@ -971,6 +1080,14 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp, #endif /* #ifdef CONFIG_HOTPLUG_CPU */ +/* + * Because preemptible RCU does not exist, it never needs CPU-offline + * processing. + */ +static void rcu_preempt_cleanup_dead_cpu(int cpu) +{ +} + /* * Because preemptible RCU does not exist, it never has any callbacks * to check. @@ -979,6 +1096,14 @@ static void rcu_preempt_check_callbacks(int cpu) { } +/* + * Because preemptible RCU does not exist, it never has any callbacks + * to process. + */ +static void rcu_preempt_process_callbacks(void) +{ +} + /* * Queue an RCU callback for lazy invocation after a grace period. * This will likely be later named something like "call_rcu_lazy()", @@ -1019,6 +1144,22 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, #endif /* #ifdef CONFIG_HOTPLUG_CPU */ +/* + * Because preemptible RCU does not exist, it never has any work to do. + */ +static int rcu_preempt_pending(int cpu) +{ + return 0; +} + +/* + * Because preemptible RCU does not exist, it never has callbacks + */ +static int rcu_preempt_cpu_has_callbacks(int cpu) +{ + return 0; +} + /* * Because preemptible RCU does not exist, rcu_barrier() is just * another name for rcu_barrier_sched(). @@ -1029,6 +1170,21 @@ void rcu_barrier(void) } EXPORT_SYMBOL_GPL(rcu_barrier); +/* + * Because preemptible RCU does not exist, there is no per-CPU + * data to initialize. + */ +static void __cpuinit rcu_preempt_init_percpu_data(int cpu) +{ +} + +/* + * Because there is no preemptible RCU, there is no cleanup to do. + */ +static void rcu_preempt_cleanup_dying_cpu(void) +{ +} + /* * Because preemptible RCU does not exist, it need not be initialized. 
*/ @@ -1812,11 +1968,9 @@ static void rcu_idle_count_callbacks_posted(void) */ #define RCU_IDLE_FLUSHES 5 /* Number of dyntick-idle tries. */ #define RCU_IDLE_OPT_FLUSHES 3 /* Optional dyntick-idle tries. */ -#define RCU_IDLE_GP_DELAY 4 /* Roughly one grace period. */ +#define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */ #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */ -extern int tick_nohz_enabled; - /* * Does the specified flavor of RCU have non-lazy callbacks pending on * the specified CPU? Both RCU flavor and CPU are specified by the @@ -1893,13 +2047,10 @@ int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) return 1; } /* Set up for the possibility that RCU will post a timer. */ - if (rcu_cpu_has_nonlazy_callbacks(cpu)) { - *delta_jiffies = round_up(RCU_IDLE_GP_DELAY + jiffies, - RCU_IDLE_GP_DELAY) - jiffies; - } else { - *delta_jiffies = jiffies + RCU_IDLE_LAZY_GP_DELAY; - *delta_jiffies = round_jiffies(*delta_jiffies) - jiffies; - } + if (rcu_cpu_has_nonlazy_callbacks(cpu)) + *delta_jiffies = RCU_IDLE_GP_DELAY; + else + *delta_jiffies = RCU_IDLE_LAZY_GP_DELAY; return 0; } @@ -1958,7 +2109,6 @@ static void rcu_cleanup_after_idle(int cpu) del_timer(&rdtp->idle_gp_timer); trace_rcu_prep_idle("Cleanup after idle"); - rdtp->tick_nohz_enabled_snap = ACCESS_ONCE(tick_nohz_enabled); } /* @@ -1984,18 +2134,6 @@ static void rcu_prepare_for_idle(int cpu) { struct timer_list *tp; struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); - int tne; - - /* Handle nohz enablement switches conservatively. */ - tne = ACCESS_ONCE(tick_nohz_enabled); - if (tne != rdtp->tick_nohz_enabled_snap) { - if (rcu_cpu_has_callbacks(cpu)) - invoke_rcu_core(); /* force nohz to see update. */ - rdtp->tick_nohz_enabled_snap = tne; - return; - } - if (!tne) - return; /* * If this is an idle re-entry, for example, due to use of @@ -2049,11 +2187,10 @@ static void rcu_prepare_for_idle(int cpu) if (rcu_cpu_has_nonlazy_callbacks(cpu)) { trace_rcu_prep_idle("Dyntick with callbacks"); rdtp->idle_gp_timer_expires = - round_up(jiffies + RCU_IDLE_GP_DELAY, - RCU_IDLE_GP_DELAY); + jiffies + RCU_IDLE_GP_DELAY; } else { rdtp->idle_gp_timer_expires = - round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY); + jiffies + RCU_IDLE_LAZY_GP_DELAY; trace_rcu_prep_idle("Dyntick with lazy callbacks"); } tp = &rdtp->idle_gp_timer; @@ -2094,9 +2231,8 @@ static void rcu_prepare_for_idle(int cpu) if (rcu_cpu_has_callbacks(cpu)) { trace_rcu_prep_idle("More callbacks"); invoke_rcu_core(); - } else { + } else trace_rcu_prep_idle("Callbacks drained"); - } } /* @@ -2133,7 +2269,6 @@ static void print_cpu_stall_fast_no_hz(char *cp, int cpu) static void print_cpu_stall_fast_no_hz(char *cp, int cpu) { - *cp = '\0'; } #endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */ diff --git a/trunk/kernel/rcutree_trace.c b/trunk/kernel/rcutree_trace.c index abffb486e94e..d4bc16ddd1d4 100644 --- a/trunk/kernel/rcutree_trace.c +++ b/trunk/kernel/rcutree_trace.c @@ -46,31 +46,6 @@ #define RCU_TREE_NONCORE #include "rcutree.h" -static int show_rcubarrier(struct seq_file *m, void *unused) -{ - struct rcu_state *rsp; - - for_each_rcu_flavor(rsp) - seq_printf(m, "%s: %c bcc: %d nbd: %lu\n", - rsp->name, rsp->rcu_barrier_in_progress ? 
'B' : '.', - atomic_read(&rsp->barrier_cpu_count), - rsp->n_barrier_done); - return 0; -} - -static int rcubarrier_open(struct inode *inode, struct file *file) -{ - return single_open(file, show_rcubarrier, NULL); -} - -static const struct file_operations rcubarrier_fops = { - .owner = THIS_MODULE, - .open = rcubarrier_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - #ifdef CONFIG_RCU_BOOST static char convert_kthread_status(unsigned int kthread_status) @@ -120,16 +95,24 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); } +#define PRINT_RCU_DATA(name, func, m) \ + do { \ + int _p_r_d_i; \ + \ + for_each_possible_cpu(_p_r_d_i) \ + func(m, &per_cpu(name, _p_r_d_i)); \ + } while (0) + static int show_rcudata(struct seq_file *m, void *unused) { - int cpu; - struct rcu_state *rsp; - - for_each_rcu_flavor(rsp) { - seq_printf(m, "%s:\n", rsp->name); - for_each_possible_cpu(cpu) - print_one_rcu_data(m, per_cpu_ptr(rsp->rda, cpu)); - } +#ifdef CONFIG_TREE_PREEMPT_RCU + seq_puts(m, "rcu_preempt:\n"); + PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data, m); +#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ + seq_puts(m, "rcu_sched:\n"); + PRINT_RCU_DATA(rcu_sched_data, print_one_rcu_data, m); + seq_puts(m, "rcu_bh:\n"); + PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data, m); return 0; } @@ -183,9 +166,6 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp) static int show_rcudata_csv(struct seq_file *m, void *unused) { - int cpu; - struct rcu_state *rsp; - seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pgp\",\"pq\","); seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\","); seq_puts(m, "\"of\",\"qll\",\"ql\",\"qs\""); @@ -193,11 +173,14 @@ static int show_rcudata_csv(struct seq_file *m, void *unused) seq_puts(m, "\"kt\",\"ktl\""); #endif /* #ifdef CONFIG_RCU_BOOST */ seq_puts(m, ",\"b\",\"ci\",\"co\",\"ca\"\n"); - for_each_rcu_flavor(rsp) { - seq_printf(m, "\"%s:\"\n", rsp->name); - for_each_possible_cpu(cpu) - print_one_rcu_data_csv(m, per_cpu_ptr(rsp->rda, cpu)); - } +#ifdef CONFIG_TREE_PREEMPT_RCU + seq_puts(m, "\"rcu_preempt:\"\n"); + PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m); +#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ + seq_puts(m, "\"rcu_sched:\"\n"); + PRINT_RCU_DATA(rcu_sched_data, print_one_rcu_data_csv, m); + seq_puts(m, "\"rcu_bh:\"\n"); + PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data_csv, m); return 0; } @@ -218,7 +201,8 @@ static const struct file_operations rcudata_csv_fops = { static void print_one_rcu_node_boost(struct seq_file *m, struct rcu_node *rnp) { - seq_printf(m, "%d:%d tasks=%c%c%c%c kt=%c ntb=%lu neb=%lu nnb=%lu ", + seq_printf(m, "%d:%d tasks=%c%c%c%c kt=%c ntb=%lu neb=%lu nnb=%lu " + "j=%04x bt=%04x\n", rnp->grplo, rnp->grphi, "T."[list_empty(&rnp->blkd_tasks)], "N."[!rnp->gp_tasks], @@ -226,11 +210,11 @@ static void print_one_rcu_node_boost(struct seq_file *m, struct rcu_node *rnp) "B."[!rnp->boost_tasks], convert_kthread_status(rnp->boost_kthread_status), rnp->n_tasks_boosted, rnp->n_exp_boosts, - rnp->n_normal_boosts); - seq_printf(m, "j=%04x bt=%04x\n", + rnp->n_normal_boosts, (int)(jiffies & 0xffff), (int)(rnp->boost_time & 0xffff)); - seq_printf(m, " balk: nt=%lu egt=%lu bt=%lu nb=%lu ny=%lu nos=%lu\n", + seq_printf(m, "%s: nt=%lu egt=%lu bt=%lu nb=%lu ny=%lu nos=%lu\n", + " balk", rnp->n_balk_blkd_tasks, rnp->n_balk_exp_gp_tasks, rnp->n_balk_boost_tasks, @@ -286,15 +270,15 @@ static void 
print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) struct rcu_node *rnp; gpnum = rsp->gpnum; - seq_printf(m, "%s: c=%lu g=%lu s=%d jfq=%ld j=%x ", - rsp->name, rsp->completed, gpnum, rsp->fqs_state, + seq_printf(m, "c=%lu g=%lu s=%d jfq=%ld j=%x " + "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n", + rsp->completed, gpnum, rsp->fqs_state, (long)(rsp->jiffies_force_qs - jiffies), - (int)(jiffies & 0xffff)); - seq_printf(m, "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n", + (int)(jiffies & 0xffff), rsp->n_force_qs, rsp->n_force_qs_ngp, rsp->n_force_qs - rsp->n_force_qs_ngp, rsp->n_force_qs_lh, rsp->qlen_lazy, rsp->qlen); - for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < rcu_num_nodes; rnp++) { + for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < NUM_RCU_NODES; rnp++) { if (rnp->level != level) { seq_puts(m, "\n"); level = rnp->level; @@ -311,10 +295,14 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) static int show_rcuhier(struct seq_file *m, void *unused) { - struct rcu_state *rsp; - - for_each_rcu_flavor(rsp) - print_one_rcu_state(m, rsp); +#ifdef CONFIG_TREE_PREEMPT_RCU + seq_puts(m, "rcu_preempt:\n"); + print_one_rcu_state(m, &rcu_preempt_state); +#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ + seq_puts(m, "rcu_sched:\n"); + print_one_rcu_state(m, &rcu_sched_state); + seq_puts(m, "rcu_bh:\n"); + print_one_rcu_state(m, &rcu_bh_state); return 0; } @@ -355,10 +343,11 @@ static void show_one_rcugp(struct seq_file *m, struct rcu_state *rsp) static int show_rcugp(struct seq_file *m, void *unused) { - struct rcu_state *rsp; - - for_each_rcu_flavor(rsp) - show_one_rcugp(m, rsp); +#ifdef CONFIG_TREE_PREEMPT_RCU + show_one_rcugp(m, &rcu_preempt_state); +#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ + show_one_rcugp(m, &rcu_sched_state); + show_one_rcugp(m, &rcu_bh_state); return 0; } @@ -377,36 +366,44 @@ static const struct file_operations rcugp_fops = { static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp) { - seq_printf(m, "%3d%cnp=%ld ", + seq_printf(m, "%3d%cnp=%ld " + "qsp=%ld rpq=%ld cbr=%ld cng=%ld " + "gpc=%ld gps=%ld nf=%ld nn=%ld\n", rdp->cpu, cpu_is_offline(rdp->cpu) ? '!' 
: ' ', - rdp->n_rcu_pending); - seq_printf(m, "qsp=%ld rpq=%ld cbr=%ld cng=%ld ", + rdp->n_rcu_pending, rdp->n_rp_qs_pending, rdp->n_rp_report_qs, rdp->n_rp_cb_ready, - rdp->n_rp_cpu_needs_gp); - seq_printf(m, "gpc=%ld gps=%ld nf=%ld nn=%ld\n", + rdp->n_rp_cpu_needs_gp, rdp->n_rp_gp_completed, rdp->n_rp_gp_started, rdp->n_rp_need_fqs, rdp->n_rp_need_nothing); } -static int show_rcu_pending(struct seq_file *m, void *unused) +static void print_rcu_pendings(struct seq_file *m, struct rcu_state *rsp) { int cpu; struct rcu_data *rdp; - struct rcu_state *rsp; - - for_each_rcu_flavor(rsp) { - seq_printf(m, "%s:\n", rsp->name); - for_each_possible_cpu(cpu) { - rdp = per_cpu_ptr(rsp->rda, cpu); - if (rdp->beenonline) - print_one_rcu_pending(m, rdp); - } + + for_each_possible_cpu(cpu) { + rdp = per_cpu_ptr(rsp->rda, cpu); + if (rdp->beenonline) + print_one_rcu_pending(m, rdp); } +} + +static int show_rcu_pending(struct seq_file *m, void *unused) +{ +#ifdef CONFIG_TREE_PREEMPT_RCU + seq_puts(m, "rcu_preempt:\n"); + print_rcu_pendings(m, &rcu_preempt_state); +#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ + seq_puts(m, "rcu_sched:\n"); + print_rcu_pendings(m, &rcu_sched_state); + seq_puts(m, "rcu_bh:\n"); + print_rcu_pendings(m, &rcu_bh_state); return 0; } @@ -456,11 +453,6 @@ static int __init rcutree_trace_init(void) if (!rcudir) goto free_out; - retval = debugfs_create_file("rcubarrier", 0444, rcudir, - NULL, &rcubarrier_fops); - if (!retval) - goto free_out; - retval = debugfs_create_file("rcudata", 0444, rcudir, NULL, &rcudata_fops); if (!retval) diff --git a/trunk/kernel/signal.c b/trunk/kernel/signal.c index be4f856d52f8..677102789cf2 100644 --- a/trunk/kernel/signal.c +++ b/trunk/kernel/signal.c @@ -1971,13 +1971,6 @@ static void ptrace_do_notify(int signr, int exit_code, int why) void ptrace_notify(int exit_code) { BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); - if (unlikely(current->task_works)) { - if (test_and_clear_ti_thread_flag(current_thread_info(), - TIF_NOTIFY_RESUME)) { - smp_mb__after_clear_bit(); - task_work_run(); - } - } spin_lock_irq(¤t->sighand->siglock); ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED); @@ -2198,14 +2191,6 @@ int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct signal_struct *signal = current->signal; int signr; - if (unlikely(current->task_works)) { - if (test_and_clear_ti_thread_flag(current_thread_info(), - TIF_NOTIFY_RESUME)) { - smp_mb__after_clear_bit(); - task_work_run(); - } - } - if (unlikely(uprobe_deny_signal())) return 0; diff --git a/trunk/kernel/smp.c b/trunk/kernel/smp.c index 29dd40a9f2f4..d0ae5b24875e 100644 --- a/trunk/kernel/smp.c +++ b/trunk/kernel/smp.c @@ -581,6 +581,26 @@ int smp_call_function(smp_call_func_t func, void *info, int wait) return 0; } EXPORT_SYMBOL(smp_call_function); + +void ipi_call_lock(void) +{ + raw_spin_lock(&call_function.lock); +} + +void ipi_call_unlock(void) +{ + raw_spin_unlock(&call_function.lock); +} + +void ipi_call_lock_irq(void) +{ + raw_spin_lock_irq(&call_function.lock); +} + +void ipi_call_unlock_irq(void) +{ + raw_spin_unlock_irq(&call_function.lock); +} #endif /* USE_GENERIC_SMP_HELPERS */ /* Setup configured maximum number of CPUs to activate */ diff --git a/trunk/kernel/smpboot.h b/trunk/kernel/smpboot.h index 6ef9433e1c70..80c0acfb8472 100644 --- a/trunk/kernel/smpboot.h +++ b/trunk/kernel/smpboot.h @@ -3,6 +3,8 @@ struct task_struct; +int smpboot_prepare(unsigned int cpu); + #ifdef CONFIG_GENERIC_SMP_IDLE_THREAD struct task_struct *idle_thread_get(unsigned int cpu); 
void idle_thread_set_boot_cpu(void); diff --git a/trunk/kernel/task_work.c b/trunk/kernel/task_work.c index 91d4e1742a0c..82d1c794066d 100644 --- a/trunk/kernel/task_work.c +++ b/trunk/kernel/task_work.c @@ -3,78 +3,82 @@ #include int -task_work_add(struct task_struct *task, struct callback_head *twork, bool notify) +task_work_add(struct task_struct *task, struct task_work *twork, bool notify) { - struct callback_head *last, *first; unsigned long flags; + int err = -ESRCH; +#ifndef TIF_NOTIFY_RESUME + if (notify) + return -ENOTSUPP; +#endif /* - * Not inserting the new work if the task has already passed - * exit_task_work() is the responisbility of callers. + * We must not insert the new work if the task has already passed + * exit_task_work(). We rely on do_exit()->raw_spin_unlock_wait() + * and check PF_EXITING under pi_lock. */ raw_spin_lock_irqsave(&task->pi_lock, flags); - last = task->task_works; - first = last ? last->next : twork; - twork->next = first; - if (last) - last->next = twork; - task->task_works = twork; + if (likely(!(task->flags & PF_EXITING))) { + hlist_add_head(&twork->hlist, &task->task_works); + err = 0; + } raw_spin_unlock_irqrestore(&task->pi_lock, flags); /* test_and_set_bit() implies mb(), see tracehook_notify_resume(). */ - if (notify) + if (likely(!err) && notify) set_notify_resume(task); - return 0; + return err; } -struct callback_head * +struct task_work * task_work_cancel(struct task_struct *task, task_work_func_t func) { unsigned long flags; - struct callback_head *last, *res = NULL; + struct task_work *twork; + struct hlist_node *pos; raw_spin_lock_irqsave(&task->pi_lock, flags); - last = task->task_works; - if (last) { - struct callback_head *q = last, *p = q->next; - while (1) { - if (p->func == func) { - q->next = p->next; - if (p == last) - task->task_works = q == p ? NULL : q; - res = p; - break; - } - if (p == last) - break; - q = p; - p = q->next; + hlist_for_each_entry(twork, pos, &task->task_works, hlist) { + if (twork->func == func) { + hlist_del(&twork->hlist); + goto found; } } + twork = NULL; + found: raw_spin_unlock_irqrestore(&task->pi_lock, flags); - return res; + + return twork; } void task_work_run(void) { struct task_struct *task = current; - struct callback_head *p, *q; + struct hlist_head task_works; + struct hlist_node *pos; - while (1) { - raw_spin_lock_irq(&task->pi_lock); - p = task->task_works; - task->task_works = NULL; - raw_spin_unlock_irq(&task->pi_lock); + raw_spin_lock_irq(&task->pi_lock); + hlist_move_list(&task->task_works, &task_works); + raw_spin_unlock_irq(&task->pi_lock); - if (unlikely(!p)) - return; + if (unlikely(hlist_empty(&task_works))) + return; + /* + * We use hlist to save the space in task_struct, but we want fifo. + * Find the last entry, the list should be short, then process them + * in reverse order. 
+ */ + for (pos = task_works.first; pos->next; pos = pos->next) + ; - q = p->next; /* head */ - p->next = NULL; /* cut it */ - while (q) { - p = q->next; - q->func(q); - q = p; - } + for (;;) { + struct hlist_node **pprev = pos->pprev; + struct task_work *twork = container_of(pos, struct task_work, + hlist); + twork->func(twork); + + if (pprev == &task_works.first) + break; + pos = container_of(pprev, struct hlist_node, next); } } diff --git a/trunk/kernel/time/ntp.c b/trunk/kernel/time/ntp.c index b7fbadc5c973..70b33abcc7bb 100644 --- a/trunk/kernel/time/ntp.c +++ b/trunk/kernel/time/ntp.c @@ -409,9 +409,7 @@ int second_overflow(unsigned long secs) time_state = TIME_DEL; break; case TIME_INS: - if (!(time_status & STA_INS)) - time_state = TIME_OK; - else if (secs % 86400 == 0) { + if (secs % 86400 == 0) { leap = -1; time_state = TIME_OOP; time_tai++; @@ -420,9 +418,7 @@ int second_overflow(unsigned long secs) } break; case TIME_DEL: - if (!(time_status & STA_DEL)) - time_state = TIME_OK; - else if ((secs + 1) % 86400 == 0) { + if ((secs + 1) % 86400 == 0) { leap = 1; time_tai--; time_state = TIME_WAIT; diff --git a/trunk/kernel/time/tick-sched.c b/trunk/kernel/time/tick-sched.c index 024540f97f74..4a08472c3ca7 100644 --- a/trunk/kernel/time/tick-sched.c +++ b/trunk/kernel/time/tick-sched.c @@ -105,7 +105,7 @@ static ktime_t tick_init_jiffy_update(void) /* * NO HZ enabled ? */ -int tick_nohz_enabled __read_mostly = 1; +static int tick_nohz_enabled __read_mostly = 1; /* * Enable / Disable tickless mode @@ -271,15 +271,50 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time) } EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us); -static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, - ktime_t now, int cpu) +static void tick_nohz_stop_sched_tick(struct tick_sched *ts) { unsigned long seq, last_jiffies, next_jiffies, delta_jiffies; - ktime_t last_update, expires, ret = { .tv64 = 0 }; unsigned long rcu_delta_jiffies; + ktime_t last_update, expires, now; struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; u64 time_delta; + int cpu; + + cpu = smp_processor_id(); + ts = &per_cpu(tick_cpu_sched, cpu); + + now = tick_nohz_start_idle(cpu, ts); + + /* + * If this cpu is offline and it is the one which updates + * jiffies, then give up the assignment and let it be taken by + * the cpu which runs the tick timer next. If we don't drop + * this here the jiffies might be stale and do_timer() never + * invoked. + */ + if (unlikely(!cpu_online(cpu))) { + if (cpu == tick_do_timer_cpu) + tick_do_timer_cpu = TICK_DO_TIMER_NONE; + } + + if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) + return; + if (need_resched()) + return; + + if (unlikely(local_softirq_pending() && cpu_online(cpu))) { + static int ratelimit; + + if (ratelimit < 10) { + printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", + (unsigned int) local_softirq_pending()); + ratelimit++; + } + return; + } + + ts->idle_calls++; /* Read jiffies and the time when jiffies were updated last */ do { seq = read_seqbegin(&xtime_lock); @@ -362,8 +397,6 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, if (ts->tick_stopped && ktime_equal(expires, dev->next_event)) goto out; - ret = expires; - /* * nohz_stop_sched_tick can be called several times before * the nohz_restart_sched_tick is called. 
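/*
 * task_work_add() above pushes new work at the head of an hlist (O(1)), so
 * the list ends up newest-first, while task_work_run() wants to run entries
 * oldest-first.  The kernel walks back through the ->pprev pointers; an
 * equivalent standalone model is to detach the list, reverse it once, and
 * then run it front to back.  The names below (work_item, work_add,
 * work_run, ...) are illustrative only, not kernel APIs.
 */
#include <stdio.h>

struct work_item {
	struct work_item *next;
	void (*func)(struct work_item *);
};

static struct work_item *pending;		/* head of the newest-first list */

static void work_add(struct work_item *w)	/* push at head, like hlist_add_head() */
{
	w->next = pending;
	pending = w;
}

static void work_run(void)			/* detach, reverse, run oldest-first */
{
	struct work_item *w = pending, *fifo = NULL;

	pending = NULL;				/* detach the whole list */
	while (w) {				/* reverse: oldest entry ends up first */
		struct work_item *next = w->next;

		w->next = fifo;
		fifo = w;
		w = next;
	}
	while (fifo) {
		struct work_item *next = fifo->next;	/* func() may free the entry */

		fifo->func(fifo);
		fifo = next;
	}
}

static void say(struct work_item *w)
{
	printf("running work %p\n", (void *)w);
}

int main(void)
{
	struct work_item a = { .func = say };
	struct work_item b = { .func = say };

	work_add(&a);				/* queued first ... */
	work_add(&b);
	work_run();				/* ... so it runs first */
	return 0;
}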
This happens when @@ -375,10 +408,16 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, select_nohz_load_balancer(1); calc_load_enter_idle(); - ts->last_tick = hrtimer_get_expires(&ts->sched_timer); + ts->idle_tick = hrtimer_get_expires(&ts->sched_timer); ts->tick_stopped = 1; + ts->idle_jiffies = last_jiffies; } + ts->idle_sleeps++; + + /* Mark expires */ + ts->idle_expires = expires; + /* * If the expiration time == KTIME_MAX, then * in this case we simply stop the tick timer. @@ -409,65 +448,6 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, ts->next_jiffies = next_jiffies; ts->last_jiffies = last_jiffies; ts->sleep_length = ktime_sub(dev->next_event, now); - - return ret; -} - -static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) -{ - /* - * If this cpu is offline and it is the one which updates - * jiffies, then give up the assignment and let it be taken by - * the cpu which runs the tick timer next. If we don't drop - * this here the jiffies might be stale and do_timer() never - * invoked. - */ - if (unlikely(!cpu_online(cpu))) { - if (cpu == tick_do_timer_cpu) - tick_do_timer_cpu = TICK_DO_TIMER_NONE; - } - - if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) - return false; - - if (need_resched()) - return false; - - if (unlikely(local_softirq_pending() && cpu_online(cpu))) { - static int ratelimit; - - if (ratelimit < 10) { - printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", - (unsigned int) local_softirq_pending()); - ratelimit++; - } - return false; - } - - return true; -} - -static void __tick_nohz_idle_enter(struct tick_sched *ts) -{ - ktime_t now, expires; - int cpu = smp_processor_id(); - - now = tick_nohz_start_idle(cpu, ts); - - if (can_stop_idle_tick(cpu, ts)) { - int was_stopped = ts->tick_stopped; - - ts->idle_calls++; - - expires = tick_nohz_stop_sched_tick(ts, now, cpu); - if (expires.tv64 > 0LL) { - ts->idle_sleeps++; - ts->idle_expires = expires; - } - - if (!was_stopped && ts->tick_stopped) - ts->idle_jiffies = ts->last_jiffies; - } } /** @@ -505,7 +485,7 @@ void tick_nohz_idle_enter(void) * update of the idle time accounting in tick_nohz_start_idle(). */ ts->inidle = 1; - __tick_nohz_idle_enter(ts); + tick_nohz_stop_sched_tick(ts); local_irq_enable(); } @@ -525,7 +505,7 @@ void tick_nohz_irq_exit(void) if (!ts->inidle) return; - __tick_nohz_idle_enter(ts); + tick_nohz_stop_sched_tick(ts); } /** @@ -543,7 +523,7 @@ ktime_t tick_nohz_get_sleep_length(void) static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) { hrtimer_cancel(&ts->sched_timer); - hrtimer_set_expires(&ts->sched_timer, ts->last_tick); + hrtimer_set_expires(&ts->sched_timer, ts->idle_tick); while (1) { /* Forward the time to expire in the future */ @@ -566,41 +546,6 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) } } -static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now) -{ - /* Update jiffies first */ - select_nohz_load_balancer(0); - tick_do_update_jiffies64(now); - update_cpu_load_nohz(); - - touch_softlockup_watchdog(); - /* - * Cancel the scheduled timer and restore the tick - */ - ts->tick_stopped = 0; - ts->idle_exittime = now; - - tick_nohz_restart(ts, now); -} - -static void tick_nohz_account_idle_ticks(struct tick_sched *ts) -{ -#ifndef CONFIG_VIRT_CPU_ACCOUNTING - unsigned long ticks; - /* - * We stopped the tick in idle. Update process times would miss the - * time we slept as update_process_times does only a 1 tick - * accounting. Enforce that this is accounted to idle ! 
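/*
 * The idle-exit path above charges the whole stopped-tick span to idle time:
 * update_process_times() only accounts a single tick, so the jiffies that
 * elapsed while the tick was stopped would otherwise be lost.  A minimal
 * standalone model of that bookkeeping (made-up values, illustrative
 * names only):
 */
#include <stdio.h>
#include <limits.h>

static unsigned long jiffies_now;
static unsigned long idle_entry_jiffies;

/* Account every jiffy of the stopped-tick span to idle, but never a bogus
 * huge value if the snapshot is off. */
static void account_idle_span(void)
{
	unsigned long ticks = jiffies_now - idle_entry_jiffies;

	if (ticks && ticks < LONG_MAX)
		printf("account %lu ticks as idle\n", ticks);
}

int main(void)
{
	idle_entry_jiffies = 1000;	/* tick stopped here */
	jiffies_now = 1250;		/* woke up 250 jiffies later */
	account_idle_span();
	return 0;
}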
- */ - ticks = jiffies - ts->idle_jiffies; - /* - * We might be one off. Do not randomly account a huge number of ticks! - */ - if (ticks && ticks < LONG_MAX) - account_idle_ticks(ticks); -#endif -} - /** * tick_nohz_idle_exit - restart the idle tick from the idle task * @@ -612,6 +557,9 @@ void tick_nohz_idle_exit(void) { int cpu = smp_processor_id(); struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); +#ifndef CONFIG_VIRT_CPU_ACCOUNTING + unsigned long ticks; +#endif ktime_t now; local_irq_disable(); @@ -626,11 +574,40 @@ void tick_nohz_idle_exit(void) if (ts->idle_active) tick_nohz_stop_idle(cpu, now); - if (ts->tick_stopped) { - tick_nohz_restart_sched_tick(ts, now); - tick_nohz_account_idle_ticks(ts); + if (!ts->tick_stopped) { + local_irq_enable(); + return; } + /* Update jiffies first */ + select_nohz_load_balancer(0); + tick_do_update_jiffies64(now); + update_cpu_load_nohz(); + +#ifndef CONFIG_VIRT_CPU_ACCOUNTING + /* + * We stopped the tick in idle. Update process times would miss the + * time we slept as update_process_times does only a 1 tick + * accounting. Enforce that this is accounted to idle ! + */ + ticks = jiffies - ts->idle_jiffies; + /* + * We might be one off. Do not randomly account a huge number of ticks! + */ + if (ticks && ticks < LONG_MAX) + account_idle_ticks(ticks); +#endif + + calc_load_exit_idle(); + touch_softlockup_watchdog(); + /* + * Cancel the scheduled timer and restore the tick + */ + ts->tick_stopped = 0; + ts->idle_exittime = now; + + tick_nohz_restart(ts, now); + local_irq_enable(); } @@ -834,8 +811,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) */ if (ts->tick_stopped) { touch_softlockup_watchdog(); - if (idle_cpu(cpu)) - ts->idle_jiffies++; + ts->idle_jiffies++; } update_process_times(user_mode(regs)); profile_tick(CPU_PROFILING); diff --git a/trunk/kernel/time/timekeeping.c b/trunk/kernel/time/timekeeping.c index f045cc50832d..269b1fe5f2ae 100644 --- a/trunk/kernel/time/timekeeping.c +++ b/trunk/kernel/time/timekeeping.c @@ -24,32 +24,32 @@ /* Structure holding internal timekeeping values. */ struct timekeeper { /* Current clocksource used for timekeeping. */ - struct clocksource *clock; + struct clocksource *clock; /* NTP adjusted clock multiplier */ - u32 mult; + u32 mult; /* The shift value of the current clocksource. */ - u32 shift; + int shift; + /* Number of clock cycles in one NTP interval. */ - cycle_t cycle_interval; + cycle_t cycle_interval; /* Number of clock shifted nano seconds in one NTP interval. */ - u64 xtime_interval; + u64 xtime_interval; /* shifted nano seconds left over when rounding cycle_interval */ - s64 xtime_remainder; + s64 xtime_remainder; /* Raw nano seconds accumulated per NTP interval. */ - u32 raw_interval; - - /* Current CLOCK_REALTIME time in seconds */ - u64 xtime_sec; - /* Clock shifted nano seconds */ - u64 xtime_nsec; + u32 raw_interval; + /* Clock shifted nano seconds remainder not stored in xtime.tv_nsec. */ + u64 xtime_nsec; /* Difference between accumulated time and NTP time in ntp * shifted nano seconds. */ - s64 ntp_error; + s64 ntp_error; /* Shift conversion between clock shifted nano seconds and * ntp shifted nano seconds. */ - u32 ntp_error_shift; + int ntp_error_shift; + /* The current time */ + struct timespec xtime; /* * wall_to_monotonic is what we need to add to xtime (or xtime corrected * for sub jiffie times) to get to monotonic time. 
Monotonic is pegged @@ -64,17 +64,20 @@ struct timekeeper { * - wall_to_monotonic is no longer the boot time, getboottime must be * used instead. */ - struct timespec wall_to_monotonic; + struct timespec wall_to_monotonic; /* time spent in suspend */ - struct timespec total_sleep_time; + struct timespec total_sleep_time; /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */ - struct timespec raw_time; + struct timespec raw_time; + /* Offset clock monotonic -> clock realtime */ - ktime_t offs_real; + ktime_t offs_real; + /* Offset clock monotonic -> clock boottime */ - ktime_t offs_boot; + ktime_t offs_boot; + /* Seqlock for all timekeeper values */ - seqlock_t lock; + seqlock_t lock; }; static struct timekeeper timekeeper; @@ -85,37 +88,11 @@ static struct timekeeper timekeeper; */ __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock); + /* flag for if timekeeping is suspended */ int __read_mostly timekeeping_suspended; -static inline void tk_normalize_xtime(struct timekeeper *tk) -{ - while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) { - tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift; - tk->xtime_sec++; - } -} -static struct timespec tk_xtime(struct timekeeper *tk) -{ - struct timespec ts; - - ts.tv_sec = tk->xtime_sec; - ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift); - return ts; -} - -static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts) -{ - tk->xtime_sec = ts->tv_sec; - tk->xtime_nsec = ts->tv_nsec << tk->shift; -} - -static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts) -{ - tk->xtime_sec += ts->tv_sec; - tk->xtime_nsec += ts->tv_nsec << tk->shift; -} /** * timekeeper_setup_internals - Set up internals to use clocksource clock. @@ -127,14 +104,12 @@ static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts) * * Unless you're the timekeeping code, you should not be using this! */ -static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) +static void timekeeper_setup_internals(struct clocksource *clock) { cycle_t interval; u64 tmp, ntpinterval; - struct clocksource *old_clock; - old_clock = tk->clock; - tk->clock = clock; + timekeeper.clock = clock; clock->cycle_last = clock->read(clock); /* Do the ns -> cycle conversion first, using original mult */ @@ -147,96 +122,80 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) tmp = 1; interval = (cycle_t) tmp; - tk->cycle_interval = interval; + timekeeper.cycle_interval = interval; /* Go back from cycles -> shifted ns */ - tk->xtime_interval = (u64) interval * clock->mult; - tk->xtime_remainder = ntpinterval - tk->xtime_interval; - tk->raw_interval = + timekeeper.xtime_interval = (u64) interval * clock->mult; + timekeeper.xtime_remainder = ntpinterval - timekeeper.xtime_interval; + timekeeper.raw_interval = ((u64) interval * clock->mult) >> clock->shift; - /* if changing clocks, convert xtime_nsec shift units */ - if (old_clock) { - int shift_change = clock->shift - old_clock->shift; - if (shift_change < 0) - tk->xtime_nsec >>= -shift_change; - else - tk->xtime_nsec <<= shift_change; - } - tk->shift = clock->shift; + timekeeper.xtime_nsec = 0; + timekeeper.shift = clock->shift; - tk->ntp_error = 0; - tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift; + timekeeper.ntp_error = 0; + timekeeper.ntp_error_shift = NTP_SCALE_SHIFT - clock->shift; /* * The timekeeper keeps its own mult values for the currently * active clocksource. These value will be adjusted via NTP * to counteract clock drifting. 
*/ - tk->mult = clock->mult; + timekeeper.mult = clock->mult; } /* Timekeeper helper functions. */ -static inline s64 timekeeping_get_ns(struct timekeeper *tk) +static inline s64 timekeeping_get_ns(void) { cycle_t cycle_now, cycle_delta; struct clocksource *clock; - s64 nsec; /* read clocksource: */ - clock = tk->clock; + clock = timekeeper.clock; cycle_now = clock->read(clock); /* calculate the delta since the last update_wall_time: */ cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; - nsec = cycle_delta * tk->mult + tk->xtime_nsec; - nsec >>= tk->shift; - - /* If arch requires, add in gettimeoffset() */ - return nsec + arch_gettimeoffset(); + /* return delta convert to nanoseconds using ntp adjusted mult. */ + return clocksource_cyc2ns(cycle_delta, timekeeper.mult, + timekeeper.shift); } -static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk) +static inline s64 timekeeping_get_ns_raw(void) { cycle_t cycle_now, cycle_delta; struct clocksource *clock; - s64 nsec; /* read clocksource: */ - clock = tk->clock; + clock = timekeeper.clock; cycle_now = clock->read(clock); /* calculate the delta since the last update_wall_time: */ cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; - /* convert delta to nanoseconds. */ - nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift); - - /* If arch requires, add in gettimeoffset() */ - return nsec + arch_gettimeoffset(); + /* return delta convert to nanoseconds. */ + return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift); } -static void update_rt_offset(struct timekeeper *tk) +static void update_rt_offset(void) { - struct timespec tmp, *wtm = &tk->wall_to_monotonic; + struct timespec tmp, *wtm = &timekeeper.wall_to_monotonic; set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec); - tk->offs_real = timespec_to_ktime(tmp); + timekeeper.offs_real = timespec_to_ktime(tmp); } /* must hold write on timekeeper.lock */ -static void timekeeping_update(struct timekeeper *tk, bool clearntp) +static void timekeeping_update(bool clearntp) { - struct timespec xt; - if (clearntp) { - tk->ntp_error = 0; + timekeeper.ntp_error = 0; ntp_clear(); } - update_rt_offset(tk); - xt = tk_xtime(tk); - update_vsyscall(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult); + update_rt_offset(); + update_vsyscall(&timekeeper.xtime, &timekeeper.wall_to_monotonic, + timekeeper.clock, timekeeper.mult); } @@ -247,26 +206,27 @@ static void timekeeping_update(struct timekeeper *tk, bool clearntp) * update_wall_time(). This is useful before significant clock changes, * as it avoids having to deal with this time offset explicitly. 
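/*
 * timekeeping_get_ns() above turns a raw clocksource cycle delta into
 * nanoseconds with the usual fixed-point mult/shift scheme:
 *	ns = (cycles * mult) >> shift
 * Below is a standalone model with made-up numbers (a notional 1 MHz
 * counter, so one cycle is 1000 ns); cyc2ns_model() is illustrative and
 * not a kernel helper.
 */
#include <stdio.h>
#include <stdint.h>

static inline uint64_t cyc2ns_model(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;
}

int main(void)
{
	uint32_t shift = 20;
	uint32_t mult = 1000u << shift;		/* 1000 ns per cycle, scaled by 2^shift */

	printf("%llu ns\n", (unsigned long long)cyc2ns_model(1234, mult, shift));
	return 0;
}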
*/ -static void timekeeping_forward_now(struct timekeeper *tk) +static void timekeeping_forward_now(void) { cycle_t cycle_now, cycle_delta; struct clocksource *clock; s64 nsec; - clock = tk->clock; + clock = timekeeper.clock; cycle_now = clock->read(clock); cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; clock->cycle_last = cycle_now; - tk->xtime_nsec += cycle_delta * tk->mult; + nsec = clocksource_cyc2ns(cycle_delta, timekeeper.mult, + timekeeper.shift); /* If arch requires, add in gettimeoffset() */ - tk->xtime_nsec += arch_gettimeoffset() << tk->shift; + nsec += arch_gettimeoffset(); - tk_normalize_xtime(tk); + timespec_add_ns(&timekeeper.xtime, nsec); nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift); - timespec_add_ns(&tk->raw_time, nsec); + timespec_add_ns(&timekeeper.raw_time, nsec); } /** @@ -278,15 +238,18 @@ static void timekeeping_forward_now(struct timekeeper *tk) void getnstimeofday(struct timespec *ts) { unsigned long seq; - s64 nsecs = 0; + s64 nsecs; WARN_ON(timekeeping_suspended); do { seq = read_seqbegin(&timekeeper.lock); - ts->tv_sec = timekeeper.xtime_sec; - ts->tv_nsec = timekeeping_get_ns(&timekeeper); + *ts = timekeeper.xtime; + nsecs = timekeeping_get_ns(); + + /* If arch requires, add in gettimeoffset() */ + nsecs += arch_gettimeoffset(); } while (read_seqretry(&timekeeper.lock, seq)); @@ -303,10 +266,13 @@ ktime_t ktime_get(void) do { seq = read_seqbegin(&timekeeper.lock); - secs = timekeeper.xtime_sec + + secs = timekeeper.xtime.tv_sec + timekeeper.wall_to_monotonic.tv_sec; - nsecs = timekeeping_get_ns(&timekeeper) + + nsecs = timekeeper.xtime.tv_nsec + timekeeper.wall_to_monotonic.tv_nsec; + nsecs += timekeeping_get_ns(); + /* If arch requires, add in gettimeoffset() */ + nsecs += arch_gettimeoffset(); } while (read_seqretry(&timekeeper.lock, seq)); /* @@ -329,19 +295,22 @@ void ktime_get_ts(struct timespec *ts) { struct timespec tomono; unsigned int seq; + s64 nsecs; WARN_ON(timekeeping_suspended); do { seq = read_seqbegin(&timekeeper.lock); - ts->tv_sec = timekeeper.xtime_sec; - ts->tv_nsec = timekeeping_get_ns(&timekeeper); + *ts = timekeeper.xtime; tomono = timekeeper.wall_to_monotonic; + nsecs = timekeeping_get_ns(); + /* If arch requires, add in gettimeoffset() */ + nsecs += arch_gettimeoffset(); } while (read_seqretry(&timekeeper.lock, seq)); set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec, - ts->tv_nsec + tomono.tv_nsec); + ts->tv_nsec + tomono.tv_nsec + nsecs); } EXPORT_SYMBOL_GPL(ktime_get_ts); @@ -364,14 +333,20 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real) WARN_ON_ONCE(timekeeping_suspended); do { + u32 arch_offset; + seq = read_seqbegin(&timekeeper.lock); *ts_raw = timekeeper.raw_time; - ts_real->tv_sec = timekeeper.xtime_sec; - ts_real->tv_nsec = 0; + *ts_real = timekeeper.xtime; + + nsecs_raw = timekeeping_get_ns_raw(); + nsecs_real = timekeeping_get_ns(); - nsecs_raw = timekeeping_get_ns_raw(&timekeeper); - nsecs_real = timekeeping_get_ns(&timekeeper); + /* If arch requires, add in gettimeoffset() */ + arch_offset = arch_gettimeoffset(); + nsecs_raw += arch_offset; + nsecs_real += arch_offset; } while (read_seqretry(&timekeeper.lock, seq)); @@ -406,7 +381,7 @@ EXPORT_SYMBOL(do_gettimeofday); */ int do_settimeofday(const struct timespec *tv) { - struct timespec ts_delta, xt; + struct timespec ts_delta; unsigned long flags; if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) @@ -414,18 +389,15 @@ int do_settimeofday(const struct timespec *tv) 
write_seqlock_irqsave(&timekeeper.lock, flags); - timekeeping_forward_now(&timekeeper); - - xt = tk_xtime(&timekeeper); - ts_delta.tv_sec = tv->tv_sec - xt.tv_sec; - ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec; + timekeeping_forward_now(); + ts_delta.tv_sec = tv->tv_sec - timekeeper.xtime.tv_sec; + ts_delta.tv_nsec = tv->tv_nsec - timekeeper.xtime.tv_nsec; timekeeper.wall_to_monotonic = timespec_sub(timekeeper.wall_to_monotonic, ts_delta); - tk_set_xtime(&timekeeper, tv); - - timekeeping_update(&timekeeper, true); + timekeeper.xtime = *tv; + timekeeping_update(true); write_sequnlock_irqrestore(&timekeeper.lock, flags); @@ -452,14 +424,13 @@ int timekeeping_inject_offset(struct timespec *ts) write_seqlock_irqsave(&timekeeper.lock, flags); - timekeeping_forward_now(&timekeeper); - + timekeeping_forward_now(); - tk_xtime_add(&timekeeper, ts); + timekeeper.xtime = timespec_add(timekeeper.xtime, *ts); timekeeper.wall_to_monotonic = timespec_sub(timekeeper.wall_to_monotonic, *ts); - timekeeping_update(&timekeeper, true); + timekeeping_update(true); write_sequnlock_irqrestore(&timekeeper.lock, flags); @@ -484,14 +455,14 @@ static int change_clocksource(void *data) write_seqlock_irqsave(&timekeeper.lock, flags); - timekeeping_forward_now(&timekeeper); + timekeeping_forward_now(); if (!new->enable || new->enable(new) == 0) { old = timekeeper.clock; - tk_setup_internals(&timekeeper, new); + timekeeper_setup_internals(new); if (old->disable) old->disable(old); } - timekeeping_update(&timekeeper, true); + timekeeping_update(true); write_sequnlock_irqrestore(&timekeeper.lock, flags); @@ -541,7 +512,7 @@ void getrawmonotonic(struct timespec *ts) do { seq = read_seqbegin(&timekeeper.lock); - nsecs = timekeeping_get_ns_raw(&timekeeper); + nsecs = timekeeping_get_ns_raw(); *ts = timekeeper.raw_time; } while (read_seqretry(&timekeeper.lock, seq)); @@ -576,7 +547,6 @@ u64 timekeeping_max_deferment(void) { unsigned long seq; u64 ret; - do { seq = read_seqbegin(&timekeeper.lock); @@ -637,17 +607,19 @@ void __init timekeeping_init(void) clock = clocksource_default_clock(); if (clock->enable) clock->enable(clock); - tk_setup_internals(&timekeeper, clock); + timekeeper_setup_internals(clock); - tk_set_xtime(&timekeeper, &now); + timekeeper.xtime.tv_sec = now.tv_sec; + timekeeper.xtime.tv_nsec = now.tv_nsec; timekeeper.raw_time.tv_sec = 0; timekeeper.raw_time.tv_nsec = 0; - if (boot.tv_sec == 0 && boot.tv_nsec == 0) - boot = tk_xtime(&timekeeper); - + if (boot.tv_sec == 0 && boot.tv_nsec == 0) { + boot.tv_sec = timekeeper.xtime.tv_sec; + boot.tv_nsec = timekeeper.xtime.tv_nsec; + } set_normalized_timespec(&timekeeper.wall_to_monotonic, -boot.tv_sec, -boot.tv_nsec); - update_rt_offset(&timekeeper); + update_rt_offset(); timekeeper.total_sleep_time.tv_sec = 0; timekeeper.total_sleep_time.tv_nsec = 0; write_sequnlock_irqrestore(&timekeeper.lock, flags); @@ -669,8 +641,7 @@ static void update_sleep_time(struct timespec t) * Takes a timespec offset measuring a suspend interval and properly * adds the sleep offset to the timekeeping variables. 
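/*
 * The sleep-time injection above adds the suspended interval to wall time
 * and subtracts it from wall_to_monotonic, so CLOCK_REALTIME catches up
 * after resume while CLOCK_MONOTONIC is unaffected and CLOCK_BOOTTIME
 * accounts the sleep.  A standalone model using plain 64-bit nanosecond
 * counters instead of struct timespec; all names are illustrative.
 */
#include <stdio.h>
#include <stdint.h>

struct tk_model {
	int64_t xtime_ns;		/* wall clock */
	int64_t wall_to_mono_ns;	/* monotonic = xtime + wall_to_mono */
	int64_t total_sleep_ns;		/* boottime = monotonic + total_sleep */
};

static void inject_sleeptime(struct tk_model *tk, int64_t slept_ns)
{
	tk->xtime_ns        += slept_ns;	/* wall clock catches up */
	tk->wall_to_mono_ns -= slept_ns;	/* monotonic stays put */
	tk->total_sleep_ns  += slept_ns;	/* boottime records the sleep */
}

int main(void)
{
	struct tk_model tk = { 1000, -200, 0 };
	int64_t mono_before = tk.xtime_ns + tk.wall_to_mono_ns;

	inject_sleeptime(&tk, 500);
	printf("monotonic unchanged: %s\n",
	       mono_before == tk.xtime_ns + tk.wall_to_mono_ns ? "yes" : "no");
	return 0;
}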
*/ -static void __timekeeping_inject_sleeptime(struct timekeeper *tk, - struct timespec *delta) +static void __timekeeping_inject_sleeptime(struct timespec *delta) { if (!timespec_valid(delta)) { printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid " @@ -678,9 +649,10 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk, return; } - tk_xtime_add(tk, delta); - tk->wall_to_monotonic = timespec_sub(tk->wall_to_monotonic, *delta); - update_sleep_time(timespec_add(tk->total_sleep_time, *delta)); + timekeeper.xtime = timespec_add(timekeeper.xtime, *delta); + timekeeper.wall_to_monotonic = + timespec_sub(timekeeper.wall_to_monotonic, *delta); + update_sleep_time(timespec_add(timekeeper.total_sleep_time, *delta)); } @@ -706,11 +678,11 @@ void timekeeping_inject_sleeptime(struct timespec *delta) write_seqlock_irqsave(&timekeeper.lock, flags); - timekeeping_forward_now(&timekeeper); + timekeeping_forward_now(); - __timekeeping_inject_sleeptime(&timekeeper, delta); + __timekeeping_inject_sleeptime(delta); - timekeeping_update(&timekeeper, true); + timekeeping_update(true); write_sequnlock_irqrestore(&timekeeper.lock, flags); @@ -739,13 +711,12 @@ static void timekeeping_resume(void) if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) { ts = timespec_sub(ts, timekeeping_suspend_time); - __timekeeping_inject_sleeptime(&timekeeper, &ts); + __timekeeping_inject_sleeptime(&ts); } /* re-base the last cycle value */ timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock); timekeeper.ntp_error = 0; timekeeping_suspended = 0; - timekeeping_update(&timekeeper, false); write_sequnlock_irqrestore(&timekeeper.lock, flags); touch_softlockup_watchdog(); @@ -765,7 +736,7 @@ static int timekeeping_suspend(void) read_persistent_clock(&timekeeping_suspend_time); write_seqlock_irqsave(&timekeeper.lock, flags); - timekeeping_forward_now(&timekeeper); + timekeeping_forward_now(); timekeeping_suspended = 1; /* @@ -774,7 +745,7 @@ static int timekeeping_suspend(void) * try to compensate so the difference in system time * and persistent_clock time stays close to constant. */ - delta = timespec_sub(tk_xtime(&timekeeper), timekeeping_suspend_time); + delta = timespec_sub(timekeeper.xtime, timekeeping_suspend_time); delta_delta = timespec_sub(delta, old_delta); if (abs(delta_delta.tv_sec) >= 2) { /* @@ -813,8 +784,7 @@ device_initcall(timekeeping_init_ops); * If the error is already larger, we look ahead even further * to compensate for late or lost adjustments. */ -static __always_inline int timekeeping_bigadjust(struct timekeeper *tk, - s64 error, s64 *interval, +static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval, s64 *offset) { s64 tick_error, i; @@ -830,7 +800,7 @@ static __always_inline int timekeeping_bigadjust(struct timekeeper *tk, * here. This is tuned so that an error of about 1 msec is adjusted * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks). */ - error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ); + error2 = timekeeper.ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ); error2 = abs(error2); for (look_ahead = 0; error2 > 0; look_ahead++) error2 >>= 2; @@ -839,8 +809,8 @@ static __always_inline int timekeeping_bigadjust(struct timekeeper *tk, * Now calculate the error in (1 << look_ahead) ticks, but first * remove the single look ahead already included in the error. 
*/ - tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1); - tick_error -= tk->xtime_interval >> 1; + tick_error = ntp_tick_length() >> (timekeeper.ntp_error_shift + 1); + tick_error -= timekeeper.xtime_interval >> 1; error = ((error - tick_error) >> look_ahead) + tick_error; /* Finally calculate the adjustment shift value. */ @@ -865,9 +835,9 @@ static __always_inline int timekeeping_bigadjust(struct timekeeper *tk, * this is optimized for the most common adjustments of -1,0,1, * for other values we can do a bit more work. */ -static void timekeeping_adjust(struct timekeeper *tk, s64 offset) +static void timekeeping_adjust(s64 offset) { - s64 error, interval = tk->cycle_interval; + s64 error, interval = timekeeper.cycle_interval; int adj; /* @@ -883,7 +853,7 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset) * * Note: It does not "save" on aggravation when reading the code. */ - error = tk->ntp_error >> (tk->ntp_error_shift - 1); + error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1); if (error > interval) { /* * We now divide error by 4(via shift), which checks if @@ -905,8 +875,7 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset) if (likely(error <= interval)) adj = 1; else - adj = timekeeping_bigadjust(tk, error, &interval, - &offset); + adj = timekeeping_bigadjust(error, &interval, &offset); } else if (error < -interval) { /* See comment above, this is just switched for the negative */ error >>= 2; @@ -915,17 +884,18 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset) interval = -interval; offset = -offset; } else - adj = timekeeping_bigadjust(tk, error, &interval, - &offset); - } else + adj = timekeeping_bigadjust(error, &interval, &offset); + } else /* No adjustment needed */ return; - if (unlikely(tk->clock->maxadj && - (tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) { + if (unlikely(timekeeper.clock->maxadj && + (timekeeper.mult + adj > + timekeeper.clock->mult + timekeeper.clock->maxadj))) { printk_once(KERN_WARNING "Adjusting %s more than 11%% (%ld vs %ld)\n", - tk->clock->name, (long)tk->mult + adj, - (long)tk->clock->mult + tk->clock->maxadj); + timekeeper.clock->name, (long)timekeeper.mult + adj, + (long)timekeeper.clock->mult + + timekeeper.clock->maxadj); } /* * So the following can be confusing. @@ -976,60 +946,11 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset) * * XXX - TODO: Doc ntp_error calculation. */ - tk->mult += adj; - tk->xtime_interval += interval; - tk->xtime_nsec -= offset; - tk->ntp_error -= (interval - offset) << tk->ntp_error_shift; - - /* - * It may be possible that when we entered this function, xtime_nsec - * was very small. Further, if we're slightly speeding the clocksource - * in the code above, its possible the required corrective factor to - * xtime_nsec could cause it to underflow. - * - * Now, since we already accumulated the second, cannot simply roll - * the accumulated second back, since the NTP subsystem has been - * notified via second_overflow. So instead we push xtime_nsec forward - * by the amount we underflowed, and add that amount into the error. - * - * We'll correct this error next time through this function, when - * xtime_nsec is not as small. 
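/*
 * timekeeping_adjust() above steers the NTP-adjusted multiplier so the
 * clock neither gains nor loses time relative to what NTP expects.  The
 * real code works on an accumulated, shifted error and looks several
 * intervals ahead; the sketch below is a deliberately simplified
 * standalone model that only nudges the multiplier by one step per
 * interval based on the sign of the current error.
 */
#include <stdio.h>

int main(void)
{
	long mult = 1000;	/* multiplier currently in use */
	long ideal = 1003;	/* what NTP says it should be */
	int i;

	for (i = 0; i < 6; i++) {
		long error = ideal - mult;

		if (error > 0)
			mult++;		/* clock runs slow: speed it up */
		else if (error < 0)
			mult--;		/* clock runs fast: slow it down */
		printf("interval %d: mult=%ld\n", i, mult);
	}
	return 0;
}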
- */ - if (unlikely((s64)tk->xtime_nsec < 0)) { - s64 neg = -(s64)tk->xtime_nsec; - tk->xtime_nsec = 0; - tk->ntp_error += neg << tk->ntp_error_shift; - } - -} - - -/** - * accumulate_nsecs_to_secs - Accumulates nsecs into secs - * - * Helper function that accumulates a the nsecs greater then a second - * from the xtime_nsec field to the xtime_secs field. - * It also calls into the NTP code to handle leapsecond processing. - * - */ -static inline void accumulate_nsecs_to_secs(struct timekeeper *tk) -{ - u64 nsecps = (u64)NSEC_PER_SEC << tk->shift; - - while (tk->xtime_nsec >= nsecps) { - int leap; - - tk->xtime_nsec -= nsecps; - tk->xtime_sec++; - - /* Figure out if its a leap sec and apply if needed */ - leap = second_overflow(tk->xtime_sec); - tk->xtime_sec += leap; - tk->wall_to_monotonic.tv_sec -= leap; - if (leap) - clock_was_set_delayed(); - - } + timekeeper.mult += adj; + timekeeper.xtime_interval += interval; + timekeeper.xtime_nsec -= offset; + timekeeper.ntp_error -= (interval - offset) << + timekeeper.ntp_error_shift; } @@ -1042,36 +963,46 @@ static inline void accumulate_nsecs_to_secs(struct timekeeper *tk) * * Returns the unconsumed cycles. */ -static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, - u32 shift) +static cycle_t logarithmic_accumulation(cycle_t offset, int shift) { + u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift; u64 raw_nsecs; - /* If the offset is smaller then a shifted interval, do nothing */ - if (offset < tk->cycle_interval<cycle_interval << shift; - tk->clock->cycle_last += tk->cycle_interval << shift; + offset -= timekeeper.cycle_interval << shift; + timekeeper.clock->cycle_last += timekeeper.cycle_interval << shift; - tk->xtime_nsec += tk->xtime_interval << shift; - accumulate_nsecs_to_secs(tk); + timekeeper.xtime_nsec += timekeeper.xtime_interval << shift; + while (timekeeper.xtime_nsec >= nsecps) { + int leap; + timekeeper.xtime_nsec -= nsecps; + timekeeper.xtime.tv_sec++; + leap = second_overflow(timekeeper.xtime.tv_sec); + timekeeper.xtime.tv_sec += leap; + timekeeper.wall_to_monotonic.tv_sec -= leap; + if (leap) + clock_was_set_delayed(); + } /* Accumulate raw time */ - raw_nsecs = tk->raw_interval << shift; - raw_nsecs += tk->raw_time.tv_nsec; + raw_nsecs = timekeeper.raw_interval << shift; + raw_nsecs += timekeeper.raw_time.tv_nsec; if (raw_nsecs >= NSEC_PER_SEC) { u64 raw_secs = raw_nsecs; raw_nsecs = do_div(raw_secs, NSEC_PER_SEC); - tk->raw_time.tv_sec += raw_secs; + timekeeper.raw_time.tv_sec += raw_secs; } - tk->raw_time.tv_nsec = raw_nsecs; + timekeeper.raw_time.tv_nsec = raw_nsecs; /* Accumulate error between NTP and clock interval */ - tk->ntp_error += ntp_tick_length() << shift; - tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) << - (tk->ntp_error_shift + shift); + timekeeper.ntp_error += ntp_tick_length() << shift; + timekeeper.ntp_error -= + (timekeeper.xtime_interval + timekeeper.xtime_remainder) << + (timekeeper.ntp_error_shift + shift); return offset; } @@ -1087,7 +1018,6 @@ static void update_wall_time(void) cycle_t offset; int shift = 0, maxshift; unsigned long flags; - s64 remainder; write_seqlock_irqsave(&timekeeper.lock, flags); @@ -1102,6 +1032,8 @@ static void update_wall_time(void) #else offset = (clock->read(clock) - clock->cycle_last) & clock->mask; #endif + timekeeper.xtime_nsec = (s64)timekeeper.xtime.tv_nsec << + timekeeper.shift; /* * With NO_HZ we may have to accumulate many cycle_intervals @@ -1117,36 +1049,64 @@ static void update_wall_time(void) maxshift = (64 - 
(ilog2(ntp_tick_length())+1)) - 1; shift = min(shift, maxshift); while (offset >= timekeeper.cycle_interval) { - offset = logarithmic_accumulation(&timekeeper, offset, shift); + offset = logarithmic_accumulation(offset, shift); if(offset < timekeeper.cycle_interval<> + timekeeper.shift) + 1; + timekeeper.xtime_nsec -= (s64)timekeeper.xtime.tv_nsec << + timekeeper.shift; + timekeeper.ntp_error += timekeeper.xtime_nsec << + timekeeper.ntp_error_shift; /* * Finally, make sure that after the rounding - * xtime_nsec isn't larger than NSEC_PER_SEC + * xtime.tv_nsec isn't larger than NSEC_PER_SEC */ - accumulate_nsecs_to_secs(&timekeeper); + if (unlikely(timekeeper.xtime.tv_nsec >= NSEC_PER_SEC)) { + int leap; + timekeeper.xtime.tv_nsec -= NSEC_PER_SEC; + timekeeper.xtime.tv_sec++; + leap = second_overflow(timekeeper.xtime.tv_sec); + timekeeper.xtime.tv_sec += leap; + timekeeper.wall_to_monotonic.tv_sec -= leap; + if (leap) + clock_was_set_delayed(); + } - timekeeping_update(&timekeeper, false); + timekeeping_update(false); out: write_sequnlock_irqrestore(&timekeeper.lock, flags); @@ -1191,20 +1151,21 @@ void get_monotonic_boottime(struct timespec *ts) { struct timespec tomono, sleep; unsigned int seq; + s64 nsecs; WARN_ON(timekeeping_suspended); do { seq = read_seqbegin(&timekeeper.lock); - ts->tv_sec = timekeeper.xtime_sec; - ts->tv_nsec = timekeeping_get_ns(&timekeeper); + *ts = timekeeper.xtime; tomono = timekeeper.wall_to_monotonic; sleep = timekeeper.total_sleep_time; + nsecs = timekeeping_get_ns(); } while (read_seqretry(&timekeeper.lock, seq)); set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec, - ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec); + ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs); } EXPORT_SYMBOL_GPL(get_monotonic_boottime); @@ -1237,13 +1198,13 @@ EXPORT_SYMBOL_GPL(monotonic_to_bootbased); unsigned long get_seconds(void) { - return timekeeper.xtime_sec; + return timekeeper.xtime.tv_sec; } EXPORT_SYMBOL(get_seconds); struct timespec __current_kernel_time(void) { - return tk_xtime(&timekeeper); + return timekeeper.xtime; } struct timespec current_kernel_time(void) @@ -1254,7 +1215,7 @@ struct timespec current_kernel_time(void) do { seq = read_seqbegin(&timekeeper.lock); - now = tk_xtime(&timekeeper); + now = timekeeper.xtime; } while (read_seqretry(&timekeeper.lock, seq)); return now; @@ -1269,7 +1230,7 @@ struct timespec get_monotonic_coarse(void) do { seq = read_seqbegin(&timekeeper.lock); - now = tk_xtime(&timekeeper); + now = timekeeper.xtime; mono = timekeeper.wall_to_monotonic; } while (read_seqretry(&timekeeper.lock, seq)); @@ -1304,7 +1265,7 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, do { seq = read_seqbegin(&timekeeper.lock); - *xtim = tk_xtime(&timekeeper); + *xtim = timekeeper.xtime; *wtom = timekeeper.wall_to_monotonic; *sleep = timekeeper.total_sleep_time; } while (read_seqretry(&timekeeper.lock, seq)); @@ -1328,8 +1289,11 @@ ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot) do { seq = read_seqbegin(&timekeeper.lock); - secs = timekeeper.xtime_sec; - nsecs = timekeeping_get_ns(&timekeeper); + secs = timekeeper.xtime.tv_sec; + nsecs = timekeeper.xtime.tv_nsec; + nsecs += timekeeping_get_ns(); + /* If arch requires, add in gettimeoffset() */ + nsecs += arch_gettimeoffset(); *offs_real = timekeeper.offs_real; *offs_boot = timekeeper.offs_boot; diff --git a/trunk/kernel/time/timer_list.c b/trunk/kernel/time/timer_list.c index af5a7e9f164b..3258455549f4 100644 --- 
a/trunk/kernel/time/timer_list.c +++ b/trunk/kernel/time/timer_list.c @@ -167,7 +167,7 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now) { struct tick_sched *ts = tick_get_tick_sched(cpu); P(nohz_mode); - P_ns(last_tick); + P_ns(idle_tick); P(tick_stopped); P(idle_jiffies); P(idle_calls); @@ -259,7 +259,7 @@ static int timer_list_show(struct seq_file *m, void *v) u64 now = ktime_to_ns(ktime_get()); int cpu; - SEQ_printf(m, "Timer List Version: v0.7\n"); + SEQ_printf(m, "Timer List Version: v0.6\n"); SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES); SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now); diff --git a/trunk/kernel/timer.c b/trunk/kernel/timer.c index a61c09374eba..6ec7e7e0db43 100644 --- a/trunk/kernel/timer.c +++ b/trunk/kernel/timer.c @@ -77,7 +77,6 @@ struct tvec_base { struct timer_list *running_timer; unsigned long timer_jiffies; unsigned long next_timer; - unsigned long active_timers; struct tvec_root tv1; struct tvec tv2; struct tvec tv3; @@ -331,8 +330,7 @@ void set_timer_slack(struct timer_list *timer, int slack_hz) } EXPORT_SYMBOL_GPL(set_timer_slack); -static void -__internal_add_timer(struct tvec_base *base, struct timer_list *timer) +static void internal_add_timer(struct tvec_base *base, struct timer_list *timer) { unsigned long expires = timer->expires; unsigned long idx = expires - base->timer_jiffies; @@ -374,19 +372,6 @@ __internal_add_timer(struct tvec_base *base, struct timer_list *timer) list_add_tail(&timer->entry, vec); } -static void internal_add_timer(struct tvec_base *base, struct timer_list *timer) -{ - __internal_add_timer(base, timer); - /* - * Update base->active_timers and base->next_timer - */ - if (!tbase_get_deferrable(timer->base)) { - if (time_before(timer->expires, base->next_timer)) - base->next_timer = timer->expires; - base->active_timers++; - } -} - #ifdef CONFIG_TIMER_STATS void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr) { @@ -669,7 +654,8 @@ void init_timer_deferrable_key(struct timer_list *timer, } EXPORT_SYMBOL(init_timer_deferrable_key); -static inline void detach_timer(struct timer_list *timer, bool clear_pending) +static inline void detach_timer(struct timer_list *timer, + int clear_pending) { struct list_head *entry = &timer->entry; @@ -681,29 +667,6 @@ static inline void detach_timer(struct timer_list *timer, bool clear_pending) entry->prev = LIST_POISON2; } -static inline void -detach_expired_timer(struct timer_list *timer, struct tvec_base *base) -{ - detach_timer(timer, true); - if (!tbase_get_deferrable(timer->base)) - timer->base->active_timers--; -} - -static int detach_if_pending(struct timer_list *timer, struct tvec_base *base, - bool clear_pending) -{ - if (!timer_pending(timer)) - return 0; - - detach_timer(timer, clear_pending); - if (!tbase_get_deferrable(timer->base)) { - timer->base->active_timers--; - if (timer->expires == base->next_timer) - base->next_timer = base->timer_jiffies; - } - return 1; -} - /* * We are using hashed locking: holding per_cpu(tvec_bases).lock * means that all timers which are tied to this base via timer->base are @@ -749,9 +712,16 @@ __mod_timer(struct timer_list *timer, unsigned long expires, base = lock_timer_base(timer, &flags); - ret = detach_if_pending(timer, base, false); - if (!ret && pending_only) - goto out_unlock; + if (timer_pending(timer)) { + detach_timer(timer, 0); + if (timer->expires == base->next_timer && + !tbase_get_deferrable(timer->base)) + base->next_timer = base->timer_jiffies; + ret = 1; + } 
else { + if (pending_only) + goto out_unlock; + } debug_activate(timer, expires); @@ -782,6 +752,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires, } timer->expires = expires; + if (time_before(timer->expires, base->next_timer) && + !tbase_get_deferrable(timer->base)) + base->next_timer = timer->expires; internal_add_timer(base, timer); out_unlock: @@ -947,6 +920,9 @@ void add_timer_on(struct timer_list *timer, int cpu) spin_lock_irqsave(&base->lock, flags); timer_set_base(timer, base); debug_activate(timer, timer->expires); + if (time_before(timer->expires, base->next_timer) && + !tbase_get_deferrable(timer->base)) + base->next_timer = timer->expires; internal_add_timer(base, timer); /* * Check whether the other CPU is idle and needs to be @@ -983,7 +959,13 @@ int del_timer(struct timer_list *timer) timer_stats_timer_clear_start_info(timer); if (timer_pending(timer)) { base = lock_timer_base(timer, &flags); - ret = detach_if_pending(timer, base, true); + if (timer_pending(timer)) { + detach_timer(timer, 1); + if (timer->expires == base->next_timer && + !tbase_get_deferrable(timer->base)) + base->next_timer = base->timer_jiffies; + ret = 1; + } spin_unlock_irqrestore(&base->lock, flags); } @@ -1008,10 +990,19 @@ int try_to_del_timer_sync(struct timer_list *timer) base = lock_timer_base(timer, &flags); - if (base->running_timer != timer) { - timer_stats_timer_clear_start_info(timer); - ret = detach_if_pending(timer, base, true); + if (base->running_timer == timer) + goto out; + + timer_stats_timer_clear_start_info(timer); + ret = 0; + if (timer_pending(timer)) { + detach_timer(timer, 1); + if (timer->expires == base->next_timer && + !tbase_get_deferrable(timer->base)) + base->next_timer = base->timer_jiffies; + ret = 1; } +out: spin_unlock_irqrestore(&base->lock, flags); return ret; @@ -1098,8 +1089,7 @@ static int cascade(struct tvec_base *base, struct tvec *tv, int index) */ list_for_each_entry_safe(timer, tmp, &tv_list, entry) { BUG_ON(tbase_get_base(timer->base) != base); - /* No accounting, while moving them */ - __internal_add_timer(base, timer); + internal_add_timer(base, timer); } return index; @@ -1188,7 +1178,7 @@ static inline void __run_timers(struct tvec_base *base) timer_stats_account_timer(timer); base->running_timer = timer; - detach_expired_timer(timer, base); + detach_timer(timer, 1); spin_unlock_irq(&base->lock); call_timer_fn(timer, fn, data); @@ -1326,21 +1316,18 @@ static unsigned long cmp_next_hrtimer_event(unsigned long now, unsigned long get_next_timer_interrupt(unsigned long now) { struct tvec_base *base = __this_cpu_read(tvec_bases); - unsigned long expires = now + NEXT_TIMER_MAX_DELTA; + unsigned long expires; /* * Pretend that there is no timer pending if the cpu is offline. * Possible pending timers will be migrated later to an active cpu. 
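/*
 * The timer code above keeps base->next_timer as a cached "earliest
 * pending expiry" hint: it is lowered whenever a timer is queued ahead of
 * it and recomputed by a full scan once it falls behind base->timer_jiffies.
 * A standalone model of that caching scheme; the flat array standing in
 * for the timer wheel and all names here are illustrative only.
 */
#include <stdio.h>
#include <limits.h>

#define NTIMERS 8

static unsigned long expires[NTIMERS];
static int pending[NTIMERS];
static unsigned long now = 100;		/* stand-in for timer_jiffies */
static unsigned long next_hint = 100;	/* cached earliest expiry, may be stale */

static void add_timer_model(int i, unsigned long when)
{
	expires[i] = when;
	pending[i] = 1;
	if (when < next_hint)		/* keep the hint a lower bound */
		next_hint = when;
}

static unsigned long rescan(void)	/* full scan, like __next_timer_interrupt() */
{
	unsigned long best = ULONG_MAX;
	int i;

	for (i = 0; i < NTIMERS; i++)
		if (pending[i] && expires[i] < best)
			best = expires[i];
	return best;
}

static unsigned long next_interrupt(void)
{
	if (next_hint <= now)		/* hint stale: recompute it */
		next_hint = rescan();
	return next_hint;
}

int main(void)
{
	add_timer_model(0, 150);
	add_timer_model(1, 120);
	printf("next expiry at %lu\n", next_interrupt());
	return 0;
}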
*/ if (cpu_is_offline(smp_processor_id())) - return expires; - + return now + NEXT_TIMER_MAX_DELTA; spin_lock(&base->lock); - if (base->active_timers) { - if (time_before_eq(base->next_timer, base->timer_jiffies)) - base->next_timer = __next_timer_interrupt(base); - expires = base->next_timer; - } + if (time_before_eq(base->next_timer, base->timer_jiffies)) + base->next_timer = __next_timer_interrupt(base); + expires = base->next_timer; spin_unlock(&base->lock); if (time_before_eq(expires, now)) @@ -1717,7 +1704,6 @@ static int __cpuinit init_timers_cpu(int cpu) base->timer_jiffies = jiffies; base->next_timer = base->timer_jiffies; - base->active_timers = 0; return 0; } @@ -1728,9 +1714,11 @@ static void migrate_timer_list(struct tvec_base *new_base, struct list_head *hea while (!list_empty(head)) { timer = list_first_entry(head, struct timer_list, entry); - /* We ignore the accounting on the dying cpu */ - detach_timer(timer, false); + detach_timer(timer, 0); timer_set_base(timer, new_base); + if (time_before(timer->expires, new_base->next_timer) && + !tbase_get_deferrable(timer->base)) + new_base->next_timer = timer->expires; internal_add_timer(new_base, timer); } } diff --git a/trunk/kernel/trace/ftrace.c b/trunk/kernel/trace/ftrace.c index b4f20fba09fc..a008663d86c8 100644 --- a/trunk/kernel/trace/ftrace.c +++ b/trunk/kernel/trace/ftrace.c @@ -312,7 +312,7 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list, static int __register_ftrace_function(struct ftrace_ops *ops) { - if (unlikely(ftrace_disabled)) + if (ftrace_disabled) return -ENODEV; if (FTRACE_WARN_ON(ops == &global_ops)) @@ -4299,12 +4299,16 @@ int register_ftrace_function(struct ftrace_ops *ops) mutex_lock(&ftrace_lock); + if (unlikely(ftrace_disabled)) + goto out_unlock; + ret = __register_ftrace_function(ops); if (!ret) ret = ftrace_startup(ops, 0); - mutex_unlock(&ftrace_lock); + out_unlock: + mutex_unlock(&ftrace_lock); return ret; } EXPORT_SYMBOL_GPL(register_ftrace_function); diff --git a/trunk/kernel/trace/ring_buffer.c b/trunk/kernel/trace/ring_buffer.c index 49491fa7daa2..f765465bffe4 100644 --- a/trunk/kernel/trace/ring_buffer.c +++ b/trunk/kernel/trace/ring_buffer.c @@ -3239,10 +3239,6 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) if (cpu_buffer->commit_page == cpu_buffer->reader_page) goto out; - /* Don't bother swapping if the ring buffer is empty */ - if (rb_num_of_entries(cpu_buffer) == 0) - goto out; - /* * Reset the reader page to size zero. 
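/*
 * register_ftrace_function() above now takes ftrace_lock before checking
 * ftrace_disabled, so the "is tracing shut down?" test and the actual
 * registration happen under one lock rather than racing.  A standalone
 * model of that check-and-act-under-one-lock pattern (compile with
 * -lpthread); -1 stands in for -ENODEV and every name is illustrative.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
static int tracing_disabled;
static int registered;

static int register_hook(void)
{
	int ret = -1;

	pthread_mutex_lock(&reg_lock);
	if (tracing_disabled)		/* checked under the lock that guards registration */
		goto out_unlock;
	registered = 1;
	ret = 0;
out_unlock:
	pthread_mutex_unlock(&reg_lock);
	return ret;
}

int main(void)
{
	printf("register -> %d\n", register_hook());
	tracing_disabled = 1;
	printf("register while disabled -> %d (registered=%d)\n",
	       register_hook(), registered);
	return 0;
}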
*/ diff --git a/trunk/kernel/trace/trace.c b/trunk/kernel/trace/trace.c index a120f98c4112..a7fa0702be1c 100644 --- a/trunk/kernel/trace/trace.c +++ b/trunk/kernel/trace/trace.c @@ -830,8 +830,6 @@ int register_tracer(struct tracer *type) current_trace = saved_tracer; if (ret) { printk(KERN_CONT "FAILED!\n"); - /* Add the warning after printing 'FAILED' */ - WARN_ON(1); goto out; } /* Only reset on passing, to avoid touching corrupted buffers */ @@ -1710,11 +1708,9 @@ EXPORT_SYMBOL_GPL(trace_vprintk); static void trace_iterator_increment(struct trace_iterator *iter) { - struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu); - iter->idx++; - if (buf_iter) - ring_buffer_read(buf_iter, NULL); + if (iter->buffer_iter[iter->cpu]) + ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); } static struct trace_entry * @@ -1722,7 +1718,7 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, unsigned long *lost_events) { struct ring_buffer_event *event; - struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); + struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu]; if (buf_iter) event = ring_buffer_iter_peek(buf_iter, ts); @@ -1860,10 +1856,10 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu) tr->data[cpu]->skipped_entries = 0; - buf_iter = trace_buffer_iter(iter, cpu); - if (!buf_iter) + if (!iter->buffer_iter[cpu]) return; + buf_iter = iter->buffer_iter[cpu]; ring_buffer_iter_reset(buf_iter); /* @@ -2209,15 +2205,13 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter) int trace_empty(struct trace_iterator *iter) { - struct ring_buffer_iter *buf_iter; int cpu; /* If we are looking at one CPU buffer, only check that one */ if (iter->cpu_file != TRACE_PIPE_ALL_CPU) { cpu = iter->cpu_file; - buf_iter = trace_buffer_iter(iter, cpu); - if (buf_iter) { - if (!ring_buffer_iter_empty(buf_iter)) + if (iter->buffer_iter[cpu]) { + if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) return 0; } else { if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu)) @@ -2227,9 +2221,8 @@ int trace_empty(struct trace_iterator *iter) } for_each_tracing_cpu(cpu) { - buf_iter = trace_buffer_iter(iter, cpu); - if (buf_iter) { - if (!ring_buffer_iter_empty(buf_iter)) + if (iter->buffer_iter[cpu]) { + if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) return 0; } else { if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu)) @@ -2388,11 +2381,6 @@ __tracing_open(struct inode *inode, struct file *file) if (!iter) return ERR_PTR(-ENOMEM); - iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(), - GFP_KERNEL); - if (!iter->buffer_iter) - goto release; - /* * We make a copy of the current tracer to avoid concurrent * changes on it while we are reading. 
@@ -2453,8 +2441,6 @@ __tracing_open(struct inode *inode, struct file *file) fail: mutex_unlock(&trace_types_lock); kfree(iter->trace); - kfree(iter->buffer_iter); -release: seq_release_private(inode, file); return ERR_PTR(-ENOMEM); } @@ -2495,7 +2481,6 @@ static int tracing_release(struct inode *inode, struct file *file) mutex_destroy(&iter->mutex); free_cpumask_var(iter->started); kfree(iter->trace); - kfree(iter->buffer_iter); seq_release_private(inode, file); return 0; } diff --git a/trunk/kernel/trace/trace.h b/trunk/kernel/trace/trace.h index 55e1f7f0db12..5aec220d2de0 100644 --- a/trunk/kernel/trace/trace.h +++ b/trunk/kernel/trace/trace.h @@ -317,14 +317,6 @@ struct tracer { #define TRACE_PIPE_ALL_CPU -1 -static inline struct ring_buffer_iter * -trace_buffer_iter(struct trace_iterator *iter, int cpu) -{ - if (iter->buffer_iter && iter->buffer_iter[cpu]) - return iter->buffer_iter[cpu]; - return NULL; -} - int tracer_init(struct tracer *t, struct trace_array *tr); int tracing_is_enabled(void); void trace_wake_up(void); diff --git a/trunk/kernel/trace/trace_functions_graph.c b/trunk/kernel/trace/trace_functions_graph.c index ce27c8ba8d31..a7d2a4c653d8 100644 --- a/trunk/kernel/trace/trace_functions_graph.c +++ b/trunk/kernel/trace/trace_functions_graph.c @@ -538,7 +538,7 @@ get_return_for_leaf(struct trace_iterator *iter, next = &data->ret; } else { - ring_iter = trace_buffer_iter(iter, iter->cpu); + ring_iter = iter->buffer_iter[iter->cpu]; /* First peek to compare current entry and the next one */ if (ring_iter) diff --git a/trunk/kernel/trace/trace_output.c b/trunk/kernel/trace/trace_output.c index 123b189c732c..df611a0e76c5 100644 --- a/trunk/kernel/trace/trace_output.c +++ b/trunk/kernel/trace/trace_output.c @@ -1325,4 +1325,4 @@ __init static int init_events(void) return 0; } -early_initcall(init_events); +device_initcall(init_events); diff --git a/trunk/lib/list_debug.c b/trunk/lib/list_debug.c index c24c2f7e296f..23a5e031cd8b 100644 --- a/trunk/lib/list_debug.c +++ b/trunk/lib/list_debug.c @@ -87,10 +87,12 @@ void __list_add_rcu(struct list_head *new, struct list_head *prev, struct list_head *next) { WARN(next->prev != prev, - "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n", + "list_add_rcu corruption. next->prev should be " + "prev (%p), but was %p. (next=%p).\n", prev, next->prev, next); WARN(prev->next != next, - "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n", + "list_add_rcu corruption. prev->next should be " + "next (%p), but was %p. (prev=%p).\n", next, prev->next, prev); new->next = next; new->prev = prev; diff --git a/trunk/mm/bootmem.c b/trunk/mm/bootmem.c index bcb63ac48cc5..73096630cb35 100644 --- a/trunk/mm/bootmem.c +++ b/trunk/mm/bootmem.c @@ -710,10 +710,6 @@ void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat, if (ptr) return ptr; - /* do not panic in alloc_bootmem_bdata() */ - if (limit && goal + size > limit) - limit = 0; - ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit); if (ptr) return ptr; diff --git a/trunk/mm/memory-failure.c b/trunk/mm/memory-failure.c index de4ce7058450..ab1e7145e290 100644 --- a/trunk/mm/memory-failure.c +++ b/trunk/mm/memory-failure.c @@ -345,14 +345,14 @@ static void add_to_kill(struct task_struct *tsk, struct page *p, * Also when FAIL is set do a force kill because something went * wrong earlier. 
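The memory-failure hunk just below narrows the force-kill decision back to "the page was dirty": the MF_MUST_KILL override disappears from both the writeback check and the kill_procs() call. The decision being reverted, as a rough standalone sketch in plain C (the helper name is made up):

#include <stdbool.h>
#include <stdio.h>

/*
 * Send the mapping processes an uncatchable kill only when data may already
 * be lost (the page was dirty) or the caller explicitly demanded a kill; a
 * clean page can simply be dropped and re-read later.
 */
static bool should_force_kill(bool page_dirty, bool caller_must_kill)
{
	return page_dirty || caller_must_kill;
}

int main(void)
{
	printf("clean, no override: %d\n", should_force_kill(false, false));
	printf("clean, must-kill:   %d\n", should_force_kill(false, true));
	printf("dirty page:         %d\n", should_force_kill(true, false));
	return 0;
}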
*/ -static void kill_procs(struct list_head *to_kill, int forcekill, int trapno, +static void kill_procs(struct list_head *to_kill, int doit, int trapno, int fail, struct page *page, unsigned long pfn, int flags) { struct to_kill *tk, *next; list_for_each_entry_safe (tk, next, to_kill, nd) { - if (forcekill) { + if (doit) { /* * In case something went wrong with munmapping * make sure the process doesn't catch the @@ -858,7 +858,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, struct address_space *mapping; LIST_HEAD(tokill); int ret; - int kill = 1, forcekill; + int kill = 1; struct page *hpage = compound_head(p); struct page *ppage; @@ -888,7 +888,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, * be called inside page lock (it's recommended but not enforced). */ mapping = page_mapping(hpage); - if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping && + if (!PageDirty(hpage) && mapping && mapping_cap_writeback_dirty(mapping)) { if (page_mkclean(hpage)) { SetPageDirty(hpage); @@ -965,14 +965,12 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, * Now that the dirty bit has been propagated to the * struct page and all unmaps done we can decide if * killing is needed or not. Only kill when the page - * was dirty or the process is not restartable, - * otherwise the tokill list is merely + * was dirty, otherwise the tokill list is merely * freed. When there was a problem unmapping earlier * use a more force-full uncatchable kill to prevent * any accesses to the poisoned memory. */ - forcekill = PageDirty(ppage) || (flags & MF_MUST_KILL); - kill_procs(&tokill, forcekill, trapno, + kill_procs(&tokill, !!PageDirty(ppage), trapno, ret != SWAP_SUCCESS, p, pfn, flags); return ret; diff --git a/trunk/mm/page_alloc.c b/trunk/mm/page_alloc.c index 4a4f9219683f..44030096da63 100644 --- a/trunk/mm/page_alloc.c +++ b/trunk/mm/page_alloc.c @@ -5635,12 +5635,7 @@ static struct page * __alloc_contig_migrate_alloc(struct page *page, unsigned long private, int **resultp) { - gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE; - - if (PageHighMem(page)) - gfp_mask |= __GFP_HIGHMEM; - - return alloc_page(gfp_mask); + return alloc_page(GFP_HIGHUSER_MOVABLE); } /* [start, end) must belong to a single zone. */ diff --git a/trunk/mm/shmem.c b/trunk/mm/shmem.c index c15b998e5a86..bd106361be4b 100644 --- a/trunk/mm/shmem.c +++ b/trunk/mm/shmem.c @@ -1877,7 +1877,7 @@ static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) } static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode, - bool excl) + struct nameidata *nd) { return shmem_mknod(dir, dentry, mode | S_IFREG, 0); } diff --git a/trunk/mm/vmscan.c b/trunk/mm/vmscan.c index 66e431060c05..661576324c7f 100644 --- a/trunk/mm/vmscan.c +++ b/trunk/mm/vmscan.c @@ -2688,10 +2688,7 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx) * them before going back to sleep. 
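The kswapd_try_to_sleep() hunk here removes the kthread_should_stop() test in front of schedule(), so a kswapd that has just been asked to stop would still go to sleep. The underlying pattern, re-checking a stop request immediately before blocking, shown as a plain userspace sketch (pthread primitives stand in for the kernel's kthread and scheduler calls):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool should_stop;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!atomic_load(&should_stop)) {
		/* Only block if nobody asked us to stop in the meantime. */
		pthread_cond_wait(&wake, &lock);
	}
	pthread_mutex_unlock(&lock);
	puts("worker exiting");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	sleep(1);

	/* Ask the worker to stop, then wake it so it notices the flag. */
	atomic_store(&should_stop, true);
	pthread_mutex_lock(&lock);
	pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}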
*/ set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); - - if (!kthread_should_stop()) - schedule(); - + schedule(); set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); } else { if (remaining) diff --git a/trunk/net/8021q/vlan.c b/trunk/net/8021q/vlan.c index 9096bcb08132..6089f0cf23b4 100644 --- a/trunk/net/8021q/vlan.c +++ b/trunk/net/8021q/vlan.c @@ -403,9 +403,6 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, break; case NETDEV_DOWN: - if (dev->features & NETIF_F_HW_VLAN_FILTER) - vlan_vid_del(dev, 0); - /* Put all VLANs for this dev in the down state too. */ for (i = 0; i < VLAN_N_VID; i++) { vlandev = vlan_group_get_device(grp, i); diff --git a/trunk/net/ax25/af_ax25.c b/trunk/net/ax25/af_ax25.c index 779095ded689..051f7abae66d 100644 --- a/trunk/net/ax25/af_ax25.c +++ b/trunk/net/ax25/af_ax25.c @@ -842,7 +842,6 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol, case AX25_P_NETROM: if (ax25_protocol_is_registered(AX25_P_NETROM)) return -ESOCKTNOSUPPORT; - break; #endif #ifdef CONFIG_ROSE_MODULE case AX25_P_ROSE: diff --git a/trunk/net/batman-adv/bridge_loop_avoidance.c b/trunk/net/batman-adv/bridge_loop_avoidance.c index c5863f499133..8bf97515a77d 100644 --- a/trunk/net/batman-adv/bridge_loop_avoidance.c +++ b/trunk/net/batman-adv/bridge_loop_avoidance.c @@ -1351,7 +1351,6 @@ void bla_free(struct bat_priv *bat_priv) * @bat_priv: the bat priv with all the soft interface information * @skb: the frame to be checked * @vid: the VLAN ID of the frame - * @is_bcast: the packet came in a broadcast packet type. * * bla_rx avoidance checks if: * * we have to race for a claim @@ -1362,8 +1361,7 @@ void bla_free(struct bat_priv *bat_priv) * process the skb. * */ -int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid, - bool is_bcast) +int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid) { struct ethhdr *ethhdr; struct claim search_claim, *claim = NULL; @@ -1382,7 +1380,7 @@ int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid, if (unlikely(atomic_read(&bat_priv->bla_num_requests))) /* don't allow broadcasts while requests are in flight */ - if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) + if (is_multicast_ether_addr(ethhdr->h_dest)) goto handled; memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN); @@ -1408,13 +1406,8 @@ int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid, } /* if it is a broadcast ... */ - if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) { - /* ... drop it. the responsible gateway is in charge. - * - * We need to check is_bcast because with the gateway - * feature, broadcasts (like DHCP requests) may be sent - * using a unicast packet type. - */ + if (is_multicast_ether_addr(ethhdr->h_dest)) { + /* ... drop it. the responsible gateway is in charge. */ goto handled; } else { /* seems the client considers us as its best gateway. 
diff --git a/trunk/net/batman-adv/bridge_loop_avoidance.h b/trunk/net/batman-adv/bridge_loop_avoidance.h index dc5227b398d4..e39f93acc28f 100644 --- a/trunk/net/batman-adv/bridge_loop_avoidance.h +++ b/trunk/net/batman-adv/bridge_loop_avoidance.h @@ -23,8 +23,7 @@ #define _NET_BATMAN_ADV_BLA_H_ #ifdef CONFIG_BATMAN_ADV_BLA -int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid, - bool is_bcast); +int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid); int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid); int bla_is_backbone_gw(struct sk_buff *skb, struct orig_node *orig_node, int hdr_size); @@ -42,7 +41,7 @@ void bla_free(struct bat_priv *bat_priv); #else /* ifdef CONFIG_BATMAN_ADV_BLA */ static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, - short vid, bool is_bcast) + short vid) { return 0; } diff --git a/trunk/net/batman-adv/soft-interface.c b/trunk/net/batman-adv/soft-interface.c index a0ec0e4ada4c..6e2530b02043 100644 --- a/trunk/net/batman-adv/soft-interface.c +++ b/trunk/net/batman-adv/soft-interface.c @@ -256,11 +256,7 @@ void interface_rx(struct net_device *soft_iface, struct bat_priv *bat_priv = netdev_priv(soft_iface); struct ethhdr *ethhdr; struct vlan_ethhdr *vhdr; - struct batman_header *batadv_header = (struct batman_header *)skb->data; short vid __maybe_unused = -1; - bool is_bcast; - - is_bcast = (batadv_header->packet_type == BAT_BCAST); /* check if enough space is available for pulling, and pull */ if (!pskb_may_pull(skb, hdr_size)) @@ -306,7 +302,7 @@ void interface_rx(struct net_device *soft_iface, /* Let the bridge loop avoidance check the packet. If will * not handle it, we can safely push it up. */ - if (bla_rx(bat_priv, skb, vid, is_bcast)) + if (bla_rx(bat_priv, skb, vid)) goto out; netif_rx(skb); diff --git a/trunk/net/caif/caif_dev.c b/trunk/net/caif/caif_dev.c index 8c83c175b03a..554b31289607 100644 --- a/trunk/net/caif/caif_dev.c +++ b/trunk/net/caif/caif_dev.c @@ -561,9 +561,9 @@ static int __init caif_device_init(void) static void __exit caif_device_exit(void) { + unregister_pernet_subsys(&caif_net_ops); unregister_netdevice_notifier(&caif_device_notifier); dev_remove_pack(&caif_packet_type); - unregister_pernet_subsys(&caif_net_ops); } module_init(caif_device_init); diff --git a/trunk/net/ceph/messenger.c b/trunk/net/ceph/messenger.c index 10255e81be79..b332c3d76059 100644 --- a/trunk/net/ceph/messenger.c +++ b/trunk/net/ceph/messenger.c @@ -1423,7 +1423,7 @@ static int process_connect(struct ceph_connection *con) * dropped messages. */ dout("process_connect got RESET peer seq %u\n", - le32_to_cpu(con->in_reply.connect_seq)); + le32_to_cpu(con->in_connect.connect_seq)); pr_err("%s%lld %s connection reset\n", ENTITY_NAME(con->peer_name), ceph_pr_addr(&con->peer_addr.in_addr)); @@ -1450,10 +1450,10 @@ static int process_connect(struct ceph_connection *con) * If we sent a smaller connect_seq than the peer has, try * again with a larger value. 
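In the ceph messenger hunk that follows, a RETRY_SESSION reply makes the client adopt the peer's larger connect_seq and resend its connect request. Stripped of the wire format, that negotiation step looks roughly like this (invented names, not the actual ceph code):

#include <stdint.h>
#include <stdio.h>

struct conn_lite {
	uint32_t connect_seq;	/* our idea of the session sequence */
};

/* Peer answered RETRY_SESSION with its own sequence: catch up and retry. */
static void handle_retry_session(struct conn_lite *con, uint32_t peer_seq)
{
	printf("got RETRY_SESSION my seq %u, peer %u\n",
	       con->connect_seq, peer_seq);
	con->connect_seq = peer_seq;	/* adopt the larger value */
	/* ...a real implementation would now re-send its connect request */
}

int main(void)
{
	struct conn_lite con = { .connect_seq = 3 };

	handle_retry_session(&con, 7);
	printf("retrying with seq %u\n", con.connect_seq);
	return 0;
}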
*/ - dout("process_connect got RETRY_SESSION my seq %u, peer %u\n", + dout("process_connect got RETRY my seq = %u, peer_seq = %u\n", le32_to_cpu(con->out_connect.connect_seq), - le32_to_cpu(con->in_reply.connect_seq)); - con->connect_seq = le32_to_cpu(con->in_reply.connect_seq); + le32_to_cpu(con->in_connect.connect_seq)); + con->connect_seq = le32_to_cpu(con->in_connect.connect_seq); ceph_con_out_kvec_reset(con); ret = prepare_write_connect(con); if (ret < 0) @@ -1468,9 +1468,9 @@ static int process_connect(struct ceph_connection *con) */ dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n", con->peer_global_seq, - le32_to_cpu(con->in_reply.global_seq)); + le32_to_cpu(con->in_connect.global_seq)); get_global_seq(con->msgr, - le32_to_cpu(con->in_reply.global_seq)); + le32_to_cpu(con->in_connect.global_seq)); ceph_con_out_kvec_reset(con); ret = prepare_write_connect(con); if (ret < 0) diff --git a/trunk/net/core/dev.c b/trunk/net/core/dev.c index 1cb0d8a6aa6c..84f01ba81a34 100644 --- a/trunk/net/core/dev.c +++ b/trunk/net/core/dev.c @@ -2444,12 +2444,8 @@ static void skb_update_prio(struct sk_buff *skb) { struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap); - if (!skb->priority && skb->sk && map) { - unsigned int prioidx = skb->sk->sk_cgrp_prioidx; - - if (prioidx < map->priomap_len) - skb->priority = map->priomap[prioidx]; - } + if ((!skb->priority) && (skb->sk) && map) + skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx]; } #else #define skb_update_prio(skb) @@ -6283,8 +6279,7 @@ static struct hlist_head *netdev_create_hash(void) /* Initialize per network namespace state */ static int __net_init netdev_init(struct net *net) { - if (net != &init_net) - INIT_LIST_HEAD(&net->dev_base_head); + INIT_LIST_HEAD(&net->dev_base_head); net->dev_name_head = netdev_create_hash(); if (net->dev_name_head == NULL) diff --git a/trunk/net/core/net_namespace.c b/trunk/net/core/net_namespace.c index 42f1e1c7514f..dddbacb8f28c 100644 --- a/trunk/net/core/net_namespace.c +++ b/trunk/net/core/net_namespace.c @@ -27,9 +27,7 @@ static DEFINE_MUTEX(net_mutex); LIST_HEAD(net_namespace_list); EXPORT_SYMBOL_GPL(net_namespace_list); -struct net init_net = { - .dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head), -}; +struct net init_net; EXPORT_SYMBOL(init_net); #define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */ diff --git a/trunk/net/core/netprio_cgroup.c b/trunk/net/core/netprio_cgroup.c index b2e9caa1ad1a..5b8aa2fae48b 100644 --- a/trunk/net/core/netprio_cgroup.c +++ b/trunk/net/core/netprio_cgroup.c @@ -49,9 +49,8 @@ static int get_prioidx(u32 *prio) return -ENOSPC; } set_bit(prioidx, prioidx_map); - if (atomic_read(&max_prioidx) < prioidx) - atomic_set(&max_prioidx, prioidx); spin_unlock_irqrestore(&prioidx_map_lock, flags); + atomic_set(&max_prioidx, prioidx); *prio = prioidx; return 0; } @@ -65,7 +64,7 @@ static void put_prioidx(u32 idx) spin_unlock_irqrestore(&prioidx_map_lock, flags); } -static int extend_netdev_table(struct net_device *dev, u32 new_len) +static void extend_netdev_table(struct net_device *dev, u32 new_len) { size_t new_size = sizeof(struct netprio_map) + ((sizeof(u32) * new_len)); @@ -77,7 +76,7 @@ static int extend_netdev_table(struct net_device *dev, u32 new_len) if (!new_priomap) { pr_warn("Unable to alloc new priomap!\n"); - return -ENOMEM; + return; } for (i = 0; @@ -90,79 +89,46 @@ static int extend_netdev_table(struct net_device *dev, u32 new_len) rcu_assign_pointer(dev->priomap, new_priomap); if (old_priomap) kfree_rcu(old_priomap, rcu); - 
return 0; } -static int write_update_netdev_table(struct net_device *dev) +static void update_netdev_tables(void) { - int ret = 0; - u32 max_len; - struct netprio_map *map; - - rtnl_lock(); - max_len = atomic_read(&max_prioidx) + 1; - map = rtnl_dereference(dev->priomap); - if (!map || map->priomap_len < max_len) - ret = extend_netdev_table(dev, max_len); - rtnl_unlock(); - - return ret; -} - -static int update_netdev_tables(void) -{ - int ret = 0; struct net_device *dev; - u32 max_len; + u32 max_len = atomic_read(&max_prioidx) + 1; struct netprio_map *map; rtnl_lock(); - max_len = atomic_read(&max_prioidx) + 1; for_each_netdev(&init_net, dev) { map = rtnl_dereference(dev->priomap); - /* - * don't allocate priomap if we didn't - * change net_prio.ifpriomap (map == NULL), - * this will speed up skb_update_prio. - */ - if (map && map->priomap_len < max_len) { - ret = extend_netdev_table(dev, max_len); - if (ret < 0) - break; - } + if ((!map) || + (map->priomap_len < max_len)) + extend_netdev_table(dev, max_len); } rtnl_unlock(); - return ret; } static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp) { struct cgroup_netprio_state *cs; - int ret = -EINVAL; + int ret; cs = kzalloc(sizeof(*cs), GFP_KERNEL); if (!cs) return ERR_PTR(-ENOMEM); - if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx) - goto out; + if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx) { + kfree(cs); + return ERR_PTR(-EINVAL); + } ret = get_prioidx(&cs->prioidx); - if (ret < 0) { + if (ret != 0) { pr_warn("No space in priority index array\n"); - goto out; - } - - ret = update_netdev_tables(); - if (ret < 0) { - put_prioidx(cs->prioidx); - goto out; + kfree(cs); + return ERR_PTR(ret); } return &cs->css; -out: - kfree(cs); - return ERR_PTR(ret); } static void cgrp_destroy(struct cgroup *cgrp) @@ -175,7 +141,7 @@ static void cgrp_destroy(struct cgroup *cgrp) rtnl_lock(); for_each_netdev(&init_net, dev) { map = rtnl_dereference(dev->priomap); - if (map && cs->prioidx < map->priomap_len) + if (map) map->priomap[cs->prioidx] = 0; } rtnl_unlock(); @@ -199,7 +165,7 @@ static int read_priomap(struct cgroup *cont, struct cftype *cft, rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) { map = rcu_dereference(dev->priomap); - priority = (map && prioidx < map->priomap_len) ? map->priomap[prioidx] : 0; + priority = map ? 
map->priomap[prioidx] : 0; cb->fill(cb, dev->name, priority); } rcu_read_unlock(); @@ -254,17 +220,13 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft, if (!dev) goto out_free_devname; - ret = write_update_netdev_table(dev); - if (ret < 0) - goto out_put_dev; - + update_netdev_tables(); + ret = 0; rcu_read_lock(); map = rcu_dereference(dev->priomap); if (map) map->priomap[prioidx] = priority; rcu_read_unlock(); - -out_put_dev: dev_put(dev); out_free_devname: diff --git a/trunk/net/core/scm.c b/trunk/net/core/scm.c index 8f6ccfd68ef4..611c5efd4cb0 100644 --- a/trunk/net/core/scm.c +++ b/trunk/net/core/scm.c @@ -109,9 +109,25 @@ void __scm_destroy(struct scm_cookie *scm) if (fpl) { scm->fp = NULL; - for (i=fpl->count-1; i>=0; i--) - fput(fpl->fp[i]); - kfree(fpl); + if (current->scm_work_list) { + list_add_tail(&fpl->list, current->scm_work_list); + } else { + LIST_HEAD(work_list); + + current->scm_work_list = &work_list; + + list_add(&fpl->list, &work_list); + while (!list_empty(&work_list)) { + fpl = list_first_entry(&work_list, struct scm_fp_list, list); + + list_del(&fpl->list); + for (i=fpl->count-1; i>=0; i--) + fput(fpl->fp[i]); + kfree(fpl); + } + + current->scm_work_list = NULL; + } } } EXPORT_SYMBOL(__scm_destroy); diff --git a/trunk/net/core/skbuff.c b/trunk/net/core/skbuff.c index d124306b81fd..46a3d23d259e 100644 --- a/trunk/net/core/skbuff.c +++ b/trunk/net/core/skbuff.c @@ -353,7 +353,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) { + if (fragsz <= PAGE_SIZE && !(gfp_mask & __GFP_WAIT)) { void *data = netdev_alloc_frag(fragsz); if (likely(data)) { diff --git a/trunk/net/ieee802154/dgram.c b/trunk/net/ieee802154/dgram.c index 16705611589a..6fbb2ad7bb6d 100644 --- a/trunk/net/ieee802154/dgram.c +++ b/trunk/net/ieee802154/dgram.c @@ -230,12 +230,6 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk, mtu = dev->mtu; pr_debug("name = %s, mtu = %u\n", dev->name, mtu); - if (size > mtu) { - pr_debug("size = %Zu, mtu = %u\n", size, mtu); - err = -EINVAL; - goto out_dev; - } - hlen = LL_RESERVED_SPACE(dev); tlen = dev->needed_tailroom; skb = sock_alloc_send_skb(sk, hlen + tlen + size, @@ -264,6 +258,12 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk, if (err < 0) goto out_skb; + if (size > mtu) { + pr_debug("size = %Zu, mtu = %u\n", size, mtu); + err = -EINVAL; + goto out_skb; + } + skb->dev = dev; skb->sk = sk; skb->protocol = htons(ETH_P_IEEE802154); diff --git a/trunk/net/ipv4/cipso_ipv4.c b/trunk/net/ipv4/cipso_ipv4.c index 667c1d4ca984..c48adc565e92 100644 --- a/trunk/net/ipv4/cipso_ipv4.c +++ b/trunk/net/ipv4/cipso_ipv4.c @@ -1725,10 +1725,8 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) case CIPSO_V4_TAG_LOCAL: /* This is a non-standard tag that we only allow for * local connections, so if the incoming interface is - * not the loopback device drop the packet. Further, - * there is no legitimate reason for setting this from - * userspace so reject it if skb is NULL. */ - if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) { + * not the loopback device drop the packet. 
*/ + if (!(skb->dev->flags & IFF_LOOPBACK)) { err_offset = opt_iter; goto validate_return_locked; } diff --git a/trunk/net/mac80211/mlme.c b/trunk/net/mac80211/mlme.c index 0db5d34a06b6..a4bb856de08f 100644 --- a/trunk/net/mac80211/mlme.c +++ b/trunk/net/mac80211/mlme.c @@ -2174,13 +2174,15 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, sdata->name, mgmt->sa, status_code); ieee80211_destroy_assoc_data(sdata, false); } else { + printk(KERN_DEBUG "%s: associated\n", sdata->name); + if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) { /* oops -- internal error -- send timeout for now */ - ieee80211_destroy_assoc_data(sdata, false); + ieee80211_destroy_assoc_data(sdata, true); + sta_info_destroy_addr(sdata, mgmt->bssid); cfg80211_put_bss(*bss); return RX_MGMT_CFG80211_ASSOC_TIMEOUT; } - printk(KERN_DEBUG "%s: associated\n", sdata->name); /* * destroy assoc_data afterwards, as otherwise an idle diff --git a/trunk/net/mac80211/rc80211_minstrel_ht.c b/trunk/net/mac80211/rc80211_minstrel_ht.c index f9e51ef8dfa2..2d1acc6c5445 100644 --- a/trunk/net/mac80211/rc80211_minstrel_ht.c +++ b/trunk/net/mac80211/rc80211_minstrel_ht.c @@ -809,7 +809,7 @@ minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp) max_rates = sband->n_bitrates; } - msp = kzalloc(sizeof(*msp), gfp); + msp = kzalloc(sizeof(struct minstrel_ht_sta), gfp); if (!msp) return NULL; diff --git a/trunk/net/netfilter/ipvs/ip_vs_ctl.c b/trunk/net/netfilter/ipvs/ip_vs_ctl.c index 84444dda194b..d43e3c122f7b 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_ctl.c +++ b/trunk/net/netfilter/ipvs/ip_vs_ctl.c @@ -1521,12 +1521,11 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event, { struct net_device *dev = ptr; struct net *net = dev_net(dev); - struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_service *svc; struct ip_vs_dest *dest; unsigned int idx; - if (event != NETDEV_UNREGISTER || !ipvs) + if (event != NETDEV_UNREGISTER) return NOTIFY_DONE; IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name); EnterFunction(2); @@ -1552,7 +1551,7 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event, } } - list_for_each_entry(dest, &ipvs->dest_trash, n_list) { + list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) { __ip_vs_dev_reset(dest, dev); } mutex_unlock(&__ip_vs_mutex); diff --git a/trunk/net/netfilter/xt_set.c b/trunk/net/netfilter/xt_set.c index c6f7db720d84..035960ec5cb9 100644 --- a/trunk/net/netfilter/xt_set.c +++ b/trunk/net/netfilter/xt_set.c @@ -16,7 +16,6 @@ #include #include -#include MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik "); @@ -311,8 +310,7 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par) info->del_set.flags, 0, UINT_MAX); /* Normalize to fit into jiffies */ - if (add_opt.timeout != IPSET_NO_TIMEOUT && - add_opt.timeout > UINT_MAX/MSEC_PER_SEC) + if (add_opt.timeout > UINT_MAX/MSEC_PER_SEC) add_opt.timeout = UINT_MAX/MSEC_PER_SEC; if (info->add_set.index != IPSET_INVALID_ID) ip_set_add(info->add_set.index, skb, par, &add_opt); diff --git a/trunk/net/nfc/llcp/sock.c b/trunk/net/nfc/llcp/sock.c index e06d458fc719..17a707db40eb 100644 --- a/trunk/net/nfc/llcp/sock.c +++ b/trunk/net/nfc/llcp/sock.c @@ -292,7 +292,7 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr, pr_debug("%p\n", sk); - if (llcp_sock == NULL || llcp_sock->dev == NULL) + if (llcp_sock == NULL) return -EBADFD; addr->sa_family = AF_NFC; diff --git a/trunk/net/rxrpc/ar-peer.c b/trunk/net/rxrpc/ar-peer.c index 
bebaa43484bc..2754f098d436 100644 --- a/trunk/net/rxrpc/ar-peer.c +++ b/trunk/net/rxrpc/ar-peer.c @@ -229,7 +229,7 @@ struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *local, return peer; new_UDP_peer: - _net("Rx UDP DGRAM from NEW peer"); + _net("Rx UDP DGRAM from NEW peer %d", peer->debug_id); read_unlock_bh(&rxrpc_peer_lock); _leave(" = -EBUSY [new]"); return ERR_PTR(-EBUSY); diff --git a/trunk/net/sched/sch_netem.c b/trunk/net/sched/sch_netem.c index c412ad0d0308..a2a95aabf9c2 100644 --- a/trunk/net/sched/sch_netem.c +++ b/trunk/net/sched/sch_netem.c @@ -331,22 +331,29 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche return PSCHED_NS2TICKS(ticks); } -static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) +static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) { struct sk_buff_head *list = &sch->q; psched_time_t tnext = netem_skb_cb(nskb)->time_to_send; - struct sk_buff *skb = skb_peek_tail(list); + struct sk_buff *skb; - /* Optimize for add at tail */ - if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send)) - return __skb_queue_tail(list, nskb); + if (likely(skb_queue_len(list) < sch->limit)) { + skb = skb_peek_tail(list); + /* Optimize for add at tail */ + if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send)) + return qdisc_enqueue_tail(nskb, sch); - skb_queue_reverse_walk(list, skb) { - if (tnext >= netem_skb_cb(skb)->time_to_send) - break; + skb_queue_reverse_walk(list, skb) { + if (tnext >= netem_skb_cb(skb)->time_to_send) + break; + } + + __skb_queue_after(list, skb, nskb); + sch->qstats.backlog += qdisc_pkt_len(nskb); + return NET_XMIT_SUCCESS; } - __skb_queue_after(list, skb, nskb); + return qdisc_reshape_fail(nskb, sch); } /* @@ -361,6 +368,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) /* We don't fill cb now as skb_unshare() may invalidate it */ struct netem_skb_cb *cb; struct sk_buff *skb2; + int ret; int count = 1; /* Random duplication */ @@ -411,11 +419,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8); } - if (unlikely(skb_queue_len(&sch->q) >= sch->limit)) - return qdisc_reshape_fail(skb, sch); - - sch->qstats.backlog += qdisc_pkt_len(skb); - cb = netem_skb_cb(skb); if (q->gap == 0 || /* not doing reordering */ q->counter < q->gap - 1 || /* inside last reordering gap */ @@ -447,7 +450,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) cb->time_to_send = now + delay; ++q->counter; - tfifo_enqueue(skb, sch); + ret = tfifo_enqueue(skb, sch); } else { /* * Do re-ordering by putting one out of N packets at the front @@ -457,7 +460,16 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) q->counter = 0; __skb_queue_head(&sch->q, skb); + sch->qstats.backlog += qdisc_pkt_len(skb); sch->qstats.requeues++; + ret = NET_XMIT_SUCCESS; + } + + if (ret != NET_XMIT_SUCCESS) { + if (net_xmit_drop_count(ret)) { + sch->qstats.drops++; + return ret; + } } return NET_XMIT_SUCCESS; diff --git a/trunk/net/sched/sch_sfb.c b/trunk/net/sched/sch_sfb.c index 30ea4674cabd..74305c883bd3 100644 --- a/trunk/net/sched/sch_sfb.c +++ b/trunk/net/sched/sch_sfb.c @@ -570,8 +570,6 @@ static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb) sch->qstats.backlog = q->qdisc->qstats.backlog; opts = nla_nest_start(skb, TCA_OPTIONS); - if (opts == NULL) - goto nla_put_failure; if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt)) goto nla_put_failure; return nla_nest_end(skb, opts); diff --git 
a/trunk/net/sctp/input.c b/trunk/net/sctp/input.c index 8b9b6790a3df..80564fe03024 100644 --- a/trunk/net/sctp/input.c +++ b/trunk/net/sctp/input.c @@ -736,12 +736,15 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep) epb = &ep->base; + if (hlist_unhashed(&epb->node)) + return; + epb->hashent = sctp_ep_hashfn(epb->bind_addr.port); head = &sctp_ep_hashtable[epb->hashent]; sctp_write_lock(&head->lock); - hlist_del_init(&epb->node); + __hlist_del(&epb->node); sctp_write_unlock(&head->lock); } @@ -822,7 +825,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc) head = &sctp_assoc_hashtable[epb->hashent]; sctp_write_lock(&head->lock); - hlist_del_init(&epb->node); + __hlist_del(&epb->node); sctp_write_unlock(&head->lock); } diff --git a/trunk/net/sctp/socket.c b/trunk/net/sctp/socket.c index 31c7bfcd9b58..b3b8a8d813eb 100644 --- a/trunk/net/sctp/socket.c +++ b/trunk/net/sctp/socket.c @@ -1231,14 +1231,8 @@ static int __sctp_connect(struct sock* sk, SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p" " kaddrs: %p err: %d\n", asoc, kaddrs, err); - if (asoc) { - /* sctp_primitive_ASSOCIATE may have added this association - * To the hash table, try to unhash it, just in case, its a noop - * if it wasn't hashed so we're safe - */ - sctp_unhash_established(asoc); + if (asoc) sctp_association_free(asoc); - } return err; } @@ -1948,10 +1942,8 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, goto out_unlock; out_free: - if (new_asoc) { - sctp_unhash_established(asoc); + if (new_asoc) sctp_association_free(asoc); - } out_unlock: sctp_release_sock(sk); diff --git a/trunk/security/keys/internal.h b/trunk/security/keys/internal.h index c246ba5d43ab..3dcbf86b0d31 100644 --- a/trunk/security/keys/internal.h +++ b/trunk/security/keys/internal.h @@ -149,7 +149,7 @@ extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags, #define KEY_LOOKUP_FOR_UNLINK 0x04 extern long join_session_keyring(const char *name); -extern void key_change_session_keyring(struct callback_head *twork); +extern void key_change_session_keyring(struct task_work *twork); extern struct work_struct key_gc_work; extern unsigned key_gc_delay; diff --git a/trunk/security/keys/keyctl.c b/trunk/security/keys/keyctl.c index f1b59ae39d7e..0f5b3f027299 100644 --- a/trunk/security/keys/keyctl.c +++ b/trunk/security/keys/keyctl.c @@ -1456,7 +1456,7 @@ long keyctl_session_to_parent(void) { struct task_struct *me, *parent; const struct cred *mycred, *pcred; - struct callback_head *newwork, *oldwork; + struct task_work *newwork, *oldwork; key_ref_t keyring_r; struct cred *cred; int ret; @@ -1466,17 +1466,19 @@ long keyctl_session_to_parent(void) return PTR_ERR(keyring_r); ret = -ENOMEM; + newwork = kmalloc(sizeof(struct task_work), GFP_KERNEL); + if (!newwork) + goto error_keyring; /* our parent is going to need a new cred struct, a new tgcred struct * and new security data, so we allocate them here to prevent ENOMEM in * our parent */ cred = cred_alloc_blank(); if (!cred) - goto error_keyring; - newwork = &cred->rcu; + goto error_newwork; cred->tgcred->session_keyring = key_ref_to_ptr(keyring_r); - init_task_work(newwork, key_change_session_keyring); + init_task_work(newwork, key_change_session_keyring, cred); me = current; rcu_read_lock(); @@ -1486,7 +1488,6 @@ long keyctl_session_to_parent(void) oldwork = NULL; parent = me->real_parent; - task_lock(parent); /* the parent mustn't be init and mustn't be a kernel thread */ if (parent->pid <= 1 || !parent->mm) goto unlock; @@ 
-1530,15 +1531,20 @@ long keyctl_session_to_parent(void) if (!ret) newwork = NULL; unlock: - task_unlock(parent); write_unlock_irq(&tasklist_lock); rcu_read_unlock(); - if (oldwork) - put_cred(container_of(oldwork, struct cred, rcu)); - if (newwork) - put_cred(cred); + if (oldwork) { + put_cred(oldwork->data); + kfree(oldwork); + } + if (newwork) { + put_cred(newwork->data); + kfree(newwork); + } return ret; +error_newwork: + kfree(newwork); error_keyring: key_ref_put(keyring_r); return ret; diff --git a/trunk/security/keys/process_keys.c b/trunk/security/keys/process_keys.c index 54339cfd6734..4ad54eea1ea4 100644 --- a/trunk/security/keys/process_keys.c +++ b/trunk/security/keys/process_keys.c @@ -834,11 +834,12 @@ long join_session_keyring(const char *name) * Replace a process's session keyring on behalf of one of its children when * the target process is about to resume userspace execution. */ -void key_change_session_keyring(struct callback_head *twork) +void key_change_session_keyring(struct task_work *twork) { const struct cred *old = current_cred(); - struct cred *new = container_of(twork, struct cred, rcu); + struct cred *new = twork->data; + kfree(twork); if (unlikely(current->flags & PF_EXITING)) { put_cred(new); return; diff --git a/trunk/security/selinux/hooks.c b/trunk/security/selinux/hooks.c index 9292a8971e66..372ec6502aa8 100644 --- a/trunk/security/selinux/hooks.c +++ b/trunk/security/selinux/hooks.c @@ -2157,7 +2157,8 @@ static inline void flush_unauthorized_files(const struct cred *cred, get_file(devnull); } else { devnull = dentry_open( - &selinux_null, + dget(selinux_null), + mntget(selinuxfs_mount), O_RDWR, cred); if (IS_ERR(devnull)) { devnull = NULL; @@ -2716,7 +2717,7 @@ static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr) ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET)) return dentry_has_perm(cred, dentry, FILE__SETATTR); - if (selinux_policycap_openperm && (ia_valid & ATTR_SIZE)) + if (ia_valid & ATTR_SIZE) av |= FILE__OPEN; return dentry_has_perm(cred, dentry, av); diff --git a/trunk/security/selinux/include/classmap.h b/trunk/security/selinux/include/classmap.h index df2de54a958d..b8c53723e09b 100644 --- a/trunk/security/selinux/include/classmap.h +++ b/trunk/security/selinux/include/classmap.h @@ -145,9 +145,7 @@ struct security_class_mapping secclass_map[] = { "node_bind", "name_connect", NULL } }, { "memprotect", { "mmap_zero", NULL } }, { "peer", { "recv", NULL } }, - { "capability2", - { "mac_override", "mac_admin", "syslog", "wake_alarm", "block_suspend", - NULL } }, + { "capability2", { "mac_override", "mac_admin", "syslog", NULL } }, { "kernel_service", { "use_as_override", "create_files_as", NULL } }, { "tun_socket", { COMMON_SOCK_PERMS, NULL } }, diff --git a/trunk/security/selinux/include/security.h b/trunk/security/selinux/include/security.h index 6d3885165d14..dde2005407aa 100644 --- a/trunk/security/selinux/include/security.h +++ b/trunk/security/selinux/include/security.h @@ -221,7 +221,7 @@ extern void selinux_status_update_policyload(int seqno); extern void selinux_complete_init(void); extern int selinux_disable(void); extern void exit_sel_fs(void); -extern struct path selinux_null; +extern struct dentry *selinux_null; extern struct vfsmount *selinuxfs_mount; extern void selnl_notify_setenforce(int val); extern void selnl_notify_policyload(u32 seqno); diff --git a/trunk/security/selinux/selinuxfs.c b/trunk/security/selinux/selinuxfs.c index 298e695d6822..3ad290251288 100644 --- a/trunk/security/selinux/selinuxfs.c +++ 
b/trunk/security/selinux/selinuxfs.c @@ -1297,7 +1297,7 @@ static int sel_make_bools(void) #define NULL_FILE_NAME "null" -struct path selinux_null; +struct dentry *selinux_null; static ssize_t sel_read_avc_cache_threshold(struct file *filp, char __user *buf, size_t count, loff_t *ppos) @@ -1838,7 +1838,7 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent) init_special_inode(inode, S_IFCHR | S_IRUGO | S_IWUGO, MKDEV(MEM_MAJOR, 3)); d_add(dentry, inode); - selinux_null.dentry = dentry; + selinux_null = dentry; dentry = sel_make_dir(sb->s_root, "avc", &sel_last_ino); if (IS_ERR(dentry)) { @@ -1912,7 +1912,7 @@ static int __init init_sel_fs(void) return err; } - selinux_null.mnt = selinuxfs_mount = kern_mount(&sel_fs_type); + selinuxfs_mount = kern_mount(&sel_fs_type); if (IS_ERR(selinuxfs_mount)) { printk(KERN_ERR "selinuxfs: could not mount!\n"); err = PTR_ERR(selinuxfs_mount); diff --git a/trunk/tools/lib/traceevent/Makefile b/trunk/tools/lib/traceevent/Makefile index 46c2f6b7b123..3d69aa9ff51e 100644 --- a/trunk/tools/lib/traceevent/Makefile +++ b/trunk/tools/lib/traceevent/Makefile @@ -250,12 +250,8 @@ endef all_objs := $(sort $(ALL_OBJS)) all_deps := $(all_objs:%.o=.%.d) -# let .d file also depends on the source and header files define check_deps - @set -e; $(RM) $@; \ - $(CC) -M $(CFLAGS) $< > $@.$$$$; \ - sed 's,\($*\)\.o[ :]*,\1.o $@ : ,g' < $@.$$$$ > $@; \ - $(RM) $@.$$$$ + $(CC) -M $(CFLAGS) $< > $@; endef $(gui_deps): ks_version.h @@ -274,13 +270,11 @@ endif tags: force $(RM) tags - find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px \ - --regex-c++='/_PE\(([^,)]*).*/PEVENT_ERRNO__\1/' + find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px TAGS: force $(RM) TAGS - find . -name '*.[ch]' | xargs etags \ - --regex='/_PE(\([^,)]*\).*/PEVENT_ERRNO__\1/' + find . 
-name '*.[ch]' | xargs etags define do_install $(print_install) \ @@ -296,7 +290,7 @@ install_lib: all_cmd install_plugins install_python install: install_lib clean: - $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d + $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES).*.d $(RM) tags TAGS endif # skip-makefile diff --git a/trunk/tools/lib/traceevent/event-parse.c b/trunk/tools/lib/traceevent/event-parse.c index 5f34aa371b56..554828219c33 100644 --- a/trunk/tools/lib/traceevent/event-parse.c +++ b/trunk/tools/lib/traceevent/event-parse.c @@ -467,10 +467,8 @@ int pevent_register_function(struct pevent *pevent, char *func, item->mod = NULL; item->addr = addr; - if (!item->func || (mod && !item->mod)) - die("malloc func"); - pevent->funclist = item; + pevent->func_count++; return 0; @@ -513,12 +511,12 @@ struct printk_list { static int printk_cmp(const void *a, const void *b) { - const struct printk_map *pa = a; - const struct printk_map *pb = b; + const struct func_map *fa = a; + const struct func_map *fb = b; - if (pa->addr < pb->addr) + if (fa->addr < fb->addr) return -1; - if (pa->addr > pb->addr) + if (fa->addr > fb->addr) return 1; return 0; @@ -585,13 +583,10 @@ int pevent_register_print_string(struct pevent *pevent, char *fmt, item = malloc_or_die(sizeof(*item)); item->next = pevent->printklist; + pevent->printklist = item; item->printk = strdup(fmt); item->addr = addr; - if (!item->printk) - die("malloc fmt"); - - pevent->printklist = item; pevent->printk_count++; return 0; @@ -621,9 +616,7 @@ static struct event_format *alloc_event(void) { struct event_format *event; - event = malloc(sizeof(*event)); - if (!event) - return NULL; + event = malloc_or_die(sizeof(*event)); memset(event, 0, sizeof(*event)); return event; @@ -633,8 +626,12 @@ static void add_event(struct pevent *pevent, struct event_format *event) { int i; - pevent->events = realloc(pevent->events, sizeof(event) * - (pevent->nr_events + 1)); + if (!pevent->events) + pevent->events = malloc_or_die(sizeof(event)); + else + pevent->events = + realloc(pevent->events, sizeof(event) * + (pevent->nr_events + 1)); if (!pevent->events) die("Can not allocate events"); @@ -700,10 +697,6 @@ static void free_arg(struct print_arg *arg) free_arg(arg->symbol.field); free_flag_sym(arg->symbol.symbols); break; - case PRINT_HEX: - free_arg(arg->hex.field); - free_arg(arg->hex.size); - break; case PRINT_TYPE: free(arg->typecast.type); free_arg(arg->typecast.item); @@ -782,25 +775,6 @@ int pevent_peek_char(void) return __peek_char(); } -static int extend_token(char **tok, char *buf, int size) -{ - char *newtok = realloc(*tok, size); - - if (!newtok) { - free(*tok); - *tok = NULL; - return -1; - } - - if (!*tok) - strcpy(newtok, buf); - else - strcat(newtok, buf); - *tok = newtok; - - return 0; -} - static enum event_type force_token(const char *str, char **tok); static enum event_type __read_token(char **tok) @@ -885,10 +859,17 @@ static enum event_type __read_token(char **tok) do { if (i == (BUFSIZ - 1)) { buf[i] = 0; - tok_size += BUFSIZ; + if (*tok) { + *tok = realloc(*tok, tok_size + BUFSIZ); + if (!*tok) + return EVENT_NONE; + strcat(*tok, buf); + } else + *tok = strdup(buf); - if (extend_token(tok, buf, tok_size) < 0) + if (!*tok) return EVENT_NONE; + tok_size += BUFSIZ; i = 0; } last_ch = ch; @@ -927,10 +908,17 @@ static enum event_type __read_token(char **tok) while (get_type(__peek_char()) == type) { if (i == (BUFSIZ - 1)) { buf[i] = 0; - tok_size += BUFSIZ; + if (*tok) { + *tok = realloc(*tok, tok_size + BUFSIZ); + if (!*tok) + 
return EVENT_NONE; + strcat(*tok, buf); + } else + *tok = strdup(buf); - if (extend_token(tok, buf, tok_size) < 0) + if (!*tok) return EVENT_NONE; + tok_size += BUFSIZ; i = 0; } ch = __read_char(); @@ -939,7 +927,14 @@ static enum event_type __read_token(char **tok) out: buf[i] = 0; - if (extend_token(tok, buf, tok_size + i + 1) < 0) + if (*tok) { + *tok = realloc(*tok, tok_size + i); + if (!*tok) + return EVENT_NONE; + strcat(*tok, buf); + } else + *tok = strdup(buf); + if (!*tok) return EVENT_NONE; if (type == EVENT_ITEM) { @@ -1260,15 +1255,9 @@ static int event_read_fields(struct event_format *event, struct format_field **f field->flags |= FIELD_IS_POINTER; if (field->type) { - char *new_type; - new_type = realloc(field->type, - strlen(field->type) + - strlen(last_token) + 2); - if (!new_type) { - free(last_token); - goto fail; - } - field->type = new_type; + field->type = realloc(field->type, + strlen(field->type) + + strlen(last_token) + 2); strcat(field->type, " "); strcat(field->type, last_token); free(last_token); @@ -1293,7 +1282,6 @@ static int event_read_fields(struct event_format *event, struct format_field **f if (strcmp(token, "[") == 0) { enum event_type last_type = type; char *brackets = token; - char *new_brackets; int len; field->flags |= FIELD_IS_ARRAY; @@ -1313,14 +1301,9 @@ static int event_read_fields(struct event_format *event, struct format_field **f len = 1; last_type = type; - new_brackets = realloc(brackets, - strlen(brackets) + - strlen(token) + len); - if (!new_brackets) { - free(brackets); - goto fail; - } - brackets = new_brackets; + brackets = realloc(brackets, + strlen(brackets) + + strlen(token) + len); if (len == 2) strcat(brackets, " "); strcat(brackets, token); @@ -1336,12 +1319,7 @@ static int event_read_fields(struct event_format *event, struct format_field **f free_token(token); - new_brackets = realloc(brackets, strlen(brackets) + 2); - if (!new_brackets) { - free(brackets); - goto fail; - } - brackets = new_brackets; + brackets = realloc(brackets, strlen(brackets) + 2); strcat(brackets, "]"); /* add brackets to type */ @@ -1352,16 +1330,10 @@ static int event_read_fields(struct event_format *event, struct format_field **f * the format: type [] item; */ if (type == EVENT_ITEM) { - char *new_type; - new_type = realloc(field->type, - strlen(field->type) + - strlen(field->name) + - strlen(brackets) + 2); - if (!new_type) { - free(brackets); - goto fail; - } - field->type = new_type; + field->type = realloc(field->type, + strlen(field->type) + + strlen(field->name) + + strlen(brackets) + 2); strcat(field->type, " "); strcat(field->type, field->name); free_token(field->name); @@ -1369,15 +1341,9 @@ static int event_read_fields(struct event_format *event, struct format_field **f field->name = token; type = read_token(&token); } else { - char *new_type; - new_type = realloc(field->type, - strlen(field->type) + - strlen(brackets) + 1); - if (!new_type) { - free(brackets); - goto fail; - } - field->type = new_type; + field->type = realloc(field->type, + strlen(field->type) + + strlen(brackets) + 1); strcat(field->type, brackets); } free(brackets); @@ -1760,16 +1726,10 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) /* could just be a type pointer */ if ((strcmp(arg->op.op, "*") == 0) && type == EVENT_DELIM && (strcmp(token, ")") == 0)) { - char *new_atom; - if (left->type != PRINT_ATOM) die("bad pointer type"); - new_atom = realloc(left->atom.atom, + left->atom.atom = realloc(left->atom.atom, strlen(left->atom.atom) + 3); - if 
(!new_atom) - goto out_free; - - left->atom.atom = new_atom; strcat(left->atom.atom, " *"); free(arg->op.op); *arg = *left; @@ -2186,8 +2146,6 @@ process_fields(struct event_format *event, struct print_flag_sym **list, char ** if (value == NULL) goto out_free; field->value = strdup(value); - if (field->value == NULL) - goto out_free; free_arg(arg); arg = alloc_arg(); @@ -2201,8 +2159,6 @@ process_fields(struct event_format *event, struct print_flag_sym **list, char ** if (value == NULL) goto out_free; field->str = strdup(value); - if (field->str == NULL) - goto out_free; free_arg(arg); arg = NULL; @@ -2303,45 +2259,6 @@ process_symbols(struct event_format *event, struct print_arg *arg, char **tok) return EVENT_ERROR; } -static enum event_type -process_hex(struct event_format *event, struct print_arg *arg, char **tok) -{ - struct print_arg *field; - enum event_type type; - char *token; - - memset(arg, 0, sizeof(*arg)); - arg->type = PRINT_HEX; - - field = alloc_arg(); - type = process_arg(event, field, &token); - - if (test_type_token(type, token, EVENT_DELIM, ",")) - goto out_free; - - arg->hex.field = field; - - free_token(token); - - field = alloc_arg(); - type = process_arg(event, field, &token); - - if (test_type_token(type, token, EVENT_DELIM, ")")) - goto out_free; - - arg->hex.size = field; - - free_token(token); - type = read_token_item(tok); - return type; - - out_free: - free_arg(field); - free_token(token); - *tok = NULL; - return EVENT_ERROR; -} - static enum event_type process_dynamic_array(struct event_format *event, struct print_arg *arg, char **tok) { @@ -2571,10 +2488,6 @@ process_function(struct event_format *event, struct print_arg *arg, is_symbolic_field = 1; return process_symbols(event, arg, tok); } - if (strcmp(token, "__print_hex") == 0) { - free_token(token); - return process_hex(event, arg, tok); - } if (strcmp(token, "__get_str") == 0) { free_token(token); return process_str(event, arg, tok); @@ -2628,16 +2541,7 @@ process_arg_token(struct event_format *event, struct print_arg *arg, } /* atoms can be more than one token long */ while (type == EVENT_ITEM) { - char *new_atom; - new_atom = realloc(atom, - strlen(atom) + strlen(token) + 2); - if (!new_atom) { - free(atom); - *tok = NULL; - free_token(token); - return EVENT_ERROR; - } - atom = new_atom; + atom = realloc(atom, strlen(atom) + strlen(token) + 2); strcat(atom, " "); strcat(atom, token); free_token(token); @@ -2931,7 +2835,7 @@ static int get_common_info(struct pevent *pevent, event = pevent->events[0]; field = pevent_find_common_field(event, type); if (!field) - return -1; + die("field '%s' not found", type); *offset = field->offset; *size = field->size; @@ -2982,16 +2886,15 @@ static int parse_common_flags(struct pevent *pevent, void *data) static int parse_common_lock_depth(struct pevent *pevent, void *data) { - return __parse_common(pevent, data, - &pevent->ld_size, &pevent->ld_offset, - "common_lock_depth"); -} + int ret; -static int parse_common_migrate_disable(struct pevent *pevent, void *data) -{ - return __parse_common(pevent, data, - &pevent->ld_size, &pevent->ld_offset, - "common_migrate_disable"); + ret = __parse_common(pevent, data, + &pevent->ld_size, &pevent->ld_offset, + "common_lock_depth"); + if (ret < 0) + return -1; + + return ret; } static int events_id_cmp(const void *a, const void *b); @@ -3092,7 +2995,6 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg break; case PRINT_FLAGS: case PRINT_SYMBOL: - case PRINT_HEX: break; case PRINT_TYPE: val = 
eval_num_arg(data, size, event, arg->typecast.item); @@ -3312,13 +3214,11 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, { struct pevent *pevent = event->pevent; struct print_flag_sym *flag; - struct format_field *field; unsigned long long val, fval; unsigned long addr; char *str; - unsigned char *hex; int print; - int i, len; + int len; switch (arg->type) { case PRINT_NULL: @@ -3328,29 +3228,27 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, print_str_to_seq(s, format, len_arg, arg->atom.atom); return; case PRINT_FIELD: - field = arg->field.field; - if (!field) { - field = pevent_find_any_field(event, arg->field.name); - if (!field) + if (!arg->field.field) { + arg->field.field = pevent_find_any_field(event, arg->field.name); + if (!arg->field.field) die("field %s not found", arg->field.name); - arg->field.field = field; } /* Zero sized fields, mean the rest of the data */ - len = field->size ? : size - field->offset; + len = arg->field.field->size ? : size - arg->field.field->offset; /* * Some events pass in pointers. If this is not an array * and the size is the same as long_size, assume that it * is a pointer. */ - if (!(field->flags & FIELD_IS_ARRAY) && - field->size == pevent->long_size) { - addr = *(unsigned long *)(data + field->offset); + if (!(arg->field.field->flags & FIELD_IS_ARRAY) && + arg->field.field->size == pevent->long_size) { + addr = *(unsigned long *)(data + arg->field.field->offset); trace_seq_printf(s, "%lx", addr); break; } str = malloc_or_die(len + 1); - memcpy(str, data + field->offset, len); + memcpy(str, data + arg->field.field->offset, len); str[len] = 0; print_str_to_seq(s, format, len_arg, str); free(str); @@ -3383,23 +3281,6 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, } } break; - case PRINT_HEX: - field = arg->hex.field->field.field; - if (!field) { - str = arg->hex.field->field.name; - field = pevent_find_any_field(event, str); - if (!field) - die("field %s not found", str); - arg->hex.field->field.field = field; - } - hex = data + field->offset; - len = eval_num_arg(data, size, event, arg->hex.size); - for (i = 0; i < len; i++) { - if (i) - trace_seq_putc(s, ' '); - trace_seq_printf(s, "%02x", hex[i]); - } - break; case PRINT_TYPE: break; @@ -3418,7 +3299,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, break; } case PRINT_BSTRING: - print_str_to_seq(s, format, len_arg, arg->string.string); + trace_seq_printf(s, format, arg->string.string); break; case PRINT_OP: /* @@ -3482,10 +3363,6 @@ process_defined_func(struct trace_seq *s, void *data, int size, string = malloc_or_die(sizeof(*string)); string->next = strings; string->str = strdup(str.buffer); - if (!string->str) - die("malloc str"); - - args[i] = (unsigned long long)string->str; strings = string; trace_seq_destroy(&str); break; @@ -3523,7 +3400,6 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc unsigned long long ip, val; char *ptr; void *bptr; - int vsize; field = pevent->bprint_buf_field; ip_field = pevent->bprint_ip_field; @@ -3572,8 +3448,6 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc goto process_again; case '0' ... 
'9': goto process_again; - case '.': - goto process_again; case 'p': ls = 1; /* fall through */ @@ -3581,30 +3455,23 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc case 'u': case 'x': case 'i': + /* the pointers are always 4 bytes aligned */ + bptr = (void *)(((unsigned long)bptr + 3) & + ~3); switch (ls) { case 0: - vsize = 4; + ls = 4; break; case 1: - vsize = pevent->long_size; + ls = pevent->long_size; break; case 2: - vsize = 8; - break; + ls = 8; default: - vsize = ls; /* ? */ break; } - /* fall through */ - case '*': - if (*ptr == '*') - vsize = 4; - - /* the pointers are always 4 bytes aligned */ - bptr = (void *)(((unsigned long)bptr + 3) & - ~3); - val = pevent_read_number(pevent, bptr, vsize); - bptr += vsize; + val = pevent_read_number(pevent, bptr, ls); + bptr += ls; arg = alloc_arg(); arg->next = NULL; arg->type = PRINT_ATOM; @@ -3612,21 +3479,12 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc sprintf(arg->atom.atom, "%lld", val); *next = arg; next = &arg->next; - /* - * The '*' case means that an arg is used as the length. - * We need to continue to figure out for what. - */ - if (*ptr == '*') - goto process_again; - break; case 's': arg = alloc_arg(); arg->next = NULL; arg->type = PRINT_BSTRING; arg->string.string = strdup(bptr); - if (!arg->string.string) - break; bptr += strlen(bptr) + 1; *next = arg; next = &arg->next; @@ -3731,16 +3589,6 @@ static void print_mac_arg(struct trace_seq *s, int mac, void *data, int size, trace_seq_printf(s, fmt, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]); } -static int is_printable_array(char *p, unsigned int len) -{ - unsigned int i; - - for (i = 0; i < len && p[i]; i++) - if (!isprint(p[i])) - return 0; - return 1; -} - static void print_event_fields(struct trace_seq *s, void *data, int size, struct event_format *event) { @@ -3760,8 +3608,7 @@ static void print_event_fields(struct trace_seq *s, void *data, int size, len = offset >> 16; offset &= 0xffff; } - if (field->flags & FIELD_IS_STRING && - is_printable_array(data + offset, len)) { + if (field->flags & FIELD_IS_STRING) { trace_seq_printf(s, "%s", (char *)data + offset); } else { trace_seq_puts(s, "ARRAY["); @@ -3772,7 +3619,6 @@ static void print_event_fields(struct trace_seq *s, void *data, int size, *((unsigned char *)data + offset + i)); } trace_seq_putc(s, ']'); - field->flags &= ~FIELD_IS_STRING; } } else { val = pevent_read_number(event->pevent, data + field->offset, @@ -3912,7 +3758,6 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event } else if (*(ptr+1) == 'M' || *(ptr+1) == 'm') { print_mac_arg(s, *(ptr+1), data, size, event, arg); ptr++; - arg = arg->next; break; } @@ -3949,15 +3794,14 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event break; } } - if (pevent->long_size == 8 && ls && - sizeof(long) != 8) { + if (pevent->long_size == 8 && ls) { char *p; ls = 2; /* make %l into %ll */ p = strchr(format, 'l'); if (p) - memmove(p+1, p, strlen(p)+1); + memmove(p, p+1, strlen(p)+1); else if (strcmp(format, "%p") == 0) strcpy(format, "0x%llx"); } @@ -4034,7 +3878,8 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event * pevent_data_lat_fmt - parse the data for the latency format * @pevent: a handle to the pevent * @s: the trace_seq to write to - * @record: the record to read from + * @data: the raw data to read from + * @size: currently unused. 
* * This parses out the Latency format (interrupts disabled, * need rescheduling, in hard/soft interrupt, preempt count @@ -4044,13 +3889,10 @@ void pevent_data_lat_fmt(struct pevent *pevent, struct trace_seq *s, struct pevent_record *record) { static int check_lock_depth = 1; - static int check_migrate_disable = 1; static int lock_depth_exists; - static int migrate_disable_exists; unsigned int lat_flags; unsigned int pc; int lock_depth; - int migrate_disable; int hardirq; int softirq; void *data = record->data; @@ -4058,26 +3900,18 @@ void pevent_data_lat_fmt(struct pevent *pevent, lat_flags = parse_common_flags(pevent, data); pc = parse_common_pc(pevent, data); /* lock_depth may not always exist */ - if (lock_depth_exists) - lock_depth = parse_common_lock_depth(pevent, data); - else if (check_lock_depth) { - lock_depth = parse_common_lock_depth(pevent, data); - if (lock_depth < 0) - check_lock_depth = 0; - else - lock_depth_exists = 1; - } + if (check_lock_depth) { + struct format_field *field; + struct event_format *event; - /* migrate_disable may not always exist */ - if (migrate_disable_exists) - migrate_disable = parse_common_migrate_disable(pevent, data); - else if (check_migrate_disable) { - migrate_disable = parse_common_migrate_disable(pevent, data); - if (migrate_disable < 0) - check_migrate_disable = 0; - else - migrate_disable_exists = 1; + check_lock_depth = 0; + event = pevent->events[0]; + field = pevent_find_common_field(event, "common_lock_depth"); + if (field) + lock_depth_exists = 1; } + if (lock_depth_exists) + lock_depth = parse_common_lock_depth(pevent, data); hardirq = lat_flags & TRACE_FLAG_HARDIRQ; softirq = lat_flags & TRACE_FLAG_SOFTIRQ; @@ -4096,13 +3930,6 @@ void pevent_data_lat_fmt(struct pevent *pevent, else trace_seq_putc(s, '.'); - if (migrate_disable_exists) { - if (migrate_disable < 0) - trace_seq_putc(s, '.'); - else - trace_seq_printf(s, "%d", migrate_disable); - } - if (lock_depth_exists) { if (lock_depth < 0) trace_seq_putc(s, '.'); @@ -4169,7 +3996,10 @@ const char *pevent_data_comm_from_pid(struct pevent *pevent, int pid) * pevent_data_comm_from_pid - parse the data into the print format * @s: the trace_seq to write to * @event: the handle to the event - * @record: the record to read from + * @cpu: the cpu the event was recorded on + * @data: the raw data + * @size: the size of the raw data + * @nsecs: the timestamp of the event * * This parses the raw @data using the given @event information and * writes the print format into the trace_seq. 
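For readers following the pevent_data_lat_fmt() hunks above, the sketch below shows, in isolation, the kind of flag decoding the latency prefix is built from: irqs-off, need-resched, hard/soft irq context and preempt count each map to one character. The flag values, the lat_fmt() helper name and the exact output format are assumptions made for illustration only; they are not the library's definitions and this is not perf code.

    #include <stdio.h>

    /* Assumed flag values, for illustration only. */
    #define TRACE_FLAG_IRQS_OFF     0x01
    #define TRACE_FLAG_NEED_RESCHED 0x02
    #define TRACE_FLAG_HARDIRQ      0x08
    #define TRACE_FLAG_SOFTIRQ      0x10

    /* Build a latency-format prefix similar in spirit to what
     * pevent_data_lat_fmt() writes into the trace_seq. */
    static void lat_fmt(char *buf, unsigned int flags, unsigned int pc)
    {
            int hardirq = flags & TRACE_FLAG_HARDIRQ;
            int softirq = flags & TRACE_FLAG_SOFTIRQ;

            buf[0] = (flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.';
            buf[1] = (flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.';
            buf[2] = hardirq && softirq ? 'H' :
                     hardirq ? 'h' : softirq ? 's' : '.';
            buf[3] = pc ? '0' + (pc & 0xf) : '.';
            buf[4] = '\0';
    }

    int main(void)
    {
            char buf[5];

            lat_fmt(buf, TRACE_FLAG_IRQS_OFF | TRACE_FLAG_SOFTIRQ, 1);
            printf("%s\n", buf);    /* prints "d.s1" */
            return 0;
    }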
@@ -4449,13 +4279,6 @@ static void print_args(struct print_arg *args) trace_seq_destroy(&s); printf(")"); break; - case PRINT_HEX: - printf("__print_hex("); - print_args(args->hex.field); - printf(", "); - print_args(args->hex.size); - printf(")"); - break; case PRINT_STRING: case PRINT_BSTRING: printf("__get_str(%s)", args->string.string); @@ -4718,8 +4541,6 @@ int pevent_parse_event(struct pevent *pevent, die("failed to read event id"); event->system = strdup(sys); - if (!event->system) - die("failed to allocate system"); /* Add pevent to event so that it can be referenced */ event->pevent = pevent; @@ -4761,11 +4582,6 @@ int pevent_parse_event(struct pevent *pevent, list = &arg->next; arg->type = PRINT_FIELD; arg->field.name = strdup(field->name); - if (!arg->field.name) { - do_warning("failed to allocate field name"); - event->flags |= EVENT_FL_FAILED; - return -1; - } arg->field.field = field; } return 0; @@ -4937,7 +4753,7 @@ int pevent_get_any_field_val(struct trace_seq *s, struct event_format *event, * @record: The record with the field name. * @err: print default error if failed. * - * Returns: 0 on success, -1 field not found, or 1 if buffer is full. + * Returns: 0 on success, -1 field not fould, or 1 if buffer is full. */ int pevent_print_num_field(struct trace_seq *s, const char *fmt, struct event_format *event, const char *name, @@ -4979,12 +4795,11 @@ static void free_func_handle(struct pevent_function_handler *func) * pevent_register_print_function - register a helper function * @pevent: the handle to the pevent * @func: the function to process the helper function - * @ret_type: the return type of the helper function * @name: the name of the helper function * @parameters: A list of enum pevent_func_arg_type * * Some events may have helper functions in the print format arguments. - * This allows a plugin to dynamically create a way to process one + * This allows a plugin to dynmically create a way to process one * of these functions. * * The @parameters is a variable list of pevent_func_arg_type enums that @@ -5055,13 +4870,12 @@ int pevent_register_print_function(struct pevent *pevent, } /** - * pevent_register_event_handler - register a way to parse an event + * pevent_register_event_handle - register a way to parse an event * @pevent: the handle to the pevent * @id: the id of the event to register * @sys_name: the system name the event belongs to * @event_name: the name of the event * @func: the function to call to parse the event information - * @context: the data to be passed to @func * * This function allows a developer to override the parsing of * a given event. 
If for some reason the default print format @@ -5111,11 +4925,6 @@ int pevent_register_event_handler(struct pevent *pevent, if (sys_name) handle->sys_name = strdup(sys_name); - if ((event_name && !handle->event_name) || - (sys_name && !handle->sys_name)) { - die("Failed to allocate event/sys name"); - } - handle->func = func; handle->next = pevent->handlers; pevent->handlers = handle; diff --git a/trunk/tools/lib/traceevent/event-parse.h b/trunk/tools/lib/traceevent/event-parse.h index 5772ad8cb386..ac997bc7b592 100644 --- a/trunk/tools/lib/traceevent/event-parse.h +++ b/trunk/tools/lib/traceevent/event-parse.h @@ -226,11 +226,6 @@ struct print_arg_symbol { struct print_flag_sym *symbols; }; -struct print_arg_hex { - struct print_arg *field; - struct print_arg *size; -}; - struct print_arg_dynarray { struct format_field *field; struct print_arg *index; @@ -258,7 +253,6 @@ enum print_arg_type { PRINT_FIELD, PRINT_FLAGS, PRINT_SYMBOL, - PRINT_HEX, PRINT_TYPE, PRINT_STRING, PRINT_BSTRING, @@ -276,7 +270,6 @@ struct print_arg { struct print_arg_typecast typecast; struct print_arg_flags flags; struct print_arg_symbol symbol; - struct print_arg_hex hex; struct print_arg_func func; struct print_arg_string string; struct print_arg_op op; diff --git a/trunk/tools/lib/traceevent/parse-filter.c b/trunk/tools/lib/traceevent/parse-filter.c index ad17855528f9..dfcfe2c131de 100644 --- a/trunk/tools/lib/traceevent/parse-filter.c +++ b/trunk/tools/lib/traceevent/parse-filter.c @@ -96,7 +96,7 @@ static enum event_type read_token(char **tok) (strcmp(token, "=") == 0 || strcmp(token, "!") == 0) && pevent_peek_char() == '~') { /* append it */ - *tok = malloc_or_die(3); + *tok = malloc(3); sprintf(*tok, "%c%c", *token, '~'); free_token(token); /* Now remove the '~' from the buffer */ @@ -148,11 +148,17 @@ add_filter_type(struct event_filter *filter, int id) if (filter_type) return filter_type; - filter->event_filters = realloc(filter->event_filters, - sizeof(*filter->event_filters) * - (filter->filters + 1)); - if (!filter->event_filters) - die("Could not allocate filter"); + if (!filter->filters) + filter->event_filters = + malloc_or_die(sizeof(*filter->event_filters)); + else { + filter->event_filters = + realloc(filter->event_filters, + sizeof(*filter->event_filters) * + (filter->filters + 1)); + if (!filter->event_filters) + die("Could not allocate filter"); + } for (i = 0; i < filter->filters; i++) { if (filter->event_filters[i].event_id > id) @@ -1474,7 +1480,7 @@ void pevent_filter_clear_trivial(struct event_filter *filter, { struct filter_type *filter_type; int count = 0; - int *ids = NULL; + int *ids; int i; if (!filter->filters) @@ -1498,8 +1504,10 @@ void pevent_filter_clear_trivial(struct event_filter *filter, default: break; } - - ids = realloc(ids, sizeof(*ids) * (count + 1)); + if (count) + ids = realloc(ids, sizeof(*ids) * (count + 1)); + else + ids = malloc(sizeof(*ids)); if (!ids) die("Can't allocate ids"); ids[count++] = filter_type->event_id; @@ -1702,43 +1710,18 @@ static int test_num(struct event_format *event, static const char *get_field_str(struct filter_arg *arg, struct pevent_record *record) { - struct event_format *event; - struct pevent *pevent; - unsigned long long addr; - const char *val = NULL; - char hex[64]; - - /* If the field is not a string convert it */ - if (arg->str.field->flags & FIELD_IS_STRING) { - val = record->data + arg->str.field->offset; - - /* - * We need to copy the data since we can't be sure the field - * is null terminated. 
- */ - if (*(val + arg->str.field->size - 1)) { - /* copy it */ - memcpy(arg->str.buffer, val, arg->str.field->size); - /* the buffer is already NULL terminated */ - val = arg->str.buffer; - } + const char *val = record->data + arg->str.field->offset; - } else { - event = arg->str.field->event; - pevent = event->pevent; - addr = get_value(event, arg->str.field, record); - - if (arg->str.field->flags & (FIELD_IS_POINTER | FIELD_IS_LONG)) - /* convert to a kernel symbol */ - val = pevent_find_function(pevent, addr); - - if (val == NULL) { - /* just use the hex of the string name */ - snprintf(hex, 64, "0x%llx", addr); - val = hex; - } + /* + * We need to copy the data since we can't be sure the field + * is null terminated. + */ + if (*(val + arg->str.field->size - 1)) { + /* copy it */ + memcpy(arg->str.buffer, val, arg->str.field->size); + /* the buffer is already NULL terminated */ + val = arg->str.buffer; } - return val; } @@ -2018,13 +2001,11 @@ static char *exp_to_str(struct event_filter *filter, struct filter_arg *arg) char *lstr; char *rstr; char *op; - char *str = NULL; + char *str; int len; lstr = arg_to_str(filter, arg->exp.left); rstr = arg_to_str(filter, arg->exp.right); - if (!lstr || !rstr) - goto out; switch (arg->exp.type) { case FILTER_EXP_ADD: @@ -2064,7 +2045,6 @@ static char *exp_to_str(struct event_filter *filter, struct filter_arg *arg) len = strlen(op) + strlen(lstr) + strlen(rstr) + 4; str = malloc_or_die(len); snprintf(str, len, "%s %s %s", lstr, op, rstr); -out: free(lstr); free(rstr); @@ -2081,8 +2061,6 @@ static char *num_to_str(struct event_filter *filter, struct filter_arg *arg) lstr = arg_to_str(filter, arg->num.left); rstr = arg_to_str(filter, arg->num.right); - if (!lstr || !rstr) - goto out; switch (arg->num.type) { case FILTER_CMP_EQ: @@ -2119,7 +2097,6 @@ static char *num_to_str(struct event_filter *filter, struct filter_arg *arg) break; } -out: free(lstr); free(rstr); return str; @@ -2270,12 +2247,7 @@ int pevent_filter_compare(struct event_filter *filter1, struct event_filter *fil /* The best way to compare complex filters is with strings */ str1 = arg_to_str(filter1, filter_type1->filter); str2 = arg_to_str(filter2, filter_type2->filter); - if (str1 && str2) - result = strcmp(str1, str2) != 0; - else - /* bail out if allocation fails */ - result = 1; - + result = strcmp(str1, str2) != 0; free(str1); free(str2); if (result) diff --git a/trunk/tools/perf/Documentation/perf-bench.txt b/trunk/tools/perf/Documentation/perf-bench.txt index 7065cd6fbdfc..a3dbadb26ef5 100644 --- a/trunk/tools/perf/Documentation/perf-bench.txt +++ b/trunk/tools/perf/Documentation/perf-bench.txt @@ -12,7 +12,7 @@ SYNOPSIS DESCRIPTION ----------- -This 'perf bench' command is a general framework for benchmark suites. +This 'perf bench' command is general framework for benchmark suites. COMMON OPTIONS -------------- @@ -45,20 +45,14 @@ SUBSYSTEM 'sched':: Scheduler and IPC mechanisms. -'mem':: - Memory access performance. - -'all':: - All benchmark subsystems. - SUITES FOR 'sched' ~~~~~~~~~~~~~~~~~~ *messaging*:: Suite for evaluating performance of scheduler and IPC mechanisms. Based on hackbench by Rusty Russell. -Options of *messaging* -^^^^^^^^^^^^^^^^^^^^^^ +Options of *pipe* +^^^^^^^^^^^^^^^^^ -p:: --pipe:: Use pipe() instead of socketpair() @@ -121,72 +115,6 @@ Example of *pipe* 59004 ops/sec --------------------- -SUITES FOR 'mem' -~~~~~~~~~~~~~~~~ -*memcpy*:: -Suite for evaluating performance of simple memory copy in various ways. 
- -Options of *memcpy* -^^^^^^^^^^^^^^^^^^^ --l:: ---length:: -Specify length of memory to copy (default: 1MB). -Available units are B, KB, MB, GB and TB (case insensitive). - --r:: ---routine:: -Specify routine to copy (default: default). -Available routines are depend on the architecture. -On x86-64, x86-64-unrolled, x86-64-movsq and x86-64-movsb are supported. - --i:: ---iterations:: -Repeat memcpy invocation this number of times. - --c:: ---cycle:: -Use perf's cpu-cycles event instead of gettimeofday syscall. - --o:: ---only-prefault:: -Show only the result with page faults before memcpy. - --n:: ---no-prefault:: -Show only the result without page faults before memcpy. - -*memset*:: -Suite for evaluating performance of simple memory set in various ways. - -Options of *memset* -^^^^^^^^^^^^^^^^^^^ --l:: ---length:: -Specify length of memory to set (default: 1MB). -Available units are B, KB, MB, GB and TB (case insensitive). - --r:: ---routine:: -Specify routine to set (default: default). -Available routines are depend on the architecture. -On x86-64, x86-64-unrolled, x86-64-stosq and x86-64-stosb are supported. - --i:: ---iterations:: -Repeat memset invocation this number of times. - --c:: ---cycle:: -Use perf's cpu-cycles event instead of gettimeofday syscall. - --o:: ---only-prefault:: -Show only the result with page faults before memset. - --n:: ---no-prefault:: -Show only the result without page faults before memset. - SEE ALSO -------- linkperf:perf[1] diff --git a/trunk/tools/perf/Documentation/perf-report.txt b/trunk/tools/perf/Documentation/perf-report.txt index 495210a612c4..2d89f02719b5 100644 --- a/trunk/tools/perf/Documentation/perf-report.txt +++ b/trunk/tools/perf/Documentation/perf-report.txt @@ -57,7 +57,7 @@ OPTIONS -s:: --sort=:: - Sort by key(s): pid, comm, dso, symbol, parent, srcline. + Sort by key(s): pid, comm, dso, symbol, parent. -p:: --parent=:: diff --git a/trunk/tools/perf/Documentation/perf-top.txt b/trunk/tools/perf/Documentation/perf-top.txt index 5b80d84d6b4a..4a5680cb242e 100644 --- a/trunk/tools/perf/Documentation/perf-top.txt +++ b/trunk/tools/perf/Documentation/perf-top.txt @@ -112,7 +112,7 @@ Default is to monitor all CPUS. -s:: --sort:: - Sort by key(s): pid, comm, dso, symbol, parent, srcline. + Sort by key(s): pid, comm, dso, symbol, parent -n:: --show-nr-samples:: diff --git a/trunk/tools/perf/Makefile b/trunk/tools/perf/Makefile index 75d74e5db8d5..0eee64cfe9a0 100644 --- a/trunk/tools/perf/Makefile +++ b/trunk/tools/perf/Makefile @@ -155,7 +155,7 @@ endif ### --- END CONFIGURATION SECTION --- -BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -I$(OUTPUT)util -I$(TRACE_EVENT_DIR) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE +BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -I$(OUTPUT)/util -I$(TRACE_EVENT_DIR) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE BASIC_LDFLAGS = # Guard against environment variables @@ -503,7 +503,6 @@ else LIB_OBJS += $(OUTPUT)ui/progress.o LIB_OBJS += $(OUTPUT)ui/util.o LIB_OBJS += $(OUTPUT)ui/tui/setup.o - LIB_OBJS += $(OUTPUT)ui/tui/util.o LIB_H += ui/browser.h LIB_H += ui/browsers/map.h LIB_H += ui/helpline.h @@ -523,18 +522,13 @@ else msg := $(warning GTK2 not found, disables GTK2 support. 
Please install gtk2-devel or libgtk2.0-dev); BASIC_CFLAGS += -DNO_GTK2_SUPPORT else - ifeq ($(call try-cc,$(SOURCE_GTK2_INFOBAR),$(FLAGS_GTK2)),y) - BASIC_CFLAGS += -DHAVE_GTK_INFO_BAR - endif BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0) EXTLIBS += $(shell pkg-config --libs gtk+-2.0) LIB_OBJS += $(OUTPUT)ui/gtk/browser.o LIB_OBJS += $(OUTPUT)ui/gtk/setup.o - LIB_OBJS += $(OUTPUT)ui/gtk/util.o # Make sure that it'd be included only once. ifneq ($(findstring -DNO_NEWT_SUPPORT,$(BASIC_CFLAGS)),) LIB_OBJS += $(OUTPUT)ui/setup.o - LIB_OBJS += $(OUTPUT)ui/util.o endif endif endif diff --git a/trunk/tools/perf/bench/mem-memcpy.c b/trunk/tools/perf/bench/mem-memcpy.c index 02dad5d3359b..71557225bf92 100644 --- a/trunk/tools/perf/bench/mem-memcpy.c +++ b/trunk/tools/perf/bench/mem-memcpy.c @@ -24,21 +24,21 @@ static const char *length_str = "1MB"; static const char *routine = "default"; static int iterations = 1; -static bool use_cycle; -static int cycle_fd; +static bool use_clock; +static int clock_fd; static bool only_prefault; static bool no_prefault; static const struct option options[] = { OPT_STRING('l', "length", &length_str, "1MB", "Specify length of memory to copy. " - "Available units: B, KB, MB, GB and TB (upper and lower)"), + "available unit: B, MB, GB (upper and lower)"), OPT_STRING('r', "routine", &routine, "default", "Specify routine to copy"), OPT_INTEGER('i', "iterations", &iterations, "repeat memcpy() invocation this number of times"), - OPT_BOOLEAN('c', "cycle", &use_cycle, - "Use cycles event instead of gettimeofday() for measuring"), + OPT_BOOLEAN('c', "clock", &use_clock, + "Use CPU clock for measuring"), OPT_BOOLEAN('o', "only-prefault", &only_prefault, "Show only the result with page faults before memcpy()"), OPT_BOOLEAN('n', "no-prefault", &no_prefault, @@ -76,27 +76,27 @@ static const char * const bench_mem_memcpy_usage[] = { NULL }; -static struct perf_event_attr cycle_attr = { +static struct perf_event_attr clock_attr = { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }; -static void init_cycle(void) +static void init_clock(void) { - cycle_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1, 0); + clock_fd = sys_perf_event_open(&clock_attr, getpid(), -1, -1, 0); - if (cycle_fd < 0 && errno == ENOSYS) + if (clock_fd < 0 && errno == ENOSYS) die("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); else - BUG_ON(cycle_fd < 0); + BUG_ON(clock_fd < 0); } -static u64 get_cycle(void) +static u64 get_clock(void) { int ret; u64 clk; - ret = read(cycle_fd, &clk, sizeof(u64)); + ret = read(clock_fd, &clk, sizeof(u64)); BUG_ON(ret != sizeof(u64)); return clk; @@ -119,9 +119,9 @@ static void alloc_mem(void **dst, void **src, size_t length) die("memory allocation failed - maybe length is too large?\n"); } -static u64 do_memcpy_cycle(memcpy_t fn, size_t len, bool prefault) +static u64 do_memcpy_clock(memcpy_t fn, size_t len, bool prefault) { - u64 cycle_start = 0ULL, cycle_end = 0ULL; + u64 clock_start = 0ULL, clock_end = 0ULL; void *src = NULL, *dst = NULL; int i; @@ -130,14 +130,14 @@ static u64 do_memcpy_cycle(memcpy_t fn, size_t len, bool prefault) if (prefault) fn(dst, src, len); - cycle_start = get_cycle(); + clock_start = get_clock(); for (i = 0; i < iterations; ++i) fn(dst, src, len); - cycle_end = get_cycle(); + clock_end = get_clock(); free(src); free(dst); - return cycle_end - cycle_start; + return clock_end - clock_start; } static double do_memcpy_gettimeofday(memcpy_t fn, size_t len, bool prefault) @@ -182,17 +182,17 @@ int 
bench_mem_memcpy(int argc, const char **argv, int i; size_t len; double result_bps[2]; - u64 result_cycle[2]; + u64 result_clock[2]; argc = parse_options(argc, argv, options, bench_mem_memcpy_usage, 0); - if (use_cycle) - init_cycle(); + if (use_clock) + init_clock(); len = (size_t)perf_atoll((char *)length_str); - result_cycle[0] = result_cycle[1] = 0ULL; + result_clock[0] = result_clock[1] = 0ULL; result_bps[0] = result_bps[1] = 0.0; if ((s64)len <= 0) { @@ -223,11 +223,11 @@ int bench_mem_memcpy(int argc, const char **argv, if (!only_prefault && !no_prefault) { /* show both of results */ - if (use_cycle) { - result_cycle[0] = - do_memcpy_cycle(routines[i].fn, len, false); - result_cycle[1] = - do_memcpy_cycle(routines[i].fn, len, true); + if (use_clock) { + result_clock[0] = + do_memcpy_clock(routines[i].fn, len, false); + result_clock[1] = + do_memcpy_clock(routines[i].fn, len, true); } else { result_bps[0] = do_memcpy_gettimeofday(routines[i].fn, @@ -237,9 +237,9 @@ int bench_mem_memcpy(int argc, const char **argv, len, true); } } else { - if (use_cycle) { - result_cycle[pf] = - do_memcpy_cycle(routines[i].fn, + if (use_clock) { + result_clock[pf] = + do_memcpy_clock(routines[i].fn, len, only_prefault); } else { result_bps[pf] = @@ -251,12 +251,12 @@ int bench_mem_memcpy(int argc, const char **argv, switch (bench_format) { case BENCH_FORMAT_DEFAULT: if (!only_prefault && !no_prefault) { - if (use_cycle) { - printf(" %14lf Cycle/Byte\n", - (double)result_cycle[0] + if (use_clock) { + printf(" %14lf Clock/Byte\n", + (double)result_clock[0] / (double)len); - printf(" %14lf Cycle/Byte (with prefault)\n", - (double)result_cycle[1] + printf(" %14lf Clock/Byte (with prefault)\n", + (double)result_clock[1] / (double)len); } else { print_bps(result_bps[0]); @@ -265,9 +265,9 @@ int bench_mem_memcpy(int argc, const char **argv, printf(" (with prefault)\n"); } } else { - if (use_cycle) { - printf(" %14lf Cycle/Byte", - (double)result_cycle[pf] + if (use_clock) { + printf(" %14lf Clock/Byte", + (double)result_clock[pf] / (double)len); } else print_bps(result_bps[pf]); @@ -277,17 +277,17 @@ int bench_mem_memcpy(int argc, const char **argv, break; case BENCH_FORMAT_SIMPLE: if (!only_prefault && !no_prefault) { - if (use_cycle) { + if (use_clock) { printf("%lf %lf\n", - (double)result_cycle[0] / (double)len, - (double)result_cycle[1] / (double)len); + (double)result_clock[0] / (double)len, + (double)result_clock[1] / (double)len); } else { printf("%lf %lf\n", result_bps[0], result_bps[1]); } } else { - if (use_cycle) { - printf("%lf\n", (double)result_cycle[pf] + if (use_clock) { + printf("%lf\n", (double)result_clock[pf] / (double)len); } else printf("%lf\n", result_bps[pf]); diff --git a/trunk/tools/perf/bench/mem-memset.c b/trunk/tools/perf/bench/mem-memset.c index 350cc9557265..e9079185bd72 100644 --- a/trunk/tools/perf/bench/mem-memset.c +++ b/trunk/tools/perf/bench/mem-memset.c @@ -24,21 +24,21 @@ static const char *length_str = "1MB"; static const char *routine = "default"; static int iterations = 1; -static bool use_cycle; -static int cycle_fd; +static bool use_clock; +static int clock_fd; static bool only_prefault; static bool no_prefault; static const struct option options[] = { OPT_STRING('l', "length", &length_str, "1MB", - "Specify length of memory to set. " - "Available units: B, KB, MB, GB and TB (upper and lower)"), + "Specify length of memory to copy. 
" + "available unit: B, MB, GB (upper and lower)"), OPT_STRING('r', "routine", &routine, "default", - "Specify routine to set"), + "Specify routine to copy"), OPT_INTEGER('i', "iterations", &iterations, "repeat memset() invocation this number of times"), - OPT_BOOLEAN('c', "cycle", &use_cycle, - "Use cycles event instead of gettimeofday() for measuring"), + OPT_BOOLEAN('c', "clock", &use_clock, + "Use CPU clock for measuring"), OPT_BOOLEAN('o', "only-prefault", &only_prefault, "Show only the result with page faults before memset()"), OPT_BOOLEAN('n', "no-prefault", &no_prefault, @@ -76,27 +76,27 @@ static const char * const bench_mem_memset_usage[] = { NULL }; -static struct perf_event_attr cycle_attr = { +static struct perf_event_attr clock_attr = { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }; -static void init_cycle(void) +static void init_clock(void) { - cycle_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1, 0); + clock_fd = sys_perf_event_open(&clock_attr, getpid(), -1, -1, 0); - if (cycle_fd < 0 && errno == ENOSYS) + if (clock_fd < 0 && errno == ENOSYS) die("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); else - BUG_ON(cycle_fd < 0); + BUG_ON(clock_fd < 0); } -static u64 get_cycle(void) +static u64 get_clock(void) { int ret; u64 clk; - ret = read(cycle_fd, &clk, sizeof(u64)); + ret = read(clock_fd, &clk, sizeof(u64)); BUG_ON(ret != sizeof(u64)); return clk; @@ -115,9 +115,9 @@ static void alloc_mem(void **dst, size_t length) die("memory allocation failed - maybe length is too large?\n"); } -static u64 do_memset_cycle(memset_t fn, size_t len, bool prefault) +static u64 do_memset_clock(memset_t fn, size_t len, bool prefault) { - u64 cycle_start = 0ULL, cycle_end = 0ULL; + u64 clock_start = 0ULL, clock_end = 0ULL; void *dst = NULL; int i; @@ -126,13 +126,13 @@ static u64 do_memset_cycle(memset_t fn, size_t len, bool prefault) if (prefault) fn(dst, -1, len); - cycle_start = get_cycle(); + clock_start = get_clock(); for (i = 0; i < iterations; ++i) fn(dst, i, len); - cycle_end = get_cycle(); + clock_end = get_clock(); free(dst); - return cycle_end - cycle_start; + return clock_end - clock_start; } static double do_memset_gettimeofday(memset_t fn, size_t len, bool prefault) @@ -176,17 +176,17 @@ int bench_mem_memset(int argc, const char **argv, int i; size_t len; double result_bps[2]; - u64 result_cycle[2]; + u64 result_clock[2]; argc = parse_options(argc, argv, options, bench_mem_memset_usage, 0); - if (use_cycle) - init_cycle(); + if (use_clock) + init_clock(); len = (size_t)perf_atoll((char *)length_str); - result_cycle[0] = result_cycle[1] = 0ULL; + result_clock[0] = result_clock[1] = 0ULL; result_bps[0] = result_bps[1] = 0.0; if ((s64)len <= 0) { @@ -217,11 +217,11 @@ int bench_mem_memset(int argc, const char **argv, if (!only_prefault && !no_prefault) { /* show both of results */ - if (use_cycle) { - result_cycle[0] = - do_memset_cycle(routines[i].fn, len, false); - result_cycle[1] = - do_memset_cycle(routines[i].fn, len, true); + if (use_clock) { + result_clock[0] = + do_memset_clock(routines[i].fn, len, false); + result_clock[1] = + do_memset_clock(routines[i].fn, len, true); } else { result_bps[0] = do_memset_gettimeofday(routines[i].fn, @@ -231,9 +231,9 @@ int bench_mem_memset(int argc, const char **argv, len, true); } } else { - if (use_cycle) { - result_cycle[pf] = - do_memset_cycle(routines[i].fn, + if (use_clock) { + result_clock[pf] = + do_memset_clock(routines[i].fn, len, only_prefault); } else { result_bps[pf] = @@ -245,12 +245,12 @@ int 
bench_mem_memset(int argc, const char **argv, switch (bench_format) { case BENCH_FORMAT_DEFAULT: if (!only_prefault && !no_prefault) { - if (use_cycle) { - printf(" %14lf Cycle/Byte\n", - (double)result_cycle[0] + if (use_clock) { + printf(" %14lf Clock/Byte\n", + (double)result_clock[0] / (double)len); - printf(" %14lf Cycle/Byte (with prefault)\n ", - (double)result_cycle[1] + printf(" %14lf Clock/Byte (with prefault)\n ", + (double)result_clock[1] / (double)len); } else { print_bps(result_bps[0]); @@ -259,9 +259,9 @@ int bench_mem_memset(int argc, const char **argv, printf(" (with prefault)\n"); } } else { - if (use_cycle) { - printf(" %14lf Cycle/Byte", - (double)result_cycle[pf] + if (use_clock) { + printf(" %14lf Clock/Byte", + (double)result_clock[pf] / (double)len); } else print_bps(result_bps[pf]); @@ -271,17 +271,17 @@ int bench_mem_memset(int argc, const char **argv, break; case BENCH_FORMAT_SIMPLE: if (!only_prefault && !no_prefault) { - if (use_cycle) { + if (use_clock) { printf("%lf %lf\n", - (double)result_cycle[0] / (double)len, - (double)result_cycle[1] / (double)len); + (double)result_clock[0] / (double)len, + (double)result_clock[1] / (double)len); } else { printf("%lf %lf\n", result_bps[0], result_bps[1]); } } else { - if (use_cycle) { - printf("%lf\n", (double)result_cycle[pf] + if (use_clock) { + printf("%lf\n", (double)result_clock[pf] / (double)len); } else printf("%lf\n", result_bps[pf]); diff --git a/trunk/tools/perf/builtin-bench.c b/trunk/tools/perf/builtin-bench.c index 1f3100216448..b0e74ab2d7a2 100644 --- a/trunk/tools/perf/builtin-bench.c +++ b/trunk/tools/perf/builtin-bench.c @@ -33,7 +33,7 @@ struct bench_suite { }; \ /* sentinel: easy for help */ -#define suite_all { "all", "Test all benchmark suites", NULL } +#define suite_all { "all", "test all suite (pseudo suite)", NULL } static struct bench_suite sched_suites[] = { { "messaging", @@ -75,7 +75,7 @@ static struct bench_subsys subsystems[] = { "memory access performance", mem_suites }, { "all", /* sentinel: easy for help */ - "all benchmark subsystem", + "test all subsystem (pseudo subsystem)", NULL }, { NULL, NULL, diff --git a/trunk/tools/perf/builtin-evlist.c b/trunk/tools/perf/builtin-evlist.c index 0dd5a058f766..acd78dc28341 100644 --- a/trunk/tools/perf/builtin-evlist.c +++ b/trunk/tools/perf/builtin-evlist.c @@ -60,7 +60,7 @@ static int __cmd_evlist(const char *input_name, struct perf_attr_details *detail list_for_each_entry(pos, &session->evlist->entries, node) { bool first = true; - printf("%s", perf_evsel__name(pos)); + printf("%s", event_name(pos)); if (details->verbose || details->freq) { comma_printf(&first, " sample_freq=%" PRIu64, diff --git a/trunk/tools/perf/builtin-kmem.c b/trunk/tools/perf/builtin-kmem.c index ce35015f2dc6..547af48deb4f 100644 --- a/trunk/tools/perf/builtin-kmem.c +++ b/trunk/tools/perf/builtin-kmem.c @@ -57,11 +57,6 @@ static unsigned long nr_allocs, nr_cross_allocs; #define PATH_SYS_NODE "/sys/devices/system/node" -struct perf_kmem { - struct perf_tool tool; - struct perf_session *session; -}; - static void init_cpunode_map(void) { FILE *fp; @@ -283,16 +278,14 @@ static void process_free_event(void *data, s_alloc->alloc_cpu = -1; } -static void process_raw_event(struct perf_tool *tool, - union perf_event *raw_event __used, void *data, +static void process_raw_event(union perf_event *raw_event __used, void *data, int cpu, u64 timestamp, struct thread *thread) { - struct perf_kmem *kmem = container_of(tool, struct perf_kmem, tool); struct event_format *event; int 
type; - type = trace_parse_common_type(kmem->session->pevent, data); - event = pevent_find_event(kmem->session->pevent, type); + type = trace_parse_common_type(data); + event = trace_find_event(type); if (!strcmp(event->name, "kmalloc") || !strcmp(event->name, "kmem_cache_alloc")) { @@ -313,7 +306,7 @@ static void process_raw_event(struct perf_tool *tool, } } -static int process_sample_event(struct perf_tool *tool, +static int process_sample_event(struct perf_tool *tool __used, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel __used, @@ -329,18 +322,16 @@ static int process_sample_event(struct perf_tool *tool, dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); - process_raw_event(tool, event, sample->raw_data, sample->cpu, + process_raw_event(event, sample->raw_data, sample->cpu, sample->time, thread); return 0; } -static struct perf_kmem perf_kmem = { - .tool = { - .sample = process_sample_event, - .comm = perf_event__process_comm, - .ordered_samples = true, - }, +static struct perf_tool perf_kmem = { + .sample = process_sample_event, + .comm = perf_event__process_comm, + .ordered_samples = true, }; static double fragmentation(unsigned long n_req, unsigned long n_alloc) @@ -495,15 +486,11 @@ static void sort_result(void) static int __cmd_kmem(void) { int err = -EINVAL; - struct perf_session *session; - - session = perf_session__new(input_name, O_RDONLY, 0, false, - &perf_kmem.tool); + struct perf_session *session = perf_session__new(input_name, O_RDONLY, + 0, false, &perf_kmem); if (session == NULL) return -ENOMEM; - perf_kmem.session = session; - if (perf_session__create_kernel_maps(session) < 0) goto out_delete; @@ -511,7 +498,7 @@ static int __cmd_kmem(void) goto out_delete; setup_pager(); - err = perf_session__process_events(session, &perf_kmem.tool); + err = perf_session__process_events(session, &perf_kmem); if (err != 0) goto out_delete; sort_result(); diff --git a/trunk/tools/perf/builtin-lock.c b/trunk/tools/perf/builtin-lock.c index b3c428548868..fd53319de20d 100644 --- a/trunk/tools/perf/builtin-lock.c +++ b/trunk/tools/perf/builtin-lock.c @@ -724,8 +724,8 @@ process_raw_event(void *data, int cpu, u64 timestamp, struct thread *thread) struct event_format *event; int type; - type = trace_parse_common_type(session->pevent, data); - event = pevent_find_event(session->pevent, type); + type = trace_parse_common_type(data); + event = trace_find_event(type); if (!strcmp(event->name, "lock_acquire")) process_lock_acquire_event(data, event, cpu, timestamp, thread); diff --git a/trunk/tools/perf/builtin-record.c b/trunk/tools/perf/builtin-record.c index f5a6452931e6..f95840d04e4c 100644 --- a/trunk/tools/perf/builtin-record.c +++ b/trunk/tools/perf/builtin-record.c @@ -265,7 +265,7 @@ static void perf_record__open(struct perf_record *rec) if (err == ENOENT) { ui__error("The %s event is not supported.\n", - perf_evsel__name(pos)); + event_name(pos)); exit(EXIT_FAILURE); } @@ -916,7 +916,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) usage_with_options(record_usage, record_options); list_for_each_entry(pos, &evsel_list->entries, node) { - if (perf_header__push_event(pos->attr.config, perf_evsel__name(pos))) + if (perf_header__push_event(pos->attr.config, event_name(pos))) goto out_free_fd; } diff --git a/trunk/tools/perf/builtin-report.c b/trunk/tools/perf/builtin-report.c index 69b1c1185159..25249f76329d 100644 --- a/trunk/tools/perf/builtin-report.c +++ b/trunk/tools/perf/builtin-report.c @@ -69,7 +69,7 @@ static int 
perf_report__add_branch_hist_entry(struct perf_tool *tool, if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) { - err = machine__resolve_callchain(machine, al->thread, + err = machine__resolve_callchain(machine, evsel, al->thread, sample->callchain, &parent); if (err) return err; @@ -140,7 +140,7 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel, struct hist_entry *he; if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) { - err = machine__resolve_callchain(machine, al->thread, + err = machine__resolve_callchain(machine, evsel, al->thread, sample->callchain, &parent); if (err) return err; @@ -230,7 +230,7 @@ static int process_read_event(struct perf_tool *tool, struct perf_report *rep = container_of(tool, struct perf_report, tool); if (rep->show_threads) { - const char *name = evsel ? perf_evsel__name(evsel) : "unknown"; + const char *name = evsel ? event_name(evsel) : "unknown"; perf_read_values_add_value(&rep->show_threads_values, event->read.pid, event->read.tid, event->read.id, @@ -239,18 +239,17 @@ static int process_read_event(struct perf_tool *tool, } dump_printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid, - evsel ? perf_evsel__name(evsel) : "FAIL", + evsel ? event_name(evsel) : "FAIL", event->read.value); return 0; } -/* For pipe mode, sample_type is not currently set */ static int perf_report__setup_sample_type(struct perf_report *rep) { struct perf_session *self = rep->session; - if (!self->fd_pipe && !(self->sample_type & PERF_SAMPLE_CALLCHAIN)) { + if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) { if (sort__has_parent) { ui__error("Selected --sort parent, but no " "callchain data. Did you call " @@ -273,8 +272,7 @@ static int perf_report__setup_sample_type(struct perf_report *rep) } if (sort__branch_mode == 1) { - if (!self->fd_pipe && - !(self->sample_type & PERF_SAMPLE_BRANCH_STACK)) { + if (!(self->sample_type & PERF_SAMPLE_BRANCH_STACK)) { ui__error("Selected -b but no branch data. 
" "Did you call perf record without -b?\n"); return -1; @@ -316,7 +314,7 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist, list_for_each_entry(pos, &evlist->entries, node) { struct hists *hists = &pos->hists; - const char *evname = perf_evsel__name(pos); + const char *evname = event_name(pos); hists__fprintf_nr_sample_events(hists, evname, stdout); hists__fprintf(hists, NULL, false, true, 0, 0, stdout); diff --git a/trunk/tools/perf/builtin-sched.c b/trunk/tools/perf/builtin-sched.c index 7a9ad2b1ee76..b125e07eb399 100644 --- a/trunk/tools/perf/builtin-sched.c +++ b/trunk/tools/perf/builtin-sched.c @@ -43,11 +43,6 @@ static u64 sleep_measurement_overhead; static unsigned long nr_tasks; -struct perf_sched { - struct perf_tool tool; - struct perf_session *session; -}; - struct sched_atom; struct task_desc { @@ -1602,13 +1597,11 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool, struct perf_evsel *evsel, struct machine *machine) { - struct perf_sched *sched = container_of(tool, struct perf_sched, tool); - struct pevent *pevent = sched->session->pevent; struct thread *thread = machine__findnew_thread(machine, sample->pid); if (thread == NULL) { pr_debug("problem processing %s event, skipping it.\n", - perf_evsel__name(evsel)); + evsel->name); return -1; } @@ -1619,8 +1612,7 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool, tracepoint_handler f = evsel->handler.func; if (evsel->handler.data == NULL) - evsel->handler.data = pevent_find_event(pevent, - evsel->attr.config); + evsel->handler.data = trace_find_event(evsel->attr.config); f(tool, evsel->handler.data, sample, machine, thread); } @@ -1628,14 +1620,12 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool, return 0; } -static struct perf_sched perf_sched = { - .tool = { - .sample = perf_sched__process_tracepoint_sample, - .comm = perf_event__process_comm, - .lost = perf_event__process_lost, - .fork = perf_event__process_task, - .ordered_samples = true, - }, +static struct perf_tool perf_sched = { + .sample = perf_sched__process_tracepoint_sample, + .comm = perf_event__process_comm, + .lost = perf_event__process_lost, + .fork = perf_event__process_task, + .ordered_samples = true, }; static void read_events(bool destroy, struct perf_session **psession) @@ -1650,20 +1640,16 @@ static void read_events(bool destroy, struct perf_session **psession) { "sched:sched_process_exit", process_sched_exit_event, }, { "sched:sched_migrate_task", process_sched_migrate_task_event, }, }; - struct perf_session *session; - - session = perf_session__new(input_name, O_RDONLY, 0, false, - &perf_sched.tool); + struct perf_session *session = perf_session__new(input_name, O_RDONLY, + 0, false, &perf_sched); if (session == NULL) die("No Memory"); - perf_sched.session = session; - - err = perf_session__set_tracepoints_handlers(session, handlers); + err = perf_evlist__set_tracepoints_handlers_array(session->evlist, handlers); assert(err == 0); if (perf_session__has_traces(session, "record -R")) { - err = perf_session__process_events(session, &perf_sched.tool); + err = perf_session__process_events(session, &perf_sched); if (err) die("Failed to process events, error %d", err); diff --git a/trunk/tools/perf/builtin-script.c b/trunk/tools/perf/builtin-script.c index 1e60ab70b2b1..8e395a538eb9 100644 --- a/trunk/tools/perf/builtin-script.c +++ b/trunk/tools/perf/builtin-script.c @@ -28,11 +28,6 @@ static bool system_wide; static const char *cpu_list; static 
DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); -struct perf_script { - struct perf_tool tool; - struct perf_session *session; -}; - enum perf_output_field { PERF_OUTPUT_COMM = 1U << 0, PERF_OUTPUT_TID = 1U << 1, @@ -142,11 +137,10 @@ static const char *output_field2str(enum perf_output_field field) #define PRINT_FIELD(x) (output[attr->type].fields & PERF_OUTPUT_##x) -static int perf_evsel__check_stype(struct perf_evsel *evsel, - u64 sample_type, const char *sample_msg, - enum perf_output_field field) +static int perf_event_attr__check_stype(struct perf_event_attr *attr, + u64 sample_type, const char *sample_msg, + enum perf_output_field field) { - struct perf_event_attr *attr = &evsel->attr; int type = attr->type; const char *evname; @@ -154,7 +148,7 @@ static int perf_evsel__check_stype(struct perf_evsel *evsel, return 0; if (output[type].user_set) { - evname = perf_evsel__name(evsel); + evname = __event_name(attr->type, attr->config); pr_err("Samples for '%s' event do not have %s attribute set. " "Cannot print '%s' field.\n", evname, sample_msg, output_field2str(field)); @@ -163,7 +157,7 @@ static int perf_evsel__check_stype(struct perf_evsel *evsel, /* user did not ask for it explicitly so remove from the default list */ output[type].fields &= ~field; - evname = perf_evsel__name(evsel); + evname = __event_name(attr->type, attr->config); pr_debug("Samples for '%s' event do not have %s attribute set. " "Skipping '%s' field.\n", evname, sample_msg, output_field2str(field)); @@ -181,8 +175,8 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel, return -EINVAL; if (PRINT_FIELD(IP)) { - if (perf_evsel__check_stype(evsel, PERF_SAMPLE_IP, "IP", - PERF_OUTPUT_IP)) + if (perf_event_attr__check_stype(attr, PERF_SAMPLE_IP, "IP", + PERF_OUTPUT_IP)) return -EINVAL; if (!no_callchain && @@ -191,8 +185,8 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel, } if (PRINT_FIELD(ADDR) && - perf_evsel__check_stype(evsel, PERF_SAMPLE_ADDR, "ADDR", - PERF_OUTPUT_ADDR)) + perf_event_attr__check_stype(attr, PERF_SAMPLE_ADDR, "ADDR", + PERF_OUTPUT_ADDR)) return -EINVAL; if (PRINT_FIELD(SYM) && !PRINT_FIELD(IP) && !PRINT_FIELD(ADDR)) { @@ -214,18 +208,18 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel, } if ((PRINT_FIELD(PID) || PRINT_FIELD(TID)) && - perf_evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID", - PERF_OUTPUT_TID|PERF_OUTPUT_PID)) + perf_event_attr__check_stype(attr, PERF_SAMPLE_TID, "TID", + PERF_OUTPUT_TID|PERF_OUTPUT_PID)) return -EINVAL; if (PRINT_FIELD(TIME) && - perf_evsel__check_stype(evsel, PERF_SAMPLE_TIME, "TIME", - PERF_OUTPUT_TIME)) + perf_event_attr__check_stype(attr, PERF_SAMPLE_TIME, "TIME", + PERF_OUTPUT_TIME)) return -EINVAL; if (PRINT_FIELD(CPU) && - perf_evsel__check_stype(evsel, PERF_SAMPLE_CPU, "CPU", - PERF_OUTPUT_CPU)) + perf_event_attr__check_stype(attr, PERF_SAMPLE_CPU, "CPU", + PERF_OUTPUT_CPU)) return -EINVAL; return 0; @@ -262,13 +256,11 @@ static int perf_session__check_output_opt(struct perf_session *session) return 0; } -static void print_sample_start(struct pevent *pevent, - struct perf_sample *sample, +static void print_sample_start(struct perf_sample *sample, struct thread *thread, - struct perf_evsel *evsel) + struct perf_event_attr *attr) { int type; - struct perf_event_attr *attr = &evsel->attr; struct event_format *event; const char *evname = NULL; unsigned long secs; @@ -308,18 +300,12 @@ static void print_sample_start(struct pevent *pevent, if (PRINT_FIELD(EVNAME)) { if (attr->type == PERF_TYPE_TRACEPOINT) { - /* - * XXX Do we really need 
this here? - * perf_evlist__set_tracepoint_names should have done - * this already - */ - type = trace_parse_common_type(pevent, - sample->raw_data); - event = pevent_find_event(pevent, type); + type = trace_parse_common_type(sample->raw_data); + event = trace_find_event(type); if (event) evname = event->name; } else - evname = perf_evsel__name(evsel); + evname = __event_name(attr->type, attr->config); printf("%s: ", evname ? evname : "[unknown]"); } @@ -401,7 +387,7 @@ static void print_sample_bts(union perf_event *event, printf(" "); else printf("\n"); - perf_event__print_ip(event, sample, machine, + perf_event__print_ip(event, sample, machine, evsel, PRINT_FIELD(SYM), PRINT_FIELD(DSO), PRINT_FIELD(SYMOFFSET)); } @@ -416,7 +402,6 @@ static void print_sample_bts(union perf_event *event, } static void process_event(union perf_event *event __unused, - struct pevent *pevent, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine, @@ -427,7 +412,7 @@ static void process_event(union perf_event *event __unused, if (output[attr->type].fields == 0) return; - print_sample_start(pevent, sample, thread, evsel); + print_sample_start(sample, thread, attr); if (is_bts_event(attr)) { print_sample_bts(event, sample, evsel, machine, thread); @@ -435,7 +420,7 @@ static void process_event(union perf_event *event __unused, } if (PRINT_FIELD(TRACE)) - print_trace_event(pevent, sample->cpu, sample->raw_data, + print_trace_event(sample->cpu, sample->raw_data, sample->raw_size); if (PRINT_FIELD(ADDR)) @@ -446,7 +431,7 @@ static void process_event(union perf_event *event __unused, printf(" "); else printf("\n"); - perf_event__print_ip(event, sample, machine, + perf_event__print_ip(event, sample, machine, evsel, PRINT_FIELD(SYM), PRINT_FIELD(DSO), PRINT_FIELD(SYMOFFSET)); } @@ -466,8 +451,7 @@ static int default_stop_script(void) return 0; } -static int default_generate_script(struct pevent *pevent __unused, - const char *outfile __unused) +static int default_generate_script(const char *outfile __unused) { return 0; } @@ -505,7 +489,6 @@ static int process_sample_event(struct perf_tool *tool __used, struct machine *machine) { struct addr_location al; - struct perf_script *scr = container_of(tool, struct perf_script, tool); struct thread *thread = machine__findnew_thread(machine, event->ip.tid); if (thread == NULL) { @@ -537,27 +520,24 @@ static int process_sample_event(struct perf_tool *tool __used, if (cpu_list && !test_bit(sample->cpu, cpu_bitmap)) return 0; - scripting_ops->process_event(event, scr->session->pevent, - sample, evsel, machine, thread); + scripting_ops->process_event(event, sample, evsel, machine, thread); evsel->hists.stats.total_period += sample->period; return 0; } -static struct perf_script perf_script = { - .tool = { - .sample = process_sample_event, - .mmap = perf_event__process_mmap, - .comm = perf_event__process_comm, - .exit = perf_event__process_task, - .fork = perf_event__process_task, - .attr = perf_event__process_attr, - .event_type = perf_event__process_event_type, - .tracing_data = perf_event__process_tracing_data, - .build_id = perf_event__process_build_id, - .ordered_samples = true, - .ordering_requires_timestamps = true, - }, +static struct perf_tool perf_script = { + .sample = process_sample_event, + .mmap = perf_event__process_mmap, + .comm = perf_event__process_comm, + .exit = perf_event__process_task, + .fork = perf_event__process_task, + .attr = perf_event__process_attr, + .event_type = perf_event__process_event_type, + .tracing_data = 
perf_event__process_tracing_data, + .build_id = perf_event__process_build_id, + .ordered_samples = true, + .ordering_requires_timestamps = true, }; extern volatile int session_done; @@ -573,7 +553,7 @@ static int __cmd_script(struct perf_session *session) signal(SIGINT, sig_handler); - ret = perf_session__process_events(session, &perf_script.tool); + ret = perf_session__process_events(session, &perf_script); if (debug_mode) pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered); @@ -1355,13 +1335,10 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) if (!script_name) setup_pager(); - session = perf_session__new(input_name, O_RDONLY, 0, false, - &perf_script.tool); + session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_script); if (session == NULL) return -ENOMEM; - perf_script.session = session; - if (cpu_list) { if (perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap)) return -1; @@ -1407,8 +1384,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) return -1; } - err = scripting_ops->generate_script(session->pevent, - "perf-script"); + err = scripting_ops->generate_script("perf-script"); goto out; } diff --git a/trunk/tools/perf/builtin-stat.c b/trunk/tools/perf/builtin-stat.c index 861f0aec77ae..07b5c7703dd1 100644 --- a/trunk/tools/perf/builtin-stat.c +++ b/trunk/tools/perf/builtin-stat.c @@ -391,7 +391,7 @@ static int read_counter_aggr(struct perf_evsel *counter) if (verbose) { fprintf(output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", - perf_evsel__name(counter), count[0], count[1], count[2]); + event_name(counter), count[0], count[1], count[2]); } /* @@ -496,7 +496,7 @@ static int run_perf_stat(int argc __used, const char **argv) errno == ENXIO) { if (verbose) ui__warning("%s event is not supported by the kernel.\n", - perf_evsel__name(counter)); + event_name(counter)); counter->supported = false; continue; } @@ -594,7 +594,7 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) csv_output ? 0 : -4, evsel_list->cpus->map[cpu], csv_sep); - fprintf(output, fmt, cpustr, msecs, csv_sep, perf_evsel__name(evsel)); + fprintf(output, fmt, cpustr, msecs, csv_sep, event_name(evsel)); if (evsel->cgrp) fprintf(output, "%s%s", csv_sep, evsel->cgrp->name); @@ -792,7 +792,7 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) else cpu = 0; - fprintf(output, fmt, cpustr, avg, csv_sep, perf_evsel__name(evsel)); + fprintf(output, fmt, cpustr, avg, csv_sep, event_name(evsel)); if (evsel->cgrp) fprintf(output, "%s%s", csv_sep, evsel->cgrp->name); @@ -908,7 +908,7 @@ static void print_counter_aggr(struct perf_evsel *counter) counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED, csv_sep, csv_output ? 0 : -24, - perf_evsel__name(counter)); + event_name(counter)); if (counter->cgrp) fprintf(output, "%s%s", csv_sep, counter->cgrp->name); @@ -961,7 +961,7 @@ static void print_counter(struct perf_evsel *counter) counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED, csv_sep, csv_output ? 
0 : -24, - perf_evsel__name(counter)); + event_name(counter)); if (counter->cgrp) fprintf(output, "%s%s", diff --git a/trunk/tools/perf/builtin-test.c b/trunk/tools/perf/builtin-test.c index 5ce30305462b..5a8727c08757 100644 --- a/trunk/tools/perf/builtin-test.c +++ b/trunk/tools/perf/builtin-test.c @@ -583,7 +583,7 @@ static int test__basic_mmap(void) if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) { pr_debug("expected %d %s events, got %d\n", expected_nr_events[evsel->idx], - perf_evsel__name(evsel), nr_events[evsel->idx]); + event_name(evsel), nr_events[evsel->idx]); goto out_munmap; } } diff --git a/trunk/tools/perf/builtin-top.c b/trunk/tools/perf/builtin-top.c index e3cab5f088f8..6bb0277b7dfe 100644 --- a/trunk/tools/perf/builtin-top.c +++ b/trunk/tools/perf/builtin-top.c @@ -245,7 +245,7 @@ static void perf_top__show_details(struct perf_top *top) if (notes->src == NULL) goto out_unlock; - printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name); + printf("Showing %s for %s\n", event_name(top->sym_evsel), symbol->name); printf(" Events Pcnt (>=%d%%)\n", top->sym_pcnt_filter); more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel->idx, @@ -408,7 +408,7 @@ static void perf_top__print_mapped_keys(struct perf_top *top) fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top->print_entries); if (top->evlist->nr_entries > 1) - fprintf(stdout, "\t[E] active event counter. \t(%s)\n", perf_evsel__name(top->sym_evsel)); + fprintf(stdout, "\t[E] active event counter. \t(%s)\n", event_name(top->sym_evsel)); fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter); @@ -503,13 +503,13 @@ static void perf_top__handle_keypress(struct perf_top *top, int c) fprintf(stderr, "\nAvailable events:"); list_for_each_entry(top->sym_evsel, &top->evlist->entries, node) - fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, perf_evsel__name(top->sym_evsel)); + fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, event_name(top->sym_evsel)); prompt_integer(&counter, "Enter details event counter"); if (counter >= top->evlist->nr_entries) { top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node); - fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel)); + fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(top->sym_evsel)); sleep(1); break; } @@ -774,7 +774,7 @@ static void perf_event__process_sample(struct perf_tool *tool, if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) { - err = machine__resolve_callchain(machine, al.thread, + err = machine__resolve_callchain(machine, evsel, al.thread, sample->callchain, &parent); if (err) return; @@ -960,7 +960,7 @@ static void perf_top__start_counters(struct perf_top *top) if (err == ENOENT) { ui__error("The %s event is not supported.\n", - perf_evsel__name(counter)); + event_name(counter)); goto out_err; } else if (err == EMFILE) { ui__error("Too many events are opened.\n" diff --git a/trunk/tools/perf/config/feature-tests.mak b/trunk/tools/perf/config/feature-tests.mak index 6c18785a6417..d9084e03ce56 100644 --- a/trunk/tools/perf/config/feature-tests.mak +++ b/trunk/tools/perf/config/feature-tests.mak @@ -78,19 +78,6 @@ int main(int argc, char *argv[]) return 0; } endef - -define SOURCE_GTK2_INFOBAR -#pragma GCC diagnostic ignored \"-Wstrict-prototypes\" -#include -#pragma GCC diagnostic error \"-Wstrict-prototypes\" - -int main(void) -{ - gtk_info_bar_new(); - - return 0; -} -endef endif ifndef NO_LIBPERL 
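The feature-tests.mak hunk above drops the SOURCE_GTK2_INFOBAR probe that the perf Makefile compiles with try-cc to decide whether to pass -DHAVE_GTK_INFO_BAR (see the Makefile hunk earlier in this patch). As a reminder of how that probe-and-define pattern is consumed on the C side, here is a minimal, purely illustrative sketch; it is not code from perf, and only the HAVE_GTK_INFO_BAR macro name comes from the patch.

    #include <stdio.h>

    /* HAVE_GTK_INFO_BAR is defined by the build system only when the
     * compile-time probe for the GTK info-bar API succeeds. */
    static const char *error_sink(void)
    {
    #ifdef HAVE_GTK_INFO_BAR
            return "gtk info bar";          /* richer UI path, probe succeeded */
    #else
            return "stderr";                /* fallback path, probe failed */
    #endif
    }

    int main(void)
    {
            printf("errors will be shown via: %s\n", error_sink());
            return 0;
    }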
diff --git a/trunk/tools/perf/ui/browsers/annotate.c b/trunk/tools/perf/ui/browsers/annotate.c index 67a2703e666a..34b1c46eaf42 100644 --- a/trunk/tools/perf/ui/browsers/annotate.c +++ b/trunk/tools/perf/ui/browsers/annotate.c @@ -814,7 +814,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, { struct disasm_line *pos, *n; struct annotation *notes; - size_t size; + const size_t size = symbol__size(sym); struct map_symbol ms = { .map = map, .sym = sym, @@ -834,8 +834,6 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, if (sym == NULL) return -1; - size = symbol__size(sym); - if (map->dso->annotate_warned) return -1; diff --git a/trunk/tools/perf/ui/browsers/hists.c b/trunk/tools/perf/ui/browsers/hists.c index 482f0517b61e..53f6697d014e 100644 --- a/trunk/tools/perf/ui/browsers/hists.c +++ b/trunk/tools/perf/ui/browsers/hists.c @@ -23,7 +23,6 @@ struct hist_browser { struct hists *hists; struct hist_entry *he_selection; struct map_symbol *selection; - int print_seq; bool has_symbols; }; @@ -801,196 +800,6 @@ static void ui_browser__hists_seek(struct ui_browser *browser, } } -static int hist_browser__fprintf_callchain_node_rb_tree(struct hist_browser *browser, - struct callchain_node *chain_node, - u64 total, int level, - FILE *fp) -{ - struct rb_node *node; - int offset = level * LEVEL_OFFSET_STEP; - u64 new_total, remaining; - int printed = 0; - - if (callchain_param.mode == CHAIN_GRAPH_REL) - new_total = chain_node->children_hit; - else - new_total = total; - - remaining = new_total; - node = rb_first(&chain_node->rb_root); - while (node) { - struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node); - struct rb_node *next = rb_next(node); - u64 cumul = callchain_cumul_hits(child); - struct callchain_list *chain; - char folded_sign = ' '; - int first = true; - int extra_offset = 0; - - remaining -= cumul; - - list_for_each_entry(chain, &child->val, list) { - char ipstr[BITS_PER_LONG / 4 + 1], *alloc_str; - const char *str; - bool was_first = first; - - if (first) - first = false; - else - extra_offset = LEVEL_OFFSET_STEP; - - folded_sign = callchain_list__folded(chain); - - alloc_str = NULL; - str = callchain_list__sym_name(chain, ipstr, sizeof(ipstr)); - if (was_first) { - double percent = cumul * 100.0 / new_total; - - if (asprintf(&alloc_str, "%2.2f%% %s", percent, str) < 0) - str = "Not enough memory!"; - else - str = alloc_str; - } - - printed += fprintf(fp, "%*s%c %s\n", offset + extra_offset, " ", folded_sign, str); - free(alloc_str); - if (folded_sign == '+') - break; - } - - if (folded_sign == '-') { - const int new_level = level + (extra_offset ? 
2 : 1); - printed += hist_browser__fprintf_callchain_node_rb_tree(browser, child, new_total, - new_level, fp); - } - - node = next; - } - - return printed; -} - -static int hist_browser__fprintf_callchain_node(struct hist_browser *browser, - struct callchain_node *node, - int level, FILE *fp) -{ - struct callchain_list *chain; - int offset = level * LEVEL_OFFSET_STEP; - char folded_sign = ' '; - int printed = 0; - - list_for_each_entry(chain, &node->val, list) { - char ipstr[BITS_PER_LONG / 4 + 1], *s; - - folded_sign = callchain_list__folded(chain); - s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr)); - printed += fprintf(fp, "%*s%c %s\n", offset, " ", folded_sign, s); - } - - if (folded_sign == '-') - printed += hist_browser__fprintf_callchain_node_rb_tree(browser, node, - browser->hists->stats.total_period, - level + 1, fp); - return printed; -} - -static int hist_browser__fprintf_callchain(struct hist_browser *browser, - struct rb_root *chain, int level, FILE *fp) -{ - struct rb_node *nd; - int printed = 0; - - for (nd = rb_first(chain); nd; nd = rb_next(nd)) { - struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); - - printed += hist_browser__fprintf_callchain_node(browser, node, level, fp); - } - - return printed; -} - -static int hist_browser__fprintf_entry(struct hist_browser *browser, - struct hist_entry *he, FILE *fp) -{ - char s[8192]; - double percent; - int printed = 0; - char folded_sign = ' '; - - if (symbol_conf.use_callchain) - folded_sign = hist_entry__folded(he); - - hist_entry__snprintf(he, s, sizeof(s), browser->hists); - percent = (he->period * 100.0) / browser->hists->stats.total_period; - - if (symbol_conf.use_callchain) - printed += fprintf(fp, "%c ", folded_sign); - - printed += fprintf(fp, " %5.2f%%", percent); - - if (symbol_conf.show_nr_samples) - printed += fprintf(fp, " %11u", he->nr_events); - - if (symbol_conf.show_total_period) - printed += fprintf(fp, " %12" PRIu64, he->period); - - printed += fprintf(fp, "%s\n", rtrim(s)); - - if (folded_sign == '-') - printed += hist_browser__fprintf_callchain(browser, &he->sorted_chain, 1, fp); - - return printed; -} - -static int hist_browser__fprintf(struct hist_browser *browser, FILE *fp) -{ - struct rb_node *nd = hists__filter_entries(rb_first(browser->b.entries)); - int printed = 0; - - while (nd) { - struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); - - printed += hist_browser__fprintf_entry(browser, h, fp); - nd = hists__filter_entries(rb_next(nd)); - } - - return printed; -} - -static int hist_browser__dump(struct hist_browser *browser) -{ - char filename[64]; - FILE *fp; - - while (1) { - scnprintf(filename, sizeof(filename), "perf.hist.%d", browser->print_seq); - if (access(filename, F_OK)) - break; - /* - * XXX: Just an arbitrary lazy upper limit - */ - if (++browser->print_seq == 8192) { - ui_helpline__fpush("Too many perf.hist.N files, nothing written!"); - return -1; - } - } - - fp = fopen(filename, "w"); - if (fp == NULL) { - char bf[64]; - strerror_r(errno, bf, sizeof(bf)); - ui_helpline__fpush("Couldn't write to %s: %s", filename, bf); - return -1; - } - - ++browser->print_seq; - hist_browser__fprintf(browser, fp); - fclose(fp); - ui_helpline__fpush("%s written!", filename); - - return 0; -} - static struct hist_browser *hist_browser__new(struct hists *hists) { struct hist_browser *browser = zalloc(sizeof(*browser)); @@ -1128,9 +937,6 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, 
browser->selection->map->dso->annotate_warned) continue; goto do_annotate; - case 'P': - hist_browser__dump(browser); - continue; case 'd': goto zoom_dso; case 't': @@ -1163,7 +969,6 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, "E Expand all callchains\n" "d Zoom into current DSO\n" "t Zoom into current Thread\n" - "P Print histograms to perf.hist.N\n" "/ Filter symbol by name"); continue; case K_ENTER: @@ -1367,7 +1172,7 @@ static void perf_evsel_menu__write(struct ui_browser *browser, struct perf_evsel *evsel = list_entry(entry, struct perf_evsel, node); bool current_entry = ui_browser__is_current_entry(browser, row); unsigned long nr_events = evsel->hists.stats.nr_events[PERF_RECORD_SAMPLE]; - const char *ev_name = perf_evsel__name(evsel); + const char *ev_name = event_name(evsel); char bf[256], unit; const char *warn = " "; size_t printed; @@ -1435,7 +1240,7 @@ static int perf_evsel_menu__run(struct perf_evsel_menu *menu, */ if (timer) timer(arg); - ev_name = perf_evsel__name(pos); + ev_name = event_name(pos); key = perf_evsel__hists_browse(pos, nr_events, help, ev_name, true, timer, arg, delay_secs); @@ -1504,11 +1309,17 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist, ui_helpline__push("Press ESC to exit"); list_for_each_entry(pos, &evlist->entries, node) { - const char *ev_name = perf_evsel__name(pos); + const char *ev_name = event_name(pos); size_t line_len = strlen(ev_name) + 7; if (menu.b.width < line_len) menu.b.width = line_len; + /* + * Cache the evsel name, tracepoints have a _high_ cost per + * event_name() call. + */ + if (pos->name == NULL) + pos->name = strdup(ev_name); } return perf_evsel_menu__run(&menu, evlist->nr_entries, help, timer, @@ -1519,10 +1330,11 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help, void(*timer)(void *arg), void *arg, int delay_secs) { + if (evlist->nr_entries == 1) { struct perf_evsel *first = list_entry(evlist->entries.next, struct perf_evsel, node); - const char *ev_name = perf_evsel__name(first); + const char *ev_name = event_name(first); return perf_evsel__hists_browse(first, evlist->nr_entries, help, ev_name, false, timer, arg, delay_secs); diff --git a/trunk/tools/perf/ui/gtk/browser.c b/trunk/tools/perf/ui/gtk/browser.c index ec12e0b4ded6..0656c381a89c 100644 --- a/trunk/tools/perf/ui/gtk/browser.c +++ b/trunk/tools/perf/ui/gtk/browser.c @@ -11,8 +11,8 @@ static void perf_gtk__signal(int sig) { - perf_gtk__exit(false); psignal(sig, "perf"); + gtk_main_quit(); } static void perf_gtk__resize_window(GtkWidget *window) @@ -122,59 +122,13 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) gtk_container_add(GTK_CONTAINER(window), view); } -#ifdef HAVE_GTK_INFO_BAR -static GtkWidget *perf_gtk__setup_info_bar(void) -{ - GtkWidget *info_bar; - GtkWidget *label; - GtkWidget *content_area; - - info_bar = gtk_info_bar_new(); - gtk_widget_set_no_show_all(info_bar, TRUE); - - label = gtk_label_new(""); - gtk_widget_show(label); - - content_area = gtk_info_bar_get_content_area(GTK_INFO_BAR(info_bar)); - gtk_container_add(GTK_CONTAINER(content_area), label); - - gtk_info_bar_add_button(GTK_INFO_BAR(info_bar), GTK_STOCK_OK, - GTK_RESPONSE_OK); - g_signal_connect(info_bar, "response", - G_CALLBACK(gtk_widget_hide), NULL); - - pgctx->info_bar = info_bar; - pgctx->message_label = label; - - return info_bar; -} -#endif - -static GtkWidget *perf_gtk__setup_statusbar(void) -{ - GtkWidget *stbar; - unsigned ctxid; - - stbar = gtk_statusbar_new(); 
- - ctxid = gtk_statusbar_get_context_id(GTK_STATUSBAR(stbar), - "perf report"); - pgctx->statbar = stbar; - pgctx->statbar_ctx_id = ctxid; - - return stbar; -} - int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, const char *help __used, void (*timer) (void *arg)__used, void *arg __used, int delay_secs __used) { struct perf_evsel *pos; - GtkWidget *vbox; GtkWidget *notebook; - GtkWidget *info_bar; - GtkWidget *statbar; GtkWidget *window; signal(SIGSEGV, perf_gtk__signal); @@ -189,17 +143,11 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, g_signal_connect(window, "delete_event", gtk_main_quit, NULL); - pgctx = perf_gtk__activate_context(window); - if (!pgctx) - return -1; - - vbox = gtk_vbox_new(FALSE, 0); - notebook = gtk_notebook_new(); list_for_each_entry(pos, &evlist->entries, node) { struct hists *hists = &pos->hists; - const char *evname = perf_evsel__name(pos); + const char *evname = event_name(pos); GtkWidget *scrolled_window; GtkWidget *tab_label; @@ -216,16 +164,7 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, tab_label); } - gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0); - - info_bar = perf_gtk__setup_info_bar(); - if (info_bar) - gtk_box_pack_start(GTK_BOX(vbox), info_bar, FALSE, FALSE, 0); - - statbar = perf_gtk__setup_statusbar(); - gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0); - - gtk_container_add(GTK_CONTAINER(window), vbox); + gtk_container_add(GTK_CONTAINER(window), notebook); gtk_widget_show_all(window); @@ -235,7 +174,5 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, gtk_main(); - perf_gtk__deactivate_context(&pgctx); - return 0; } diff --git a/trunk/tools/perf/ui/gtk/gtk.h b/trunk/tools/perf/ui/gtk/gtk.h index a4d0f2b4a2dc..75177ee04032 100644 --- a/trunk/tools/perf/ui/gtk/gtk.h +++ b/trunk/tools/perf/ui/gtk/gtk.h @@ -1,39 +1,8 @@ #ifndef _PERF_GTK_H_ #define _PERF_GTK_H_ 1 -#include - #pragma GCC diagnostic ignored "-Wstrict-prototypes" #include #pragma GCC diagnostic error "-Wstrict-prototypes" - -struct perf_gtk_context { - GtkWidget *main_window; - -#ifdef HAVE_GTK_INFO_BAR - GtkWidget *info_bar; - GtkWidget *message_label; -#endif - GtkWidget *statbar; - guint statbar_ctx_id; -}; - -extern struct perf_gtk_context *pgctx; - -static inline bool perf_gtk__is_active_context(struct perf_gtk_context *ctx) -{ - return ctx && ctx->main_window; -} - -struct perf_gtk_context *perf_gtk__activate_context(GtkWidget *window); -int perf_gtk__deactivate_context(struct perf_gtk_context **ctx); - -#ifndef HAVE_GTK_INFO_BAR -static inline GtkWidget *perf_gtk__setup_info_bar(void) -{ - return NULL; -} -#endif - #endif /* _PERF_GTK_H_ */ diff --git a/trunk/tools/perf/ui/gtk/setup.c b/trunk/tools/perf/ui/gtk/setup.c index 92879ce61e2f..829529957766 100644 --- a/trunk/tools/perf/ui/gtk/setup.c +++ b/trunk/tools/perf/ui/gtk/setup.c @@ -1,17 +1,12 @@ #include "gtk.h" #include "../../util/cache.h" -#include "../../util/debug.h" - -extern struct perf_error_ops perf_gtk_eops; int perf_gtk__init(void) { - perf_error__register(&perf_gtk_eops); return gtk_init_check(NULL, NULL) ? 
0 : -1; } void perf_gtk__exit(bool wait_for_ok __used) { - perf_error__unregister(&perf_gtk_eops); gtk_main_quit(); } diff --git a/trunk/tools/perf/ui/gtk/util.c b/trunk/tools/perf/ui/gtk/util.c deleted file mode 100644 index 0ead373c0dfb..000000000000 --- a/trunk/tools/perf/ui/gtk/util.c +++ /dev/null @@ -1,129 +0,0 @@ -#include "../util.h" -#include "../../util/debug.h" -#include "gtk.h" - -#include - - -struct perf_gtk_context *pgctx; - -struct perf_gtk_context *perf_gtk__activate_context(GtkWidget *window) -{ - struct perf_gtk_context *ctx; - - ctx = malloc(sizeof(*pgctx)); - if (ctx) - ctx->main_window = window; - - return ctx; -} - -int perf_gtk__deactivate_context(struct perf_gtk_context **ctx) -{ - if (!perf_gtk__is_active_context(*ctx)) - return -1; - - free(*ctx); - *ctx = NULL; - return 0; -} - -static int perf_gtk__error(const char *format, va_list args) -{ - char *msg; - GtkWidget *dialog; - - if (!perf_gtk__is_active_context(pgctx) || - vasprintf(&msg, format, args) < 0) { - fprintf(stderr, "Error:\n"); - vfprintf(stderr, format, args); - fprintf(stderr, "\n"); - return -1; - } - - dialog = gtk_message_dialog_new_with_markup(GTK_WINDOW(pgctx->main_window), - GTK_DIALOG_DESTROY_WITH_PARENT, - GTK_MESSAGE_ERROR, - GTK_BUTTONS_CLOSE, - "Error\n\n%s", msg); - gtk_dialog_run(GTK_DIALOG(dialog)); - - gtk_widget_destroy(dialog); - free(msg); - return 0; -} - -#ifdef HAVE_GTK_INFO_BAR -static int perf_gtk__warning_info_bar(const char *format, va_list args) -{ - char *msg; - - if (!perf_gtk__is_active_context(pgctx) || - vasprintf(&msg, format, args) < 0) { - fprintf(stderr, "Warning:\n"); - vfprintf(stderr, format, args); - fprintf(stderr, "\n"); - return -1; - } - - gtk_label_set_text(GTK_LABEL(pgctx->message_label), msg); - gtk_info_bar_set_message_type(GTK_INFO_BAR(pgctx->info_bar), - GTK_MESSAGE_WARNING); - gtk_widget_show(pgctx->info_bar); - - free(msg); - return 0; -} -#else -static int perf_gtk__warning_statusbar(const char *format, va_list args) -{ - char *msg, *p; - - if (!perf_gtk__is_active_context(pgctx) || - vasprintf(&msg, format, args) < 0) { - fprintf(stderr, "Warning:\n"); - vfprintf(stderr, format, args); - fprintf(stderr, "\n"); - return -1; - } - - gtk_statusbar_pop(GTK_STATUSBAR(pgctx->statbar), - pgctx->statbar_ctx_id); - - /* Only first line can be displayed */ - p = strchr(msg, '\n'); - if (p) - *p = '\0'; - - gtk_statusbar_push(GTK_STATUSBAR(pgctx->statbar), - pgctx->statbar_ctx_id, msg); - - free(msg); - return 0; -} -#endif - -struct perf_error_ops perf_gtk_eops = { - .error = perf_gtk__error, -#ifdef HAVE_GTK_INFO_BAR - .warning = perf_gtk__warning_info_bar, -#else - .warning = perf_gtk__warning_statusbar, -#endif -}; - -/* - * FIXME: Functions below should be implemented properly. - * For now, just add stubs for NO_NEWT=1 build. 
- */ -#ifdef NO_NEWT_SUPPORT -int ui_helpline__show_help(const char *format __used, va_list ap __used) -{ - return 0; -} - -void ui_progress__update(u64 curr __used, u64 total __used, - const char *title __used) -{ -} -#endif diff --git a/trunk/tools/perf/ui/tui/setup.c b/trunk/tools/perf/ui/tui/setup.c index e813c1d17346..d33e943ac434 100644 --- a/trunk/tools/perf/ui/tui/setup.c +++ b/trunk/tools/perf/ui/tui/setup.c @@ -15,8 +15,6 @@ pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER; static volatile int ui__need_resize; -extern struct perf_error_ops perf_tui_eops; - void ui__refresh_dimensions(bool force) { if (force || ui__need_resize) { @@ -124,8 +122,6 @@ int ui__init(void) signal(SIGINT, ui__signal); signal(SIGQUIT, ui__signal); signal(SIGTERM, ui__signal); - - perf_error__register(&perf_tui_eops); out: return err; } @@ -141,6 +137,4 @@ void ui__exit(bool wait_for_ok) SLsmg_refresh(); SLsmg_reset_smg(); SLang_reset_tty(); - - perf_error__unregister(&perf_tui_eops); } diff --git a/trunk/tools/perf/ui/tui/util.c b/trunk/tools/perf/ui/tui/util.c deleted file mode 100644 index 092902e30cee..000000000000 --- a/trunk/tools/perf/ui/tui/util.c +++ /dev/null @@ -1,243 +0,0 @@ -#include "../../util/util.h" -#include -#include -#include -#include - -#include "../../util/cache.h" -#include "../../util/debug.h" -#include "../browser.h" -#include "../keysyms.h" -#include "../helpline.h" -#include "../ui.h" -#include "../util.h" -#include "../libslang.h" - -static void ui_browser__argv_write(struct ui_browser *browser, - void *entry, int row) -{ - char **arg = entry; - bool current_entry = ui_browser__is_current_entry(browser, row); - - ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED : - HE_COLORSET_NORMAL); - slsmg_write_nstring(*arg, browser->width); -} - -static int popup_menu__run(struct ui_browser *menu) -{ - int key; - - if (ui_browser__show(menu, " ", "ESC: exit, ENTER|->: Select option") < 0) - return -1; - - while (1) { - key = ui_browser__run(menu, 0); - - switch (key) { - case K_RIGHT: - case K_ENTER: - key = menu->index; - break; - case K_LEFT: - case K_ESC: - case 'q': - case CTRL('c'): - key = -1; - break; - default: - continue; - } - - break; - } - - ui_browser__hide(menu); - return key; -} - -int ui__popup_menu(int argc, char * const argv[]) -{ - struct ui_browser menu = { - .entries = (void *)argv, - .refresh = ui_browser__argv_refresh, - .seek = ui_browser__argv_seek, - .write = ui_browser__argv_write, - .nr_entries = argc, - }; - - return popup_menu__run(&menu); -} - -int ui_browser__input_window(const char *title, const char *text, char *input, - const char *exit_msg, int delay_secs) -{ - int x, y, len, key; - int max_len = 60, nr_lines = 0; - static char buf[50]; - const char *t; - - t = text; - while (1) { - const char *sep = strchr(t, '\n'); - - if (sep == NULL) - sep = strchr(t, '\0'); - len = sep - t; - if (max_len < len) - max_len = len; - ++nr_lines; - if (*sep == '\0') - break; - t = sep + 1; - } - - max_len += 2; - nr_lines += 8; - y = SLtt_Screen_Rows / 2 - nr_lines / 2; - x = SLtt_Screen_Cols / 2 - max_len / 2; - - SLsmg_set_color(0); - SLsmg_draw_box(y, x++, nr_lines, max_len); - if (title) { - SLsmg_gotorc(y, x + 1); - SLsmg_write_string((char *)title); - } - SLsmg_gotorc(++y, x); - nr_lines -= 7; - max_len -= 2; - SLsmg_write_wrapped_string((unsigned char *)text, y, x, - nr_lines, max_len, 1); - y += nr_lines; - len = 5; - while (len--) { - SLsmg_gotorc(y + len - 1, x); - SLsmg_write_nstring((char *)" ", max_len); - } - SLsmg_draw_box(y++, x + 
1, 3, max_len - 2); - - SLsmg_gotorc(y + 3, x); - SLsmg_write_nstring((char *)exit_msg, max_len); - SLsmg_refresh(); - - x += 2; - len = 0; - key = ui__getch(delay_secs); - while (key != K_TIMER && key != K_ENTER && key != K_ESC) { - if (key == K_BKSPC) { - if (len == 0) - goto next_key; - SLsmg_gotorc(y, x + --len); - SLsmg_write_char(' '); - } else { - buf[len] = key; - SLsmg_gotorc(y, x + len++); - SLsmg_write_char(key); - } - SLsmg_refresh(); - - /* XXX more graceful overflow handling needed */ - if (len == sizeof(buf) - 1) { - ui_helpline__push("maximum size of symbol name reached!"); - key = K_ENTER; - break; - } -next_key: - key = ui__getch(delay_secs); - } - - buf[len] = '\0'; - strncpy(input, buf, len+1); - return key; -} - -int ui__question_window(const char *title, const char *text, - const char *exit_msg, int delay_secs) -{ - int x, y; - int max_len = 0, nr_lines = 0; - const char *t; - - t = text; - while (1) { - const char *sep = strchr(t, '\n'); - int len; - - if (sep == NULL) - sep = strchr(t, '\0'); - len = sep - t; - if (max_len < len) - max_len = len; - ++nr_lines; - if (*sep == '\0') - break; - t = sep + 1; - } - - max_len += 2; - nr_lines += 4; - y = SLtt_Screen_Rows / 2 - nr_lines / 2, - x = SLtt_Screen_Cols / 2 - max_len / 2; - - SLsmg_set_color(0); - SLsmg_draw_box(y, x++, nr_lines, max_len); - if (title) { - SLsmg_gotorc(y, x + 1); - SLsmg_write_string((char *)title); - } - SLsmg_gotorc(++y, x); - nr_lines -= 2; - max_len -= 2; - SLsmg_write_wrapped_string((unsigned char *)text, y, x, - nr_lines, max_len, 1); - SLsmg_gotorc(y + nr_lines - 2, x); - SLsmg_write_nstring((char *)" ", max_len); - SLsmg_gotorc(y + nr_lines - 1, x); - SLsmg_write_nstring((char *)exit_msg, max_len); - SLsmg_refresh(); - return ui__getch(delay_secs); -} - -int ui__help_window(const char *text) -{ - return ui__question_window("Help", text, "Press any key...", 0); -} - -int ui__dialog_yesno(const char *msg) -{ - return ui__question_window(NULL, msg, "Enter: Yes, ESC: No", 0); -} - -static int __ui__warning(const char *title, const char *format, va_list args) -{ - char *s; - - if (vasprintf(&s, format, args) > 0) { - int key; - - pthread_mutex_lock(&ui__lock); - key = ui__question_window(title, s, "Press any key...", 0); - pthread_mutex_unlock(&ui__lock); - free(s); - return key; - } - - fprintf(stderr, "%s\n", title); - vfprintf(stderr, format, args); - return K_ESC; -} - -static int perf_tui__error(const char *format, va_list args) -{ - return __ui__warning("Error:", format, args); -} - -static int perf_tui__warning(const char *format, va_list args) -{ - return __ui__warning("Warning:", format, args); -} - -struct perf_error_ops perf_tui_eops = { - .error = perf_tui__error, - .warning = perf_tui__warning, -}; diff --git a/trunk/tools/perf/ui/util.c b/trunk/tools/perf/ui/util.c index 4f989774c8c6..ad4374a16bb0 100644 --- a/trunk/tools/perf/ui/util.c +++ b/trunk/tools/perf/ui/util.c @@ -1,85 +1,250 @@ -#include "util.h" +#include "../util.h" +#include +#include +#include +#include + +#include "../cache.h" #include "../debug.h" +#include "browser.h" +#include "keysyms.h" +#include "helpline.h" +#include "ui.h" +#include "util.h" +#include "libslang.h" + +static void ui_browser__argv_write(struct ui_browser *browser, + void *entry, int row) +{ + char **arg = entry; + bool current_entry = ui_browser__is_current_entry(browser, row); + ui_browser__set_color(browser, current_entry ? 
HE_COLORSET_SELECTED : + HE_COLORSET_NORMAL); + slsmg_write_nstring(*arg, browser->width); +} -/* - * Default error logging functions - */ -static int perf_stdio__error(const char *format, va_list args) +static int popup_menu__run(struct ui_browser *menu) { - fprintf(stderr, "Error:\n"); - vfprintf(stderr, format, args); - return 0; + int key; + + if (ui_browser__show(menu, " ", "ESC: exit, ENTER|->: Select option") < 0) + return -1; + + while (1) { + key = ui_browser__run(menu, 0); + + switch (key) { + case K_RIGHT: + case K_ENTER: + key = menu->index; + break; + case K_LEFT: + case K_ESC: + case 'q': + case CTRL('c'): + key = -1; + break; + default: + continue; + } + + break; + } + + ui_browser__hide(menu); + return key; } -static int perf_stdio__warning(const char *format, va_list args) +int ui__popup_menu(int argc, char * const argv[]) { - fprintf(stderr, "Warning:\n"); - vfprintf(stderr, format, args); - return 0; + struct ui_browser menu = { + .entries = (void *)argv, + .refresh = ui_browser__argv_refresh, + .seek = ui_browser__argv_seek, + .write = ui_browser__argv_write, + .nr_entries = argc, + }; + + return popup_menu__run(&menu); } -static struct perf_error_ops default_eops = +int ui_browser__input_window(const char *title, const char *text, char *input, + const char *exit_msg, int delay_secs) { - .error = perf_stdio__error, - .warning = perf_stdio__warning, -}; + int x, y, len, key; + int max_len = 60, nr_lines = 0; + static char buf[50]; + const char *t; + + t = text; + while (1) { + const char *sep = strchr(t, '\n'); + + if (sep == NULL) + sep = strchr(t, '\0'); + len = sep - t; + if (max_len < len) + max_len = len; + ++nr_lines; + if (*sep == '\0') + break; + t = sep + 1; + } + + max_len += 2; + nr_lines += 8; + y = SLtt_Screen_Rows / 2 - nr_lines / 2; + x = SLtt_Screen_Cols / 2 - max_len / 2; + + SLsmg_set_color(0); + SLsmg_draw_box(y, x++, nr_lines, max_len); + if (title) { + SLsmg_gotorc(y, x + 1); + SLsmg_write_string((char *)title); + } + SLsmg_gotorc(++y, x); + nr_lines -= 7; + max_len -= 2; + SLsmg_write_wrapped_string((unsigned char *)text, y, x, + nr_lines, max_len, 1); + y += nr_lines; + len = 5; + while (len--) { + SLsmg_gotorc(y + len - 1, x); + SLsmg_write_nstring((char *)" ", max_len); + } + SLsmg_draw_box(y++, x + 1, 3, max_len - 2); + + SLsmg_gotorc(y + 3, x); + SLsmg_write_nstring((char *)exit_msg, max_len); + SLsmg_refresh(); + + x += 2; + len = 0; + key = ui__getch(delay_secs); + while (key != K_TIMER && key != K_ENTER && key != K_ESC) { + if (key == K_BKSPC) { + if (len == 0) + goto next_key; + SLsmg_gotorc(y, x + --len); + SLsmg_write_char(' '); + } else { + buf[len] = key; + SLsmg_gotorc(y, x + len++); + SLsmg_write_char(key); + } + SLsmg_refresh(); + + /* XXX more graceful overflow handling needed */ + if (len == sizeof(buf) - 1) { + ui_helpline__push("maximum size of symbol name reached!"); + key = K_ENTER; + break; + } +next_key: + key = ui__getch(delay_secs); + } + + buf[len] = '\0'; + strncpy(input, buf, len+1); + return key; +} -static struct perf_error_ops *perf_eops = &default_eops; +int ui__question_window(const char *title, const char *text, + const char *exit_msg, int delay_secs) +{ + int x, y; + int max_len = 0, nr_lines = 0; + const char *t; + + t = text; + while (1) { + const char *sep = strchr(t, '\n'); + int len; + + if (sep == NULL) + sep = strchr(t, '\0'); + len = sep - t; + if (max_len < len) + max_len = len; + ++nr_lines; + if (*sep == '\0') + break; + t = sep + 1; + } + + max_len += 2; + nr_lines += 4; + y = SLtt_Screen_Rows / 2 - 
nr_lines / 2, + x = SLtt_Screen_Cols / 2 - max_len / 2; + + SLsmg_set_color(0); + SLsmg_draw_box(y, x++, nr_lines, max_len); + if (title) { + SLsmg_gotorc(y, x + 1); + SLsmg_write_string((char *)title); + } + SLsmg_gotorc(++y, x); + nr_lines -= 2; + max_len -= 2; + SLsmg_write_wrapped_string((unsigned char *)text, y, x, + nr_lines, max_len, 1); + SLsmg_gotorc(y + nr_lines - 2, x); + SLsmg_write_nstring((char *)" ", max_len); + SLsmg_gotorc(y + nr_lines - 1, x); + SLsmg_write_nstring((char *)exit_msg, max_len); + SLsmg_refresh(); + return ui__getch(delay_secs); +} +int ui__help_window(const char *text) +{ + return ui__question_window("Help", text, "Press any key...", 0); +} -int ui__error(const char *format, ...) +int ui__dialog_yesno(const char *msg) { - int ret; - va_list args; + return ui__question_window(NULL, msg, "Enter: Yes, ESC: No", 0); +} - va_start(args, format); - ret = perf_eops->error(format, args); - va_end(args); +int __ui__warning(const char *title, const char *format, va_list args) +{ + char *s; + + if (use_browser > 0 && vasprintf(&s, format, args) > 0) { + int key; + + pthread_mutex_lock(&ui__lock); + key = ui__question_window(title, s, "Press any key...", 0); + pthread_mutex_unlock(&ui__lock); + free(s); + return key; + } - return ret; + fprintf(stderr, "%s:\n", title); + vfprintf(stderr, format, args); + return K_ESC; } int ui__warning(const char *format, ...) { - int ret; + int key; va_list args; va_start(args, format); - ret = perf_eops->warning(format, args); + key = __ui__warning("Warning", format, args); va_end(args); - - return ret; -} - - -/** - * perf_error__register - Register error logging functions - * @eops: The pointer to error logging function struct - * - * Register UI-specific error logging functions. Before calling this, - * other logging functions should be unregistered, if any. - */ -int perf_error__register(struct perf_error_ops *eops) -{ - if (perf_eops != &default_eops) - return -1; - - perf_eops = eops; - return 0; + return key; } -/** - * perf_error__unregister - Unregister error logging functions - * @eops: The pointer to error logging function struct - * - * Unregister already registered error logging functions. - */ -int perf_error__unregister(struct perf_error_ops *eops) +int ui__error(const char *format, ...) { - if (perf_eops != eops) - return -1; + int key; + va_list args; - perf_eops = &default_eops; - return 0; + va_start(args, format); + key = __ui__warning("Error", format, args); + va_end(args); + return key; } diff --git a/trunk/tools/perf/ui/util.h b/trunk/tools/perf/ui/util.h index 361f08c52d37..2d1738bd71c8 100644 --- a/trunk/tools/perf/ui/util.h +++ b/trunk/tools/perf/ui/util.h @@ -9,13 +9,6 @@ int ui__help_window(const char *text); int ui__dialog_yesno(const char *msg); int ui__question_window(const char *title, const char *text, const char *exit_msg, int delay_secs); - -struct perf_error_ops { - int (*error)(const char *format, va_list args); - int (*warning)(const char *format, va_list args); -}; - -int perf_error__register(struct perf_error_ops *eops); -int perf_error__unregister(struct perf_error_ops *eops); +int __ui__warning(const char *title, const char *format, va_list args); #endif /* _PERF_UI_UTIL_H_ */ diff --git a/trunk/tools/perf/util/debug.c b/trunk/tools/perf/util/debug.c index 4dfe0bb3c322..efb1fce259a4 100644 --- a/trunk/tools/perf/util/debug.c +++ b/trunk/tools/perf/util/debug.c @@ -47,7 +47,7 @@ int dump_printf(const char *fmt, ...) 
return ret; } -#if defined(NO_NEWT_SUPPORT) && defined(NO_GTK2_SUPPORT) +#ifdef NO_NEWT_SUPPORT int ui__warning(const char *format, ...) { va_list args; diff --git a/trunk/tools/perf/util/debug.h b/trunk/tools/perf/util/debug.h index 015c91dbc096..6bebe7f0a20c 100644 --- a/trunk/tools/perf/util/debug.h +++ b/trunk/tools/perf/util/debug.h @@ -12,9 +12,8 @@ int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2))); void trace_event(union perf_event *event); struct ui_progress; -struct perf_error_ops; -#if defined(NO_NEWT_SUPPORT) && defined(NO_GTK2_SUPPORT) +#ifdef NO_NEWT_SUPPORT static inline int ui_helpline__show_help(const char *format __used, va_list ap __used) { return 0; @@ -24,28 +23,12 @@ static inline void ui_progress__update(u64 curr __used, u64 total __used, const char *title __used) {} #define ui__error(format, arg...) ui__warning(format, ##arg) - -static inline int -perf_error__register(struct perf_error_ops *eops __used) -{ - return 0; -} - -static inline int -perf_error__unregister(struct perf_error_ops *eops __used) -{ - return 0; -} - -#else /* NO_NEWT_SUPPORT && NO_GTK2_SUPPORT */ - +#else extern char ui_helpline__last_msg[]; int ui_helpline__show_help(const char *format, va_list ap); #include "../ui/progress.h" int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2))); -#include "../ui/util.h" - -#endif /* NO_NEWT_SUPPORT && NO_GTK2_SUPPORT */ +#endif int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2))); int ui__error_paranoid(void); diff --git a/trunk/tools/perf/util/evlist.c b/trunk/tools/perf/util/evlist.c index f74e9560350e..7400fb3fc50c 100644 --- a/trunk/tools/perf/util/evlist.c +++ b/trunk/tools/perf/util/evlist.c @@ -224,8 +224,8 @@ int perf_evlist__add_tracepoints(struct perf_evlist *evlist, return err; } -struct perf_evsel * -perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id) +static struct perf_evsel * + perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id) { struct perf_evsel *evsel; diff --git a/trunk/tools/perf/util/evlist.h b/trunk/tools/perf/util/evlist.h index 40d4d3cdced0..989bee9624c2 100644 --- a/trunk/tools/perf/util/evlist.h +++ b/trunk/tools/perf/util/evlist.h @@ -73,9 +73,6 @@ int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist, #define perf_evlist__set_tracepoints_handlers_array(evlist, array) \ perf_evlist__set_tracepoints_handlers(evlist, array, ARRAY_SIZE(array)) -struct perf_evsel * -perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id); - void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel, int cpu, int thread, u64 id); diff --git a/trunk/tools/perf/util/evsel.c b/trunk/tools/perf/util/evsel.c index e81771364867..9f6cebd798ee 100644 --- a/trunk/tools/perf/util/evsel.c +++ b/trunk/tools/perf/util/evsel.c @@ -15,7 +15,7 @@ #include "cpumap.h" #include "thread_map.h" #include "target.h" -#include "../../../include/linux/hw_breakpoint.h" +#include "../../include/linux/perf_event.h" #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) #define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0)) @@ -78,7 +78,7 @@ static const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = { "ref-cycles", }; -static const char *__perf_evsel__hw_name(u64 config) +const char *__perf_evsel__hw_name(u64 config) { if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config]) return perf_evsel__hw_names[config]; @@ -86,15 +86,16 @@ static const char *__perf_evsel__hw_name(u64 config) return 
"unknown-hardware"; } -static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size) +static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size) { - int colon = 0, r = 0; + int colon = 0; struct perf_event_attr *attr = &evsel->attr; + int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(attr->config)); bool exclude_guest_default = false; #define MOD_PRINT(context, mod) do { \ if (!attr->exclude_##context) { \ - if (!colon) colon = ++r; \ + if (!colon) colon = r++; \ r += scnprintf(bf + r, size - r, "%c", mod); \ } } while(0) @@ -107,7 +108,7 @@ static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t if (attr->precise_ip) { if (!colon) - colon = ++r; + colon = r++; r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp"); exclude_guest_default = true; } @@ -118,211 +119,39 @@ static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t } #undef MOD_PRINT if (colon) - bf[colon - 1] = ':'; + bf[colon] = ':'; return r; } -static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size) -{ - int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config)); - return r + perf_evsel__add_modifiers(evsel, bf + r, size - r); -} - -static const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = { - "cpu-clock", - "task-clock", - "page-faults", - "context-switches", - "CPU-migrations", - "minor-faults", - "major-faults", - "alignment-faults", - "emulation-faults", -}; - -static const char *__perf_evsel__sw_name(u64 config) +int perf_evsel__name(struct perf_evsel *evsel, char *bf, size_t size) { - if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config]) - return perf_evsel__sw_names[config]; - return "unknown-software"; -} - -static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size) -{ - int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config)); - return r + perf_evsel__add_modifiers(evsel, bf + r, size - r); -} - -static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type) -{ - int r; - - r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr); - - if (type & HW_BREAKPOINT_R) - r += scnprintf(bf + r, size - r, "r"); - - if (type & HW_BREAKPOINT_W) - r += scnprintf(bf + r, size - r, "w"); - - if (type & HW_BREAKPOINT_X) - r += scnprintf(bf + r, size - r, "x"); - - return r; -} - -static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size) -{ - struct perf_event_attr *attr = &evsel->attr; - int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type); - return r + perf_evsel__add_modifiers(evsel, bf + r, size - r); -} - -const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX] - [PERF_EVSEL__MAX_ALIASES] = { - { "L1-dcache", "l1-d", "l1d", "L1-data", }, - { "L1-icache", "l1-i", "l1i", "L1-instruction", }, - { "LLC", "L2", }, - { "dTLB", "d-tlb", "Data-TLB", }, - { "iTLB", "i-tlb", "Instruction-TLB", }, - { "branch", "branches", "bpu", "btb", "bpc", }, - { "node", }, -}; - -const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_EVSEL__MAX_ALIASES] = { - { "load", "loads", "read", }, - { "store", "stores", "write", }, - { "prefetch", "prefetches", "speculative-read", "speculative-load", }, -}; - -const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX] - [PERF_EVSEL__MAX_ALIASES] = { - { "refs", "Reference", "ops", "access", }, - { "misses", "miss", }, -}; - -#define C(x) PERF_COUNT_HW_CACHE_##x -#define CACHE_READ (1 << C(OP_READ)) -#define CACHE_WRITE (1 << 
C(OP_WRITE)) -#define CACHE_PREFETCH (1 << C(OP_PREFETCH)) -#define COP(x) (1 << x) - -/* - * cache operartion stat - * L1I : Read and prefetch only - * ITLB and BPU : Read-only - */ -static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = { - [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), - [C(L1I)] = (CACHE_READ | CACHE_PREFETCH), - [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), - [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), - [C(ITLB)] = (CACHE_READ), - [C(BPU)] = (CACHE_READ), - [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), -}; - -bool perf_evsel__is_cache_op_valid(u8 type, u8 op) -{ - if (perf_evsel__hw_cache_stat[type] & COP(op)) - return true; /* valid */ - else - return false; /* invalid */ -} - -int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, - char *bf, size_t size) -{ - if (result) { - return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0], - perf_evsel__hw_cache_op[op][0], - perf_evsel__hw_cache_result[result][0]); - } - - return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0], - perf_evsel__hw_cache_op[op][1]); -} - -static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size) -{ - u8 op, result, type = (config >> 0) & 0xff; - const char *err = "unknown-ext-hardware-cache-type"; - - if (type > PERF_COUNT_HW_CACHE_MAX) - goto out_err; - - op = (config >> 8) & 0xff; - err = "unknown-ext-hardware-cache-op"; - if (op > PERF_COUNT_HW_CACHE_OP_MAX) - goto out_err; - - result = (config >> 16) & 0xff; - err = "unknown-ext-hardware-cache-result"; - if (result > PERF_COUNT_HW_CACHE_RESULT_MAX) - goto out_err; - - err = "invalid-cache"; - if (!perf_evsel__is_cache_op_valid(type, op)) - goto out_err; - - return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size); -out_err: - return scnprintf(bf, size, "%s", err); -} - -static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size) -{ - int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size); - return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret); -} - -static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size) -{ - int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config); - return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret); -} - -const char *perf_evsel__name(struct perf_evsel *evsel) -{ - char bf[128]; - - if (evsel->name) - return evsel->name; + int ret; switch (evsel->attr.type) { case PERF_TYPE_RAW: - perf_evsel__raw_name(evsel, bf, sizeof(bf)); + ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config); break; case PERF_TYPE_HARDWARE: - perf_evsel__hw_name(evsel, bf, sizeof(bf)); - break; - - case PERF_TYPE_HW_CACHE: - perf_evsel__hw_cache_name(evsel, bf, sizeof(bf)); - break; - - case PERF_TYPE_SOFTWARE: - perf_evsel__sw_name(evsel, bf, sizeof(bf)); + ret = perf_evsel__hw_name(evsel, bf, size); break; - - case PERF_TYPE_TRACEPOINT: - scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint"); - break; - - case PERF_TYPE_BREAKPOINT: - perf_evsel__bp_name(evsel, bf, sizeof(bf)); - break; - default: - scnprintf(bf, sizeof(bf), "%s", "unknown attr type"); - break; - } - - evsel->name = strdup(bf); - - return evsel->name ?: "unknown"; + /* + * FIXME + * + * This is the minimal perf_evsel__name so that we can + * reconstruct event names taking into account event modifiers. + * + * The old event_name uses it now for raw anr hw events, so that + * we don't drag all the parsing stuff into the python binding. 
+ * + * On the next devel cycle the rest of the event naming will be + * brought here. + */ + return 0; + } + + return ret; } void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts, diff --git a/trunk/tools/perf/util/evsel.h b/trunk/tools/perf/util/evsel.h index 67cc5033d192..4ba8b564e6f4 100644 --- a/trunk/tools/perf/util/evsel.h +++ b/trunk/tools/perf/util/evsel.h @@ -83,19 +83,8 @@ void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts, struct perf_evsel *first); -bool perf_evsel__is_cache_op_valid(u8 type, u8 op); - -#define PERF_EVSEL__MAX_ALIASES 8 - -extern const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX] - [PERF_EVSEL__MAX_ALIASES]; -extern const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_EVSEL__MAX_ALIASES]; -const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX] - [PERF_EVSEL__MAX_ALIASES]; -int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, - char *bf, size_t size); -const char *perf_evsel__name(struct perf_evsel *evsel); +const char* __perf_evsel__hw_name(u64 config); +int perf_evsel__name(struct perf_evsel *evsel, char *bf, size_t size); int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads); int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads); diff --git a/trunk/tools/perf/util/header.c b/trunk/tools/perf/util/header.c index 5a47aba46759..e909d43cf542 100644 --- a/trunk/tools/perf/util/header.c +++ b/trunk/tools/perf/util/header.c @@ -641,7 +641,7 @@ static int write_event_desc(int fd, struct perf_header *h __used, /* * write event string as passed on cmdline */ - ret = do_write_string(fd, perf_evsel__name(attr)); + ret = do_write_string(fd, event_name(attr)); if (ret < 0) return ret; /* @@ -1474,15 +1474,15 @@ static int perf_header__read_build_ids(struct perf_header *header, static int process_tracing_data(struct perf_file_section *section __unused, struct perf_header *ph __unused, - int feat __unused, int fd, void *data) + int feat __unused, int fd) { - trace_report(fd, data, false); + trace_report(fd, false); return 0; } static int process_build_id(struct perf_file_section *section, struct perf_header *ph, - int feat __unused, int fd, void *data __used) + int feat __unused, int fd) { if (perf_header__read_build_ids(ph, fd, section->offset, section->size)) pr_debug("Failed to read buildids, continuing...\n"); @@ -1493,7 +1493,7 @@ struct feature_ops { int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist); void (*print)(struct perf_header *h, int fd, FILE *fp); int (*process)(struct perf_file_section *section, - struct perf_header *h, int feat, int fd, void *data); + struct perf_header *h, int feat, int fd); const char *name; bool full_only; }; @@ -1988,7 +1988,7 @@ int perf_file_header__read(struct perf_file_header *header, static int perf_file_section__process(struct perf_file_section *section, struct perf_header *ph, - int feat, int fd, void *data) + int feat, int fd, void *data __used) { if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) { pr_debug("Failed to lseek to %" PRIu64 " offset for feature " @@ -2004,7 +2004,7 @@ static int perf_file_section__process(struct perf_file_section *section, if (!feat_ops[feat].process) return 0; - return feat_ops[feat].process(section, ph, feat, fd, data); + return feat_ops[feat].process(section, ph, feat, fd); } static int perf_file_header__read_pipe(struct perf_pipe_file_header *header, @@ -2093,11 +2093,9 @@ static int read_attr(int fd, struct 
perf_header *ph, return ret <= 0 ? -1 : 0; } -static int perf_evsel__set_tracepoint_name(struct perf_evsel *evsel, - struct pevent *pevent) +static int perf_evsel__set_tracepoint_name(struct perf_evsel *evsel) { - struct event_format *event = pevent_find_event(pevent, - evsel->attr.config); + struct event_format *event = trace_find_event(evsel->attr.config); char bf[128]; if (event == NULL) @@ -2111,14 +2109,13 @@ static int perf_evsel__set_tracepoint_name(struct perf_evsel *evsel, return 0; } -static int perf_evlist__set_tracepoint_names(struct perf_evlist *evlist, - struct pevent *pevent) +static int perf_evlist__set_tracepoint_names(struct perf_evlist *evlist) { struct perf_evsel *pos; list_for_each_entry(pos, &evlist->entries, node) { if (pos->attr.type == PERF_TYPE_TRACEPOINT && - perf_evsel__set_tracepoint_name(pos, pevent)) + perf_evsel__set_tracepoint_name(pos)) return -1; } @@ -2201,12 +2198,12 @@ int perf_session__read_header(struct perf_session *session, int fd) event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); } - perf_header__process_sections(header, fd, &session->pevent, + perf_header__process_sections(header, fd, NULL, perf_file_section__process); lseek(fd, header->data_offset, SEEK_SET); - if (perf_evlist__set_tracepoint_names(session->evlist, session->pevent)) + if (perf_evlist__set_tracepoint_names(session->evlist)) goto out_delete_evlist; header->frozen = 1; @@ -2422,8 +2419,8 @@ int perf_event__process_tracing_data(union perf_event *event, lseek(session->fd, offset + sizeof(struct tracing_data_event), SEEK_SET); - size_read = trace_report(session->fd, &session->pevent, - session->repipe); + size_read = trace_report(session->fd, session->repipe); + padding = ALIGN(size_read, sizeof(u64)) - size_read; if (read(session->fd, buf, padding) < 0) diff --git a/trunk/tools/perf/util/hist.h b/trunk/tools/perf/util/hist.h index 0b096c27a419..34bb556d6219 100644 --- a/trunk/tools/perf/util/hist.h +++ b/trunk/tools/perf/util/hist.h @@ -47,7 +47,6 @@ enum hist_column { HISTC_SYMBOL_TO, HISTC_DSO_FROM, HISTC_DSO_TO, - HISTC_SRCLINE, HISTC_NR_COLS, /* Last entry */ }; diff --git a/trunk/tools/perf/util/include/linux/kernel.h b/trunk/tools/perf/util/include/linux/kernel.h index b6842c1d02a8..1eb804fd3fbf 100644 --- a/trunk/tools/perf/util/include/linux/kernel.h +++ b/trunk/tools/perf/util/include/linux/kernel.h @@ -108,14 +108,4 @@ int eprintf(int level, #define pr_debug3(fmt, ...) pr_debugN(3, pr_fmt(fmt), ##__VA_ARGS__) #define pr_debug4(fmt, ...) pr_debugN(4, pr_fmt(fmt), ##__VA_ARGS__) -/* - * This looks more complex than it should be. But we need to - * get the type for the ~ right in round_down (it needs to be - * as wide as the result!), and we want to evaluate the macro - * arguments just once each. 
- */ -#define __round_mask(x, y) ((__typeof__(x))((y)-1)) -#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) -#define round_down(x, y) ((x) & ~__round_mask(x, y)) - #endif diff --git a/trunk/tools/perf/util/map.h b/trunk/tools/perf/util/map.h index c14c665d9a25..81371bad4ef0 100644 --- a/trunk/tools/perf/util/map.h +++ b/trunk/tools/perf/util/map.h @@ -157,7 +157,7 @@ void machine__exit(struct machine *self); void machine__delete(struct machine *self); int machine__resolve_callchain(struct machine *machine, - struct thread *thread, + struct perf_evsel *evsel, struct thread *thread, struct ip_callchain *chain, struct symbol **parent); int maps__set_kallsyms_ref_reloc_sym(struct map **maps, const char *symbol_name, diff --git a/trunk/tools/perf/util/parse-events-test.c b/trunk/tools/perf/util/parse-events-test.c index 1b997d2b89ce..76b98e2a587d 100644 --- a/trunk/tools/perf/util/parse-events-test.c +++ b/trunk/tools/perf/util/parse-events-test.c @@ -181,22 +181,6 @@ static int test__checkevent_breakpoint_w(struct perf_evlist *evlist) return 0; } -static int test__checkevent_breakpoint_rw(struct perf_evlist *evlist) -{ - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); - - TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); - TEST_ASSERT_VAL("wrong type", - PERF_TYPE_BREAKPOINT == evsel->attr.type); - TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config); - TEST_ASSERT_VAL("wrong bp_type", - (HW_BREAKPOINT_R|HW_BREAKPOINT_W) == evsel->attr.bp_type); - TEST_ASSERT_VAL("wrong bp_len", - HW_BREAKPOINT_LEN_4 == evsel->attr.bp_len); - return 0; -} - static int test__checkevent_tracepoint_modifier(struct perf_evlist *evlist) { struct perf_evsel *evsel = list_entry(evlist->entries.next, @@ -325,8 +309,6 @@ static int test__checkevent_breakpoint_modifier(struct perf_evlist *evlist) TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); - TEST_ASSERT_VAL("wrong name", - !strcmp(perf_evsel__name(evsel), "mem:0x0:rw:u")); return test__checkevent_breakpoint(evlist); } @@ -340,8 +322,6 @@ static int test__checkevent_breakpoint_x_modifier(struct perf_evlist *evlist) TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); - TEST_ASSERT_VAL("wrong name", - !strcmp(perf_evsel__name(evsel), "mem:0x0:x:k")); return test__checkevent_breakpoint_x(evlist); } @@ -355,8 +335,6 @@ static int test__checkevent_breakpoint_r_modifier(struct perf_evlist *evlist) TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); - TEST_ASSERT_VAL("wrong name", - !strcmp(perf_evsel__name(evsel), "mem:0x0:r:hp")); return test__checkevent_breakpoint_r(evlist); } @@ -370,27 +348,10 @@ static int test__checkevent_breakpoint_w_modifier(struct perf_evlist *evlist) TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); - TEST_ASSERT_VAL("wrong name", - !strcmp(perf_evsel__name(evsel), "mem:0x0:w:up")); return test__checkevent_breakpoint_w(evlist); } -static int test__checkevent_breakpoint_rw_modifier(struct perf_evlist *evlist) -{ - struct 
perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); - - TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); - TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); - TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); - TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); - TEST_ASSERT_VAL("wrong name", - !strcmp(perf_evsel__name(evsel), "mem:0x0:rw:kp")); - - return test__checkevent_breakpoint_rw(evlist); -} - static int test__checkevent_pmu(struct perf_evlist *evlist) { @@ -452,63 +413,19 @@ static int test__checkevent_pmu_name(struct perf_evlist *evlist) { struct perf_evsel *evsel; - /* cpu/config=1,name=krava/u */ + /* cpu/config=1,name=krava1/u */ evsel = list_entry(evlist->entries.next, struct perf_evsel, node); TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config); - TEST_ASSERT_VAL("wrong name", !strcmp(perf_evsel__name(evsel), "krava")); + TEST_ASSERT_VAL("wrong name", !strcmp(evsel->name, "krava")); - /* cpu/config=2/u" */ + /* cpu/config=2/" */ evsel = list_entry(evsel->node.next, struct perf_evsel, node); TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); TEST_ASSERT_VAL("wrong config", 2 == evsel->attr.config); - TEST_ASSERT_VAL("wrong name", - !strcmp(perf_evsel__name(evsel), "raw 0x2:u")); - - return 0; -} - -static int test__checkterms_simple(struct list_head *terms) -{ - struct parse_events__term *term; - - /* config=10 */ - term = list_entry(terms->next, struct parse_events__term, list); - TEST_ASSERT_VAL("wrong type term", - term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG); - TEST_ASSERT_VAL("wrong type val", - term->type_val == PARSE_EVENTS__TERM_TYPE_NUM); - TEST_ASSERT_VAL("wrong val", term->val.num == 10); - TEST_ASSERT_VAL("wrong config", !term->config); - - /* config1 */ - term = list_entry(term->list.next, struct parse_events__term, list); - TEST_ASSERT_VAL("wrong type term", - term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG1); - TEST_ASSERT_VAL("wrong type val", - term->type_val == PARSE_EVENTS__TERM_TYPE_NUM); - TEST_ASSERT_VAL("wrong val", term->val.num == 1); - TEST_ASSERT_VAL("wrong config", !term->config); - - /* config2=3 */ - term = list_entry(term->list.next, struct parse_events__term, list); - TEST_ASSERT_VAL("wrong type term", - term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG2); - TEST_ASSERT_VAL("wrong type val", - term->type_val == PARSE_EVENTS__TERM_TYPE_NUM); - TEST_ASSERT_VAL("wrong val", term->val.num == 3); - TEST_ASSERT_VAL("wrong config", !term->config); - - /* umask=1*/ - term = list_entry(term->list.next, struct parse_events__term, list); - TEST_ASSERT_VAL("wrong type term", - term->type_term == PARSE_EVENTS__TERM_TYPE_USER); - TEST_ASSERT_VAL("wrong type val", - term->type_val == PARSE_EVENTS__TERM_TYPE_NUM); - TEST_ASSERT_VAL("wrong val", term->val.num == 1); - TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "umask")); + TEST_ASSERT_VAL("wrong name", !strcmp(evsel->name, "raw 0x2")); return 0; } @@ -624,16 +541,10 @@ static struct test__event_st test__events[] = { .name = "instructions:H", .check = test__checkevent_exclude_guest_modifier, }, - [26] = { - .name = "mem:0:rw", - .check = test__checkevent_breakpoint_rw, - }, - [27] = { - .name = "mem:0:rw:kp", - .check = test__checkevent_breakpoint_rw_modifier, - }, }; +#define 
TEST__EVENTS_CNT (sizeof(test__events) / sizeof(struct test__event_st)) + static struct test__event_st test__events_pmu[] = { [0] = { .name = "cpu/config=10,config1,config2=3,period=1000/u", @@ -645,23 +556,10 @@ static struct test__event_st test__events_pmu[] = { }, }; -struct test__term { - const char *str; - __u32 type; - int (*check)(struct list_head *terms); -}; - -static struct test__term test__terms[] = { - [0] = { - .str = "config=10,config1,config2=3,umask=1", - .check = test__checkterms_simple, - }, -}; - -#define TEST__TERMS_CNT (sizeof(test__terms) / \ - sizeof(struct test__term)) +#define TEST__EVENTS_PMU_CNT (sizeof(test__events_pmu) / \ + sizeof(struct test__event_st)) -static int test_event(struct test__event_st *e) +static int test(struct test__event_st *e) { struct perf_evlist *evlist; int ret; @@ -692,48 +590,7 @@ static int test_events(struct test__event_st *events, unsigned cnt) struct test__event_st *e = &events[i]; pr_debug("running test %d '%s'\n", i, e->name); - ret = test_event(e); - if (ret) - break; - } - - return ret; -} - -static int test_term(struct test__term *t) -{ - struct list_head *terms; - int ret; - - terms = malloc(sizeof(*terms)); - if (!terms) - return -ENOMEM; - - INIT_LIST_HEAD(terms); - - ret = parse_events_terms(terms, t->str); - if (ret) { - pr_debug("failed to parse terms '%s', err %d\n", - t->str , ret); - return ret; - } - - ret = t->check(terms); - parse_events__free_terms(terms); - - return ret; -} - -static int test_terms(struct test__term *terms, unsigned cnt) -{ - int ret = 0; - unsigned i; - - for (i = 0; i < cnt; i++) { - struct test__term *t = &terms[i]; - - pr_debug("running test %d '%s'\n", i, t->str); - ret = test_term(t); + ret = test(e); if (ret) break; } @@ -760,17 +617,9 @@ int parse_events__test(void) { int ret; -#define TEST_EVENTS(tests) \ -do { \ - ret = test_events(tests, ARRAY_SIZE(tests)); \ - if (ret) \ - return ret; \ -} while (0) - - TEST_EVENTS(test__events); + ret = test_events(test__events, TEST__EVENTS_CNT); + if (!ret && test_pmu()) + ret = test_events(test__events_pmu, TEST__EVENTS_PMU_CNT); - if (test_pmu()) - TEST_EVENTS(test__events_pmu); - - return test_terms(test__terms, ARRAY_SIZE(test__terms)); + return ret; } diff --git a/trunk/tools/perf/util/parse-events.c b/trunk/tools/perf/util/parse-events.c index 1aa721d7c10f..05dbc8b3c767 100644 --- a/trunk/tools/perf/util/parse-events.c +++ b/trunk/tools/perf/util/parse-events.c @@ -11,14 +11,14 @@ #include "cache.h" #include "header.h" #include "debugfs.h" -#include "parse-events-bison.h" -#define YY_EXTRA_TYPE int #include "parse-events-flex.h" #include "pmu.h" #define MAX_NAME_LEN 100 struct event_symbol { + u8 type; + u64 config; const char *symbol; const char *alias; }; @@ -26,88 +26,32 @@ struct event_symbol { #ifdef PARSER_DEBUG extern int parse_events_debug; #endif -int parse_events_parse(void *data, void *scanner); - -static struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = { - [PERF_COUNT_HW_CPU_CYCLES] = { - .symbol = "cpu-cycles", - .alias = "cycles", - }, - [PERF_COUNT_HW_INSTRUCTIONS] = { - .symbol = "instructions", - .alias = "", - }, - [PERF_COUNT_HW_CACHE_REFERENCES] = { - .symbol = "cache-references", - .alias = "", - }, - [PERF_COUNT_HW_CACHE_MISSES] = { - .symbol = "cache-misses", - .alias = "", - }, - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { - .symbol = "branch-instructions", - .alias = "branches", - }, - [PERF_COUNT_HW_BRANCH_MISSES] = { - .symbol = "branch-misses", - .alias = "", - }, - [PERF_COUNT_HW_BUS_CYCLES] = { - .symbol = 
"bus-cycles", - .alias = "", - }, - [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = { - .symbol = "stalled-cycles-frontend", - .alias = "idle-cycles-frontend", - }, - [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = { - .symbol = "stalled-cycles-backend", - .alias = "idle-cycles-backend", - }, - [PERF_COUNT_HW_REF_CPU_CYCLES] = { - .symbol = "ref-cycles", - .alias = "", - }, -}; - -static struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = { - [PERF_COUNT_SW_CPU_CLOCK] = { - .symbol = "cpu-clock", - .alias = "", - }, - [PERF_COUNT_SW_TASK_CLOCK] = { - .symbol = "task-clock", - .alias = "", - }, - [PERF_COUNT_SW_PAGE_FAULTS] = { - .symbol = "page-faults", - .alias = "faults", - }, - [PERF_COUNT_SW_CONTEXT_SWITCHES] = { - .symbol = "context-switches", - .alias = "cs", - }, - [PERF_COUNT_SW_CPU_MIGRATIONS] = { - .symbol = "cpu-migrations", - .alias = "migrations", - }, - [PERF_COUNT_SW_PAGE_FAULTS_MIN] = { - .symbol = "minor-faults", - .alias = "", - }, - [PERF_COUNT_SW_PAGE_FAULTS_MAJ] = { - .symbol = "major-faults", - .alias = "", - }, - [PERF_COUNT_SW_ALIGNMENT_FAULTS] = { - .symbol = "alignment-faults", - .alias = "", - }, - [PERF_COUNT_SW_EMULATION_FAULTS] = { - .symbol = "emulation-faults", - .alias = "", - }, +int parse_events_parse(struct list_head *list, int *idx); + +#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x +#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x + +static struct event_symbol event_symbols[] = { + { CHW(CPU_CYCLES), "cpu-cycles", "cycles" }, + { CHW(STALLED_CYCLES_FRONTEND), "stalled-cycles-frontend", "idle-cycles-frontend" }, + { CHW(STALLED_CYCLES_BACKEND), "stalled-cycles-backend", "idle-cycles-backend" }, + { CHW(INSTRUCTIONS), "instructions", "" }, + { CHW(CACHE_REFERENCES), "cache-references", "" }, + { CHW(CACHE_MISSES), "cache-misses", "" }, + { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" }, + { CHW(BRANCH_MISSES), "branch-misses", "" }, + { CHW(BUS_CYCLES), "bus-cycles", "" }, + { CHW(REF_CPU_CYCLES), "ref-cycles", "" }, + + { CSW(CPU_CLOCK), "cpu-clock", "" }, + { CSW(TASK_CLOCK), "task-clock", "" }, + { CSW(PAGE_FAULTS), "page-faults", "faults" }, + { CSW(PAGE_FAULTS_MIN), "minor-faults", "" }, + { CSW(PAGE_FAULTS_MAJ), "major-faults", "" }, + { CSW(CONTEXT_SWITCHES), "context-switches", "cs" }, + { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" }, + { CSW(ALIGNMENT_FAULTS), "alignment-faults", "" }, + { CSW(EMULATION_FAULTS), "emulation-faults", "" }, }; #define __PERF_EVENT_FIELD(config, name) \ @@ -118,6 +62,63 @@ static struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = { #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT) +static const char *sw_event_names[PERF_COUNT_SW_MAX] = { + "cpu-clock", + "task-clock", + "page-faults", + "context-switches", + "CPU-migrations", + "minor-faults", + "major-faults", + "alignment-faults", + "emulation-faults", +}; + +#define MAX_ALIASES 8 + +static const char *hw_cache[PERF_COUNT_HW_CACHE_MAX][MAX_ALIASES] = { + { "L1-dcache", "l1-d", "l1d", "L1-data", }, + { "L1-icache", "l1-i", "l1i", "L1-instruction", }, + { "LLC", "L2", }, + { "dTLB", "d-tlb", "Data-TLB", }, + { "iTLB", "i-tlb", "Instruction-TLB", }, + { "branch", "branches", "bpu", "btb", "bpc", }, + { "node", }, +}; + +static const char *hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][MAX_ALIASES] = { + { "load", "loads", "read", }, + { "store", "stores", "write", }, + { "prefetch", "prefetches", "speculative-read", 
"speculative-load", }, +}; + +static const char *hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX] + [MAX_ALIASES] = { + { "refs", "Reference", "ops", "access", }, + { "misses", "miss", }, +}; + +#define C(x) PERF_COUNT_HW_CACHE_##x +#define CACHE_READ (1 << C(OP_READ)) +#define CACHE_WRITE (1 << C(OP_WRITE)) +#define CACHE_PREFETCH (1 << C(OP_PREFETCH)) +#define COP(x) (1 << x) + +/* + * cache operartion stat + * L1I : Read and prefetch only + * ITLB and BPU : Read-only + */ +static unsigned long hw_cache_stat[C(MAX)] = { + [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), + [C(L1I)] = (CACHE_READ | CACHE_PREFETCH), + [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), + [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), + [C(ITLB)] = (CACHE_READ), + [C(BPU)] = (CACHE_READ), + [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), +}; + #define for_each_subsystem(sys_dir, sys_dirent, sys_next) \ while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next) \ if (sys_dirent.d_type == DT_DIR && \ @@ -217,6 +218,48 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config) return NULL; } +#define TP_PATH_LEN (MAX_EVENT_LENGTH * 2 + 1) +static const char *tracepoint_id_to_name(u64 config) +{ + static char buf[TP_PATH_LEN]; + struct tracepoint_path *path; + + path = tracepoint_id_to_path(config); + if (path) { + snprintf(buf, TP_PATH_LEN, "%s:%s", path->system, path->name); + free(path->name); + free(path->system); + free(path); + } else + snprintf(buf, TP_PATH_LEN, "%s:%s", "unknown", "unknown"); + + return buf; +} + +static int is_cache_op_valid(u8 cache_type, u8 cache_op) +{ + if (hw_cache_stat[cache_type] & COP(cache_op)) + return 1; /* valid */ + else + return 0; /* invalid */ +} + +static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result) +{ + static char name[50]; + + if (cache_result) { + sprintf(name, "%s-%s-%s", hw_cache[cache_type][0], + hw_cache_op[cache_op][0], + hw_cache_result[cache_result][0]); + } else { + sprintf(name, "%s-%s", hw_cache[cache_type][0], + hw_cache_op[cache_op][1]); + } + + return name; +} + const char *event_type(int type) { switch (type) { @@ -239,6 +282,76 @@ const char *event_type(int type) return "unknown"; } +const char *event_name(struct perf_evsel *evsel) +{ + u64 config = evsel->attr.config; + int type = evsel->attr.type; + + if (type == PERF_TYPE_RAW || type == PERF_TYPE_HARDWARE) { + /* + * XXX minimal fix, see comment on perf_evsen__name, this static buffer + * will go away together with event_name in the next devel cycle. 
+ */ + static char bf[128]; + perf_evsel__name(evsel, bf, sizeof(bf)); + return bf; + } + + if (evsel->name) + return evsel->name; + + return __event_name(type, config); +} + +const char *__event_name(int type, u64 config) +{ + static char buf[32]; + + if (type == PERF_TYPE_RAW) { + sprintf(buf, "raw 0x%" PRIx64, config); + return buf; + } + + switch (type) { + case PERF_TYPE_HARDWARE: + return __perf_evsel__hw_name(config); + + case PERF_TYPE_HW_CACHE: { + u8 cache_type, cache_op, cache_result; + + cache_type = (config >> 0) & 0xff; + if (cache_type > PERF_COUNT_HW_CACHE_MAX) + return "unknown-ext-hardware-cache-type"; + + cache_op = (config >> 8) & 0xff; + if (cache_op > PERF_COUNT_HW_CACHE_OP_MAX) + return "unknown-ext-hardware-cache-op"; + + cache_result = (config >> 16) & 0xff; + if (cache_result > PERF_COUNT_HW_CACHE_RESULT_MAX) + return "unknown-ext-hardware-cache-result"; + + if (!is_cache_op_valid(cache_type, cache_op)) + return "invalid-cache"; + + return event_cache_name(cache_type, cache_op, cache_result); + } + + case PERF_TYPE_SOFTWARE: + if (config < PERF_COUNT_SW_MAX && sw_event_names[config]) + return sw_event_names[config]; + return "unknown-software"; + + case PERF_TYPE_TRACEPOINT: + return tracepoint_id_to_name(config); + + default: + break; + } + + return "unknown"; +} + static int add_event(struct list_head **_list, int *idx, struct perf_event_attr *attr, char *name) { @@ -260,20 +373,19 @@ static int add_event(struct list_head **_list, int *idx, return -ENOMEM; } - if (name) - evsel->name = strdup(name); + evsel->name = strdup(name); list_add_tail(&evsel->node, list); *_list = list; return 0; } -static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size) +static int parse_aliases(char *str, const char *names[][MAX_ALIASES], int size) { int i, j; int n, longest = -1; for (i = 0; i < size; i++) { - for (j = 0; j < PERF_EVSEL__MAX_ALIASES && names[i][j]; j++) { + for (j = 0; j < MAX_ALIASES && names[i][j]; j++) { n = strlen(names[i][j]); if (n > longest && !strncasecmp(str, names[i][j], n)) longest = n; @@ -298,7 +410,7 @@ int parse_events_add_cache(struct list_head **list, int *idx, * No fallback - if we cannot get a clear cache type * then bail out: */ - cache_type = parse_aliases(type, perf_evsel__hw_cache, + cache_type = parse_aliases(type, hw_cache, PERF_COUNT_HW_CACHE_MAX); if (cache_type == -1) return -EINVAL; @@ -311,18 +423,18 @@ int parse_events_add_cache(struct list_head **list, int *idx, snprintf(name + n, MAX_NAME_LEN - n, "-%s\n", str); if (cache_op == -1) { - cache_op = parse_aliases(str, perf_evsel__hw_cache_op, + cache_op = parse_aliases(str, hw_cache_op, PERF_COUNT_HW_CACHE_OP_MAX); if (cache_op >= 0) { - if (!perf_evsel__is_cache_op_valid(cache_type, cache_op)) + if (!is_cache_op_valid(cache_type, cache_op)) return -EINVAL; continue; } } if (cache_result == -1) { - cache_result = parse_aliases(str, perf_evsel__hw_cache_result, - PERF_COUNT_HW_CACHE_RESULT_MAX); + cache_result = parse_aliases(str, hw_cache_result, + PERF_COUNT_HW_CACHE_RESULT_MAX); if (cache_result >= 0) continue; } @@ -437,31 +549,21 @@ parse_breakpoint_type(const char *type, struct perf_event_attr *attr) if (!type || !type[i]) break; -#define CHECK_SET_TYPE(bit) \ -do { \ - if (attr->bp_type & bit) \ - return -EINVAL; \ - else \ - attr->bp_type |= bit; \ -} while (0) - switch (type[i]) { case 'r': - CHECK_SET_TYPE(HW_BREAKPOINT_R); + attr->bp_type |= HW_BREAKPOINT_R; break; case 'w': - CHECK_SET_TYPE(HW_BREAKPOINT_W); + attr->bp_type |= 
HW_BREAKPOINT_W; break; case 'x': - CHECK_SET_TYPE(HW_BREAKPOINT_X); + attr->bp_type |= HW_BREAKPOINT_X; break; default: return -EINVAL; } } -#undef CHECK_SET_TYPE - if (!attr->bp_type) /* Default */ attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W; @@ -472,6 +574,7 @@ int parse_events_add_breakpoint(struct list_head **list, int *idx, void *ptr, char *type) { struct perf_event_attr attr; + char name[MAX_NAME_LEN]; memset(&attr, 0, sizeof(attr)); attr.bp_addr = (unsigned long) ptr; @@ -490,7 +593,8 @@ int parse_events_add_breakpoint(struct list_head **list, int *idx, attr.type = PERF_TYPE_BREAKPOINT; - return add_event(list, idx, &attr, NULL); + snprintf(name, MAX_NAME_LEN, "mem:%p:%s", ptr, type ? type : "rw"); + return add_event(list, idx, &attr, name); } static int config_term(struct perf_event_attr *attr, @@ -562,7 +666,8 @@ int parse_events_add_numeric(struct list_head **list, int *idx, config_attr(&attr, head_config, 1)) return -EINVAL; - return add_event(list, idx, &attr, NULL); + return add_event(list, idx, &attr, + (char *) __event_name(type, config)); } static int parse_events__is_name_term(struct parse_events__term *term) @@ -570,7 +675,8 @@ static int parse_events__is_name_term(struct parse_events__term *term) return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME; } -static char *pmu_event_name(struct list_head *head_terms) +static char *pmu_event_name(struct perf_event_attr *attr, + struct list_head *head_terms) { struct parse_events__term *term; @@ -578,7 +684,7 @@ static char *pmu_event_name(struct list_head *head_terms) if (parse_events__is_name_term(term)) return term->val.str; - return NULL; + return (char *) __event_name(PERF_TYPE_RAW, attr->config); } int parse_events_add_pmu(struct list_head **list, int *idx, @@ -593,9 +699,6 @@ int parse_events_add_pmu(struct list_head **list, int *idx, memset(&attr, 0, sizeof(attr)); - if (perf_pmu__check_alias(pmu, head_config)) - return -EINVAL; - /* * Configure hardcoded terms first, no need to check * return value when called with fail == 0 ;) @@ -606,7 +709,7 @@ int parse_events_add_pmu(struct list_head **list, int *idx, return -EINVAL; return add_event(list, idx, &attr, - pmu_event_name(head_config)); + pmu_event_name(&attr, head_config)); } void parse_events_update_lists(struct list_head *list_event, @@ -684,62 +787,27 @@ int parse_events_modifier(struct list_head *list, char *str) return 0; } -static int parse_events__scanner(const char *str, void *data, int start_token) +int parse_events(struct perf_evlist *evlist, const char *str, int unset __used) { + LIST_HEAD(list); + LIST_HEAD(list_tmp); YY_BUFFER_STATE buffer; - void *scanner; - int ret; - - ret = parse_events_lex_init_extra(start_token, &scanner); - if (ret) - return ret; + int ret, idx = evlist->nr_entries; - buffer = parse_events__scan_string(str, scanner); + buffer = parse_events__scan_string(str); #ifdef PARSER_DEBUG parse_events_debug = 1; #endif - ret = parse_events_parse(data, scanner); + ret = parse_events_parse(&list, &idx); - parse_events__flush_buffer(buffer, scanner); - parse_events__delete_buffer(buffer, scanner); - parse_events_lex_destroy(scanner); - return ret; -} - -/* - * parse event config string, return a list of event terms. 
- */ -int parse_events_terms(struct list_head *terms, const char *str) -{ - struct parse_events_data__terms data = { - .terms = NULL, - }; - int ret; + parse_events__flush_buffer(buffer); + parse_events__delete_buffer(buffer); + parse_events_lex_destroy(); - ret = parse_events__scanner(str, &data, PE_START_TERMS); if (!ret) { - list_splice(data.terms, terms); - free(data.terms); - return 0; - } - - parse_events__free_terms(data.terms); - return ret; -} - -int parse_events(struct perf_evlist *evlist, const char *str, int unset __used) -{ - struct parse_events_data__events data = { - .list = LIST_HEAD_INIT(data.list), - .idx = evlist->nr_entries, - }; - int ret; - - ret = parse_events__scanner(str, &data, PE_START_EVENTS); - if (!ret) { - int entries = data.idx - evlist->nr_entries; - perf_evlist__splice_list_tail(evlist, &data.list, entries); + int entries = idx - evlist->nr_entries; + perf_evlist__splice_list_tail(evlist, &list, entries); return 0; } @@ -878,13 +946,16 @@ int is_valid_tracepoint(const char *event_string) return 0; } -static void __print_events_type(u8 type, struct event_symbol *syms, - unsigned max) +void print_events_type(u8 type) { + struct event_symbol *syms = event_symbols; + unsigned int i; char name[64]; - unsigned i; - for (i = 0; i < max ; i++, syms++) { + for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) { + if (type != syms->type) + continue; + if (strlen(syms->alias)) snprintf(name, sizeof(name), "%s OR %s", syms->symbol, syms->alias); @@ -896,28 +967,19 @@ static void __print_events_type(u8 type, struct event_symbol *syms, } } -void print_events_type(u8 type) -{ - if (type == PERF_TYPE_SOFTWARE) - __print_events_type(type, event_symbols_sw, PERF_COUNT_SW_MAX); - else - __print_events_type(type, event_symbols_hw, PERF_COUNT_HW_MAX); -} - int print_hwcache_events(const char *event_glob) { unsigned int type, op, i, printed = 0; - char name[64]; for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { /* skip invalid cache type */ - if (!perf_evsel__is_cache_op_valid(type, op)) + if (!is_cache_op_valid(type, op)) continue; for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { - __perf_evsel__hw_cache_type_op_res_name(type, op, i, - name, sizeof(name)); + char *name = event_cache_name(type, op, i); + if (event_glob != NULL && !strglobmatch(name, event_glob)) continue; @@ -931,13 +993,26 @@ int print_hwcache_events(const char *event_glob) return printed; } -static void print_symbol_events(const char *event_glob, unsigned type, - struct event_symbol *syms, unsigned max) +/* + * Print the help text for the event symbols: + */ +void print_events(const char *event_glob) { - unsigned i, printed = 0; + unsigned int i, type, prev_type = -1, printed = 0, ntypes_printed = 0; + struct event_symbol *syms = event_symbols; char name[MAX_NAME_LEN]; - for (i = 0; i < max; i++, syms++) { + printf("\n"); + printf("List of pre-defined events (to be used in -e):\n"); + + for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) { + type = syms->type; + + if (type != prev_type && printed) { + printf("\n"); + printed = 0; + ntypes_printed++; + } if (event_glob != NULL && !(strglobmatch(syms->symbol, event_glob) || @@ -948,31 +1023,17 @@ static void print_symbol_events(const char *event_glob, unsigned type, snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias); else strncpy(name, syms->symbol, MAX_NAME_LEN); + printf(" %-50s [%s]\n", name, + event_type_descriptors[type]); - printf(" %-50s [%s]\n", name, 
event_type_descriptors[type]); - - printed++; + prev_type = type; + ++printed; } - if (printed) + if (ntypes_printed) { + printed = 0; printf("\n"); -} - -/* - * Print the help text for the event symbols: - */ -void print_events(const char *event_glob) -{ - - printf("\n"); - printf("List of pre-defined events (to be used in -e):\n"); - - print_symbol_events(event_glob, PERF_TYPE_HARDWARE, - event_symbols_hw, PERF_COUNT_HW_MAX); - - print_symbol_events(event_glob, PERF_TYPE_SOFTWARE, - event_symbols_sw, PERF_COUNT_SW_MAX); - + } print_hwcache_events(event_glob); if (event_glob != NULL) @@ -1045,13 +1106,6 @@ int parse_events__term_str(struct parse_events__term **term, config, str, 0); } -int parse_events__term_clone(struct parse_events__term **new, - struct parse_events__term *term) -{ - return new_term(new, term->type_val, term->type_term, term->config, - term->val.str, term->val.num); -} - void parse_events__free_terms(struct list_head *terms) { struct parse_events__term *term, *h; diff --git a/trunk/tools/perf/util/parse-events.h b/trunk/tools/perf/util/parse-events.h index ee9c218a193c..8cac57ab4ee6 100644 --- a/trunk/tools/perf/util/parse-events.h +++ b/trunk/tools/perf/util/parse-events.h @@ -26,12 +26,13 @@ extern struct tracepoint_path *tracepoint_id_to_path(u64 config); extern bool have_tracepoints(struct list_head *evlist); const char *event_type(int type); +const char *event_name(struct perf_evsel *event); +extern const char *__event_name(int type, u64 config); extern int parse_events_option(const struct option *opt, const char *str, int unset); extern int parse_events(struct perf_evlist *evlist, const char *str, int unset); -extern int parse_events_terms(struct list_head *terms, const char *str); extern int parse_filter(const struct option *opt, const char *str, int unset); #define EVENTS_HELP_MAX (128*1024) @@ -62,22 +63,11 @@ struct parse_events__term { struct list_head list; }; -struct parse_events_data__events { - struct list_head list; - int idx; -}; - -struct parse_events_data__terms { - struct list_head *terms; -}; - int parse_events__is_hardcoded_term(struct parse_events__term *term); int parse_events__term_num(struct parse_events__term **_term, int type_term, char *config, long num); int parse_events__term_str(struct parse_events__term **_term, int type_term, char *config, char *str); -int parse_events__term_clone(struct parse_events__term **new, - struct parse_events__term *term); void parse_events__free_terms(struct list_head *terms); int parse_events_modifier(struct list_head *list, char *str); int parse_events_add_tracepoint(struct list_head **list, int *idx, @@ -93,7 +83,8 @@ int parse_events_add_pmu(struct list_head **list, int *idx, char *pmu , struct list_head *head_config); void parse_events_update_lists(struct list_head *list_event, struct list_head *list_all); -void parse_events_error(void *data, void *scanner, char const *msg); +void parse_events_error(struct list_head *list_all, + int *idx, char const *msg); int parse_events__test(void); void print_events(const char *event_glob); diff --git a/trunk/tools/perf/util/parse-events.l b/trunk/tools/perf/util/parse-events.l index 384ca74c6b22..618a8e788399 100644 --- a/trunk/tools/perf/util/parse-events.l +++ b/trunk/tools/perf/util/parse-events.l @@ -1,6 +1,4 @@ -%option reentrant -%option bison-bridge %option prefix="parse_events_" %option stack @@ -10,10 +8,7 @@ #include "parse-events-bison.h" #include "parse-events.h" -char *parse_events_get_text(yyscan_t yyscanner); -YYSTYPE *parse_events_get_lval(yyscan_t 
yyscanner); - -static int __value(YYSTYPE *yylval, char *str, int base, int token) +static int __value(char *str, int base, int token) { long num; @@ -22,48 +17,35 @@ static int __value(YYSTYPE *yylval, char *str, int base, int token) if (errno) return PE_ERROR; - yylval->num = num; + parse_events_lval.num = num; return token; } -static int value(yyscan_t scanner, int base) +static int value(int base) { - YYSTYPE *yylval = parse_events_get_lval(scanner); - char *text = parse_events_get_text(scanner); - - return __value(yylval, text, base, PE_VALUE); + return __value(parse_events_text, base, PE_VALUE); } -static int raw(yyscan_t scanner) +static int raw(void) { - YYSTYPE *yylval = parse_events_get_lval(scanner); - char *text = parse_events_get_text(scanner); - - return __value(yylval, text + 1, 16, PE_RAW); + return __value(parse_events_text + 1, 16, PE_RAW); } -static int str(yyscan_t scanner, int token) +static int str(int token) { - YYSTYPE *yylval = parse_events_get_lval(scanner); - char *text = parse_events_get_text(scanner); - - yylval->str = strdup(text); + parse_events_lval.str = strdup(parse_events_text); return token; } -static int sym(yyscan_t scanner, int type, int config) +static int sym(int type, int config) { - YYSTYPE *yylval = parse_events_get_lval(scanner); - - yylval->num = (type << 16) + config; - return type == PERF_TYPE_HARDWARE ? PE_VALUE_SYM_HW : PE_VALUE_SYM_SW; + parse_events_lval.num = (type << 16) + config; + return PE_VALUE_SYM; } -static int term(yyscan_t scanner, int type) +static int term(int type) { - YYSTYPE *yylval = parse_events_get_lval(scanner); - - yylval->num = type; + parse_events_lval.num = type; return PE_TERM; } @@ -76,41 +58,28 @@ num_hex 0x[a-fA-F0-9]+ num_raw_hex [a-fA-F0-9]+ name [a-zA-Z_*?][a-zA-Z0-9_*?]* modifier_event [ukhpGH]{1,8} -modifier_bp [rwx]{1,3} +modifier_bp [rwx] %% - -%{ - { - int start_token; - - start_token = (int) parse_events_get_extra(yyscanner); - if (start_token) { - parse_events_set_extra(NULL, yyscanner); - return start_token; - } - } -%} - -cpu-cycles|cycles { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES); } -stalled-cycles-frontend|idle-cycles-frontend { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND); } -stalled-cycles-backend|idle-cycles-backend { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_BACKEND); } -instructions { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS); } -cache-references { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_REFERENCES); } -cache-misses { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_MISSES); } -branch-instructions|branches { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); } -branch-misses { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_MISSES); } -bus-cycles { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BUS_CYCLES); } -ref-cycles { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_REF_CPU_CYCLES); } -cpu-clock { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK); } -task-clock { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_TASK_CLOCK); } -page-faults|faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS); } -minor-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MIN); } -major-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MAJ); } -context-switches|cs { return sym(yyscanner, 
PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CONTEXT_SWITCHES); } -cpu-migrations|migrations { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_MIGRATIONS); } -alignment-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); } -emulation-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); } +cpu-cycles|cycles { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES); } +stalled-cycles-frontend|idle-cycles-frontend { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND); } +stalled-cycles-backend|idle-cycles-backend { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_BACKEND); } +instructions { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS); } +cache-references { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_REFERENCES); } +cache-misses { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_MISSES); } +branch-instructions|branches { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); } +branch-misses { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_MISSES); } +bus-cycles { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BUS_CYCLES); } +ref-cycles { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_REF_CPU_CYCLES); } +cpu-clock { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK); } +task-clock { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_TASK_CLOCK); } +page-faults|faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS); } +minor-faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MIN); } +major-faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MAJ); } +context-switches|cs { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CONTEXT_SWITCHES); } +cpu-migrations|migrations { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_MIGRATIONS); } +alignment-faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); } +emulation-faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); } L1-dcache|l1-d|l1d|L1-data | L1-icache|l1-i|l1i|L1-instruction | @@ -118,14 +87,14 @@ LLC|L2 | dTLB|d-tlb|Data-TLB | iTLB|i-tlb|Instruction-TLB | branch|branches|bpu|btb|bpc | -node { return str(yyscanner, PE_NAME_CACHE_TYPE); } +node { return str(PE_NAME_CACHE_TYPE); } load|loads|read | store|stores|write | prefetch|prefetches | speculative-read|speculative-load | refs|Reference|ops|access | -misses|miss { return str(yyscanner, PE_NAME_CACHE_OP_RESULT); } +misses|miss { return str(PE_NAME_CACHE_OP_RESULT); } /* * These are event config hardcoded term names to be specified @@ -133,39 +102,38 @@ misses|miss { return str(yyscanner, PE_NAME_CACHE_OP_RESULT); } * so we can put them here directly. In case the we have a conflict * in future, this needs to go into '//' condition block. 
*/ -config { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG); } -config1 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG1); } -config2 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG2); } -name { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NAME); } -period { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); } -branch_type { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); } +config { return term(PARSE_EVENTS__TERM_TYPE_CONFIG); } +config1 { return term(PARSE_EVENTS__TERM_TYPE_CONFIG1); } +config2 { return term(PARSE_EVENTS__TERM_TYPE_CONFIG2); } +name { return term(PARSE_EVENTS__TERM_TYPE_NAME); } +period { return term(PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); } +branch_type { return term(PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); } mem: { BEGIN(mem); return PE_PREFIX_MEM; } -r{num_raw_hex} { return raw(yyscanner); } -{num_dec} { return value(yyscanner, 10); } -{num_hex} { return value(yyscanner, 16); } +r{num_raw_hex} { return raw(); } +{num_dec} { return value(10); } +{num_hex} { return value(16); } -{modifier_event} { return str(yyscanner, PE_MODIFIER_EVENT); } -{name} { return str(yyscanner, PE_NAME); } +{modifier_event} { return str(PE_MODIFIER_EVENT); } +{name} { return str(PE_NAME); } "/" { return '/'; } - { return '-'; } , { return ','; } : { return ':'; } = { return '='; } -\n { } { -{modifier_bp} { return str(yyscanner, PE_MODIFIER_BP); } +{modifier_bp} { return str(PE_MODIFIER_BP); } : { return ':'; } -{num_dec} { return value(yyscanner, 10); } -{num_hex} { return value(yyscanner, 16); } +{num_dec} { return value(10); } +{num_hex} { return value(16); } /* * We need to separate 'mem:' scanner part, in order to get specific * modifier bits parsed out. Otherwise we would need to handle PE_NAME * and we'd need to parse it manually. During the escape from * state we need to put the escaping char back, so we dont miss it. */ -. { unput(*yytext); BEGIN(INITIAL); } +. { unput(*parse_events_text); BEGIN(INITIAL); } /* * We destroy the scanner after reaching EOF, * but anyway just to be sure get back to INIT state. 
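/*
 * A minimal, self-contained sketch of the token encoding used by the
 * simplified (non-reentrant) scanner above: sym() stores the perf event
 * type and config packed into a single number in parse_events_lval.num,
 * and the grammar's event_legacy_symbol rule unpacks it again.  The two
 * helpers below are hypothetical names used only for illustration; the
 * real code does this inline in the .l and .y rules shown in this patch.
 */
#include <linux/perf_event.h>

static inline int sym_token(int type, int config)
{
	/* e.g. "instructions" -> PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS */
	return (type << 16) + config;
}

static inline void sym_untoken(int token, int *type, int *config)
{
	*type = token >> 16;	/* PERF_TYPE_HARDWARE or PERF_TYPE_SOFTWARE */
	*config = token & 255;	/* PERF_COUNT_HW_* / PERF_COUNT_SW_* value */
}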
@@ -175,7 +143,7 @@ r{num_raw_hex} { return raw(yyscanner); } %% -int parse_events_wrap(void *scanner __used) +int parse_events_wrap(void) { return 1; } diff --git a/trunk/tools/perf/util/parse-events.y b/trunk/tools/perf/util/parse-events.y index 2bc5fbff2b5d..362cc59332ae 100644 --- a/trunk/tools/perf/util/parse-events.y +++ b/trunk/tools/perf/util/parse-events.y @@ -1,8 +1,7 @@ -%pure-parser + %name-prefix "parse_events_" -%parse-param {void *_data} -%parse-param {void *scanner} -%lex-param {void* scanner} +%parse-param {struct list_head *list_all} +%parse-param {int *idx} %{ @@ -13,9 +12,8 @@ #include "types.h" #include "util.h" #include "parse-events.h" -#include "parse-events-bison.h" -extern int parse_events_lex (YYSTYPE* lvalp, void* scanner); +extern int parse_events_lex (void); #define ABORT_ON(val) \ do { \ @@ -25,16 +23,14 @@ do { \ %} -%token PE_START_EVENTS PE_START_TERMS -%token PE_VALUE PE_VALUE_SYM_HW PE_VALUE_SYM_SW PE_RAW PE_TERM +%token PE_VALUE PE_VALUE_SYM PE_RAW PE_TERM %token PE_NAME %token PE_MODIFIER_EVENT PE_MODIFIER_BP %token PE_NAME_CACHE_TYPE PE_NAME_CACHE_OP_RESULT %token PE_PREFIX_MEM PE_PREFIX_RAW %token PE_ERROR %type PE_VALUE -%type PE_VALUE_SYM_HW -%type PE_VALUE_SYM_SW +%type PE_VALUE_SYM %type PE_RAW %type PE_TERM %type PE_NAME @@ -42,7 +38,6 @@ do { \ %type PE_NAME_CACHE_OP_RESULT %type PE_MODIFIER_EVENT %type PE_MODIFIER_BP -%type value_sym %type event_config %type event_term %type event_pmu @@ -63,33 +58,24 @@ do { \ } %% -start: -PE_START_EVENTS events -| -PE_START_TERMS terms - events: events ',' event | event event: event_def PE_MODIFIER_EVENT { - struct parse_events_data__events *data = _data; - /* * Apply modifier on all events added by single event definition * (there could be more events added for multiple tracepoint * definitions via '*?'. 
*/ ABORT_ON(parse_events_modifier($1, $2)); - parse_events_update_lists($1, &data->list); + parse_events_update_lists($1, list_all); } | event_def { - struct parse_events_data__events *data = _data; - - parse_events_update_lists($1, &data->list); + parse_events_update_lists($1, list_all); } event_def: event_pmu | @@ -103,131 +89,104 @@ event_def: event_pmu | event_pmu: PE_NAME '/' event_config '/' { - struct parse_events_data__events *data = _data; struct list_head *list = NULL; - ABORT_ON(parse_events_add_pmu(&list, &data->idx, $1, $3)); + ABORT_ON(parse_events_add_pmu(&list, idx, $1, $3)); parse_events__free_terms($3); $$ = list; } -value_sym: -PE_VALUE_SYM_HW -| -PE_VALUE_SYM_SW - event_legacy_symbol: -value_sym '/' event_config '/' +PE_VALUE_SYM '/' event_config '/' { - struct parse_events_data__events *data = _data; struct list_head *list = NULL; int type = $1 >> 16; int config = $1 & 255; - ABORT_ON(parse_events_add_numeric(&list, &data->idx, - type, config, $3)); + ABORT_ON(parse_events_add_numeric(&list, idx, type, config, $3)); parse_events__free_terms($3); $$ = list; } | -value_sym sep_slash_dc +PE_VALUE_SYM sep_slash_dc { - struct parse_events_data__events *data = _data; struct list_head *list = NULL; int type = $1 >> 16; int config = $1 & 255; - ABORT_ON(parse_events_add_numeric(&list, &data->idx, - type, config, NULL)); + ABORT_ON(parse_events_add_numeric(&list, idx, type, config, NULL)); $$ = list; } event_legacy_cache: PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT { - struct parse_events_data__events *data = _data; struct list_head *list = NULL; - ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, $5)); + ABORT_ON(parse_events_add_cache(&list, idx, $1, $3, $5)); $$ = list; } | PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT { - struct parse_events_data__events *data = _data; struct list_head *list = NULL; - ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, NULL)); + ABORT_ON(parse_events_add_cache(&list, idx, $1, $3, NULL)); $$ = list; } | PE_NAME_CACHE_TYPE { - struct parse_events_data__events *data = _data; struct list_head *list = NULL; - ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, NULL, NULL)); + ABORT_ON(parse_events_add_cache(&list, idx, $1, NULL, NULL)); $$ = list; } event_legacy_mem: PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc { - struct parse_events_data__events *data = _data; struct list_head *list = NULL; - ABORT_ON(parse_events_add_breakpoint(&list, &data->idx, - (void *) $2, $4)); + ABORT_ON(parse_events_add_breakpoint(&list, idx, (void *) $2, $4)); $$ = list; } | PE_PREFIX_MEM PE_VALUE sep_dc { - struct parse_events_data__events *data = _data; struct list_head *list = NULL; - ABORT_ON(parse_events_add_breakpoint(&list, &data->idx, - (void *) $2, NULL)); + ABORT_ON(parse_events_add_breakpoint(&list, idx, (void *) $2, NULL)); $$ = list; } event_legacy_tracepoint: PE_NAME ':' PE_NAME { - struct parse_events_data__events *data = _data; struct list_head *list = NULL; - ABORT_ON(parse_events_add_tracepoint(&list, &data->idx, $1, $3)); + ABORT_ON(parse_events_add_tracepoint(&list, idx, $1, $3)); $$ = list; } event_legacy_numeric: PE_VALUE ':' PE_VALUE { - struct parse_events_data__events *data = _data; struct list_head *list = NULL; - ABORT_ON(parse_events_add_numeric(&list, &data->idx, $1, $3, NULL)); + ABORT_ON(parse_events_add_numeric(&list, idx, $1, $3, NULL)); $$ = list; } event_legacy_raw: PE_RAW { - struct parse_events_data__events *data = _data; struct list_head *list = NULL; - 
ABORT_ON(parse_events_add_numeric(&list, &data->idx, - PERF_TYPE_RAW, $1, NULL)); + ABORT_ON(parse_events_add_numeric(&list, idx, PERF_TYPE_RAW, $1, NULL)); $$ = list; } -terms: event_config -{ - struct parse_events_data__terms *data = _data; - data->terms = $1; -} - event_config: event_config ',' event_term { @@ -308,7 +267,8 @@ sep_slash_dc: '/' | ':' | %% -void parse_events_error(void *data __used, void *scanner __used, +void parse_events_error(struct list_head *list_all __used, + int *idx __used, char const *msg __used) { } diff --git a/trunk/tools/perf/util/pmu.c b/trunk/tools/perf/util/pmu.c index 67715a42cd6d..a119a5371699 100644 --- a/trunk/tools/perf/util/pmu.c +++ b/trunk/tools/perf/util/pmu.c @@ -72,122 +72,14 @@ static int pmu_format(char *name, struct list_head *format) "%s/bus/event_source/devices/%s/format", sysfs, name); if (stat(path, &st) < 0) - return 0; /* no error if format does not exist */ - - if (pmu_format_parse(path, format)) return -1; - return 0; -} - -static int perf_pmu__new_alias(struct list_head *list, char *name, FILE *file) -{ - struct perf_pmu__alias *alias; - char buf[256]; - int ret; - - ret = fread(buf, 1, sizeof(buf), file); - if (ret == 0) - return -EINVAL; - buf[ret] = 0; - - alias = malloc(sizeof(*alias)); - if (!alias) - return -ENOMEM; - - INIT_LIST_HEAD(&alias->terms); - ret = parse_events_terms(&alias->terms, buf); - if (ret) { - free(alias); - return ret; - } - - alias->name = strdup(name); - list_add_tail(&alias->list, list); - return 0; -} - -/* - * Process all the sysfs attributes located under the directory - * specified in 'dir' parameter. - */ -static int pmu_aliases_parse(char *dir, struct list_head *head) -{ - struct dirent *evt_ent; - DIR *event_dir; - int ret = 0; - - event_dir = opendir(dir); - if (!event_dir) - return -EINVAL; - - while (!ret && (evt_ent = readdir(event_dir))) { - char path[PATH_MAX]; - char *name = evt_ent->d_name; - FILE *file; - - if (!strcmp(name, ".") || !strcmp(name, "..")) - continue; - - snprintf(path, PATH_MAX, "%s/%s", dir, name); - - ret = -EINVAL; - file = fopen(path, "r"); - if (!file) - break; - ret = perf_pmu__new_alias(head, name, file); - fclose(file); - } - - closedir(event_dir); - return ret; -} - -/* - * Reading the pmu event aliases definition, which should be located at: - * /sys/bus/event_source/devices//events as sysfs group attributes. 
- */ -static int pmu_aliases(char *name, struct list_head *head) -{ - struct stat st; - char path[PATH_MAX]; - const char *sysfs; - - sysfs = sysfs_find_mountpoint(); - if (!sysfs) - return -1; - - snprintf(path, PATH_MAX, - "%s/bus/event_source/devices/%s/events", sysfs, name); - - if (stat(path, &st) < 0) - return -1; - - if (pmu_aliases_parse(path, head)) + if (pmu_format_parse(path, format)) return -1; return 0; } -static int pmu_alias_terms(struct perf_pmu__alias *alias, - struct list_head *terms) -{ - struct parse_events__term *term, *clone; - LIST_HEAD(list); - int ret; - - list_for_each_entry(term, &alias->terms, list) { - ret = parse_events__term_clone(&clone, term); - if (ret) { - parse_events__free_terms(&list); - return ret; - } - list_add_tail(&clone->list, &list); - } - list_splice(&list, terms); - return 0; -} - /* * Reading/parsing the default pmu type value, which should be * located at: @@ -226,7 +118,6 @@ static struct perf_pmu *pmu_lookup(char *name) { struct perf_pmu *pmu; LIST_HEAD(format); - LIST_HEAD(aliases); __u32 type; /* @@ -244,15 +135,10 @@ static struct perf_pmu *pmu_lookup(char *name) if (!pmu) return NULL; - pmu_aliases(name, &aliases); - INIT_LIST_HEAD(&pmu->format); - INIT_LIST_HEAD(&pmu->aliases); list_splice(&format, &pmu->format); - list_splice(&aliases, &pmu->aliases); pmu->name = strdup(name); pmu->type = type; - list_add_tail(&pmu->list, &pmus); return pmu; } @@ -393,59 +279,6 @@ int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr, return pmu_config(&pmu->format, attr, head_terms); } -static struct perf_pmu__alias *pmu_find_alias(struct perf_pmu *pmu, - struct parse_events__term *term) -{ - struct perf_pmu__alias *alias; - char *name; - - if (parse_events__is_hardcoded_term(term)) - return NULL; - - if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) { - if (term->val.num != 1) - return NULL; - if (pmu_find_format(&pmu->format, term->config)) - return NULL; - name = term->config; - } else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) { - if (strcasecmp(term->config, "event")) - return NULL; - name = term->val.str; - } else { - return NULL; - } - - list_for_each_entry(alias, &pmu->aliases, list) { - if (!strcasecmp(alias->name, name)) - return alias; - } - return NULL; -} - -/* - * Find alias in the terms list and replace it with the terms - * defined for the alias - */ -int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms) -{ - struct parse_events__term *term, *h; - struct perf_pmu__alias *alias; - int ret; - - list_for_each_entry_safe(term, h, head_terms, list) { - alias = pmu_find_alias(pmu, term); - if (!alias) - continue; - ret = pmu_alias_terms(alias, &term->list); - if (ret) - return ret; - list_del(&term->list); - free(term); - } - return 0; -} - int perf_pmu__new_format(struct list_head *list, char *name, int config, unsigned long *bits) { diff --git a/trunk/tools/perf/util/pmu.h b/trunk/tools/perf/util/pmu.h index 535f2c5258ab..68c0db965e1f 100644 --- a/trunk/tools/perf/util/pmu.h +++ b/trunk/tools/perf/util/pmu.h @@ -19,26 +19,17 @@ struct perf_pmu__format { struct list_head list; }; -struct perf_pmu__alias { - char *name; - struct list_head terms; - struct list_head list; -}; - struct perf_pmu { char *name; __u32 type; struct list_head format; - struct list_head aliases; struct list_head list; }; struct perf_pmu *perf_pmu__find(char *name); int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr, struct list_head *head_terms); -int perf_pmu__check_alias(struct perf_pmu *pmu, 
struct list_head *head_terms); -struct list_head *perf_pmu__alias(struct perf_pmu *pmu, - struct list_head *head_terms); + int perf_pmu_wrap(void); void perf_pmu_error(struct list_head *list, char *name, char const *msg); diff --git a/trunk/tools/perf/util/scripting-engines/trace-event-perl.c b/trunk/tools/perf/util/scripting-engines/trace-event-perl.c index 02dfa19a467f..4c1b3d72a1d2 100644 --- a/trunk/tools/perf/util/scripting-engines/trace-event-perl.c +++ b/trunk/tools/perf/util/scripting-engines/trace-event-perl.c @@ -209,10 +209,6 @@ static void define_event_symbols(struct event_format *event, define_symbolic_values(args->symbol.symbols, ev_name, cur_field_name); break; - case PRINT_HEX: - define_event_symbols(event, ev_name, args->hex.field); - define_event_symbols(event, ev_name, args->hex.size); - break; case PRINT_BSTRING: case PRINT_DYNAMIC_ARRAY: case PRINT_STRING: @@ -237,8 +233,7 @@ static void define_event_symbols(struct event_format *event, define_event_symbols(event, ev_name, args->next); } -static inline -struct event_format *find_cache_event(struct pevent *pevent, int type) +static inline struct event_format *find_cache_event(int type) { static char ev_name[256]; struct event_format *event; @@ -246,7 +241,7 @@ struct event_format *find_cache_event(struct pevent *pevent, int type) if (events[type]) return events[type]; - events[type] = event = pevent_find_event(pevent, type); + events[type] = event = trace_find_event(type); if (!event) return NULL; @@ -257,8 +252,7 @@ struct event_format *find_cache_event(struct pevent *pevent, int type) return event; } -static void perl_process_tracepoint(union perf_event *perf_event __unused, - struct pevent *pevent, +static void perl_process_tracepoint(union perf_event *pevent __unused, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine __unused, @@ -281,13 +275,13 @@ static void perl_process_tracepoint(union perf_event *perf_event __unused, if (evsel->attr.type != PERF_TYPE_TRACEPOINT) return; - type = trace_parse_common_type(pevent, data); + type = trace_parse_common_type(data); - event = find_cache_event(pevent, type); + event = find_cache_event(type); if (!event) die("ug! 
no event found for type %d", type); - pid = trace_parse_common_pid(pevent, data); + pid = trace_parse_common_pid(data); sprintf(handler, "%s::%s", event->system, event->name); @@ -320,8 +314,7 @@ static void perl_process_tracepoint(union perf_event *perf_event __unused, offset = field->offset; XPUSHs(sv_2mortal(newSVpv((char *)data + offset, 0))); } else { /* FIELD_IS_NUMERIC */ - val = read_size(pevent, data + field->offset, - field->size); + val = read_size(data + field->offset, field->size); if (field->flags & FIELD_IS_SIGNED) { XPUSHs(sv_2mortal(newSViv(val))); } else { @@ -375,15 +368,14 @@ static void perl_process_event_generic(union perf_event *pevent __unused, LEAVE; } -static void perl_process_event(union perf_event *event, - struct pevent *pevent, +static void perl_process_event(union perf_event *pevent, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine, struct thread *thread) { - perl_process_tracepoint(event, pevent, sample, evsel, machine, thread); - perl_process_event_generic(event, sample, evsel, machine, thread); + perl_process_tracepoint(pevent, sample, evsel, machine, thread); + perl_process_event_generic(pevent, sample, evsel, machine, thread); } static void run_start_sub(void) @@ -456,7 +448,7 @@ static int perl_stop_script(void) return 0; } -static int perl_generate_script(struct pevent *pevent, const char *outfile) +static int perl_generate_script(const char *outfile) { struct event_format *event = NULL; struct format_field *f; @@ -503,7 +495,7 @@ static int perl_generate_script(struct pevent *pevent, const char *outfile) fprintf(ofp, "sub trace_begin\n{\n\t# optional\n}\n\n"); fprintf(ofp, "sub trace_end\n{\n\t# optional\n}\n\n"); - while ((event = trace_find_next_event(pevent, event))) { + while ((event = trace_find_next_event(event))) { fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name); fprintf(ofp, "\tmy ("); diff --git a/trunk/tools/perf/util/scripting-engines/trace-event-python.c b/trunk/tools/perf/util/scripting-engines/trace-event-python.c index ce4d1b0c3862..acb9795286c4 100644 --- a/trunk/tools/perf/util/scripting-engines/trace-event-python.c +++ b/trunk/tools/perf/util/scripting-engines/trace-event-python.c @@ -166,10 +166,6 @@ static void define_event_symbols(struct event_format *event, define_values(PRINT_SYMBOL, args->symbol.symbols, ev_name, cur_field_name); break; - case PRINT_HEX: - define_event_symbols(event, ev_name, args->hex.field); - define_event_symbols(event, ev_name, args->hex.size); - break; case PRINT_STRING: break; case PRINT_TYPE: @@ -194,8 +190,7 @@ static void define_event_symbols(struct event_format *event, define_event_symbols(event, ev_name, args->next); } -static inline -struct event_format *find_cache_event(struct pevent *pevent, int type) +static inline struct event_format *find_cache_event(int type) { static char ev_name[256]; struct event_format *event; @@ -203,7 +198,7 @@ struct event_format *find_cache_event(struct pevent *pevent, int type) if (events[type]) return events[type]; - events[type] = event = pevent_find_event(pevent, type); + events[type] = event = trace_find_event(type); if (!event) return NULL; @@ -214,8 +209,7 @@ struct event_format *find_cache_event(struct pevent *pevent, int type) return event; } -static void python_process_event(union perf_event *perf_event __unused, - struct pevent *pevent, +static void python_process_event(union perf_event *pevent __unused, struct perf_sample *sample, struct perf_evsel *evsel __unused, struct machine *machine __unused, @@ -239,13 +233,13 
@@ static void python_process_event(union perf_event *perf_event __unused, if (!t) Py_FatalError("couldn't create Python tuple"); - type = trace_parse_common_type(pevent, data); + type = trace_parse_common_type(data); - event = find_cache_event(pevent, type); + event = find_cache_event(type); if (!event) die("ug! no event found for type %d", type); - pid = trace_parse_common_pid(pevent, data); + pid = trace_parse_common_pid(data); sprintf(handler_name, "%s__%s", event->system, event->name); @@ -290,8 +284,7 @@ static void python_process_event(union perf_event *perf_event __unused, offset = field->offset; obj = PyString_FromString((char *)data + offset); } else { /* FIELD_IS_NUMERIC */ - val = read_size(pevent, data + field->offset, - field->size); + val = read_size(data + field->offset, field->size); if (field->flags & FIELD_IS_SIGNED) { if ((long long)val >= LONG_MIN && (long long)val <= LONG_MAX) @@ -445,7 +438,7 @@ static int python_stop_script(void) return err; } -static int python_generate_script(struct pevent *pevent, const char *outfile) +static int python_generate_script(const char *outfile) { struct event_format *event = NULL; struct format_field *f; @@ -494,7 +487,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile) fprintf(ofp, "def trace_end():\n"); fprintf(ofp, "\tprint \"in trace_end\"\n\n"); - while ((event = trace_find_next_event(pevent, event))) { + while ((event = trace_find_next_event(event))) { fprintf(ofp, "def %s__%s(", event->system, event->name); fprintf(ofp, "event_name, "); fprintf(ofp, "context, "); diff --git a/trunk/tools/perf/util/session.c b/trunk/tools/perf/util/session.c index 8e485592ca20..56142d0fb8d7 100644 --- a/trunk/tools/perf/util/session.c +++ b/trunk/tools/perf/util/session.c @@ -14,7 +14,6 @@ #include "sort.h" #include "util.h" #include "cpumap.h" -#include "event-parse.h" static int perf_session__open(struct perf_session *self, bool force) { @@ -290,6 +289,7 @@ struct branch_info *machine__resolve_bstack(struct machine *self, } int machine__resolve_callchain(struct machine *self, + struct perf_evsel *evsel __used, struct thread *thread, struct ip_callchain *chain, struct symbol **parent) @@ -1449,7 +1449,7 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) ret += hists__fprintf_nr_events(&session->hists, fp); list_for_each_entry(pos, &session->evlist->entries, node) { - ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos)); + ret += fprintf(fp, "%s stats:\n", event_name(pos)); ret += hists__fprintf_nr_events(&pos->hists, fp); } @@ -1490,8 +1490,8 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, } void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, - struct machine *machine, int print_sym, - int print_dso, int print_symoffset) + struct machine *machine, struct perf_evsel *evsel, + int print_sym, int print_dso, int print_symoffset) { struct addr_location al; struct callchain_cursor_node *node; @@ -1505,7 +1505,7 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, if (symbol_conf.use_callchain && sample->callchain) { - if (machine__resolve_callchain(machine, al.thread, + if (machine__resolve_callchain(machine, evsel, al.thread, sample->callchain, NULL) != 0) { if (verbose) error("Failed to resolve callchain. 
Skipping\n"); @@ -1611,58 +1611,3 @@ void perf_session__fprintf_info(struct perf_session *session, FILE *fp, perf_header__fprintf_info(session, fp, full); fprintf(fp, "# ========\n#\n"); } - - -int __perf_session__set_tracepoints_handlers(struct perf_session *session, - const struct perf_evsel_str_handler *assocs, - size_t nr_assocs) -{ - struct perf_evlist *evlist = session->evlist; - struct event_format *format; - struct perf_evsel *evsel; - char *tracepoint, *name; - size_t i; - int err; - - for (i = 0; i < nr_assocs; i++) { - err = -ENOMEM; - tracepoint = strdup(assocs[i].name); - if (tracepoint == NULL) - goto out; - - err = -ENOENT; - name = strchr(tracepoint, ':'); - if (name == NULL) - goto out_free; - - *name++ = '\0'; - format = pevent_find_event_by_name(session->pevent, - tracepoint, name); - if (format == NULL) { - /* - * Adding a handler for an event not in the session, - * just ignore it. - */ - goto next; - } - - evsel = perf_evlist__find_tracepoint_by_id(evlist, format->id); - if (evsel == NULL) - goto next; - - err = -EEXIST; - if (evsel->handler.func != NULL) - goto out_free; - evsel->handler.func = assocs[i].handler; -next: - free(tracepoint); - } - - err = 0; -out: - return err; - -out_free: - free(tracepoint); - goto out; -} diff --git a/trunk/tools/perf/util/session.h b/trunk/tools/perf/util/session.h index 7c435bde6eb0..0c702e3f0a36 100644 --- a/trunk/tools/perf/util/session.h +++ b/trunk/tools/perf/util/session.h @@ -33,7 +33,6 @@ struct perf_session { struct machine host_machine; struct rb_root machines; struct perf_evlist *evlist; - struct pevent *pevent; /* * FIXME: Need to split this up further, we need global * stats + per event stats. 'perf diff' also needs @@ -152,20 +151,11 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, unsigned int type); void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, - struct machine *machine, int print_sym, - int print_dso, int print_symoffset); + struct machine *machine, struct perf_evsel *evsel, + int print_sym, int print_dso, int print_symoffset); int perf_session__cpu_bitmap(struct perf_session *session, const char *cpu_list, unsigned long *cpu_bitmap); void perf_session__fprintf_info(struct perf_session *s, FILE *fp, bool full); - -struct perf_evsel_str_handler; - -int __perf_session__set_tracepoints_handlers(struct perf_session *session, - const struct perf_evsel_str_handler *assocs, - size_t nr_assocs); - -#define perf_session__set_tracepoints_handlers(session, array) \ - __perf_session__set_tracepoints_handlers(session, array, ARRAY_SIZE(array)) #endif /* __PERF_SESSION_H */ diff --git a/trunk/tools/perf/util/sort.c b/trunk/tools/perf/util/sort.c index 0f5a0a496bc4..a27237430c5f 100644 --- a/trunk/tools/perf/util/sort.c +++ b/trunk/tools/perf/util/sort.c @@ -241,54 +241,6 @@ struct sort_entry sort_sym = { .se_width_idx = HISTC_SYMBOL, }; -/* --sort srcline */ - -static int64_t -sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right) -{ - return (int64_t)(right->ip - left->ip); -} - -static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf, - size_t size, unsigned int width __used) -{ - FILE *fp; - char cmd[PATH_MAX + 2], *path = self->srcline, *nl; - size_t line_len; - - if (path != NULL) - goto out_path; - - snprintf(cmd, sizeof(cmd), "addr2line -e %s %016" PRIx64, - self->ms.map->dso->long_name, self->ip); - fp = popen(cmd, "r"); - if (!fp) - goto out_ip; - - if (getline(&path, &line_len, fp) < 0 || !line_len) - goto out_ip; - 
fclose(fp); - self->srcline = strdup(path); - if (self->srcline == NULL) - goto out_ip; - - nl = strchr(self->srcline, '\n'); - if (nl != NULL) - *nl = '\0'; - path = self->srcline; -out_path: - return repsep_snprintf(bf, size, "%s", path); -out_ip: - return repsep_snprintf(bf, size, "%-#*llx", BITS_PER_LONG / 4, self->ip); -} - -struct sort_entry sort_srcline = { - .se_header = "Source:Line", - .se_cmp = sort__srcline_cmp, - .se_snprintf = hist_entry__srcline_snprintf, - .se_width_idx = HISTC_SRCLINE, -}; - /* --sort parent */ static int64_t @@ -487,7 +439,6 @@ static struct sort_dimension sort_dimensions[] = { DIM(SORT_PARENT, "parent", sort_parent), DIM(SORT_CPU, "cpu", sort_cpu), DIM(SORT_MISPREDICT, "mispredict", sort_mispredict), - DIM(SORT_SRCLINE, "srcline", sort_srcline), }; int sort_dimension__add(const char *tok) diff --git a/trunk/tools/perf/util/sort.h b/trunk/tools/perf/util/sort.h index e724b26acd51..472aa5a63a58 100644 --- a/trunk/tools/perf/util/sort.h +++ b/trunk/tools/perf/util/sort.h @@ -71,7 +71,6 @@ struct hist_entry { char level; bool used; u8 filtered; - char *srcline; struct symbol *parent; union { unsigned long position; @@ -94,7 +93,6 @@ enum sort_type { SORT_SYM_FROM, SORT_SYM_TO, SORT_MISPREDICT, - SORT_SRCLINE, }; /* diff --git a/trunk/tools/perf/util/string.c b/trunk/tools/perf/util/string.c index 199bc4d8905d..d5836382ff2c 100644 --- a/trunk/tools/perf/util/string.c +++ b/trunk/tools/perf/util/string.c @@ -313,25 +313,3 @@ int strtailcmp(const char *s1, const char *s2) return 0; } -/** - * rtrim - Removes trailing whitespace from @s. - * @s: The string to be stripped. - * - * Note that the first trailing whitespace is replaced with a %NUL-terminator - * in the given string @s. Returns @s. - */ -char *rtrim(char *s) -{ - size_t size = strlen(s); - char *end; - - if (!size) - return s; - - end = s + size - 1; - while (end >= s && isspace(*end)) - end--; - *(end + 1) = '\0'; - - return s; -} diff --git a/trunk/tools/perf/util/symbol.c b/trunk/tools/perf/util/symbol.c index 50958bbeb26a..3e2e5ea0f03f 100644 --- a/trunk/tools/perf/util/symbol.c +++ b/trunk/tools/perf/util/symbol.c @@ -1478,31 +1478,14 @@ static int elf_read_build_id(Elf *elf, void *bf, size_t size) goto out; } - /* - * Check following sections for notes: - * '.note.gnu.build-id' - * '.notes' - * '.note' (VDSO specific) - */ - do { - sec = elf_section_by_name(elf, &ehdr, &shdr, - ".note.gnu.build-id", NULL); - if (sec) - break; - + sec = elf_section_by_name(elf, &ehdr, &shdr, + ".note.gnu.build-id", NULL); + if (sec == NULL) { sec = elf_section_by_name(elf, &ehdr, &shdr, ".notes", NULL); - if (sec) - break; - - sec = elf_section_by_name(elf, &ehdr, &shdr, - ".note", NULL); - if (sec) - break; - - return err; - - } while (0); + if (sec == NULL) + goto out; + } data = elf_getdata(sec, NULL); if (data == NULL) @@ -1607,62 +1590,11 @@ int sysfs__read_build_id(const char *filename, void *build_id, size_t size) return err; } -static int filename__read_debuglink(const char *filename, - char *debuglink, size_t size) -{ - int fd, err = -1; - Elf *elf; - GElf_Ehdr ehdr; - GElf_Shdr shdr; - Elf_Data *data; - Elf_Scn *sec; - Elf_Kind ek; - - fd = open(filename, O_RDONLY); - if (fd < 0) - goto out; - - elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); - if (elf == NULL) { - pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename); - goto out_close; - } - - ek = elf_kind(elf); - if (ek != ELF_K_ELF) - goto out_close; - - if (gelf_getehdr(elf, &ehdr) == NULL) { - pr_err("%s: cannot get elf header.\n", 
__func__); - goto out_close; - } - - sec = elf_section_by_name(elf, &ehdr, &shdr, - ".gnu_debuglink", NULL); - if (sec == NULL) - goto out_close; - - data = elf_getdata(sec, NULL); - if (data == NULL) - goto out_close; - - /* the start of this section is a zero-terminated string */ - strncpy(debuglink, data->d_buf, size); - - elf_end(elf); - -out_close: - close(fd); -out: - return err; -} - char dso__symtab_origin(const struct dso *dso) { static const char origin[] = { [SYMTAB__KALLSYMS] = 'k', [SYMTAB__JAVA_JIT] = 'j', - [SYMTAB__DEBUGLINK] = 'l', [SYMTAB__BUILD_ID_CACHE] = 'B', [SYMTAB__FEDORA_DEBUGINFO] = 'f', [SYMTAB__UBUNTU_DEBUGINFO] = 'u', @@ -1730,22 +1662,10 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) */ want_symtab = 1; restart: - for (dso->symtab_type = SYMTAB__DEBUGLINK; + for (dso->symtab_type = SYMTAB__BUILD_ID_CACHE; dso->symtab_type != SYMTAB__NOT_FOUND; dso->symtab_type++) { switch (dso->symtab_type) { - case SYMTAB__DEBUGLINK: { - char *debuglink; - strncpy(name, dso->long_name, size); - debuglink = name + dso->long_name_len; - while (debuglink != name && *debuglink != '/') - debuglink--; - if (*debuglink == '/') - debuglink++; - filename__read_debuglink(dso->long_name, debuglink, - size - (debuglink - name)); - } - break; case SYMTAB__BUILD_ID_CACHE: /* skip the locally configured cache if a symfs is given */ if (symbol_conf.symfs[0] || diff --git a/trunk/tools/perf/util/symbol.h b/trunk/tools/perf/util/symbol.h index a884b99017f0..af0752b1aca1 100644 --- a/trunk/tools/perf/util/symbol.h +++ b/trunk/tools/perf/util/symbol.h @@ -257,7 +257,6 @@ enum symtab_type { SYMTAB__KALLSYMS = 0, SYMTAB__GUEST_KALLSYMS, SYMTAB__JAVA_JIT, - SYMTAB__DEBUGLINK, SYMTAB__BUILD_ID_CACHE, SYMTAB__FEDORA_DEBUGINFO, SYMTAB__UBUNTU_DEBUGINFO, diff --git a/trunk/tools/perf/util/top.c b/trunk/tools/perf/util/top.c index 7eeebcee291c..abe0e8e95068 100644 --- a/trunk/tools/perf/util/top.c +++ b/trunk/tools/perf/util/top.c @@ -65,7 +65,7 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size) top->freq ? 
"Hz" : ""); } - ret += SNPRINTF(bf + ret, size - ret, "%s", perf_evsel__name(top->sym_evsel)); + ret += SNPRINTF(bf + ret, size - ret, "%s", event_name(top->sym_evsel)); ret += SNPRINTF(bf + ret, size - ret, "], "); diff --git a/trunk/tools/perf/util/trace-event-parse.c b/trunk/tools/perf/util/trace-event-parse.c index 0715c843c2e7..5dd3b5ec8411 100644 --- a/trunk/tools/perf/util/trace-event-parse.c +++ b/trunk/tools/perf/util/trace-event-parse.c @@ -32,25 +32,29 @@ int header_page_size_size; int header_page_ts_size; int header_page_data_offset; +struct pevent *perf_pevent; +static struct pevent *pevent; + bool latency_format; -struct pevent *read_trace_init(int file_bigendian, int host_bigendian) +int read_trace_init(int file_bigendian, int host_bigendian) { - struct pevent *pevent = pevent_alloc(); + if (pevent) + return 0; - if (pevent != NULL) { - pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT); - pevent_set_file_bigendian(pevent, file_bigendian); - pevent_set_host_bigendian(pevent, host_bigendian); - } + perf_pevent = pevent_alloc(); + pevent = perf_pevent; - return pevent; + pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT); + pevent_set_file_bigendian(pevent, file_bigendian); + pevent_set_host_bigendian(pevent, host_bigendian); + + return 0; } static int get_common_field(struct scripting_context *context, int *offset, int *size, const char *type) { - struct pevent *pevent = context->pevent; struct event_format *event; struct format_field *field; @@ -146,7 +150,7 @@ void *raw_field_ptr(struct event_format *event, const char *name, void *data) return data + field->offset; } -int trace_parse_common_type(struct pevent *pevent, void *data) +int trace_parse_common_type(void *data) { struct pevent_record record; @@ -154,7 +158,7 @@ int trace_parse_common_type(struct pevent *pevent, void *data) return pevent_data_type(pevent, &record); } -int trace_parse_common_pid(struct pevent *pevent, void *data) +int trace_parse_common_pid(void *data) { struct pevent_record record; @@ -162,21 +166,27 @@ int trace_parse_common_pid(struct pevent *pevent, void *data) return pevent_data_pid(pevent, &record); } -unsigned long long read_size(struct pevent *pevent, void *ptr, int size) +unsigned long long read_size(void *ptr, int size) { return pevent_read_number(pevent, ptr, size); } -void print_trace_event(struct pevent *pevent, int cpu, void *data, int size) +struct event_format *trace_find_event(int type) +{ + return pevent_find_event(pevent, type); +} + + +void print_trace_event(int cpu, void *data, int size) { struct event_format *event; struct pevent_record record; struct trace_seq s; int type; - type = trace_parse_common_type(pevent, data); + type = trace_parse_common_type(data); - event = pevent_find_event(pevent, type); + event = trace_find_event(type); if (!event) { warning("ug! 
no event found for type %d", type); return; @@ -192,8 +202,8 @@ void print_trace_event(struct pevent *pevent, int cpu, void *data, int size) trace_seq_do_printf(&s); } -void print_event(struct pevent *pevent, int cpu, void *data, int size, - unsigned long long nsecs, char *comm) +void print_event(int cpu, void *data, int size, unsigned long long nsecs, + char *comm) { struct pevent_record record; struct trace_seq s; @@ -216,8 +226,7 @@ void print_event(struct pevent *pevent, int cpu, void *data, int size, printf("\n"); } -void parse_proc_kallsyms(struct pevent *pevent, - char *file, unsigned int size __unused) +void parse_proc_kallsyms(char *file, unsigned int size __unused) { unsigned long long addr; char *func; @@ -248,8 +257,7 @@ void parse_proc_kallsyms(struct pevent *pevent, } } -void parse_ftrace_printk(struct pevent *pevent, - char *file, unsigned int size __unused) +void parse_ftrace_printk(char *file, unsigned int size __unused) { unsigned long long addr; char *printk; @@ -273,19 +281,17 @@ void parse_ftrace_printk(struct pevent *pevent, } } -int parse_ftrace_file(struct pevent *pevent, char *buf, unsigned long size) +int parse_ftrace_file(char *buf, unsigned long size) { return pevent_parse_event(pevent, buf, size, "ftrace"); } -int parse_event_file(struct pevent *pevent, - char *buf, unsigned long size, char *sys) +int parse_event_file(char *buf, unsigned long size, char *sys) { return pevent_parse_event(pevent, buf, size, sys); } -struct event_format *trace_find_next_event(struct pevent *pevent, - struct event_format *event) +struct event_format *trace_find_next_event(struct event_format *event) { static int idx; diff --git a/trunk/tools/perf/util/trace-event-read.c b/trunk/tools/perf/util/trace-event-read.c index 719ed74a8565..f097e0dd6c5c 100644 --- a/trunk/tools/perf/util/trace-event-read.c +++ b/trunk/tools/perf/util/trace-event-read.c @@ -114,20 +114,20 @@ static void skip(int size) }; } -static unsigned int read4(struct pevent *pevent) +static unsigned int read4(void) { unsigned int data; read_or_die(&data, 4); - return __data2host4(pevent, data); + return __data2host4(perf_pevent, data); } -static unsigned long long read8(struct pevent *pevent) +static unsigned long long read8(void) { unsigned long long data; read_or_die(&data, 8); - return __data2host8(pevent, data); + return __data2host8(perf_pevent, data); } static char *read_string(void) @@ -168,12 +168,12 @@ static char *read_string(void) return str; } -static void read_proc_kallsyms(struct pevent *pevent) +static void read_proc_kallsyms(void) { unsigned int size; char *buf; - size = read4(pevent); + size = read4(); if (!size) return; @@ -181,29 +181,29 @@ static void read_proc_kallsyms(struct pevent *pevent) read_or_die(buf, size); buf[size] = '\0'; - parse_proc_kallsyms(pevent, buf, size); + parse_proc_kallsyms(buf, size); free(buf); } -static void read_ftrace_printk(struct pevent *pevent) +static void read_ftrace_printk(void) { unsigned int size; char *buf; - size = read4(pevent); + size = read4(); if (!size) return; buf = malloc_or_die(size); read_or_die(buf, size); - parse_ftrace_printk(pevent, buf, size); + parse_ftrace_printk(buf, size); free(buf); } -static void read_header_files(struct pevent *pevent) +static void read_header_files(void) { unsigned long long size; char *header_event; @@ -214,7 +214,7 @@ static void read_header_files(struct pevent *pevent) if (memcmp(buf, "header_page", 12) != 0) die("did not read header page"); - size = read8(pevent); + size = read8(); skip(size); /* @@ -227,48 +227,47 @@ 
static void read_header_files(struct pevent *pevent) if (memcmp(buf, "header_event", 13) != 0) die("did not read header event"); - size = read8(pevent); + size = read8(); header_event = malloc_or_die(size); read_or_die(header_event, size); free(header_event); } -static void read_ftrace_file(struct pevent *pevent, unsigned long long size) +static void read_ftrace_file(unsigned long long size) { char *buf; buf = malloc_or_die(size); read_or_die(buf, size); - parse_ftrace_file(pevent, buf, size); + parse_ftrace_file(buf, size); free(buf); } -static void read_event_file(struct pevent *pevent, char *sys, - unsigned long long size) +static void read_event_file(char *sys, unsigned long long size) { char *buf; buf = malloc_or_die(size); read_or_die(buf, size); - parse_event_file(pevent, buf, size, sys); + parse_event_file(buf, size, sys); free(buf); } -static void read_ftrace_files(struct pevent *pevent) +static void read_ftrace_files(void) { unsigned long long size; int count; int i; - count = read4(pevent); + count = read4(); for (i = 0; i < count; i++) { - size = read8(pevent); - read_ftrace_file(pevent, size); + size = read8(); + read_ftrace_file(size); } } -static void read_event_files(struct pevent *pevent) +static void read_event_files(void) { unsigned long long size; char *sys; @@ -276,15 +275,15 @@ static void read_event_files(struct pevent *pevent) int count; int i,x; - systems = read4(pevent); + systems = read4(); for (i = 0; i < systems; i++) { sys = read_string(); - count = read4(pevent); + count = read4(); for (x=0; x < count; x++) { - size = read8(pevent); - read_event_file(pevent, sys, size); + size = read8(); + read_event_file(sys, size); } } } @@ -378,7 +377,7 @@ static int calc_index(void *ptr, int cpu) return (unsigned long)ptr - (unsigned long)cpu_data[cpu].page; } -struct pevent_record *trace_peek_data(struct pevent *pevent, int cpu) +struct pevent_record *trace_peek_data(int cpu) { struct pevent_record *data; void *page = cpu_data[cpu].page; @@ -400,15 +399,15 @@ struct pevent_record *trace_peek_data(struct pevent *pevent, int cpu) /* FIXME: handle header page */ if (header_page_ts_size != 8) die("expected a long long type for timestamp"); - cpu_data[cpu].timestamp = data2host8(pevent, ptr); + cpu_data[cpu].timestamp = data2host8(perf_pevent, ptr); ptr += 8; switch (header_page_size_size) { case 4: - cpu_data[cpu].page_size = data2host4(pevent, ptr); + cpu_data[cpu].page_size = data2host4(perf_pevent, ptr); ptr += 4; break; case 8: - cpu_data[cpu].page_size = data2host8(pevent, ptr); + cpu_data[cpu].page_size = data2host8(perf_pevent, ptr); ptr += 8; break; default: @@ -422,10 +421,10 @@ struct pevent_record *trace_peek_data(struct pevent *pevent, int cpu) if (idx >= cpu_data[cpu].page_size) { get_next_page(cpu); - return trace_peek_data(pevent, cpu); + return trace_peek_data(cpu); } - type_len_ts = data2host4(pevent, ptr); + type_len_ts = data2host4(perf_pevent, ptr); ptr += 4; type_len = type_len4host(type_len_ts); @@ -435,14 +434,14 @@ struct pevent_record *trace_peek_data(struct pevent *pevent, int cpu) case RINGBUF_TYPE_PADDING: if (!delta) die("error, hit unexpected end of page"); - length = data2host4(pevent, ptr); + length = data2host4(perf_pevent, ptr); ptr += 4; length *= 4; ptr += length; goto read_again; case RINGBUF_TYPE_TIME_EXTEND: - extend = data2host4(pevent, ptr); + extend = data2host4(perf_pevent, ptr); ptr += 4; extend <<= TS_SHIFT; extend += delta; @@ -453,7 +452,7 @@ struct pevent_record *trace_peek_data(struct pevent *pevent, int cpu) ptr += 12; break; 
case 0: - length = data2host4(pevent, ptr); + length = data2host4(perf_pevent, ptr); ptr += 4; die("here! length=%d", length); break; @@ -478,17 +477,17 @@ struct pevent_record *trace_peek_data(struct pevent *pevent, int cpu) return data; } -struct pevent_record *trace_read_data(struct pevent *pevent, int cpu) +struct pevent_record *trace_read_data(int cpu) { struct pevent_record *data; - data = trace_peek_data(pevent, cpu); + data = trace_peek_data(cpu); cpu_data[cpu].next = NULL; return data; } -ssize_t trace_report(int fd, struct pevent **ppevent, bool __repipe) +ssize_t trace_report(int fd, bool __repipe) { char buf[BUFSIZ]; char test[] = { 23, 8, 68 }; @@ -520,32 +519,30 @@ ssize_t trace_report(int fd, struct pevent **ppevent, bool __repipe) file_bigendian = buf[0]; host_bigendian = bigendian(); - *ppevent = read_trace_init(file_bigendian, host_bigendian); - if (*ppevent == NULL) - die("read_trace_init failed"); + read_trace_init(file_bigendian, host_bigendian); read_or_die(buf, 1); long_size = buf[0]; - page_size = read4(*ppevent); + page_size = read4(); - read_header_files(*ppevent); + read_header_files(); - read_ftrace_files(*ppevent); - read_event_files(*ppevent); - read_proc_kallsyms(*ppevent); - read_ftrace_printk(*ppevent); + read_ftrace_files(); + read_event_files(); + read_proc_kallsyms(); + read_ftrace_printk(); size = calc_data_size - 1; calc_data_size = 0; repipe = false; if (show_funcs) { - pevent_print_funcs(*ppevent); + pevent_print_funcs(perf_pevent); return size; } if (show_printk) { - pevent_print_printk(*ppevent); + pevent_print_printk(perf_pevent); return size; } diff --git a/trunk/tools/perf/util/trace-event-scripting.c b/trunk/tools/perf/util/trace-event-scripting.c index 474aa7a7df43..18ae6c1831d3 100644 --- a/trunk/tools/perf/util/trace-event-scripting.c +++ b/trunk/tools/perf/util/trace-event-scripting.c @@ -36,7 +36,6 @@ static int stop_script_unsupported(void) } static void process_event_unsupported(union perf_event *event __unused, - struct pevent *pevent __unused, struct perf_sample *sample __unused, struct perf_evsel *evsel __unused, struct machine *machine __unused, @@ -62,8 +61,7 @@ static int python_start_script_unsupported(const char *script __unused, return -1; } -static int python_generate_script_unsupported(struct pevent *pevent __unused, - const char *outfile __unused) +static int python_generate_script_unsupported(const char *outfile __unused) { print_python_unsupported_msg(); @@ -124,8 +122,7 @@ static int perl_start_script_unsupported(const char *script __unused, return -1; } -static int perl_generate_script_unsupported(struct pevent *pevent __unused, - const char *outfile __unused) +static int perl_generate_script_unsupported(const char *outfile __unused) { print_perl_unsupported_msg(); diff --git a/trunk/tools/perf/util/trace-event.h b/trunk/tools/perf/util/trace-event.h index 8fef1d6687b7..639852ac1117 100644 --- a/trunk/tools/perf/util/trace-event.h +++ b/trunk/tools/perf/util/trace-event.h @@ -8,7 +8,6 @@ struct machine; struct perf_sample; union perf_event; -struct perf_tool; struct thread; extern int header_page_size_size; @@ -30,36 +29,35 @@ enum { int bigendian(void); -struct pevent *read_trace_init(int file_bigendian, int host_bigendian); -void print_trace_event(struct pevent *pevent, int cpu, void *data, int size); +int read_trace_init(int file_bigendian, int host_bigendian); +void print_trace_event(int cpu, void *data, int size); -void print_event(struct pevent *pevent, int cpu, void *data, int size, - unsigned long long nsecs, char 
*comm); +void print_event(int cpu, void *data, int size, unsigned long long nsecs, + char *comm); -int parse_ftrace_file(struct pevent *pevent, char *buf, unsigned long size); -int parse_event_file(struct pevent *pevent, - char *buf, unsigned long size, char *sys); +int parse_ftrace_file(char *buf, unsigned long size); +int parse_event_file(char *buf, unsigned long size, char *sys); -struct pevent_record *trace_peek_data(struct pevent *pevent, int cpu); +struct pevent_record *trace_peek_data(int cpu); +struct event_format *trace_find_event(int type); unsigned long long raw_field_value(struct event_format *event, const char *name, void *data); void *raw_field_ptr(struct event_format *event, const char *name, void *data); -void parse_proc_kallsyms(struct pevent *pevent, char *file, unsigned int size); -void parse_ftrace_printk(struct pevent *pevent, char *file, unsigned int size); +void parse_proc_kallsyms(char *file, unsigned int size __unused); +void parse_ftrace_printk(char *file, unsigned int size __unused); -ssize_t trace_report(int fd, struct pevent **pevent, bool repipe); +ssize_t trace_report(int fd, bool repipe); -int trace_parse_common_type(struct pevent *pevent, void *data); -int trace_parse_common_pid(struct pevent *pevent, void *data); +int trace_parse_common_type(void *data); +int trace_parse_common_pid(void *data); -struct event_format *trace_find_next_event(struct pevent *pevent, - struct event_format *event); -unsigned long long read_size(struct pevent *pevent, void *ptr, int size); +struct event_format *trace_find_next_event(struct event_format *event); +unsigned long long read_size(void *ptr, int size); unsigned long long eval_flag(const char *flag); -struct pevent_record *trace_read_data(struct pevent *pevent, int cpu); +struct pevent_record *trace_read_data(int cpu); int read_tracing_data(int fd, struct list_head *pattrs); struct tracing_data { @@ -79,12 +77,11 @@ struct scripting_ops { int (*start_script) (const char *script, int argc, const char **argv); int (*stop_script) (void); void (*process_event) (union perf_event *event, - struct pevent *pevent, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine, struct thread *thread); - int (*generate_script) (struct pevent *pevent, const char *outfile); + int (*generate_script) (const char *outfile); }; int script_spec_register(const char *spec, struct scripting_ops *ops); @@ -93,7 +90,6 @@ void setup_perl_scripting(void); void setup_python_scripting(void); struct scripting_context { - struct pevent *pevent; void *event_data; }; diff --git a/trunk/tools/perf/util/util.h b/trunk/tools/perf/util/util.h index b13c7331eaf8..2daaedb83d84 100644 --- a/trunk/tools/perf/util/util.h +++ b/trunk/tools/perf/util/util.h @@ -264,6 +264,4 @@ bool is_power_of_2(unsigned long n) size_t hex_width(u64 v); -char *rtrim(char *s); - #endif