diff --git a/[refs] b/[refs] index 8c262184d389..a53d17959f27 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 7b281ee026552f10862b617a2a51acf49c829554 +refs/heads/master: 22b4e63ebe062e2e3d4a3a2b468e47ca9575d598 diff --git a/trunk/Documentation/ABI/testing/sysfs-bus-pci b/trunk/Documentation/ABI/testing/sysfs-bus-pci index 34f51100f029..dff1f48d252d 100644 --- a/trunk/Documentation/ABI/testing/sysfs-bus-pci +++ b/trunk/Documentation/ABI/testing/sysfs-bus-pci @@ -210,3 +210,15 @@ Users: firmware assigned instance number of the PCI device that can help in understanding the firmware intended order of the PCI device. + +What: /sys/bus/pci/devices/.../d3cold_allowed +Date: July 2012 +Contact: Huang Ying +Description: + d3cold_allowed is bit to control whether the corresponding PCI + device can be put into D3Cold state. If it is cleared, the + device will never be put into D3Cold state. If it is set, the + device may be put into D3Cold state if other requirements are + satisfied too. Reading this attribute will show the current + value of d3cold_allowed bit. Writing this attribute will set + the value of d3cold_allowed bit. diff --git a/trunk/Documentation/ABI/testing/sysfs-devices-platform-sh_mobile_lcdc_fb b/trunk/Documentation/ABI/testing/sysfs-devices-platform-sh_mobile_lcdc_fb new file mode 100644 index 000000000000..2107082426da --- /dev/null +++ b/trunk/Documentation/ABI/testing/sysfs-devices-platform-sh_mobile_lcdc_fb @@ -0,0 +1,44 @@ +What: /sys/devices/platform/sh_mobile_lcdc_fb.[0-3]/graphics/fb[0-9]/ovl_alpha +Date: May 2012 +Contact: Laurent Pinchart +Description: + This file is only available on fb[0-9] devices corresponding + to overlay planes. + + Stores the alpha blending value for the overlay. Values range + from 0 (transparent) to 255 (opaque). The value is ignored if + the mode is not set to Alpha Blending. + +What: /sys/devices/platform/sh_mobile_lcdc_fb.[0-3]/graphics/fb[0-9]/ovl_mode +Date: May 2012 +Contact: Laurent Pinchart +Description: + This file is only available on fb[0-9] devices corresponding + to overlay planes. + + Selects the composition mode for the overlay. Possible values + are + + 0 - Alpha Blending + 1 - ROP3 + +What: /sys/devices/platform/sh_mobile_lcdc_fb.[0-3]/graphics/fb[0-9]/ovl_position +Date: May 2012 +Contact: Laurent Pinchart +Description: + This file is only available on fb[0-9] devices corresponding + to overlay planes. + + Stores the x,y overlay position on the display in pixels. The + position format is `[0-9]+,[0-9]+'. + +What: /sys/devices/platform/sh_mobile_lcdc_fb.[0-3]/graphics/fb[0-9]/ovl_rop3 +Date: May 2012 +Contact: Laurent Pinchart +Description: + This file is only available on fb[0-9] devices corresponding + to overlay planes. + + Stores the raster operation (ROP3) for the overlay. Values + range from 0 to 255. The value is ignored if the mode is not + set to ROP3. diff --git a/trunk/Documentation/ABI/testing/sysfs-platform-ideapad-laptop b/trunk/Documentation/ABI/testing/sysfs-platform-ideapad-laptop index 814b01354c41..b31e782bd985 100644 --- a/trunk/Documentation/ABI/testing/sysfs-platform-ideapad-laptop +++ b/trunk/Documentation/ABI/testing/sysfs-platform-ideapad-laptop @@ -5,4 +5,15 @@ Contact: "Ike Panhc " Description: Control the power of camera module. 1 means on, 0 means off. 
+What: /sys/devices/platform/ideapad/fan_mode +Date: June 2012 +KernelVersion: 3.6 +Contact: "Maxim Mikityanskiy " +Description: + Change fan mode + There are four available modes: + * 0 -> Super Silent Mode + * 1 -> Standard Mode + * 2 -> Dust Cleaning + * 4 -> Efficient Thermal Dissipation Mode diff --git a/trunk/Documentation/DocBook/filesystems.tmpl b/trunk/Documentation/DocBook/filesystems.tmpl index 3fca32c41927..25b58efd955d 100644 --- a/trunk/Documentation/DocBook/filesystems.tmpl +++ b/trunk/Documentation/DocBook/filesystems.tmpl @@ -224,8 +224,8 @@ all your transactions. -Then at umount time , in your put_super() (2.4) or write_super() (2.5) -you can then call journal_destroy() to clean up your in-core journal object. +Then at umount time, in your put_super() you can call journal_destroy() +to clean up your in-core journal object. diff --git a/trunk/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml b/trunk/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml index 720395127904..701138f1209d 100644 --- a/trunk/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml +++ b/trunk/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml @@ -125,7 +125,7 @@ the structure refers to a radio tuner the V4L2_TUNER_CAP_NORM flags can't be used. If multiple frequency bands are supported, then capability is the union of all -capability> fields of each &v4l2-frequency-band;. +capability fields of each &v4l2-frequency-band;. diff --git a/trunk/Documentation/block/00-INDEX b/trunk/Documentation/block/00-INDEX index d111e3b23db0..d18ecd827c40 100644 --- a/trunk/Documentation/block/00-INDEX +++ b/trunk/Documentation/block/00-INDEX @@ -3,15 +3,21 @@ biodoc.txt - Notes on the Generic Block Layer Rewrite in Linux 2.5 capability.txt - - Generic Block Device Capability (/sys/block//capability) + - Generic Block Device Capability (/sys/block//capability) +cfq-iosched.txt + - CFQ IO scheduler tunables +data-integrity.txt + - Block data integrity deadline-iosched.txt - Deadline IO scheduler tunables ioprio.txt - Block io priorities (in CFQ scheduler) +queue-sysfs.txt + - Queue's sysfs entries request.txt - The members of struct request (in include/linux/blkdev.h) stat.txt - - Block layer statistics in /sys/block//stat + - Block layer statistics in /sys/block//stat switching-sched.txt - Switching I/O schedulers at runtime writeback_cache_control.txt diff --git a/trunk/Documentation/block/cfq-iosched.txt b/trunk/Documentation/block/cfq-iosched.txt index 6d670f570451..d89b4fe724d7 100644 --- a/trunk/Documentation/block/cfq-iosched.txt +++ b/trunk/Documentation/block/cfq-iosched.txt @@ -1,3 +1,14 @@ +CFQ (Complete Fairness Queueing) +=============================== + +The main aim of the CFQ scheduler is to provide a fair allocation of the disk +I/O bandwidth for all the processes which request an I/O operation. + +CFQ maintains a per-process queue for the processes which request I/O +operations (synchronous requests). In case of asynchronous requests, all the +requests from all the processes are batched together according to their +process's I/O priority. + CFQ ioscheduler tunables ======================== @@ -25,6 +36,72 @@ there are multiple spindles behind single LUN (Host based hardware RAID controller or for storage arrays), setting slice_idle=0 might end up in better throughput and acceptable latencies. +back_seek_max +------------- +This specifies, given in Kbytes, the maximum "distance" for backward seeking.
+The distance is the amount of space from the current head location to the +sectors that are backward in terms of distance. + +This parameter allows the scheduler to anticipate requests in the "backward" +direction and consider them as being the "next" if they are within this +distance from the current head location. + +back_seek_penalty +----------------- +This parameter is used to compute the cost of backward seeking. If the +backward distance of a request is just 1/back_seek_penalty from a "front" +request, then the seeking cost of the two requests is considered equivalent. + +So the scheduler will not bias toward one or the other request (otherwise the scheduler +will bias toward the front request). The default value of back_seek_penalty is 2. + +fifo_expire_async +----------------- +This parameter is used to set the timeout of asynchronous requests. Default +value of this is 248ms. + +fifo_expire_sync +---------------- +This parameter is used to set the timeout of synchronous requests. Default +value of this is 124ms. To favor synchronous requests over asynchronous +ones, this value should be decreased relative to fifo_expire_async. + +slice_async +----------- +This parameter is the same as slice_sync but for the asynchronous queue. The +default value is 40ms. + +slice_async_rq +-------------- +This parameter is used to limit the dispatching of asynchronous requests to the +device request queue in the queue's slice time. The maximum number of requests that +are allowed to be dispatched also depends upon the io priority. Default value +for this is 2. + +slice_sync +---------- +When a queue is selected for execution, the queue's IO requests are only +executed for a certain amount of time (time_slice) before switching to another +queue. This parameter is used to calculate the time slice of the synchronous +queue. + +time_slice is computed using the equation below: +time_slice = slice_sync + (slice_sync/5 * (4 - prio)). To increase the +time_slice of the synchronous queue, increase the value of slice_sync. Default +value is 100ms. + +quantum +------- +This specifies the number of requests dispatched to the device queue. In a +queue's time slice, a request will not be dispatched if the number of requests +in the device exceeds this parameter. This parameter is used for synchronous +requests. + +In case of storage with several disks, this setting can limit the parallel +processing of requests. Therefore, increasing the value can improve the +performance, although this can cause the latency of some I/O to increase due +to the larger number of requests. + CFQ IOPS Mode for group scheduling =================================== Basic CFQ design is to provide priority based time slices. Higher priority diff --git a/trunk/Documentation/block/queue-sysfs.txt b/trunk/Documentation/block/queue-sysfs.txt index d8147b336c35..e54ac1d53403 100644 --- a/trunk/Documentation/block/queue-sysfs.txt +++ b/trunk/Documentation/block/queue-sysfs.txt @@ -9,20 +9,71 @@ These files are the ones found in the /sys/block/xxx/queue/ directory. Files denoted with a RO postfix are readonly and the RW postfix means read-write. +add_random (RW) +---------------- +This file allows one to turn off the disk entropy contribution. Default +value of this file is '1' (on). + +discard_granularity (RO) +----------------------- +This shows the size of internal allocation of the device in bytes, if +reported by the device. A value of '0' means the device does not support +the discard functionality.
+ +discard_max_bytes (RO) +---------------------- +Devices that support discard functionality may have internal limits on +the number of bytes that can be trimmed or unmapped in a single operation. +The discard_max_bytes parameter is set by the device driver to the maximum +number of bytes that can be discarded in a single operation. Discard +requests issued to the device must not exceed this limit. A discard_max_bytes +value of 0 means that the device does not support discard functionality. + +discard_zeroes_data (RO) +------------------------ +When read, this file will show if the discarded blocks are zeroed by the +device or not. If its value is '1', the blocks are zeroed; otherwise they are not. + hw_sector_size (RO) ------------------- This is the hardware sector size of the device, in bytes. +iostats (RW) +------------- +This file is used to control (on/off) the iostats accounting of the +disk. + +logical_block_size (RO) +----------------------- +This is the logical block size of the device, in bytes. + max_hw_sectors_kb (RO) ---------------------- This is the maximum number of kilobytes supported in a single data transfer. +max_integrity_segments (RO) +--------------------------- +When read, this file shows the maximum limit of integrity segments, as +set by the block layer, which a hardware controller can handle. + max_sectors_kb (RW) ------------------- This is the maximum number of kilobytes that the block layer will allow for a filesystem request. Must be smaller than or equal to the maximum size allowed by the hardware. +max_segments (RO) +----------------- +Maximum number of segments of the device. + +max_segment_size (RO) +--------------------- +Maximum segment size of the device. + +minimum_io_size (RO) +-------------------- +This is the smallest preferred io size reported by the device. + nomerges (RW) ------------- This enables the user to disable the lookup logic involved with IO @@ -38,11 +89,31 @@ read or write requests. Note that the total allocated number may be twice this amount, since it applies only to reads or writes (not the accumulated sum). +To avoid priority inversion through request starvation, a request +queue maintains a separate request pool for each cgroup when +CONFIG_BLK_CGROUP is enabled, and this parameter applies to each such +per-block-cgroup request pool. In other words, if there are N block cgroups, +each request queue may have up to N request pools, each independently +regulated by nr_requests. + +optimal_io_size (RO) +-------------------- +This is the optimal io size reported by the device. + +physical_block_size (RO) +------------------------ +This is the physical block size of the device, in bytes. + read_ahead_kb (RW) ------------------ Maximum number of kilobytes to read-ahead for filesystems on this block device. +rotational (RW) +--------------- +This file indicates whether the device is of rotational type or +non-rotational type.
+ rq_affinity (RW) ---------------- If this option is '1', the block layer will migrate request completions to the diff --git a/trunk/Documentation/device-mapper/dm-raid.txt b/trunk/Documentation/device-mapper/dm-raid.txt index 946c73342cde..1c1844957166 100644 --- a/trunk/Documentation/device-mapper/dm-raid.txt +++ b/trunk/Documentation/device-mapper/dm-raid.txt @@ -27,6 +27,10 @@ The target is named "raid" and it accepts the following parameters: - rotating parity N (right-to-left) with data restart raid6_nc RAID6 N continue - rotating parity N (right-to-left) with data continuation + raid10 Various RAID10 inspired algorithms chosen by additional params + - RAID10: Striped Mirrors (aka 'Striping on top of mirrors') + - RAID1E: Integrated Adjacent Stripe Mirroring + - and other similar RAID10 variants Reference: Chapter 4 of http://www.snia.org/sites/default/files/SNIA_DDF_Technical_Position_v2.0.pdf @@ -59,6 +63,28 @@ The target is named "raid" and it accepts the following parameters: logical size of the array. The bitmap records the device synchronisation state for each region. + [raid10_copies <# copies>] + [raid10_format near] + These two options are used to alter the default layout of + a RAID10 configuration. The number of copies is can be + specified, but the default is 2. There are other variations + to how the copies are laid down - the default and only current + option is "near". Near copies are what most people think of + with respect to mirroring. If these options are left + unspecified, or 'raid10_copies 2' and/or 'raid10_format near' + are given, then the layouts for 2, 3 and 4 devices are: + 2 drives 3 drives 4 drives + -------- ---------- -------------- + A1 A1 A1 A1 A2 A1 A1 A2 A2 + A2 A2 A2 A3 A3 A3 A3 A4 A4 + A3 A3 A4 A4 A5 A5 A5 A6 A6 + A4 A4 A5 A6 A6 A7 A7 A8 A8 + .. .. .. .. .. .. .. .. .. + The 2-device layout is equivalent 2-way RAID1. The 4-device + layout is what a traditional RAID10 would look like. The + 3-device layout is what might be called a 'RAID1E - Integrated + Adjacent Stripe Mirroring'. + <#raid_devs>: The number of devices composing the array. Each device consists of two entries. The first is the device containing the metadata (if any); the second is the one containing the diff --git a/trunk/Documentation/devicetree/bindings/arm/mrvl/intc.txt b/trunk/Documentation/devicetree/bindings/arm/mrvl/intc.txt index 80b9a94d9a23..8b53273cb22f 100644 --- a/trunk/Documentation/devicetree/bindings/arm/mrvl/intc.txt +++ b/trunk/Documentation/devicetree/bindings/arm/mrvl/intc.txt @@ -38,3 +38,23 @@ Example: reg-names = "mux status", "mux mask"; mrvl,intc-nr-irqs = <2>; }; + +* Marvell Orion Interrupt controller + +Required properties +- compatible : Should be "marvell,orion-intc". +- #interrupt-cells: Specifies the number of cells needed to encode an + interrupt source. Supported value is <1>. +- interrupt-controller : Declare this node to be an interrupt controller. +- reg : Interrupt mask address. A list of 4 byte ranges, one per controller. + One entry in the list represents 32 interrupts. 
+ +Example: + + intc: interrupt-controller { + compatible = "marvell,orion-intc", "marvell,intc"; + interrupt-controller; + #interrupt-cells = <1>; + reg = <0xfed20204 0x04>, + <0xfed20214 0x04>; + }; diff --git a/trunk/Documentation/devicetree/bindings/ata/marvell.txt b/trunk/Documentation/devicetree/bindings/ata/marvell.txt new file mode 100644 index 000000000000..b5cdd20cde9c --- /dev/null +++ b/trunk/Documentation/devicetree/bindings/ata/marvell.txt @@ -0,0 +1,16 @@ +* Marvell Orion SATA + +Required Properties: +- compatibility : "marvell,orion-sata" +- reg : Address range of controller +- interrupts : Interrupt controller is using +- nr-ports : Number of SATA ports in use. + +Example: + + sata@80000 { + compatible = "marvell,orion-sata"; + reg = <0x80000 0x5000>; + interrupts = <21>; + nr-ports = <2>; + } diff --git a/trunk/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt b/trunk/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt index 05428f39d9ac..e13787498bcf 100644 --- a/trunk/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt +++ b/trunk/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt @@ -27,3 +27,26 @@ Example: interrupt-controller; #interrupt-cells = <1>; }; + +* Marvell Orion GPIO Controller + +Required properties: +- compatible : Should be "marvell,orion-gpio" +- reg : Address and length of the register set for controller. +- gpio-controller : So we know this is a gpio controller. +- ngpio : How many gpios this controller has. +- interrupts : Up to 4 Interrupts for the controller. + +Optional properties: +- mask-offset : For SMP Orions, offset for Nth CPU + +Example: + + gpio0: gpio@10100 { + compatible = "marvell,orion-gpio"; + #gpio-cells = <2>; + gpio-controller; + reg = <0x10100 0x40>; + ngpio = <32>; + interrupts = <35>, <36>, <37>, <38>; + }; diff --git a/trunk/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/trunk/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt index 70cd49b1caa8..1dd622546d06 100644 --- a/trunk/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt +++ b/trunk/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt @@ -10,8 +10,8 @@ Required properties: - compatible : Should be "fsl,-esdhc" Optional properties: -- fsl,cd-internal : Indicate to use controller internal card detection -- fsl,wp-internal : Indicate to use controller internal write protection +- fsl,cd-controller : Indicate to use controller internal card detection +- fsl,wp-controller : Indicate to use controller internal write protection Examples: @@ -19,8 +19,8 @@ esdhc@70004000 { compatible = "fsl,imx51-esdhc"; reg = <0x70004000 0x4000>; interrupts = <1>; - fsl,cd-internal; - fsl,wp-internal; + fsl,cd-controller; + fsl,wp-controller; }; esdhc@70008000 { diff --git a/trunk/Documentation/devicetree/bindings/regulator/tps6586x.txt b/trunk/Documentation/devicetree/bindings/regulator/tps6586x.txt index d156e1b5db12..da80c2ae0915 100644 --- a/trunk/Documentation/devicetree/bindings/regulator/tps6586x.txt +++ b/trunk/Documentation/devicetree/bindings/regulator/tps6586x.txt @@ -9,9 +9,9 @@ Required properties: - regulators: list of regulators provided by this controller, must have property "regulator-compatible" to match their hardware counterparts: sm[0-2], ldo[0-9] and ldo_rtc -- sm0-supply: The input supply for the SM0. -- sm1-supply: The input supply for the SM1. -- sm2-supply: The input supply for the SM2. +- vin-sm0-supply: The input supply for the SM0. +- vin-sm1-supply: The input supply for the SM1. +- vin-sm2-supply: The input supply for the SM2. 
- vinldo01-supply: The input supply for the LDO1 and LDO2 - vinldo23-supply: The input supply for the LDO2 and LDO3 - vinldo4-supply: The input supply for the LDO4 @@ -30,9 +30,9 @@ Example: #gpio-cells = <2>; gpio-controller; - sm0-supply = <&some_reg>; - sm1-supply = <&some_reg>; - sm2-supply = <&some_reg>; + vin-sm0-supply = <&some_reg>; + vin-sm1-supply = <&some_reg>; + vin-sm2-supply = <&some_reg>; vinldo01-supply = <...>; vinldo23-supply = <...>; vinldo4-supply = <...>; diff --git a/trunk/Documentation/devicetree/bindings/watchdog/marvel.txt b/trunk/Documentation/devicetree/bindings/watchdog/marvel.txt new file mode 100644 index 000000000000..0b2503ab0a05 --- /dev/null +++ b/trunk/Documentation/devicetree/bindings/watchdog/marvel.txt @@ -0,0 +1,14 @@ +* Marvell Orion Watchdog Time + +Required Properties: + +- Compatibility : "marvell,orion-wdt" +- reg : Address of the timer registers + +Example: + + wdt@20300 { + compatible = "marvell,orion-wdt"; + reg = <0x20300 0x28>; + status = "okay"; + }; diff --git a/trunk/Documentation/feature-removal-schedule.txt b/trunk/Documentation/feature-removal-schedule.txt index 72ed15075f79..f4d8c7105fcd 100644 --- a/trunk/Documentation/feature-removal-schedule.txt +++ b/trunk/Documentation/feature-removal-schedule.txt @@ -579,7 +579,7 @@ Why: KVM tracepoints provide mostly equivalent information in a much more ---------------------------- What: at91-mci driver ("CONFIG_MMC_AT91") -When: 3.7 +When: 3.8 Why: There are two mci drivers: at91-mci and atmel-mci. The PDC support was added to atmel-mci as a first step to support more chips. Then at91-mci was kept only for old IP versions (on at91rm9200 and @@ -626,3 +626,14 @@ Why: New drivers should use new V4L2_CAP_VIDEO_M2M capability flag Who: Sylwester Nawrocki ---------------------------- + +What: OMAP private DMA implementation +When: 2013 +Why: We have a DMA engine implementation; all users should be updated + to use this rather than persisting with the old APIs. The old APIs + block merging the old DMA engine implementation into the DMA + engine driver. +Who: Russell King , + Santosh Shilimkar + +---------------------------- diff --git a/trunk/Documentation/filesystems/Locking b/trunk/Documentation/filesystems/Locking index 2db1900d7538..e540a24e5d06 100644 --- a/trunk/Documentation/filesystems/Locking +++ b/trunk/Documentation/filesystems/Locking @@ -114,7 +114,6 @@ prototypes: int (*drop_inode) (struct inode *); void (*evict_inode) (struct inode *); void (*put_super) (struct super_block *); - void (*write_super) (struct super_block *); int (*sync_fs)(struct super_block *sb, int wait); int (*freeze_fs) (struct super_block *); int (*unfreeze_fs) (struct super_block *); @@ -136,10 +135,9 @@ write_inode: drop_inode: !!!inode->i_lock!!! 
evict_inode: put_super: write -write_super: read sync_fs: read -freeze_fs: read -unfreeze_fs: read +freeze_fs: write +unfreeze_fs: write statfs: maybe(read) (see below) remount_fs: write umount_begin: no @@ -359,7 +357,6 @@ prototypes: int (*lm_compare_owner)(struct file_lock *, struct file_lock *); void (*lm_notify)(struct file_lock *); /* unblock callback */ int (*lm_grant)(struct file_lock *, struct file_lock *, int); - void (*lm_release_private)(struct file_lock *); void (*lm_break)(struct file_lock *); /* break_lease callback */ int (*lm_change)(struct file_lock **, int); @@ -368,7 +365,6 @@ locking rules: lm_compare_owner: yes no lm_notify: yes no lm_grant: no no -lm_release_private: maybe no lm_break: yes no lm_change yes no diff --git a/trunk/Documentation/filesystems/porting b/trunk/Documentation/filesystems/porting index 2bef2b3843d1..0742feebc6e2 100644 --- a/trunk/Documentation/filesystems/porting +++ b/trunk/Documentation/filesystems/porting @@ -94,9 +94,8 @@ protected. --- [mandatory] -BKL is also moved from around sb operations. ->write_super() Is now called -without BKL held. BKL should have been shifted into individual fs sb_op -functions. If you don't need it, remove it. +BKL is also moved from around sb operations. BKL should have been shifted into +individual fs sb_op functions. If you don't need it, remove it. --- [informational] diff --git a/trunk/Documentation/filesystems/vfat.txt b/trunk/Documentation/filesystems/vfat.txt index ead764b2728f..de1e6c4dccff 100644 --- a/trunk/Documentation/filesystems/vfat.txt +++ b/trunk/Documentation/filesystems/vfat.txt @@ -137,6 +137,17 @@ errors=panic|continue|remount-ro without doing anything or remount the partition in read-only mode (default behavior). +discard -- If set, issues discard/TRIM commands to the block + device when blocks are freed. This is useful for SSD devices + and sparse/thinly-provisoned LUNs. + +nfs -- This option maintains an index (cache) of directory + inodes by i_logstart which is used by the nfs-related code to + improve look-ups. + + Enable this only if you want to export the FAT filesystem + over NFS + : 0,1,yes,no,true,false TODO diff --git a/trunk/Documentation/filesystems/vfs.txt b/trunk/Documentation/filesystems/vfs.txt index 065aa2dc0835..2ee133e030c3 100644 --- a/trunk/Documentation/filesystems/vfs.txt +++ b/trunk/Documentation/filesystems/vfs.txt @@ -216,7 +216,6 @@ struct super_operations { void (*drop_inode) (struct inode *); void (*delete_inode) (struct inode *); void (*put_super) (struct super_block *); - void (*write_super) (struct super_block *); int (*sync_fs)(struct super_block *sb, int wait); int (*freeze_fs) (struct super_block *); int (*unfreeze_fs) (struct super_block *); @@ -273,9 +272,6 @@ or bottom half). put_super: called when the VFS wishes to free the superblock (i.e. unmount). This is called with the superblock lock held - write_super: called when the VFS superblock needs to be written to - disc. This method is optional - sync_fs: called when VFS is writing out all dirty data associated with a superblock. The second parameter indicates whether the method should wait until the write out has been completed. Optional. 
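The Locking, porting and vfs.txt hunks above all document the same API change: ->write_super() is removed from struct super_operations, and any periodic flushing of superblock state is now the filesystem's own responsibility (typically done from ->sync_fs). A minimal sketch of an operations table after this change follows; "foofs" and its callbacks are hypothetical names used only for illustration, not something added by this patch:

	/* Hypothetical "foofs": a super_operations table without ->write_super.
	 * Dirty in-core state is flushed from ->sync_fs and released in
	 * ->put_super at umount time, matching the updated vfs.txt text. */
	#include <linux/fs.h>

	static int foofs_sync_fs(struct super_block *sb, int wait)
	{
		/* write out foofs's own dirty superblock/metadata here */
		return 0;
	}

	static void foofs_put_super(struct super_block *sb)
	{
		/* free in-core state, e.g. tear down a journal with journal_destroy() */
	}

	static const struct super_operations foofs_super_ops = {
		.statfs    = simple_statfs,	/* generic helper from fs/libfs.c */
		.sync_fs   = foofs_sync_fs,
		.put_super = foofs_put_super,
		/* no .write_super member any more */
	};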
diff --git a/trunk/Documentation/i2c/busses/i2c-i801 b/trunk/Documentation/i2c/busses/i2c-i801 index 615142da4ef6..157416e78cc4 100644 --- a/trunk/Documentation/i2c/busses/i2c-i801 +++ b/trunk/Documentation/i2c/busses/i2c-i801 @@ -21,6 +21,7 @@ Supported adapters: * Intel DH89xxCC (PCH) * Intel Panther Point (PCH) * Intel Lynx Point (PCH) + * Intel Lynx Point-LP (PCH) Datasheets: Publicly available at the Intel website On Intel Patsburg and later chipsets, both the normal host SMBus controller diff --git a/trunk/Documentation/laptops/laptop-mode.txt b/trunk/Documentation/laptops/laptop-mode.txt index 0bf25eebce94..4ebbfc3f1c6e 100644 --- a/trunk/Documentation/laptops/laptop-mode.txt +++ b/trunk/Documentation/laptops/laptop-mode.txt @@ -262,9 +262,9 @@ MINIMUM_BATTERY_MINUTES=10 # # Allowed dirty background ratio, in percent. Once DIRTY_RATIO has been -# exceeded, the kernel will wake pdflush which will then reduce the amount -# of dirty memory to dirty_background_ratio. Set this nice and low, so once -# some writeout has commenced, we do a lot of it. +# exceeded, the kernel will wake flusher threads which will then reduce the +# amount of dirty memory to dirty_background_ratio. Set this nice and low, +# so once some writeout has commenced, we do a lot of it. # #DIRTY_BACKGROUND_RATIO=5 @@ -384,9 +384,9 @@ CPU_MAXFREQ=${CPU_MAXFREQ:-'slowest'} # # Allowed dirty background ratio, in percent. Once DIRTY_RATIO has been -# exceeded, the kernel will wake pdflush which will then reduce the amount -# of dirty memory to dirty_background_ratio. Set this nice and low, so once -# some writeout has commenced, we do a lot of it. +# exceeded, the kernel will wake flusher threads which will then reduce the +# amount of dirty memory to dirty_background_ratio. Set this nice and low, +# so once some writeout has commenced, we do a lot of it. # DIRTY_BACKGROUND_RATIO=${DIRTY_BACKGROUND_RATIO:-'5'} diff --git a/trunk/Documentation/networking/netconsole.txt b/trunk/Documentation/networking/netconsole.txt index 8d022073e3ef..2e9e0ae2cd45 100644 --- a/trunk/Documentation/networking/netconsole.txt +++ b/trunk/Documentation/networking/netconsole.txt @@ -51,8 +51,23 @@ Built-in netconsole starts immediately after the TCP stack is initialized and attempts to bring up the supplied dev at the supplied address. -The remote host can run either 'netcat -u -l -p ', -'nc -l -u ' or syslogd. +The remote host has several options to receive the kernel messages, +for example: + +1) syslogd + +2) netcat + + On distributions using a BSD-based netcat version (e.g. 
Fedora, + openSUSE and Ubuntu) the listening port must be specified without + the -p switch: + + 'nc -u -l -p ' / 'nc -u -l ' or + 'netcat -u -l -p ' / 'netcat -u -l ' + +3) socat + + 'socat udp-recv: -' Dynamic reconfiguration: ======================== diff --git a/trunk/Documentation/pinctrl.txt b/trunk/Documentation/pinctrl.txt index e40f4b4e1977..1479aca23744 100644 --- a/trunk/Documentation/pinctrl.txt +++ b/trunk/Documentation/pinctrl.txt @@ -840,9 +840,9 @@ static unsigned long i2c_pin_configs[] = { static struct pinctrl_map __initdata mapping[] = { PIN_MAP_MUX_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", "i2c0"), - PIN_MAP_MUX_CONFIGS_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", i2c_grp_configs), - PIN_MAP_MUX_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0scl", i2c_pin_configs), - PIN_MAP_MUX_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0sda", i2c_pin_configs), + PIN_MAP_CONFIGS_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", i2c_grp_configs), + PIN_MAP_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0scl", i2c_pin_configs), + PIN_MAP_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0sda", i2c_pin_configs), }; Finally, some devices expect the mapping table to contain certain specific diff --git a/trunk/Documentation/security/Yama.txt b/trunk/Documentation/security/Yama.txt index e369de2d48cd..dd908cf64ecf 100644 --- a/trunk/Documentation/security/Yama.txt +++ b/trunk/Documentation/security/Yama.txt @@ -46,14 +46,13 @@ restrictions, it can call prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, ...) so that any otherwise allowed process (even those in external pid namespaces) may attach. -These restrictions do not change how ptrace via PTRACE_TRACEME operates. - -The sysctl settings are: +The sysctl settings (writable only with CAP_SYS_PTRACE) are: 0 - classic ptrace permissions: a process can PTRACE_ATTACH to any other process running under the same uid, as long as it is dumpable (i.e. did not transition uids, start privileged, or have called - prctl(PR_SET_DUMPABLE...) already). + prctl(PR_SET_DUMPABLE...) already). Similarly, PTRACE_TRACEME is + unchanged. 1 - restricted ptrace: a process must have a predefined relationship with the inferior it wants to call PTRACE_ATTACH on. By default, @@ -61,12 +60,13 @@ The sysctl settings are: classic criteria is also met. To change the relationship, an inferior can call prctl(PR_SET_PTRACER, debugger, ...) to declare an allowed debugger PID to call PTRACE_ATTACH on the inferior. + Using PTRACE_TRACEME is unchanged. 2 - admin-only attach: only processes with CAP_SYS_PTRACE may use ptrace - with PTRACE_ATTACH. + with PTRACE_ATTACH, or through children calling PTRACE_TRACEME. -3 - no attach: no processes may use ptrace with PTRACE_ATTACH. Once set, - this sysctl cannot be changed to a lower value. +3 - no attach: no processes may use ptrace with PTRACE_ATTACH nor via + PTRACE_TRACEME. Once set, this sysctl value cannot be changed. The original children-only logic was based on the restrictions in grsecurity. 
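The Yama text above describes how, with ptrace_scope set to 1, an inferior can use prctl(PR_SET_PTRACER, ...) to name the one process allowed to PTRACE_ATTACH to it. A hedged userspace sketch follows; the program itself and the choice of the parent process as the debugger are illustrative assumptions, not part of this patch:

	/* Illustrative only: allow our parent (e.g. a crash handler or debugger
	 * wrapper) to ptrace us even under Yama's "restricted ptrace" mode. */
	#include <stdio.h>
	#include <sys/prctl.h>
	#include <unistd.h>

	#ifndef PR_SET_PTRACER
	#define PR_SET_PTRACER 0x59616d61	/* "Yama"; older headers may lack it */
	#endif

	int main(void)
	{
		if (prctl(PR_SET_PTRACER, (unsigned long)getppid(), 0, 0, 0) != 0)
			perror("prctl(PR_SET_PTRACER)");
		/* ... run the workload the parent may now attach to ... */
		pause();
		return 0;
	}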
diff --git a/trunk/Documentation/sound/alsa/HD-Audio-Models.txt b/trunk/Documentation/sound/alsa/HD-Audio-Models.txt index 7456360e161c..a92bba816843 100644 --- a/trunk/Documentation/sound/alsa/HD-Audio-Models.txt +++ b/trunk/Documentation/sound/alsa/HD-Audio-Models.txt @@ -53,6 +53,7 @@ ALC882/883/885/888/889 acer-aspire-8930g Acer Aspire 8330G/6935G acer-aspire Acer Aspire others inv-dmic Inverted internal mic workaround + no-primary-hp VAIO Z workaround (for fixed speaker DAC) ALC861/660 ========== @@ -273,6 +274,10 @@ STAC92HD83* dell-s14 Dell laptop dell-vostro-3500 Dell Vostro 3500 laptop hp-dv7-4000 HP dv-7 4000 + hp_cNB11_intquad HP CNB models with 4 speakers + hp-zephyr HP Zephyr + hp-led HP with broken BIOS for mute LED + hp-inv-led HP with broken BIOS for inverted mute LED auto BIOS setup (default) STAC9872 diff --git a/trunk/Documentation/sysctl/fs.txt b/trunk/Documentation/sysctl/fs.txt index 8c235b6e4246..88152f214f48 100644 --- a/trunk/Documentation/sysctl/fs.txt +++ b/trunk/Documentation/sysctl/fs.txt @@ -32,6 +32,8 @@ Currently, these files are in /proc/sys/fs: - nr_open - overflowuid - overflowgid +- protected_hardlinks +- protected_symlinks - suid_dumpable - super-max - super-nr @@ -157,6 +159,46 @@ The default is 65534. ============================================================== +protected_hardlinks: + +A long-standing class of security issues is the hardlink-based +time-of-check-time-of-use race, most commonly seen in world-writable +directories like /tmp. The common method of exploitation of this flaw +is to cross privilege boundaries when following a given hardlink (i.e. a +root process follows a hardlink created by another user). Additionally, +on systems without separated partitions, this stops unauthorized users +from "pinning" vulnerable setuid/setgid files against being upgraded by +the administrator, or linking to special files. + +When set to "0", hardlink creation behavior is unrestricted. + +When set to "1" hardlinks cannot be created by users if they do not +already own the source file, or do not have read/write access to it. + +This protection is based on the restrictions in Openwall and grsecurity. + +============================================================== + +protected_symlinks: + +A long-standing class of security issues is the symlink-based +time-of-check-time-of-use race, most commonly seen in world-writable +directories like /tmp. The common method of exploitation of this flaw +is to cross privilege boundaries when following a given symlink (i.e. a +root process follows a symlink belonging to another user). For a likely +incomplete list of hundreds of examples across the years, please see: +http://cve.mitre.org/cgi-bin/cvekey.cgi?keyword=/tmp + +When set to "0", symlink following behavior is unrestricted. + +When set to "1" symlinks are permitted to be followed only when outside +a sticky world-writable directory, or when the uid of the symlink and +follower match, or when the directory owner matches the symlink's owner. + +This protection is based on the restrictions in Openwall and grsecurity. + +============================================================== + suid_dumpable: This value can be used to query and set the core dump mode for setuid diff --git a/trunk/Documentation/sysctl/vm.txt b/trunk/Documentation/sysctl/vm.txt index dcc2a94ae34e..078701fdbd4d 100644 --- a/trunk/Documentation/sysctl/vm.txt +++ b/trunk/Documentation/sysctl/vm.txt @@ -76,8 +76,8 @@ huge pages although processes will also directly compact memory as required. 
dirty_background_bytes -Contains the amount of dirty memory at which the pdflush background writeback -daemon will start writeback. +Contains the amount of dirty memory at which the background kernel +flusher threads will start writeback. Note: dirty_background_bytes is the counterpart of dirty_background_ratio. Only one of them may be specified at a time. When one sysctl is written it is @@ -89,7 +89,7 @@ other appears as 0 when read. dirty_background_ratio Contains, as a percentage of total system memory, the number of pages at which -the pdflush background writeback daemon will start writing out dirty data. +the background kernel flusher threads will start writing out dirty data. ============================================================== @@ -112,9 +112,9 @@ retained. dirty_expire_centisecs This tunable is used to define when dirty data is old enough to be eligible -for writeout by the pdflush daemons. It is expressed in 100'ths of a second. -Data which has been dirty in-memory for longer than this interval will be -written out next time a pdflush daemon wakes up. +for writeout by the kernel flusher threads. It is expressed in 100'ths +of a second. Data which has been dirty in-memory for longer than this +interval will be written out next time a flusher thread wakes up. ============================================================== @@ -128,7 +128,7 @@ data. dirty_writeback_centisecs -The pdflush writeback daemons will periodically wake up and write `old' data +The kernel flusher threads will periodically wake up and write `old' data out to disk. This tunable expresses the interval between those wakeups, in 100'ths of a second. diff --git a/trunk/Documentation/vm/hugetlbpage.txt b/trunk/Documentation/vm/hugetlbpage.txt index f8551b3879f8..4ac359b7aa17 100644 --- a/trunk/Documentation/vm/hugetlbpage.txt +++ b/trunk/Documentation/vm/hugetlbpage.txt @@ -299,11 +299,17 @@ map_hugetlb.c. ******************************************************************* /* - * hugepage-shm: see Documentation/vm/hugepage-shm.c + * map_hugetlb: see tools/testing/selftests/vm/map_hugetlb.c */ ******************************************************************* /* - * hugepage-mmap: see Documentation/vm/hugepage-mmap.c + * hugepage-shm: see tools/testing/selftests/vm/hugepage-shm.c + */ + +******************************************************************* + +/* + * hugepage-mmap: see tools/testing/selftests/vm/hugepage-mmap.c */ diff --git a/trunk/Documentation/w1/slaves/w1_therm b/trunk/Documentation/w1/slaves/w1_therm index 0403aaaba878..874a8ca93feb 100644 --- a/trunk/Documentation/w1/slaves/w1_therm +++ b/trunk/Documentation/w1/slaves/w1_therm @@ -3,6 +3,7 @@ Kernel driver w1_therm Supported chips: * Maxim ds18*20 based temperature sensors. + * Maxim ds1825 based temperature sensors. Author: Evgeniy Polyakov @@ -15,6 +16,7 @@ supported family codes: W1_THERM_DS18S20 0x10 W1_THERM_DS1822 0x22 W1_THERM_DS18B20 0x28 +W1_THERM_DS1825 0x3B Support is provided through the sysfs w1_slave file. Each open and read sequence will initiate a temperature conversion then provide two diff --git a/trunk/Documentation/watchdog/src/watchdog-test.c b/trunk/Documentation/watchdog/src/watchdog-test.c index 73ff5cc93e05..3da822967ee0 100644 --- a/trunk/Documentation/watchdog/src/watchdog-test.c +++ b/trunk/Documentation/watchdog/src/watchdog-test.c @@ -31,7 +31,7 @@ static void keep_alive(void) * or "-e" to enable the card. 
*/ -void term(int sig) +static void term(int sig) { close(fd); fprintf(stderr, "Stopping watchdog ticks...\n"); diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index 6720018bc674..fdc0119963e7 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -827,24 +827,24 @@ F: arch/arm/mach-pxa/colibri-pxa270-income.c ARM/INTEL IOP32X ARM ARCHITECTURE M: Lennert Buytenhek -M: Dan Williams +M: Dan Williams L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained ARM/INTEL IOP33X ARM ARCHITECTURE -M: Dan Williams +M: Dan Williams L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained ARM/INTEL IOP13XX ARM ARCHITECTURE M: Lennert Buytenhek -M: Dan Williams +M: Dan Williams L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained ARM/INTEL IQ81342EX MACHINE SUPPORT M: Lennert Buytenhek -M: Dan Williams +M: Dan Williams L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -869,7 +869,7 @@ F: drivers/pcmcia/pxa2xx_stargate2.c ARM/INTEL XSC3 (MANZANO) ARM CORE M: Lennert Buytenhek -M: Dan Williams +M: Dan Williams L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -925,14 +925,14 @@ S: Maintained ARM/NOMADIK ARCHITECTURE M: Alessandro Rubini -M: Linus Walleij +M: Linus Walleij M: STEricsson L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-nomadik/ F: arch/arm/plat-nomadik/ F: drivers/i2c/busses/i2c-nomadik.c -T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-nomadik.git ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT M: Nelson Castillo @@ -1146,7 +1146,7 @@ F: drivers/usb/host/ehci-w90x900.c F: drivers/video/nuc900fb.c ARM/U300 MACHINE SUPPORT -M: Linus Walleij +M: Linus Walleij L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Supported F: arch/arm/mach-u300/ @@ -1161,15 +1161,20 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git ARM/Ux500 ARM ARCHITECTURE M: Srinidhi Kasagar -M: Linus Walleij +M: Linus Walleij L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-ux500/ +F: drivers/clocksource/clksrc-dbx500-prcmu.c F: drivers/dma/ste_dma40* +F: drivers/hwspinlock/u8500_hsem.c F: drivers/mfd/abx500* F: drivers/mfd/ab8500* -F: drivers/mfd/stmpe* +F: drivers/mfd/dbx500* +F: drivers/mfd/db8500* +F: drivers/pinctrl/pinctrl-nomadik* F: drivers/rtc/rtc-ab8500.c +F: drivers/rtc/rtc-pl031.c T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git ARM/VFP SUPPORT @@ -1227,9 +1232,9 @@ S: Maintained F: drivers/hwmon/asb100.c ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API -M: Dan Williams +M: Dan Williams W: http://sourceforge.net/projects/xscaleiop -S: Supported +S: Maintained F: Documentation/crypto/async-tx-api.txt F: crypto/async_tx/ F: drivers/dma/ @@ -2212,7 +2217,7 @@ S: Maintained F: drivers/scsi/tmscsim.* DC395x SCSI driver -M: Oliver Neukum +M: Oliver Neukum M: Ali Akcaagac M: Jamie Lenehan W: http://twibble.org/dist/dc395x/ @@ -2359,7 +2364,7 @@ T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git DMA GENERIC OFFLOAD ENGINE SUBSYSTEM M: Vinod Koul -M: Dan Williams +M: Dan Williams S: Supported F: drivers/dma/ F: include/linux/dma* @@ -3094,7 +3099,7 @@ F: include/linux/gigaset_dev.h GPIO SUBSYSTEM M: Grant Likely -M: Linus Walleij +M: Linus 
Walleij S: Maintained T: git git://git.secretlab.ca/git/linux-2.6.git F: Documentation/gpio.txt @@ -3547,7 +3552,6 @@ K: \b(ABS|SYN)_MT_ INTEL C600 SERIES SAS CONTROLLER DRIVER M: Intel SCU Linux support -M: Dan Williams M: Dave Jiang M: Ed Nadolski L: linux-scsi@vger.kernel.org @@ -3590,8 +3594,8 @@ F: arch/x86/kernel/microcode_core.c F: arch/x86/kernel/microcode_intel.c INTEL I/OAT DMA DRIVER -M: Dan Williams -S: Supported +M: Dan Williams +S: Maintained F: drivers/dma/ioat* INTEL IOMMU (VT-d) @@ -3603,8 +3607,8 @@ F: drivers/iommu/intel-iommu.c F: include/linux/intel-iommu.h INTEL IOP-ADMA DMA DRIVER -M: Dan Williams -S: Maintained +M: Dan Williams +S: Odd fixes F: drivers/dma/iop-adma.c INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT @@ -4533,7 +4537,7 @@ S: Supported F: arch/microblaze/ MICROTEK X6 SCANNER -M: Oliver Neukum +M: Oliver Neukum S: Maintained F: drivers/usb/image/microtek.* @@ -5329,14 +5333,15 @@ PIN CONTROL SUBSYSTEM M: Linus Walleij S: Maintained F: drivers/pinctrl/ +F: include/linux/pinctrl/ PIN CONTROLLER - ST SPEAR -M: Viresh Kumar +M: Viresh Kumar L: spear-devel@list.st.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) W: http://www.st.com/spear S: Maintained -F: driver/pinctrl/spear/ +F: drivers/pinctrl/spear/ PKTCDVD DRIVER M: Peter Osterlund @@ -7071,7 +7076,7 @@ F: include/linux/mtd/ubi.h F: include/mtd/ubi-user.h USB ACM DRIVER -M: Oliver Neukum +M: Oliver Neukum L: linux-usb@vger.kernel.org S: Maintained F: Documentation/usb/acm.txt @@ -7092,7 +7097,7 @@ S: Supported F: drivers/block/ub.c USB CDC ETHERNET DRIVER -M: Oliver Neukum +M: Oliver Neukum L: linux-usb@vger.kernel.org S: Maintained F: drivers/net/usb/cdc_*.c @@ -7165,7 +7170,7 @@ F: drivers/usb/host/isp116x* F: include/linux/usb/isp116x.h USB KAWASAKI LSI DRIVER -M: Oliver Neukum +M: Oliver Neukum L: linux-usb@vger.kernel.org S: Maintained F: drivers/usb/serial/kl5kusb105.* @@ -7283,6 +7288,12 @@ W: http://www.connecttech.com S: Supported F: drivers/usb/serial/whiteheat* +USB SMSC75XX ETHERNET DRIVER +M: Steve Glendinning +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/usb/smsc75xx.* + USB SMSC95XX ETHERNET DRIVER M: Steve Glendinning L: netdev@vger.kernel.org @@ -7382,6 +7393,7 @@ W: http://user-mode-linux.sourceforge.net S: Maintained F: Documentation/virtual/uml/ F: arch/um/ +F: arch/x86/um/ F: fs/hostfs/ F: fs/hppfs/ @@ -7664,23 +7676,28 @@ S: Supported F: Documentation/hwmon/wm83?? F: arch/arm/mach-s3c64xx/mach-crag6410* F: drivers/clk/clk-wm83*.c +F: drivers/extcon/extcon-arizona.c F: drivers/leds/leds-wm83*.c F: drivers/gpio/gpio-*wm*.c +F: drivers/gpio/gpio-arizona.c F: drivers/hwmon/wm83??-hwmon.c F: drivers/input/misc/wm831x-on.c F: drivers/input/touchscreen/wm831x-ts.c F: drivers/input/touchscreen/wm97*.c -F: drivers/mfd/wm8*.c +F: drivers/mfd/arizona* +F: drivers/mfd/wm*.c F: drivers/power/wm83*.c F: drivers/rtc/rtc-wm83*.c F: drivers/regulator/wm8*.c F: drivers/video/backlight/wm83*_bl.c F: drivers/watchdog/wm83*_wdt.c +F: include/linux/mfd/arizona/ F: include/linux/mfd/wm831x/ F: include/linux/mfd/wm8350/ F: include/linux/mfd/wm8400* F: include/linux/wm97xx.h F: include/sound/wm????.h +F: sound/soc/codecs/arizona.? 
F: sound/soc/codecs/wm* WORKQUEUE diff --git a/trunk/Makefile b/trunk/Makefile index 8e4c0a7d402b..0f66f146d57e 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1,7 +1,7 @@ VERSION = 3 -PATCHLEVEL = 5 +PATCHLEVEL = 6 SUBLEVEL = 0 -EXTRAVERSION = +EXTRAVERSION = -rc5 NAME = Saber-toothed Squirrel # *DOCUMENTATION* diff --git a/trunk/arch/alpha/Kconfig b/trunk/arch/alpha/Kconfig index d5b9b5e645cc..9944dedee5b1 100644 --- a/trunk/arch/alpha/Kconfig +++ b/trunk/arch/alpha/Kconfig @@ -18,6 +18,8 @@ config ALPHA select ARCH_HAVE_NMI_SAFE_CMPXCHG select GENERIC_SMP_IDLE_THREAD select GENERIC_CMOS_UPDATE + select GENERIC_STRNCPY_FROM_USER + select GENERIC_STRNLEN_USER help The Alpha is a 64-bit general-purpose processor designed and marketed by the Digital Equipment Corporation of blessed memory, diff --git a/trunk/arch/alpha/include/asm/atomic.h b/trunk/arch/alpha/include/asm/atomic.h index 3bb7ffeae3bc..c2cbe4fc391c 100644 --- a/trunk/arch/alpha/include/asm/atomic.h +++ b/trunk/arch/alpha/include/asm/atomic.h @@ -14,8 +14,8 @@ */ -#define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) -#define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } ) +#define ATOMIC_INIT(i) { (i) } +#define ATOMIC64_INIT(i) { (i) } #define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic64_read(v) (*(volatile long *)&(v)->counter) diff --git a/trunk/arch/alpha/include/asm/fpu.h b/trunk/arch/alpha/include/asm/fpu.h index db00f7885faa..e477bcd5b94a 100644 --- a/trunk/arch/alpha/include/asm/fpu.h +++ b/trunk/arch/alpha/include/asm/fpu.h @@ -1,7 +1,9 @@ #ifndef __ASM_ALPHA_FPU_H #define __ASM_ALPHA_FPU_H +#ifdef __KERNEL__ #include +#endif /* * Alpha floating-point control register defines: diff --git a/trunk/arch/alpha/include/asm/ptrace.h b/trunk/arch/alpha/include/asm/ptrace.h index fd698a174f26..b87755a19554 100644 --- a/trunk/arch/alpha/include/asm/ptrace.h +++ b/trunk/arch/alpha/include/asm/ptrace.h @@ -76,7 +76,10 @@ struct switch_stack { #define task_pt_regs(task) \ ((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1) -#define force_successful_syscall_return() (task_pt_regs(current)->r0 = 0) +#define current_pt_regs() \ + ((struct pt_regs *) ((char *)current_thread_info() + 2*PAGE_SIZE) - 1) + +#define force_successful_syscall_return() (current_pt_regs()->r0 = 0) #endif diff --git a/trunk/arch/alpha/include/asm/socket.h b/trunk/arch/alpha/include/asm/socket.h index dcb221a4b5be..7d2f75be932e 100644 --- a/trunk/arch/alpha/include/asm/socket.h +++ b/trunk/arch/alpha/include/asm/socket.h @@ -76,9 +76,11 @@ /* Instruct lower device to use last 4-bytes of skb data as FCS */ #define SO_NOFCS 43 +#ifdef __KERNEL__ /* O_NONBLOCK clashes with the bits used for socket types. Therefore we * have to define SOCK_NONBLOCK to a different value here. */ #define SOCK_NONBLOCK 0x40000000 +#endif /* __KERNEL__ */ #endif /* _ASM_SOCKET_H */ diff --git a/trunk/arch/alpha/include/asm/uaccess.h b/trunk/arch/alpha/include/asm/uaccess.h index b49ec2f8d6e3..766fdfde2b7a 100644 --- a/trunk/arch/alpha/include/asm/uaccess.h +++ b/trunk/arch/alpha/include/asm/uaccess.h @@ -433,36 +433,12 @@ clear_user(void __user *to, long len) #undef __module_address #undef __module_call -/* Returns: -EFAULT if exception before terminator, N if the entire - buffer filled, else strlen. */ +#define user_addr_max() \ + (segment_eq(get_fs(), USER_DS) ? 
TASK_SIZE : ~0UL) -extern long __strncpy_from_user(char *__to, const char __user *__from, long __to_len); - -extern inline long -strncpy_from_user(char *to, const char __user *from, long n) -{ - long ret = -EFAULT; - if (__access_ok((unsigned long)from, 0, get_fs())) - ret = __strncpy_from_user(to, from, n); - return ret; -} - -/* Returns: 0 if bad, string length+1 (memory size) of string if ok */ -extern long __strlen_user(const char __user *); - -extern inline long strlen_user(const char __user *str) -{ - return access_ok(VERIFY_READ,str,0) ? __strlen_user(str) : 0; -} - -/* Returns: 0 if exception before NUL or reaching the supplied limit (N), - * a value greater than N if the limit would be exceeded, else strlen. */ -extern long __strnlen_user(const char __user *, long); - -extern inline long strnlen_user(const char __user *str, long n) -{ - return access_ok(VERIFY_READ,str,0) ? __strnlen_user(str, n) : 0; -} +extern long strncpy_from_user(char *dest, const char __user *src, long count); +extern __must_check long strlen_user(const char __user *str); +extern __must_check long strnlen_user(const char __user *str, long n); /* * About the exception table: diff --git a/trunk/arch/alpha/include/asm/unistd.h b/trunk/arch/alpha/include/asm/unistd.h index 633b23b0664a..a31a78eac9b9 100644 --- a/trunk/arch/alpha/include/asm/unistd.h +++ b/trunk/arch/alpha/include/asm/unistd.h @@ -465,10 +465,12 @@ #define __NR_setns 501 #define __NR_accept4 502 #define __NR_sendmmsg 503 +#define __NR_process_vm_readv 504 +#define __NR_process_vm_writev 505 #ifdef __KERNEL__ -#define NR_SYSCALLS 504 +#define NR_SYSCALLS 506 #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_STAT64 diff --git a/trunk/arch/alpha/include/asm/word-at-a-time.h b/trunk/arch/alpha/include/asm/word-at-a-time.h new file mode 100644 index 000000000000..6b340d0f1521 --- /dev/null +++ b/trunk/arch/alpha/include/asm/word-at-a-time.h @@ -0,0 +1,55 @@ +#ifndef _ASM_WORD_AT_A_TIME_H +#define _ASM_WORD_AT_A_TIME_H + +#include + +/* + * word-at-a-time interface for Alpha. + */ + +/* + * We do not use the word_at_a_time struct on Alpha, but it needs to be + * implemented to humour the generic code. 
+ */ +struct word_at_a_time { + const unsigned long unused; +}; + +#define WORD_AT_A_TIME_CONSTANTS { 0 } + +/* Return nonzero if val has a zero */ +static inline unsigned long has_zero(unsigned long val, unsigned long *bits, const struct word_at_a_time *c) +{ + unsigned long zero_locations = __kernel_cmpbge(0, val); + *bits = zero_locations; + return zero_locations; +} + +static inline unsigned long prep_zero_mask(unsigned long val, unsigned long bits, const struct word_at_a_time *c) +{ + return bits; +} + +#define create_zero_mask(bits) (bits) + +static inline unsigned long find_zero(unsigned long bits) +{ +#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) + /* Simple if have CIX instructions */ + return __kernel_cttz(bits); +#else + unsigned long t1, t2, t3; + /* Retain lowest set bit only */ + bits &= -bits; + /* Binary search for lowest set bit */ + t1 = bits & 0xf0; + t2 = bits & 0xcc; + t3 = bits & 0xaa; + if (t1) t1 = 4; + if (t2) t2 = 2; + if (t3) t3 = 1; + return t1 + t2 + t3; +#endif +} + +#endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/trunk/arch/alpha/kernel/alpha_ksyms.c b/trunk/arch/alpha/kernel/alpha_ksyms.c index d96e742d4dc2..15fa821d09cd 100644 --- a/trunk/arch/alpha/kernel/alpha_ksyms.c +++ b/trunk/arch/alpha/kernel/alpha_ksyms.c @@ -52,7 +52,6 @@ EXPORT_SYMBOL(alpha_write_fp_reg_s); /* entry.S */ EXPORT_SYMBOL(kernel_thread); -EXPORT_SYMBOL(kernel_execve); /* Networking helper routines. */ EXPORT_SYMBOL(csum_tcpudp_magic); @@ -74,8 +73,6 @@ EXPORT_SYMBOL(alpha_fp_emul); */ EXPORT_SYMBOL(__copy_user); EXPORT_SYMBOL(__do_clear_user); -EXPORT_SYMBOL(__strncpy_from_user); -EXPORT_SYMBOL(__strnlen_user); /* * SMP-specific symbols. diff --git a/trunk/arch/alpha/kernel/entry.S b/trunk/arch/alpha/kernel/entry.S index 6d159cee5f2f..ec0da0567ab5 100644 --- a/trunk/arch/alpha/kernel/entry.S +++ b/trunk/arch/alpha/kernel/entry.S @@ -663,58 +663,6 @@ kernel_thread: br ret_to_kernel .end kernel_thread -/* - * kernel_execve(path, argv, envp) - */ - .align 4 - .globl kernel_execve - .ent kernel_execve -kernel_execve: - /* We can be called from a module. */ - ldgp $gp, 0($27) - lda $sp, -(32+SIZEOF_PT_REGS+8)($sp) - .frame $sp, 32+SIZEOF_PT_REGS+8, $26, 0 - stq $26, 0($sp) - stq $16, 8($sp) - stq $17, 16($sp) - stq $18, 24($sp) - .prologue 1 - - lda $16, 32($sp) - lda $17, 0 - lda $18, SIZEOF_PT_REGS - bsr $26, memset !samegp - - /* Avoid the HAE being gratuitously wrong, which would cause us - to do the whole turn off interrupts thing and restore it. */ - ldq $2, alpha_mv+HAE_CACHE - stq $2, 152+32($sp) - - ldq $16, 8($sp) - ldq $17, 16($sp) - ldq $18, 24($sp) - lda $19, 32($sp) - bsr $26, do_execve !samegp - - ldq $26, 0($sp) - bne $0, 1f /* error! */ - - /* Move the temporary pt_regs struct from its current location - to the top of the kernel stack frame. See copy_thread for - details for a normal process. */ - lda $16, 0x4000 - SIZEOF_PT_REGS($8) - lda $17, 32($sp) - lda $18, SIZEOF_PT_REGS - bsr $26, memmove !samegp - - /* Take that over as our new stack frame and visit userland! */ - lda $sp, 0x4000 - SIZEOF_PT_REGS($8) - br $31, ret_from_sys_call - -1: lda $sp, 32+SIZEOF_PT_REGS+8($sp) - ret -.end kernel_execve - /* * Special system calls. 
Most of these are special in that they either @@ -796,115 +744,6 @@ sys_rt_sigreturn: br ret_from_sys_call .end sys_rt_sigreturn - .align 4 - .globl sys_sethae - .ent sys_sethae -sys_sethae: - .prologue 0 - stq $16, 152($sp) - ret -.end sys_sethae - - .align 4 - .globl osf_getpriority - .ent osf_getpriority -osf_getpriority: - lda $sp, -16($sp) - stq $26, 0($sp) - .prologue 0 - - jsr $26, sys_getpriority - - ldq $26, 0($sp) - blt $0, 1f - - /* Return value is the unbiased priority, i.e. 20 - prio. - This does result in negative return values, so signal - no error by writing into the R0 slot. */ - lda $1, 20 - stq $31, 16($sp) - subl $1, $0, $0 - unop - -1: lda $sp, 16($sp) - ret -.end osf_getpriority - - .align 4 - .globl sys_getxuid - .ent sys_getxuid -sys_getxuid: - .prologue 0 - ldq $2, TI_TASK($8) - ldq $3, TASK_CRED($2) - ldl $0, CRED_UID($3) - ldl $1, CRED_EUID($3) - stq $1, 80($sp) - ret -.end sys_getxuid - - .align 4 - .globl sys_getxgid - .ent sys_getxgid -sys_getxgid: - .prologue 0 - ldq $2, TI_TASK($8) - ldq $3, TASK_CRED($2) - ldl $0, CRED_GID($3) - ldl $1, CRED_EGID($3) - stq $1, 80($sp) - ret -.end sys_getxgid - - .align 4 - .globl sys_getxpid - .ent sys_getxpid -sys_getxpid: - .prologue 0 - ldq $2, TI_TASK($8) - - /* See linux/kernel/timer.c sys_getppid for discussion - about this loop. */ - ldq $3, TASK_GROUP_LEADER($2) - ldq $4, TASK_REAL_PARENT($3) - ldl $0, TASK_TGID($2) -1: ldl $1, TASK_TGID($4) -#ifdef CONFIG_SMP - mov $4, $5 - mb - ldq $3, TASK_GROUP_LEADER($2) - ldq $4, TASK_REAL_PARENT($3) - cmpeq $4, $5, $5 - beq $5, 1b -#endif - stq $1, 80($sp) - ret -.end sys_getxpid - - .align 4 - .globl sys_alpha_pipe - .ent sys_alpha_pipe -sys_alpha_pipe: - lda $sp, -16($sp) - stq $26, 0($sp) - .prologue 0 - - mov $31, $17 - lda $16, 8($sp) - jsr $26, do_pipe_flags - - ldq $26, 0($sp) - bne $0, 1f - - /* The return values are in $0 and $20. */ - ldl $1, 12($sp) - ldl $0, 8($sp) - - stq $1, 80+16($sp) -1: lda $sp, 16($sp) - ret -.end sys_alpha_pipe - .align 4 .globl sys_execve .ent sys_execve diff --git a/trunk/arch/alpha/kernel/osf_sys.c b/trunk/arch/alpha/kernel/osf_sys.c index 98a103621af6..bc1acdda7a5e 100644 --- a/trunk/arch/alpha/kernel/osf_sys.c +++ b/trunk/arch/alpha/kernel/osf_sys.c @@ -1404,3 +1404,52 @@ SYSCALL_DEFINE3(osf_writev, unsigned long, fd, } #endif + +SYSCALL_DEFINE2(osf_getpriority, int, which, int, who) +{ + int prio = sys_getpriority(which, who); + if (prio >= 0) { + /* Return value is the unbiased priority, i.e. 20 - prio. + This does result in negative return values, so signal + no error */ + force_successful_syscall_return(); + prio = 20 - prio; + } + return prio; +} + +SYSCALL_DEFINE0(getxuid) +{ + current_pt_regs()->r20 = sys_geteuid(); + return sys_getuid(); +} + +SYSCALL_DEFINE0(getxgid) +{ + current_pt_regs()->r20 = sys_getegid(); + return sys_getgid(); +} + +SYSCALL_DEFINE0(getxpid) +{ + current_pt_regs()->r20 = sys_getppid(); + return sys_getpid(); +} + +SYSCALL_DEFINE0(alpha_pipe) +{ + int fd[2]; + int res = do_pipe_flags(fd, 0); + if (!res) { + /* The return values are in $0 and $20. 
*/
+ current_pt_regs()->r20 = fd[1];
+ res = fd[0];
+ }
+ return res;
+}
+
+SYSCALL_DEFINE1(sethae, unsigned long, val)
+{
+ current_pt_regs()->hae = val;
+ return 0;
+}
diff --git a/trunk/arch/alpha/kernel/process.c b/trunk/arch/alpha/kernel/process.c
index 153d3fce3e8e..d6fde98b74b3 100644
--- a/trunk/arch/alpha/kernel/process.c
+++ b/trunk/arch/alpha/kernel/process.c
@@ -455,3 +455,22 @@ get_wchan(struct task_struct *p)
 }
 return pc;
 }
+
+int kernel_execve(const char *path, const char *const argv[], const char *const envp[])
+{
+ /* Avoid the HAE being gratuitously wrong, which would cause us
+ to do the whole turn off interrupts thing and restore it. */
+ struct pt_regs regs = {.hae = alpha_mv.hae_cache};
+ int err = do_execve(path, argv, envp, &regs);
+ if (!err) {
+ struct pt_regs *p = current_pt_regs();
+ /* copy regs to normal position and off to userland we go... */
+ *p = regs;
+ __asm__ __volatile__ (
+ "mov %0, $sp;"
+ "br $31, ret_from_sys_call"
+ : : "r"(p));
+ }
+ return err;
+}
+EXPORT_SYMBOL(kernel_execve);
diff --git a/trunk/arch/alpha/kernel/systbls.S b/trunk/arch/alpha/kernel/systbls.S
index 87835235f114..2ac6b45c3e00 100644
--- a/trunk/arch/alpha/kernel/systbls.S
+++ b/trunk/arch/alpha/kernel/systbls.S
@@ -111,7 +111,7 @@ sys_call_table:
 .quad sys_socket
 .quad sys_connect
 .quad sys_accept
- .quad osf_getpriority /* 100 */
+ .quad sys_osf_getpriority /* 100 */
 .quad sys_send
 .quad sys_recv
 .quad sys_sigreturn
@@ -522,6 +522,8 @@ sys_call_table:
 .quad sys_setns
 .quad sys_accept4
 .quad sys_sendmmsg
+ .quad sys_process_vm_readv
+ .quad sys_process_vm_writev /* 505 */
 .size sys_call_table, . - sys_call_table
 .type sys_call_table, @object
diff --git a/trunk/arch/alpha/lib/Makefile b/trunk/arch/alpha/lib/Makefile
index c0a83ab62b78..59660743237c 100644
--- a/trunk/arch/alpha/lib/Makefile
+++ b/trunk/arch/alpha/lib/Makefile
@@ -31,8 +31,6 @@ lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \
 $(ev6-y)memchr.o \
 $(ev6-y)copy_user.o \
 $(ev6-y)clear_user.o \
- $(ev6-y)strncpy_from_user.o \
- $(ev67-y)strlen_user.o \
 $(ev6-y)csum_ipv6_magic.o \
 $(ev6-y)clear_page.o \
 $(ev6-y)copy_page.o \
diff --git a/trunk/arch/alpha/lib/ev6-strncpy_from_user.S b/trunk/arch/alpha/lib/ev6-strncpy_from_user.S
deleted file mode 100644
index d2e28178cacc..000000000000
--- a/trunk/arch/alpha/lib/ev6-strncpy_from_user.S
+++ /dev/null
@@ -1,424 +0,0 @@
-/*
- * arch/alpha/lib/ev6-strncpy_from_user.S
- * 21264 version contributed by Rick Gorton
- *
- * Just like strncpy except in the return value:
- *
- * -EFAULT if an exception occurs before the terminator is copied.
- * N if the buffer filled.
- *
- * Otherwise the length of the string is returned.
- *
- * Much of the information about 21264 scheduling/coding comes from:
- * Compiler Writer's Guide for the Alpha 21264
- * abbreviated as 'CWG' in other comments here
- * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
- * Scheduling notation:
- * E - either cluster
- * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
- * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
- * A bunch of instructions got moved and temp registers were changed
- * to aid in scheduling. Control flow was also re-arranged to eliminate
- * branches, and to provide longer code sequences to enable better scheduling.
- * A total rewrite (using byte load/stores for start & tail sequences)
- * is desirable, but very difficult to do without a from-scratch rewrite.
- * Save that for the future.
- */ - - -#include -#include - - -/* Allow an exception for an insn; exit if we get one. */ -#define EX(x,y...) \ - 99: x,##y; \ - .section __ex_table,"a"; \ - .long 99b - .; \ - lda $31, $exception-99b($0); \ - .previous - - - .set noat - .set noreorder - .text - - .globl __strncpy_from_user - .ent __strncpy_from_user - .frame $30, 0, $26 - .prologue 0 - - .align 4 -__strncpy_from_user: - and a0, 7, t3 # E : find dest misalignment - beq a2, $zerolength # U : - - /* Are source and destination co-aligned? */ - mov a0, v0 # E : save the string start - xor a0, a1, t4 # E : - EX( ldq_u t1, 0(a1) ) # L : Latency=3 load first quadword - ldq_u t0, 0(a0) # L : load first (partial) aligned dest quadword - - addq a2, t3, a2 # E : bias count by dest misalignment - subq a2, 1, a3 # E : - addq zero, 1, t10 # E : - and t4, 7, t4 # E : misalignment between the two - - and a3, 7, t6 # E : number of tail bytes - sll t10, t6, t10 # E : t10 = bitmask of last count byte - bne t4, $unaligned # U : - lda t2, -1 # E : build a mask against false zero - - /* - * We are co-aligned; take care of a partial first word. - * On entry to this basic block: - * t0 == the first destination word for masking back in - * t1 == the first source word. - */ - - srl a3, 3, a2 # E : a2 = loop counter = (count - 1)/8 - addq a1, 8, a1 # E : - mskqh t2, a1, t2 # U : detection in the src word - nop - - /* Create the 1st output word and detect 0's in the 1st input word. */ - mskqh t1, a1, t3 # U : - mskql t0, a1, t0 # U : assemble the first output word - ornot t1, t2, t2 # E : - nop - - cmpbge zero, t2, t8 # E : bits set iff null found - or t0, t3, t0 # E : - beq a2, $a_eoc # U : - bne t8, $a_eos # U : 2nd branch in a quad. Bad. - - /* On entry to this basic block: - * t0 == a source quad not containing a null. - * a0 - current aligned destination address - * a1 - current aligned source address - * a2 - count of quadwords to move. - * NOTE: Loop improvement - unrolling this is going to be - * a huge win, since we're going to stall otherwise. - * Fix this later. For _really_ large copies, look - * at using wh64 on a look-ahead basis. See the code - * in clear_user.S and copy_user.S. - * Presumably, since (a0) and (a1) do not overlap (by C definition) - * Lots of nops here: - * - Separate loads from stores - * - Keep it to 1 branch/quadpack so the branch predictor - * can train. - */ -$a_loop: - stq_u t0, 0(a0) # L : - addq a0, 8, a0 # E : - nop - subq a2, 1, a2 # E : - - EX( ldq_u t0, 0(a1) ) # L : - addq a1, 8, a1 # E : - cmpbge zero, t0, t8 # E : Stall 2 cycles on t0 - beq a2, $a_eoc # U : - - beq t8, $a_loop # U : - nop - nop - nop - - /* Take care of the final (partial) word store. At this point - * the end-of-count bit is set in t8 iff it applies. - * - * On entry to this basic block we have: - * t0 == the source word containing the null - * t8 == the cmpbge mask that found it. - */ -$a_eos: - negq t8, t12 # E : find low bit set - and t8, t12, t12 # E : - - /* We're doing a partial word store and so need to combine - our source and original destination words. */ - ldq_u t1, 0(a0) # L : - subq t12, 1, t6 # E : - - or t12, t6, t8 # E : - zapnot t0, t8, t0 # U : clear src bytes > null - zap t1, t8, t1 # U : clear dst bytes <= null - or t0, t1, t0 # E : - - stq_u t0, 0(a0) # L : - br $finish_up # L0 : - nop - nop - - /* Add the end-of-count bit to the eos detection bitmask. */ - .align 4 -$a_eoc: - or t10, t8, t8 - br $a_eos - nop - nop - - -/* The source and destination are not co-aligned. Align the destination - and cope. 
We have to be very careful about not reading too much and - causing a SEGV. */ - - .align 4 -$u_head: - /* We know just enough now to be able to assemble the first - full source word. We can still find a zero at the end of it - that prevents us from outputting the whole thing. - - On entry to this basic block: - t0 == the first dest word, unmasked - t1 == the shifted low bits of the first source word - t6 == bytemask that is -1 in dest word bytes */ - - EX( ldq_u t2, 8(a1) ) # L : load second src word - addq a1, 8, a1 # E : - mskql t0, a0, t0 # U : mask trailing garbage in dst - extqh t2, a1, t4 # U : - - or t1, t4, t1 # E : first aligned src word complete - mskqh t1, a0, t1 # U : mask leading garbage in src - or t0, t1, t0 # E : first output word complete - or t0, t6, t6 # E : mask original data for zero test - - cmpbge zero, t6, t8 # E : - beq a2, $u_eocfin # U : - bne t8, $u_final # U : bad news - 2nd branch in a quad - lda t6, -1 # E : mask out the bits we have - - mskql t6, a1, t6 # U : already seen - stq_u t0, 0(a0) # L : store first output word - or t6, t2, t2 # E : - cmpbge zero, t2, t8 # E : find nulls in second partial - - addq a0, 8, a0 # E : - subq a2, 1, a2 # E : - bne t8, $u_late_head_exit # U : - nop - - /* Finally, we've got all the stupid leading edge cases taken care - of and we can set up to enter the main loop. */ - - extql t2, a1, t1 # U : position hi-bits of lo word - EX( ldq_u t2, 8(a1) ) # L : read next high-order source word - addq a1, 8, a1 # E : - cmpbge zero, t2, t8 # E : - - beq a2, $u_eoc # U : - bne t8, $u_eos # U : - nop - nop - - /* Unaligned copy main loop. In order to avoid reading too much, - the loop is structured to detect zeros in aligned source words. - This has, unfortunately, effectively pulled half of a loop - iteration out into the head and half into the tail, but it does - prevent nastiness from accumulating in the very thing we want - to run as fast as possible. - - On entry to this basic block: - t1 == the shifted high-order bits from the previous source word - t2 == the unshifted current source word - - We further know that t2 does not contain a null terminator. */ - - /* - * Extra nops here: - * separate load quads from store quads - * only one branch/quad to permit predictor training - */ - - .align 4 -$u_loop: - extqh t2, a1, t0 # U : extract high bits for current word - addq a1, 8, a1 # E : - extql t2, a1, t3 # U : extract low bits for next time - addq a0, 8, a0 # E : - - or t0, t1, t0 # E : current dst word now complete - EX( ldq_u t2, 0(a1) ) # L : load high word for next time - subq a2, 1, a2 # E : - nop - - stq_u t0, -8(a0) # L : save the current word - mov t3, t1 # E : - cmpbge zero, t2, t8 # E : test new word for eos - beq a2, $u_eoc # U : - - beq t8, $u_loop # U : - nop - nop - nop - - /* We've found a zero somewhere in the source word we just read. - If it resides in the lower half, we have one (probably partial) - word to write out, and if it resides in the upper half, we - have one full and one partial word left to write out. - - On entry to this basic block: - t1 == the shifted high-order bits from the previous source word - t2 == the unshifted current source word. */ - .align 4 -$u_eos: - extqh t2, a1, t0 # U : - or t0, t1, t0 # E : first (partial) source word complete - cmpbge zero, t0, t8 # E : is the null in this first bit? 
- nop - - bne t8, $u_final # U : - stq_u t0, 0(a0) # L : the null was in the high-order bits - addq a0, 8, a0 # E : - subq a2, 1, a2 # E : - - .align 4 -$u_late_head_exit: - extql t2, a1, t0 # U : - cmpbge zero, t0, t8 # E : - or t8, t10, t6 # E : - cmoveq a2, t6, t8 # E : - - /* Take care of a final (probably partial) result word. - On entry to this basic block: - t0 == assembled source word - t8 == cmpbge mask that found the null. */ - .align 4 -$u_final: - negq t8, t6 # E : isolate low bit set - and t6, t8, t12 # E : - ldq_u t1, 0(a0) # L : - subq t12, 1, t6 # E : - - or t6, t12, t8 # E : - zapnot t0, t8, t0 # U : kill source bytes > null - zap t1, t8, t1 # U : kill dest bytes <= null - or t0, t1, t0 # E : - - stq_u t0, 0(a0) # E : - br $finish_up # U : - nop - nop - - .align 4 -$u_eoc: # end-of-count - extqh t2, a1, t0 # U : - or t0, t1, t0 # E : - cmpbge zero, t0, t8 # E : - nop - - .align 4 -$u_eocfin: # end-of-count, final word - or t10, t8, t8 # E : - br $u_final # U : - nop - nop - - /* Unaligned copy entry point. */ - .align 4 -$unaligned: - - srl a3, 3, a2 # U : a2 = loop counter = (count - 1)/8 - and a0, 7, t4 # E : find dest misalignment - and a1, 7, t5 # E : find src misalignment - mov zero, t0 # E : - - /* Conditionally load the first destination word and a bytemask - with 0xff indicating that the destination byte is sacrosanct. */ - - mov zero, t6 # E : - beq t4, 1f # U : - ldq_u t0, 0(a0) # L : - lda t6, -1 # E : - - mskql t6, a0, t6 # E : - nop - nop - nop - - .align 4 -1: - subq a1, t4, a1 # E : sub dest misalignment from src addr - /* If source misalignment is larger than dest misalignment, we need - extra startup checks to avoid SEGV. */ - cmplt t4, t5, t12 # E : - extql t1, a1, t1 # U : shift src into place - lda t2, -1 # E : for creating masks later - - beq t12, $u_head # U : - mskqh t2, t5, t2 # U : begin src byte validity mask - cmpbge zero, t1, t8 # E : is there a zero? - nop - - extql t2, a1, t2 # U : - or t8, t10, t5 # E : test for end-of-count too - cmpbge zero, t2, t3 # E : - cmoveq a2, t5, t8 # E : Latency=2, extra map slot - - nop # E : goes with cmov - andnot t8, t3, t8 # E : - beq t8, $u_head # U : - nop - - /* At this point we've found a zero in the first partial word of - the source. We need to isolate the valid source data and mask - it into the original destination data. (Incidentally, we know - that we'll need at least one byte of that original dest word.) */ - - ldq_u t0, 0(a0) # L : - negq t8, t6 # E : build bitmask of bytes <= zero - mskqh t1, t4, t1 # U : - and t6, t8, t12 # E : - - subq t12, 1, t6 # E : - or t6, t12, t8 # E : - zapnot t2, t8, t2 # U : prepare source word; mirror changes - zapnot t1, t8, t1 # U : to source validity mask - - andnot t0, t2, t0 # E : zero place for source to reside - or t0, t1, t0 # E : and put it there - stq_u t0, 0(a0) # L : - nop - - .align 4 -$finish_up: - zapnot t0, t12, t4 # U : was last byte written null? 
- and t12, 0xf0, t3 # E : binary search for the address of the - cmovne t4, 1, t4 # E : Latency=2, extra map slot - nop # E : with cmovne - - and t12, 0xcc, t2 # E : last byte written - and t12, 0xaa, t1 # E : - cmovne t3, 4, t3 # E : Latency=2, extra map slot - nop # E : with cmovne - - bic a0, 7, t0 - cmovne t2, 2, t2 # E : Latency=2, extra map slot - nop # E : with cmovne - nop - - cmovne t1, 1, t1 # E : Latency=2, extra map slot - nop # E : with cmovne - addq t0, t3, t0 # E : - addq t1, t2, t1 # E : - - addq t0, t1, t0 # E : - addq t0, t4, t0 # add one if we filled the buffer - subq t0, v0, v0 # find string length - ret # L0 : - - .align 4 -$zerolength: - nop - nop - nop - clr v0 - -$exception: - nop - nop - nop - ret - - .end __strncpy_from_user diff --git a/trunk/arch/alpha/lib/ev67-strlen_user.S b/trunk/arch/alpha/lib/ev67-strlen_user.S deleted file mode 100644 index 57e0d77b81a6..000000000000 --- a/trunk/arch/alpha/lib/ev67-strlen_user.S +++ /dev/null @@ -1,107 +0,0 @@ -/* - * arch/alpha/lib/ev67-strlen_user.S - * 21264 version contributed by Rick Gorton - * - * Return the length of the string including the NULL terminator - * (strlen+1) or zero if an error occurred. - * - * In places where it is critical to limit the processing time, - * and the data is not trusted, strnlen_user() should be used. - * It will return a value greater than its second argument if - * that limit would be exceeded. This implementation is allowed - * to access memory beyond the limit, but will not cross a page - * boundary when doing so. - * - * Much of the information about 21264 scheduling/coding comes from: - * Compiler Writer's Guide for the Alpha 21264 - * abbreviated as 'CWG' in other comments here - * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html - * Scheduling notation: - * E - either cluster - * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 - * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 - * Try not to change the actual algorithm if possible for consistency. - */ - -#include - - -/* Allow an exception for an insn; exit if we get one. */ -#define EX(x,y...) 
\ - 99: x,##y; \ - .section __ex_table,"a"; \ - .long 99b - .; \ - lda v0, $exception-99b(zero); \ - .previous - - - .set noreorder - .set noat - .text - - .globl __strlen_user - .ent __strlen_user - .frame sp, 0, ra - - .align 4 -__strlen_user: - ldah a1, 32767(zero) # do not use plain strlen_user() for strings - # that might be almost 2 GB long; you should - # be using strnlen_user() instead - nop - nop - nop - - .globl __strnlen_user - - .align 4 -__strnlen_user: - .prologue 0 - EX( ldq_u t0, 0(a0) ) # L : load first quadword (a0 may be misaligned) - lda t1, -1(zero) # E : - - insqh t1, a0, t1 # U : - andnot a0, 7, v0 # E : - or t1, t0, t0 # E : - subq a0, 1, a0 # E : get our +1 for the return - - cmpbge zero, t0, t1 # E : t1 <- bitmask: bit i == 1 <==> i-th byte == 0 - subq a1, 7, t2 # E : - subq a0, v0, t0 # E : - bne t1, $found # U : - - addq t2, t0, t2 # E : - addq a1, 1, a1 # E : - nop # E : - nop # E : - - .align 4 -$loop: ble t2, $limit # U : - EX( ldq t0, 8(v0) ) # L : - nop # E : - nop # E : - - cmpbge zero, t0, t1 # E : - subq t2, 8, t2 # E : - addq v0, 8, v0 # E : addr += 8 - beq t1, $loop # U : - -$found: cttz t1, t2 # U0 : - addq v0, t2, v0 # E : - subq v0, a0, v0 # E : - ret # L0 : - -$exception: - nop - nop - nop - ret - - .align 4 # currently redundant -$limit: - nop - nop - subq a1, t2, v0 - ret - - .end __strlen_user diff --git a/trunk/arch/alpha/lib/strlen_user.S b/trunk/arch/alpha/lib/strlen_user.S deleted file mode 100644 index 508a18e96479..000000000000 --- a/trunk/arch/alpha/lib/strlen_user.S +++ /dev/null @@ -1,91 +0,0 @@ -/* - * arch/alpha/lib/strlen_user.S - * - * Return the length of the string including the NUL terminator - * (strlen+1) or zero if an error occurred. - * - * In places where it is critical to limit the processing time, - * and the data is not trusted, strnlen_user() should be used. - * It will return a value greater than its second argument if - * that limit would be exceeded. This implementation is allowed - * to access memory beyond the limit, but will not cross a page - * boundary when doing so. - */ - -#include - - -/* Allow an exception for an insn; exit if we get one. */ -#define EX(x,y...) 
\ - 99: x,##y; \ - .section __ex_table,"a"; \ - .long 99b - .; \ - lda v0, $exception-99b(zero); \ - .previous - - - .set noreorder - .set noat - .text - - .globl __strlen_user - .ent __strlen_user - .frame sp, 0, ra - - .align 3 -__strlen_user: - ldah a1, 32767(zero) # do not use plain strlen_user() for strings - # that might be almost 2 GB long; you should - # be using strnlen_user() instead - - .globl __strnlen_user - - .align 3 -__strnlen_user: - .prologue 0 - - EX( ldq_u t0, 0(a0) ) # load first quadword (a0 may be misaligned) - lda t1, -1(zero) - insqh t1, a0, t1 - andnot a0, 7, v0 - or t1, t0, t0 - subq a0, 1, a0 # get our +1 for the return - cmpbge zero, t0, t1 # t1 <- bitmask: bit i == 1 <==> i-th byte == 0 - subq a1, 7, t2 - subq a0, v0, t0 - bne t1, $found - - addq t2, t0, t2 - addq a1, 1, a1 - - .align 3 -$loop: ble t2, $limit - EX( ldq t0, 8(v0) ) - subq t2, 8, t2 - addq v0, 8, v0 # addr += 8 - cmpbge zero, t0, t1 - beq t1, $loop - -$found: negq t1, t2 # clear all but least set bit - and t1, t2, t1 - - and t1, 0xf0, t2 # binary search for that set bit - and t1, 0xcc, t3 - and t1, 0xaa, t4 - cmovne t2, 4, t2 - cmovne t3, 2, t3 - cmovne t4, 1, t4 - addq t2, t3, t2 - addq v0, t4, v0 - addq v0, t2, v0 - nop # dual issue next two on ev4 and ev5 - subq v0, a0, v0 -$exception: - ret - - .align 3 # currently redundant -$limit: - subq a1, t2, v0 - ret - - .end __strlen_user diff --git a/trunk/arch/alpha/lib/strncpy_from_user.S b/trunk/arch/alpha/lib/strncpy_from_user.S deleted file mode 100644 index 73ee21160ff7..000000000000 --- a/trunk/arch/alpha/lib/strncpy_from_user.S +++ /dev/null @@ -1,339 +0,0 @@ -/* - * arch/alpha/lib/strncpy_from_user.S - * Contributed by Richard Henderson (rth@tamu.edu) - * - * Just like strncpy except in the return value: - * - * -EFAULT if an exception occurs before the terminator is copied. - * N if the buffer filled. - * - * Otherwise the length of the string is returned. - */ - - -#include -#include - - -/* Allow an exception for an insn; exit if we get one. */ -#define EX(x,y...) \ - 99: x,##y; \ - .section __ex_table,"a"; \ - .long 99b - .; \ - lda $31, $exception-99b($0); \ - .previous - - - .set noat - .set noreorder - .text - - .globl __strncpy_from_user - .ent __strncpy_from_user - .frame $30, 0, $26 - .prologue 0 - - .align 3 -$aligned: - /* On entry to this basic block: - t0 == the first destination word for masking back in - t1 == the first source word. */ - - /* Create the 1st output word and detect 0's in the 1st input word. */ - lda t2, -1 # e1 : build a mask against false zero - mskqh t2, a1, t2 # e0 : detection in the src word - mskqh t1, a1, t3 # e0 : - ornot t1, t2, t2 # .. e1 : - mskql t0, a1, t0 # e0 : assemble the first output word - cmpbge zero, t2, t8 # .. e1 : bits set iff null found - or t0, t3, t0 # e0 : - beq a2, $a_eoc # .. e1 : - bne t8, $a_eos # .. e1 : - - /* On entry to this basic block: - t0 == a source word not containing a null. */ - -$a_loop: - stq_u t0, 0(a0) # e0 : - addq a0, 8, a0 # .. e1 : - EX( ldq_u t0, 0(a1) ) # e0 : - addq a1, 8, a1 # .. e1 : - subq a2, 1, a2 # e0 : - cmpbge zero, t0, t8 # .. e1 (stall) - beq a2, $a_eoc # e1 : - beq t8, $a_loop # e1 : - - /* Take care of the final (partial) word store. At this point - the end-of-count bit is set in t8 iff it applies. - - On entry to this basic block we have: - t0 == the source word containing the null - t8 == the cmpbge mask that found it. 
*/ - -$a_eos: - negq t8, t12 # e0 : find low bit set - and t8, t12, t12 # e1 (stall) - - /* For the sake of the cache, don't read a destination word - if we're not going to need it. */ - and t12, 0x80, t6 # e0 : - bne t6, 1f # .. e1 (zdb) - - /* We're doing a partial word store and so need to combine - our source and original destination words. */ - ldq_u t1, 0(a0) # e0 : - subq t12, 1, t6 # .. e1 : - or t12, t6, t8 # e0 : - unop # - zapnot t0, t8, t0 # e0 : clear src bytes > null - zap t1, t8, t1 # .. e1 : clear dst bytes <= null - or t0, t1, t0 # e1 : - -1: stq_u t0, 0(a0) - br $finish_up - - /* Add the end-of-count bit to the eos detection bitmask. */ -$a_eoc: - or t10, t8, t8 - br $a_eos - - /*** The Function Entry Point ***/ - .align 3 -__strncpy_from_user: - mov a0, v0 # save the string start - beq a2, $zerolength - - /* Are source and destination co-aligned? */ - xor a0, a1, t1 # e0 : - and a0, 7, t0 # .. e1 : find dest misalignment - and t1, 7, t1 # e0 : - addq a2, t0, a2 # .. e1 : bias count by dest misalignment - subq a2, 1, a2 # e0 : - and a2, 7, t2 # e1 : - srl a2, 3, a2 # e0 : a2 = loop counter = (count - 1)/8 - addq zero, 1, t10 # .. e1 : - sll t10, t2, t10 # e0 : t10 = bitmask of last count byte - bne t1, $unaligned # .. e1 : - - /* We are co-aligned; take care of a partial first word. */ - - EX( ldq_u t1, 0(a1) ) # e0 : load first src word - addq a1, 8, a1 # .. e1 : - - beq t0, $aligned # avoid loading dest word if not needed - ldq_u t0, 0(a0) # e0 : - br $aligned # .. e1 : - - -/* The source and destination are not co-aligned. Align the destination - and cope. We have to be very careful about not reading too much and - causing a SEGV. */ - - .align 3 -$u_head: - /* We know just enough now to be able to assemble the first - full source word. We can still find a zero at the end of it - that prevents us from outputting the whole thing. - - On entry to this basic block: - t0 == the first dest word, unmasked - t1 == the shifted low bits of the first source word - t6 == bytemask that is -1 in dest word bytes */ - - EX( ldq_u t2, 8(a1) ) # e0 : load second src word - addq a1, 8, a1 # .. e1 : - mskql t0, a0, t0 # e0 : mask trailing garbage in dst - extqh t2, a1, t4 # e0 : - or t1, t4, t1 # e1 : first aligned src word complete - mskqh t1, a0, t1 # e0 : mask leading garbage in src - or t0, t1, t0 # e0 : first output word complete - or t0, t6, t6 # e1 : mask original data for zero test - cmpbge zero, t6, t8 # e0 : - beq a2, $u_eocfin # .. e1 : - bne t8, $u_final # e1 : - - lda t6, -1 # e1 : mask out the bits we have - mskql t6, a1, t6 # e0 : already seen - stq_u t0, 0(a0) # e0 : store first output word - or t6, t2, t2 # .. e1 : - cmpbge zero, t2, t8 # e0 : find nulls in second partial - addq a0, 8, a0 # .. e1 : - subq a2, 1, a2 # e0 : - bne t8, $u_late_head_exit # .. e1 : - - /* Finally, we've got all the stupid leading edge cases taken care - of and we can set up to enter the main loop. */ - - extql t2, a1, t1 # e0 : position hi-bits of lo word - EX( ldq_u t2, 8(a1) ) # .. e1 : read next high-order source word - addq a1, 8, a1 # e0 : - cmpbge zero, t2, t8 # e1 (stall) - beq a2, $u_eoc # e1 : - bne t8, $u_eos # e1 : - - /* Unaligned copy main loop. In order to avoid reading too much, - the loop is structured to detect zeros in aligned source words. - This has, unfortunately, effectively pulled half of a loop - iteration out into the head and half into the tail, but it does - prevent nastiness from accumulating in the very thing we want - to run as fast as possible. 
- - On entry to this basic block: - t1 == the shifted high-order bits from the previous source word - t2 == the unshifted current source word - - We further know that t2 does not contain a null terminator. */ - - .align 3 -$u_loop: - extqh t2, a1, t0 # e0 : extract high bits for current word - addq a1, 8, a1 # .. e1 : - extql t2, a1, t3 # e0 : extract low bits for next time - addq a0, 8, a0 # .. e1 : - or t0, t1, t0 # e0 : current dst word now complete - EX( ldq_u t2, 0(a1) ) # .. e1 : load high word for next time - stq_u t0, -8(a0) # e0 : save the current word - mov t3, t1 # .. e1 : - subq a2, 1, a2 # e0 : - cmpbge zero, t2, t8 # .. e1 : test new word for eos - beq a2, $u_eoc # e1 : - beq t8, $u_loop # e1 : - - /* We've found a zero somewhere in the source word we just read. - If it resides in the lower half, we have one (probably partial) - word to write out, and if it resides in the upper half, we - have one full and one partial word left to write out. - - On entry to this basic block: - t1 == the shifted high-order bits from the previous source word - t2 == the unshifted current source word. */ -$u_eos: - extqh t2, a1, t0 # e0 : - or t0, t1, t0 # e1 : first (partial) source word complete - - cmpbge zero, t0, t8 # e0 : is the null in this first bit? - bne t8, $u_final # .. e1 (zdb) - - stq_u t0, 0(a0) # e0 : the null was in the high-order bits - addq a0, 8, a0 # .. e1 : - subq a2, 1, a2 # e1 : - -$u_late_head_exit: - extql t2, a1, t0 # .. e0 : - cmpbge zero, t0, t8 # e0 : - or t8, t10, t6 # e1 : - cmoveq a2, t6, t8 # e0 : - nop # .. e1 : - - /* Take care of a final (probably partial) result word. - On entry to this basic block: - t0 == assembled source word - t8 == cmpbge mask that found the null. */ -$u_final: - negq t8, t6 # e0 : isolate low bit set - and t6, t8, t12 # e1 : - - and t12, 0x80, t6 # e0 : avoid dest word load if we can - bne t6, 1f # .. e1 (zdb) - - ldq_u t1, 0(a0) # e0 : - subq t12, 1, t6 # .. e1 : - or t6, t12, t8 # e0 : - zapnot t0, t8, t0 # .. e1 : kill source bytes > null - zap t1, t8, t1 # e0 : kill dest bytes <= null - or t0, t1, t0 # e1 : - -1: stq_u t0, 0(a0) # e0 : - br $finish_up - -$u_eoc: # end-of-count - extqh t2, a1, t0 - or t0, t1, t0 - cmpbge zero, t0, t8 - -$u_eocfin: # end-of-count, final word - or t10, t8, t8 - br $u_final - - /* Unaligned copy entry point. */ - .align 3 -$unaligned: - - EX( ldq_u t1, 0(a1) ) # e0 : load first source word - - and a0, 7, t4 # .. e1 : find dest misalignment - and a1, 7, t5 # e0 : find src misalignment - - /* Conditionally load the first destination word and a bytemask - with 0xff indicating that the destination byte is sacrosanct. */ - - mov zero, t0 # .. e1 : - mov zero, t6 # e0 : - beq t4, 1f # .. e1 : - ldq_u t0, 0(a0) # e0 : - lda t6, -1 # .. e1 : - mskql t6, a0, t6 # e0 : -1: - subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr - - /* If source misalignment is larger than dest misalignment, we need - extra startup checks to avoid SEGV. */ - - cmplt t4, t5, t12 # e1 : - extql t1, a1, t1 # .. e0 : shift src into place - lda t2, -1 # e0 : for creating masks later - beq t12, $u_head # e1 : - - mskqh t2, t5, t2 # e0 : begin src byte validity mask - cmpbge zero, t1, t8 # .. e1 : is there a zero? - extql t2, a1, t2 # e0 : - or t8, t10, t5 # .. e1 : test for end-of-count too - cmpbge zero, t2, t3 # e0 : - cmoveq a2, t5, t8 # .. e1 : - andnot t8, t3, t8 # e0 : - beq t8, $u_head # .. e1 (zdb) - - /* At this point we've found a zero in the first partial word of - the source. 
We need to isolate the valid source data and mask - it into the original destination data. (Incidentally, we know - that we'll need at least one byte of that original dest word.) */ - - ldq_u t0, 0(a0) # e0 : - negq t8, t6 # .. e1 : build bitmask of bytes <= zero - mskqh t1, t4, t1 # e0 : - and t6, t8, t12 # .. e1 : - subq t12, 1, t6 # e0 : - or t6, t12, t8 # e1 : - - zapnot t2, t8, t2 # e0 : prepare source word; mirror changes - zapnot t1, t8, t1 # .. e1 : to source validity mask - - andnot t0, t2, t0 # e0 : zero place for source to reside - or t0, t1, t0 # e1 : and put it there - stq_u t0, 0(a0) # e0 : - -$finish_up: - zapnot t0, t12, t4 # was last byte written null? - cmovne t4, 1, t4 - - and t12, 0xf0, t3 # binary search for the address of the - and t12, 0xcc, t2 # last byte written - and t12, 0xaa, t1 - bic a0, 7, t0 - cmovne t3, 4, t3 - cmovne t2, 2, t2 - cmovne t1, 1, t1 - addq t0, t3, t0 - addq t1, t2, t1 - addq t0, t1, t0 - addq t0, t4, t0 # add one if we filled the buffer - - subq t0, v0, v0 # find string length - ret - -$zerolength: - clr v0 -$exception: - ret - - .end __strncpy_from_user diff --git a/trunk/arch/alpha/mm/fault.c b/trunk/arch/alpha/mm/fault.c index 5eecab1a84ef..0c4132dd3507 100644 --- a/trunk/arch/alpha/mm/fault.c +++ b/trunk/arch/alpha/mm/fault.c @@ -89,6 +89,8 @@ do_page_fault(unsigned long address, unsigned long mmcsr, const struct exception_table_entry *fixup; int fault, si_code = SEGV_MAPERR; siginfo_t info; + unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | + (cause > 0 ? FAULT_FLAG_WRITE : 0)); /* As of EV6, a load into $31/$f31 is a prefetch, and never faults (or is suppressed by the PALcode). Support that for older CPUs @@ -114,6 +116,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr, goto vmalloc_fault; #endif +retry: down_read(&mm->mmap_sem); vma = find_vma(mm, address); if (!vma) @@ -144,8 +147,11 @@ do_page_fault(unsigned long address, unsigned long mmcsr, /* If for any reason at all we couldn't handle the fault, make sure we exit gracefully rather than endlessly redo the fault. */ - fault = handle_mm_fault(mm, vma, address, cause > 0 ? FAULT_FLAG_WRITE : 0); - up_read(&mm->mmap_sem); + fault = handle_mm_fault(mm, vma, address, flags); + + if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) + return; + if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; @@ -153,10 +159,26 @@ do_page_fault(unsigned long address, unsigned long mmcsr, goto do_sigbus; BUG(); } - if (fault & VM_FAULT_MAJOR) - current->maj_flt++; - else - current->min_flt++; + + if (flags & FAULT_FLAG_ALLOW_RETRY) { + if (fault & VM_FAULT_MAJOR) + current->maj_flt++; + else + current->min_flt++; + if (fault & VM_FAULT_RETRY) { + flags &= ~FAULT_FLAG_ALLOW_RETRY; + + /* No need to up_read(&mm->mmap_sem) as we would + * have already released it in __lock_page_or_retry + * in mm/filemap.c. + */ + + goto retry; + } + } + + up_read(&mm->mmap_sem); + return; /* Something tried to access memory that isn't in our memory map. @@ -186,12 +208,14 @@ do_page_fault(unsigned long address, unsigned long mmcsr, /* We ran out of memory, or some other thing happened to us that made us unable to handle the page fault gracefully. */ out_of_memory: + up_read(&mm->mmap_sem); if (!user_mode(regs)) goto no_context; pagefault_out_of_memory(); return; do_sigbus: + up_read(&mm->mmap_sem); /* Send a sigbus, regardless of whether we were in kernel or user mode. 
*/ info.si_signo = SIGBUS; diff --git a/trunk/arch/alpha/oprofile/common.c b/trunk/arch/alpha/oprofile/common.c index a0a5d27aa215..b8ce18f485d3 100644 --- a/trunk/arch/alpha/oprofile/common.c +++ b/trunk/arch/alpha/oprofile/common.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "op_impl.h" diff --git a/trunk/arch/arm/Kconfig b/trunk/arch/arm/Kconfig index 7980873525b2..2f88d8d97701 100644 --- a/trunk/arch/arm/Kconfig +++ b/trunk/arch/arm/Kconfig @@ -6,7 +6,7 @@ config ARM select HAVE_DMA_API_DEBUG select HAVE_IDE if PCI || ISA || PCMCIA select HAVE_DMA_ATTRS - select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7) + select HAVE_DMA_CONTIGUOUS if MMU select HAVE_MEMBLOCK select RTC_LIB select SYS_SUPPORTS_APM_EMULATION @@ -38,7 +38,6 @@ config ARM select HARDIRQS_SW_RESEND select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW - select GENERIC_IRQ_PROBE select ARCH_WANT_IPC_PARSE_VERSION select HARDIRQS_SW_RESEND select CPU_PM if (SUSPEND || CPU_IDLE) @@ -126,11 +125,6 @@ config TRACE_IRQFLAGS_SUPPORT bool default y -config GENERIC_LOCKBREAK - bool - default y - depends on SMP && PREEMPT - config RWSEM_GENERIC_SPINLOCK bool default y @@ -1151,6 +1145,7 @@ config PLAT_ORION bool select CLKSRC_MMIO select GENERIC_IRQ_CHIP + select IRQ_DOMAIN select COMMON_CLK config PLAT_PXA @@ -2149,6 +2144,7 @@ source "drivers/cpufreq/Kconfig" config CPU_FREQ_IMX tristate "CPUfreq driver for i.MX CPUs" depends on ARCH_MXC && CPU_FREQ + select CPU_FREQ_TABLE help This enables the CPUfreq driver for i.MX CPUs. diff --git a/trunk/arch/arm/boot/dts/am33xx.dtsi b/trunk/arch/arm/boot/dts/am33xx.dtsi index 59509c48d7e5..bd0cff3f808c 100644 --- a/trunk/arch/arm/boot/dts/am33xx.dtsi +++ b/trunk/arch/arm/boot/dts/am33xx.dtsi @@ -154,5 +154,10 @@ #size-cells = <0>; ti,hwmods = "i2c3"; }; + + wdt2: wdt@44e35000 { + compatible = "ti,omap3-wdt"; + ti,hwmods = "wd_timer2"; + }; }; }; diff --git a/trunk/arch/arm/boot/dts/armada-xp.dtsi b/trunk/arch/arm/boot/dts/armada-xp.dtsi index e1fa7e6edfe8..71d6b5d0daf1 100644 --- a/trunk/arch/arm/boot/dts/armada-xp.dtsi +++ b/trunk/arch/arm/boot/dts/armada-xp.dtsi @@ -12,7 +12,7 @@ * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. * - * Contains definitions specific to the Armada 370 SoC that are not + * Contains definitions specific to the Armada XP SoC that are not * common to all Armada SoCs. 
*/ diff --git a/trunk/arch/arm/boot/dts/at91sam9g25ek.dts b/trunk/arch/arm/boot/dts/at91sam9g25ek.dts index 7829a4d0cb22..96514c134e54 100644 --- a/trunk/arch/arm/boot/dts/at91sam9g25ek.dts +++ b/trunk/arch/arm/boot/dts/at91sam9g25ek.dts @@ -15,7 +15,7 @@ compatible = "atmel,at91sam9g25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9"; chosen { - bootargs = "128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs"; + bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs"; }; ahb { diff --git a/trunk/arch/arm/boot/dts/imx23.dtsi b/trunk/arch/arm/boot/dts/imx23.dtsi index a874dbfb5ae6..e6138310e5ce 100644 --- a/trunk/arch/arm/boot/dts/imx23.dtsi +++ b/trunk/arch/arm/boot/dts/imx23.dtsi @@ -51,11 +51,11 @@ dma-apbh@80004000 { compatible = "fsl,imx23-dma-apbh"; - reg = <0x80004000 2000>; + reg = <0x80004000 0x2000>; }; ecc@80008000 { - reg = <0x80008000 2000>; + reg = <0x80008000 0x2000>; status = "disabled"; }; @@ -63,7 +63,7 @@ compatible = "fsl,imx23-gpmi-nand"; #address-cells = <1>; #size-cells = <1>; - reg = <0x8000c000 2000>, <0x8000a000 2000>; + reg = <0x8000c000 0x2000>, <0x8000a000 0x2000>; reg-names = "gpmi-nand", "bch"; interrupts = <13>, <56>; interrupt-names = "gpmi-dma", "bch"; @@ -72,14 +72,14 @@ }; ssp0: ssp@80010000 { - reg = <0x80010000 2000>; + reg = <0x80010000 0x2000>; interrupts = <15 14>; fsl,ssp-dma-channel = <1>; status = "disabled"; }; etm@80014000 { - reg = <0x80014000 2000>; + reg = <0x80014000 0x2000>; status = "disabled"; }; @@ -87,7 +87,7 @@ #address-cells = <1>; #size-cells = <0>; compatible = "fsl,imx23-pinctrl", "simple-bus"; - reg = <0x80018000 2000>; + reg = <0x80018000 0x2000>; gpio0: gpio@0 { compatible = "fsl,imx23-gpio", "fsl,mxs-gpio"; @@ -273,32 +273,32 @@ }; emi@80020000 { - reg = <0x80020000 2000>; + reg = <0x80020000 0x2000>; status = "disabled"; }; dma-apbx@80024000 { compatible = "fsl,imx23-dma-apbx"; - reg = <0x80024000 2000>; + reg = <0x80024000 0x2000>; }; dcp@80028000 { - reg = <0x80028000 2000>; + reg = <0x80028000 0x2000>; status = "disabled"; }; pxp@8002a000 { - reg = <0x8002a000 2000>; + reg = <0x8002a000 0x2000>; status = "disabled"; }; ocotp@8002c000 { - reg = <0x8002c000 2000>; + reg = <0x8002c000 0x2000>; status = "disabled"; }; axi-ahb@8002e000 { - reg = <0x8002e000 2000>; + reg = <0x8002e000 0x2000>; status = "disabled"; }; @@ -310,14 +310,14 @@ }; ssp1: ssp@80034000 { - reg = <0x80034000 2000>; + reg = <0x80034000 0x2000>; interrupts = <2 20>; fsl,ssp-dma-channel = <2>; status = "disabled"; }; tvenc@80038000 { - reg = <0x80038000 2000>; + reg = <0x80038000 0x2000>; status = "disabled"; }; }; @@ -330,37 +330,37 @@ ranges; clkctl@80040000 { - reg = <0x80040000 2000>; + reg = <0x80040000 0x2000>; status = "disabled"; }; saif0: saif@80042000 { - reg = <0x80042000 2000>; + reg = <0x80042000 0x2000>; status = "disabled"; }; power@80044000 { - reg = <0x80044000 2000>; + reg = <0x80044000 0x2000>; status = "disabled"; }; saif1: saif@80046000 { - reg = <0x80046000 2000>; + reg = <0x80046000 0x2000>; status = "disabled"; }; audio-out@80048000 { - reg = <0x80048000 2000>; + reg = <0x80048000 0x2000>; status = "disabled"; }; audio-in@8004c000 { - reg = <0x8004c000 2000>; + reg = <0x8004c000 0x2000>; status = "disabled"; }; lradc@80050000 { - reg = <0x80050000 2000>; + reg = <0x80050000 0x2000>; status = "disabled"; }; @@ -370,26 +370,26 @@ }; i2c@80058000 { - reg = <0x80058000 2000>; + reg = <0x80058000 0x2000>; status = "disabled"; }; rtc@8005c000 
{ compatible = "fsl,imx23-rtc", "fsl,stmp3xxx-rtc"; - reg = <0x8005c000 2000>; + reg = <0x8005c000 0x2000>; interrupts = <22>; }; pwm: pwm@80064000 { compatible = "fsl,imx23-pwm"; - reg = <0x80064000 2000>; + reg = <0x80064000 0x2000>; #pwm-cells = <2>; fsl,pwm-number = <5>; status = "disabled"; }; timrot@80068000 { - reg = <0x80068000 2000>; + reg = <0x80068000 0x2000>; status = "disabled"; }; @@ -429,7 +429,7 @@ ranges; usbctrl@80080000 { - reg = <0x80080000 0x10000>; + reg = <0x80080000 0x40000>; status = "disabled"; }; }; diff --git a/trunk/arch/arm/boot/dts/imx27-3ds.dts b/trunk/arch/arm/boot/dts/imx27-3ds.dts index d3f8296e19e0..0a8978a40ece 100644 --- a/trunk/arch/arm/boot/dts/imx27-3ds.dts +++ b/trunk/arch/arm/boot/dts/imx27-3ds.dts @@ -27,7 +27,7 @@ status = "okay"; }; - uart@1000a000 { + uart1: serial@1000a000 { fsl,uart-has-rtscts; status = "okay"; }; diff --git a/trunk/arch/arm/boot/dts/imx27.dtsi b/trunk/arch/arm/boot/dts/imx27.dtsi index 00bae3aad5ab..5303ab680a34 100644 --- a/trunk/arch/arm/boot/dts/imx27.dtsi +++ b/trunk/arch/arm/boot/dts/imx27.dtsi @@ -19,6 +19,12 @@ serial3 = &uart4; serial4 = &uart5; serial5 = &uart6; + gpio0 = &gpio1; + gpio1 = &gpio2; + gpio2 = &gpio3; + gpio3 = &gpio4; + gpio4 = &gpio5; + gpio5 = &gpio6; }; avic: avic-interrupt-controller@e0000000 { diff --git a/trunk/arch/arm/boot/dts/imx28.dtsi b/trunk/arch/arm/boot/dts/imx28.dtsi index 787efac68da8..3fa6d190fab4 100644 --- a/trunk/arch/arm/boot/dts/imx28.dtsi +++ b/trunk/arch/arm/boot/dts/imx28.dtsi @@ -57,18 +57,18 @@ }; hsadc@80002000 { - reg = <0x80002000 2000>; + reg = <0x80002000 0x2000>; interrupts = <13 87>; status = "disabled"; }; dma-apbh@80004000 { compatible = "fsl,imx28-dma-apbh"; - reg = <0x80004000 2000>; + reg = <0x80004000 0x2000>; }; perfmon@80006000 { - reg = <0x80006000 800>; + reg = <0x80006000 0x800>; interrupts = <27>; status = "disabled"; }; @@ -77,7 +77,7 @@ compatible = "fsl,imx28-gpmi-nand"; #address-cells = <1>; #size-cells = <1>; - reg = <0x8000c000 2000>, <0x8000a000 2000>; + reg = <0x8000c000 0x2000>, <0x8000a000 0x2000>; reg-names = "gpmi-nand", "bch"; interrupts = <88>, <41>; interrupt-names = "gpmi-dma", "bch"; @@ -86,28 +86,28 @@ }; ssp0: ssp@80010000 { - reg = <0x80010000 2000>; + reg = <0x80010000 0x2000>; interrupts = <96 82>; fsl,ssp-dma-channel = <0>; status = "disabled"; }; ssp1: ssp@80012000 { - reg = <0x80012000 2000>; + reg = <0x80012000 0x2000>; interrupts = <97 83>; fsl,ssp-dma-channel = <1>; status = "disabled"; }; ssp2: ssp@80014000 { - reg = <0x80014000 2000>; + reg = <0x80014000 0x2000>; interrupts = <98 84>; fsl,ssp-dma-channel = <2>; status = "disabled"; }; ssp3: ssp@80016000 { - reg = <0x80016000 2000>; + reg = <0x80016000 0x2000>; interrupts = <99 85>; fsl,ssp-dma-channel = <3>; status = "disabled"; @@ -117,7 +117,7 @@ #address-cells = <1>; #size-cells = <0>; compatible = "fsl,imx28-pinctrl", "simple-bus"; - reg = <0x80018000 2000>; + reg = <0x80018000 0x2000>; gpio0: gpio@0 { compatible = "fsl,imx28-gpio", "fsl,mxs-gpio"; @@ -510,96 +510,96 @@ }; digctl@8001c000 { - reg = <0x8001c000 2000>; + reg = <0x8001c000 0x2000>; interrupts = <89>; status = "disabled"; }; etm@80022000 { - reg = <0x80022000 2000>; + reg = <0x80022000 0x2000>; status = "disabled"; }; dma-apbx@80024000 { compatible = "fsl,imx28-dma-apbx"; - reg = <0x80024000 2000>; + reg = <0x80024000 0x2000>; }; dcp@80028000 { - reg = <0x80028000 2000>; + reg = <0x80028000 0x2000>; interrupts = <52 53 54>; status = "disabled"; }; pxp@8002a000 { - reg = <0x8002a000 2000>; + reg = <0x8002a000 
0x2000>; interrupts = <39>; status = "disabled"; }; ocotp@8002c000 { - reg = <0x8002c000 2000>; + reg = <0x8002c000 0x2000>; status = "disabled"; }; axi-ahb@8002e000 { - reg = <0x8002e000 2000>; + reg = <0x8002e000 0x2000>; status = "disabled"; }; lcdif@80030000 { compatible = "fsl,imx28-lcdif"; - reg = <0x80030000 2000>; + reg = <0x80030000 0x2000>; interrupts = <38 86>; status = "disabled"; }; can0: can@80032000 { compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan"; - reg = <0x80032000 2000>; + reg = <0x80032000 0x2000>; interrupts = <8>; status = "disabled"; }; can1: can@80034000 { compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan"; - reg = <0x80034000 2000>; + reg = <0x80034000 0x2000>; interrupts = <9>; status = "disabled"; }; simdbg@8003c000 { - reg = <0x8003c000 200>; + reg = <0x8003c000 0x200>; status = "disabled"; }; simgpmisel@8003c200 { - reg = <0x8003c200 100>; + reg = <0x8003c200 0x100>; status = "disabled"; }; simsspsel@8003c300 { - reg = <0x8003c300 100>; + reg = <0x8003c300 0x100>; status = "disabled"; }; simmemsel@8003c400 { - reg = <0x8003c400 100>; + reg = <0x8003c400 0x100>; status = "disabled"; }; gpiomon@8003c500 { - reg = <0x8003c500 100>; + reg = <0x8003c500 0x100>; status = "disabled"; }; simenet@8003c700 { - reg = <0x8003c700 100>; + reg = <0x8003c700 0x100>; status = "disabled"; }; armjtag@8003c800 { - reg = <0x8003c800 100>; + reg = <0x8003c800 0x100>; status = "disabled"; }; }; @@ -612,45 +612,45 @@ ranges; clkctl@80040000 { - reg = <0x80040000 2000>; + reg = <0x80040000 0x2000>; status = "disabled"; }; saif0: saif@80042000 { compatible = "fsl,imx28-saif"; - reg = <0x80042000 2000>; + reg = <0x80042000 0x2000>; interrupts = <59 80>; fsl,saif-dma-channel = <4>; status = "disabled"; }; power@80044000 { - reg = <0x80044000 2000>; + reg = <0x80044000 0x2000>; status = "disabled"; }; saif1: saif@80046000 { compatible = "fsl,imx28-saif"; - reg = <0x80046000 2000>; + reg = <0x80046000 0x2000>; interrupts = <58 81>; fsl,saif-dma-channel = <5>; status = "disabled"; }; lradc@80050000 { - reg = <0x80050000 2000>; + reg = <0x80050000 0x2000>; status = "disabled"; }; spdif@80054000 { - reg = <0x80054000 2000>; + reg = <0x80054000 0x2000>; interrupts = <45 66>; status = "disabled"; }; rtc@80056000 { compatible = "fsl,imx28-rtc", "fsl,stmp3xxx-rtc"; - reg = <0x80056000 2000>; + reg = <0x80056000 0x2000>; interrupts = <29>; }; @@ -658,7 +658,7 @@ #address-cells = <1>; #size-cells = <0>; compatible = "fsl,imx28-i2c"; - reg = <0x80058000 2000>; + reg = <0x80058000 0x2000>; interrupts = <111 68>; clock-frequency = <100000>; status = "disabled"; @@ -668,7 +668,7 @@ #address-cells = <1>; #size-cells = <0>; compatible = "fsl,imx28-i2c"; - reg = <0x8005a000 2000>; + reg = <0x8005a000 0x2000>; interrupts = <110 69>; clock-frequency = <100000>; status = "disabled"; @@ -676,14 +676,14 @@ pwm: pwm@80064000 { compatible = "fsl,imx28-pwm", "fsl,imx23-pwm"; - reg = <0x80064000 2000>; + reg = <0x80064000 0x2000>; #pwm-cells = <2>; fsl,pwm-number = <8>; status = "disabled"; }; timrot@80068000 { - reg = <0x80068000 2000>; + reg = <0x80068000 0x2000>; status = "disabled"; }; diff --git a/trunk/arch/arm/boot/dts/imx51-babbage.dts b/trunk/arch/arm/boot/dts/imx51-babbage.dts index de065b5976e6..59d9789e5508 100644 --- a/trunk/arch/arm/boot/dts/imx51-babbage.dts +++ b/trunk/arch/arm/boot/dts/imx51-babbage.dts @@ -25,8 +25,8 @@ aips@70000000 { /* aips-1 */ spba@70000000 { esdhc@70004000 { /* ESDHC1 */ - fsl,cd-internal; - fsl,wp-internal; + fsl,cd-controller; + fsl,wp-controller; status = 
"okay"; }; @@ -53,7 +53,7 @@ spi-max-frequency = <6000000>; reg = <0>; interrupt-parent = <&gpio1>; - interrupts = <8>; + interrupts = <8 0x4>; regulators { sw1_reg: sw1 { diff --git a/trunk/arch/arm/boot/dts/imx51.dtsi b/trunk/arch/arm/boot/dts/imx51.dtsi index 53cbaa3d4f90..aba28dc87fc8 100644 --- a/trunk/arch/arm/boot/dts/imx51.dtsi +++ b/trunk/arch/arm/boot/dts/imx51.dtsi @@ -17,6 +17,10 @@ serial0 = &uart1; serial1 = &uart2; serial2 = &uart3; + gpio0 = &gpio1; + gpio1 = &gpio2; + gpio2 = &gpio3; + gpio3 = &gpio4; }; tzic: tz-interrupt-controller@e0000000 { diff --git a/trunk/arch/arm/boot/dts/imx53-ard.dts b/trunk/arch/arm/boot/dts/imx53-ard.dts index 5b8eafcdbeec..da895e93a999 100644 --- a/trunk/arch/arm/boot/dts/imx53-ard.dts +++ b/trunk/arch/arm/boot/dts/imx53-ard.dts @@ -64,12 +64,32 @@ reg = <0xf4000000 0x2000000>; phy-mode = "mii"; interrupt-parent = <&gpio2>; - interrupts = <31>; + interrupts = <31 0x8>; reg-io-width = <4>; + /* + * VDD33A and VDDVARIO of LAN9220 are supplied by + * SW4_3V3 of LTC3589. Before the regulator driver + * for this PMIC is available, we use a fixed dummy + * 3V3 regulator to get LAN9220 driver probing work. + */ + vdd33a-supply = <®_3p3v>; + vddvario-supply = <®_3p3v>; smsc,irq-push-pull; }; }; + regulators { + compatible = "simple-bus"; + + reg_3p3v: 3p3v { + compatible = "regulator-fixed"; + regulator-name = "3P3V"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + }; + gpio-keys { compatible = "gpio-keys"; diff --git a/trunk/arch/arm/boot/dts/imx53.dtsi b/trunk/arch/arm/boot/dts/imx53.dtsi index fc79cdc4b4e6..cd37165edce5 100644 --- a/trunk/arch/arm/boot/dts/imx53.dtsi +++ b/trunk/arch/arm/boot/dts/imx53.dtsi @@ -19,6 +19,13 @@ serial2 = &uart3; serial3 = &uart4; serial4 = &uart5; + gpio0 = &gpio1; + gpio1 = &gpio2; + gpio2 = &gpio3; + gpio3 = &gpio4; + gpio4 = &gpio5; + gpio5 = &gpio6; + gpio6 = &gpio7; }; tzic: tz-interrupt-controller@0fffc000 { diff --git a/trunk/arch/arm/boot/dts/imx6q-sabrelite.dts b/trunk/arch/arm/boot/dts/imx6q-sabrelite.dts index d42e851ceb97..72f30f3e6171 100644 --- a/trunk/arch/arm/boot/dts/imx6q-sabrelite.dts +++ b/trunk/arch/arm/boot/dts/imx6q-sabrelite.dts @@ -53,6 +53,7 @@ fsl,pins = < 144 0x80000000 /* MX6Q_PAD_EIM_D22__GPIO_3_22 */ 121 0x80000000 /* MX6Q_PAD_EIM_D19__GPIO_3_19 */ + 953 0x80000000 /* MX6Q_PAD_GPIO_0__CCM_CLKO */ >; }; }; diff --git a/trunk/arch/arm/boot/dts/imx6q.dtsi b/trunk/arch/arm/boot/dts/imx6q.dtsi index 3d3c64b014e6..fd57079f71a9 100644 --- a/trunk/arch/arm/boot/dts/imx6q.dtsi +++ b/trunk/arch/arm/boot/dts/imx6q.dtsi @@ -19,6 +19,13 @@ serial2 = &uart3; serial3 = &uart4; serial4 = &uart5; + gpio0 = &gpio1; + gpio1 = &gpio2; + gpio2 = &gpio3; + gpio3 = &gpio4; + gpio4 = &gpio5; + gpio5 = &gpio6; + gpio6 = &gpio7; }; cpus { diff --git a/trunk/arch/arm/boot/dts/kirkwood-dns320.dts b/trunk/arch/arm/boot/dts/kirkwood-dns320.dts index 9a33077130e8..5bb0bf39d3b8 100644 --- a/trunk/arch/arm/boot/dts/kirkwood-dns320.dts +++ b/trunk/arch/arm/boot/dts/kirkwood-dns320.dts @@ -1,6 +1,6 @@ /dts-v1/; -/include/ "kirkwood.dtsi" +/include/ "kirkwood-dnskw.dtsi" / { model = "D-Link DNS-320 NAS (Rev A1)"; @@ -15,6 +15,31 @@ bootargs = "console=ttyS0,115200n8 earlyprintk"; }; + gpio-leds { + compatible = "gpio-leds"; + blue-power { + label = "dns320:blue:power"; + gpios = <&gpio0 26 1>; /* GPIO 26 Active Low */ + linux,default-trigger = "default-on"; + }; + blue-usb { + label = "dns320:blue:usb"; + gpios = <&gpio1 11 1>; /* GPIO 43 Active Low */ + }; + 
orange-l_hdd { + label = "dns320:orange:l_hdd"; + gpios = <&gpio0 28 1>; /* GPIO 28 Active Low */ + }; + orange-r_hdd { + label = "dns320:orange:r_hdd"; + gpios = <&gpio0 27 1>; /* GPIO 27 Active Low */ + }; + orange-usb { + label = "dns320:orange:usb"; + gpios = <&gpio1 3 1>; /* GPIO 35 Active Low */ + }; + }; + ocp@f1000000 { serial@12000 { clock-frequency = <166666667>; @@ -25,40 +50,5 @@ clock-frequency = <166666667>; status = "okay"; }; - - nand@3000000 { - status = "okay"; - - partition@0 { - label = "u-boot"; - reg = <0x0000000 0x100000>; - read-only; - }; - - partition@100000 { - label = "uImage"; - reg = <0x0100000 0x500000>; - }; - - partition@600000 { - label = "ramdisk"; - reg = <0x0600000 0x500000>; - }; - - partition@b00000 { - label = "image"; - reg = <0x0b00000 0x6600000>; - }; - - partition@7100000 { - label = "mini firmware"; - reg = <0x7100000 0xa00000>; - }; - - partition@7b00000 { - label = "config"; - reg = <0x7b00000 0x500000>; - }; - }; }; }; diff --git a/trunk/arch/arm/boot/dts/kirkwood-dns325.dts b/trunk/arch/arm/boot/dts/kirkwood-dns325.dts index 16734c1b5dfe..d430713ea9b9 100644 --- a/trunk/arch/arm/boot/dts/kirkwood-dns325.dts +++ b/trunk/arch/arm/boot/dts/kirkwood-dns325.dts @@ -1,6 +1,6 @@ /dts-v1/; -/include/ "kirkwood.dtsi" +/include/ "kirkwood-dnskw.dtsi" / { model = "D-Link DNS-325 NAS (Rev A1)"; @@ -15,45 +15,43 @@ bootargs = "console=ttyS0,115200n8 earlyprintk"; }; - ocp@f1000000 { - serial@12000 { - clock-frequency = <200000000>; - status = "okay"; + gpio-leds { + compatible = "gpio-leds"; + white-power { + label = "dns325:white:power"; + gpios = <&gpio0 26 1>; /* GPIO 26 Active Low */ + linux,default-trigger = "default-on"; + }; + white-usb { + label = "dns325:white:usb"; + gpios = <&gpio1 11 1>; /* GPIO 43 Active Low */ + }; + red-l_hdd { + label = "dns325:red:l_hdd"; + gpios = <&gpio0 28 1>; /* GPIO 28 Active Low */ }; + red-r_hdd { + label = "dns325:red:r_hdd"; + gpios = <&gpio0 27 1>; /* GPIO 27 Active Low */ + }; + red-usb { + label = "dns325:red:usb"; + gpios = <&gpio0 29 1>; /* GPIO 29 Active Low */ + }; + }; - nand@3000000 { + ocp@f1000000 { + i2c@11000 { status = "okay"; - partition@0 { - label = "u-boot"; - reg = <0x0000000 0x100000>; - read-only; - }; - - partition@100000 { - label = "uImage"; - reg = <0x0100000 0x500000>; - }; - - partition@600000 { - label = "ramdisk"; - reg = <0x0600000 0x500000>; - }; - - partition@b00000 { - label = "image"; - reg = <0x0b00000 0x6600000>; - }; - - partition@7100000 { - label = "mini firmware"; - reg = <0x7100000 0xa00000>; - }; - - partition@7b00000 { - label = "config"; - reg = <0x7b00000 0x500000>; + lm75: lm75@48 { + compatible = "national,lm75"; + reg = <0x48>; }; }; + serial@12000 { + clock-frequency = <200000000>; + status = "okay"; + }; }; }; diff --git a/trunk/arch/arm/boot/dts/kirkwood-dnskw.dtsi b/trunk/arch/arm/boot/dts/kirkwood-dnskw.dtsi new file mode 100644 index 000000000000..7408655f91b5 --- /dev/null +++ b/trunk/arch/arm/boot/dts/kirkwood-dnskw.dtsi @@ -0,0 +1,69 @@ +/include/ "kirkwood.dtsi" + +/ { + model = "D-Link DNS NASes (kirkwood-based)"; + compatible = "dlink,dns-kirkwood", "marvell,kirkwood-88f6281", "marvell,kirkwood"; + + gpio_keys { + compatible = "gpio-keys"; + #address-cells = <1>; + #size-cells = <0>; + button@1 { + label = "Power button"; + linux,code = <116>; + gpios = <&gpio1 2 1>; + }; + button@2 { + label = "USB unmount button"; + linux,code = <161>; + gpios = <&gpio1 15 1>; + }; + button@3 { + label = "Reset button"; + linux,code = <0x198>; + gpios = <&gpio1 16 1>; 
+ }; + }; + + ocp@f1000000 { + sata@80000 { + status = "okay"; + nr-ports = <2>; + }; + + nand@3000000 { + status = "okay"; + + partition@0 { + label = "u-boot"; + reg = <0x0000000 0x100000>; + read-only; + }; + + partition@100000 { + label = "uImage"; + reg = <0x0100000 0x500000>; + }; + + partition@600000 { + label = "ramdisk"; + reg = <0x0600000 0x500000>; + }; + + partition@b00000 { + label = "image"; + reg = <0x0b00000 0x6600000>; + }; + + partition@7100000 { + label = "mini firmware"; + reg = <0x7100000 0xa00000>; + }; + + partition@7b00000 { + label = "config"; + reg = <0x7b00000 0x500000>; + }; + }; + }; +}; diff --git a/trunk/arch/arm/boot/dts/kirkwood-dreamplug.dts b/trunk/arch/arm/boot/dts/kirkwood-dreamplug.dts index 78b0f06a09a2..26e281fbf6bc 100644 --- a/trunk/arch/arm/boot/dts/kirkwood-dreamplug.dts +++ b/trunk/arch/arm/boot/dts/kirkwood-dreamplug.dts @@ -20,5 +20,55 @@ clock-frequency = <200000000>; status = "ok"; }; + + spi@10600 { + status = "okay"; + + m25p40@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "mx25l1606e"; + reg = <0>; + spi-max-frequency = <50000000>; + mode = <0>; + + partition@0 { + reg = <0x0 0x80000>; + label = "u-boot"; + }; + + partition@100000 { + reg = <0x100000 0x10000>; + label = "u-boot env"; + }; + + partition@180000 { + reg = <0x180000 0x10000>; + label = "dtb"; + }; + }; + }; + + sata@80000 { + status = "okay"; + nr-ports = <1>; + }; + }; + + gpio-leds { + compatible = "gpio-leds"; + + bluetooth { + label = "dreamplug:blue:bluetooth"; + gpios = <&gpio1 15 1>; + }; + wifi { + label = "dreamplug:green:wifi"; + gpios = <&gpio1 16 1>; + }; + wifi-ap { + label = "dreamplug:green:wifi_ap"; + gpios = <&gpio1 17 1>; + }; }; }; diff --git a/trunk/arch/arm/boot/dts/kirkwood-goflexnet.dts b/trunk/arch/arm/boot/dts/kirkwood-goflexnet.dts new file mode 100644 index 000000000000..7c8238fbb6f9 --- /dev/null +++ b/trunk/arch/arm/boot/dts/kirkwood-goflexnet.dts @@ -0,0 +1,99 @@ +/dts-v1/; + +/include/ "kirkwood.dtsi" + +/ { + model = "Seagate GoFlex Net"; + compatible = "seagate,goflexnet", "marvell,kirkwood-88f6281", "marvell,kirkwood"; + + memory { + device_type = "memory"; + reg = <0x00000000 0x8000000>; + }; + + chosen { + bootargs = "console=ttyS0,115200n8 earlyprintk root=/dev/sda1 rootdelay=10"; + }; + + ocp@f1000000 { + serial@12000 { + clock-frequency = <200000000>; + status = "ok"; + }; + + nand@3000000 { + status = "okay"; + + partition@0 { + label = "u-boot"; + reg = <0x0000000 0x100000>; + read-only; + }; + + partition@100000 { + label = "uImage"; + reg = <0x0100000 0x400000>; + }; + + partition@500000 { + label = "pogoplug"; + reg = <0x0500000 0x2000000>; + }; + + partition@2500000 { + label = "root"; + reg = <0x02500000 0xd800000>; + }; + }; + sata@80000 { + status = "okay"; + nr-ports = <2>; + }; + + }; + gpio-leds { + compatible = "gpio-leds"; + + health { + label = "status:green:health"; + gpios = <&gpio1 14 1>; + linux,default-trigger = "default-on"; + }; + fault { + label = "status:orange:fault"; + gpios = <&gpio1 15 1>; + }; + left0 { + label = "status:white:left0"; + gpios = <&gpio1 10 0>; + }; + left1 { + label = "status:white:left1"; + gpios = <&gpio1 11 0>; + }; + left2 { + label = "status:white:left2"; + gpios = <&gpio1 12 0>; + }; + left3 { + label = "status:white:left3"; + gpios = <&gpio1 13 0>; + }; + right0 { + label = "status:white:right0"; + gpios = <&gpio1 6 0>; + }; + right1 { + label = "status:white:right1"; + gpios = <&gpio1 7 0>; + }; + right2 { + label = "status:white:right2"; + gpios = <&gpio1 8 0>; + }; 
+ right3 { + label = "status:white:right3"; + gpios = <&gpio1 9 0>; + }; + }; +}; diff --git a/trunk/arch/arm/boot/dts/kirkwood-ib62x0.dts b/trunk/arch/arm/boot/dts/kirkwood-ib62x0.dts index f59dcf6dc45f..66794ed75ff1 100644 --- a/trunk/arch/arm/boot/dts/kirkwood-ib62x0.dts +++ b/trunk/arch/arm/boot/dts/kirkwood-ib62x0.dts @@ -21,6 +21,11 @@ status = "okay"; }; + sata@80000 { + status = "okay"; + nr-ports = <2>; + }; + nand@3000000 { status = "okay"; @@ -41,4 +46,37 @@ }; }; + + gpio_keys { + compatible = "gpio-keys"; + #address-cells = <1>; + #size-cells = <0>; + button@1 { + label = "USB Copy"; + linux,code = <133>; + gpios = <&gpio0 29 1>; + }; + button@2 { + label = "Reset"; + linux,code = <0x198>; + gpios = <&gpio0 28 1>; + }; + }; + gpio-leds { + compatible = "gpio-leds"; + + green-os { + label = "ib62x0:green:os"; + gpios = <&gpio0 25 0>; + linux,default-trigger = "default-on"; + }; + red-os { + label = "ib62x0:red:os"; + gpios = <&gpio0 22 0>; + }; + usb-copy { + label = "ib62x0:red:usb_copy"; + gpios = <&gpio0 27 0>; + }; + }; }; diff --git a/trunk/arch/arm/boot/dts/kirkwood-iconnect.dts b/trunk/arch/arm/boot/dts/kirkwood-iconnect.dts index 026a1f82d813..f8ca6fa88192 100644 --- a/trunk/arch/arm/boot/dts/kirkwood-iconnect.dts +++ b/trunk/arch/arm/boot/dts/kirkwood-iconnect.dts @@ -18,9 +18,55 @@ }; ocp@f1000000 { + i2c@11000 { + status = "okay"; + + lm63: lm63@4c { + compatible = "national,lm63"; + reg = <0x4c>; + }; + }; serial@12000 { clock-frequency = <200000000>; status = "ok"; }; }; + gpio-leds { + compatible = "gpio-leds"; + + led-level { + label = "led_level"; + gpios = <&gpio1 9 0>; + linux,default-trigger = "default-on"; + }; + power-blue { + label = "power:blue"; + gpios = <&gpio1 10 0>; + linux,default-trigger = "timer"; + }; + power-red { + label = "power:red"; + gpios = <&gpio1 11 0>; + }; + usb1 { + label = "usb1:blue"; + gpios = <&gpio1 12 0>; + }; + usb2 { + label = "usb2:blue"; + gpios = <&gpio1 13 0>; + }; + usb3 { + label = "usb3:blue"; + gpios = <&gpio1 14 0>; + }; + usb4 { + label = "usb4:blue"; + gpios = <&gpio1 15 0>; + }; + otb { + label = "otb:blue"; + gpios = <&gpio1 16 0>; + }; + }; }; diff --git a/trunk/arch/arm/boot/dts/kirkwood-lschlv2.dts b/trunk/arch/arm/boot/dts/kirkwood-lschlv2.dts new file mode 100644 index 000000000000..9510c9ea666c --- /dev/null +++ b/trunk/arch/arm/boot/dts/kirkwood-lschlv2.dts @@ -0,0 +1,20 @@ +/dts-v1/; + +/include/ "kirkwood-lsxl.dtsi" + +/ { + model = "Buffalo Linkstation LS-CHLv2"; + compatible = "buffalo,lschlv2", "buffalo,lsxl", "marvell,kirkwood-88f6281", "marvell,kirkwood"; + + memory { + device_type = "memory"; + reg = <0x00000000 0x4000000>; + }; + + ocp@f1000000 { + serial@12000 { + clock-frequency = <166666667>; + status = "okay"; + }; + }; +}; diff --git a/trunk/arch/arm/boot/dts/kirkwood-lsxhl.dts b/trunk/arch/arm/boot/dts/kirkwood-lsxhl.dts new file mode 100644 index 000000000000..739019c4cba9 --- /dev/null +++ b/trunk/arch/arm/boot/dts/kirkwood-lsxhl.dts @@ -0,0 +1,20 @@ +/dts-v1/; + +/include/ "kirkwood-lsxl.dtsi" + +/ { + model = "Buffalo Linkstation LS-XHL"; + compatible = "buffalo,lsxhl", "buffalo,lsxl", "marvell,kirkwood-88f6281", "marvell,kirkwood"; + + memory { + device_type = "memory"; + reg = <0x00000000 0x10000000>; + }; + + ocp@f1000000 { + serial@12000 { + clock-frequency = <200000000>; + status = "okay"; + }; + }; +}; diff --git a/trunk/arch/arm/boot/dts/kirkwood-lsxl.dtsi b/trunk/arch/arm/boot/dts/kirkwood-lsxl.dtsi new file mode 100644 index 000000000000..8ac51c08269d --- /dev/null +++ 
b/trunk/arch/arm/boot/dts/kirkwood-lsxl.dtsi @@ -0,0 +1,95 @@ +/include/ "kirkwood.dtsi" + +/ { + chosen { + bootargs = "console=ttyS0,115200n8 earlyprintk"; + }; + + ocp@f1000000 { + sata@80000 { + status = "okay"; + nr-ports = <1>; + }; + + spi@10600 { + status = "okay"; + + m25p40@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "m25p40"; + reg = <0>; + spi-max-frequency = <25000000>; + mode = <0>; + + partition@0 { + reg = <0x0 0x60000>; + label = "uboot"; + read-only; + }; + + partition@60000 { + reg = <0x60000 0x10000>; + label = "dtb"; + read-only; + }; + + partition@70000 { + reg = <0x70000 0x10000>; + label = "uboot_env"; + }; + }; + }; + }; + + gpio_keys { + compatible = "gpio-keys"; + #address-cells = <1>; + #size-cells = <0>; + button@1 { + label = "Function Button"; + linux,code = <132>; + gpios = <&gpio1 9 1>; + }; + button@2 { + label = "Power-on Switch"; + linux,code = <116>; + gpios = <&gpio1 10 1>; + }; + button@3 { + label = "Power-auto Switch"; + linux,code = <142>; + gpios = <&gpio1 11 1>; + }; + }; + + gpio_leds { + compatible = "gpio-leds"; + + led@1 { + label = "lschlv2:blue:func"; + gpios = <&gpio1 4 1>; + }; + + led@2 { + label = "lschlv2:red:alarm"; + gpios = <&gpio1 5 1>; + }; + + led@3 { + label = "lschlv2:amber:info"; + gpios = <&gpio1 6 1>; + }; + + led@4 { + label = "lschlv2:blue:power"; + gpios = <&gpio1 7 1>; + linux,default-trigger = "default-on"; + }; + + led@5 { + label = "lschlv2:red:func"; + gpios = <&gpio1 16 1>; + }; + }; +}; diff --git a/trunk/arch/arm/boot/dts/kirkwood-ts219-6281.dts b/trunk/arch/arm/boot/dts/kirkwood-ts219-6281.dts new file mode 100644 index 000000000000..ccbf32757800 --- /dev/null +++ b/trunk/arch/arm/boot/dts/kirkwood-ts219-6281.dts @@ -0,0 +1,21 @@ +/dts-v1/; + +/include/ "kirkwood-ts219.dtsi" + +/ { + gpio_keys { + compatible = "gpio-keys"; + #address-cells = <1>; + #size-cells = <0>; + button@1 { + label = "USB Copy"; + linux,code = <133>; + gpios = <&gpio0 15 1>; + }; + button@2 { + label = "Reset"; + linux,code = <0x198>; + gpios = <&gpio0 16 1>; + }; + }; +}; \ No newline at end of file diff --git a/trunk/arch/arm/boot/dts/kirkwood-ts219-6282.dts b/trunk/arch/arm/boot/dts/kirkwood-ts219-6282.dts new file mode 100644 index 000000000000..fbe9932161a1 --- /dev/null +++ b/trunk/arch/arm/boot/dts/kirkwood-ts219-6282.dts @@ -0,0 +1,21 @@ +/dts-v1/; + +/include/ "kirkwood-ts219.dtsi" + +/ { + gpio_keys { + compatible = "gpio-keys"; + #address-cells = <1>; + #size-cells = <0>; + button@1 { + label = "USB Copy"; + linux,code = <133>; + gpios = <&gpio1 11 1>; + }; + button@2 { + label = "Reset"; + linux,code = <0x198>; + gpios = <&gpio1 5 1>; + }; + }; +}; \ No newline at end of file diff --git a/trunk/arch/arm/boot/dts/kirkwood-ts219.dtsi b/trunk/arch/arm/boot/dts/kirkwood-ts219.dtsi new file mode 100644 index 000000000000..64ea27cb3298 --- /dev/null +++ b/trunk/arch/arm/boot/dts/kirkwood-ts219.dtsi @@ -0,0 +1,78 @@ +/include/ "kirkwood.dtsi" + +/ { + model = "QNAP TS219 family"; + compatible = "qnap,ts219", "marvell,kirkwood"; + + memory { + device_type = "memory"; + reg = <0x00000000 0x20000000>; + }; + + chosen { + bootargs = "console=ttyS0,115200n8"; + }; + + ocp@f1000000 { + i2c@11000 { + status = "okay"; + clock-frequency = <400000>; + + s35390a: s35390a@30 { + compatible = "s35390a"; + reg = <0x30>; + }; + }; + serial@12000 { + clock-frequency = <200000000>; + status = "okay"; + }; + serial@12100 { + clock-frequency = <200000000>; + status = "okay"; + }; + spi@10600 { + status = "okay"; + + m25p128@0 { + 
#address-cells = <1>; + #size-cells = <1>; + compatible = "m25p128"; + reg = <0>; + spi-max-frequency = <20000000>; + mode = <0>; + + partition@0000000 { + reg = <0x00000000 0x00080000>; + label = "U-Boot"; + }; + + partition@00200000 { + reg = <0x00200000 0x00200000>; + label = "Kernel"; + }; + + partition@00400000 { + reg = <0x00400000 0x00900000>; + label = "RootFS1"; + }; + partition@00d00000 { + reg = <0x00d00000 0x00300000>; + label = "RootFS2"; + }; + partition@00040000 { + reg = <0x00080000 0x00040000>; + label = "U-Boot Config"; + }; + partition@000c0000 { + reg = <0x000c0000 0x00140000>; + label = "NAS Config"; + }; + }; + }; + sata@80000 { + status = "okay"; + nr-ports = <2>; + }; + }; +}; diff --git a/trunk/arch/arm/boot/dts/kirkwood.dtsi b/trunk/arch/arm/boot/dts/kirkwood.dtsi index f95dbc190ab6..cef9616f330a 100644 --- a/trunk/arch/arm/boot/dts/kirkwood.dtsi +++ b/trunk/arch/arm/boot/dts/kirkwood.dtsi @@ -2,6 +2,15 @@ / { compatible = "marvell,kirkwood"; + interrupt-parent = <&intc>; + + intc: interrupt-controller { + compatible = "marvell,orion-intc", "marvell,intc"; + interrupt-controller; + #interrupt-cells = <1>; + reg = <0xf1020204 0x04>, + <0xf1020214 0x04>; + }; ocp@f1000000 { compatible = "simple-bus"; @@ -9,6 +18,24 @@ #address-cells = <1>; #size-cells = <1>; + gpio0: gpio@10100 { + compatible = "marvell,orion-gpio"; + #gpio-cells = <2>; + gpio-controller; + reg = <0x10100 0x40>; + ngpio = <32>; + interrupts = <35>, <36>, <37>, <38>; + }; + + gpio1: gpio@10140 { + compatible = "marvell,orion-gpio"; + #gpio-cells = <2>; + gpio-controller; + reg = <0x10140 0x40>; + ngpio = <18>; + interrupts = <39>, <40>, <41>; + }; + serial@12000 { compatible = "ns16550a"; reg = <0x12000 0x100>; @@ -33,6 +60,29 @@ interrupts = <53>; }; + spi@10600 { + compatible = "marvell,orion-spi"; + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + interrupts = <23>; + reg = <0x10600 0x28>; + status = "disabled"; + }; + + wdt@20300 { + compatible = "marvell,orion-wdt"; + reg = <0x20300 0x28>; + status = "okay"; + }; + + sata@80000 { + compatible = "marvell,orion-sata"; + reg = <0x80000 0x5000>; + interrupts = <21>; + status = "disabled"; + }; + nand@3000000 { #address-cells = <1>; #size-cells = <1>; @@ -45,5 +95,15 @@ /* set partition map and/or chip-delay in board dts */ status = "disabled"; }; + + i2c@11000 { + compatible = "marvell,mv64xxx-i2c"; + reg = <0x11000 0x20>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <29>; + clock-frequency = <100000>; + status = "disabled"; + }; }; }; diff --git a/trunk/arch/arm/boot/dts/twl6030.dtsi b/trunk/arch/arm/boot/dts/twl6030.dtsi index 3b2f3510d7eb..d351b27d7213 100644 --- a/trunk/arch/arm/boot/dts/twl6030.dtsi +++ b/trunk/arch/arm/boot/dts/twl6030.dtsi @@ -66,6 +66,7 @@ vcxio: regulator@8 { compatible = "ti,twl6030-vcxio"; + regulator-always-on; }; vusb: regulator@9 { @@ -74,10 +75,12 @@ v1v8: regulator@10 { compatible = "ti,twl6030-v1v8"; + regulator-always-on; }; v2v1: regulator@11 { compatible = "ti,twl6030-v2v1"; + regulator-always-on; }; clk32kg: regulator@12 { diff --git a/trunk/arch/arm/configs/armadillo800eva_defconfig b/trunk/arch/arm/configs/armadillo800eva_defconfig index 7d8718468e0d..90610c7030f7 100644 --- a/trunk/arch/arm/configs/armadillo800eva_defconfig +++ b/trunk/arch/arm/configs/armadillo800eva_defconfig @@ -33,7 +33,7 @@ CONFIG_AEABI=y CONFIG_FORCE_MAX_ZONEORDER=13 CONFIG_ZBOOT_ROM_TEXT=0x0 CONFIG_ZBOOT_ROM_BSS=0x0 -CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 
ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096" +CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096 rw" CONFIG_CMDLINE_FORCE=y CONFIG_KEXEC=y CONFIG_VFP=y diff --git a/trunk/arch/arm/configs/imx_v6_v7_defconfig b/trunk/arch/arm/configs/imx_v6_v7_defconfig index f725b9637b33..3c9f32f9b6b4 100644 --- a/trunk/arch/arm/configs/imx_v6_v7_defconfig +++ b/trunk/arch/arm/configs/imx_v6_v7_defconfig @@ -192,6 +192,7 @@ CONFIG_RTC_DRV_MC13XXX=y CONFIG_RTC_DRV_MXC=y CONFIG_DMADEVICES=y CONFIG_IMX_SDMA=y +CONFIG_MXS_DMA=y CONFIG_COMMON_CLK_DEBUG=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_EXT2_FS=y diff --git a/trunk/arch/arm/configs/mxs_defconfig b/trunk/arch/arm/configs/mxs_defconfig index ccdb6357fb74..4edcfb4e4dee 100644 --- a/trunk/arch/arm/configs/mxs_defconfig +++ b/trunk/arch/arm/configs/mxs_defconfig @@ -34,7 +34,6 @@ CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_PREEMPT_VOLUNTARY=y CONFIG_AEABI=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 CONFIG_AUTO_ZRELADDR=y CONFIG_FPE_NWFPE=y CONFIG_NET=y diff --git a/trunk/arch/arm/configs/omap2plus_defconfig b/trunk/arch/arm/configs/omap2plus_defconfig index b152de79fd95..e58edc36b406 100644 --- a/trunk/arch/arm/configs/omap2plus_defconfig +++ b/trunk/arch/arm/configs/omap2plus_defconfig @@ -193,6 +193,8 @@ CONFIG_MMC_OMAP_HS=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_TWL92330=y CONFIG_RTC_DRV_TWL4030=y +CONFIG_DMADEVICES=y +CONFIG_DMA_OMAP=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_FS_XATTR is not set diff --git a/trunk/arch/arm/configs/tct_hammer_defconfig b/trunk/arch/arm/configs/tct_hammer_defconfig index 1d24f8458bef..71277a1591ba 100644 --- a/trunk/arch/arm/configs/tct_hammer_defconfig +++ b/trunk/arch/arm/configs/tct_hammer_defconfig @@ -7,7 +7,7 @@ CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y # CONFIG_KALLSYMS is not set -# CONFIG_BUG is not set +# CONFIG_BUGVERBOSE is not set # CONFIG_ELF_CORE is not set # CONFIG_SHMEM is not set CONFIG_SLOB=y diff --git a/trunk/arch/arm/configs/u8500_defconfig b/trunk/arch/arm/configs/u8500_defconfig index 2d4f661d1cf6..da6845493caa 100644 --- a/trunk/arch/arm/configs/u8500_defconfig +++ b/trunk/arch/arm/configs/u8500_defconfig @@ -86,6 +86,7 @@ CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_LM3530=y CONFIG_LEDS_LP5521=y +CONFIG_LEDS_GPIO=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_AB8500=y CONFIG_RTC_DRV_PL031=y diff --git a/trunk/arch/arm/include/asm/cacheflush.h b/trunk/arch/arm/include/asm/cacheflush.h index 004c1bc95d2b..e4448e16046d 100644 --- a/trunk/arch/arm/include/asm/cacheflush.h +++ b/trunk/arch/arm/include/asm/cacheflush.h @@ -215,7 +215,9 @@ static inline void vivt_flush_cache_mm(struct mm_struct *mm) static inline void vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) + struct mm_struct *mm = vma->vm_mm; + + if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end), vma->vm_flags); } @@ -223,7 +225,9 @@ vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned static inline void vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) { - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { + struct mm_struct *mm = vma->vm_mm; + + if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) { unsigned long addr = 
user_addr & PAGE_MASK; __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags); } diff --git a/trunk/arch/arm/include/asm/dma-mapping.h b/trunk/arch/arm/include/asm/dma-mapping.h index 2ae842df4551..5c44dcb0987b 100644 --- a/trunk/arch/arm/include/asm/dma-mapping.h +++ b/trunk/arch/arm/include/asm/dma-mapping.h @@ -202,6 +202,13 @@ static inline void dma_free_writecombine(struct device *dev, size_t size, return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs); } +/* + * This can be called during early boot to increase the size of the atomic + * coherent DMA pool above the default value of 256KiB. It must be called + * before postcore_initcall. + */ +extern void __init init_dma_coherent_pool_size(unsigned long size); + /* * This can be called during boot to increase the size of the consistent * DMA region above it's default value of 2MB. It must be called before the diff --git a/trunk/arch/arm/include/asm/mutex.h b/trunk/arch/arm/include/asm/mutex.h index 93226cf23ae0..b1479fd04a95 100644 --- a/trunk/arch/arm/include/asm/mutex.h +++ b/trunk/arch/arm/include/asm/mutex.h @@ -7,121 +7,10 @@ */ #ifndef _ASM_MUTEX_H #define _ASM_MUTEX_H - -#if __LINUX_ARM_ARCH__ < 6 -/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */ -# include -#else - /* - * Attempting to lock a mutex on ARMv6+ can be done with a bastardized - * atomic decrement (it is not a reliable atomic decrement but it satisfies - * the defined semantics for our purpose, while being smaller and faster - * than a real atomic decrement or atomic swap. The idea is to attempt - * decrementing the lock value only once. If once decremented it isn't zero, - * or if its store-back fails due to a dispute on the exclusive store, we - * simply bail out immediately through the slow path where the lock will be - * reattempted until it succeeds. + * On pre-ARMv6 hardware this results in a swp-based implementation, + * which is the most efficient. For ARMv6+, we emit a pair of exclusive + * accesses instead. */ -static inline void -__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) -{ - int __ex_flag, __res; - - __asm__ ( - - "ldrex %0, [%2] \n\t" - "sub %0, %0, #1 \n\t" - "strex %1, %0, [%2] " - - : "=&r" (__res), "=&r" (__ex_flag) - : "r" (&(count)->counter) - : "cc","memory" ); - - __res |= __ex_flag; - if (unlikely(__res != 0)) - fail_fn(count); -} - -static inline int -__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) -{ - int __ex_flag, __res; - - __asm__ ( - - "ldrex %0, [%2] \n\t" - "sub %0, %0, #1 \n\t" - "strex %1, %0, [%2] " - - : "=&r" (__res), "=&r" (__ex_flag) - : "r" (&(count)->counter) - : "cc","memory" ); - - __res |= __ex_flag; - if (unlikely(__res != 0)) - __res = fail_fn(count); - return __res; -} - -/* - * Same trick is used for the unlock fast path. However the original value, - * rather than the result, is used to test for success in order to have - * better generated assembly. - */ -static inline void -__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) -{ - int __ex_flag, __res, __orig; - - __asm__ ( - - "ldrex %0, [%3] \n\t" - "add %1, %0, #1 \n\t" - "strex %2, %1, [%3] " - - : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag) - : "r" (&(count)->counter) - : "cc","memory" ); - - __orig |= __ex_flag; - if (unlikely(__orig != 0)) - fail_fn(count); -} - -/* - * If the unlock was done on a contended lock, or if the unlock simply fails - * then the mutex remains locked. 
- */ -#define __mutex_slowpath_needs_to_unlock() 1 - -/* - * For __mutex_fastpath_trylock we use another construct which could be - * described as a "single value cmpxchg". - * - * This provides the needed trylock semantics like cmpxchg would, but it is - * lighter and less generic than a true cmpxchg implementation. - */ -static inline int -__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) -{ - int __ex_flag, __res, __orig; - - __asm__ ( - - "1: ldrex %0, [%3] \n\t" - "subs %1, %0, #1 \n\t" - "strexeq %2, %1, [%3] \n\t" - "movlt %0, #0 \n\t" - "cmpeq %2, #0 \n\t" - "bgt 1b " - - : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag) - : "r" (&count->counter) - : "cc", "memory" ); - - return __orig; -} - -#endif +#include #endif diff --git a/trunk/arch/arm/include/asm/pgtable.h b/trunk/arch/arm/include/asm/pgtable.h index f66626d71e7d..41dc31f834c3 100644 --- a/trunk/arch/arm/include/asm/pgtable.h +++ b/trunk/arch/arm/include/asm/pgtable.h @@ -195,6 +195,18 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd) #define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0) +#define pte_none(pte) (!pte_val(pte)) +#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) +#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY)) +#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) +#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) +#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN)) +#define pte_special(pte) (0) + +#define pte_present_user(pte) \ + ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \ + (L_PTE_PRESENT | L_PTE_USER)) + #if __LINUX_ARM_ARCH__ < 6 static inline void __sync_icache_dcache(pte_t pteval) { @@ -206,25 +218,15 @@ extern void __sync_icache_dcache(pte_t pteval); static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval) { - if (addr >= TASK_SIZE) - set_pte_ext(ptep, pteval, 0); - else { + unsigned long ext = 0; + + if (addr < TASK_SIZE && pte_present_user(pteval)) { __sync_icache_dcache(pteval); - set_pte_ext(ptep, pteval, PTE_EXT_NG); + ext |= PTE_EXT_NG; } -} -#define pte_none(pte) (!pte_val(pte)) -#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) -#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY)) -#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) -#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) -#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN)) -#define pte_special(pte) (0) - -#define pte_present_user(pte) \ - ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \ - (L_PTE_PRESENT | L_PTE_USER)) + set_pte_ext(ptep, pteval, ext); +} #define PTE_BIT_FUNC(fn,op) \ static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } @@ -251,13 +253,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) * * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 - * <--------------- offset --------------------> <- type --> 0 0 0 + * <--------------- offset ----------------------> < type -> 0 0 0 * - * This gives us up to 63 swap files and 32GB per swap file. Note that + * This gives us up to 31 swap files and 64GB per swap file. Note that * the offset field is always non-zero. 
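 * (Worked example from the definitions below: with __SWP_TYPE_SHIFT = 3
 *  and __SWP_TYPE_BITS = 5 the type field occupies bits [7:3] and the
 *  offset field bits [31:8], i.e. 24 offset bits; with 4KiB pages that
 *  is 2^24 * 4KiB = 64GiB of addressable swap per file.)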
*/ #define __SWP_TYPE_SHIFT 3 -#define __SWP_TYPE_BITS 6 +#define __SWP_TYPE_BITS 5 #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) diff --git a/trunk/arch/arm/include/asm/sched_clock.h b/trunk/arch/arm/include/asm/sched_clock.h index e3f757263438..05b8e82ec9f5 100644 --- a/trunk/arch/arm/include/asm/sched_clock.h +++ b/trunk/arch/arm/include/asm/sched_clock.h @@ -10,5 +10,7 @@ extern void sched_clock_postinit(void); extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate); +extern void setup_sched_clock_needs_suspend(u32 (*read)(void), int bits, + unsigned long rate); #endif diff --git a/trunk/arch/arm/include/asm/setup.h b/trunk/arch/arm/include/asm/setup.h index 23ebc0c82a39..24d284a1bfc7 100644 --- a/trunk/arch/arm/include/asm/setup.h +++ b/trunk/arch/arm/include/asm/setup.h @@ -196,7 +196,7 @@ static const struct tagtable __tagtable_##fn __tag = { tag, fn } struct membank { phys_addr_t start; - unsigned long size; + phys_addr_t size; unsigned int highmem; }; @@ -217,7 +217,7 @@ extern struct meminfo meminfo; #define bank_phys_end(bank) ((bank)->start + (bank)->size) #define bank_phys_size(bank) (bank)->size -extern int arm_add_memory(phys_addr_t start, unsigned long size); +extern int arm_add_memory(phys_addr_t start, phys_addr_t size); extern void early_print(const char *str, ...); extern void dump_machine_table(void); diff --git a/trunk/arch/arm/kernel/entry-armv.S b/trunk/arch/arm/kernel/entry-armv.S index 0d1851ca6eb9..0f82098c9bfe 100644 --- a/trunk/arch/arm/kernel/entry-armv.S +++ b/trunk/arch/arm/kernel/entry-armv.S @@ -244,6 +244,19 @@ svc_preempt: b 1b #endif +__und_fault: + @ Correct the PC such that it is pointing at the instruction + @ which caused the fault. If the faulting instruction was ARM + @ the PC will be pointing at the next instruction, and have to + @ subtract 4. Otherwise, it is Thumb, and the PC will be + @ pointing at the second half of the Thumb instruction. We + @ have to subtract 2. + ldr r2, [r0, #S_PC] + sub r2, r2, r1 + str r2, [r0, #S_PC] + b do_undefinstr +ENDPROC(__und_fault) + .align 5 __und_svc: #ifdef CONFIG_KPROBES @@ -261,25 +274,32 @@ __und_svc: @ @ r0 - instruction @ -#ifndef CONFIG_THUMB2_KERNEL +#ifndef CONFIG_THUMB2_KERNEL ldr r0, [r4, #-4] #else + mov r1, #2 ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2 cmp r0, #0xe800 @ 32-bit instruction if xx >= 0 - ldrhhs r9, [r4] @ bottom 16 bits - orrhs r0, r9, r0, lsl #16 + blo __und_svc_fault + ldrh r9, [r4] @ bottom 16 bits + add r4, r4, #2 + str r4, [sp, #S_PC] + orr r0, r9, r0, lsl #16 #endif - adr r9, BSYM(1f) + adr r9, BSYM(__und_svc_finish) mov r2, r4 bl call_fpe + mov r1, #4 @ PC correction to apply +__und_svc_fault: mov r0, sp @ struct pt_regs *regs - bl do_undefinstr + bl __und_fault @ @ IRQs off again before pulling preserved data off the stack @ -1: disable_irq_notrace +__und_svc_finish: + disable_irq_notrace @ @ restore SPSR and restart the instruction @@ -423,25 +443,33 @@ __und_usr: mov r2, r4 mov r3, r5 + @ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the + @ faulting instruction depending on Thumb mode. 
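+	@ (4 bytes ahead in ARM state, 2 bytes in Thumb state, matching the
+	@ r1 correction values passed to __und_fault above)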
+ @ r3 = regs->ARM_cpsr @ - @ fall through to the emulation code, which returns using r9 if - @ it has emulated the instruction, or the more conventional lr - @ if we are to treat this as a real undefined instruction - @ - @ r0 - instruction + @ The emulation code returns using r9 if it has emulated the + @ instruction, or the more conventional lr if we are to treat + @ this as a real undefined instruction @ adr r9, BSYM(ret_from_exception) - adr lr, BSYM(__und_usr_unknown) + tst r3, #PSR_T_BIT @ Thumb mode? - itet eq @ explicit IT needed for the 1f label - subeq r4, r2, #4 @ ARM instr at LR - 4 - subne r4, r2, #2 @ Thumb instr at LR - 2 -1: ldreqt r0, [r4] + bne __und_usr_thumb + sub r4, r2, #4 @ ARM instr at LR - 4 +1: ldrt r0, [r4] #ifdef CONFIG_CPU_ENDIAN_BE8 - reveq r0, r0 @ little endian instruction + rev r0, r0 @ little endian instruction #endif - beq call_fpe + @ r0 = 32-bit ARM instruction which caused the exception + @ r2 = PC value for the following instruction (:= regs->ARM_pc) + @ r4 = PC value for the faulting instruction + @ lr = 32-bit undefined instruction function + adr lr, BSYM(__und_usr_fault_32) + b call_fpe + +__und_usr_thumb: @ Thumb instruction + sub r4, r2, #2 @ First half of thumb instr at LR - 2 #if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7 /* * Thumb-2 instruction handling. Note that because pre-v6 and >= v6 platforms @@ -455,7 +483,7 @@ __und_usr: ldr r5, .LCcpu_architecture ldr r5, [r5] cmp r5, #CPU_ARCH_ARMv7 - blo __und_usr_unknown + blo __und_usr_fault_16 @ 16bit undefined instruction /* * The following code won't get run unless the running CPU really is v7, so * coding round the lack of ldrht on older arches is pointless. Temporarily @@ -463,15 +491,18 @@ __und_usr: */ .arch armv6t2 #endif -2: - ARM( ldrht r5, [r4], #2 ) - THUMB( ldrht r5, [r4] ) - THUMB( add r4, r4, #2 ) +2: ldrht r5, [r4] cmp r5, #0xe800 @ 32bit instruction if xx != 0 - blo __und_usr_unknown -3: ldrht r0, [r4] + blo __und_usr_fault_16 @ 16bit undefined instruction +3: ldrht r0, [r2] add r2, r2, #2 @ r2 is PC + 2, make it PC + 4 + str r2, [sp, #S_PC] @ it's a 2x16bit instr, update orr r0, r0, r5, lsl #16 + adr lr, BSYM(__und_usr_fault_32) + @ r0 = the two 16-bit Thumb instructions which caused the exception + @ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc) + @ r4 = PC value for the first 16-bit Thumb instruction + @ lr = 32bit undefined instruction function #if __LINUX_ARM_ARCH__ < 7 /* If the target arch was overridden, change it back: */ @@ -482,17 +513,13 @@ __und_usr: #endif #endif /* __LINUX_ARM_ARCH__ < 7 */ #else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */ - b __und_usr_unknown + b __und_usr_fault_16 #endif - UNWIND(.fnend ) + UNWIND(.fnend) ENDPROC(__und_usr) - @ - @ fallthrough to call_fpe - @ - /* - * The out of line fixup for the ldrt above. + * The out of line fixup for the ldrt instructions above. */ .pushsection .fixup, "ax" .align 2 @@ -524,11 +551,12 @@ ENDPROC(__und_usr) * NEON handler code. * * Emulators may wish to make use of the following registers: - * r0 = instruction opcode. - * r2 = PC+4 + * r0 = instruction opcode (32-bit ARM or two 16-bit Thumb) + * r2 = PC value to resume execution after successful emulation * r9 = normal "successful" return address - * r10 = this threads thread_info structure. + * r10 = this threads thread_info structure * lr = unrecognised instruction return address + * IRQs disabled, FIQs enabled. 
*/ @ @ Fall-through from Thumb-2 __und_usr @@ -659,12 +687,17 @@ ENTRY(no_fp) mov pc, lr ENDPROC(no_fp) -__und_usr_unknown: - enable_irq +__und_usr_fault_32: + mov r1, #4 + b 1f +__und_usr_fault_16: + mov r1, #2 +1: enable_irq mov r0, sp adr lr, BSYM(ret_from_exception) - b do_undefinstr -ENDPROC(__und_usr_unknown) + b __und_fault +ENDPROC(__und_usr_fault_32) +ENDPROC(__und_usr_fault_16) .align 5 __pabt_usr: diff --git a/trunk/arch/arm/kernel/entry-common.S b/trunk/arch/arm/kernel/entry-common.S index 49d9f9305247..978eac57e04a 100644 --- a/trunk/arch/arm/kernel/entry-common.S +++ b/trunk/arch/arm/kernel/entry-common.S @@ -51,23 +51,15 @@ ret_fast_syscall: fast_work_pending: str r0, [sp, #S_R0+S_OFF]! @ returned r0 work_pending: - tst r1, #_TIF_NEED_RESCHED - bne work_resched - /* - * TIF_SIGPENDING or TIF_NOTIFY_RESUME must've been set if we got here - */ - ldr r2, [sp, #S_PSR] mov r0, sp @ 'regs' - tst r2, #15 @ are we returning to user mode? - bne no_work_pending @ no? just leave, then... mov r2, why @ 'syscall' - tst r1, #_TIF_SIGPENDING @ delivering a signal? - movne why, #0 @ prevent further restarts - bl do_notify_resume - b ret_slow_syscall @ Check work again + bl do_work_pending + cmp r0, #0 + beq no_work_pending + movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE) + ldmia sp, {r0 - r6} @ have to reload r0 - r6 + b local_restart @ ... and off we go -work_resched: - bl schedule /* * "slow" syscall return path. "why" tells us if this was a real syscall. */ @@ -409,6 +401,7 @@ ENTRY(vector_swi) eor scno, scno, #__NR_SYSCALL_BASE @ check OS number #endif +local_restart: ldr r10, [tsk, #TI_FLAGS] @ check for syscall tracing stmdb sp!, {r4, r5} @ push fifth and sixth args @@ -450,7 +443,8 @@ __sys_trace: mov scno, r0 @ syscall number (possibly new) add r1, sp, #S_R0 + S_OFF @ pointer to regs cmp scno, #NR_syscalls @ check upper syscall limit - ldmccia r1, {r0 - r3} @ have to reload r0 - r3 + ldmccia r1, {r0 - r6} @ have to reload r0 - r6 + stmccia sp, {r4, r5} @ and update the stack args ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine b 2b diff --git a/trunk/arch/arm/kernel/ftrace.c b/trunk/arch/arm/kernel/ftrace.c index df0bf0c8cb79..34e56647dcee 100644 --- a/trunk/arch/arm/kernel/ftrace.c +++ b/trunk/arch/arm/kernel/ftrace.c @@ -179,19 +179,20 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, old = *parent; *parent = return_hooker; - err = ftrace_push_return_trace(old, self_addr, &trace.depth, - frame_pointer); - if (err == -EBUSY) { - *parent = old; - return; - } - trace.func = self_addr; + trace.depth = current->curr_ret_stack + 1; /* Only trace if the calling function expects to */ if (!ftrace_graph_entry(&trace)) { - current->curr_ret_stack--; *parent = old; + return; + } + + err = ftrace_push_return_trace(old, self_addr, &trace.depth, + frame_pointer); + if (err == -EBUSY) { + *parent = old; + return; } } diff --git a/trunk/arch/arm/kernel/process.c b/trunk/arch/arm/kernel/process.c index 19c95ea65b2f..693b744fd572 100644 --- a/trunk/arch/arm/kernel/process.c +++ b/trunk/arch/arm/kernel/process.c @@ -247,6 +247,7 @@ void machine_shutdown(void) void machine_halt(void) { machine_shutdown(); + local_irq_disable(); while (1); } @@ -268,6 +269,7 @@ void machine_restart(char *cmd) /* Whoops - the platform was unable to reboot. Tell the user! 
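 * (The local_irq_disable() added below presumably keeps the stuck CPU
 *  from continuing to service interrupts while it spins.)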
*/ printk("Reboot failed -- System halted\n"); + local_irq_disable(); while (1); } diff --git a/trunk/arch/arm/kernel/ptrace.c b/trunk/arch/arm/kernel/ptrace.c index dab711e6e1ca..3e0fc5f7ed4b 100644 --- a/trunk/arch/arm/kernel/ptrace.c +++ b/trunk/arch/arm/kernel/ptrace.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include diff --git a/trunk/arch/arm/kernel/sched_clock.c b/trunk/arch/arm/kernel/sched_clock.c index 27d186abbc06..f4515393248d 100644 --- a/trunk/arch/arm/kernel/sched_clock.c +++ b/trunk/arch/arm/kernel/sched_clock.c @@ -21,6 +21,8 @@ struct clock_data { u32 epoch_cyc_copy; u32 mult; u32 shift; + bool suspended; + bool needs_suspend; }; static void sched_clock_poll(unsigned long wrap_ticks); @@ -49,6 +51,9 @@ static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask) u64 epoch_ns; u32 epoch_cyc; + if (cd.suspended) + return cd.epoch_ns; + /* * Load the epoch_cyc and epoch_ns atomically. We do this by * ensuring that we always write epoch_cyc, epoch_ns and @@ -98,6 +103,13 @@ static void sched_clock_poll(unsigned long wrap_ticks) update_sched_clock(); } +void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits, + unsigned long rate) +{ + setup_sched_clock(read, bits, rate); + cd.needs_suspend = true; +} + void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate) { unsigned long r, w; @@ -169,11 +181,23 @@ void __init sched_clock_postinit(void) static int sched_clock_suspend(void) { sched_clock_poll(sched_clock_timer.data); + if (cd.needs_suspend) + cd.suspended = true; return 0; } +static void sched_clock_resume(void) +{ + if (cd.needs_suspend) { + cd.epoch_cyc = read_sched_clock(); + cd.epoch_cyc_copy = cd.epoch_cyc; + cd.suspended = false; + } +} + static struct syscore_ops sched_clock_ops = { .suspend = sched_clock_suspend, + .resume = sched_clock_resume, }; static int __init sched_clock_syscore_init(void) diff --git a/trunk/arch/arm/kernel/setup.c b/trunk/arch/arm/kernel/setup.c index e15d83bb4ea3..a81dcecc7343 100644 --- a/trunk/arch/arm/kernel/setup.c +++ b/trunk/arch/arm/kernel/setup.c @@ -508,7 +508,7 @@ void __init dump_machine_table(void) /* can't use cpu_relax() here as it may require MMU setup */; } -int __init arm_add_memory(phys_addr_t start, unsigned long size) +int __init arm_add_memory(phys_addr_t start, phys_addr_t size) { struct membank *bank = &meminfo.bank[meminfo.nr_banks]; @@ -538,7 +538,7 @@ int __init arm_add_memory(phys_addr_t start, unsigned long size) } #endif - bank->size = size & PAGE_MASK; + bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1); /* * Check whether this memory region has non-zero size or @@ -558,7 +558,7 @@ int __init arm_add_memory(phys_addr_t start, unsigned long size) static int __init early_mem(char *p) { static int usermem __initdata = 0; - unsigned long size; + phys_addr_t size; phys_addr_t start; char *endp; diff --git a/trunk/arch/arm/kernel/signal.c b/trunk/arch/arm/kernel/signal.c index 536c5d6b340b..f27789e4e38a 100644 --- a/trunk/arch/arm/kernel/signal.c +++ b/trunk/arch/arm/kernel/signal.c @@ -27,7 +27,6 @@ */ #define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)) #define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)) -#define SWI_SYS_RESTART (0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE) /* * With EABI, the syscall number has to be loaded into r7. 
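 * (For reference: 0xef000000 in the SWI_SYS_* values above is the ARM
 *  "swi" opcode with the always condition, so each of those entries is
 *  a complete swi instruction carrying the syscall number in its
 *  24-bit immediate field.)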
@@ -47,18 +46,6 @@ const unsigned long sigreturn_codes[7] = { MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN, }; -/* - * Either we support OABI only, or we have EABI with the OABI - * compat layer enabled. In the later case we don't know if - * user space is EABI or not, and if not we must not clobber r7. - * Always using the OABI syscall solves that issue and works for - * all those cases. - */ -const unsigned long syscall_restart_code[2] = { - SWI_SYS_RESTART, /* swi __NR_restart_syscall */ - 0xe49df004, /* ldr pc, [sp], #4 */ -}; - /* * atomically swap in the new signal mask, and wait for a signal. */ @@ -582,12 +569,13 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, * the kernel can handle, and then we build all the user-level signal handling * stack-frames in one go after that. */ -static void do_signal(struct pt_regs *regs, int syscall) +static int do_signal(struct pt_regs *regs, int syscall) { unsigned int retval = 0, continue_addr = 0, restart_addr = 0; struct k_sigaction ka; siginfo_t info; int signr; + int restart = 0; /* * If we were from a system call, check for system call restarting... @@ -602,15 +590,15 @@ static void do_signal(struct pt_regs *regs, int syscall) * debugger will see the already changed PSW. */ switch (retval) { + case -ERESTART_RESTARTBLOCK: + restart -= 2; case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: + restart++; regs->ARM_r0 = regs->ARM_ORIG_r0; regs->ARM_pc = restart_addr; break; - case -ERESTART_RESTARTBLOCK: - regs->ARM_r0 = -EINTR; - break; } } @@ -619,14 +607,17 @@ static void do_signal(struct pt_regs *regs, int syscall) * point the debugger may change all our registers ... */ signr = get_signal_to_deliver(&info, &ka, regs, NULL); + /* + * Depending on the signal settings we may need to revert the + * decision to restart the system call. But skip this if a + * debugger has chosen to restart at a different PC. + */ + if (regs->ARM_pc != restart_addr) + restart = 0; if (signr > 0) { - /* - * Depending on the signal settings we may need to revert the - * decision to restart the system call. But skip this if a - * debugger has chosen to restart at a different PC. - */ - if (regs->ARM_pc == restart_addr) { - if (retval == -ERESTARTNOHAND + if (unlikely(restart)) { + if (retval == -ERESTARTNOHAND || + retval == -ERESTART_RESTARTBLOCK || (retval == -ERESTARTSYS && !(ka.sa.sa_flags & SA_RESTART))) { regs->ARM_r0 = -EINTR; @@ -635,52 +626,43 @@ static void do_signal(struct pt_regs *regs, int syscall) } handle_signal(signr, &ka, &info, regs); - return; - } - - if (syscall) { - /* - * Handle restarting a different system call. As above, - * if a debugger has chosen to restart at a different PC, - * ignore the restart. 
- */ - if (retval == -ERESTART_RESTARTBLOCK - && regs->ARM_pc == continue_addr) { - if (thumb_mode(regs)) { - regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE; - regs->ARM_pc -= 2; - } else { -#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT) - regs->ARM_r7 = __NR_restart_syscall; - regs->ARM_pc -= 4; -#else - u32 __user *usp; - - regs->ARM_sp -= 4; - usp = (u32 __user *)regs->ARM_sp; - - if (put_user(regs->ARM_pc, usp) == 0) { - regs->ARM_pc = KERN_RESTART_CODE; - } else { - regs->ARM_sp += 4; - force_sigsegv(0, current); - } -#endif - } - } + return 0; } restore_saved_sigmask(); + if (unlikely(restart)) + regs->ARM_pc = continue_addr; + return restart; } -asmlinkage void -do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall) +asmlinkage int +do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) { - if (thread_flags & _TIF_SIGPENDING) - do_signal(regs, syscall); - - if (thread_flags & _TIF_NOTIFY_RESUME) { - clear_thread_flag(TIF_NOTIFY_RESUME); - tracehook_notify_resume(regs); - } + do { + if (likely(thread_flags & _TIF_NEED_RESCHED)) { + schedule(); + } else { + if (unlikely(!user_mode(regs))) + return 0; + local_irq_enable(); + if (thread_flags & _TIF_SIGPENDING) { + int restart = do_signal(regs, syscall); + if (unlikely(restart)) { + /* + * Restart without handlers. + * Deal with it without leaving + * the kernel space. + */ + return restart; + } + syscall = 0; + } else { + clear_thread_flag(TIF_NOTIFY_RESUME); + tracehook_notify_resume(regs); + } + } + local_irq_disable(); + thread_flags = current_thread_info()->flags; + } while (thread_flags & _TIF_WORK_MASK); + return 0; } diff --git a/trunk/arch/arm/kernel/signal.h b/trunk/arch/arm/kernel/signal.h index 6fcfe8398aa4..5ff067b7c752 100644 --- a/trunk/arch/arm/kernel/signal.h +++ b/trunk/arch/arm/kernel/signal.h @@ -8,7 +8,5 @@ * published by the Free Software Foundation. */ #define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500) -#define KERN_RESTART_CODE (KERN_SIGRETURN_CODE + sizeof(sigreturn_codes)) extern const unsigned long sigreturn_codes[7]; -extern const unsigned long syscall_restart_code[2]; diff --git a/trunk/arch/arm/kernel/smp.c b/trunk/arch/arm/kernel/smp.c index aea74f5bc34a..ebd8ad274d76 100644 --- a/trunk/arch/arm/kernel/smp.c +++ b/trunk/arch/arm/kernel/smp.c @@ -563,7 +563,8 @@ void smp_send_stop(void) cpumask_copy(&mask, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &mask); - smp_cross_call(&mask, IPI_CPU_STOP); + if (!cpumask_empty(&mask)) + smp_cross_call(&mask, IPI_CPU_STOP); /* Wait up to one second for other CPUs to stop */ timeout = USEC_PER_SEC; diff --git a/trunk/arch/arm/kernel/topology.c b/trunk/arch/arm/kernel/topology.c index 198b08456e90..26c12c6440fc 100644 --- a/trunk/arch/arm/kernel/topology.c +++ b/trunk/arch/arm/kernel/topology.c @@ -321,7 +321,7 @@ void store_cpu_topology(unsigned int cpuid) * init_cpu_topology is called at boot when only one cpu is running * which prevent simultaneous write access to cpu_topology array */ -void init_cpu_topology(void) +void __init init_cpu_topology(void) { unsigned int cpu; diff --git a/trunk/arch/arm/kernel/traps.c b/trunk/arch/arm/kernel/traps.c index 8b97d739b17b..f7945218b8c6 100644 --- a/trunk/arch/arm/kernel/traps.c +++ b/trunk/arch/arm/kernel/traps.c @@ -402,18 +402,10 @@ static int call_undef_hook(struct pt_regs *regs, unsigned int instr) asmlinkage void __exception do_undefinstr(struct pt_regs *regs) { - unsigned int correction = thumb_mode(regs) ? 
2 : 4; unsigned int instr; siginfo_t info; void __user *pc; - /* - * According to the ARM ARM, PC is 2 or 4 bytes ahead, - * depending whether we're in Thumb mode or not. - * Correct this offset. - */ - regs->ARM_pc -= correction; - pc = (void __user *)instruction_pointer(regs); if (processor_mode(regs) == SVC_MODE) { @@ -852,8 +844,6 @@ void __init early_trap_init(void *vectors_base) */ memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE), sigreturn_codes, sizeof(sigreturn_codes)); - memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE), - syscall_restart_code, sizeof(syscall_restart_code)); flush_icache_range(vectors, vectors + PAGE_SIZE); modify_domain(DOMAIN_USER, DOMAIN_CLIENT); diff --git a/trunk/arch/arm/lib/Makefile b/trunk/arch/arm/lib/Makefile index 2473fd1fd51c..af72969820b4 100644 --- a/trunk/arch/arm/lib/Makefile +++ b/trunk/arch/arm/lib/Makefile @@ -16,13 +16,30 @@ lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \ call_with_stack.o mmu-y := clear_user.o copy_page.o getuser.o putuser.o -mmu-y += copy_from_user.o copy_to_user.o + +# the code in uaccess.S is not preemption safe and +# probably faster on ARMv3 only +ifeq ($(CONFIG_PREEMPT),y) + mmu-y += copy_from_user.o copy_to_user.o +else +ifneq ($(CONFIG_CPU_32v3),y) + mmu-y += copy_from_user.o copy_to_user.o +else + mmu-y += uaccess.o +endif +endif # using lib_ here won't override already available weak symbols obj-$(CONFIG_UACCESS_WITH_MEMCPY) += uaccess_with_memcpy.o -lib-$(CONFIG_MMU) += $(mmu-y) -lib-y += io-readsw-armv4.o io-writesw-armv4.o +lib-$(CONFIG_MMU) += $(mmu-y) + +ifeq ($(CONFIG_CPU_32v3),y) + lib-y += io-readsw-armv3.o io-writesw-armv3.o +else + lib-y += io-readsw-armv4.o io-writesw-armv4.o +endif + lib-$(CONFIG_ARCH_RPC) += ecard.o io-acorn.o floppydma.o lib-$(CONFIG_ARCH_SHARK) += io-shark.o diff --git a/trunk/arch/arm/lib/io-readsw-armv3.S b/trunk/arch/arm/lib/io-readsw-armv3.S new file mode 100644 index 000000000000..88487c8c4f23 --- /dev/null +++ b/trunk/arch/arm/lib/io-readsw-armv3.S @@ -0,0 +1,106 @@ +/* + * linux/arch/arm/lib/io-readsw-armv3.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + +.Linsw_bad_alignment: + adr r0, .Linsw_bad_align_msg + mov r2, lr + b panic +.Linsw_bad_align_msg: + .asciz "insw: bad buffer alignment (0x%p, lr=0x%08lX)\n" + .align + +.Linsw_align: tst r1, #1 + bne .Linsw_bad_alignment + + ldr r3, [r0] + strb r3, [r1], #1 + mov r3, r3, lsr #8 + strb r3, [r1], #1 + + subs r2, r2, #1 + moveq pc, lr + +ENTRY(__raw_readsw) + teq r2, #0 @ do we have to check for the zero len? 
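+		@ (calling convention, as implied by the code below:
+		@  r0 = source I/O address, r1 = destination buffer,
+		@  r2 = number of 16-bit words to read)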
+ moveq pc, lr + tst r1, #3 + bne .Linsw_align + +.Linsw_aligned: mov ip, #0xff + orr ip, ip, ip, lsl #8 + stmfd sp!, {r4, r5, r6, lr} + + subs r2, r2, #8 + bmi .Lno_insw_8 + +.Linsw_8_lp: ldr r3, [r0] + and r3, r3, ip + ldr r4, [r0] + orr r3, r3, r4, lsl #16 + + ldr r4, [r0] + and r4, r4, ip + ldr r5, [r0] + orr r4, r4, r5, lsl #16 + + ldr r5, [r0] + and r5, r5, ip + ldr r6, [r0] + orr r5, r5, r6, lsl #16 + + ldr r6, [r0] + and r6, r6, ip + ldr lr, [r0] + orr r6, r6, lr, lsl #16 + + stmia r1!, {r3 - r6} + + subs r2, r2, #8 + bpl .Linsw_8_lp + + tst r2, #7 + ldmeqfd sp!, {r4, r5, r6, pc} + +.Lno_insw_8: tst r2, #4 + beq .Lno_insw_4 + + ldr r3, [r0] + and r3, r3, ip + ldr r4, [r0] + orr r3, r3, r4, lsl #16 + + ldr r4, [r0] + and r4, r4, ip + ldr r5, [r0] + orr r4, r4, r5, lsl #16 + + stmia r1!, {r3, r4} + +.Lno_insw_4: tst r2, #2 + beq .Lno_insw_2 + + ldr r3, [r0] + and r3, r3, ip + ldr r4, [r0] + orr r3, r3, r4, lsl #16 + + str r3, [r1], #4 + +.Lno_insw_2: tst r2, #1 + ldrne r3, [r0] + strneb r3, [r1], #1 + movne r3, r3, lsr #8 + strneb r3, [r1] + + ldmfd sp!, {r4, r5, r6, pc} + + diff --git a/trunk/arch/arm/lib/io-writesw-armv3.S b/trunk/arch/arm/lib/io-writesw-armv3.S new file mode 100644 index 000000000000..49b800419e32 --- /dev/null +++ b/trunk/arch/arm/lib/io-writesw-armv3.S @@ -0,0 +1,126 @@ +/* + * linux/arch/arm/lib/io-writesw-armv3.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + +.Loutsw_bad_alignment: + adr r0, .Loutsw_bad_align_msg + mov r2, lr + b panic +.Loutsw_bad_align_msg: + .asciz "outsw: bad buffer alignment (0x%p, lr=0x%08lX)\n" + .align + +.Loutsw_align: tst r1, #1 + bne .Loutsw_bad_alignment + + add r1, r1, #2 + + ldr r3, [r1, #-4] + mov r3, r3, lsr #16 + orr r3, r3, r3, lsl #16 + str r3, [r0] + subs r2, r2, #1 + moveq pc, lr + +ENTRY(__raw_writesw) + teq r2, #0 @ do we have to check for the zero len? 
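+		@ (mirror image of __raw_readsw above: r0 = destination I/O
+		@  address, r1 = source buffer, r2 = number of 16-bit words)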
+ moveq pc, lr + tst r1, #3 + bne .Loutsw_align + + stmfd sp!, {r4, r5, r6, lr} + + subs r2, r2, #8 + bmi .Lno_outsw_8 + +.Loutsw_8_lp: ldmia r1!, {r3, r4, r5, r6} + + mov ip, r3, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r3, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + mov ip, r4, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r4, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + mov ip, r5, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r5, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + mov ip, r6, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r6, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + subs r2, r2, #8 + bpl .Loutsw_8_lp + + tst r2, #7 + ldmeqfd sp!, {r4, r5, r6, pc} + +.Lno_outsw_8: tst r2, #4 + beq .Lno_outsw_4 + + ldmia r1!, {r3, r4} + + mov ip, r3, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r3, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + mov ip, r4, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r4, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + +.Lno_outsw_4: tst r2, #2 + beq .Lno_outsw_2 + + ldr r3, [r1], #4 + + mov ip, r3, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r3, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + +.Lno_outsw_2: tst r2, #1 + + ldrne r3, [r1] + + movne ip, r3, lsl #16 + orrne ip, ip, ip, lsr #16 + strne ip, [r0] + + ldmfd sp!, {r4, r5, r6, pc} diff --git a/trunk/arch/arm/lib/uaccess.S b/trunk/arch/arm/lib/uaccess.S new file mode 100644 index 000000000000..5c908b1cb8ed --- /dev/null +++ b/trunk/arch/arm/lib/uaccess.S @@ -0,0 +1,564 @@ +/* + * linux/arch/arm/lib/uaccess.S + * + * Copyright (C) 1995, 1996,1997,1998 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Routines to block copy data to/from user memory + * These are highly optimised both for the 4k page size + * and for various alignments. + */ +#include +#include +#include +#include + + .text + +#define PAGE_SHIFT 12 + +/* Prototype: int __copy_to_user(void *to, const char *from, size_t n) + * Purpose : copy a block to user memory from kernel memory + * Params : to - user memory + * : from - kernel memory + * : n - number of bytes to copy + * Returns : Number of bytes NOT copied. + */ + +.Lc2u_dest_not_aligned: + rsb ip, ip, #4 + cmp ip, #2 + ldrb r3, [r1], #1 +USER( TUSER( strb) r3, [r0], #1) @ May fault + ldrgeb r3, [r1], #1 +USER( TUSER( strgeb) r3, [r0], #1) @ May fault + ldrgtb r3, [r1], #1 +USER( TUSER( strgtb) r3, [r0], #1) @ May fault + sub r2, r2, ip + b .Lc2u_dest_aligned + +ENTRY(__copy_to_user) + stmfd sp!, {r2, r4 - r7, lr} + cmp r2, #4 + blt .Lc2u_not_enough + ands ip, r0, #3 + bne .Lc2u_dest_not_aligned +.Lc2u_dest_aligned: + + ands ip, r1, #3 + bne .Lc2u_src_not_aligned +/* + * Seeing as there has to be at least 8 bytes to copy, we can + * copy one word, and force a user-mode page fault... + */ + +.Lc2u_0fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .Lc2u_0nowords + ldr r3, [r1], #4 +USER( TUSER( str) r3, [r0], #4) @ May fault + mov ip, r0, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .Lc2u_0fupi +/* + * ip = max no. 
of bytes to copy before needing another "strt" insn + */ + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #32 + blt .Lc2u_0rem8lp + +.Lc2u_0cpy8lp: ldmia r1!, {r3 - r6} + stmia r0!, {r3 - r6} @ Shouldnt fault + ldmia r1!, {r3 - r6} + subs ip, ip, #32 + stmia r0!, {r3 - r6} @ Shouldnt fault + bpl .Lc2u_0cpy8lp + +.Lc2u_0rem8lp: cmn ip, #16 + ldmgeia r1!, {r3 - r6} + stmgeia r0!, {r3 - r6} @ Shouldnt fault + tst ip, #8 + ldmneia r1!, {r3 - r4} + stmneia r0!, {r3 - r4} @ Shouldnt fault + tst ip, #4 + ldrne r3, [r1], #4 + TUSER( strne) r3, [r0], #4 @ Shouldnt fault + ands ip, ip, #3 + beq .Lc2u_0fupi +.Lc2u_0nowords: teq ip, #0 + beq .Lc2u_finished +.Lc2u_nowords: cmp ip, #2 + ldrb r3, [r1], #1 +USER( TUSER( strb) r3, [r0], #1) @ May fault + ldrgeb r3, [r1], #1 +USER( TUSER( strgeb) r3, [r0], #1) @ May fault + ldrgtb r3, [r1], #1 +USER( TUSER( strgtb) r3, [r0], #1) @ May fault + b .Lc2u_finished + +.Lc2u_not_enough: + movs ip, r2 + bne .Lc2u_nowords +.Lc2u_finished: mov r0, #0 + ldmfd sp!, {r2, r4 - r7, pc} + +.Lc2u_src_not_aligned: + bic r1, r1, #3 + ldr r7, [r1], #4 + cmp ip, #2 + bgt .Lc2u_3fupi + beq .Lc2u_2fupi +.Lc2u_1fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .Lc2u_1nowords + mov r3, r7, pull #8 + ldr r7, [r1], #4 + orr r3, r3, r7, push #24 +USER( TUSER( str) r3, [r0], #4) @ May fault + mov ip, r0, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .Lc2u_1fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .Lc2u_1rem8lp + +.Lc2u_1cpy8lp: mov r3, r7, pull #8 + ldmia r1!, {r4 - r7} + subs ip, ip, #16 + orr r3, r3, r4, push #24 + mov r4, r4, pull #8 + orr r4, r4, r5, push #24 + mov r5, r5, pull #8 + orr r5, r5, r6, push #24 + mov r6, r6, pull #8 + orr r6, r6, r7, push #24 + stmia r0!, {r3 - r6} @ Shouldnt fault + bpl .Lc2u_1cpy8lp + +.Lc2u_1rem8lp: tst ip, #8 + movne r3, r7, pull #8 + ldmneia r1!, {r4, r7} + orrne r3, r3, r4, push #24 + movne r4, r4, pull #8 + orrne r4, r4, r7, push #24 + stmneia r0!, {r3 - r4} @ Shouldnt fault + tst ip, #4 + movne r3, r7, pull #8 + ldrne r7, [r1], #4 + orrne r3, r3, r7, push #24 + TUSER( strne) r3, [r0], #4 @ Shouldnt fault + ands ip, ip, #3 + beq .Lc2u_1fupi +.Lc2u_1nowords: mov r3, r7, get_byte_1 + teq ip, #0 + beq .Lc2u_finished + cmp ip, #2 +USER( TUSER( strb) r3, [r0], #1) @ May fault + movge r3, r7, get_byte_2 +USER( TUSER( strgeb) r3, [r0], #1) @ May fault + movgt r3, r7, get_byte_3 +USER( TUSER( strgtb) r3, [r0], #1) @ May fault + b .Lc2u_finished + +.Lc2u_2fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .Lc2u_2nowords + mov r3, r7, pull #16 + ldr r7, [r1], #4 + orr r3, r3, r7, push #16 +USER( TUSER( str) r3, [r0], #4) @ May fault + mov ip, r0, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .Lc2u_2fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .Lc2u_2rem8lp + +.Lc2u_2cpy8lp: mov r3, r7, pull #16 + ldmia r1!, {r4 - r7} + subs ip, ip, #16 + orr r3, r3, r4, push #16 + mov r4, r4, pull #16 + orr r4, r4, r5, push #16 + mov r5, r5, pull #16 + orr r5, r5, r6, push #16 + mov r6, r6, pull #16 + orr r6, r6, r7, push #16 + stmia r0!, {r3 - r6} @ Shouldnt fault + bpl .Lc2u_2cpy8lp + +.Lc2u_2rem8lp: tst ip, #8 + movne r3, r7, pull #16 + ldmneia r1!, {r4, r7} + orrne r3, r3, r4, push #16 + movne r4, r4, pull #16 + orrne r4, r4, r7, push #16 + stmneia r0!, {r3 - r4} @ Shouldnt fault + tst ip, #4 + movne r3, r7, pull #16 + ldrne r7, [r1], #4 + orrne r3, r3, r7, push #16 + TUSER( strne) r3, [r0], #4 @ Shouldnt fault + ands ip, ip, #3 + beq 
.Lc2u_2fupi +.Lc2u_2nowords: mov r3, r7, get_byte_2 + teq ip, #0 + beq .Lc2u_finished + cmp ip, #2 +USER( TUSER( strb) r3, [r0], #1) @ May fault + movge r3, r7, get_byte_3 +USER( TUSER( strgeb) r3, [r0], #1) @ May fault + ldrgtb r3, [r1], #0 +USER( TUSER( strgtb) r3, [r0], #1) @ May fault + b .Lc2u_finished + +.Lc2u_3fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .Lc2u_3nowords + mov r3, r7, pull #24 + ldr r7, [r1], #4 + orr r3, r3, r7, push #8 +USER( TUSER( str) r3, [r0], #4) @ May fault + mov ip, r0, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .Lc2u_3fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .Lc2u_3rem8lp + +.Lc2u_3cpy8lp: mov r3, r7, pull #24 + ldmia r1!, {r4 - r7} + subs ip, ip, #16 + orr r3, r3, r4, push #8 + mov r4, r4, pull #24 + orr r4, r4, r5, push #8 + mov r5, r5, pull #24 + orr r5, r5, r6, push #8 + mov r6, r6, pull #24 + orr r6, r6, r7, push #8 + stmia r0!, {r3 - r6} @ Shouldnt fault + bpl .Lc2u_3cpy8lp + +.Lc2u_3rem8lp: tst ip, #8 + movne r3, r7, pull #24 + ldmneia r1!, {r4, r7} + orrne r3, r3, r4, push #8 + movne r4, r4, pull #24 + orrne r4, r4, r7, push #8 + stmneia r0!, {r3 - r4} @ Shouldnt fault + tst ip, #4 + movne r3, r7, pull #24 + ldrne r7, [r1], #4 + orrne r3, r3, r7, push #8 + TUSER( strne) r3, [r0], #4 @ Shouldnt fault + ands ip, ip, #3 + beq .Lc2u_3fupi +.Lc2u_3nowords: mov r3, r7, get_byte_3 + teq ip, #0 + beq .Lc2u_finished + cmp ip, #2 +USER( TUSER( strb) r3, [r0], #1) @ May fault + ldrgeb r3, [r1], #1 +USER( TUSER( strgeb) r3, [r0], #1) @ May fault + ldrgtb r3, [r1], #0 +USER( TUSER( strgtb) r3, [r0], #1) @ May fault + b .Lc2u_finished +ENDPROC(__copy_to_user) + + .pushsection .fixup,"ax" + .align 0 +9001: ldmfd sp!, {r0, r4 - r7, pc} + .popsection + +/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n); + * Purpose : copy a block from user memory to kernel memory + * Params : to - kernel memory + * : from - user memory + * : n - number of bytes to copy + * Returns : Number of bytes NOT copied. + */ +.Lcfu_dest_not_aligned: + rsb ip, ip, #4 + cmp ip, #2 +USER( TUSER( ldrb) r3, [r1], #1) @ May fault + strb r3, [r0], #1 +USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault + strgeb r3, [r0], #1 +USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault + strgtb r3, [r0], #1 + sub r2, r2, ip + b .Lcfu_dest_aligned + +ENTRY(__copy_from_user) + stmfd sp!, {r0, r2, r4 - r7, lr} + cmp r2, #4 + blt .Lcfu_not_enough + ands ip, r0, #3 + bne .Lcfu_dest_not_aligned +.Lcfu_dest_aligned: + ands ip, r1, #3 + bne .Lcfu_src_not_aligned + +/* + * Seeing as there has to be at least 8 bytes to copy, we can + * copy one word, and force a user-mode page fault... + */ + +.Lcfu_0fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .Lcfu_0nowords +USER( TUSER( ldr) r3, [r1], #4) + str r3, [r0], #4 + mov ip, r1, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .Lcfu_0fupi +/* + * ip = max no. 
of bytes to copy before needing another "strt" insn + */ + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #32 + blt .Lcfu_0rem8lp + +.Lcfu_0cpy8lp: ldmia r1!, {r3 - r6} @ Shouldnt fault + stmia r0!, {r3 - r6} + ldmia r1!, {r3 - r6} @ Shouldnt fault + subs ip, ip, #32 + stmia r0!, {r3 - r6} + bpl .Lcfu_0cpy8lp + +.Lcfu_0rem8lp: cmn ip, #16 + ldmgeia r1!, {r3 - r6} @ Shouldnt fault + stmgeia r0!, {r3 - r6} + tst ip, #8 + ldmneia r1!, {r3 - r4} @ Shouldnt fault + stmneia r0!, {r3 - r4} + tst ip, #4 + TUSER( ldrne) r3, [r1], #4 @ Shouldnt fault + strne r3, [r0], #4 + ands ip, ip, #3 + beq .Lcfu_0fupi +.Lcfu_0nowords: teq ip, #0 + beq .Lcfu_finished +.Lcfu_nowords: cmp ip, #2 +USER( TUSER( ldrb) r3, [r1], #1) @ May fault + strb r3, [r0], #1 +USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault + strgeb r3, [r0], #1 +USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault + strgtb r3, [r0], #1 + b .Lcfu_finished + +.Lcfu_not_enough: + movs ip, r2 + bne .Lcfu_nowords +.Lcfu_finished: mov r0, #0 + add sp, sp, #8 + ldmfd sp!, {r4 - r7, pc} + +.Lcfu_src_not_aligned: + bic r1, r1, #3 +USER( TUSER( ldr) r7, [r1], #4) @ May fault + cmp ip, #2 + bgt .Lcfu_3fupi + beq .Lcfu_2fupi +.Lcfu_1fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .Lcfu_1nowords + mov r3, r7, pull #8 +USER( TUSER( ldr) r7, [r1], #4) @ May fault + orr r3, r3, r7, push #24 + str r3, [r0], #4 + mov ip, r1, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .Lcfu_1fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .Lcfu_1rem8lp + +.Lcfu_1cpy8lp: mov r3, r7, pull #8 + ldmia r1!, {r4 - r7} @ Shouldnt fault + subs ip, ip, #16 + orr r3, r3, r4, push #24 + mov r4, r4, pull #8 + orr r4, r4, r5, push #24 + mov r5, r5, pull #8 + orr r5, r5, r6, push #24 + mov r6, r6, pull #8 + orr r6, r6, r7, push #24 + stmia r0!, {r3 - r6} + bpl .Lcfu_1cpy8lp + +.Lcfu_1rem8lp: tst ip, #8 + movne r3, r7, pull #8 + ldmneia r1!, {r4, r7} @ Shouldnt fault + orrne r3, r3, r4, push #24 + movne r4, r4, pull #8 + orrne r4, r4, r7, push #24 + stmneia r0!, {r3 - r4} + tst ip, #4 + movne r3, r7, pull #8 +USER( TUSER( ldrne) r7, [r1], #4) @ May fault + orrne r3, r3, r7, push #24 + strne r3, [r0], #4 + ands ip, ip, #3 + beq .Lcfu_1fupi +.Lcfu_1nowords: mov r3, r7, get_byte_1 + teq ip, #0 + beq .Lcfu_finished + cmp ip, #2 + strb r3, [r0], #1 + movge r3, r7, get_byte_2 + strgeb r3, [r0], #1 + movgt r3, r7, get_byte_3 + strgtb r3, [r0], #1 + b .Lcfu_finished + +.Lcfu_2fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .Lcfu_2nowords + mov r3, r7, pull #16 +USER( TUSER( ldr) r7, [r1], #4) @ May fault + orr r3, r3, r7, push #16 + str r3, [r0], #4 + mov ip, r1, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .Lcfu_2fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .Lcfu_2rem8lp + + +.Lcfu_2cpy8lp: mov r3, r7, pull #16 + ldmia r1!, {r4 - r7} @ Shouldnt fault + subs ip, ip, #16 + orr r3, r3, r4, push #16 + mov r4, r4, pull #16 + orr r4, r4, r5, push #16 + mov r5, r5, pull #16 + orr r5, r5, r6, push #16 + mov r6, r6, pull #16 + orr r6, r6, r7, push #16 + stmia r0!, {r3 - r6} + bpl .Lcfu_2cpy8lp + +.Lcfu_2rem8lp: tst ip, #8 + movne r3, r7, pull #16 + ldmneia r1!, {r4, r7} @ Shouldnt fault + orrne r3, r3, r4, push #16 + movne r4, r4, pull #16 + orrne r4, r4, r7, push #16 + stmneia r0!, {r3 - r4} + tst ip, #4 + movne r3, r7, pull #16 +USER( TUSER( ldrne) r7, [r1], #4) @ May fault + orrne r3, r3, r7, push #16 + strne r3, [r0], #4 + ands ip, ip, #3 + beq .Lcfu_2fupi +.Lcfu_2nowords: mov 
r3, r7, get_byte_2 + teq ip, #0 + beq .Lcfu_finished + cmp ip, #2 + strb r3, [r0], #1 + movge r3, r7, get_byte_3 + strgeb r3, [r0], #1 +USER( TUSER( ldrgtb) r3, [r1], #0) @ May fault + strgtb r3, [r0], #1 + b .Lcfu_finished + +.Lcfu_3fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .Lcfu_3nowords + mov r3, r7, pull #24 +USER( TUSER( ldr) r7, [r1], #4) @ May fault + orr r3, r3, r7, push #8 + str r3, [r0], #4 + mov ip, r1, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .Lcfu_3fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .Lcfu_3rem8lp + +.Lcfu_3cpy8lp: mov r3, r7, pull #24 + ldmia r1!, {r4 - r7} @ Shouldnt fault + orr r3, r3, r4, push #8 + mov r4, r4, pull #24 + orr r4, r4, r5, push #8 + mov r5, r5, pull #24 + orr r5, r5, r6, push #8 + mov r6, r6, pull #24 + orr r6, r6, r7, push #8 + stmia r0!, {r3 - r6} + subs ip, ip, #16 + bpl .Lcfu_3cpy8lp + +.Lcfu_3rem8lp: tst ip, #8 + movne r3, r7, pull #24 + ldmneia r1!, {r4, r7} @ Shouldnt fault + orrne r3, r3, r4, push #8 + movne r4, r4, pull #24 + orrne r4, r4, r7, push #8 + stmneia r0!, {r3 - r4} + tst ip, #4 + movne r3, r7, pull #24 +USER( TUSER( ldrne) r7, [r1], #4) @ May fault + orrne r3, r3, r7, push #8 + strne r3, [r0], #4 + ands ip, ip, #3 + beq .Lcfu_3fupi +.Lcfu_3nowords: mov r3, r7, get_byte_3 + teq ip, #0 + beq .Lcfu_finished + cmp ip, #2 + strb r3, [r0], #1 +USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault + strgeb r3, [r0], #1 +USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault + strgtb r3, [r0], #1 + b .Lcfu_finished +ENDPROC(__copy_from_user) + + .pushsection .fixup,"ax" + .align 0 + /* + * We took an exception. r0 contains a pointer to + * the byte not copied. + */ +9001: ldr r2, [sp], #4 @ void *to + sub r2, r0, r2 @ bytes copied + ldr r1, [sp], #4 @ unsigned long count + subs r4, r1, r2 @ bytes left to copy + movne r1, r4 + blne __memzero + mov r0, r4 + ldmfd sp!, {r4 - r7, pc} + .popsection + diff --git a/trunk/arch/arm/mach-at91/at91rm9200_time.c b/trunk/arch/arm/mach-at91/at91rm9200_time.c index 104ca40d8d18..aaa443b48c91 100644 --- a/trunk/arch/arm/mach-at91/at91rm9200_time.c +++ b/trunk/arch/arm/mach-at91/at91rm9200_time.c @@ -197,7 +197,7 @@ void __init at91rm9200_timer_init(void) at91_st_read(AT91_ST_SR); /* Make IRQs happen for the system timer */ - setup_irq(AT91_ID_SYS, &at91rm9200_timer_irq); + setup_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq); /* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used * directly for the clocksource and all clockevents, after adjusting diff --git a/trunk/arch/arm/mach-at91/at91sam9260_devices.c b/trunk/arch/arm/mach-at91/at91sam9260_devices.c index 7b9c2ba396ed..bce572a530ef 100644 --- a/trunk/arch/arm/mach-at91/at91sam9260_devices.c +++ b/trunk/arch/arm/mach-at91/at91sam9260_devices.c @@ -726,6 +726,8 @@ static struct resource rtt_resources[] = { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, + }, { + .flags = IORESOURCE_IRQ, }, }; @@ -744,10 +746,12 @@ static void __init at91_add_device_rtt_rtc(void) * The second resource is needed: * GPBR will serve as the storage for RTC time offset */ - at91sam9260_rtt_device.num_resources = 2; + at91sam9260_rtt_device.num_resources = 3; rtt_resources[1].start = AT91SAM9260_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; rtt_resources[1].end = rtt_resources[1].start + 3; + rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; + rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; } #else static void __init at91_add_device_rtt_rtc(void) diff --git 
a/trunk/arch/arm/mach-at91/at91sam9261_devices.c b/trunk/arch/arm/mach-at91/at91sam9261_devices.c index 8df5c1bdff92..bc2590d712d0 100644 --- a/trunk/arch/arm/mach-at91/at91sam9261_devices.c +++ b/trunk/arch/arm/mach-at91/at91sam9261_devices.c @@ -609,6 +609,8 @@ static struct resource rtt_resources[] = { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, + }, { + .flags = IORESOURCE_IRQ, } }; @@ -626,10 +628,12 @@ static void __init at91_add_device_rtt_rtc(void) * The second resource is needed: * GPBR will serve as the storage for RTC time offset */ - at91sam9261_rtt_device.num_resources = 2; + at91sam9261_rtt_device.num_resources = 3; rtt_resources[1].start = AT91SAM9261_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; rtt_resources[1].end = rtt_resources[1].start + 3; + rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; + rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; } #else static void __init at91_add_device_rtt_rtc(void) diff --git a/trunk/arch/arm/mach-at91/at91sam9263_devices.c b/trunk/arch/arm/mach-at91/at91sam9263_devices.c index eb6bbf86fb9f..9b6ca734f1a9 100644 --- a/trunk/arch/arm/mach-at91/at91sam9263_devices.c +++ b/trunk/arch/arm/mach-at91/at91sam9263_devices.c @@ -990,6 +990,8 @@ static struct resource rtt0_resources[] = { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, + }, { + .flags = IORESOURCE_IRQ, } }; @@ -1006,6 +1008,8 @@ static struct resource rtt1_resources[] = { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, + }, { + .flags = IORESOURCE_IRQ, } }; @@ -1027,14 +1031,14 @@ static void __init at91_add_device_rtt_rtc(void) * The second resource is needed only for the chosen RTT: * GPBR will serve as the storage for RTC time offset */ - at91sam9263_rtt0_device.num_resources = 2; + at91sam9263_rtt0_device.num_resources = 3; at91sam9263_rtt1_device.num_resources = 1; pdev = &at91sam9263_rtt0_device; r = rtt0_resources; break; case 1: at91sam9263_rtt0_device.num_resources = 1; - at91sam9263_rtt1_device.num_resources = 2; + at91sam9263_rtt1_device.num_resources = 3; pdev = &at91sam9263_rtt1_device; r = rtt1_resources; break; @@ -1047,6 +1051,8 @@ static void __init at91_add_device_rtt_rtc(void) pdev->name = "rtc-at91sam9"; r[1].start = AT91SAM9263_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; r[1].end = r[1].start + 3; + r[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; + r[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; } #else static void __init at91_add_device_rtt_rtc(void) diff --git a/trunk/arch/arm/mach-at91/at91sam9g45_devices.c b/trunk/arch/arm/mach-at91/at91sam9g45_devices.c index 06073996a382..1b47319ca00b 100644 --- a/trunk/arch/arm/mach-at91/at91sam9g45_devices.c +++ b/trunk/arch/arm/mach-at91/at91sam9g45_devices.c @@ -1293,6 +1293,8 @@ static struct resource rtt_resources[] = { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, + }, { + .flags = IORESOURCE_IRQ, } }; @@ -1310,10 +1312,12 @@ static void __init at91_add_device_rtt_rtc(void) * The second resource is needed: * GPBR will serve as the storage for RTC time offset */ - at91sam9g45_rtt_device.num_resources = 2; + at91sam9g45_rtt_device.num_resources = 3; rtt_resources[1].start = AT91SAM9G45_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; rtt_resources[1].end = rtt_resources[1].start + 3; + rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; + rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; } #else static void __init at91_add_device_rtt_rtc(void) diff --git a/trunk/arch/arm/mach-at91/at91sam9rl_devices.c b/trunk/arch/arm/mach-at91/at91sam9rl_devices.c index f09fff932172..b3d365dadef5 
100644 --- a/trunk/arch/arm/mach-at91/at91sam9rl_devices.c +++ b/trunk/arch/arm/mach-at91/at91sam9rl_devices.c @@ -688,6 +688,8 @@ static struct resource rtt_resources[] = { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, + }, { + .flags = IORESOURCE_IRQ, } }; @@ -705,10 +707,12 @@ static void __init at91_add_device_rtt_rtc(void) * The second resource is needed: * GPBR will serve as the storage for RTC time offset */ - at91sam9rl_rtt_device.num_resources = 2; + at91sam9rl_rtt_device.num_resources = 3; rtt_resources[1].start = AT91SAM9RL_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; rtt_resources[1].end = rtt_resources[1].start + 3; + rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; + rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; } #else static void __init at91_add_device_rtt_rtc(void) diff --git a/trunk/arch/arm/mach-at91/clock.c b/trunk/arch/arm/mach-at91/clock.c index de2ec6b8fea7..188c82971ebd 100644 --- a/trunk/arch/arm/mach-at91/clock.c +++ b/trunk/arch/arm/mach-at91/clock.c @@ -63,6 +63,12 @@ EXPORT_SYMBOL_GPL(at91_pmc_base); #define cpu_has_300M_plla() (cpu_is_at91sam9g10()) +#define cpu_has_240M_plla() (cpu_is_at91sam9261() \ + || cpu_is_at91sam9263() \ + || cpu_is_at91sam9rl()) + +#define cpu_has_210M_plla() (cpu_is_at91sam9260()) + #define cpu_has_pllb() (!(cpu_is_at91sam9rl() \ || cpu_is_at91sam9g45() \ || cpu_is_at91sam9x5() \ @@ -706,6 +712,12 @@ static int __init at91_pmc_init(unsigned long main_clock) } else if (cpu_has_800M_plla()) { if (plla.rate_hz > 800000000) pll_overclock = true; + } else if (cpu_has_240M_plla()) { + if (plla.rate_hz > 240000000) + pll_overclock = true; + } else if (cpu_has_210M_plla()) { + if (plla.rate_hz > 210000000) + pll_overclock = true; } else { if (plla.rate_hz > 209000000) pll_overclock = true; diff --git a/trunk/arch/arm/mach-davinci/board-neuros-osd2.c b/trunk/arch/arm/mach-davinci/board-neuros-osd2.c index 5de69f2fcca9..f6b9fc70161b 100644 --- a/trunk/arch/arm/mach-davinci/board-neuros-osd2.c +++ b/trunk/arch/arm/mach-davinci/board-neuros-osd2.c @@ -162,38 +162,6 @@ static void __init davinci_ntosd2_map_io(void) dm644x_init(); } -/* - I2C initialization -*/ -static struct davinci_i2c_platform_data ntosd2_i2c_pdata = { - .bus_freq = 20 /* kHz */, - .bus_delay = 100 /* usec */, -}; - -static struct i2c_board_info __initdata ntosd2_i2c_info[] = { -}; - -static int ntosd2_init_i2c(void) -{ - int status; - - davinci_init_i2c(&ntosd2_i2c_pdata); - status = gpio_request(NTOSD2_MSP430_IRQ, ntosd2_i2c_info[0].type); - if (status == 0) { - status = gpio_direction_input(NTOSD2_MSP430_IRQ); - if (status == 0) { - status = gpio_to_irq(NTOSD2_MSP430_IRQ); - if (status > 0) { - ntosd2_i2c_info[0].irq = status; - i2c_register_board_info(1, - ntosd2_i2c_info, - ARRAY_SIZE(ntosd2_i2c_info)); - } - } - } - return status; -} - static struct davinci_mmc_config davinci_ntosd2_mmc_config = { .wires = 4, .version = MMC_CTLR_VERSION_1 @@ -218,7 +186,6 @@ static __init void davinci_ntosd2_init(void) { struct clk *aemif_clk; struct davinci_soc_info *soc_info = &davinci_soc_info; - int status; aemif_clk = clk_get(NULL, "aemif"); clk_enable(aemif_clk); @@ -242,12 +209,6 @@ static __init void davinci_ntosd2_init(void) platform_add_devices(davinci_ntosd2_devices, ARRAY_SIZE(davinci_ntosd2_devices)); - /* Initialize I2C interface specific for this board */ - status = ntosd2_init_i2c(); - if (status < 0) - pr_warning("davinci_ntosd2_init: msp430 irq setup failed:" - " %d\n", status); - davinci_serial_init(&uart_config); 
dm644x_init_asp(&dm644x_ntosd2_snd_data); diff --git a/trunk/arch/arm/mach-davinci/devices-da8xx.c b/trunk/arch/arm/mach-davinci/devices-da8xx.c index d1624a315c9a..783eab6845c4 100644 --- a/trunk/arch/arm/mach-davinci/devices-da8xx.c +++ b/trunk/arch/arm/mach-davinci/devices-da8xx.c @@ -546,6 +546,7 @@ static struct lcd_ctrl_config lcd_cfg = { .sync_edge = 0, .sync_ctrl = 1, .raster_order = 0, + .fifo_th = 6, }; struct da8xx_lcdc_platform_data sharp_lcd035q3dg01_pdata = { diff --git a/trunk/arch/arm/mach-dove/common.c b/trunk/arch/arm/mach-dove/common.c index 4db5de54b6a7..6321567d8eaa 100644 --- a/trunk/arch/arm/mach-dove/common.c +++ b/trunk/arch/arm/mach-dove/common.c @@ -102,7 +102,8 @@ void __init dove_ehci1_init(void) void __init dove_ge00_init(struct mv643xx_eth_platform_data *eth_data) { orion_ge00_init(eth_data, DOVE_GE00_PHYS_BASE, - IRQ_DOVE_GE00_SUM, IRQ_DOVE_GE00_ERR); + IRQ_DOVE_GE00_SUM, IRQ_DOVE_GE00_ERR, + 1600); } /***************************************************************************** diff --git a/trunk/arch/arm/mach-dove/irq.c b/trunk/arch/arm/mach-dove/irq.c index f07fd16e0c9b..9bc97a5baaa8 100644 --- a/trunk/arch/arm/mach-dove/irq.c +++ b/trunk/arch/arm/mach-dove/irq.c @@ -20,22 +20,6 @@ #include #include "common.h" -static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) -{ - int irqoff; - BUG_ON(irq < IRQ_DOVE_GPIO_0_7 || irq > IRQ_DOVE_HIGH_GPIO); - - irqoff = irq <= IRQ_DOVE_GPIO_16_23 ? irq - IRQ_DOVE_GPIO_0_7 : - 3 + irq - IRQ_DOVE_GPIO_24_31; - - orion_gpio_irq_handler(irqoff << 3); - if (irq == IRQ_DOVE_HIGH_GPIO) { - orion_gpio_irq_handler(40); - orion_gpio_irq_handler(48); - orion_gpio_irq_handler(56); - } -} - static void pmu_irq_mask(struct irq_data *d) { int pin = irq_to_pmu(d->irq); @@ -90,6 +74,27 @@ static void pmu_irq_handler(unsigned int irq, struct irq_desc *desc) } } +static int __initdata gpio0_irqs[4] = { + IRQ_DOVE_GPIO_0_7, + IRQ_DOVE_GPIO_8_15, + IRQ_DOVE_GPIO_16_23, + IRQ_DOVE_GPIO_24_31, +}; + +static int __initdata gpio1_irqs[4] = { + IRQ_DOVE_HIGH_GPIO, + 0, + 0, + 0, +}; + +static int __initdata gpio2_irqs[4] = { + 0, + 0, + 0, + 0, +}; + void __init dove_init_irq(void) { int i; @@ -100,19 +105,14 @@ void __init dove_init_irq(void) /* * Initialize gpiolib for GPIOs 0-71. 
*/ - orion_gpio_init(0, 32, DOVE_GPIO_LO_VIRT_BASE, 0, - IRQ_DOVE_GPIO_START); - irq_set_chained_handler(IRQ_DOVE_GPIO_0_7, gpio_irq_handler); - irq_set_chained_handler(IRQ_DOVE_GPIO_8_15, gpio_irq_handler); - irq_set_chained_handler(IRQ_DOVE_GPIO_16_23, gpio_irq_handler); - irq_set_chained_handler(IRQ_DOVE_GPIO_24_31, gpio_irq_handler); - - orion_gpio_init(32, 32, DOVE_GPIO_HI_VIRT_BASE, 0, - IRQ_DOVE_GPIO_START + 32); - irq_set_chained_handler(IRQ_DOVE_HIGH_GPIO, gpio_irq_handler); - - orion_gpio_init(64, 8, DOVE_GPIO2_VIRT_BASE, 0, - IRQ_DOVE_GPIO_START + 64); + orion_gpio_init(NULL, 0, 32, (void __iomem *)DOVE_GPIO_LO_VIRT_BASE, 0, + IRQ_DOVE_GPIO_START, gpio0_irqs); + + orion_gpio_init(NULL, 32, 32, (void __iomem *)DOVE_GPIO_HI_VIRT_BASE, 0, + IRQ_DOVE_GPIO_START + 32, gpio1_irqs); + + orion_gpio_init(NULL, 64, 8, (void __iomem *)DOVE_GPIO2_VIRT_BASE, 0, + IRQ_DOVE_GPIO_START + 64, gpio2_irqs); /* * Mask and clear PMU interrupts diff --git a/trunk/arch/arm/mach-exynos/mach-origen.c b/trunk/arch/arm/mach-exynos/mach-origen.c index 5ca80307d6d7..4e574c24581c 100644 --- a/trunk/arch/arm/mach-exynos/mach-origen.c +++ b/trunk/arch/arm/mach-exynos/mach-origen.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include @@ -734,6 +735,11 @@ static void __init origen_bt_setup(void) s3c_gpio_setpull(EXYNOS4_GPX2(2), S3C_GPIO_PULL_NONE); } +/* I2C module and id for HDMIPHY */ +static struct i2c_board_info hdmiphy_info = { + I2C_BOARD_INFO("hdmiphy-exynos4210", 0x38), +}; + static void s5p_tv_setup(void) { /* Direct HPD to HDMI chip */ @@ -781,6 +787,7 @@ static void __init origen_machine_init(void) s5p_tv_setup(); s5p_i2c_hdmiphy_set_platdata(NULL); + s5p_hdmi_set_platdata(&hdmiphy_info, NULL, 0); #ifdef CONFIG_DRM_EXYNOS s5p_device_fimd0.dev.platform_data = &drm_fimd_pdata; diff --git a/trunk/arch/arm/mach-exynos/mach-smdkv310.c b/trunk/arch/arm/mach-exynos/mach-smdkv310.c index 3cfa688d274a..73f2bce097e1 100644 --- a/trunk/arch/arm/mach-exynos/mach-smdkv310.c +++ b/trunk/arch/arm/mach-exynos/mach-smdkv310.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -354,6 +355,11 @@ static struct platform_pwm_backlight_data smdkv310_bl_data = { .pwm_period_ns = 1000, }; +/* I2C module and id for HDMIPHY */ +static struct i2c_board_info hdmiphy_info = { + I2C_BOARD_INFO("hdmiphy-exynos4210", 0x38), +}; + static void s5p_tv_setup(void) { /* direct HPD to HDMI chip */ @@ -388,6 +394,7 @@ static void __init smdkv310_machine_init(void) s5p_tv_setup(); s5p_i2c_hdmiphy_set_platdata(NULL); + s5p_hdmi_set_platdata(&hdmiphy_info, NULL, 0); samsung_keypad_set_platdata(&smdkv310_keypad_data); diff --git a/trunk/arch/arm/mach-exynos/pm_domains.c b/trunk/arch/arm/mach-exynos/pm_domains.c index 373c3c00d24c..c0bc83a7663e 100644 --- a/trunk/arch/arm/mach-exynos/pm_domains.c +++ b/trunk/arch/arm/mach-exynos/pm_domains.c @@ -115,7 +115,7 @@ static __init int exynos_pm_dt_parse_domains(void) } #endif /* CONFIG_OF */ -static __init void exynos_pm_add_dev_to_genpd(struct platform_device *pdev, +static __init __maybe_unused void exynos_pm_add_dev_to_genpd(struct platform_device *pdev, struct exynos_pm_domain *pd) { if (pdev->dev.bus) { diff --git a/trunk/arch/arm/mach-gemini/irq.c b/trunk/arch/arm/mach-gemini/irq.c index ca70e5fcc7ac..020852d3bdd8 100644 --- a/trunk/arch/arm/mach-gemini/irq.c +++ b/trunk/arch/arm/mach-gemini/irq.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #define IRQ_SOURCE(base_addr) (base_addr + 0x00) diff --git 
a/trunk/arch/arm/mach-imx/Makefile b/trunk/arch/arm/mach-imx/Makefile index 07f7c226e4cf..d004d37ad9d8 100644 --- a/trunk/arch/arm/mach-imx/Makefile +++ b/trunk/arch/arm/mach-imx/Makefile @@ -9,7 +9,8 @@ obj-$(CONFIG_SOC_IMX27) += clk-imx27.o mm-imx27.o ehci-imx27.o obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clk-imx31.o iomux-imx31.o ehci-imx31.o pm-imx3.o obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clk-imx35.o ehci-imx35.o pm-imx3.o -obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clk-imx51-imx53.o ehci-imx5.o pm-imx5.o cpu_op-mx51.o +imx5-pm-$(CONFIG_PM) += pm-imx5.o +obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clk-imx51-imx53.o ehci-imx5.o $(imx5-pm-y) cpu_op-mx51.o obj-$(CONFIG_COMMON_CLK) += clk-pllv1.o clk-pllv2.o clk-pllv3.o clk-gate2.o \ clk-pfd.o clk-busy.o @@ -70,14 +71,13 @@ obj-$(CONFIG_DEBUG_LL) += lluart.o obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o obj-$(CONFIG_HAVE_IMX_MMDC) += mmdc.o obj-$(CONFIG_HAVE_IMX_SRC) += src.o -obj-$(CONFIG_CPU_V7) += head-v7.o -AFLAGS_head-v7.o :=-Wa,-march=armv7-a -obj-$(CONFIG_SMP) += platsmp.o +AFLAGS_headsmp.o :=-Wa,-march=armv7-a +obj-$(CONFIG_SMP) += headsmp.o platsmp.o obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o ifeq ($(CONFIG_PM),y) -obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o +obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o headsmp.o endif # i.MX5 based machines diff --git a/trunk/arch/arm/mach-imx/clk-imx27.c b/trunk/arch/arm/mach-imx/clk-imx27.c index 7aa6313fb167..f69ca4680049 100644 --- a/trunk/arch/arm/mach-imx/clk-imx27.c +++ b/trunk/arch/arm/mach-imx/clk-imx27.c @@ -223,7 +223,7 @@ int __init mx27_clocks_init(unsigned long fref) clk_register_clkdev(clk[per3_gate], "per", "imx-fb.0"); clk_register_clkdev(clk[lcdc_ipg_gate], "ipg", "imx-fb.0"); clk_register_clkdev(clk[lcdc_ahb_gate], "ahb", "imx-fb.0"); - clk_register_clkdev(clk[csi_ahb_gate], NULL, "mx2-camera.0"); + clk_register_clkdev(clk[csi_ahb_gate], "ahb", "mx2-camera.0"); clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc"); clk_register_clkdev(clk[usb_ipg_gate], "ipg", "fsl-usb2-udc"); clk_register_clkdev(clk[usb_ahb_gate], "ahb", "fsl-usb2-udc"); @@ -250,8 +250,10 @@ int __init mx27_clocks_init(unsigned long fref) clk_register_clkdev(clk[i2c2_ipg_gate], NULL, "imx-i2c.1"); clk_register_clkdev(clk[owire_ipg_gate], NULL, "mxc_w1.0"); clk_register_clkdev(clk[kpp_ipg_gate], NULL, "imx-keypad"); - clk_register_clkdev(clk[emma_ahb_gate], "ahb", "imx-emma"); - clk_register_clkdev(clk[emma_ipg_gate], "ipg", "imx-emma"); + clk_register_clkdev(clk[emma_ahb_gate], "emma-ahb", "mx2-camera.0"); + clk_register_clkdev(clk[emma_ipg_gate], "emma-ipg", "mx2-camera.0"); + clk_register_clkdev(clk[emma_ahb_gate], "ahb", "m2m-emmaprp.0"); + clk_register_clkdev(clk[emma_ipg_gate], "ipg", "m2m-emmaprp.0"); clk_register_clkdev(clk[iim_ipg_gate], "iim", NULL); clk_register_clkdev(clk[gpio_ipg_gate], "gpio", NULL); clk_register_clkdev(clk[brom_ahb_gate], "brom", NULL); diff --git a/trunk/arch/arm/mach-imx/clk-imx31.c b/trunk/arch/arm/mach-imx/clk-imx31.c index 8e19e70f90f9..1253af2d9971 100644 --- a/trunk/arch/arm/mach-imx/clk-imx31.c +++ b/trunk/arch/arm/mach-imx/clk-imx31.c @@ -130,7 +130,7 @@ int __init mx31_clocks_init(unsigned long fref) clk_register_clkdev(clk[nfc], NULL, "mxc_nand.0"); clk_register_clkdev(clk[ipu_gate], NULL, "ipu-core"); clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb"); - clk_register_clkdev(clk[kpp_gate], "kpp", NULL); + clk_register_clkdev(clk[kpp_gate], NULL, "imx-keypad"); clk_register_clkdev(clk[usb_div_post], "per", 
"mxc-ehci.0"); clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.0"); clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0"); diff --git a/trunk/arch/arm/mach-imx/clk-imx51-imx53.c b/trunk/arch/arm/mach-imx/clk-imx51-imx53.c index f6086693ebd2..4bdcaa97bd98 100644 --- a/trunk/arch/arm/mach-imx/clk-imx51-imx53.c +++ b/trunk/arch/arm/mach-imx/clk-imx51-imx53.c @@ -303,6 +303,7 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil, clk_prepare_enable(clk[aips_tz2]); /* fec */ clk_prepare_enable(clk[spba]); clk_prepare_enable(clk[emi_fast_gate]); /* fec */ + clk_prepare_enable(clk[emi_slow_gate]); /* eim */ clk_prepare_enable(clk[tmax1]); clk_prepare_enable(clk[tmax2]); /* esdhc2, fec */ clk_prepare_enable(clk[tmax3]); /* esdhc1, esdhc4 */ diff --git a/trunk/arch/arm/mach-imx/clk-imx6q.c b/trunk/arch/arm/mach-imx/clk-imx6q.c index ea89520b6e22..4233d9e3531d 100644 --- a/trunk/arch/arm/mach-imx/clk-imx6q.c +++ b/trunk/arch/arm/mach-imx/clk-imx6q.c @@ -152,7 +152,7 @@ enum mx6q_clks { ssi2, ssi3, uart_ipg, uart_serial, usboh3, usdhc1, usdhc2, usdhc3, usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg, pll4_audio, pll5_video, pll6_mlb, pll7_usb_host, pll8_enet, ssi1_ipg, - ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2, + ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2, ldb_di0_div_3_5, ldb_di1_div_3_5, clk_max }; @@ -288,8 +288,10 @@ int __init mx6q_clocks_init(void) clk[gpu3d_shader] = imx_clk_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3); clk[ipu1_podf] = imx_clk_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3); clk[ipu2_podf] = imx_clk_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3); - clk[ldb_di0_podf] = imx_clk_divider("ldb_di0_podf", "ldb_di0_sel", base + 0x20, 10, 1); - clk[ldb_di1_podf] = imx_clk_divider("ldb_di1_podf", "ldb_di1_sel", base + 0x20, 11, 1); + clk[ldb_di0_div_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7); + clk[ldb_di0_podf] = imx_clk_divider("ldb_di0_podf", "ldb_di0_div_3_5", base + 0x20, 10, 1); + clk[ldb_di1_div_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7); + clk[ldb_di1_podf] = imx_clk_divider("ldb_di1_podf", "ldb_di1_div_3_5", base + 0x20, 11, 1); clk[ipu1_di0_pre] = imx_clk_divider("ipu1_di0_pre", "ipu1_di0_pre_sel", base + 0x34, 3, 3); clk[ipu1_di1_pre] = imx_clk_divider("ipu1_di1_pre", "ipu1_di1_pre_sel", base + 0x34, 12, 3); clk[ipu2_di0_pre] = imx_clk_divider("ipu2_di0_pre", "ipu2_di0_pre_sel", base + 0x38, 3, 3); diff --git a/trunk/arch/arm/mach-imx/head-v7.S b/trunk/arch/arm/mach-imx/headsmp.S similarity index 100% rename from trunk/arch/arm/mach-imx/head-v7.S rename to trunk/arch/arm/mach-imx/headsmp.S diff --git a/trunk/arch/arm/mach-imx/hotplug.c b/trunk/arch/arm/mach-imx/hotplug.c index 20ed2d56c1af..f8f7437c83b8 100644 --- a/trunk/arch/arm/mach-imx/hotplug.c +++ b/trunk/arch/arm/mach-imx/hotplug.c @@ -42,22 +42,6 @@ static inline void cpu_enter_lowpower(void) : "cc"); } -static inline void cpu_leave_lowpower(void) -{ - unsigned int v; - - asm volatile( - "mrc p15, 0, %0, c1, c0, 0\n" - " orr %0, %0, %1\n" - " mcr p15, 0, %0, c1, c0, 0\n" - " mrc p15, 0, %0, c1, c0, 1\n" - " orr %0, %0, %2\n" - " mcr p15, 0, %0, c1, c0, 1\n" - : "=&r" (v) - : "Ir" (CR_C), "Ir" (0x40) - : "cc"); -} - /* * platform-specific code to shutdown a CPU * @@ -67,11 +51,10 @@ void platform_cpu_die(unsigned int cpu) { cpu_enter_lowpower(); imx_enable_cpu(cpu, false); - cpu_do_idle(); - cpu_leave_lowpower(); - /* We should never return from idle */ - panic("cpu %d unexpectedly exit from shutdown\n", 
cpu); + /* spin here until hardware takes it down */ + while (1) + ; } int platform_cpu_disable(unsigned int cpu) diff --git a/trunk/arch/arm/mach-imx/mach-imx6q.c b/trunk/arch/arm/mach-imx/mach-imx6q.c index 5ec0608f2a76..045b3f6a387d 100644 --- a/trunk/arch/arm/mach-imx/mach-imx6q.c +++ b/trunk/arch/arm/mach-imx/mach-imx6q.c @@ -71,7 +71,7 @@ void imx6q_restart(char mode, const char *cmd) /* For imx6q sabrelite board: set KSZ9021RN RGMII pad skew */ static int ksz9021rn_phy_fixup(struct phy_device *phydev) { - if (IS_ENABLED(CONFIG_PHYLIB)) { + if (IS_BUILTIN(CONFIG_PHYLIB)) { /* min rx data delay */ phy_write(phydev, 0x0b, 0x8105); phy_write(phydev, 0x0c, 0x0000); @@ -112,7 +112,7 @@ static void __init imx6q_sabrelite_cko1_setup(void) static void __init imx6q_sabrelite_init(void) { - if (IS_ENABLED(CONFIG_PHYLIB)) + if (IS_BUILTIN(CONFIG_PHYLIB)) phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK, ksz9021rn_phy_fixup); imx6q_sabrelite_cko1_setup(); diff --git a/trunk/arch/arm/mach-integrator/core.c b/trunk/arch/arm/mach-integrator/core.c index ebf680bebdf2..3fa6c51390da 100644 --- a/trunk/arch/arm/mach-integrator/core.c +++ b/trunk/arch/arm/mach-integrator/core.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include diff --git a/trunk/arch/arm/mach-integrator/integrator_ap.c b/trunk/arch/arm/mach-integrator/integrator_ap.c index 7b1055c8e0b9..3b2267529f5e 100644 --- a/trunk/arch/arm/mach-integrator/integrator_ap.c +++ b/trunk/arch/arm/mach-integrator/integrator_ap.c @@ -456,7 +456,7 @@ static void __init ap_init_timer(void) clk = clk_get_sys("ap_timer", NULL); BUG_ON(IS_ERR(clk)); - clk_enable(clk); + clk_prepare_enable(clk); rate = clk_get_rate(clk); writel(0, TIMER0_VA_BASE + TIMER_CTRL); diff --git a/trunk/arch/arm/mach-kirkwood/Kconfig b/trunk/arch/arm/mach-kirkwood/Kconfig index 199764fe0fb0..ca5c15a4e626 100644 --- a/trunk/arch/arm/mach-kirkwood/Kconfig +++ b/trunk/arch/arm/mach-kirkwood/Kconfig @@ -80,6 +80,35 @@ config MACH_IB62X0_DT RaidSonic IB-NAS6210 & IB-NAS6220 devices, using Flattened Device Tree. +config MACH_TS219_DT + bool "Device Tree for QNAP TS-11X, TS-21X NAS" + select ARCH_KIRKWOOD_DT + select ARM_APPENDED_DTB + select ARM_ATAG_DTB_COMPAT + help + Say 'Y' here if you want your kernel to support the QNAP + TS-110, TS-119, TS-119P+, TS-210, TS-219, TS-219P and + TS-219P+ Turbo NAS devices using Flattened Device Tree. + There are two different Device Tree descriptions, depending + on whether the board uses the MV6281 or the MV6282. If you + have the wrong one, the buttons will not + work. + +config MACH_GOFLEXNET_DT + bool "Seagate GoFlex Net (Flattened Device Tree)" + select ARCH_KIRKWOOD_DT + help + Say 'Y' here if you want your kernel to support the + Seagate GoFlex Net (Flattened Device Tree). + +config MACH_LSXL_DT + bool "Buffalo Linkstation LS-XHL, LS-CHLv2 (Flattened Device Tree)" + select ARCH_KIRKWOOD_DT + help + Say 'Y' here if you want your kernel to support the + Buffalo Linkstation LS-XHL & LS-CHLv2 devices, using + Flattened Device Tree. 
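For reference, the three Kconfig options added above only build the legacy board files; at boot the generic Kirkwood DT machine picks the matching board hooks by compatible string, as the board-dt.c hunk further down in this patch does. A minimal sketch of that dispatch, assuming the helpers this patch adds to common.h; the wrapper function name here is illustrative only:

/* Sketch only: mirrors the of_machine_is_compatible() dispatch added to
 * kirkwood_dt_init() later in this patch. */
#include <linux/of.h>
#include "common.h"	/* qnap_dt_ts219_init(), goflexnet_init(), lsxl_init() */

static void example_kirkwood_board_fixups(void)
{
	if (of_machine_is_compatible("qnap,ts219"))
		qnap_dt_ts219_init();	/* legacy setup not yet described in DT */

	if (of_machine_is_compatible("seagate,goflexnet"))
		goflexnet_init();

	if (of_machine_is_compatible("buffalo,lsxl"))
		lsxl_init();
}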
+ config MACH_TS219 bool "QNAP TS-110, TS-119, TS-119P+, TS-210, TS-219, TS-219P and TS-219P+ Turbo NAS" help diff --git a/trunk/arch/arm/mach-kirkwood/Makefile b/trunk/arch/arm/mach-kirkwood/Makefile index d2b05907b10e..055c85a1cc46 100644 --- a/trunk/arch/arm/mach-kirkwood/Makefile +++ b/trunk/arch/arm/mach-kirkwood/Makefile @@ -25,3 +25,6 @@ obj-$(CONFIG_MACH_DREAMPLUG_DT) += board-dreamplug.o obj-$(CONFIG_MACH_ICONNECT_DT) += board-iconnect.o obj-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += board-dnskw.o obj-$(CONFIG_MACH_IB62X0_DT) += board-ib62x0.o +obj-$(CONFIG_MACH_TS219_DT) += board-ts219.o tsx1x-common.o +obj-$(CONFIG_MACH_GOFLEXNET_DT) += board-goflexnet.o +obj-$(CONFIG_MACH_LSXL_DT) += board-lsxl.o diff --git a/trunk/arch/arm/mach-kirkwood/Makefile.boot b/trunk/arch/arm/mach-kirkwood/Makefile.boot index 02edbdf5b065..a13299d758e1 100644 --- a/trunk/arch/arm/mach-kirkwood/Makefile.boot +++ b/trunk/arch/arm/mach-kirkwood/Makefile.boot @@ -7,3 +7,8 @@ dtb-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += kirkwood-dns320.dtb dtb-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += kirkwood-dns325.dtb dtb-$(CONFIG_MACH_ICONNECT_DT) += kirkwood-iconnect.dtb dtb-$(CONFIG_MACH_IB62X0_DT) += kirkwood-ib62x0.dtb +dtb-$(CONFIG_MACH_TS219_DT) += kirkwood-ts219-6281.dtb +dtb-$(CONFIG_MACH_TS219_DT) += kirkwood-ts219-6282.dtb +dtb-$(CONFIG_MACH_GOFLEXNET_DT) += kirkwood-goflexnet.dtb +dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lschlv2.dtb +dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lsxhl.dtb diff --git a/trunk/arch/arm/mach-kirkwood/board-dnskw.c b/trunk/arch/arm/mach-kirkwood/board-dnskw.c index 58c2d68f9443..4ab35065a144 100644 --- a/trunk/arch/arm/mach-kirkwood/board-dnskw.c +++ b/trunk/arch/arm/mach-kirkwood/board-dnskw.c @@ -14,13 +14,11 @@ #include #include #include -#include #include #include #include #include #include -#include #include #include #include @@ -35,10 +33,6 @@ static struct mv643xx_eth_platform_data dnskw_ge00_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(8), }; -static struct mv_sata_platform_data dnskw_sata_data = { - .n_ports = 2, -}; - static unsigned int dnskw_mpp_config[] __initdata = { MPP13_UART1_TXD, /* Custom ... */ MPP14_UART1_RXD, /* ... 
Controller (DNS-320 only) */ @@ -73,132 +67,6 @@ static unsigned int dnskw_mpp_config[] __initdata = { 0 }; -static struct gpio_led dns325_led_pins[] = { - { - .name = "dns325:white:power", - .gpio = 26, - .active_low = 1, - .default_trigger = "default-on", - }, - { - .name = "dns325:white:usb", - .gpio = 43, - .active_low = 1, - }, - { - .name = "dns325:red:l_hdd", - .gpio = 28, - .active_low = 1, - }, - { - .name = "dns325:red:r_hdd", - .gpio = 27, - .active_low = 1, - }, - { - .name = "dns325:red:usb", - .gpio = 29, - .active_low = 1, - }, -}; - -static struct gpio_led_platform_data dns325_led_data = { - .num_leds = ARRAY_SIZE(dns325_led_pins), - .leds = dns325_led_pins, -}; - -static struct platform_device dns325_led_device = { - .name = "leds-gpio", - .id = -1, - .dev = { - .platform_data = &dns325_led_data, - }, -}; - -static struct gpio_led dns320_led_pins[] = { - { - .name = "dns320:blue:power", - .gpio = 26, - .active_low = 1, - .default_trigger = "default-on", - }, - { - .name = "dns320:blue:usb", - .gpio = 43, - .active_low = 1, - }, - { - .name = "dns320:orange:l_hdd", - .gpio = 28, - .active_low = 1, - }, - { - .name = "dns320:orange:r_hdd", - .gpio = 27, - .active_low = 1, - }, - { - .name = "dns320:orange:usb", - .gpio = 35, - .active_low = 1, - }, -}; - -static struct gpio_led_platform_data dns320_led_data = { - .num_leds = ARRAY_SIZE(dns320_led_pins), - .leds = dns320_led_pins, -}; - -static struct platform_device dns320_led_device = { - .name = "leds-gpio", - .id = -1, - .dev = { - .platform_data = &dns320_led_data, - }, -}; - -static struct i2c_board_info dns325_i2c_board_info[] __initdata = { - { - I2C_BOARD_INFO("lm75", 0x48), - }, - /* Something at 0x0c also */ -}; - -static struct gpio_keys_button dnskw_button_pins[] = { - { - .code = KEY_POWER, - .gpio = 34, - .desc = "Power button", - .active_low = 1, - }, - { - .code = KEY_EJECTCD, - .gpio = 47, - .desc = "USB unmount button", - .active_low = 1, - }, - { - .code = KEY_RESTART, - .gpio = 48, - .desc = "Reset button", - .active_low = 1, - }, -}; - -static struct gpio_keys_platform_data dnskw_button_data = { - .buttons = dnskw_button_pins, - .nbuttons = ARRAY_SIZE(dnskw_button_pins), -}; - -static struct platform_device dnskw_button_device = { - .name = "gpio-keys", - .id = -1, - .num_resources = 0, - .dev = { - .platform_data = &dnskw_button_data, - } -}; - /* Fan: ADDA AD045HB-G73 40mm 6000rpm@5v */ static struct gpio_fan_speed dnskw_fan_speed[] = { { 0, 0 }, @@ -245,20 +113,9 @@ void __init dnskw_init(void) kirkwood_ehci_init(); kirkwood_ge00_init(&dnskw_ge00_data); - kirkwood_sata_init(&dnskw_sata_data); - kirkwood_i2c_init(); - platform_device_register(&dnskw_button_device); platform_device_register(&dnskw_fan_device); - if (of_machine_is_compatible("dlink,dns-325")) { - i2c_register_board_info(0, dns325_i2c_board_info, - ARRAY_SIZE(dns325_i2c_board_info)); - platform_device_register(&dns325_led_device); - - } else if (of_machine_is_compatible("dlink,dns-320")) - platform_device_register(&dns320_led_device); - /* Register power-off GPIO. 
*/ if (gpio_request(36, "dnskw:power:off") == 0 && gpio_direction_output(36, 0) == 0) diff --git a/trunk/arch/arm/mach-kirkwood/board-dreamplug.c b/trunk/arch/arm/mach-kirkwood/board-dreamplug.c index 55e357ab2923..aeb234d0d0e3 100644 --- a/trunk/arch/arm/mach-kirkwood/board-dreamplug.c +++ b/trunk/arch/arm/mach-kirkwood/board-dreamplug.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -23,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -36,42 +34,6 @@ #include "common.h" #include "mpp.h" -struct mtd_partition dreamplug_partitions[] = { - { - .name = "u-boot", - .size = SZ_512K, - .offset = 0, - }, - { - .name = "u-boot env", - .size = SZ_64K, - .offset = SZ_512K + SZ_512K, - }, - { - .name = "dtb", - .size = SZ_64K, - .offset = SZ_512K + SZ_512K + SZ_512K, - }, -}; - -static const struct flash_platform_data dreamplug_spi_slave_data = { - .type = "mx25l1606e", - .name = "spi_flash", - .parts = dreamplug_partitions, - .nr_parts = ARRAY_SIZE(dreamplug_partitions), -}; - -static struct spi_board_info __initdata dreamplug_spi_slave_info[] = { - { - .modalias = "m25p80", - .platform_data = &dreamplug_spi_slave_data, - .irq = -1, - .max_speed_hz = 50000000, - .bus_num = 0, - .chip_select = 0, - }, -}; - static struct mv643xx_eth_platform_data dreamplug_ge00_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(0), }; @@ -80,45 +42,10 @@ static struct mv643xx_eth_platform_data dreamplug_ge01_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(1), }; -static struct mv_sata_platform_data dreamplug_sata_data = { - .n_ports = 1, -}; - static struct mvsdio_platform_data dreamplug_mvsdio_data = { /* unfortunately the CD signal has not been connected */ }; -static struct gpio_led dreamplug_led_pins[] = { - { - .name = "dreamplug:blue:bluetooth", - .gpio = 47, - .active_low = 1, - }, - { - .name = "dreamplug:green:wifi", - .gpio = 48, - .active_low = 1, - }, - { - .name = "dreamplug:green:wifi_ap", - .gpio = 49, - .active_low = 1, - }, -}; - -static struct gpio_led_platform_data dreamplug_led_data = { - .leds = dreamplug_led_pins, - .num_leds = ARRAY_SIZE(dreamplug_led_pins), -}; - -static struct platform_device dreamplug_leds = { - .name = "leds-gpio", - .id = -1, - .dev = { - .platform_data = &dreamplug_led_data, - } -}; - static unsigned int dreamplug_mpp_config[] __initdata = { MPP0_SPI_SCn, MPP1_SPI_MOSI, @@ -137,15 +64,8 @@ void __init dreamplug_init(void) */ kirkwood_mpp_conf(dreamplug_mpp_config); - spi_register_board_info(dreamplug_spi_slave_info, - ARRAY_SIZE(dreamplug_spi_slave_info)); - kirkwood_spi_init(); - kirkwood_ehci_init(); kirkwood_ge00_init(&dreamplug_ge00_data); kirkwood_ge01_init(&dreamplug_ge01_data); - kirkwood_sata_init(&dreamplug_sata_data); kirkwood_sdio_init(&dreamplug_mvsdio_data); - - platform_device_register(&dreamplug_leds); } diff --git a/trunk/arch/arm/mach-kirkwood/board-dt.c b/trunk/arch/arm/mach-kirkwood/board-dt.c index edc3f8a9d45e..e4eb450de301 100644 --- a/trunk/arch/arm/mach-kirkwood/board-dt.c +++ b/trunk/arch/arm/mach-kirkwood/board-dt.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "common.h" static struct of_device_id kirkwood_dt_match_table[] __initdata = { @@ -25,6 +26,16 @@ static struct of_device_id kirkwood_dt_match_table[] __initdata = { { } }; +struct of_dev_auxdata kirkwood_auxdata_lookup[] __initdata = { + OF_DEV_AUXDATA("marvell,orion-spi", 0xf1010600, "orion_spi.0", NULL), + OF_DEV_AUXDATA("marvell,mv64xxx-i2c", 0xf1011000, "mv64xxx_i2c.0", + NULL), + 
OF_DEV_AUXDATA("marvell,orion-wdt", 0xf1020300, "orion_wdt", NULL), + OF_DEV_AUXDATA("marvell,orion-sata", 0xf1080000, "sata_mv.0", NULL), + OF_DEV_AUXDATA("marvell,orion-nand", 0xf4000000, "orion_nand", NULL), + {}, +}; + static void __init kirkwood_dt_init(void) { pr_info("Kirkwood: %s, TCLK=%d.\n", kirkwood_id(), kirkwood_tclk); @@ -47,7 +58,6 @@ static void __init kirkwood_dt_init(void) kirkwood_clk_init(); /* internal devices that every board has */ - kirkwood_wdt_init(); kirkwood_xor0_init(); kirkwood_xor1_init(); kirkwood_crypto_init(); @@ -68,7 +78,17 @@ static void __init kirkwood_dt_init(void) if (of_machine_is_compatible("raidsonic,ib-nas62x0")) ib62x0_init(); - of_platform_populate(NULL, kirkwood_dt_match_table, NULL, NULL); + if (of_machine_is_compatible("qnap,ts219")) + qnap_dt_ts219_init(); + + if (of_machine_is_compatible("seagate,goflexnet")) + goflexnet_init(); + + if (of_machine_is_compatible("buffalo,lsxl")) + lsxl_init(); + + of_platform_populate(NULL, kirkwood_dt_match_table, + kirkwood_auxdata_lookup, NULL); } static const char *kirkwood_dt_board_compat[] = { @@ -77,6 +97,9 @@ static const char *kirkwood_dt_board_compat[] = { "dlink,dns-325", "iom,iconnect", "raidsonic,ib-nas62x0", + "qnap,ts219", + "seagate,goflexnet", + "buffalo,lsxl", NULL }; @@ -84,7 +107,7 @@ DT_MACHINE_START(KIRKWOOD_DT, "Marvell Kirkwood (Flattened Device Tree)") /* Maintainer: Jason Cooper */ .map_io = kirkwood_map_io, .init_early = kirkwood_init_early, - .init_irq = kirkwood_init_irq, + .init_irq = orion_dt_init_irq, .timer = &kirkwood_timer, .init_machine = kirkwood_dt_init, .restart = kirkwood_restart, diff --git a/trunk/arch/arm/mach-kirkwood/board-goflexnet.c b/trunk/arch/arm/mach-kirkwood/board-goflexnet.c new file mode 100644 index 000000000000..413e2c8ef5fe --- /dev/null +++ b/trunk/arch/arm/mach-kirkwood/board-goflexnet.c @@ -0,0 +1,71 @@ +/* + * Copyright 2012 (C), Jason Cooper + * + * arch/arm/mach-kirkwood/board-goflexnet.c + * + * Seagate GoFlext Net Board Init for drivers not converted to + * flattened device tree yet. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + * + * Copied and modified for Seagate GoFlex Net support by + * Joshua Coombs based on ArchLinux ARM's + * GoFlex kernel patches. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "common.h" +#include "mpp.h" + +static struct mv643xx_eth_platform_data goflexnet_ge00_data = { + .phy_addr = MV643XX_ETH_PHY_ADDR(0), +}; + +static unsigned int goflexnet_mpp_config[] __initdata = { + MPP29_GPIO, /* USB Power Enable */ + MPP47_GPIO, /* LED Orange */ + MPP46_GPIO, /* LED Green */ + MPP45_GPIO, /* LED Left Capacity 3 */ + MPP44_GPIO, /* LED Left Capacity 2 */ + MPP43_GPIO, /* LED Left Capacity 1 */ + MPP42_GPIO, /* LED Left Capacity 0 */ + MPP41_GPIO, /* LED Right Capacity 3 */ + MPP40_GPIO, /* LED Right Capacity 2 */ + MPP39_GPIO, /* LED Right Capacity 1 */ + MPP38_GPIO, /* LED Right Capacity 0 */ + 0 +}; + +void __init goflexnet_init(void) +{ + /* + * Basic setup. Needs to be called early. 
+ */ + kirkwood_mpp_conf(goflexnet_mpp_config); + + if (gpio_request(29, "USB Power Enable") != 0 || + gpio_direction_output(29, 1) != 0) + pr_err("can't setup GPIO 29 (USB Power Enable)\n"); + kirkwood_ehci_init(); + + kirkwood_ge00_init(&goflexnet_ge00_data); +} diff --git a/trunk/arch/arm/mach-kirkwood/board-ib62x0.c b/trunk/arch/arm/mach-kirkwood/board-ib62x0.c index eddf1df8891f..cfc47f80e734 100644 --- a/trunk/arch/arm/mach-kirkwood/board-ib62x0.c +++ b/trunk/arch/arm/mach-kirkwood/board-ib62x0.c @@ -18,9 +18,7 @@ #include #include #include -#include #include -#include #include #include #include @@ -33,10 +31,6 @@ static struct mv643xx_eth_platform_data ib62x0_ge00_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(8), }; -static struct mv_sata_platform_data ib62x0_sata_data = { - .n_ports = 2, -}; - static unsigned int ib62x0_mpp_config[] __initdata = { MPP0_NF_IO2, MPP1_NF_IO3, @@ -55,69 +49,6 @@ static unsigned int ib62x0_mpp_config[] __initdata = { 0 }; -static struct gpio_led ib62x0_led_pins[] = { - { - .name = "ib62x0:green:os", - .default_trigger = "default-on", - .gpio = 25, - .active_low = 0, - }, - { - .name = "ib62x0:red:os", - .default_trigger = "none", - .gpio = 22, - .active_low = 0, - }, - { - .name = "ib62x0:red:usb_copy", - .default_trigger = "none", - .gpio = 27, - .active_low = 0, - }, -}; - -static struct gpio_led_platform_data ib62x0_led_data = { - .leds = ib62x0_led_pins, - .num_leds = ARRAY_SIZE(ib62x0_led_pins), -}; - -static struct platform_device ib62x0_led_device = { - .name = "leds-gpio", - .id = -1, - .dev = { - .platform_data = &ib62x0_led_data, - } -}; - -static struct gpio_keys_button ib62x0_button_pins[] = { - { - .code = KEY_COPY, - .gpio = 29, - .desc = "USB Copy", - .active_low = 1, - }, - { - .code = KEY_RESTART, - .gpio = 28, - .desc = "Reset", - .active_low = 1, - }, -}; - -static struct gpio_keys_platform_data ib62x0_button_data = { - .buttons = ib62x0_button_pins, - .nbuttons = ARRAY_SIZE(ib62x0_button_pins), -}; - -static struct platform_device ib62x0_button_device = { - .name = "gpio-keys", - .id = -1, - .num_resources = 0, - .dev = { - .platform_data = &ib62x0_button_data, - } -}; - static void ib62x0_power_off(void) { gpio_set_value(IB62X0_GPIO_POWER_OFF, 1); @@ -132,9 +63,6 @@ void __init ib62x0_init(void) kirkwood_ehci_init(); kirkwood_ge00_init(&ib62x0_ge00_data); - kirkwood_sata_init(&ib62x0_sata_data); - platform_device_register(&ib62x0_led_device); - platform_device_register(&ib62x0_button_device); if (gpio_request(IB62X0_GPIO_POWER_OFF, "ib62x0:power:off") == 0 && gpio_direction_output(IB62X0_GPIO_POWER_OFF, 0) == 0) pm_power_off = ib62x0_power_off; diff --git a/trunk/arch/arm/mach-kirkwood/board-iconnect.c b/trunk/arch/arm/mach-kirkwood/board-iconnect.c index b0d3cc49269d..d7a9198ed300 100644 --- a/trunk/arch/arm/mach-kirkwood/board-iconnect.c +++ b/trunk/arch/arm/mach-kirkwood/board-iconnect.c @@ -19,8 +19,6 @@ #include #include #include -#include -#include #include #include #include @@ -32,50 +30,6 @@ static struct mv643xx_eth_platform_data iconnect_ge00_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(11), }; -static struct gpio_led iconnect_led_pins[] = { - { - .name = "led_level", - .gpio = 41, - .default_trigger = "default-on", - }, { - .name = "power:blue", - .gpio = 42, - .default_trigger = "timer", - }, { - .name = "power:red", - .gpio = 43, - }, { - .name = "usb1:blue", - .gpio = 44, - }, { - .name = "usb2:blue", - .gpio = 45, - }, { - .name = "usb3:blue", - .gpio = 46, - }, { - .name = "usb4:blue", - .gpio = 47, - }, { - .name = 
"otb:blue", - .gpio = 48, - }, -}; - -static struct gpio_led_platform_data iconnect_led_data = { - .leds = iconnect_led_pins, - .num_leds = ARRAY_SIZE(iconnect_led_pins), - .gpio_blink_set = orion_gpio_led_blink_set, -}; - -static struct platform_device iconnect_leds = { - .name = "leds-gpio", - .id = -1, - .dev = { - .platform_data = &iconnect_led_data, - } -}; - static unsigned int iconnect_mpp_config[] __initdata = { MPP12_GPIO, MPP35_GPIO, @@ -90,12 +44,6 @@ static unsigned int iconnect_mpp_config[] __initdata = { 0 }; -static struct i2c_board_info __initdata iconnect_board_info[] = { - { - I2C_BOARD_INFO("lm63", 0x4c), - }, -}; - static struct mtd_partition iconnect_nand_parts[] = { { .name = "flash", @@ -142,15 +90,11 @@ void __init iconnect_init(void) { kirkwood_mpp_conf(iconnect_mpp_config); kirkwood_nand_init(ARRAY_AND_SIZE(iconnect_nand_parts), 25); - kirkwood_i2c_init(); - i2c_register_board_info(0, iconnect_board_info, - ARRAY_SIZE(iconnect_board_info)); kirkwood_ehci_init(); kirkwood_ge00_init(&iconnect_ge00_data); platform_device_register(&iconnect_button_device); - platform_device_register(&iconnect_leds); } static int __init iconnect_pci_init(void) diff --git a/trunk/arch/arm/mach-kirkwood/board-lsxl.c b/trunk/arch/arm/mach-kirkwood/board-lsxl.c new file mode 100644 index 000000000000..83d8975592f8 --- /dev/null +++ b/trunk/arch/arm/mach-kirkwood/board-lsxl.c @@ -0,0 +1,135 @@ +/* + * Copyright 2012 (C), Michael Walle + * + * arch/arm/mach-kirkwood/board-lsxl.c + * + * Buffalo Linkstation LS-XHL and LS-CHLv2 init for drivers not + * converted to flattened device tree yet. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "common.h" +#include "mpp.h" + +static struct mv643xx_eth_platform_data lsxl_ge00_data = { + .phy_addr = MV643XX_ETH_PHY_ADDR(0), +}; + +static struct mv643xx_eth_platform_data lsxl_ge01_data = { + .phy_addr = MV643XX_ETH_PHY_ADDR(8), +}; + +static unsigned int lsxl_mpp_config[] __initdata = { + MPP10_GPO, /* HDD Power Enable */ + MPP11_GPIO, /* USB Vbus Enable */ + MPP18_GPO, /* FAN High Enable# */ + MPP19_GPO, /* FAN Low Enable# */ + MPP36_GPIO, /* Function Blue LED */ + MPP37_GPIO, /* Alarm LED */ + MPP38_GPIO, /* Info LED */ + MPP39_GPIO, /* Power LED */ + MPP40_GPIO, /* Fan Lock */ + MPP41_GPIO, /* Function Button */ + MPP42_GPIO, /* Power Switch */ + MPP43_GPIO, /* Power Auto Switch */ + MPP48_GPIO, /* Function Red LED */ + 0 +}; + +#define LSXL_GPIO_FAN_HIGH 18 +#define LSXL_GPIO_FAN_LOW 19 +#define LSXL_GPIO_FAN_LOCK 40 + +static struct gpio_fan_alarm lsxl_alarm = { + .gpio = LSXL_GPIO_FAN_LOCK, +}; + +static struct gpio_fan_speed lsxl_speeds[] = { + { + .rpm = 0, + .ctrl_val = 3, + }, { + .rpm = 1500, + .ctrl_val = 1, + }, { + .rpm = 3250, + .ctrl_val = 2, + }, { + .rpm = 5000, + .ctrl_val = 0, + } +}; + +static int lsxl_gpio_list[] = { + LSXL_GPIO_FAN_HIGH, LSXL_GPIO_FAN_LOW, +}; + +static struct gpio_fan_platform_data lsxl_fan_data = { + .num_ctrl = ARRAY_SIZE(lsxl_gpio_list), + .ctrl = lsxl_gpio_list, + .alarm = &lsxl_alarm, + .num_speed = ARRAY_SIZE(lsxl_speeds), + .speed = lsxl_speeds, +}; + +static struct platform_device lsxl_fan_device = { + .name = "gpio-fan", + .id = -1, + .num_resources = 0, + .dev = { + .platform_data = &lsxl_fan_data, + }, +}; + +/* + * On the LS-XHL/LS-CHLv2, the shutdown process is following: + * - Userland monitors key events until the power switch goes to off position + * - The board reboots + * - U-boot starts and goes into an idle mode waiting for the user + * to move the switch to ON position + * + */ +static void lsxl_power_off(void) +{ + kirkwood_restart('h', NULL); +} + +#define LSXL_GPIO_HDD_POWER 10 +#define LSXL_GPIO_USB_POWER 11 + +void __init lsxl_init(void) +{ + /* + * Basic setup. Needs to be called early. + */ + kirkwood_mpp_conf(lsxl_mpp_config); + + /* usb and sata power on */ + gpio_set_value(LSXL_GPIO_USB_POWER, 1); + gpio_set_value(LSXL_GPIO_HDD_POWER, 1); + + kirkwood_ehci_init(); + kirkwood_ge00_init(&lsxl_ge00_data); + kirkwood_ge01_init(&lsxl_ge01_data); + platform_device_register(&lsxl_fan_device); + + /* register power-off method */ + pm_power_off = lsxl_power_off; +} diff --git a/trunk/arch/arm/mach-kirkwood/board-ts219.c b/trunk/arch/arm/mach-kirkwood/board-ts219.c new file mode 100644 index 000000000000..1750e68506c1 --- /dev/null +++ b/trunk/arch/arm/mach-kirkwood/board-ts219.c @@ -0,0 +1,82 @@ +/* + * + * QNAP TS-11x/TS-21x Turbo NAS Board Setup via DT + * + * Copyright (C) 2012 Andrew Lunn + * + * Based on the board file ts219-setup.c: + * + * Copyright (C) 2009 Martin Michlmayr + * Copyright (C) 2008 Byron Bradley + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "common.h" +#include "mpp.h" +#include "tsx1x-common.h" + +static struct mv643xx_eth_platform_data qnap_ts219_ge00_data = { + .phy_addr = MV643XX_ETH_PHY_ADDR(8), +}; + +static unsigned int qnap_ts219_mpp_config[] __initdata = { + MPP0_SPI_SCn, + MPP1_SPI_MOSI, + MPP2_SPI_SCK, + MPP3_SPI_MISO, + MPP4_SATA1_ACTn, + MPP5_SATA0_ACTn, + MPP8_TW0_SDA, + MPP9_TW0_SCK, + MPP10_UART0_TXD, + MPP11_UART0_RXD, + MPP13_UART1_TXD, /* PIC controller */ + MPP14_UART1_RXD, /* PIC controller */ + MPP15_GPIO, /* USB Copy button (on devices with 88F6281) */ + MPP16_GPIO, /* Reset button (on devices with 88F6281) */ + MPP36_GPIO, /* RAM: 0: 256 MB, 1: 512 MB */ + MPP37_GPIO, /* Reset button (on devices with 88F6282) */ + MPP43_GPIO, /* USB Copy button (on devices with 88F6282) */ + MPP44_GPIO, /* Board ID: 0: TS-11x, 1: TS-21x */ + 0 +}; + +void __init qnap_dt_ts219_init(void) +{ + u32 dev, rev; + + kirkwood_mpp_conf(qnap_ts219_mpp_config); + + kirkwood_pcie_id(&dev, &rev); + if (dev == MV88F6282_DEV_ID) + qnap_ts219_ge00_data.phy_addr = MV643XX_ETH_PHY_ADDR(0); + + kirkwood_ge00_init(&qnap_ts219_ge00_data); + kirkwood_ehci_init(); + + pm_power_off = qnap_tsx1x_power_off; +} + +/* FIXME: Will not work with DT. Maybe use MPP40_GPIO? */ +static int __init ts219_pci_init(void) +{ + if (machine_is_ts219()) + kirkwood_pcie_init(KW_PCIE0); + + return 0; +} +subsys_initcall(ts219_pci_init); diff --git a/trunk/arch/arm/mach-kirkwood/common.c b/trunk/arch/arm/mach-kirkwood/common.c index c9201539ffbd..1201191d7f1b 100644 --- a/trunk/arch/arm/mach-kirkwood/common.c +++ b/trunk/arch/arm/mach-kirkwood/common.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -276,6 +277,7 @@ void __init kirkwood_clk_init(void) orion_clkdev_add("0", "pcie", pex0); orion_clkdev_add("1", "pcie", pex1); orion_clkdev_add(NULL, "kirkwood-i2s", audio); + orion_clkdev_add(NULL, MV64XXX_I2C_CTLR_NAME ".0", runit); /* Marvell says runit is used by SPI, UART, NAND, TWSI, ..., * so should never be gated. @@ -299,7 +301,7 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data) { orion_ge00_init(eth_data, GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM, - IRQ_KIRKWOOD_GE00_ERR); + IRQ_KIRKWOOD_GE00_ERR, 1600); /* The interface forgets the MAC address assigned by u-boot if the clock is turned off, so claim the clk now. */ clk_prepare_enable(ge0); @@ -313,7 +315,7 @@ void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data) { orion_ge01_init(eth_data, GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM, - IRQ_KIRKWOOD_GE01_ERR); + IRQ_KIRKWOOD_GE01_ERR, 1600); clk_prepare_enable(ge1); } @@ -515,6 +517,13 @@ void __init kirkwood_wdt_init(void) void __init kirkwood_init_early(void) { orion_time_set_base(TIMER_VIRT_BASE); + + /* + * Some Kirkwood devices allocate their coherent buffers from atomic + * context. Increase size of atomic coherent pool to make sure such + * the allocations won't fail. 
+ */ + init_dma_coherent_pool_size(SZ_1M); } int kirkwood_tclk; diff --git a/trunk/arch/arm/mach-kirkwood/common.h b/trunk/arch/arm/mach-kirkwood/common.h index 9248fa2c165b..304dd1abfdca 100644 --- a/trunk/arch/arm/mach-kirkwood/common.h +++ b/trunk/arch/arm/mach-kirkwood/common.h @@ -58,6 +58,11 @@ void dreamplug_init(void); #else static inline void dreamplug_init(void) {}; #endif +#ifdef CONFIG_MACH_TS219_DT +void qnap_dt_ts219_init(void); +#else +static inline void qnap_dt_ts219_init(void) {}; +#endif #ifdef CONFIG_MACH_DLINK_KIRKWOOD_DT void dnskw_init(void); @@ -77,6 +82,18 @@ void ib62x0_init(void); static inline void ib62x0_init(void) {}; #endif +#ifdef CONFIG_MACH_GOFLEXNET_DT +void goflexnet_init(void); +#else +static inline void goflexnet_init(void) {}; +#endif + +#ifdef CONFIG_MACH_LSXL_DT +void lsxl_init(void); +#else +static inline void lsxl_init(void) {}; +#endif + /* early init functions not converted to fdt yet */ char *kirkwood_id(void); void kirkwood_l2_init(void); diff --git a/trunk/arch/arm/mach-kirkwood/db88f6281-bp-setup.c b/trunk/arch/arm/mach-kirkwood/db88f6281-bp-setup.c index d93359379598..be90b7d0e10b 100644 --- a/trunk/arch/arm/mach-kirkwood/db88f6281-bp-setup.c +++ b/trunk/arch/arm/mach-kirkwood/db88f6281-bp-setup.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include diff --git a/trunk/arch/arm/mach-kirkwood/irq.c b/trunk/arch/arm/mach-kirkwood/irq.c index c4c68e5b94f1..720063ffa19d 100644 --- a/trunk/arch/arm/mach-kirkwood/irq.c +++ b/trunk/arch/arm/mach-kirkwood/irq.c @@ -9,20 +9,23 @@ */ #include #include -#include #include -#include #include #include -#include "common.h" -static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) -{ - BUG_ON(irq < IRQ_KIRKWOOD_GPIO_LOW_0_7); - BUG_ON(irq > IRQ_KIRKWOOD_GPIO_HIGH_16_23); +static int __initdata gpio0_irqs[4] = { + IRQ_KIRKWOOD_GPIO_LOW_0_7, + IRQ_KIRKWOOD_GPIO_LOW_8_15, + IRQ_KIRKWOOD_GPIO_LOW_16_23, + IRQ_KIRKWOOD_GPIO_LOW_24_31, +}; - orion_gpio_irq_handler((irq - IRQ_KIRKWOOD_GPIO_LOW_0_7) << 3); -} +static int __initdata gpio1_irqs[4] = { + IRQ_KIRKWOOD_GPIO_HIGH_0_7, + IRQ_KIRKWOOD_GPIO_HIGH_8_15, + IRQ_KIRKWOOD_GPIO_HIGH_16_23, + 0, +}; void __init kirkwood_init_irq(void) { @@ -32,17 +35,8 @@ void __init kirkwood_init_irq(void) /* * Initialize gpiolib for GPIOs 0-49. 
*/ - orion_gpio_init(0, 32, GPIO_LOW_VIRT_BASE, 0, - IRQ_KIRKWOOD_GPIO_START); - irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_0_7, gpio_irq_handler); - irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_8_15, gpio_irq_handler); - irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_16_23, gpio_irq_handler); - irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_24_31, gpio_irq_handler); - - orion_gpio_init(32, 18, GPIO_HIGH_VIRT_BASE, 0, - IRQ_KIRKWOOD_GPIO_START + 32); - irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_0_7, gpio_irq_handler); - irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_8_15, gpio_irq_handler); - irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_16_23, - gpio_irq_handler); + orion_gpio_init(NULL, 0, 32, (void __iomem *)GPIO_LOW_VIRT_BASE, 0, + IRQ_KIRKWOOD_GPIO_START, gpio0_irqs); + orion_gpio_init(NULL, 32, 18, (void __iomem *)GPIO_HIGH_VIRT_BASE, 0, + IRQ_KIRKWOOD_GPIO_START + 32, gpio1_irqs); } diff --git a/trunk/arch/arm/mach-mmp/gplugd.c b/trunk/arch/arm/mach-mmp/gplugd.c index f516e74ce0d5..5c3d61ee729a 100644 --- a/trunk/arch/arm/mach-mmp/gplugd.c +++ b/trunk/arch/arm/mach-mmp/gplugd.c @@ -14,6 +14,7 @@ #include #include +#include #include #include diff --git a/trunk/arch/arm/mach-mmp/sram.c b/trunk/arch/arm/mach-mmp/sram.c index 4304f9519372..7e8a5a2e1ec7 100644 --- a/trunk/arch/arm/mach-mmp/sram.c +++ b/trunk/arch/arm/mach-mmp/sram.c @@ -68,7 +68,7 @@ static int __devinit sram_probe(struct platform_device *pdev) struct resource *res; int ret = 0; - if (!pdata && !pdata->pool_name) + if (!pdata || !pdata->pool_name) return -ENODEV; info = kzalloc(sizeof(*info), GFP_KERNEL); diff --git a/trunk/arch/arm/mach-mv78xx0/addr-map.c b/trunk/arch/arm/mach-mv78xx0/addr-map.c index 62b53d710efd..a9bc84180d21 100644 --- a/trunk/arch/arm/mach-mv78xx0/addr-map.c +++ b/trunk/arch/arm/mach-mv78xx0/addr-map.c @@ -37,7 +37,7 @@ #define WIN0_OFF(n) (BRIDGE_VIRT_BASE + 0x0000 + ((n) << 4)) #define WIN8_OFF(n) (BRIDGE_VIRT_BASE + 0x0900 + (((n) - 8) << 4)) -static void __init __iomem *win_cfg_base(int win) +static void __init __iomem *win_cfg_base(const struct orion_addr_map_cfg *cfg, int win) { /* * Find the control register base address for this window. 
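The mach-mmp/sram.c hunk above swaps '&&' for '||' in the platform-data check. A minimal standalone sketch of the corrected test; the struct and function names here are stand-ins, not the driver's real platform data:

/* With '||', a NULL pdata short-circuits and is never dereferenced, and a
 * non-NULL pdata with a missing pool_name is rejected as well. The old '&&'
 * form dereferenced NULL in the first case and skipped the check in the
 * second. */
struct example_sram_platdata {
	const char *pool_name;	/* stand-in field */
};

static int example_check_pdata(const struct example_sram_platdata *pdata)
{
	if (!pdata || !pdata->pool_name)
		return -1;	/* -ENODEV in the real probe */
	return 0;
}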
diff --git a/trunk/arch/arm/mach-mv78xx0/common.c b/trunk/arch/arm/mach-mv78xx0/common.c index b4c53b846c9c..3057f7d4329a 100644 --- a/trunk/arch/arm/mach-mv78xx0/common.c +++ b/trunk/arch/arm/mach-mv78xx0/common.c @@ -213,7 +213,8 @@ void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data) { orion_ge00_init(eth_data, GE00_PHYS_BASE, IRQ_MV78XX0_GE00_SUM, - IRQ_MV78XX0_GE_ERR); + IRQ_MV78XX0_GE_ERR, + MV643XX_TX_CSUM_DEFAULT_LIMIT); } @@ -224,7 +225,8 @@ void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data) { orion_ge01_init(eth_data, GE01_PHYS_BASE, IRQ_MV78XX0_GE01_SUM, - NO_IRQ); + NO_IRQ, + MV643XX_TX_CSUM_DEFAULT_LIMIT); } diff --git a/trunk/arch/arm/mach-mv78xx0/irq.c b/trunk/arch/arm/mach-mv78xx0/irq.c index e421b701663b..eff9a750bbe2 100644 --- a/trunk/arch/arm/mach-mv78xx0/irq.c +++ b/trunk/arch/arm/mach-mv78xx0/irq.c @@ -9,19 +9,17 @@ */ #include #include -#include -#include #include #include #include #include "common.h" -static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) -{ - BUG_ON(irq < IRQ_MV78XX0_GPIO_0_7 || irq > IRQ_MV78XX0_GPIO_24_31); - - orion_gpio_irq_handler((irq - IRQ_MV78XX0_GPIO_0_7) << 3); -} +static int __initdata gpio0_irqs[4] = { + IRQ_MV78XX0_GPIO_0_7, + IRQ_MV78XX0_GPIO_8_15, + IRQ_MV78XX0_GPIO_16_23, + IRQ_MV78XX0_GPIO_24_31, +}; void __init mv78xx0_init_irq(void) { @@ -34,11 +32,7 @@ void __init mv78xx0_init_irq(void) * registers for core #1 are at an offset of 0x18 from those of * core #0.) */ - orion_gpio_init(0, 32, GPIO_VIRT_BASE, + orion_gpio_init(NULL, 0, 32, (void __iomem *)GPIO_VIRT_BASE, mv78xx0_core_index() ? 0x18 : 0, - IRQ_MV78XX0_GPIO_START); - irq_set_chained_handler(IRQ_MV78XX0_GPIO_0_7, gpio_irq_handler); - irq_set_chained_handler(IRQ_MV78XX0_GPIO_8_15, gpio_irq_handler); - irq_set_chained_handler(IRQ_MV78XX0_GPIO_16_23, gpio_irq_handler); - irq_set_chained_handler(IRQ_MV78XX0_GPIO_24_31, gpio_irq_handler); + IRQ_MV78XX0_GPIO_START, gpio0_irqs); } diff --git a/trunk/arch/arm/mach-mxs/Kconfig b/trunk/arch/arm/mach-mxs/Kconfig index ccdf83b17cf1..9a8bbda195b2 100644 --- a/trunk/arch/arm/mach-mxs/Kconfig +++ b/trunk/arch/arm/mach-mxs/Kconfig @@ -2,9 +2,6 @@ if ARCH_MXS source "arch/arm/mach-mxs/devices/Kconfig" -config MXS_OCOTP - bool - config SOC_IMX23 bool select ARM_AMBA @@ -66,7 +63,6 @@ config MACH_MX28EVK select MXS_HAVE_PLATFORM_MXS_SAIF select MXS_HAVE_PLATFORM_MXS_I2C select MXS_HAVE_PLATFORM_RTC_STMP3XXX - select MXS_OCOTP help Include support for MX28EVK platform. This includes specific configurations for the board and its peripherals. 
@@ -94,7 +90,6 @@ config MODULE_M28 select MXS_HAVE_PLATFORM_MXS_I2C select MXS_HAVE_PLATFORM_MXS_MMC select MXS_HAVE_PLATFORM_MXSFB - select MXS_OCOTP config MODULE_APX4 bool @@ -106,7 +101,6 @@ config MODULE_APX4 select MXS_HAVE_PLATFORM_MXS_I2C select MXS_HAVE_PLATFORM_MXS_MMC select MXS_HAVE_PLATFORM_MXS_SAIF - select MXS_OCOTP config MACH_TX28 bool "Ka-Ro TX28 module" diff --git a/trunk/arch/arm/mach-mxs/Makefile b/trunk/arch/arm/mach-mxs/Makefile index e41590ccb437..fed3695a1339 100644 --- a/trunk/arch/arm/mach-mxs/Makefile +++ b/trunk/arch/arm/mach-mxs/Makefile @@ -1,7 +1,6 @@ # Common support -obj-y := devices.o icoll.o iomux.o system.o timer.o mm.o +obj-y := devices.o icoll.o iomux.o ocotp.o system.o timer.o mm.o -obj-$(CONFIG_MXS_OCOTP) += ocotp.o obj-$(CONFIG_PM) += pm.o obj-$(CONFIG_MACH_MXS_DT) += mach-mxs.o diff --git a/trunk/arch/arm/mach-omap1/board-h2-mmc.c b/trunk/arch/arm/mach-omap1/board-h2-mmc.c index da0e37d40823..e1362ce48497 100644 --- a/trunk/arch/arm/mach-omap1/board-h2-mmc.c +++ b/trunk/arch/arm/mach-omap1/board-h2-mmc.c @@ -54,7 +54,6 @@ static struct omap_mmc_platform_data mmc1_data = { .nr_slots = 1, .init = mmc_late_init, .cleanup = mmc_cleanup, - .dma_mask = 0xffffffff, .slots[0] = { .set_power = mmc_set_power, .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, diff --git a/trunk/arch/arm/mach-omap1/board-h3-mmc.c b/trunk/arch/arm/mach-omap1/board-h3-mmc.c index f8242aa9b763..c74daace8cd6 100644 --- a/trunk/arch/arm/mach-omap1/board-h3-mmc.c +++ b/trunk/arch/arm/mach-omap1/board-h3-mmc.c @@ -36,7 +36,6 @@ static int mmc_set_power(struct device *dev, int slot, int power_on, */ static struct omap_mmc_platform_data mmc1_data = { .nr_slots = 1, - .dma_mask = 0xffffffff, .slots[0] = { .set_power = mmc_set_power, .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, diff --git a/trunk/arch/arm/mach-omap1/board-nokia770.c b/trunk/arch/arm/mach-omap1/board-nokia770.c index 4007a372481b..2c0ca8fc3380 100644 --- a/trunk/arch/arm/mach-omap1/board-nokia770.c +++ b/trunk/arch/arm/mach-omap1/board-nokia770.c @@ -185,7 +185,6 @@ static int nokia770_mmc_get_cover_state(struct device *dev, int slot) static struct omap_mmc_platform_data nokia770_mmc2_data = { .nr_slots = 1, - .dma_mask = 0xffffffff, .max_freq = 12000000, .slots[0] = { .set_power = nokia770_mmc_set_power, diff --git a/trunk/arch/arm/mach-omap2/Kconfig b/trunk/arch/arm/mach-omap2/Kconfig index dd0fbf76ac79..fcd4e85c4ddc 100644 --- a/trunk/arch/arm/mach-omap2/Kconfig +++ b/trunk/arch/arm/mach-omap2/Kconfig @@ -62,12 +62,14 @@ config ARCH_OMAP4 select PM_OPP if PM select USB_ARCH_HAS_EHCI if USB_SUPPORT select ARM_CPU_SUSPEND if PM + select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP config SOC_OMAP5 bool "TI OMAP5" select CPU_V7 select ARM_GIC select HAVE_SMP + select ARM_CPU_SUSPEND if PM comment "OMAP Core Type" depends on ARCH_OMAP2 diff --git a/trunk/arch/arm/mach-omap2/board-igep0020.c b/trunk/arch/arm/mach-omap2/board-igep0020.c index 74915295482e..28214483aaba 100644 --- a/trunk/arch/arm/mach-omap2/board-igep0020.c +++ b/trunk/arch/arm/mach-omap2/board-igep0020.c @@ -554,6 +554,8 @@ static const struct usbhs_omap_board_data igep3_usbhs_bdata __initconst = { #ifdef CONFIG_OMAP_MUX static struct omap_board_mux board_mux[] __initdata = { + /* SMSC9221 LAN Controller ETH IRQ (GPIO_176) */ + OMAP3_MUX(MCSPI1_CS2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT), { .reg_offset = OMAP_MUX_TERMINATOR }, }; #endif diff --git a/trunk/arch/arm/mach-omap2/board-n8x0.c b/trunk/arch/arm/mach-omap2/board-n8x0.c index 2c5d0ed75285..677357ff61ac 100644 --- 
a/trunk/arch/arm/mach-omap2/board-n8x0.c +++ b/trunk/arch/arm/mach-omap2/board-n8x0.c @@ -468,7 +468,6 @@ static struct omap_mmc_platform_data mmc1_data = { .cleanup = n8x0_mmc_cleanup, .shutdown = n8x0_mmc_shutdown, .max_freq = 24000000, - .dma_mask = 0xffffffff, .slots[0] = { .wires = 4, .set_power = n8x0_mmc_set_power, diff --git a/trunk/arch/arm/mach-omap2/board-omap3evm.c b/trunk/arch/arm/mach-omap2/board-omap3evm.c index ef230a0eb5eb..0d362e9f9cb9 100644 --- a/trunk/arch/arm/mach-omap2/board-omap3evm.c +++ b/trunk/arch/arm/mach-omap2/board-omap3evm.c @@ -58,6 +58,7 @@ #include "hsmmc.h" #include "common-board-devices.h" +#define OMAP3_EVM_TS_GPIO 175 #define OMAP3_EVM_EHCI_VBUS 22 #define OMAP3_EVM_EHCI_SELECT 61 diff --git a/trunk/arch/arm/mach-omap2/common-board-devices.c b/trunk/arch/arm/mach-omap2/common-board-devices.c index 14734746457c..c1875862679f 100644 --- a/trunk/arch/arm/mach-omap2/common-board-devices.c +++ b/trunk/arch/arm/mach-omap2/common-board-devices.c @@ -35,16 +35,6 @@ static struct omap2_mcspi_device_config ads7846_mcspi_config = { .turbo_mode = 0, }; -/* - * ADS7846 driver maybe request a gpio according to the value - * of pdata->get_pendown_state, but we have done this. So set - * get_pendown_state to avoid twice gpio requesting. - */ -static int omap3_get_pendown_state(void) -{ - return !gpio_get_value(OMAP3_EVM_TS_GPIO); -} - static struct ads7846_platform_data ads7846_config = { .x_max = 0x0fff, .y_max = 0x0fff, @@ -55,7 +45,6 @@ static struct ads7846_platform_data ads7846_config = { .debounce_rep = 1, .gpio_pendown = -EINVAL, .keep_vref_on = 1, - .get_pendown_state = &omap3_get_pendown_state, }; static struct spi_board_info ads7846_spi_board_info __initdata = { diff --git a/trunk/arch/arm/mach-omap2/common-board-devices.h b/trunk/arch/arm/mach-omap2/common-board-devices.h index 4c4ef6a6166b..a0b4a42836ab 100644 --- a/trunk/arch/arm/mach-omap2/common-board-devices.h +++ b/trunk/arch/arm/mach-omap2/common-board-devices.h @@ -4,7 +4,6 @@ #include "twl-common.h" #define NAND_BLOCK_SIZE SZ_128K -#define OMAP3_EVM_TS_GPIO 175 struct mtd_partition; struct ads7846_platform_data; diff --git a/trunk/arch/arm/mach-omap2/cpuidle44xx.c b/trunk/arch/arm/mach-omap2/cpuidle44xx.c index 02d15bbd4e35..288bee6cbb76 100644 --- a/trunk/arch/arm/mach-omap2/cpuidle44xx.c +++ b/trunk/arch/arm/mach-omap2/cpuidle44xx.c @@ -21,6 +21,7 @@ #include "common.h" #include "pm.h" #include "prm.h" +#include "clockdomain.h" /* Machine specific information */ struct omap4_idle_statedata { @@ -47,10 +48,14 @@ static struct omap4_idle_statedata omap4_idle_data[] = { }, }; -static struct powerdomain *mpu_pd, *cpu0_pd, *cpu1_pd; +static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS]; +static struct clockdomain *cpu_clkdm[NR_CPUS]; + +static atomic_t abort_barrier; +static bool cpu_done[NR_CPUS]; /** - * omap4_enter_idle - Programs OMAP4 to enter the specified state + * omap4_enter_idle_coupled_[simple/coupled] - OMAP4 cpuidle entry functions * @dev: cpuidle device * @drv: cpuidle driver * @index: the index of state to be entered @@ -59,60 +64,84 @@ static struct powerdomain *mpu_pd, *cpu0_pd, *cpu1_pd; * specified low power state selected by the governor. * Returns the amount of time spent in the low power state. 
*/ -static int omap4_enter_idle(struct cpuidle_device *dev, +static int omap4_enter_idle_simple(struct cpuidle_device *dev, + struct cpuidle_driver *drv, + int index) +{ + local_fiq_disable(); + omap_do_wfi(); + local_fiq_enable(); + + return index; +} + +static int omap4_enter_idle_coupled(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { struct omap4_idle_statedata *cx = &omap4_idle_data[index]; - u32 cpu1_state; int cpu_id = smp_processor_id(); local_fiq_disable(); /* - * CPU0 has to stay ON (i.e in C1) until CPU1 is OFF state. + * CPU0 has to wait and stay ON until CPU1 is OFF state. * This is necessary to honour hardware recommondation * of triggeing all the possible low power modes once CPU1 is * out of coherency and in OFF mode. - * Update dev->last_state so that governor stats reflects right - * data. */ - cpu1_state = pwrdm_read_pwrst(cpu1_pd); - if (cpu1_state != PWRDM_POWER_OFF) { - index = drv->safe_state_index; - cx = &omap4_idle_data[index]; + if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) { + while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) { + cpu_relax(); + + /* + * CPU1 could have already entered & exited idle + * without hitting off because of a wakeup + * or a failed attempt to hit off mode. Check for + * that here, otherwise we could spin forever + * waiting for CPU1 off. + */ + if (cpu_done[1]) + goto fail; + + } } - if (index > 0) - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id); + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id); /* * Call idle CPU PM enter notifier chain so that * VFP and per CPU interrupt context is saved. */ - if (cx->cpu_state == PWRDM_POWER_OFF) - cpu_pm_enter(); - - pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state); - omap_set_pwrdm_state(mpu_pd, cx->mpu_state); - - /* - * Call idle CPU cluster PM enter notifier chain - * to save GIC and wakeupgen context. - */ - if ((cx->mpu_state == PWRDM_POWER_RET) && - (cx->mpu_logic_state == PWRDM_POWER_OFF)) - cpu_cluster_pm_enter(); + cpu_pm_enter(); + + if (dev->cpu == 0) { + pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state); + omap_set_pwrdm_state(mpu_pd, cx->mpu_state); + + /* + * Call idle CPU cluster PM enter notifier chain + * to save GIC and wakeupgen context. + */ + if ((cx->mpu_state == PWRDM_POWER_RET) && + (cx->mpu_logic_state == PWRDM_POWER_OFF)) + cpu_cluster_pm_enter(); + } omap4_enter_lowpower(dev->cpu, cx->cpu_state); + cpu_done[dev->cpu] = true; + + /* Wakeup CPU1 only if it is not offlined */ + if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) { + clkdm_wakeup(cpu_clkdm[1]); + clkdm_allow_idle(cpu_clkdm[1]); + } /* * Call idle CPU PM exit notifier chain to restore - * VFP and per CPU IRQ context. Only CPU0 state is - * considered since CPU1 is managed by CPU hotplug. + * VFP and per CPU IRQ context. 
*/ - if (pwrdm_read_prev_pwrst(cpu0_pd) == PWRDM_POWER_OFF) - cpu_pm_exit(); + cpu_pm_exit(); /* * Call idle CPU cluster PM exit notifier chain @@ -121,8 +150,11 @@ static int omap4_enter_idle(struct cpuidle_device *dev, if (omap4_mpuss_read_prev_context_state()) cpu_cluster_pm_exit(); - if (index > 0) - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id); + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id); + +fail: + cpuidle_coupled_parallel_barrier(dev, &abort_barrier); + cpu_done[dev->cpu] = false; local_fiq_enable(); @@ -141,7 +173,7 @@ struct cpuidle_driver omap4_idle_driver = { .exit_latency = 2 + 2, .target_residency = 5, .flags = CPUIDLE_FLAG_TIME_VALID, - .enter = omap4_enter_idle, + .enter = omap4_enter_idle_simple, .name = "C1", .desc = "MPUSS ON" }, @@ -149,8 +181,8 @@ struct cpuidle_driver omap4_idle_driver = { /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */ .exit_latency = 328 + 440, .target_residency = 960, - .flags = CPUIDLE_FLAG_TIME_VALID, - .enter = omap4_enter_idle, + .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED, + .enter = omap4_enter_idle_coupled, .name = "C2", .desc = "MPUSS CSWR", }, @@ -158,8 +190,8 @@ struct cpuidle_driver omap4_idle_driver = { /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */ .exit_latency = 460 + 518, .target_residency = 1100, - .flags = CPUIDLE_FLAG_TIME_VALID, - .enter = omap4_enter_idle, + .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED, + .enter = omap4_enter_idle_coupled, .name = "C3", .desc = "MPUSS OSWR", }, @@ -168,6 +200,16 @@ struct cpuidle_driver omap4_idle_driver = { .safe_state_index = 0, }; +/* + * For each cpu, setup the broadcast timer because local timers + * stops for the states above C1. + */ +static void omap_setup_broadcast_timer(void *arg) +{ + int cpu = smp_processor_id(); + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu); +} + /** * omap4_idle_init - Init routine for OMAP4 idle * @@ -180,19 +222,31 @@ int __init omap4_idle_init(void) unsigned int cpu_id = 0; mpu_pd = pwrdm_lookup("mpu_pwrdm"); - cpu0_pd = pwrdm_lookup("cpu0_pwrdm"); - cpu1_pd = pwrdm_lookup("cpu1_pwrdm"); - if ((!mpu_pd) || (!cpu0_pd) || (!cpu1_pd)) + cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm"); + cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm"); + if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1])) return -ENODEV; - dev = &per_cpu(omap4_idle_dev, cpu_id); - dev->cpu = cpu_id; - - cpuidle_register_driver(&omap4_idle_driver); + cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm"); + cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm"); + if (!cpu_clkdm[0] || !cpu_clkdm[1]) + return -ENODEV; - if (cpuidle_register_device(dev)) { - pr_err("%s: CPUidle register device failed\n", __func__); - return -EIO; + /* Configure the broadcast timer on each cpu */ + on_each_cpu(omap_setup_broadcast_timer, NULL, 1); + + for_each_cpu(cpu_id, cpu_online_mask) { + dev = &per_cpu(omap4_idle_dev, cpu_id); + dev->cpu = cpu_id; +#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED + dev->coupled_cpus = *cpu_online_mask; +#endif + cpuidle_register_driver(&omap4_idle_driver); + + if (cpuidle_register_device(dev)) { + pr_err("%s: CPUidle register failed\n", __func__); + return -EIO; + } } return 0; diff --git a/trunk/arch/arm/mach-omap2/display.c b/trunk/arch/arm/mach-omap2/display.c index 5fb47a14f4ba..af1ed7d24a1f 100644 --- a/trunk/arch/arm/mach-omap2/display.c +++ b/trunk/arch/arm/mach-omap2/display.c @@ -37,6 +37,7 @@ #define DISPC_CONTROL 0x0040 #define DISPC_CONTROL2 0x0238 +#define DISPC_CONTROL3 0x0848 #define DISPC_IRQSTATUS 0x0018 #define DSS_SYSCONFIG 0x10 @@ -52,6 +53,7 @@ #define 
EVSYNC_EVEN_IRQ_SHIFT 2 #define EVSYNC_ODD_IRQ_SHIFT 3 #define FRAMEDONE2_IRQ_SHIFT 22 +#define FRAMEDONE3_IRQ_SHIFT 30 #define FRAMEDONETV_IRQ_SHIFT 24 /* @@ -376,7 +378,7 @@ int __init omap_display_init(struct omap_dss_board_info *board_data) static void dispc_disable_outputs(void) { u32 v, irq_mask = 0; - bool lcd_en, digit_en, lcd2_en = false; + bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false; int i; struct omap_dss_dispc_dev_attr *da; struct omap_hwmod *oh; @@ -405,7 +407,13 @@ static void dispc_disable_outputs(void) lcd2_en = v & LCD_EN_MASK; } - if (!(lcd_en | digit_en | lcd2_en)) + /* store value of LCDENABLE for LCD3 */ + if (da->manager_count > 3) { + v = omap_hwmod_read(oh, DISPC_CONTROL3); + lcd3_en = v & LCD_EN_MASK; + } + + if (!(lcd_en | digit_en | lcd2_en | lcd3_en)) return; /* no managers currently enabled */ /* @@ -426,10 +434,12 @@ static void dispc_disable_outputs(void) if (lcd2_en) irq_mask |= 1 << FRAMEDONE2_IRQ_SHIFT; + if (lcd3_en) + irq_mask |= 1 << FRAMEDONE3_IRQ_SHIFT; /* * clear any previous FRAMEDONE, FRAMEDONETV, - * EVSYNC_EVEN/ODD or FRAMEDONE2 interrupts + * EVSYNC_EVEN/ODD, FRAMEDONE2 or FRAMEDONE3 interrupts */ omap_hwmod_write(irq_mask, oh, DISPC_IRQSTATUS); @@ -445,12 +455,19 @@ static void dispc_disable_outputs(void) omap_hwmod_write(v, oh, DISPC_CONTROL2); } + /* disable LCD3 manager */ + if (da->manager_count > 3) { + v = omap_hwmod_read(oh, DISPC_CONTROL3); + v &= ~LCD_EN_MASK; + omap_hwmod_write(v, oh, DISPC_CONTROL3); + } + i = 0; while ((omap_hwmod_read(oh, DISPC_IRQSTATUS) & irq_mask) != irq_mask) { i++; if (i > FRAMEDONE_IRQ_TIMEOUT) { - pr_err("didn't get FRAMEDONE1/2 or TV interrupt\n"); + pr_err("didn't get FRAMEDONE1/2/3 or TV interrupt\n"); break; } mdelay(1); diff --git a/trunk/arch/arm/mach-omap2/hsmmc.c b/trunk/arch/arm/mach-omap2/hsmmc.c index be697d4e0843..a9675d8d1822 100644 --- a/trunk/arch/arm/mach-omap2/hsmmc.c +++ b/trunk/arch/arm/mach-omap2/hsmmc.c @@ -315,7 +315,6 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c, mmc->slots[0].caps = c->caps; mmc->slots[0].pm_caps = c->pm_caps; mmc->slots[0].internal_clock = !c->ext_clock; - mmc->dma_mask = 0xffffffff; mmc->max_freq = c->max_freq; if (cpu_is_omap44xx()) mmc->reg_offset = OMAP4_MMC_REG_OFFSET; diff --git a/trunk/arch/arm/mach-omap2/mux.h b/trunk/arch/arm/mach-omap2/mux.h index 471e62a74a16..76f9b3c2f586 100644 --- a/trunk/arch/arm/mach-omap2/mux.h +++ b/trunk/arch/arm/mach-omap2/mux.h @@ -127,7 +127,6 @@ struct omap_mux_partition { * @gpio: GPIO number * @muxnames: available signal modes for a ball * @balls: available balls on the package - * @partition: mux partition */ struct omap_mux { u16 reg_offset; diff --git a/trunk/arch/arm/mach-omap2/opp4xxx_data.c b/trunk/arch/arm/mach-omap2/opp4xxx_data.c index 2293ba27101b..c95415da23c2 100644 --- a/trunk/arch/arm/mach-omap2/opp4xxx_data.c +++ b/trunk/arch/arm/mach-omap2/opp4xxx_data.c @@ -94,7 +94,7 @@ int __init omap4_opp_init(void) { int r = -ENODEV; - if (!cpu_is_omap44xx()) + if (!cpu_is_omap443x()) return r; r = omap_init_opp_table(omap44xx_opp_def_list, diff --git a/trunk/arch/arm/mach-omap2/pm34xx.c b/trunk/arch/arm/mach-omap2/pm34xx.c index e4fc88c65dbd..05bd8f02723f 100644 --- a/trunk/arch/arm/mach-omap2/pm34xx.c +++ b/trunk/arch/arm/mach-omap2/pm34xx.c @@ -272,21 +272,16 @@ void omap_sram_idle(void) per_next_state = pwrdm_read_next_pwrst(per_pwrdm); core_next_state = pwrdm_read_next_pwrst(core_pwrdm); - if (mpu_next_state < PWRDM_POWER_ON) { - pwrdm_pre_transition(mpu_pwrdm); - 
pwrdm_pre_transition(neon_pwrdm); - } + pwrdm_pre_transition(NULL); /* PER */ if (per_next_state < PWRDM_POWER_ON) { - pwrdm_pre_transition(per_pwrdm); per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0; omap2_gpio_prepare_for_idle(per_going_off); } /* CORE */ if (core_next_state < PWRDM_POWER_ON) { - pwrdm_pre_transition(core_pwrdm); if (core_next_state == PWRDM_POWER_OFF) { omap3_core_save_context(); omap3_cm_save_context(); @@ -339,20 +334,14 @@ void omap_sram_idle(void) omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK, OMAP3430_GR_MOD, OMAP3_PRM_VOLTCTRL_OFFSET); - pwrdm_post_transition(core_pwrdm); } omap3_intc_resume_idle(); + pwrdm_post_transition(NULL); + /* PER */ - if (per_next_state < PWRDM_POWER_ON) { + if (per_next_state < PWRDM_POWER_ON) omap2_gpio_resume_after_idle(); - pwrdm_post_transition(per_pwrdm); - } - - if (mpu_next_state < PWRDM_POWER_ON) { - pwrdm_post_transition(mpu_pwrdm); - pwrdm_post_transition(neon_pwrdm); - } } static void omap3_pm_idle(void) diff --git a/trunk/arch/arm/mach-omap2/sleep44xx.S b/trunk/arch/arm/mach-omap2/sleep44xx.S index 9f6b83d1b193..91e71d8f46f0 100644 --- a/trunk/arch/arm/mach-omap2/sleep44xx.S +++ b/trunk/arch/arm/mach-omap2/sleep44xx.S @@ -56,9 +56,13 @@ ppa_por_params: * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET. * It returns to the caller for CPU INACTIVE and ON power states or in case * CPU failed to transition to targeted OFF/DORMANT state. + * + * omap4_finish_suspend() calls v7_flush_dcache_all() which doesn't save + * stack frame and it expects the caller to take care of it. Hence the entire + * stack frame is saved to avoid possible stack corruption. */ ENTRY(omap4_finish_suspend) - stmfd sp!, {lr} + stmfd sp!, {r4-r12, lr} cmp r0, #0x0 beq do_WFI @ No lowpower state, jump to WFI @@ -226,7 +230,7 @@ scu_gp_clear: skip_scu_gp_clear: isb dsb - ldmfd sp!, {pc} + ldmfd sp!, {r4-r12, pc} ENDPROC(omap4_finish_suspend) /* diff --git a/trunk/arch/arm/mach-omap2/timer.c b/trunk/arch/arm/mach-omap2/timer.c index 13d20c8a283d..2ff6d41ec6c6 100644 --- a/trunk/arch/arm/mach-omap2/timer.c +++ b/trunk/arch/arm/mach-omap2/timer.c @@ -130,6 +130,7 @@ static struct clock_event_device clockevent_gpt = { .name = "gp_timer", .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .shift = 32, + .rating = 300, .set_next_event = omap2_gp_timer_set_next_event, .set_mode = omap2_gp_timer_set_mode, }; @@ -223,7 +224,8 @@ static void __init omap2_gp_clockevent_init(int gptimer_id, clockevent_delta2ns(3, &clockevent_gpt); /* Timer internal resynch latency. 
*/ - clockevent_gpt.cpumask = cpumask_of(0); + clockevent_gpt.cpumask = cpu_possible_mask; + clockevent_gpt.irq = omap_dm_timer_get_irq(&clkev); clockevents_register_device(&clockevent_gpt); pr_info("OMAP clockevent source: GPTIMER%d at %lu Hz\n", diff --git a/trunk/arch/arm/mach-omap2/twl-common.c b/trunk/arch/arm/mach-omap2/twl-common.c index de47f170ba50..db5ff6642375 100644 --- a/trunk/arch/arm/mach-omap2/twl-common.c +++ b/trunk/arch/arm/mach-omap2/twl-common.c @@ -67,6 +67,7 @@ void __init omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq, struct twl4030_platform_data *pmic_data) { + omap_mux_init_signal("sys_nirq", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE); strncpy(pmic_i2c_board_info.type, pmic_type, sizeof(pmic_i2c_board_info.type)); pmic_i2c_board_info.irq = pmic_irq; diff --git a/trunk/arch/arm/mach-orion5x/common.c b/trunk/arch/arm/mach-orion5x/common.c index 9148b229d0de..410291c67666 100644 --- a/trunk/arch/arm/mach-orion5x/common.c +++ b/trunk/arch/arm/mach-orion5x/common.c @@ -109,7 +109,8 @@ void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data) { orion_ge00_init(eth_data, ORION5X_ETH_PHYS_BASE, IRQ_ORION5X_ETH_SUM, - IRQ_ORION5X_ETH_ERR); + IRQ_ORION5X_ETH_ERR, + MV643XX_TX_CSUM_DEFAULT_LIMIT); } diff --git a/trunk/arch/arm/mach-orion5x/irq.c b/trunk/arch/arm/mach-orion5x/irq.c index b1b45fff776e..17da7091d310 100644 --- a/trunk/arch/arm/mach-orion5x/irq.c +++ b/trunk/arch/arm/mach-orion5x/irq.c @@ -11,19 +11,16 @@ */ #include #include -#include #include -#include #include #include -#include "common.h" -static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) -{ - BUG_ON(irq < IRQ_ORION5X_GPIO_0_7 || irq > IRQ_ORION5X_GPIO_24_31); - - orion_gpio_irq_handler((irq - IRQ_ORION5X_GPIO_0_7) << 3); -} +static int __initdata gpio0_irqs[4] = { + IRQ_ORION5X_GPIO_0_7, + IRQ_ORION5X_GPIO_8_15, + IRQ_ORION5X_GPIO_16_23, + IRQ_ORION5X_GPIO_24_31, +}; void __init orion5x_init_irq(void) { @@ -32,9 +29,6 @@ void __init orion5x_init_irq(void) /* * Initialize gpiolib for GPIOs 0-31. 
*/ - orion_gpio_init(0, 32, GPIO_VIRT_BASE, 0, IRQ_ORION5X_GPIO_START); - irq_set_chained_handler(IRQ_ORION5X_GPIO_0_7, gpio_irq_handler); - irq_set_chained_handler(IRQ_ORION5X_GPIO_8_15, gpio_irq_handler); - irq_set_chained_handler(IRQ_ORION5X_GPIO_16_23, gpio_irq_handler); - irq_set_chained_handler(IRQ_ORION5X_GPIO_24_31, gpio_irq_handler); + orion_gpio_init(NULL, 0, 32, (void __iomem *)GPIO_VIRT_BASE, 0, + IRQ_ORION5X_GPIO_START, gpio0_irqs); } diff --git a/trunk/arch/arm/mach-prima2/timer.c b/trunk/arch/arm/mach-prima2/timer.c index 0d024b1e916d..f224107de7bc 100644 --- a/trunk/arch/arm/mach-prima2/timer.c +++ b/trunk/arch/arm/mach-prima2/timer.c @@ -132,11 +132,11 @@ static void sirfsoc_clocksource_resume(struct clocksource *cs) { int i; - for (i = 0; i < SIRFSOC_TIMER_REG_CNT; i++) + for (i = 0; i < SIRFSOC_TIMER_REG_CNT - 2; i++) writel_relaxed(sirfsoc_timer_reg_val[i], sirfsoc_timer_base + sirfsoc_timer_reg_list[i]); - writel_relaxed(sirfsoc_timer_reg_val[i - 2], sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO); - writel_relaxed(sirfsoc_timer_reg_val[i - 1], sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI); + writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 2], sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO); + writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 1], sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI); } static struct clock_event_device sirfsoc_clockevent = { diff --git a/trunk/arch/arm/mach-pxa/raumfeld.c b/trunk/arch/arm/mach-pxa/raumfeld.c index 5905ed130e94..d89d87ae144c 100644 --- a/trunk/arch/arm/mach-pxa/raumfeld.c +++ b/trunk/arch/arm/mach-pxa/raumfeld.c @@ -953,12 +953,12 @@ static struct i2c_board_info raumfeld_connector_i2c_board_info __initdata = { static struct eeti_ts_platform_data eeti_ts_pdata = { .irq_active_high = 1, + .irq_gpio = GPIO_TOUCH_IRQ, }; static struct i2c_board_info raumfeld_controller_i2c_board_info __initdata = { .type = "eeti_ts", .addr = 0x0a, - .irq = PXA_GPIO_TO_IRQ(GPIO_TOUCH_IRQ), .platform_data = &eeti_ts_pdata, }; diff --git a/trunk/arch/arm/mach-s3c24xx/Kconfig b/trunk/arch/arm/mach-s3c24xx/Kconfig index e24961109b70..d56b0f7f2b20 100644 --- a/trunk/arch/arm/mach-s3c24xx/Kconfig +++ b/trunk/arch/arm/mach-s3c24xx/Kconfig @@ -483,7 +483,7 @@ config MACH_NEO1973_GTA02 select I2C select POWER_SUPPLY select MACH_NEO1973 - select S3C2410_PWM + select S3C24XX_PWM select S3C_DEV_USB_HOST help Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone @@ -493,7 +493,7 @@ config MACH_RX1950 select S3C24XX_DCLK select PM_H1940 if PM select I2C - select S3C2410_PWM + select S3C24XX_PWM select S3C_DEV_NAND select S3C2410_IOTIMING if S3C2440_CPUFREQ select S3C2440_XTAL_16934400 diff --git a/trunk/arch/arm/mach-s3c24xx/include/mach/dma.h b/trunk/arch/arm/mach-s3c24xx/include/mach/dma.h index 454831b66037..ee99fd56c043 100644 --- a/trunk/arch/arm/mach-s3c24xx/include/mach/dma.h +++ b/trunk/arch/arm/mach-s3c24xx/include/mach/dma.h @@ -24,7 +24,8 @@ */ enum dma_ch { - DMACH_XD0, + DMACH_DT_PROP = -1, /* not yet supported, do not use */ + DMACH_XD0 = 0, DMACH_XD1, DMACH_SDI, DMACH_SPI0, diff --git a/trunk/arch/arm/mach-sa1100/leds-hackkit.c b/trunk/arch/arm/mach-sa1100/leds-hackkit.c index 6a2352436e62..f8e47235babe 100644 --- a/trunk/arch/arm/mach-sa1100/leds-hackkit.c +++ b/trunk/arch/arm/mach-sa1100/leds-hackkit.c @@ -10,6 +10,7 @@ * as cpu led, the green one is used as timer led. 
*/ #include +#include #include #include diff --git a/trunk/arch/arm/mach-shmobile/board-armadillo800eva.c b/trunk/arch/arm/mach-shmobile/board-armadillo800eva.c index cf10f92856dc..453a6e50db8b 100644 --- a/trunk/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/trunk/arch/arm/mach-shmobile/board-armadillo800eva.c @@ -520,13 +520,14 @@ static struct platform_device hdmi_lcdc_device = { }; /* GPIO KEY */ -#define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 } +#define GPIO_KEY(c, g, d, ...) \ + { .code = c, .gpio = g, .desc = d, .active_low = 1, __VA_ARGS__ } static struct gpio_keys_button gpio_buttons[] = { - GPIO_KEY(KEY_POWER, GPIO_PORT99, "SW1"), - GPIO_KEY(KEY_BACK, GPIO_PORT100, "SW2"), - GPIO_KEY(KEY_MENU, GPIO_PORT97, "SW3"), - GPIO_KEY(KEY_HOME, GPIO_PORT98, "SW4"), + GPIO_KEY(KEY_POWER, GPIO_PORT99, "SW3", .wakeup = 1), + GPIO_KEY(KEY_BACK, GPIO_PORT100, "SW4"), + GPIO_KEY(KEY_MENU, GPIO_PORT97, "SW5"), + GPIO_KEY(KEY_HOME, GPIO_PORT98, "SW6"), }; static struct gpio_keys_platform_data gpio_key_info = { @@ -901,8 +902,8 @@ static struct platform_device *eva_devices[] __initdata = { &camera_device, &ceu0_device, &fsi_device, - &fsi_hdmi_device, &fsi_wm8978_device, + &fsi_hdmi_device, }; static void __init eva_clock_init(void) diff --git a/trunk/arch/arm/mach-shmobile/board-mackerel.c b/trunk/arch/arm/mach-shmobile/board-mackerel.c index 7ea2b31e3199..c129542f6aed 100644 --- a/trunk/arch/arm/mach-shmobile/board-mackerel.c +++ b/trunk/arch/arm/mach-shmobile/board-mackerel.c @@ -695,6 +695,7 @@ static struct platform_device usbhs0_device = { * - J30 "open" * - modify usbhs1_get_id() USBHS_HOST -> USBHS_GADGET * - add .get_vbus = usbhs_get_vbus in usbhs1_private + * - check usbhs0_device(pio)/usbhs1_device(irq) order in mackerel_devices. 
*/ #define IRQ8 evt2irq(0x0300) #define USB_PHY_MODE (1 << 4) @@ -1325,8 +1326,8 @@ static struct platform_device *mackerel_devices[] __initdata = { &nor_flash_device, &smc911x_device, &lcdc_device, - &usbhs1_device, &usbhs0_device, + &usbhs1_device, &leds_device, &fsi_device, &fsi_ak4643_device, diff --git a/trunk/arch/arm/mach-shmobile/board-marzen.c b/trunk/arch/arm/mach-shmobile/board-marzen.c index 3a528cf4366c..fcf5a47f4772 100644 --- a/trunk/arch/arm/mach-shmobile/board-marzen.c +++ b/trunk/arch/arm/mach-shmobile/board-marzen.c @@ -67,7 +67,7 @@ static struct smsc911x_platform_config smsc911x_platdata = { static struct platform_device eth_device = { .name = "smsc911x", - .id = 0, + .id = -1, .dev = { .platform_data = &smsc911x_platdata, }, diff --git a/trunk/arch/arm/mach-shmobile/intc-sh73a0.c b/trunk/arch/arm/mach-shmobile/intc-sh73a0.c index ee447404c857..588555a67d9c 100644 --- a/trunk/arch/arm/mach-shmobile/intc-sh73a0.c +++ b/trunk/arch/arm/mach-shmobile/intc-sh73a0.c @@ -259,9 +259,9 @@ static int sh73a0_set_wake(struct irq_data *data, unsigned int on) return 0; /* always allow wakeup */ } -#define RELOC_BASE 0x1000 +#define RELOC_BASE 0x1200 -/* INTCA IRQ pins at INTCS + 0x1000 to make space for GIC+INTC handling */ +/* INTCA IRQ pins at INTCS + RELOC_BASE to make space for GIC+INTC handling */ #define INTCS_VECT_RELOC(n, vect) INTCS_VECT((n), (vect) + RELOC_BASE) INTC_IRQ_PINS_32(intca_irq_pins, 0xe6900000, diff --git a/trunk/arch/arm/mach-spear3xx/spear300.c b/trunk/arch/arm/mach-spear3xx/spear300.c index 0f882ecb7d81..6ec300549960 100644 --- a/trunk/arch/arm/mach-spear3xx/spear300.c +++ b/trunk/arch/arm/mach-spear3xx/spear300.c @@ -120,182 +120,156 @@ struct pl08x_channel_data spear300_dma_info[] = { .min_signal = 2, .max_signal = 2, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart0_tx", .min_signal = 3, .max_signal = 3, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_rx", .min_signal = 8, .max_signal = 8, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_tx", .min_signal = 9, .max_signal = 9, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c_rx", .min_signal = 10, .max_signal = 10, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c_tx", .min_signal = 11, .max_signal = 11, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "irda", .min_signal = 12, .max_signal = 12, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "adc", .min_signal = 13, .max_signal = 13, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "to_jpeg", .min_signal = 14, .max_signal = 14, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "from_jpeg", .min_signal = 15, .max_signal = 15, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras0_rx", .min_signal = 0, .max_signal = 0, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras0_tx", .min_signal = 1, .max_signal = 1, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras1_rx", .min_signal = 2, .max_signal = 2, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras1_tx", .min_signal = 3, .max_signal = 3, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras2_rx", .min_signal = 4, .max_signal = 4, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras2_tx", .min_signal = 5, .max_signal = 5, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, 
{ .bus_id = "ras3_rx", .min_signal = 6, .max_signal = 6, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras3_tx", .min_signal = 7, .max_signal = 7, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras4_rx", .min_signal = 8, .max_signal = 8, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras4_tx", .min_signal = 9, .max_signal = 9, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras5_rx", .min_signal = 10, .max_signal = 10, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras5_tx", .min_signal = 11, .max_signal = 11, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras6_rx", .min_signal = 12, .max_signal = 12, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras6_tx", .min_signal = 13, .max_signal = 13, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras7_rx", .min_signal = 14, .max_signal = 14, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras7_tx", .min_signal = 15, .max_signal = 15, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, }; diff --git a/trunk/arch/arm/mach-spear3xx/spear310.c b/trunk/arch/arm/mach-spear3xx/spear310.c index bbcf4571d361..1d0e435b9045 100644 --- a/trunk/arch/arm/mach-spear3xx/spear310.c +++ b/trunk/arch/arm/mach-spear3xx/spear310.c @@ -205,182 +205,156 @@ struct pl08x_channel_data spear310_dma_info[] = { .min_signal = 2, .max_signal = 2, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart0_tx", .min_signal = 3, .max_signal = 3, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_rx", .min_signal = 8, .max_signal = 8, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_tx", .min_signal = 9, .max_signal = 9, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c_rx", .min_signal = 10, .max_signal = 10, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c_tx", .min_signal = 11, .max_signal = 11, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "irda", .min_signal = 12, .max_signal = 12, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "adc", .min_signal = 13, .max_signal = 13, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "to_jpeg", .min_signal = 14, .max_signal = 14, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "from_jpeg", .min_signal = 15, .max_signal = 15, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart1_rx", .min_signal = 0, .max_signal = 0, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart1_tx", .min_signal = 1, .max_signal = 1, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart2_rx", .min_signal = 2, .max_signal = 2, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart2_tx", .min_signal = 3, .max_signal = 3, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart3_rx", .min_signal = 4, .max_signal = 4, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart3_tx", .min_signal = 5, .max_signal = 5, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart4_rx", .min_signal = 6, .max_signal = 6, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart4_tx", .min_signal = 7, .max_signal = 7, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart5_rx", .min_signal = 8, .max_signal = 
8, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart5_tx", .min_signal = 9, .max_signal = 9, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras5_rx", .min_signal = 10, .max_signal = 10, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras5_tx", .min_signal = 11, .max_signal = 11, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras6_rx", .min_signal = 12, .max_signal = 12, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras6_tx", .min_signal = 13, .max_signal = 13, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras7_rx", .min_signal = 14, .max_signal = 14, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras7_tx", .min_signal = 15, .max_signal = 15, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, }; diff --git a/trunk/arch/arm/mach-spear3xx/spear320.c b/trunk/arch/arm/mach-spear3xx/spear320.c index 88d483bcd66a..fd823c624575 100644 --- a/trunk/arch/arm/mach-spear3xx/spear320.c +++ b/trunk/arch/arm/mach-spear3xx/spear320.c @@ -213,182 +213,156 @@ struct pl08x_channel_data spear320_dma_info[] = { .min_signal = 2, .max_signal = 2, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart0_tx", .min_signal = 3, .max_signal = 3, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_rx", .min_signal = 8, .max_signal = 8, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_tx", .min_signal = 9, .max_signal = 9, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c0_rx", .min_signal = 10, .max_signal = 10, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c0_tx", .min_signal = 11, .max_signal = 11, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "irda", .min_signal = 12, .max_signal = 12, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "adc", .min_signal = 13, .max_signal = 13, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "to_jpeg", .min_signal = 14, .max_signal = 14, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "from_jpeg", .min_signal = 15, .max_signal = 15, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp1_rx", .min_signal = 0, .max_signal = 0, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ssp1_tx", .min_signal = 1, .max_signal = 1, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ssp2_rx", .min_signal = 2, .max_signal = 2, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ssp2_tx", .min_signal = 3, .max_signal = 3, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "uart1_rx", .min_signal = 4, .max_signal = 4, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "uart1_tx", .min_signal = 5, .max_signal = 5, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "uart2_rx", .min_signal = 6, .max_signal = 6, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "uart2_tx", .min_signal = 7, .max_signal = 7, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "i2c1_rx", .min_signal = 8, .max_signal = 8, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "i2c1_tx", .min_signal = 9, .max_signal = 9, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "i2c2_rx", .min_signal = 10, .max_signal = 10, .muxval = 1, - .cctl = 0, .periph_buses = 
PL08X_AHB2, }, { .bus_id = "i2c2_tx", .min_signal = 11, .max_signal = 11, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "i2s_rx", .min_signal = 12, .max_signal = 12, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "i2s_tx", .min_signal = 13, .max_signal = 13, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "rs485_rx", .min_signal = 14, .max_signal = 14, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "rs485_tx", .min_signal = 15, .max_signal = 15, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB2, }, }; diff --git a/trunk/arch/arm/mach-spear3xx/spear3xx.c b/trunk/arch/arm/mach-spear3xx/spear3xx.c index 66db5f13af84..98144baf8883 100644 --- a/trunk/arch/arm/mach-spear3xx/spear3xx.c +++ b/trunk/arch/arm/mach-spear3xx/spear3xx.c @@ -46,7 +46,8 @@ struct pl022_ssp_controller pl022_plat_data = { struct pl08x_platform_data pl080_plat_data = { .memcpy_channel = { .bus_id = "memcpy", - .cctl = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \ + .cctl_memcpy = + (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \ PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT | \ PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | \ PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | \ diff --git a/trunk/arch/arm/mach-spear6xx/spear6xx.c b/trunk/arch/arm/mach-spear6xx/spear6xx.c index 9af67d003c62..5a5a52db252b 100644 --- a/trunk/arch/arm/mach-spear6xx/spear6xx.c +++ b/trunk/arch/arm/mach-spear6xx/spear6xx.c @@ -36,336 +36,288 @@ static struct pl08x_channel_data spear600_dma_info[] = { .min_signal = 0, .max_signal = 0, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp1_tx", .min_signal = 1, .max_signal = 1, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart0_rx", .min_signal = 2, .max_signal = 2, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart0_tx", .min_signal = 3, .max_signal = 3, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart1_rx", .min_signal = 4, .max_signal = 4, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "uart1_tx", .min_signal = 5, .max_signal = 5, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp2_rx", .min_signal = 6, .max_signal = 6, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ssp2_tx", .min_signal = 7, .max_signal = 7, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ssp0_rx", .min_signal = 8, .max_signal = 8, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ssp0_tx", .min_signal = 9, .max_signal = 9, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c_rx", .min_signal = 10, .max_signal = 10, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "i2c_tx", .min_signal = 11, .max_signal = 11, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "irda", .min_signal = 12, .max_signal = 12, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "adc", .min_signal = 13, .max_signal = 13, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "to_jpeg", .min_signal = 14, .max_signal = 14, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "from_jpeg", .min_signal = 15, .max_signal = 15, .muxval = 0, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras0_rx", .min_signal = 0, .max_signal = 0, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras0_tx", .min_signal = 1, .max_signal = 1, 
.muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras1_rx", .min_signal = 2, .max_signal = 2, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras1_tx", .min_signal = 3, .max_signal = 3, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras2_rx", .min_signal = 4, .max_signal = 4, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras2_tx", .min_signal = 5, .max_signal = 5, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras3_rx", .min_signal = 6, .max_signal = 6, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras3_tx", .min_signal = 7, .max_signal = 7, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras4_rx", .min_signal = 8, .max_signal = 8, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras4_tx", .min_signal = 9, .max_signal = 9, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras5_rx", .min_signal = 10, .max_signal = 10, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras5_tx", .min_signal = 11, .max_signal = 11, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras6_rx", .min_signal = 12, .max_signal = 12, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras6_tx", .min_signal = 13, .max_signal = 13, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras7_rx", .min_signal = 14, .max_signal = 14, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ras7_tx", .min_signal = 15, .max_signal = 15, .muxval = 1, - .cctl = 0, .periph_buses = PL08X_AHB1, }, { .bus_id = "ext0_rx", .min_signal = 0, .max_signal = 0, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext0_tx", .min_signal = 1, .max_signal = 1, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext1_rx", .min_signal = 2, .max_signal = 2, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext1_tx", .min_signal = 3, .max_signal = 3, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext2_rx", .min_signal = 4, .max_signal = 4, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext2_tx", .min_signal = 5, .max_signal = 5, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext3_rx", .min_signal = 6, .max_signal = 6, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext3_tx", .min_signal = 7, .max_signal = 7, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext4_rx", .min_signal = 8, .max_signal = 8, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext4_tx", .min_signal = 9, .max_signal = 9, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext5_rx", .min_signal = 10, .max_signal = 10, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext5_tx", .min_signal = 11, .max_signal = 11, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext6_rx", .min_signal = 12, .max_signal = 12, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext6_tx", .min_signal = 13, .max_signal = 13, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext7_rx", .min_signal = 14, .max_signal = 14, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, { .bus_id = "ext7_tx", .min_signal = 15, .max_signal = 15, .muxval = 2, - .cctl = 0, .periph_buses = PL08X_AHB2, }, }; @@ -373,7 +325,8 @@ static struct 
pl08x_channel_data spear600_dma_info[] = { struct pl08x_platform_data pl080_plat_data = { .memcpy_channel = { .bus_id = "memcpy", - .cctl = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \ + .cctl_memcpy = + (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \ PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT | \ PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | \ PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | \ diff --git a/trunk/arch/arm/mach-tegra/board-harmony-power.c b/trunk/arch/arm/mach-tegra/board-harmony-power.c index 8fd387bf31f0..b7344beec102 100644 --- a/trunk/arch/arm/mach-tegra/board-harmony-power.c +++ b/trunk/arch/arm/mach-tegra/board-harmony-power.c @@ -51,7 +51,7 @@ static struct regulator_init_data ldo0_data = { .consumer_supplies = tps658621_ldo0_supply, }; -#define HARMONY_REGULATOR_INIT(_id, _name, _supply, _minmv, _maxmv) \ +#define HARMONY_REGULATOR_INIT(_id, _name, _supply, _minmv, _maxmv, _on)\ static struct regulator_init_data _id##_data = { \ .supply_regulator = _supply, \ .constraints = { \ @@ -63,21 +63,22 @@ static struct regulator_init_data ldo0_data = { .valid_ops_mask = (REGULATOR_CHANGE_MODE | \ REGULATOR_CHANGE_STATUS | \ REGULATOR_CHANGE_VOLTAGE), \ + .always_on = _on, \ }, \ } -HARMONY_REGULATOR_INIT(sm0, "vdd_sm0", "vdd_sys", 725, 1500); -HARMONY_REGULATOR_INIT(sm1, "vdd_sm1", "vdd_sys", 725, 1500); -HARMONY_REGULATOR_INIT(sm2, "vdd_sm2", "vdd_sys", 3000, 4550); -HARMONY_REGULATOR_INIT(ldo1, "vdd_ldo1", "vdd_sm2", 725, 1500); -HARMONY_REGULATOR_INIT(ldo2, "vdd_ldo2", "vdd_sm2", 725, 1500); -HARMONY_REGULATOR_INIT(ldo3, "vdd_ldo3", "vdd_sm2", 1250, 3300); -HARMONY_REGULATOR_INIT(ldo4, "vdd_ldo4", "vdd_sm2", 1700, 2475); -HARMONY_REGULATOR_INIT(ldo5, "vdd_ldo5", NULL, 1250, 3300); -HARMONY_REGULATOR_INIT(ldo6, "vdd_ldo6", "vdd_sm2", 1250, 3300); -HARMONY_REGULATOR_INIT(ldo7, "vdd_ldo7", "vdd_sm2", 1250, 3300); -HARMONY_REGULATOR_INIT(ldo8, "vdd_ldo8", "vdd_sm2", 1250, 3300); -HARMONY_REGULATOR_INIT(ldo9, "vdd_ldo9", "vdd_sm2", 1250, 3300); +HARMONY_REGULATOR_INIT(sm0, "vdd_sm0", "vdd_sys", 725, 1500, 1); +HARMONY_REGULATOR_INIT(sm1, "vdd_sm1", "vdd_sys", 725, 1500, 1); +HARMONY_REGULATOR_INIT(sm2, "vdd_sm2", "vdd_sys", 3000, 4550, 1); +HARMONY_REGULATOR_INIT(ldo1, "vdd_ldo1", "vdd_sm2", 725, 1500, 1); +HARMONY_REGULATOR_INIT(ldo2, "vdd_ldo2", "vdd_sm2", 725, 1500, 0); +HARMONY_REGULATOR_INIT(ldo3, "vdd_ldo3", "vdd_sm2", 1250, 3300, 1); +HARMONY_REGULATOR_INIT(ldo4, "vdd_ldo4", "vdd_sm2", 1700, 2475, 1); +HARMONY_REGULATOR_INIT(ldo5, "vdd_ldo5", NULL, 1250, 3300, 1); +HARMONY_REGULATOR_INIT(ldo6, "vdd_ldo6", "vdd_sm2", 1250, 3300, 0); +HARMONY_REGULATOR_INIT(ldo7, "vdd_ldo7", "vdd_sm2", 1250, 3300, 0); +HARMONY_REGULATOR_INIT(ldo8, "vdd_ldo8", "vdd_sm2", 1250, 3300, 0); +HARMONY_REGULATOR_INIT(ldo9, "vdd_ldo9", "vdd_sm2", 1250, 3300, 1); #define TPS_REG(_id, _data) \ { \ @@ -119,9 +120,10 @@ static struct i2c_board_info __initdata harmony_regulators[] = { int __init harmony_regulator_init(void) { + regulator_register_always_on(0, "vdd_sys", + NULL, 0, 5000000); + if (machine_is_harmony()) { - regulator_register_always_on(0, "vdd_sys", - NULL, 0, 5000000); i2c_register_board_info(3, harmony_regulators, 1); } else { /* Harmony, booted using device tree */ struct device_node *np; diff --git a/trunk/arch/arm/mach-ux500/Kconfig b/trunk/arch/arm/mach-ux500/Kconfig index c013bbf79cac..53d3d46dec12 100644 --- a/trunk/arch/arm/mach-ux500/Kconfig +++ b/trunk/arch/arm/mach-ux500/Kconfig @@ -41,7 +41,6 @@ config MACH_HREFV60 config MACH_SNOWBALL bool "U8500 Snowball 
platform" select MACH_MOP500 - select LEDS_GPIO help Include support for the snowball development platform. diff --git a/trunk/arch/arm/mach-ux500/board-mop500-msp.c b/trunk/arch/arm/mach-ux500/board-mop500-msp.c index 996048038743..df15646036aa 100644 --- a/trunk/arch/arm/mach-ux500/board-mop500-msp.c +++ b/trunk/arch/arm/mach-ux500/board-mop500-msp.c @@ -191,9 +191,9 @@ static struct platform_device *db8500_add_msp_i2s(struct device *parent, return pdev; } -/* Platform device for ASoC U8500 machine */ -static struct platform_device snd_soc_u8500 = { - .name = "snd-soc-u8500", +/* Platform device for ASoC MOP500 machine */ +static struct platform_device snd_soc_mop500 = { + .name = "snd-soc-mop500", .id = 0, .dev = { .platform_data = NULL, @@ -227,8 +227,8 @@ int mop500_msp_init(struct device *parent) { struct platform_device *msp1; - pr_info("%s: Register platform-device 'snd-soc-u8500'.\n", __func__); - platform_device_register(&snd_soc_u8500); + pr_info("%s: Register platform-device 'snd-soc-mop500'.\n", __func__); + platform_device_register(&snd_soc_mop500); pr_info("Initialize MSP I2S-devices.\n"); db8500_add_msp_i2s(parent, 0, U8500_MSP0_BASE, IRQ_DB8500_MSP0, diff --git a/trunk/arch/arm/mach-ux500/board-mop500.c b/trunk/arch/arm/mach-ux500/board-mop500.c index 8674a890fd1c..a534d8880de1 100644 --- a/trunk/arch/arm/mach-ux500/board-mop500.c +++ b/trunk/arch/arm/mach-ux500/board-mop500.c @@ -797,6 +797,7 @@ static void __init u8500_init_machine(void) ARRAY_SIZE(mop500_platform_devs)); mop500_sdi_init(parent); + mop500_msp_init(parent); i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices); i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs); i2c_register_board_info(2, mop500_i2c2_devices, @@ -804,6 +805,8 @@ static void __init u8500_init_machine(void) mop500_uib_init(); + } else if (of_machine_is_compatible("calaosystems,snowball-a9500")) { + mop500_msp_init(parent); } else if (of_machine_is_compatible("st-ericsson,hrefv60+")) { /* * The HREFv60 board removed a GPIO expander and routed @@ -815,6 +818,7 @@ static void __init u8500_init_machine(void) ARRAY_SIZE(mop500_platform_devs)); hrefv60_sdi_init(parent); + mop500_msp_init(parent); i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices); i2c0_devs -= NUM_PRE_V60_I2C0_DEVICES; diff --git a/trunk/arch/arm/mm/dma-mapping.c b/trunk/arch/arm/mm/dma-mapping.c index c2cdf6500f75..051204fc4617 100644 --- a/trunk/arch/arm/mm/dma-mapping.c +++ b/trunk/arch/arm/mm/dma-mapping.c @@ -267,17 +267,19 @@ static void __dma_free_remap(void *cpu_addr, size_t size) vunmap(cpu_addr); } +#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K + struct dma_pool { size_t size; spinlock_t lock; unsigned long *bitmap; unsigned long nr_pages; void *vaddr; - struct page *page; + struct page **pages; }; static struct dma_pool atomic_pool = { - .size = SZ_256K, + .size = DEFAULT_DMA_COHERENT_POOL_SIZE, }; static int __init early_coherent_pool(char *p) @@ -287,6 +289,21 @@ static int __init early_coherent_pool(char *p) } early_param("coherent_pool", early_coherent_pool); +void __init init_dma_coherent_pool_size(unsigned long size) +{ + /* + * Catch any attempt to set the pool size too late. + */ + BUG_ON(atomic_pool.vaddr); + + /* + * Set architecture specific coherent pool size only if + * it has not been changed by kernel command line parameter. + */ + if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE) + atomic_pool.size = size; +} + /* * Initialise the coherent pool for atomic allocations. 
*/ @@ -297,6 +314,7 @@ static int __init atomic_pool_init(void) unsigned long nr_pages = pool->size >> PAGE_SHIFT; unsigned long *bitmap; struct page *page; + struct page **pages; void *ptr; int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long); @@ -304,21 +322,31 @@ static int __init atomic_pool_init(void) if (!bitmap) goto no_bitmap; + pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL); + if (!pages) + goto no_pages; + if (IS_ENABLED(CONFIG_CMA)) ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page); else ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot, &page, NULL); if (ptr) { + int i; + + for (i = 0; i < nr_pages; i++) + pages[i] = page + i; + spin_lock_init(&pool->lock); pool->vaddr = ptr; - pool->page = page; + pool->pages = pages; pool->bitmap = bitmap; pool->nr_pages = nr_pages; pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n", (unsigned)pool->size / 1024); return 0; } +no_pages: kfree(bitmap); no_bitmap: pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n", @@ -358,7 +386,7 @@ void __init dma_contiguous_remap(void) if (end > arm_lowmem_limit) end = arm_lowmem_limit; if (start >= end) - return; + continue; map.pfn = __phys_to_pfn(start); map.virtual = __phys_to_virt(start); @@ -423,7 +451,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page) unsigned int pageno; unsigned long flags; void *ptr = NULL; - size_t align; + unsigned long align_mask; if (!pool->vaddr) { WARN(1, "coherent pool not initialised!\n"); @@ -435,35 +463,53 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page) * small, so align them to their order in pages, minimum is a page * size. This helps reduce fragmentation of the DMA space. */ - align = PAGE_SIZE << get_order(size); + align_mask = (1 << get_order(size)) - 1; spin_lock_irqsave(&pool->lock, flags); pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages, - 0, count, (1 << align) - 1); + 0, count, align_mask); if (pageno < pool->nr_pages) { bitmap_set(pool->bitmap, pageno, count); ptr = pool->vaddr + PAGE_SIZE * pageno; - *ret_page = pool->page + pageno; + *ret_page = pool->pages[pageno]; + } else { + pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n" + "Please increase it with coherent_pool= kernel parameter!\n", + (unsigned)pool->size / 1024); } spin_unlock_irqrestore(&pool->lock, flags); return ptr; } +static bool __in_atomic_pool(void *start, size_t size) +{ + struct dma_pool *pool = &atomic_pool; + void *end = start + size; + void *pool_start = pool->vaddr; + void *pool_end = pool->vaddr + pool->size; + + if (start < pool_start || start > pool_end) + return false; + + if (end <= pool_end) + return true; + + WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n", + start, end - 1, pool_start, pool_end - 1); + + return false; +} + static int __free_from_pool(void *start, size_t size) { struct dma_pool *pool = &atomic_pool; unsigned long pageno, count; unsigned long flags; - if (start < pool->vaddr || start > pool->vaddr + pool->size) + if (!__in_atomic_pool(start, size)) return 0; - if (start + size > pool->vaddr + pool->size) { - WARN(1, "freeing wrong coherent size from pool\n"); - return 0; - } - pageno = (start - pool->vaddr) >> PAGE_SHIFT; count = size >> PAGE_SHIFT; @@ -648,12 +694,12 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, if (arch_is_coherent() || nommu()) { __dma_free_buffer(page, size); + } else if (__free_from_pool(cpu_addr, size)) { + return; } else if 
(!IS_ENABLED(CONFIG_CMA)) { __dma_free_remap(cpu_addr, size); __dma_free_buffer(page, size); } else { - if (__free_from_pool(cpu_addr, size)) - return; /* * Non-atomic allocations cannot be freed with IRQs disabled */ @@ -1090,10 +1136,22 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si return 0; } +static struct page **__atomic_get_pages(void *addr) +{ + struct dma_pool *pool = &atomic_pool; + struct page **pages = pool->pages; + int offs = (addr - pool->vaddr) >> PAGE_SHIFT; + + return pages + offs; +} + static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) { struct vm_struct *area; + if (__in_atomic_pool(cpu_addr, PAGE_SIZE)) + return __atomic_get_pages(cpu_addr); + if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) return cpu_addr; @@ -1103,6 +1161,34 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) return NULL; } +static void *__iommu_alloc_atomic(struct device *dev, size_t size, + dma_addr_t *handle) +{ + struct page *page; + void *addr; + + addr = __alloc_from_pool(size, &page); + if (!addr) + return NULL; + + *handle = __iommu_create_mapping(dev, &page, size); + if (*handle == DMA_ERROR_CODE) + goto err_mapping; + + return addr; + +err_mapping: + __free_from_pool(addr, size); + return NULL; +} + +static void __iommu_free_atomic(struct device *dev, struct page **pages, + dma_addr_t handle, size_t size) +{ + __iommu_remove_mapping(dev, handle, size); + __free_from_pool(page_address(pages[0]), size); +} + static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) { @@ -1113,6 +1199,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, *handle = DMA_ERROR_CODE; size = PAGE_ALIGN(size); + if (gfp & GFP_ATOMIC) + return __iommu_alloc_atomic(dev, size, handle); + pages = __iommu_alloc_buffer(dev, size, gfp); if (!pages) return NULL; @@ -1179,6 +1268,11 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, return; } + if (__in_atomic_pool(cpu_addr, size)) { + __iommu_free_atomic(dev, pages, handle, size); + return; + } + if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) { unmap_kernel_range((unsigned long)cpu_addr, size); vunmap(cpu_addr); diff --git a/trunk/arch/arm/mm/flush.c b/trunk/arch/arm/mm/flush.c index 77458548e031..40ca11ed6e5f 100644 --- a/trunk/arch/arm/mm/flush.c +++ b/trunk/arch/arm/mm/flush.c @@ -231,8 +231,6 @@ void __sync_icache_dcache(pte_t pteval) struct page *page; struct address_space *mapping; - if (!pte_present_user(pteval)) - return; if (cache_is_vipt_nonaliasing() && !pte_exec(pteval)) /* only flush non-aliasing VIPT caches for exec mappings */ return; diff --git a/trunk/arch/arm/mm/tlb-v7.S b/trunk/arch/arm/mm/tlb-v7.S index 845f461f8ec1..ea94765acf9a 100644 --- a/trunk/arch/arm/mm/tlb-v7.S +++ b/trunk/arch/arm/mm/tlb-v7.S @@ -39,10 +39,18 @@ ENTRY(v7wbi_flush_user_tlb_range) mov r0, r0, lsr #PAGE_SHIFT @ align address mov r1, r1, lsr #PAGE_SHIFT asid r3, r3 @ mask ASID +#ifdef CONFIG_ARM_ERRATA_720789 + ALT_SMP(W(mov) r3, #0 ) + ALT_UP(W(nop) ) +#endif orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA mov r1, r1, lsl #PAGE_SHIFT 1: +#ifdef CONFIG_ARM_ERRATA_720789 + ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable) +#else ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable) +#endif ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA add r0, r0, #PAGE_SZ @@ -67,7 +75,11 @@ ENTRY(v7wbi_flush_kern_tlb_range) 
mov r0, r0, lsl #PAGE_SHIFT mov r1, r1, lsl #PAGE_SHIFT 1: +#ifdef CONFIG_ARM_ERRATA_720789 + ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable) +#else ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable) +#endif ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA add r0, r0, #PAGE_SZ cmp r0, r1 diff --git a/trunk/arch/arm/plat-mxc/tzic.c b/trunk/arch/arm/plat-mxc/tzic.c index c2193178210b..3ed1adbc09f8 100644 --- a/trunk/arch/arm/plat-mxc/tzic.c +++ b/trunk/arch/arm/plat-mxc/tzic.c @@ -23,6 +23,7 @@ #include #include +#include #include "irq-common.h" diff --git a/trunk/arch/arm/plat-omap/dmtimer.c b/trunk/arch/arm/plat-omap/dmtimer.c index 626ad8cad7a9..938b50a33439 100644 --- a/trunk/arch/arm/plat-omap/dmtimer.c +++ b/trunk/arch/arm/plat-omap/dmtimer.c @@ -189,6 +189,7 @@ struct omap_dm_timer *omap_dm_timer_request(void) timer->reserved = 1; break; } + spin_unlock_irqrestore(&dm_timer_lock, flags); if (timer) { ret = omap_dm_timer_prepare(timer); @@ -197,7 +198,6 @@ struct omap_dm_timer *omap_dm_timer_request(void) timer = NULL; } } - spin_unlock_irqrestore(&dm_timer_lock, flags); if (!timer) pr_debug("%s: timer request failed!\n", __func__); @@ -220,6 +220,7 @@ struct omap_dm_timer *omap_dm_timer_request_specific(int id) break; } } + spin_unlock_irqrestore(&dm_timer_lock, flags); if (timer) { ret = omap_dm_timer_prepare(timer); @@ -228,7 +229,6 @@ struct omap_dm_timer *omap_dm_timer_request_specific(int id) timer = NULL; } } - spin_unlock_irqrestore(&dm_timer_lock, flags); if (!timer) pr_debug("%s: timer%d request failed!\n", __func__, id); @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_enable); void omap_dm_timer_disable(struct omap_dm_timer *timer) { - pm_runtime_put(&timer->pdev->dev); + pm_runtime_put_sync(&timer->pdev->dev); } EXPORT_SYMBOL_GPL(omap_dm_timer_disable); diff --git a/trunk/arch/arm/plat-omap/include/plat/cpu.h b/trunk/arch/arm/plat-omap/include/plat/cpu.h index 68b180edcfff..bb5d08a70dbc 100644 --- a/trunk/arch/arm/plat-omap/include/plat/cpu.h +++ b/trunk/arch/arm/plat-omap/include/plat/cpu.h @@ -372,7 +372,8 @@ IS_OMAP_TYPE(3430, 0x3430) #define cpu_class_is_omap1() (cpu_is_omap7xx() || cpu_is_omap15xx() || \ cpu_is_omap16xx()) #define cpu_class_is_omap2() (cpu_is_omap24xx() || cpu_is_omap34xx() || \ - cpu_is_omap44xx() || soc_is_omap54xx()) + cpu_is_omap44xx() || soc_is_omap54xx() || \ + soc_is_am33xx()) /* Various silicon revisions for omap2 */ #define OMAP242X_CLASS 0x24200024 diff --git a/trunk/arch/arm/plat-omap/include/plat/mmc.h b/trunk/arch/arm/plat-omap/include/plat/mmc.h index 5493bd95da5e..eb3e4d555343 100644 --- a/trunk/arch/arm/plat-omap/include/plat/mmc.h +++ b/trunk/arch/arm/plat-omap/include/plat/mmc.h @@ -81,8 +81,6 @@ struct omap_mmc_platform_data { /* Return context loss count due to PM states changing */ int (*get_context_loss_count)(struct device *dev); - u64 dma_mask; - /* Integrating attributes from the omap_hwmod layer */ u8 controller_flags; diff --git a/trunk/arch/arm/plat-omap/include/plat/multi.h b/trunk/arch/arm/plat-omap/include/plat/multi.h index 045e320f1067..324d31b14852 100644 --- a/trunk/arch/arm/plat-omap/include/plat/multi.h +++ b/trunk/arch/arm/plat-omap/include/plat/multi.h @@ -108,4 +108,13 @@ # endif #endif +#ifdef CONFIG_SOC_AM33XX +# ifdef OMAP_NAME +# undef MULTI_OMAP2 +# define MULTI_OMAP2 +# else +# define OMAP_NAME am33xx +# endif +#endif + #endif /* __PLAT_OMAP_MULTI_H */ diff --git a/trunk/arch/arm/plat-omap/include/plat/uncompress.h 
b/trunk/arch/arm/plat-omap/include/plat/uncompress.h index b8d19a136781..7f7b112acccb 100644 --- a/trunk/arch/arm/plat-omap/include/plat/uncompress.h +++ b/trunk/arch/arm/plat-omap/include/plat/uncompress.h @@ -110,7 +110,7 @@ static inline void flush(void) _DEBUG_LL_ENTRY(mach, AM33XX_UART##p##_BASE, OMAP_PORT_SHIFT, \ AM33XXUART##p) -static inline void __arch_decomp_setup(unsigned long arch_id) +static inline void arch_decomp_setup(void) { int port = 0; @@ -198,8 +198,6 @@ static inline void __arch_decomp_setup(unsigned long arch_id) } while (0); } -#define arch_decomp_setup() __arch_decomp_setup(arch_id) - /* * nothing to do */ diff --git a/trunk/arch/arm/plat-orion/common.c b/trunk/arch/arm/plat-orion/common.c index c1793786aea9..b8b747a9d360 100644 --- a/trunk/arch/arm/plat-orion/common.c +++ b/trunk/arch/arm/plat-orion/common.c @@ -47,6 +47,7 @@ void __init orion_clkdev_init(struct clk *tclk) orion_clkdev_add(NULL, MV643XX_ETH_NAME ".2", tclk); orion_clkdev_add(NULL, MV643XX_ETH_NAME ".3", tclk); orion_clkdev_add(NULL, "orion_wdt", tclk); + orion_clkdev_add(NULL, MV64XXX_I2C_CTLR_NAME ".0", tclk); } /* Fill in the resources structure and link it into the platform @@ -290,10 +291,12 @@ static struct platform_device orion_ge00 = { void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data, unsigned long mapbase, unsigned long irq, - unsigned long irq_err) + unsigned long irq_err, + unsigned int tx_csum_limit) { fill_resources(&orion_ge00_shared, orion_ge00_shared_resources, mapbase + 0x2000, SZ_16K - 1, irq_err); + orion_ge00_shared_data.tx_csum_limit = tx_csum_limit; ge_complete(&orion_ge00_shared_data, orion_ge00_resources, irq, &orion_ge00_shared, eth_data, &orion_ge00); @@ -342,10 +345,12 @@ static struct platform_device orion_ge01 = { void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data, unsigned long mapbase, unsigned long irq, - unsigned long irq_err) + unsigned long irq_err, + unsigned int tx_csum_limit) { fill_resources(&orion_ge01_shared, orion_ge01_shared_resources, mapbase + 0x2000, SZ_16K - 1, irq_err); + orion_ge01_shared_data.tx_csum_limit = tx_csum_limit; ge_complete(&orion_ge01_shared_data, orion_ge01_resources, irq, &orion_ge01_shared, eth_data, &orion_ge01); diff --git a/trunk/arch/arm/plat-orion/gpio.c b/trunk/arch/arm/plat-orion/gpio.c index af95af257301..dfda74fae6f2 100644 --- a/trunk/arch/arm/plat-orion/gpio.c +++ b/trunk/arch/arm/plat-orion/gpio.c @@ -8,15 +8,22 @@ * warranty of any kind, whether express or implied. */ +#define DEBUG + #include #include #include +#include #include #include #include #include #include #include +#include +#include +#include +#include /* * GPIO unit register offsets. @@ -38,6 +45,7 @@ struct orion_gpio_chip { unsigned long valid_output; int mask_offset; int secondary_irq_base; + struct irq_domain *domain; }; static void __iomem *GPIO_OUT(struct orion_gpio_chip *ochip) @@ -222,10 +230,10 @@ static int orion_gpio_to_irq(struct gpio_chip *chip, unsigned pin) struct orion_gpio_chip *ochip = container_of(chip, struct orion_gpio_chip, chip); - return ochip->secondary_irq_base + pin; + return irq_create_mapping(ochip->domain, + ochip->secondary_irq_base + pin); } - /* * Orion-specific GPIO API extensions. 
*/ @@ -353,12 +361,10 @@ static int gpio_irq_set_type(struct irq_data *d, u32 type) int pin; u32 u; - pin = d->irq - gc->irq_base; + pin = d->hwirq - ochip->secondary_irq_base; u = readl(GPIO_IO_CONF(ochip)) & (1 << pin); if (!u) { - printk(KERN_ERR "orion gpio_irq_set_type failed " - "(irq %d, pin %d).\n", d->irq, pin); return -EINVAL; } @@ -397,17 +403,53 @@ static int gpio_irq_set_type(struct irq_data *d, u32 type) u &= ~(1 << pin); /* rising */ writel(u, GPIO_IN_POL(ochip)); } - return 0; } -void __init orion_gpio_init(int gpio_base, int ngpio, - u32 base, int mask_offset, int secondary_irq_base) +static void gpio_irq_handler(unsigned irq, struct irq_desc *desc) +{ + struct orion_gpio_chip *ochip = irq_get_handler_data(irq); + u32 cause, type; + int i; + + if (ochip == NULL) + return; + + cause = readl(GPIO_DATA_IN(ochip)) & readl(GPIO_LEVEL_MASK(ochip)); + cause |= readl(GPIO_EDGE_CAUSE(ochip)) & readl(GPIO_EDGE_MASK(ochip)); + + for (i = 0; i < ochip->chip.ngpio; i++) { + int irq; + + irq = ochip->secondary_irq_base + i; + + if (!(cause & (1 << i))) + continue; + + type = irqd_get_trigger_type(irq_get_irq_data(irq)); + if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) { + /* Swap polarity (race with GPIO line) */ + u32 polarity; + + polarity = readl(GPIO_IN_POL(ochip)); + polarity ^= 1 << i; + writel(polarity, GPIO_IN_POL(ochip)); + } + generic_handle_irq(irq); + } +} + +void __init orion_gpio_init(struct device_node *np, + int gpio_base, int ngpio, + void __iomem *base, int mask_offset, + int secondary_irq_base, + int irqs[4]) { struct orion_gpio_chip *ochip; struct irq_chip_generic *gc; struct irq_chip_type *ct; char gc_label[16]; + int i; if (orion_gpio_chip_count == ARRAY_SIZE(orion_gpio_chips)) return; @@ -426,6 +468,10 @@ void __init orion_gpio_init(int gpio_base, int ngpio, ochip->chip.base = gpio_base; ochip->chip.ngpio = ngpio; ochip->chip.can_sleep = 0; +#ifdef CONFIG_OF + ochip->chip.of_node = np; +#endif + spin_lock_init(&ochip->lock); ochip->base = (void __iomem *)base; ochip->valid_input = 0; @@ -435,8 +481,6 @@ void __init orion_gpio_init(int gpio_base, int ngpio, gpiochip_add(&ochip->chip); - orion_gpio_chip_count++; - /* * Mask and clear GPIO interrupts. */ @@ -444,16 +488,28 @@ void __init orion_gpio_init(int gpio_base, int ngpio, writel(0, GPIO_EDGE_MASK(ochip)); writel(0, GPIO_LEVEL_MASK(ochip)); - gc = irq_alloc_generic_chip("orion_gpio_irq", 2, secondary_irq_base, + /* Setup the interrupt handlers. Each chip can have up to 4 + * interrupt handlers, with each handler dealing with 8 GPIO + * pins. 
*/ + + for (i = 0; i < 4; i++) { + if (irqs[i]) { + irq_set_handler_data(irqs[i], ochip); + irq_set_chained_handler(irqs[i], gpio_irq_handler); + } + } + + gc = irq_alloc_generic_chip("orion_gpio_irq", 2, + secondary_irq_base, ochip->base, handle_level_irq); gc->private = ochip; - ct = gc->chip_types; ct->regs.mask = ochip->mask_offset + GPIO_LEVEL_MASK_OFF; ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW; ct->chip.irq_mask = irq_gc_mask_clr_bit; ct->chip.irq_unmask = irq_gc_mask_set_bit; ct->chip.irq_set_type = gpio_irq_set_type; + ct->chip.name = ochip->chip.label; ct++; ct->regs.mask = ochip->mask_offset + GPIO_EDGE_MASK_OFF; @@ -464,41 +520,69 @@ void __init orion_gpio_init(int gpio_base, int ngpio, ct->chip.irq_unmask = irq_gc_mask_set_bit; ct->chip.irq_set_type = gpio_irq_set_type; ct->handler = handle_edge_irq; + ct->chip.name = ochip->chip.label; irq_setup_generic_chip(gc, IRQ_MSK(ngpio), IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE); -} -void orion_gpio_irq_handler(int pinoff) -{ - struct orion_gpio_chip *ochip; - u32 cause, type; - int i; - - ochip = orion_gpio_chip_find(pinoff); - if (ochip == NULL) - return; - - cause = readl(GPIO_DATA_IN(ochip)) & readl(GPIO_LEVEL_MASK(ochip)); - cause |= readl(GPIO_EDGE_CAUSE(ochip)) & readl(GPIO_EDGE_MASK(ochip)); - - for (i = 0; i < ochip->chip.ngpio; i++) { - int irq; + /* Setup irq domain on top of the generic chip. */ + ochip->domain = irq_domain_add_legacy(np, + ochip->chip.ngpio, + ochip->secondary_irq_base, + ochip->secondary_irq_base, + &irq_domain_simple_ops, + ochip); + if (!ochip->domain) + panic("%s: couldn't allocate irq domain (DT).\n", + ochip->chip.label); - irq = ochip->secondary_irq_base + i; + orion_gpio_chip_count++; +} - if (!(cause & (1 << i))) - continue; +#ifdef CONFIG_OF +static void __init orion_gpio_of_init_one(struct device_node *np, + int irq_gpio_base) +{ + int ngpio, gpio_base, mask_offset; + void __iomem *base; + int ret, i; + int irqs[4]; + int secondary_irq_base; + + ret = of_property_read_u32(np, "ngpio", &ngpio); + if (ret) + goto out; + ret = of_property_read_u32(np, "mask-offset", &mask_offset); + if (ret == -EINVAL) + mask_offset = 0; + else + goto out; + base = of_iomap(np, 0); + if (!base) + goto out; + + secondary_irq_base = irq_gpio_base + (32 * orion_gpio_chip_count); + gpio_base = 32 * orion_gpio_chip_count; + + /* Get the interrupt numbers. Each chip can have up to 4 + * interrupt handlers, with each handler dealing with 8 GPIO + * pins. 
*/ + + for (i = 0; i < 4; i++) + irqs[i] = irq_of_parse_and_map(np, i); + + orion_gpio_init(np, gpio_base, ngpio, base, mask_offset, + secondary_irq_base, irqs); + return; +out: + pr_err("%s: %s: missing mandatory property\n", __func__, np->name); +} - type = irqd_get_trigger_type(irq_get_irq_data(irq)); - if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) { - /* Swap polarity (race with GPIO line) */ - u32 polarity; +void __init orion_gpio_of_init(int irq_gpio_base) +{ + struct device_node *np; - polarity = readl(GPIO_IN_POL(ochip)); - polarity ^= 1 << i; - writel(polarity, GPIO_IN_POL(ochip)); - } - generic_handle_irq(irq); - } + for_each_compatible_node(np, NULL, "marvell,orion-gpio") + orion_gpio_of_init_one(np, irq_gpio_base); } +#endif diff --git a/trunk/arch/arm/plat-orion/include/plat/common.h b/trunk/arch/arm/plat-orion/include/plat/common.h index e00fdb213609..ae2377ef63e5 100644 --- a/trunk/arch/arm/plat-orion/include/plat/common.h +++ b/trunk/arch/arm/plat-orion/include/plat/common.h @@ -39,12 +39,14 @@ void __init orion_rtc_init(unsigned long mapbase, void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data, unsigned long mapbase, unsigned long irq, - unsigned long irq_err); + unsigned long irq_err, + unsigned int tx_csum_limit); void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data, unsigned long mapbase, unsigned long irq, - unsigned long irq_err); + unsigned long irq_err, + unsigned int tx_csum_limit); void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data, unsigned long mapbase, diff --git a/trunk/arch/arm/plat-orion/include/plat/gpio.h b/trunk/arch/arm/plat-orion/include/plat/gpio.h index bec0c98ce41f..81c6fc8a7b28 100644 --- a/trunk/arch/arm/plat-orion/include/plat/gpio.h +++ b/trunk/arch/arm/plat-orion/include/plat/gpio.h @@ -13,7 +13,7 @@ #include #include - +#include /* * Orion-specific GPIO API extensions. */ @@ -27,13 +27,11 @@ int orion_gpio_led_blink_set(unsigned gpio, int state, void orion_gpio_set_valid(unsigned pin, int mode); /* Initialize gpiolib. */ -void __init orion_gpio_init(int gpio_base, int ngpio, - u32 base, int mask_offset, int secondary_irq_base); - -/* - * GPIO interrupt handling. 
- */ -void orion_gpio_irq_handler(int irqoff); - +void __init orion_gpio_init(struct device_node *np, + int gpio_base, int ngpio, + void __iomem *base, int mask_offset, + int secondary_irq_base, + int irq[4]); +void __init orion_gpio_of_init(int irq_gpio_base); #endif diff --git a/trunk/arch/arm/plat-orion/include/plat/irq.h b/trunk/arch/arm/plat-orion/include/plat/irq.h index f05eeab94968..50547e417936 100644 --- a/trunk/arch/arm/plat-orion/include/plat/irq.h +++ b/trunk/arch/arm/plat-orion/include/plat/irq.h @@ -12,6 +12,5 @@ #define __PLAT_IRQ_H void orion_irq_init(unsigned int irq_start, void __iomem *maskaddr); - - +void __init orion_dt_init_irq(void); #endif diff --git a/trunk/arch/arm/plat-orion/irq.c b/trunk/arch/arm/plat-orion/irq.c index 2d5b9c1ef389..d751964def4c 100644 --- a/trunk/arch/arm/plat-orion/irq.c +++ b/trunk/arch/arm/plat-orion/irq.c @@ -11,8 +11,12 @@ #include #include #include +#include #include +#include +#include #include +#include void __init orion_irq_init(unsigned int irq_start, void __iomem *maskaddr) { @@ -32,3 +36,39 @@ void __init orion_irq_init(unsigned int irq_start, void __iomem *maskaddr) irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE); } + +#ifdef CONFIG_OF +static int __init orion_add_irq_domain(struct device_node *np, + struct device_node *interrupt_parent) +{ + int i = 0, irq_gpio; + void __iomem *base; + + do { + base = of_iomap(np, i); + if (base) { + orion_irq_init(i * 32, base); + i++; + } + } while (base); + + irq_domain_add_legacy(np, i * 32, 0, 0, + &irq_domain_simple_ops, NULL); + + irq_gpio = i * 32; + orion_gpio_of_init(irq_gpio); + + return 0; +} + +static const struct of_device_id orion_irq_match[] = { + { .compatible = "marvell,orion-intc", + .data = orion_add_irq_domain, }, + {}, +}; + +void __init orion_dt_init_irq(void) +{ + of_irq_init(orion_irq_match); +} +#endif diff --git a/trunk/arch/arm/plat-s3c24xx/dma.c b/trunk/arch/arm/plat-s3c24xx/dma.c index 28f898f75380..db98e7021f0d 100644 --- a/trunk/arch/arm/plat-s3c24xx/dma.c +++ b/trunk/arch/arm/plat-s3c24xx/dma.c @@ -430,7 +430,7 @@ s3c2410_dma_canload(struct s3c2410_dma_chan *chan) * when necessary. 
*/ -int s3c2410_dma_enqueue(unsigned int channel, void *id, +int s3c2410_dma_enqueue(enum dma_ch channel, void *id, dma_addr_t data, int size) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); diff --git a/trunk/arch/arm/plat-samsung/Kconfig b/trunk/arch/arm/plat-samsung/Kconfig index 7aca31c1df1f..9c3b90c3538e 100644 --- a/trunk/arch/arm/plat-samsung/Kconfig +++ b/trunk/arch/arm/plat-samsung/Kconfig @@ -403,7 +403,8 @@ config S5P_DEV_USB_EHCI config S3C24XX_PWM bool "PWM device support" - select HAVE_PWM + select PWM + select PWM_SAMSUNG help Support for exporting the PWM timer blocks via the pwm device system diff --git a/trunk/arch/arm/plat-samsung/devs.c b/trunk/arch/arm/plat-samsung/devs.c index 74e31ce35538..fc49f3dabd76 100644 --- a/trunk/arch/arm/plat-samsung/devs.c +++ b/trunk/arch/arm/plat-samsung/devs.c @@ -32,6 +32,8 @@ #include #include +#include + #include #include #include @@ -748,7 +750,8 @@ void __init s5p_i2c_hdmiphy_set_platdata(struct s3c2410_platform_i2c *pd) if (!pd) { pd = &default_i2c_data; - if (soc_is_exynos4210()) + if (soc_is_exynos4210() || + soc_is_exynos4212() || soc_is_exynos4412()) pd->bus_num = 8; else if (soc_is_s5pv210()) pd->bus_num = 3; @@ -759,6 +762,30 @@ void __init s5p_i2c_hdmiphy_set_platdata(struct s3c2410_platform_i2c *pd) npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c), &s5p_device_i2c_hdmiphy); } + +struct s5p_hdmi_platform_data s5p_hdmi_def_platdata; + +void __init s5p_hdmi_set_platdata(struct i2c_board_info *hdmiphy_info, + struct i2c_board_info *mhl_info, int mhl_bus) +{ + struct s5p_hdmi_platform_data *pd = &s5p_hdmi_def_platdata; + + if (soc_is_exynos4210() || + soc_is_exynos4212() || soc_is_exynos4412()) + pd->hdmiphy_bus = 8; + else if (soc_is_s5pv210()) + pd->hdmiphy_bus = 3; + else + pd->hdmiphy_bus = 0; + + pd->hdmiphy_info = hdmiphy_info; + pd->mhl_info = mhl_info; + pd->mhl_bus = mhl_bus; + + s3c_set_platdata(pd, sizeof(struct s5p_hdmi_platform_data), + &s5p_device_hdmi); +} + #endif /* CONFIG_S5P_DEV_I2C_HDMIPHY */ /* I2S */ diff --git a/trunk/arch/arm/plat-samsung/include/plat/hdmi.h b/trunk/arch/arm/plat-samsung/include/plat/hdmi.h new file mode 100644 index 000000000000..331d046ac2c5 --- /dev/null +++ b/trunk/arch/arm/plat-samsung/include/plat/hdmi.h @@ -0,0 +1,16 @@ +/* + * Copyright (C) 2012 Samsung Electronics Co.Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef __PLAT_SAMSUNG_HDMI_H +#define __PLAT_SAMSUNG_HDMI_H __FILE__ + +extern void s5p_hdmi_set_platdata(struct i2c_board_info *hdmiphy_info, + struct i2c_board_info *mhl_info, int mhl_bus); + +#endif /* __PLAT_SAMSUNG_HDMI_H */ diff --git a/trunk/arch/arm/plat-samsung/pm.c b/trunk/arch/arm/plat-samsung/pm.c index 64ab65f0fdbc..15070284343e 100644 --- a/trunk/arch/arm/plat-samsung/pm.c +++ b/trunk/arch/arm/plat-samsung/pm.c @@ -74,7 +74,7 @@ unsigned char pm_uart_udivslot; #ifdef CONFIG_SAMSUNG_PM_DEBUG -struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS]; +static struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS]; static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save) { diff --git a/trunk/arch/arm/plat-spear/include/plat/pl080.h b/trunk/arch/arm/plat-spear/include/plat/pl080.h index 2bc6b54460a8..eb6590ded40d 100644 --- a/trunk/arch/arm/plat-spear/include/plat/pl080.h +++ b/trunk/arch/arm/plat-spear/include/plat/pl080.h @@ -14,8 +14,8 @@ #ifndef __PLAT_PL080_H #define __PLAT_PL080_H -struct pl08x_dma_chan; -int pl080_get_signal(struct pl08x_dma_chan *ch); -void pl080_put_signal(struct pl08x_dma_chan *ch); +struct pl08x_channel_data; +int pl080_get_signal(const struct pl08x_channel_data *cd); +void pl080_put_signal(const struct pl08x_channel_data *cd, int signal); #endif /* __PLAT_PL080_H */ diff --git a/trunk/arch/arm/plat-spear/pl080.c b/trunk/arch/arm/plat-spear/pl080.c index 12cf27f935f9..cfa1199d0f4a 100644 --- a/trunk/arch/arm/plat-spear/pl080.c +++ b/trunk/arch/arm/plat-spear/pl080.c @@ -27,9 +27,8 @@ struct { unsigned char val; } signals[16] = {{0, 0}, }; -int pl080_get_signal(struct pl08x_dma_chan *ch) +int pl080_get_signal(const struct pl08x_channel_data *cd) { - const struct pl08x_channel_data *cd = ch->cd; unsigned int signal = cd->min_signal, val; unsigned long flags; @@ -63,18 +62,17 @@ int pl080_get_signal(struct pl08x_dma_chan *ch) return signal; } -void pl080_put_signal(struct pl08x_dma_chan *ch) +void pl080_put_signal(const struct pl08x_channel_data *cd, int signal) { - const struct pl08x_channel_data *cd = ch->cd; unsigned long flags; spin_lock_irqsave(&lock, flags); /* if signal is not used */ - if (!signals[cd->min_signal].busy) + if (!signals[signal].busy) BUG(); - signals[cd->min_signal].busy--; + signals[signal].busy--; spin_unlock_irqrestore(&lock, flags); } diff --git a/trunk/arch/arm/vfp/entry.S b/trunk/arch/arm/vfp/entry.S index 4fa9903b83cf..cc926c985981 100644 --- a/trunk/arch/arm/vfp/entry.S +++ b/trunk/arch/arm/vfp/entry.S @@ -7,18 +7,20 @@ * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. - * - * Basic entry code, called from the kernel's undefined instruction trap. - * r0 = faulted instruction - * r5 = faulted PC+4 - * r9 = successful return - * r10 = thread_info structure - * lr = failure return */ #include #include #include "../kernel/entry-header.S" +@ VFP entry point. +@ +@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb) +@ r2 = PC value to resume execution after successful emulation +@ r9 = normal "successful" return address +@ r10 = this threads thread_info structure +@ lr = unrecognised instruction return address +@ IRQs disabled. 
+@ ENTRY(do_vfp) #ifdef CONFIG_PREEMPT ldr r4, [r10, #TI_PREEMPT] @ get preempt count diff --git a/trunk/arch/arm/vfp/vfphw.S b/trunk/arch/arm/vfp/vfphw.S index d50f0e486cf2..ea0349f63586 100644 --- a/trunk/arch/arm/vfp/vfphw.S +++ b/trunk/arch/arm/vfp/vfphw.S @@ -62,13 +62,13 @@ @ VFP hardware support entry point. @ -@ r0 = faulted instruction -@ r2 = faulted PC+4 -@ r9 = successful return +@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb) +@ r2 = PC value to resume execution after successful emulation +@ r9 = normal "successful" return address @ r10 = vfp_state union @ r11 = CPU number -@ lr = failure return - +@ lr = unrecognised instruction return address +@ IRQs enabled. ENTRY(vfp_support_entry) DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10 @@ -162,9 +162,12 @@ vfp_hw_state_valid: @ exception before retrying branch @ out before setting an FPEXC that @ stops us reading stuff - VFPFMXR FPEXC, r1 @ restore FPEXC last - sub r2, r2, #4 - str r2, [sp, #S_PC] @ retry the instruction + VFPFMXR FPEXC, r1 @ Restore FPEXC last + sub r2, r2, #4 @ Retry current instruction - if Thumb + str r2, [sp, #S_PC] @ mode it's two 16-bit instructions, + @ else it's one 32-bit instruction, so + @ always subtract 4 from the following + @ instruction address. #ifdef CONFIG_PREEMPT get_thread_info r10 ldr r4, [r10, #TI_PREEMPT] @ get preempt count diff --git a/trunk/arch/arm/vfp/vfpmodule.c b/trunk/arch/arm/vfp/vfpmodule.c index 586961929e96..c834b32af275 100644 --- a/trunk/arch/arm/vfp/vfpmodule.c +++ b/trunk/arch/arm/vfp/vfpmodule.c @@ -457,10 +457,16 @@ static int vfp_pm_suspend(void) /* disable, just in case */ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); + } else if (vfp_current_hw_state[ti->cpu]) { +#ifndef CONFIG_SMP + fmxr(FPEXC, fpexc | FPEXC_EN); + vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc); + fmxr(FPEXC, fpexc); +#endif } /* clear any information we had about last context state */ - memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state)); + vfp_current_hw_state[ti->cpu] = NULL; return 0; } @@ -713,8 +719,10 @@ static int __init vfp_init(void) if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100) elf_hwcap |= HWCAP_NEON; #endif +#ifdef CONFIG_VFPv3 if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000) elf_hwcap |= HWCAP_VFPv4; +#endif } } return 0; diff --git a/trunk/arch/blackfin/Kconfig b/trunk/arch/blackfin/Kconfig index f34861920634..c7092e6057c5 100644 --- a/trunk/arch/blackfin/Kconfig +++ b/trunk/arch/blackfin/Kconfig @@ -38,6 +38,7 @@ config BLACKFIN select GENERIC_ATOMIC64 select GENERIC_IRQ_PROBE select IRQ_PER_CPU if SMP + select USE_GENERIC_SMP_HELPERS if SMP select HAVE_NMI_WATCHDOG if NMI_WATCHDOG select GENERIC_SMP_IDLE_THREAD select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS diff --git a/trunk/arch/blackfin/Makefile b/trunk/arch/blackfin/Makefile index d3d7e64ca96d..66cf00095b84 100644 --- a/trunk/arch/blackfin/Makefile +++ b/trunk/arch/blackfin/Makefile @@ -20,7 +20,6 @@ endif KBUILD_AFLAGS += $(call cc-option,-mno-fdpic) KBUILD_CFLAGS_MODULE += -mlong-calls LDFLAGS += -m elf32bfin -KALLSYMS += --symbol-prefix=_ KBUILD_DEFCONFIG := BF537-STAMP_defconfig diff --git a/trunk/arch/blackfin/include/asm/smp.h b/trunk/arch/blackfin/include/asm/smp.h index dc3d144b4bb5..9631598dcc5d 100644 --- a/trunk/arch/blackfin/include/asm/smp.h +++ b/trunk/arch/blackfin/include/asm/smp.h @@ -18,6 +18,8 @@ #define raw_smp_processor_id() blackfin_core_id() extern void bfin_relocate_coreb_l1_mem(void); +extern void arch_send_call_function_single_ipi(int cpu); +extern void 
arch_send_call_function_ipi_mask(const struct cpumask *mask); #if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1) asmlinkage void blackfin_icache_flush_range_l1(unsigned long *ptr); diff --git a/trunk/arch/blackfin/kernel/setup.c b/trunk/arch/blackfin/kernel/setup.c index ada8f0fc71e4..fb96e607adcf 100644 --- a/trunk/arch/blackfin/kernel/setup.c +++ b/trunk/arch/blackfin/kernel/setup.c @@ -52,7 +52,6 @@ EXPORT_SYMBOL(reserved_mem_dcache_on); #ifdef CONFIG_MTD_UCLINUX extern struct map_info uclinux_ram_map; unsigned long memory_mtd_end, memory_mtd_start, mtd_size; -unsigned long _ebss; EXPORT_SYMBOL(memory_mtd_end); EXPORT_SYMBOL(memory_mtd_start); EXPORT_SYMBOL(mtd_size); diff --git a/trunk/arch/blackfin/mach-common/smp.c b/trunk/arch/blackfin/mach-common/smp.c index 00bbe672b3b3..a40151306b77 100644 --- a/trunk/arch/blackfin/mach-common/smp.c +++ b/trunk/arch/blackfin/mach-common/smp.c @@ -48,10 +48,13 @@ unsigned long blackfin_iflush_l1_entry[NR_CPUS]; struct blackfin_initial_pda __cpuinitdata initial_pda_coreb; -#define BFIN_IPI_TIMER 0 -#define BFIN_IPI_RESCHEDULE 1 -#define BFIN_IPI_CALL_FUNC 2 -#define BFIN_IPI_CPU_STOP 3 +enum ipi_message_type { + BFIN_IPI_TIMER, + BFIN_IPI_RESCHEDULE, + BFIN_IPI_CALL_FUNC, + BFIN_IPI_CALL_FUNC_SINGLE, + BFIN_IPI_CPU_STOP, +}; struct blackfin_flush_data { unsigned long start; @@ -60,35 +63,20 @@ struct blackfin_flush_data { void *secondary_stack; - -struct smp_call_struct { - void (*func)(void *info); - void *info; - int wait; - cpumask_t *waitmask; -}; - static struct blackfin_flush_data smp_flush_data; static DEFINE_SPINLOCK(stop_lock); -struct ipi_message { - unsigned long type; - struct smp_call_struct call_struct; -}; - /* A magic number - stress test shows this is safe for common cases */ #define BFIN_IPI_MSGQ_LEN 5 /* Simple FIFO buffer, overflow leads to panic */ -struct ipi_message_queue { - spinlock_t lock; +struct ipi_data { unsigned long count; - unsigned long head; /* head of the queue */ - struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN]; + unsigned long bits; }; -static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue); +static DEFINE_PER_CPU(struct ipi_data, bfin_ipi); static void ipi_cpu_stop(unsigned int cpu) { @@ -129,28 +117,6 @@ static void ipi_flush_icache(void *info) blackfin_icache_flush_range(fdata->start, fdata->end); } -static void ipi_call_function(unsigned int cpu, struct ipi_message *msg) -{ - int wait; - void (*func)(void *info); - void *info; - func = msg->call_struct.func; - info = msg->call_struct.info; - wait = msg->call_struct.wait; - func(info); - if (wait) { -#ifdef __ARCH_SYNC_CORE_DCACHE - /* - * 'wait' usually means synchronization between CPUs. - * Invalidate D cache in case shared data was changed - * by func() to ensure cache coherence. - */ - resync_core_dcache(); -#endif - cpumask_clear_cpu(cpu, msg->call_struct.waitmask); - } -} - /* Use IRQ_SUPPLE_0 to request reschedule. 
* When returning from interrupt to user space, * there is chance to reschedule */ @@ -172,152 +138,95 @@ void ipi_timer(void) static irqreturn_t ipi_handler_int1(int irq, void *dev_instance) { - struct ipi_message *msg; - struct ipi_message_queue *msg_queue; + struct ipi_data *bfin_ipi_data; unsigned int cpu = smp_processor_id(); - unsigned long flags; + unsigned long pending; + unsigned long msg; platform_clear_ipi(cpu, IRQ_SUPPLE_1); - msg_queue = &__get_cpu_var(ipi_msg_queue); - - spin_lock_irqsave(&msg_queue->lock, flags); - - while (msg_queue->count) { - msg = &msg_queue->ipi_message[msg_queue->head]; - switch (msg->type) { - case BFIN_IPI_TIMER: - ipi_timer(); - break; - case BFIN_IPI_RESCHEDULE: - scheduler_ipi(); - break; - case BFIN_IPI_CALL_FUNC: - ipi_call_function(cpu, msg); - break; - case BFIN_IPI_CPU_STOP: - ipi_cpu_stop(cpu); - break; - default: - printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n", - cpu, msg->type); - break; - } - msg_queue->head++; - msg_queue->head %= BFIN_IPI_MSGQ_LEN; - msg_queue->count--; + bfin_ipi_data = &__get_cpu_var(bfin_ipi); + + while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) { + msg = 0; + do { + msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1); + switch (msg) { + case BFIN_IPI_TIMER: + ipi_timer(); + break; + case BFIN_IPI_RESCHEDULE: + scheduler_ipi(); + break; + case BFIN_IPI_CALL_FUNC: + generic_smp_call_function_interrupt(); + break; + + case BFIN_IPI_CALL_FUNC_SINGLE: + generic_smp_call_function_single_interrupt(); + break; + + case BFIN_IPI_CPU_STOP: + ipi_cpu_stop(cpu); + break; + } + } while (msg < BITS_PER_LONG); + + smp_mb(); } - spin_unlock_irqrestore(&msg_queue->lock, flags); return IRQ_HANDLED; } -static void ipi_queue_init(void) +static void bfin_ipi_init(void) { unsigned int cpu; - struct ipi_message_queue *msg_queue; + struct ipi_data *bfin_ipi_data; for_each_possible_cpu(cpu) { - msg_queue = &per_cpu(ipi_msg_queue, cpu); - spin_lock_init(&msg_queue->lock); - msg_queue->count = 0; - msg_queue->head = 0; + bfin_ipi_data = &per_cpu(bfin_ipi, cpu); + bfin_ipi_data->bits = 0; + bfin_ipi_data->count = 0; } } -static inline void smp_send_message(cpumask_t callmap, unsigned long type, - void (*func) (void *info), void *info, int wait) +void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg) { unsigned int cpu; - struct ipi_message_queue *msg_queue; - struct ipi_message *msg; - unsigned long flags, next_msg; - cpumask_t waitmask; /* waitmask is shared by all cpus */ - - cpumask_copy(&waitmask, &callmap); - for_each_cpu(cpu, &callmap) { - msg_queue = &per_cpu(ipi_msg_queue, cpu); - spin_lock_irqsave(&msg_queue->lock, flags); - if (msg_queue->count < BFIN_IPI_MSGQ_LEN) { - next_msg = (msg_queue->head + msg_queue->count) - % BFIN_IPI_MSGQ_LEN; - msg = &msg_queue->ipi_message[next_msg]; - msg->type = type; - if (type == BFIN_IPI_CALL_FUNC) { - msg->call_struct.func = func; - msg->call_struct.info = info; - msg->call_struct.wait = wait; - msg->call_struct.waitmask = &waitmask; - } - msg_queue->count++; - } else - panic("IPI message queue overflow\n"); - spin_unlock_irqrestore(&msg_queue->lock, flags); + struct ipi_data *bfin_ipi_data; + unsigned long flags; + + local_irq_save(flags); + + for_each_cpu(cpu, cpumask) { + bfin_ipi_data = &per_cpu(bfin_ipi, cpu); + smp_mb(); + set_bit(msg, &bfin_ipi_data->bits); + bfin_ipi_data->count++; platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1); } - if (wait) { - while (!cpumask_empty(&waitmask)) - blackfin_dcache_invalidate_range( - (unsigned long)(&waitmask), - (unsigned 
long)(&waitmask)); -#ifdef __ARCH_SYNC_CORE_DCACHE - /* - * Invalidate D cache in case shared data was changed by - * other processors to ensure cache coherence. - */ - resync_core_dcache(); -#endif - } + local_irq_restore(flags); } -int smp_call_function(void (*func)(void *info), void *info, int wait) +void arch_send_call_function_single_ipi(int cpu) { - cpumask_t callmap; - - preempt_disable(); - cpumask_copy(&callmap, cpu_online_mask); - cpumask_clear_cpu(smp_processor_id(), &callmap); - if (!cpumask_empty(&callmap)) - smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait); - - preempt_enable(); - - return 0; + send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC_SINGLE); } -EXPORT_SYMBOL_GPL(smp_call_function); -int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, - int wait) +void arch_send_call_function_ipi_mask(const struct cpumask *mask) { - unsigned int cpu = cpuid; - cpumask_t callmap; - - if (cpu_is_offline(cpu)) - return 0; - cpumask_clear(&callmap); - cpumask_set_cpu(cpu, &callmap); - - smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait); - - return 0; + send_ipi(mask, BFIN_IPI_CALL_FUNC); } -EXPORT_SYMBOL_GPL(smp_call_function_single); void smp_send_reschedule(int cpu) { - cpumask_t callmap; - /* simply trigger an ipi */ - - cpumask_clear(&callmap); - cpumask_set_cpu(cpu, &callmap); - - smp_send_message(callmap, BFIN_IPI_RESCHEDULE, NULL, NULL, 0); + send_ipi(cpumask_of(cpu), BFIN_IPI_RESCHEDULE); return; } void smp_send_msg(const struct cpumask *mask, unsigned long type) { - smp_send_message(*mask, type, NULL, NULL, 0); + send_ipi(mask, type); } void smp_timer_broadcast(const struct cpumask *mask) @@ -333,7 +242,7 @@ void smp_send_stop(void) cpumask_copy(&callmap, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &callmap); if (!cpumask_empty(&callmap)) - smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0); + send_ipi(&callmap, BFIN_IPI_CPU_STOP); preempt_enable(); @@ -436,7 +345,7 @@ void __init smp_prepare_boot_cpu(void) void __init smp_prepare_cpus(unsigned int max_cpus) { platform_prepare_cpus(max_cpus); - ipi_queue_init(); + bfin_ipi_init(); platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0); platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1); } diff --git a/trunk/arch/c6x/Kconfig b/trunk/arch/c6x/Kconfig index 052f81a76239..983c859e40b7 100644 --- a/trunk/arch/c6x/Kconfig +++ b/trunk/arch/c6x/Kconfig @@ -6,6 +6,7 @@ config C6X def_bool y select CLKDEV_LOOKUP + select GENERIC_ATOMIC64 select GENERIC_IRQ_SHOW select HAVE_ARCH_TRACEHOOK select HAVE_DMA_API_DEBUG diff --git a/trunk/arch/c6x/include/asm/cache.h b/trunk/arch/c6x/include/asm/cache.h index 6d521d96d941..09c5a0f5f4d1 100644 --- a/trunk/arch/c6x/include/asm/cache.h +++ b/trunk/arch/c6x/include/asm/cache.h @@ -1,7 +1,7 @@ /* * Port on Texas Instruments TMS320C6x architecture * - * Copyright (C) 2005, 2006, 2009, 2010 Texas Instruments Incorporated + * Copyright (C) 2005, 2006, 2009, 2010, 2012 Texas Instruments Incorporated * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com) * * This program is free software; you can redistribute it and/or modify @@ -16,9 +16,14 @@ /* * Cache line size */ -#define L1D_CACHE_BYTES 64 -#define L1P_CACHE_BYTES 32 -#define L2_CACHE_BYTES 128 +#define L1D_CACHE_SHIFT 6 +#define L1D_CACHE_BYTES (1 << L1D_CACHE_SHIFT) + +#define L1P_CACHE_SHIFT 5 +#define L1P_CACHE_BYTES (1 << L1P_CACHE_SHIFT) + +#define L2_CACHE_SHIFT 7 +#define L2_CACHE_BYTES (1 << L2_CACHE_SHIFT) /* * L2 used as cache @@ -29,7 +34,8 @@ * For practical 
reasons the L1_CACHE_BYTES defines should not be smaller than * the L2 line size */ -#define L1_CACHE_BYTES L2_CACHE_BYTES +#define L1_CACHE_SHIFT L2_CACHE_SHIFT +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) #define L2_CACHE_ALIGN_LOW(x) \ (((x) & ~(L2_CACHE_BYTES - 1))) diff --git a/trunk/arch/ia64/configs/generic_defconfig b/trunk/arch/ia64/configs/generic_defconfig index 954d81e2e837..7913695b2fcb 100644 --- a/trunk/arch/ia64/configs/generic_defconfig +++ b/trunk/arch/ia64/configs/generic_defconfig @@ -234,5 +234,4 @@ CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_MD5=y # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRC_T10DIF=y -CONFIG_MISC_DEVICES=y CONFIG_INTEL_IOMMU=y diff --git a/trunk/arch/ia64/configs/gensparse_defconfig b/trunk/arch/ia64/configs/gensparse_defconfig index 91c41ecfa6d9..f8e913365423 100644 --- a/trunk/arch/ia64/configs/gensparse_defconfig +++ b/trunk/arch/ia64/configs/gensparse_defconfig @@ -209,4 +209,3 @@ CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_MUTEXES=y CONFIG_CRYPTO_MD5=y -CONFIG_MISC_DEVICES=y diff --git a/trunk/arch/ia64/kernel/acpi.c b/trunk/arch/ia64/kernel/acpi.c index 6f38b6120d96..440578850ae5 100644 --- a/trunk/arch/ia64/kernel/acpi.c +++ b/trunk/arch/ia64/kernel/acpi.c @@ -497,7 +497,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) srat_num_cpus++; } -void __init +int __init acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) { unsigned long paddr, size; @@ -512,7 +512,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) /* Ignore disabled entries */ if (!(ma->flags & ACPI_SRAT_MEM_ENABLED)) - return; + return -1; /* record this node in proximity bitmap */ pxm_bit_set(pxm); @@ -531,6 +531,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) p->size = size; p->nid = pxm; num_node_memblks++; + return 0; } void __init acpi_numa_arch_fixup(void) diff --git a/trunk/arch/m68k/Kconfig b/trunk/arch/m68k/Kconfig index 0b0f8b8c4a26..b22df9410dce 100644 --- a/trunk/arch/m68k/Kconfig +++ b/trunk/arch/m68k/Kconfig @@ -5,6 +5,7 @@ config M68K select HAVE_AOUT if MMU select HAVE_GENERIC_HARDIRQS select GENERIC_IRQ_SHOW + select GENERIC_ATOMIC64 select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS select GENERIC_CPU_DEVICES select GENERIC_STRNCPY_FROM_USER if MMU @@ -54,18 +55,6 @@ config ZONE_DMA bool default y -config CPU_HAS_NO_BITFIELDS - bool - -config CPU_HAS_NO_MULDIV64 - bool - -config CPU_HAS_ADDRESS_SPACES - bool - -config FPU - bool - config HZ int default 1000 if CLEOPATRA diff --git a/trunk/arch/m68k/Kconfig.cpu b/trunk/arch/m68k/Kconfig.cpu index 43a9f8f1b8eb..c4eb79edecec 100644 --- a/trunk/arch/m68k/Kconfig.cpu +++ b/trunk/arch/m68k/Kconfig.cpu @@ -28,6 +28,7 @@ config COLDFIRE select CPU_HAS_NO_BITFIELDS select CPU_HAS_NO_MULDIV64 select GENERIC_CSUM + select HAVE_CLK endchoice @@ -37,6 +38,7 @@ config M68000 bool select CPU_HAS_NO_BITFIELDS select CPU_HAS_NO_MULDIV64 + select CPU_HAS_NO_UNALIGNED select GENERIC_CSUM help The Freescale (was Motorola) 68000 CPU is the first generation of @@ -48,6 +50,7 @@ config M68000 config MCPU32 bool select CPU_HAS_NO_BITFIELDS + select CPU_HAS_NO_UNALIGNED help The Freescale (was then Motorola) CPU32 is a CPU core that is based on the 68020 processor. 
For the most part it is used in @@ -56,7 +59,6 @@ config MCPU32 config M68020 bool "68020 support" depends on MMU - select GENERIC_ATOMIC64 select CPU_HAS_ADDRESS_SPACES help If you anticipate running this kernel on a computer with a MC68020 @@ -67,7 +69,6 @@ config M68020 config M68030 bool "68030 support" depends on MMU && !MMU_SUN3 - select GENERIC_ATOMIC64 select CPU_HAS_ADDRESS_SPACES help If you anticipate running this kernel on a computer with a MC68030 @@ -77,7 +78,6 @@ config M68030 config M68040 bool "68040 support" depends on MMU && !MMU_SUN3 - select GENERIC_ATOMIC64 select CPU_HAS_ADDRESS_SPACES help If you anticipate running this kernel on a computer with a MC68LC040 @@ -88,7 +88,6 @@ config M68040 config M68060 bool "68060 support" depends on MMU && !MMU_SUN3 - select GENERIC_ATOMIC64 select CPU_HAS_ADDRESS_SPACES help If you anticipate running this kernel on a computer with a MC68060 @@ -376,6 +375,18 @@ config NODES_SHIFT default "3" depends on !SINGLE_MEMORY_CHUNK +config CPU_HAS_NO_BITFIELDS + bool + +config CPU_HAS_NO_MULDIV64 + bool + +config CPU_HAS_NO_UNALIGNED + bool + +config CPU_HAS_ADDRESS_SPACES + bool + config FPU bool diff --git a/trunk/arch/m68k/apollo/config.c b/trunk/arch/m68k/apollo/config.c index 0a30406b9442..f5565d6eeb8e 100644 --- a/trunk/arch/m68k/apollo/config.c +++ b/trunk/arch/m68k/apollo/config.c @@ -177,8 +177,8 @@ irqreturn_t dn_timer_int(int irq, void *dev_id) timer_handler(irq, dev_id); - x=*(volatile unsigned char *)(timer+3); - x=*(volatile unsigned char *)(timer+5); + x = *(volatile unsigned char *)(apollo_timer + 3); + x = *(volatile unsigned char *)(apollo_timer + 5); return IRQ_HANDLED; } @@ -186,17 +186,17 @@ irqreturn_t dn_timer_int(int irq, void *dev_id) void dn_sched_init(irq_handler_t timer_routine) { /* program timer 1 */ - *(volatile unsigned char *)(timer+3)=0x01; - *(volatile unsigned char *)(timer+1)=0x40; - *(volatile unsigned char *)(timer+5)=0x09; - *(volatile unsigned char *)(timer+7)=0xc4; + *(volatile unsigned char *)(apollo_timer + 3) = 0x01; + *(volatile unsigned char *)(apollo_timer + 1) = 0x40; + *(volatile unsigned char *)(apollo_timer + 5) = 0x09; + *(volatile unsigned char *)(apollo_timer + 7) = 0xc4; /* enable IRQ of PIC B */ *(volatile unsigned char *)(pica+1)&=(~8); #if 0 - printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3)); - printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3)); + printk("*(0x10803) %02x\n",*(volatile unsigned char *)(apollo_timer + 0x3)); + printk("*(0x10803) %02x\n",*(volatile unsigned char *)(apollo_timer + 0x3)); #endif if (request_irq(IRQ_APOLLO, dn_timer_int, 0, "time", timer_routine)) diff --git a/trunk/arch/m68k/include/asm/Kbuild b/trunk/arch/m68k/include/asm/Kbuild index eafa2539a8ee..a74e5d95c384 100644 --- a/trunk/arch/m68k/include/asm/Kbuild +++ b/trunk/arch/m68k/include/asm/Kbuild @@ -1,4 +1,29 @@ include include/asm-generic/Kbuild.asm header-y += cachectl.h +generic-y += bitsperlong.h +generic-y += cputime.h +generic-y += device.h +generic-y += emergency-restart.h +generic-y += errno.h +generic-y += futex.h +generic-y += ioctl.h +generic-y += ipcbuf.h +generic-y += irq_regs.h +generic-y += kdebug.h +generic-y += kmap_types.h +generic-y += kvm_para.h +generic-y += local64.h +generic-y += local.h +generic-y += mman.h +generic-y += mutex.h +generic-y += percpu.h +generic-y += resource.h +generic-y += scatterlist.h +generic-y += sections.h +generic-y += siginfo.h +generic-y += statfs.h +generic-y += topology.h +generic-y += types.h generic-y += 
word-at-a-time.h +generic-y += xor.h diff --git a/trunk/arch/m68k/include/asm/MC68332.h b/trunk/arch/m68k/include/asm/MC68332.h deleted file mode 100644 index 6bb8f02685a2..000000000000 --- a/trunk/arch/m68k/include/asm/MC68332.h +++ /dev/null @@ -1,152 +0,0 @@ - -/* include/asm-m68knommu/MC68332.h: '332 control registers - * - * Copyright (C) 1998 Kenneth Albanowski , - * - */ - -#ifndef _MC68332_H_ -#define _MC68332_H_ - -#define BYTE_REF(addr) (*((volatile unsigned char*)addr)) -#define WORD_REF(addr) (*((volatile unsigned short*)addr)) - -#define PORTE_ADDR 0xfffa11 -#define PORTE BYTE_REF(PORTE_ADDR) -#define DDRE_ADDR 0xfffa15 -#define DDRE BYTE_REF(DDRE_ADDR) -#define PEPAR_ADDR 0xfffa17 -#define PEPAR BYTE_REF(PEPAR_ADDR) - -#define PORTF_ADDR 0xfffa19 -#define PORTF BYTE_REF(PORTF_ADDR) -#define DDRF_ADDR 0xfffa1d -#define DDRF BYTE_REF(DDRF_ADDR) -#define PFPAR_ADDR 0xfffa1f -#define PFPAR BYTE_REF(PFPAR_ADDR) - -#define PORTQS_ADDR 0xfffc15 -#define PORTQS BYTE_REF(PORTQS_ADDR) -#define DDRQS_ADDR 0xfffc17 -#define DDRQS BYTE_REF(DDRQS_ADDR) -#define PQSPAR_ADDR 0xfffc16 -#define PQSPAR BYTE_REF(PQSPAR_ADDR) - -#define CSPAR0_ADDR 0xFFFA44 -#define CSPAR0 WORD_REF(CSPAR0_ADDR) -#define CSPAR1_ADDR 0xFFFA46 -#define CSPAR1 WORD_REF(CSPAR1_ADDR) -#define CSARBT_ADDR 0xFFFA48 -#define CSARBT WORD_REF(CSARBT_ADDR) -#define CSOPBT_ADDR 0xFFFA4A -#define CSOPBT WORD_REF(CSOPBT_ADDR) -#define CSBAR0_ADDR 0xFFFA4C -#define CSBAR0 WORD_REF(CSBAR0_ADDR) -#define CSOR0_ADDR 0xFFFA4E -#define CSOR0 WORD_REF(CSOR0_ADDR) -#define CSBAR1_ADDR 0xFFFA50 -#define CSBAR1 WORD_REF(CSBAR1_ADDR) -#define CSOR1_ADDR 0xFFFA52 -#define CSOR1 WORD_REF(CSOR1_ADDR) -#define CSBAR2_ADDR 0xFFFA54 -#define CSBAR2 WORD_REF(CSBAR2_ADDR) -#define CSOR2_ADDR 0xFFFA56 -#define CSOR2 WORD_REF(CSOR2_ADDR) -#define CSBAR3_ADDR 0xFFFA58 -#define CSBAR3 WORD_REF(CSBAR3_ADDR) -#define CSOR3_ADDR 0xFFFA5A -#define CSOR3 WORD_REF(CSOR3_ADDR) -#define CSBAR4_ADDR 0xFFFA5C -#define CSBAR4 WORD_REF(CSBAR4_ADDR) -#define CSOR4_ADDR 0xFFFA5E -#define CSOR4 WORD_REF(CSOR4_ADDR) -#define CSBAR5_ADDR 0xFFFA60 -#define CSBAR5 WORD_REF(CSBAR5_ADDR) -#define CSOR5_ADDR 0xFFFA62 -#define CSOR5 WORD_REF(CSOR5_ADDR) -#define CSBAR6_ADDR 0xFFFA64 -#define CSBAR6 WORD_REF(CSBAR6_ADDR) -#define CSOR6_ADDR 0xFFFA66 -#define CSOR6 WORD_REF(CSOR6_ADDR) -#define CSBAR7_ADDR 0xFFFA68 -#define CSBAR7 WORD_REF(CSBAR7_ADDR) -#define CSOR7_ADDR 0xFFFA6A -#define CSOR7 WORD_REF(CSOR7_ADDR) -#define CSBAR8_ADDR 0xFFFA6C -#define CSBAR8 WORD_REF(CSBAR8_ADDR) -#define CSOR8_ADDR 0xFFFA6E -#define CSOR8 WORD_REF(CSOR8_ADDR) -#define CSBAR9_ADDR 0xFFFA70 -#define CSBAR9 WORD_REF(CSBAR9_ADDR) -#define CSOR9_ADDR 0xFFFA72 -#define CSOR9 WORD_REF(CSOR9_ADDR) -#define CSBAR10_ADDR 0xFFFA74 -#define CSBAR10 WORD_REF(CSBAR10_ADDR) -#define CSOR10_ADDR 0xFFFA76 -#define CSOR10 WORD_REF(CSOR10_ADDR) - -#define CSOR_MODE_ASYNC 0x0000 -#define CSOR_MODE_SYNC 0x8000 -#define CSOR_MODE_MASK 0x8000 -#define CSOR_BYTE_DISABLE 0x0000 -#define CSOR_BYTE_UPPER 0x4000 -#define CSOR_BYTE_LOWER 0x2000 -#define CSOR_BYTE_BOTH 0x6000 -#define CSOR_BYTE_MASK 0x6000 -#define CSOR_RW_RSVD 0x0000 -#define CSOR_RW_READ 0x0800 -#define CSOR_RW_WRITE 0x1000 -#define CSOR_RW_BOTH 0x1800 -#define CSOR_RW_MASK 0x1800 -#define CSOR_STROBE_DS 0x0400 -#define CSOR_STROBE_AS 0x0000 -#define CSOR_STROBE_MASK 0x0400 -#define CSOR_DSACK_WAIT(x) (wait << 6) -#define CSOR_DSACK_FTERM (14 << 6) -#define CSOR_DSACK_EXTERNAL (15 << 6) -#define CSOR_DSACK_MASK 0x03c0 -#define CSOR_SPACE_CPU 0x0000 
-#define CSOR_SPACE_USER 0x0010 -#define CSOR_SPACE_SU 0x0020 -#define CSOR_SPACE_BOTH 0x0030 -#define CSOR_SPACE_MASK 0x0030 -#define CSOR_IPL_ALL 0x0000 -#define CSOR_IPL_PRIORITY(x) (x << 1) -#define CSOR_IPL_MASK 0x000e -#define CSOR_AVEC_ON 0x0001 -#define CSOR_AVEC_OFF 0x0000 -#define CSOR_AVEC_MASK 0x0001 - -#define CSBAR_ADDR(x) ((addr >> 11) << 3) -#define CSBAR_ADDR_MASK 0xfff8 -#define CSBAR_BLKSIZE_2K 0x0000 -#define CSBAR_BLKSIZE_8K 0x0001 -#define CSBAR_BLKSIZE_16K 0x0002 -#define CSBAR_BLKSIZE_64K 0x0003 -#define CSBAR_BLKSIZE_128K 0x0004 -#define CSBAR_BLKSIZE_256K 0x0005 -#define CSBAR_BLKSIZE_512K 0x0006 -#define CSBAR_BLKSIZE_1M 0x0007 -#define CSBAR_BLKSIZE_MASK 0x0007 - -#define CSPAR_DISC 0 -#define CSPAR_ALT 1 -#define CSPAR_CS8 2 -#define CSPAR_CS16 3 -#define CSPAR_MASK 3 - -#define CSPAR0_CSBOOT(x) (x << 0) -#define CSPAR0_CS0(x) (x << 2) -#define CSPAR0_CS1(x) (x << 4) -#define CSPAR0_CS2(x) (x << 6) -#define CSPAR0_CS3(x) (x << 8) -#define CSPAR0_CS4(x) (x << 10) -#define CSPAR0_CS5(x) (x << 12) - -#define CSPAR1_CS6(x) (x << 0) -#define CSPAR1_CS7(x) (x << 2) -#define CSPAR1_CS8(x) (x << 4) -#define CSPAR1_CS9(x) (x << 6) -#define CSPAR1_CS10(x) (x << 8) - -#endif diff --git a/trunk/arch/m68k/include/asm/apollodma.h b/trunk/arch/m68k/include/asm/apollodma.h deleted file mode 100644 index 954adc851adb..000000000000 --- a/trunk/arch/m68k/include/asm/apollodma.h +++ /dev/null @@ -1,248 +0,0 @@ -/* - * linux/include/asm/dma.h: Defines for using and allocating dma channels. - * Written by Hennus Bergman, 1992. - * High DMA channel support & info by Hannu Savolainen - * and John Boyd, Nov. 1992. - */ - -#ifndef _ASM_APOLLO_DMA_H -#define _ASM_APOLLO_DMA_H - -#include /* need byte IO */ -#include /* And spinlocks */ -#include - - -#define dma_outb(val,addr) (*((volatile unsigned char *)(addr+IO_BASE)) = (val)) -#define dma_inb(addr) (*((volatile unsigned char *)(addr+IO_BASE))) - -/* - * NOTES about DMA transfers: - * - * controller 1: channels 0-3, byte operations, ports 00-1F - * controller 2: channels 4-7, word operations, ports C0-DF - * - * - ALL registers are 8 bits only, regardless of transfer size - * - channel 4 is not used - cascades 1 into 2. - * - channels 0-3 are byte - addresses/counts are for physical bytes - * - channels 5-7 are word - addresses/counts are for physical words - * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries - * - transfer count loaded to registers is 1 less than actual count - * - controller 2 offsets are all even (2x offsets for controller 1) - * - page registers for 5-7 don't use data bit 0, represent 128K pages - * - page registers for 0-3 use bit 0, represent 64K pages - * - * DMA transfers are limited to the lower 16MB of _physical_ memory. - * Note that addresses loaded into registers must be _physical_ addresses, - * not logical addresses (which may differ if paging is active). - * - * Address mapping for channels 0-3: - * - * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) - * | ... | | ... | | ... | - * | ... | | ... | | ... | - * | ... | | ... | | ... | - * P7 ... P0 A7 ... A0 A7 ... A0 - * | Page | Addr MSB | Addr LSB | (DMA registers) - * - * Address mapping for channels 5-7: - * - * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) - * | ... | \ \ ... \ \ \ ... \ \ - * | ... | \ \ ... \ \ \ ... \ (not used) - * | ... | \ \ ... \ \ \ ... \ - * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... 
A0 - * | Page | Addr MSB | Addr LSB | (DMA registers) - * - * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses - * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at - * the hardware level, so odd-byte transfers aren't possible). - * - * Transfer count (_not # bytes_) is limited to 64K, represented as actual - * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, - * and up to 128K bytes may be transferred on channels 5-7 in one operation. - * - */ - -#define MAX_DMA_CHANNELS 8 - -/* The maximum address that we can perform a DMA transfer to on this platform */#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000) - -/* 8237 DMA controllers */ -#define IO_DMA1_BASE 0x10C00 /* 8 bit slave DMA, channels 0..3 */ -#define IO_DMA2_BASE 0x10D00 /* 16 bit master DMA, ch 4(=slave input)..7 */ - -/* DMA controller registers */ -#define DMA1_CMD_REG (IO_DMA1_BASE+0x08) /* command register (w) */ -#define DMA1_STAT_REG (IO_DMA1_BASE+0x08) /* status register (r) */ -#define DMA1_REQ_REG (IO_DMA1_BASE+0x09) /* request register (w) */ -#define DMA1_MASK_REG (IO_DMA1_BASE+0x0A) /* single-channel mask (w) */ -#define DMA1_MODE_REG (IO_DMA1_BASE+0x0B) /* mode register (w) */ -#define DMA1_CLEAR_FF_REG (IO_DMA1_BASE+0x0C) /* clear pointer flip-flop (w) */ -#define DMA1_TEMP_REG (IO_DMA1_BASE+0x0D) /* Temporary Register (r) */ -#define DMA1_RESET_REG (IO_DMA1_BASE+0x0D) /* Master Clear (w) */ -#define DMA1_CLR_MASK_REG (IO_DMA1_BASE+0x0E) /* Clear Mask */ -#define DMA1_MASK_ALL_REG (IO_DMA1_BASE+0x0F) /* all-channels mask (w) */ - -#define DMA2_CMD_REG (IO_DMA2_BASE+0x10) /* command register (w) */ -#define DMA2_STAT_REG (IO_DMA2_BASE+0x10) /* status register (r) */ -#define DMA2_REQ_REG (IO_DMA2_BASE+0x12) /* request register (w) */ -#define DMA2_MASK_REG (IO_DMA2_BASE+0x14) /* single-channel mask (w) */ -#define DMA2_MODE_REG (IO_DMA2_BASE+0x16) /* mode register (w) */ -#define DMA2_CLEAR_FF_REG (IO_DMA2_BASE+0x18) /* clear pointer flip-flop (w) */ -#define DMA2_TEMP_REG (IO_DMA2_BASE+0x1A) /* Temporary Register (r) */ -#define DMA2_RESET_REG (IO_DMA2_BASE+0x1A) /* Master Clear (w) */ -#define DMA2_CLR_MASK_REG (IO_DMA2_BASE+0x1C) /* Clear Mask */ -#define DMA2_MASK_ALL_REG (IO_DMA2_BASE+0x1E) /* all-channels mask (w) */ - -#define DMA_ADDR_0 (IO_DMA1_BASE+0x00) /* DMA address registers */ -#define DMA_ADDR_1 (IO_DMA1_BASE+0x02) -#define DMA_ADDR_2 (IO_DMA1_BASE+0x04) -#define DMA_ADDR_3 (IO_DMA1_BASE+0x06) -#define DMA_ADDR_4 (IO_DMA2_BASE+0x00) -#define DMA_ADDR_5 (IO_DMA2_BASE+0x04) -#define DMA_ADDR_6 (IO_DMA2_BASE+0x08) -#define DMA_ADDR_7 (IO_DMA2_BASE+0x0C) - -#define DMA_CNT_0 (IO_DMA1_BASE+0x01) /* DMA count registers */ -#define DMA_CNT_1 (IO_DMA1_BASE+0x03) -#define DMA_CNT_2 (IO_DMA1_BASE+0x05) -#define DMA_CNT_3 (IO_DMA1_BASE+0x07) -#define DMA_CNT_4 (IO_DMA2_BASE+0x02) -#define DMA_CNT_5 (IO_DMA2_BASE+0x06) -#define DMA_CNT_6 (IO_DMA2_BASE+0x0A) -#define DMA_CNT_7 (IO_DMA2_BASE+0x0E) - -#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ -#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ -#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ - -#define DMA_AUTOINIT 0x10 - -#define DMA_8BIT 0 -#define DMA_16BIT 1 -#define DMA_BUSMASTER 2 - -extern spinlock_t dma_spin_lock; - -static __inline__ unsigned long claim_dma_lock(void) -{ - unsigned long flags; - spin_lock_irqsave(&dma_spin_lock, flags); - return flags; -} - -static __inline__ void 
release_dma_lock(unsigned long flags) -{ - spin_unlock_irqrestore(&dma_spin_lock, flags); -} - -/* enable/disable a specific DMA channel */ -static __inline__ void enable_dma(unsigned int dmanr) -{ - if (dmanr<=3) - dma_outb(dmanr, DMA1_MASK_REG); - else - dma_outb(dmanr & 3, DMA2_MASK_REG); -} - -static __inline__ void disable_dma(unsigned int dmanr) -{ - if (dmanr<=3) - dma_outb(dmanr | 4, DMA1_MASK_REG); - else - dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); -} - -/* Clear the 'DMA Pointer Flip Flop'. - * Write 0 for LSB/MSB, 1 for MSB/LSB access. - * Use this once to initialize the FF to a known state. - * After that, keep track of it. :-) - * --- In order to do that, the DMA routines below should --- - * --- only be used while holding the DMA lock ! --- - */ -static __inline__ void clear_dma_ff(unsigned int dmanr) -{ - if (dmanr<=3) - dma_outb(0, DMA1_CLEAR_FF_REG); - else - dma_outb(0, DMA2_CLEAR_FF_REG); -} - -/* set mode (above) for a specific DMA channel */ -static __inline__ void set_dma_mode(unsigned int dmanr, char mode) -{ - if (dmanr<=3) - dma_outb(mode | dmanr, DMA1_MODE_REG); - else - dma_outb(mode | (dmanr&3), DMA2_MODE_REG); -} - -/* Set transfer address & page bits for specific DMA channel. - * Assumes dma flipflop is clear. - */ -static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) -{ - if (dmanr <= 3) { - dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); - dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); - } else { - dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); - dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); - } -} - - -/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for - * a specific DMA channel. - * You must ensure the parameters are valid. - * NOTE: from a manual: "the number of transfers is one more - * than the initial word count"! This is taken into account. - * Assumes dma flip-flop is clear. - * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. - */ -static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) -{ - count--; - if (dmanr <= 3) { - dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); - dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); - } else { - dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); - dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); - } -} - - -/* Get DMA residue count. After a DMA transfer, this - * should return zero. Reading this while a DMA transfer is - * still in progress will return unpredictable results. - * If called before the channel has been used, it may return 1. - * Otherwise, it returns the number of _bytes_ left to transfer. - * - * Assumes DMA flip-flop is clear. - */ -static __inline__ int get_dma_residue(unsigned int dmanr) -{ - unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE - : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE; - - /* using short to get 16-bit wrap around */ - unsigned short count; - - count = 1 + dma_inb(io_port); - count += dma_inb(io_port) << 8; - - return (dmanr<=3)? 
count : (count<<1); -} - - -/* These are in kernel/dma.c: */ -extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */ -extern void free_dma(unsigned int dmanr); /* release it again */ - -/* These are in arch/m68k/apollo/dma.c: */ -extern unsigned short dma_map_page(unsigned long phys_addr,int count,int type); -extern void dma_unmap_page(unsigned short dma_addr); - -#endif /* _ASM_APOLLO_DMA_H */ diff --git a/trunk/arch/m68k/include/asm/apollohw.h b/trunk/arch/m68k/include/asm/apollohw.h index a1373b9aa281..635ef4f89010 100644 --- a/trunk/arch/m68k/include/asm/apollohw.h +++ b/trunk/arch/m68k/include/asm/apollohw.h @@ -98,7 +98,7 @@ extern u_long timer_physaddr; #define cpuctrl (*(volatile unsigned int *)(IO_BASE + cpuctrl_physaddr)) #define pica (IO_BASE + pica_physaddr) #define picb (IO_BASE + picb_physaddr) -#define timer (IO_BASE + timer_physaddr) +#define apollo_timer (IO_BASE + timer_physaddr) #define addr_xlat_map ((unsigned short *)(IO_BASE + 0x17000)) #define isaIO2mem(x) (((((x) & 0x3f8) << 7) | (((x) & 0xfc00) >> 6) | ((x) & 0x7)) + 0x40000 + IO_BASE) diff --git a/trunk/arch/m68k/include/asm/bitsperlong.h b/trunk/arch/m68k/include/asm/bitsperlong.h deleted file mode 100644 index 6dc0bb0c13b2..000000000000 --- a/trunk/arch/m68k/include/asm/bitsperlong.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/arch/m68k/include/asm/cputime.h b/trunk/arch/m68k/include/asm/cputime.h deleted file mode 100644 index c79c5e892305..000000000000 --- a/trunk/arch/m68k/include/asm/cputime.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __M68K_CPUTIME_H -#define __M68K_CPUTIME_H - -#include - -#endif /* __M68K_CPUTIME_H */ diff --git a/trunk/arch/m68k/include/asm/delay.h b/trunk/arch/m68k/include/asm/delay.h index 9c09becfd4c9..12d8fe4f1d30 100644 --- a/trunk/arch/m68k/include/asm/delay.h +++ b/trunk/arch/m68k/include/asm/delay.h @@ -43,7 +43,7 @@ static inline void __delay(unsigned long loops) extern void __bad_udelay(void); -#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE) +#ifdef CONFIG_CPU_HAS_NO_MULDIV64 /* * The simpler m68k and ColdFire processors do not have a 32*32->64 * multiply instruction. So we need to handle them a little differently. 
diff --git a/trunk/arch/m68k/include/asm/device.h b/trunk/arch/m68k/include/asm/device.h deleted file mode 100644 index d8f9872b0e2d..000000000000 --- a/trunk/arch/m68k/include/asm/device.h +++ /dev/null @@ -1,7 +0,0 @@ -/* - * Arch specific extensions to struct device - * - * This file is released under the GPLv2 - */ -#include - diff --git a/trunk/arch/m68k/include/asm/emergency-restart.h b/trunk/arch/m68k/include/asm/emergency-restart.h deleted file mode 100644 index 108d8c48e42e..000000000000 --- a/trunk/arch/m68k/include/asm/emergency-restart.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_EMERGENCY_RESTART_H -#define _ASM_EMERGENCY_RESTART_H - -#include - -#endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/trunk/arch/m68k/include/asm/errno.h b/trunk/arch/m68k/include/asm/errno.h deleted file mode 100644 index 0d4e188d6ef6..000000000000 --- a/trunk/arch/m68k/include/asm/errno.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _M68K_ERRNO_H -#define _M68K_ERRNO_H - -#include - -#endif /* _M68K_ERRNO_H */ diff --git a/trunk/arch/m68k/include/asm/futex.h b/trunk/arch/m68k/include/asm/futex.h deleted file mode 100644 index 6a332a9f099c..000000000000 --- a/trunk/arch/m68k/include/asm/futex.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_FUTEX_H -#define _ASM_FUTEX_H - -#include - -#endif diff --git a/trunk/arch/m68k/include/asm/ioctl.h b/trunk/arch/m68k/include/asm/ioctl.h deleted file mode 100644 index b279fe06dfe5..000000000000 --- a/trunk/arch/m68k/include/asm/ioctl.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/arch/m68k/include/asm/ipcbuf.h b/trunk/arch/m68k/include/asm/ipcbuf.h deleted file mode 100644 index 84c7e51cb6d0..000000000000 --- a/trunk/arch/m68k/include/asm/ipcbuf.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/arch/m68k/include/asm/irq_regs.h b/trunk/arch/m68k/include/asm/irq_regs.h deleted file mode 100644 index 3dd9c0b70270..000000000000 --- a/trunk/arch/m68k/include/asm/irq_regs.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/arch/m68k/include/asm/kdebug.h b/trunk/arch/m68k/include/asm/kdebug.h deleted file mode 100644 index 6ece1b037665..000000000000 --- a/trunk/arch/m68k/include/asm/kdebug.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/arch/m68k/include/asm/kmap_types.h b/trunk/arch/m68k/include/asm/kmap_types.h deleted file mode 100644 index 3413cc1390ec..000000000000 --- a/trunk/arch/m68k/include/asm/kmap_types.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_M68K_KMAP_TYPES_H -#define __ASM_M68K_KMAP_TYPES_H - -#include - -#endif /* __ASM_M68K_KMAP_TYPES_H */ diff --git a/trunk/arch/m68k/include/asm/kvm_para.h b/trunk/arch/m68k/include/asm/kvm_para.h deleted file mode 100644 index 14fab8f0b957..000000000000 --- a/trunk/arch/m68k/include/asm/kvm_para.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/arch/m68k/include/asm/local.h b/trunk/arch/m68k/include/asm/local.h deleted file mode 100644 index 6c259263e1f0..000000000000 --- a/trunk/arch/m68k/include/asm/local.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_M68K_LOCAL_H -#define _ASM_M68K_LOCAL_H - -#include - -#endif /* _ASM_M68K_LOCAL_H */ diff --git a/trunk/arch/m68k/include/asm/local64.h b/trunk/arch/m68k/include/asm/local64.h deleted file mode 100644 index 36c93b5cc239..000000000000 --- a/trunk/arch/m68k/include/asm/local64.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/arch/m68k/include/asm/mac_mouse.h b/trunk/arch/m68k/include/asm/mac_mouse.h deleted file mode 100644 index 39a5c292eaee..000000000000 --- a/trunk/arch/m68k/include/asm/mac_mouse.h +++ /dev/null 
@@ -1,23 +0,0 @@ -#ifndef _ASM_MAC_MOUSE_H -#define _ASM_MAC_MOUSE_H - -/* - * linux/include/asm-m68k/mac_mouse.h - * header file for Macintosh ADB mouse driver - * 27-10-97 Michael Schmitz - * copied from: - * header file for Atari Mouse driver - * by Robert de Vries (robert@and.nl) on 19Jul93 - */ - -struct mouse_status { - char buttons; - short dx; - short dy; - int ready; - int active; - wait_queue_head_t wait; - struct fasync_struct *fasyncptr; -}; - -#endif diff --git a/trunk/arch/m68k/include/asm/mcfmbus.h b/trunk/arch/m68k/include/asm/mcfmbus.h deleted file mode 100644 index 319899c47a2c..000000000000 --- a/trunk/arch/m68k/include/asm/mcfmbus.h +++ /dev/null @@ -1,77 +0,0 @@ -/****************************************************************************/ - -/* - * mcfmbus.h -- Coldfire MBUS support defines. - * - * (C) Copyright 1999, Martin Floeer (mfloeer@axcent.de) - */ - -/****************************************************************************/ - - -#ifndef mcfmbus_h -#define mcfmbus_h - - -#define MCFMBUS_BASE 0x280 -#define MCFMBUS_IRQ_VECTOR 0x19 -#define MCFMBUS_IRQ 0x1 -#define MCFMBUS_CLK 0x3f -#define MCFMBUS_IRQ_LEVEL 0x07 /*IRQ Level 1*/ -#define MCFMBUS_ADDRESS 0x01 - - -/* -* Define the 5307 MBUS register set addresses -*/ - -#define MCFMBUS_MADR 0x00 -#define MCFMBUS_MFDR 0x04 -#define MCFMBUS_MBCR 0x08 -#define MCFMBUS_MBSR 0x0C -#define MCFMBUS_MBDR 0x10 - - -#define MCFMBUS_MADR_ADDR(a) (((a)&0x7F)<<0x01) /*Slave Address*/ - -#define MCFMBUS_MFDR_MBC(a) ((a)&0x3F) /*M-Bus Clock*/ - -/* -* Define bit flags in Control Register -*/ - -#define MCFMBUS_MBCR_MEN (0x80) /* M-Bus Enable */ -#define MCFMBUS_MBCR_MIEN (0x40) /* M-Bus Interrupt Enable */ -#define MCFMBUS_MBCR_MSTA (0x20) /* Master/Slave Mode Select Bit */ -#define MCFMBUS_MBCR_MTX (0x10) /* Transmit/Rcv Mode Select Bit */ -#define MCFMBUS_MBCR_TXAK (0x08) /* Transmit Acknowledge Enable */ -#define MCFMBUS_MBCR_RSTA (0x04) /* Repeat Start */ - -/* -* Define bit flags in Status Register -*/ - -#define MCFMBUS_MBSR_MCF (0x80) /* Data Transfer Complete */ -#define MCFMBUS_MBSR_MAAS (0x40) /* Addressed as a Slave */ -#define MCFMBUS_MBSR_MBB (0x20) /* Bus Busy */ -#define MCFMBUS_MBSR_MAL (0x10) /* Arbitration Lost */ -#define MCFMBUS_MBSR_SRW (0x04) /* Slave Transmit */ -#define MCFMBUS_MBSR_MIF (0x02) /* M-Bus Interrupt */ -#define MCFMBUS_MBSR_RXAK (0x01) /* No Acknowledge Received */ - -/* -* Define bit flags in DATA I/O Register -*/ - -#define MCFMBUS_MBDR_READ (0x01) /* 1=read 0=write MBUS */ - -#define MBUSIOCSCLOCK 1 -#define MBUSIOCGCLOCK 2 -#define MBUSIOCSADDR 3 -#define MBUSIOCGADDR 4 -#define MBUSIOCSSLADDR 5 -#define MBUSIOCGSLADDR 6 -#define MBUSIOCSSUBADDR 7 -#define MBUSIOCGSUBADDR 8 - -#endif diff --git a/trunk/arch/m68k/include/asm/mman.h b/trunk/arch/m68k/include/asm/mman.h deleted file mode 100644 index 8eebf89f5ab1..000000000000 --- a/trunk/arch/m68k/include/asm/mman.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/arch/m68k/include/asm/mutex.h b/trunk/arch/m68k/include/asm/mutex.h deleted file mode 100644 index 458c1f7fbc18..000000000000 --- a/trunk/arch/m68k/include/asm/mutex.h +++ /dev/null @@ -1,9 +0,0 @@ -/* - * Pull in the generic implementation for the mutex fastpath. - * - * TODO: implement optimized primitives instead, or leave the generic - * implementation in place, or pick the atomic_xchg() based generic - * implementation. 
(see asm-generic/mutex-xchg.h for details) - */ - -#include diff --git a/trunk/arch/m68k/include/asm/percpu.h b/trunk/arch/m68k/include/asm/percpu.h deleted file mode 100644 index 0859d048faf5..000000000000 --- a/trunk/arch/m68k/include/asm/percpu.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_M68K_PERCPU_H -#define __ASM_M68K_PERCPU_H - -#include - -#endif /* __ASM_M68K_PERCPU_H */ diff --git a/trunk/arch/m68k/include/asm/resource.h b/trunk/arch/m68k/include/asm/resource.h deleted file mode 100644 index e7d35019f337..000000000000 --- a/trunk/arch/m68k/include/asm/resource.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _M68K_RESOURCE_H -#define _M68K_RESOURCE_H - -#include - -#endif /* _M68K_RESOURCE_H */ diff --git a/trunk/arch/m68k/include/asm/sbus.h b/trunk/arch/m68k/include/asm/sbus.h deleted file mode 100644 index bfe3ba147f2e..000000000000 --- a/trunk/arch/m68k/include/asm/sbus.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * some sbus structures and macros to make usage of sbus drivers possible - */ - -#ifndef __M68K_SBUS_H -#define __M68K_SBUS_H - -struct sbus_dev { - struct { - unsigned int which_io; - unsigned int phys_addr; - } reg_addrs[1]; -}; - -/* sbus IO functions stolen from include/asm-sparc/io.h for the serial driver */ -/* No SBUS on the Sun3, kludge -- sam */ - -static inline void _sbus_writeb(unsigned char val, unsigned long addr) -{ - *(volatile unsigned char *)addr = val; -} - -static inline unsigned char _sbus_readb(unsigned long addr) -{ - return *(volatile unsigned char *)addr; -} - -static inline void _sbus_writel(unsigned long val, unsigned long addr) -{ - *(volatile unsigned long *)addr = val; - -} - -extern inline unsigned long _sbus_readl(unsigned long addr) -{ - return *(volatile unsigned long *)addr; -} - - -#define sbus_readb(a) _sbus_readb((unsigned long)a) -#define sbus_writeb(v, a) _sbus_writeb(v, (unsigned long)a) -#define sbus_readl(a) _sbus_readl((unsigned long)a) -#define sbus_writel(v, a) _sbus_writel(v, (unsigned long)a) - -#endif diff --git a/trunk/arch/m68k/include/asm/scatterlist.h b/trunk/arch/m68k/include/asm/scatterlist.h deleted file mode 100644 index 312505452a1e..000000000000 --- a/trunk/arch/m68k/include/asm/scatterlist.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _M68K_SCATTERLIST_H -#define _M68K_SCATTERLIST_H - -#include - -#endif /* !(_M68K_SCATTERLIST_H) */ diff --git a/trunk/arch/m68k/include/asm/sections.h b/trunk/arch/m68k/include/asm/sections.h deleted file mode 100644 index 5277e52715ec..000000000000 --- a/trunk/arch/m68k/include/asm/sections.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _ASM_M68K_SECTIONS_H -#define _ASM_M68K_SECTIONS_H - -#include - -extern char _sbss[], _ebss[]; - -#endif /* _ASM_M68K_SECTIONS_H */ diff --git a/trunk/arch/m68k/include/asm/shm.h b/trunk/arch/m68k/include/asm/shm.h deleted file mode 100644 index fa56ec84a126..000000000000 --- a/trunk/arch/m68k/include/asm/shm.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef _M68K_SHM_H -#define _M68K_SHM_H - - -/* format of page table entries that correspond to shared memory pages - currently out in swap space (see also mm/swap.c): - bits 0-1 (PAGE_PRESENT) is = 0 - bits 8..2 (SWP_TYPE) are = SHM_SWP_TYPE - bits 31..9 are used like this: - bits 15..9 (SHM_ID) the id of the shared memory segment - bits 30..16 (SHM_IDX) the index of the page within the shared memory segment - (actually only bits 25..16 get used since SHMMAX is so low) - bit 31 (SHM_READ_ONLY) flag whether the page belongs to a read-only attach -*/ -/* on the m68k both bits 0 and 1 must be zero */ -/* format on the sun3 is 
similar, but bits 30, 31 are set to zero and all - others are reduced by 2. --m */ - -#ifndef CONFIG_SUN3 -#define SHM_ID_SHIFT 9 -#else -#define SHM_ID_SHIFT 7 -#endif -#define _SHM_ID_BITS 7 -#define SHM_ID_MASK ((1<<_SHM_ID_BITS)-1) - -#define SHM_IDX_SHIFT (SHM_ID_SHIFT+_SHM_ID_BITS) -#define _SHM_IDX_BITS 15 -#define SHM_IDX_MASK ((1<<_SHM_IDX_BITS)-1) - -#endif /* _M68K_SHM_H */ diff --git a/trunk/arch/m68k/include/asm/siginfo.h b/trunk/arch/m68k/include/asm/siginfo.h deleted file mode 100644 index 851d3d784b53..000000000000 --- a/trunk/arch/m68k/include/asm/siginfo.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _M68K_SIGINFO_H -#define _M68K_SIGINFO_H - -#include - -#endif diff --git a/trunk/arch/m68k/include/asm/statfs.h b/trunk/arch/m68k/include/asm/statfs.h deleted file mode 100644 index 08d93f14e061..000000000000 --- a/trunk/arch/m68k/include/asm/statfs.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _M68K_STATFS_H -#define _M68K_STATFS_H - -#include - -#endif /* _M68K_STATFS_H */ diff --git a/trunk/arch/m68k/include/asm/topology.h b/trunk/arch/m68k/include/asm/topology.h deleted file mode 100644 index ca173e9f26ff..000000000000 --- a/trunk/arch/m68k/include/asm/topology.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_M68K_TOPOLOGY_H -#define _ASM_M68K_TOPOLOGY_H - -#include - -#endif /* _ASM_M68K_TOPOLOGY_H */ diff --git a/trunk/arch/m68k/include/asm/types.h b/trunk/arch/m68k/include/asm/types.h deleted file mode 100644 index 89705adcbd52..000000000000 --- a/trunk/arch/m68k/include/asm/types.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef _M68K_TYPES_H -#define _M68K_TYPES_H - -/* - * This file is never included by application software unless - * explicitly requested (e.g., via linux/types.h) in which case the - * application is Linux specific so (user-) name space pollution is - * not a major issue. However, for interoperability, libraries still - * need to be careful to avoid a name clashes. - */ -#include - -/* - * These aren't exported outside the kernel to avoid name space clashes - */ -#ifdef __KERNEL__ - -#define BITS_PER_LONG 32 - -#endif /* __KERNEL__ */ - -#endif /* _M68K_TYPES_H */ diff --git a/trunk/arch/m68k/include/asm/unaligned.h b/trunk/arch/m68k/include/asm/unaligned.h index f4043ae63db1..2b3ca0bf7a0d 100644 --- a/trunk/arch/m68k/include/asm/unaligned.h +++ b/trunk/arch/m68k/include/asm/unaligned.h @@ -2,7 +2,7 @@ #define _ASM_M68K_UNALIGNED_H -#if defined(CONFIG_COLDFIRE) || defined(CONFIG_M68000) +#ifdef CONFIG_CPU_HAS_NO_UNALIGNED #include #include #include @@ -12,7 +12,7 @@ #else /* - * The m68k can do unaligned accesses itself. + * The m68k can do unaligned accesses itself. */ #include #include diff --git a/trunk/arch/m68k/include/asm/xor.h b/trunk/arch/m68k/include/asm/xor.h deleted file mode 100644 index c82eb12a5b18..000000000000 --- a/trunk/arch/m68k/include/asm/xor.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/arch/m68k/kernel/setup_no.c b/trunk/arch/m68k/kernel/setup_no.c index 7dc186b7a85f..71fb29938dba 100644 --- a/trunk/arch/m68k/kernel/setup_no.c +++ b/trunk/arch/m68k/kernel/setup_no.c @@ -218,13 +218,10 @@ void __init setup_arch(char **cmdline_p) printk(KERN_INFO "Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. 
(Jate Sujjavanich)\n"); #endif - pr_debug("KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x " - "BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext, - (int) &_sdata, (int) &_edata, - (int) &_sbss, (int) &_ebss); - pr_debug("MEMORY -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x\n ", - (int) &_ebss, (int) memory_start, - (int) memory_start, (int) memory_end); + pr_debug("KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p BSS=0x%p-0x%p\n", + _stext, _etext, _sdata, _edata, __bss_start, __bss_stop); + pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ", + __bss_stop, memory_start, memory_start, memory_end); /* Keep a copy of command line */ *cmdline_p = &command_line[0]; diff --git a/trunk/arch/m68k/kernel/sys_m68k.c b/trunk/arch/m68k/kernel/sys_m68k.c index 8623f8dc16f8..9a5932ec3689 100644 --- a/trunk/arch/m68k/kernel/sys_m68k.c +++ b/trunk/arch/m68k/kernel/sys_m68k.c @@ -479,9 +479,13 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5, goto bad_access; } - mem_value = *mem; + /* + * No need to check for EFAULT; we know that the page is + * present and writable. + */ + __get_user(mem_value, mem); if (mem_value == oldval) - *mem = newval; + __put_user(newval, mem); pte_unmap_unlock(pte, ptl); up_read(&mm->mmap_sem); diff --git a/trunk/arch/m68k/kernel/vmlinux-nommu.lds b/trunk/arch/m68k/kernel/vmlinux-nommu.lds index 40e02d9c38b4..06a763f49fd3 100644 --- a/trunk/arch/m68k/kernel/vmlinux-nommu.lds +++ b/trunk/arch/m68k/kernel/vmlinux-nommu.lds @@ -78,9 +78,7 @@ SECTIONS { __init_end = .; } - _sbss = .; BSS_SECTION(0, 0, 0) - _ebss = .; _end = .; diff --git a/trunk/arch/m68k/kernel/vmlinux-std.lds b/trunk/arch/m68k/kernel/vmlinux-std.lds index 63407c836826..d0993594f558 100644 --- a/trunk/arch/m68k/kernel/vmlinux-std.lds +++ b/trunk/arch/m68k/kernel/vmlinux-std.lds @@ -31,9 +31,7 @@ SECTIONS RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE) - _sbss = .; BSS_SECTION(0, 0, 0) - _ebss = .; _edata = .; /* End of data section */ diff --git a/trunk/arch/m68k/kernel/vmlinux-sun3.lds b/trunk/arch/m68k/kernel/vmlinux-sun3.lds index ad0f46d64c0b..8080469ee6c1 100644 --- a/trunk/arch/m68k/kernel/vmlinux-sun3.lds +++ b/trunk/arch/m68k/kernel/vmlinux-sun3.lds @@ -44,9 +44,7 @@ __init_begin = .; . = ALIGN(PAGE_SIZE); __init_end = .; - _sbss = .; BSS_SECTION(0, 0, 0) - _ebss = .; _end = . ; diff --git a/trunk/arch/m68k/lib/muldi3.c b/trunk/arch/m68k/lib/muldi3.c index 79e928a525d0..ee5f0b1b5c5d 100644 --- a/trunk/arch/m68k/lib/muldi3.c +++ b/trunk/arch/m68k/lib/muldi3.c @@ -19,7 +19,7 @@ along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
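
An illustrative aside, not part of the patch: the sys_atomic_cmpxchg_32() hunk above replaces raw user-pointer dereferences with __get_user()/__put_user() while the page-table lock is held, and the added comment explains why the fault check can be skipped there. For readers unfamiliar with the semantics being emulated, here is a minimal userspace sketch of a 32-bit compare-and-exchange; the function name and types are invented for illustration and are not kernel API.

    #include <stdint.h>
    #include <stdio.h>

    /* If *mem == oldval, store newval; always return the value that was
     * found in *mem so the caller can tell whether the exchange happened. */
    static uint32_t cmpxchg32(uint32_t *mem, uint32_t oldval, uint32_t newval)
    {
        uint32_t found = *mem;

        if (found == oldval)
            *mem = newval;
        return found;
    }

    int main(void)
    {
        uint32_t v = 5;

        printf("found=%u v=%u\n", cmpxchg32(&v, 5, 9), v);  /* found=5 v=9 */
        printf("found=%u v=%u\n", cmpxchg32(&v, 5, 7), v);  /* found=9 v=9 */
        return 0;
    }
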
*/ -#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE) +#ifdef CONFIG_CPU_HAS_NO_MULDIV64 #define SI_TYPE_SIZE 32 #define __BITS4 (SI_TYPE_SIZE / 4) diff --git a/trunk/arch/m68k/mm/init_mm.c b/trunk/arch/m68k/mm/init_mm.c index f77f258dce3a..282f9de68966 100644 --- a/trunk/arch/m68k/mm/init_mm.c +++ b/trunk/arch/m68k/mm/init_mm.c @@ -104,7 +104,7 @@ void __init print_memmap(void) MLK_ROUNDUP(__init_begin, __init_end), MLK_ROUNDUP(_stext, _etext), MLK_ROUNDUP(_sdata, _edata), - MLK_ROUNDUP(_sbss, _ebss)); + MLK_ROUNDUP(__bss_start, __bss_stop)); } void __init mem_init(void) diff --git a/trunk/arch/m68k/mm/init_no.c b/trunk/arch/m68k/mm/init_no.c index 345ec0d83e3d..688e3664aea0 100644 --- a/trunk/arch/m68k/mm/init_no.c +++ b/trunk/arch/m68k/mm/init_no.c @@ -91,7 +91,7 @@ void __init mem_init(void) totalram_pages = free_all_bootmem(); codek = (_etext - _stext) >> 10; - datak = (_ebss - _sdata) >> 10; + datak = (__bss_stop - _sdata) >> 10; initk = (__init_begin - __init_end) >> 10; tmp = nr_free_pages() << PAGE_SHIFT; diff --git a/trunk/arch/m68k/platform/68328/head-de2.S b/trunk/arch/m68k/platform/68328/head-de2.S index f632fdcb93e9..537d3245b539 100644 --- a/trunk/arch/m68k/platform/68328/head-de2.S +++ b/trunk/arch/m68k/platform/68328/head-de2.S @@ -60,8 +60,8 @@ _start: * Move ROM filesystem above bss :-) */ - moveal #_sbss, %a0 /* romfs at the start of bss */ - moveal #_ebss, %a1 /* Set up destination */ + moveal #__bss_start, %a0 /* romfs at the start of bss */ + moveal #__bss_stop, %a1 /* Set up destination */ movel %a0, %a2 /* Copy of bss start */ movel 8(%a0), %d1 /* Get size of ROMFS */ @@ -84,8 +84,8 @@ _start: * Initialize BSS segment to 0 */ - lea _sbss, %a0 - lea _ebss, %a1 + lea __bss_start, %a0 + lea __bss_stop, %a1 /* Copy 0 to %a0 until %a0 == %a1 */ 2: cmpal %a0, %a1 diff --git a/trunk/arch/m68k/platform/68328/head-pilot.S b/trunk/arch/m68k/platform/68328/head-pilot.S index 2ebfd6420818..45a9dad29e3d 100644 --- a/trunk/arch/m68k/platform/68328/head-pilot.S +++ b/trunk/arch/m68k/platform/68328/head-pilot.S @@ -110,7 +110,7 @@ L0: movel #CONFIG_VECTORBASE, %d7 addl #16, %d7 moveal %d7, %a0 - moveal #_ebss, %a1 + moveal #__bss_stop, %a1 lea %a1@(512), %a2 DBG_PUTC('C') @@ -138,8 +138,8 @@ LD1: DBG_PUTC('E') - moveal #_sbss, %a0 - moveal #_ebss, %a1 + moveal #__bss_start, %a0 + moveal #__bss_stop, %a1 /* Copy 0 to %a0 until %a0 == %a1 */ L1: @@ -150,7 +150,7 @@ L1: DBG_PUTC('F') /* Copy command line from end of bss to command line */ - moveal #_ebss, %a0 + moveal #__bss_stop, %a0 moveal #command_line, %a1 lea %a1@(512), %a2 @@ -165,7 +165,7 @@ L3: movel #_sdata, %d0 movel %d0, _rambase - movel #_ebss, %d0 + movel #__bss_stop, %d0 movel %d0, _ramstart movel %a4, %d0 diff --git a/trunk/arch/m68k/platform/68328/head-ram.S b/trunk/arch/m68k/platform/68328/head-ram.S index 7f1aeeacb219..5189ef926098 100644 --- a/trunk/arch/m68k/platform/68328/head-ram.S +++ b/trunk/arch/m68k/platform/68328/head-ram.S @@ -76,8 +76,8 @@ pclp3: beq pclp3 #endif /* DEBUG */ moveal #0x007ffff0, %ssp - moveal #_sbss, %a0 - moveal #_ebss, %a1 + moveal #__bss_start, %a0 + moveal #__bss_stop, %a1 /* Copy 0 to %a0 until %a0 >= %a1 */ L1: diff --git a/trunk/arch/m68k/platform/68328/head-rom.S b/trunk/arch/m68k/platform/68328/head-rom.S index a5ff96d0295f..3dff98ba2e97 100644 --- a/trunk/arch/m68k/platform/68328/head-rom.S +++ b/trunk/arch/m68k/platform/68328/head-rom.S @@ -59,8 +59,8 @@ _stext: movew #0x2700,%sr cmpal %a1, %a2 bhi 1b - moveal #_sbss, %a0 - moveal #_ebss, %a1 + moveal #__bss_start, %a0 + 
moveal #__bss_stop, %a1 /* Copy 0 to %a0 until %a0 == %a1 */ 1: @@ -70,7 +70,7 @@ _stext: movew #0x2700,%sr movel #_sdata, %d0 movel %d0, _rambase - movel #_ebss, %d0 + movel #__bss_stop, %d0 movel %d0, _ramstart movel #RAMEND-CONFIG_MEMORY_RESERVE*0x100000, %d0 movel %d0, _ramend diff --git a/trunk/arch/m68k/platform/68360/head-ram.S b/trunk/arch/m68k/platform/68360/head-ram.S index 8eb94fb6b971..acd213170d80 100644 --- a/trunk/arch/m68k/platform/68360/head-ram.S +++ b/trunk/arch/m68k/platform/68360/head-ram.S @@ -219,8 +219,8 @@ LD1: cmp.l #_edata, %a1 blt LD1 - moveal #_sbss, %a0 - moveal #_ebss, %a1 + moveal #__bss_start, %a0 + moveal #__bss_stop, %a1 /* Copy 0 to %a0 until %a0 == %a1 */ L1: @@ -234,7 +234,7 @@ load_quicc: store_ram_size: /* Set ram size information */ move.l #_sdata, _rambase - move.l #_ebss, _ramstart + move.l #__bss_stop, _ramstart move.l #RAMEND, %d0 sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/ move.l %d0, _ramend /* Different from RAMEND.*/ diff --git a/trunk/arch/m68k/platform/68360/head-rom.S b/trunk/arch/m68k/platform/68360/head-rom.S index 97510e55b802..dfc756d99886 100644 --- a/trunk/arch/m68k/platform/68360/head-rom.S +++ b/trunk/arch/m68k/platform/68360/head-rom.S @@ -13,7 +13,7 @@ */ .global _stext -.global _sbss +.global __bss_start .global _start .global _rambase @@ -229,8 +229,8 @@ LD1: cmp.l #_edata, %a1 blt LD1 - moveal #_sbss, %a0 - moveal #_ebss, %a1 + moveal #__bss_start, %a0 + moveal #__bss_stop, %a1 /* Copy 0 to %a0 until %a0 == %a1 */ L1: @@ -244,7 +244,7 @@ load_quicc: store_ram_size: /* Set ram size information */ move.l #_sdata, _rambase - move.l #_ebss, _ramstart + move.l #__bss_stop, _ramstart move.l #RAMEND, %d0 sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/ move.l %d0, _ramend /* Different from RAMEND.*/ diff --git a/trunk/arch/m68k/platform/coldfire/head.S b/trunk/arch/m68k/platform/coldfire/head.S index 4e0c9eb3bd1f..b88f5716f357 100644 --- a/trunk/arch/m68k/platform/coldfire/head.S +++ b/trunk/arch/m68k/platform/coldfire/head.S @@ -230,8 +230,8 @@ _vstart: /* * Move ROM filesystem above bss :-) */ - lea _sbss,%a0 /* get start of bss */ - lea _ebss,%a1 /* set up destination */ + lea __bss_start,%a0 /* get start of bss */ + lea __bss_stop,%a1 /* set up destination */ movel %a0,%a2 /* copy of bss start */ movel 8(%a0),%d0 /* get size of ROMFS */ @@ -249,7 +249,7 @@ _copy_romfs: bne _copy_romfs #else /* CONFIG_ROMFS_FS */ - lea _ebss,%a1 + lea __bss_stop,%a1 movel %a1,_ramstart #endif /* CONFIG_ROMFS_FS */ @@ -257,8 +257,8 @@ _copy_romfs: /* * Zero out the bss region. */ - lea _sbss,%a0 /* get start of bss */ - lea _ebss,%a1 /* get end of bss */ + lea __bss_start,%a0 /* get start of bss */ + lea __bss_stop,%a1 /* get end of bss */ clrl %d0 /* set value */ _clear_bss: movel %d0,(%a0)+ /* clear each word */ diff --git a/trunk/arch/m68k/sun3/prom/init.c b/trunk/arch/m68k/sun3/prom/init.c index d8e6349336b4..eeba067d565f 100644 --- a/trunk/arch/m68k/sun3/prom/init.c +++ b/trunk/arch/m68k/sun3/prom/init.c @@ -22,57 +22,13 @@ int prom_root_node; struct linux_nodeops *prom_nodeops; /* You must call prom_init() before you attempt to use any of the - * routines in the prom library. It returns 0 on success, 1 on - * failure. It gets passed the pointer to the PROM vector. + * routines in the prom library. + * It gets passed the pointer to the PROM vector. 
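
An illustrative aside, not part of the patch: the head-*.S hunks in this area only rename the BSS bounds from _sbss/_ebss to the generic __bss_start/__bss_stop symbols; the zero-fill loops themselves are unchanged. A self-contained C sketch of the same loop, with stand-in variables in place of the linker-provided symbols:

    #include <stdio.h>

    /* Stand-ins for the linker symbols __bss_start/__bss_stop used above. */
    static char fake_bss[16] = { 1, 2, 3, 4 };
    static char *bss_start = fake_bss;
    static char *bss_stop  = fake_bss + sizeof(fake_bss);

    int main(void)
    {
        char *p;

        /* "Copy 0 to %a0 until %a0 == %a1" */
        for (p = bss_start; p != bss_stop; p++)
            *p = 0;

        printf("fake_bss[0] after clearing: %d\n", fake_bss[0]); /* prints 0 */
        return 0;
    }
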
*/ -extern void prom_meminit(void); -extern void prom_ranges_init(void); - void __init prom_init(struct linux_romvec *rp) { romvec = rp; -#ifndef CONFIG_SUN3 - switch(romvec->pv_romvers) { - case 0: - prom_vers = PROM_V0; - break; - case 2: - prom_vers = PROM_V2; - break; - case 3: - prom_vers = PROM_V3; - break; - case 4: - prom_vers = PROM_P1275; - prom_printf("PROMLIB: Sun IEEE Prom not supported yet\n"); - prom_halt(); - break; - default: - prom_printf("PROMLIB: Bad PROM version %d\n", - romvec->pv_romvers); - prom_halt(); - break; - }; - - prom_rev = romvec->pv_plugin_revision; - prom_prev = romvec->pv_printrev; - prom_nodeops = romvec->pv_nodeops; - - prom_root_node = prom_getsibling(0); - if((prom_root_node == 0) || (prom_root_node == -1)) - prom_halt(); - - if((((unsigned long) prom_nodeops) == 0) || - (((unsigned long) prom_nodeops) == -1)) - prom_halt(); - - prom_meminit(); - - prom_ranges_init(); -#endif -// printk("PROMLIB: Sun Boot Prom Version %d Revision %d\n", -// romvec->pv_romvers, prom_rev); /* Initialization successful. */ return; diff --git a/trunk/arch/microblaze/include/asm/sections.h b/trunk/arch/microblaze/include/asm/sections.h index 4487e150b455..c07ed5d2a820 100644 --- a/trunk/arch/microblaze/include/asm/sections.h +++ b/trunk/arch/microblaze/include/asm/sections.h @@ -18,10 +18,6 @@ extern char _ssbss[], _esbss[]; extern unsigned long __ivt_start[], __ivt_end[]; extern char _etext[], _stext[]; -# ifdef CONFIG_MTD_UCLINUX -extern char *_ebss; -# endif - extern u32 _fdt_start[], _fdt_end[]; # endif /* !__ASSEMBLY__ */ diff --git a/trunk/arch/microblaze/kernel/microblaze_ksyms.c b/trunk/arch/microblaze/kernel/microblaze_ksyms.c index bb4907c828dc..2b25bcf05c00 100644 --- a/trunk/arch/microblaze/kernel/microblaze_ksyms.c +++ b/trunk/arch/microblaze/kernel/microblaze_ksyms.c @@ -21,9 +21,6 @@ #include #include -extern char *_ebss; -EXPORT_SYMBOL_GPL(_ebss); - #ifdef CONFIG_FUNCTION_TRACER extern void _mcount(void); EXPORT_SYMBOL(_mcount); diff --git a/trunk/arch/microblaze/kernel/setup.c b/trunk/arch/microblaze/kernel/setup.c index 16d8dfd9094b..4da971d4392f 100644 --- a/trunk/arch/microblaze/kernel/setup.c +++ b/trunk/arch/microblaze/kernel/setup.c @@ -121,7 +121,7 @@ void __init machine_early_init(const char *cmdline, unsigned int ram, /* Move ROMFS out of BSS before clearing it */ if (romfs_size > 0) { - memmove(&_ebss, (int *)romfs_base, romfs_size); + memmove(&__bss_stop, (int *)romfs_base, romfs_size); klimit += romfs_size; } #endif @@ -165,7 +165,7 @@ void __init machine_early_init(const char *cmdline, unsigned int ram, BUG_ON(romfs_size < 0); /* What else can we do? */ printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n", - romfs_size, romfs_base, (unsigned)&_ebss); + romfs_size, romfs_base, (unsigned)&__bss_stop); printk("New klimit: 0x%08x\n", (unsigned)klimit); #endif diff --git a/trunk/arch/microblaze/kernel/vmlinux.lds.S b/trunk/arch/microblaze/kernel/vmlinux.lds.S index 109e9d86ade4..936d01a689d7 100644 --- a/trunk/arch/microblaze/kernel/vmlinux.lds.S +++ b/trunk/arch/microblaze/kernel/vmlinux.lds.S @@ -131,7 +131,6 @@ SECTIONS { *(COMMON) . = ALIGN (4) ; __bss_stop = . ; - _ebss = . ; } . 
= ALIGN(PAGE_SIZE); _end = .; diff --git a/trunk/arch/mips/Kconfig b/trunk/arch/mips/Kconfig index e3efc06e6409..faf65286574e 100644 --- a/trunk/arch/mips/Kconfig +++ b/trunk/arch/mips/Kconfig @@ -77,6 +77,7 @@ config AR7 select SYS_SUPPORTS_ZBOOT_UART16550 select ARCH_REQUIRE_GPIOLIB select VLYNQ + select HAVE_CLK help Support for the Texas Instruments AR7 System-on-a-Chip family: TNETD7100, 7200 and 7300. @@ -88,6 +89,7 @@ config ATH79 select CEVT_R4K select CSRC_R4K select DMA_NONCOHERENT + select HAVE_CLK select IRQ_CPU select MIPS_MACHINE select SYS_HAS_CPU_MIPS32_R2 @@ -124,6 +126,7 @@ config BCM63XX select SYS_HAS_EARLY_PRINTK select SWAP_IO_SPACE select ARCH_REQUIRE_GPIOLIB + select HAVE_CLK help Support for BCM63XX based boards diff --git a/trunk/arch/mips/alchemy/board-mtx1.c b/trunk/arch/mips/alchemy/board-mtx1.c index 99969484c475..a124c251c0c9 100644 --- a/trunk/arch/mips/alchemy/board-mtx1.c +++ b/trunk/arch/mips/alchemy/board-mtx1.c @@ -228,6 +228,8 @@ static int mtx1_pci_idsel(unsigned int devsel, int assert) * adapter on the mtx-1 "singleboard" variant. It triggers a custom * logic chip connected to EXT_IO3 (GPIO1) to suppress IDSEL signals. */ + udelay(1); + if (assert && devsel != 0) /* Suppress signal to Cardbus */ alchemy_gpio_set_value(1, 0); /* set EXT_IO3 OFF */ diff --git a/trunk/arch/mips/ath79/dev-usb.c b/trunk/arch/mips/ath79/dev-usb.c index 36e9570e7bc4..b2a2311ec85b 100644 --- a/trunk/arch/mips/ath79/dev-usb.c +++ b/trunk/arch/mips/ath79/dev-usb.c @@ -145,6 +145,8 @@ static void __init ar7240_usb_setup(void) ath79_ohci_resources[0].start = AR7240_OHCI_BASE; ath79_ohci_resources[0].end = AR7240_OHCI_BASE + AR7240_OHCI_SIZE - 1; + ath79_ohci_resources[1].start = ATH79_CPU_IRQ_USB; + ath79_ohci_resources[1].end = ATH79_CPU_IRQ_USB; platform_device_register(&ath79_ohci_device); } diff --git a/trunk/arch/mips/ath79/gpio.c b/trunk/arch/mips/ath79/gpio.c index 29054f211832..48fe762d2526 100644 --- a/trunk/arch/mips/ath79/gpio.c +++ b/trunk/arch/mips/ath79/gpio.c @@ -188,8 +188,10 @@ void __init ath79_gpio_init(void) if (soc_is_ar71xx()) ath79_gpio_count = AR71XX_GPIO_COUNT; - else if (soc_is_ar724x()) - ath79_gpio_count = AR724X_GPIO_COUNT; + else if (soc_is_ar7240()) + ath79_gpio_count = AR7240_GPIO_COUNT; + else if (soc_is_ar7241() || soc_is_ar7242()) + ath79_gpio_count = AR7241_GPIO_COUNT; else if (soc_is_ar913x()) ath79_gpio_count = AR913X_GPIO_COUNT; else if (soc_is_ar933x()) diff --git a/trunk/arch/mips/bcm63xx/dev-spi.c b/trunk/arch/mips/bcm63xx/dev-spi.c index e39f73048d4f..f1c9c3e2f678 100644 --- a/trunk/arch/mips/bcm63xx/dev-spi.c +++ b/trunk/arch/mips/bcm63xx/dev-spi.c @@ -106,11 +106,15 @@ int __init bcm63xx_spi_register(void) if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) { spi_resources[0].end += BCM_6338_RSET_SPI_SIZE - 1; spi_pdata.fifo_size = SPI_6338_MSG_DATA_SIZE; + spi_pdata.msg_type_shift = SPI_6338_MSG_TYPE_SHIFT; + spi_pdata.msg_ctl_width = SPI_6338_MSG_CTL_WIDTH; } if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) { spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1; spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE; + spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT; + spi_pdata.msg_ctl_width = SPI_6358_MSG_CTL_WIDTH; } bcm63xx_spi_regs_init(); diff --git a/trunk/arch/mips/cavium-octeon/octeon-irq.c b/trunk/arch/mips/cavium-octeon/octeon-irq.c index 7fb1f222b8a5..274cd4fad30c 100644 --- a/trunk/arch/mips/cavium-octeon/octeon-irq.c +++ b/trunk/arch/mips/cavium-octeon/octeon-irq.c @@ -61,6 +61,12 @@ static void octeon_irq_set_ciu_mapping(int irq, int line, int 
bit, octeon_irq_ciu_to_irq[line][bit] = irq; } +static void octeon_irq_force_ciu_mapping(struct irq_domain *domain, + int irq, int line, int bit) +{ + irq_domain_associate(domain, irq, line << 6 | bit); +} + static int octeon_coreid_for_cpu(int cpu) { #ifdef CONFIG_SMP @@ -183,19 +189,9 @@ static void __init octeon_irq_init_core(void) mutex_init(&cd->core_irq_mutex); irq = OCTEON_IRQ_SW0 + i; - switch (irq) { - case OCTEON_IRQ_TIMER: - case OCTEON_IRQ_SW0: - case OCTEON_IRQ_SW1: - case OCTEON_IRQ_5: - case OCTEON_IRQ_PERF: - irq_set_chip_data(irq, cd); - irq_set_chip_and_handler(irq, &octeon_irq_chip_core, - handle_percpu_irq); - break; - default: - break; - } + irq_set_chip_data(irq, cd); + irq_set_chip_and_handler(irq, &octeon_irq_chip_core, + handle_percpu_irq); } } @@ -890,7 +886,6 @@ static int octeon_irq_gpio_xlat(struct irq_domain *d, unsigned int type; unsigned int pin; unsigned int trigger; - struct octeon_irq_gpio_domain_data *gpiod; if (d->of_node != node) return -EINVAL; @@ -925,8 +920,7 @@ static int octeon_irq_gpio_xlat(struct irq_domain *d, break; } *out_type = type; - gpiod = d->host_data; - *out_hwirq = gpiod->base_hwirq + pin; + *out_hwirq = pin; return 0; } @@ -996,19 +990,21 @@ static int octeon_irq_ciu_map(struct irq_domain *d, static int octeon_irq_gpio_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw) { - unsigned int line = hw >> 6; - unsigned int bit = hw & 63; + struct octeon_irq_gpio_domain_data *gpiod = d->host_data; + unsigned int line, bit; if (!octeon_irq_virq_in_range(virq)) return -EINVAL; + hw += gpiod->base_hwirq; + line = hw >> 6; + bit = hw & 63; if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0) return -EINVAL; octeon_irq_set_ciu_mapping(virq, line, bit, octeon_irq_gpio_chip, octeon_irq_handle_gpio); - return 0; } @@ -1149,6 +1145,7 @@ static void __init octeon_irq_init_ciu(void) struct irq_chip *chip_wd; struct device_node *gpio_node; struct device_node *ciu_node; + struct irq_domain *ciu_domain = NULL; octeon_irq_init_ciu_percpu(); octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu; @@ -1177,31 +1174,6 @@ static void __init octeon_irq_init_ciu(void) /* Mips internal */ octeon_irq_init_core(); - /* CIU_0 */ - for (i = 0; i < 16; i++) - octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WORKQ0, 0, i + 0, chip, handle_level_irq); - - octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq); - octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq); - - for (i = 0; i < 4; i++) - octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_INT0, 0, i + 36, chip, handle_level_irq); - for (i = 0; i < 4; i++) - octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_MSI0, 0, i + 40, chip, handle_level_irq); - - octeon_irq_set_ciu_mapping(OCTEON_IRQ_RML, 0, 46, chip, handle_level_irq); - for (i = 0; i < 4; i++) - octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_TIMER0, 0, i + 52, chip, handle_edge_irq); - - octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB0, 0, 56, chip, handle_level_irq); - octeon_irq_set_ciu_mapping(OCTEON_IRQ_BOOTDMA, 0, 63, chip, handle_level_irq); - - /* CIU_1 */ - for (i = 0; i < 16; i++) - octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq); - - octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB1, 1, 17, chip, handle_level_irq); - gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); if (gpio_node) { struct octeon_irq_gpio_domain_data *gpiod; @@ -1219,10 +1191,35 @@ static void __init octeon_irq_init_ciu(void) ciu_node = of_find_compatible_node(NULL, NULL, 
"cavium,octeon-3860-ciu"); if (ciu_node) { - irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL); + ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL); of_node_put(ciu_node); } else - pr_warn("Cannot find device node for cavium,octeon-3860-ciu.\n"); + panic("Cannot find device node for cavium,octeon-3860-ciu."); + + /* CIU_0 */ + for (i = 0; i < 16; i++) + octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0); + + octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq); + octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq); + + for (i = 0; i < 4; i++) + octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36); + for (i = 0; i < 4; i++) + octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40); + + octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46); + for (i = 0; i < 4; i++) + octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52); + + octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56); + octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_BOOTDMA, 0, 63); + + /* CIU_1 */ + for (i = 0; i < 16; i++) + octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq); + + octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17); /* Enable the CIU lines */ set_c0_status(STATUSF_IP3 | STATUSF_IP2); diff --git a/trunk/arch/mips/include/asm/clock.h b/trunk/arch/mips/include/asm/clock.h index 83894aa7932c..c9456e7a7283 100644 --- a/trunk/arch/mips/include/asm/clock.h +++ b/trunk/arch/mips/include/asm/clock.h @@ -50,15 +50,4 @@ void clk_recalc_rate(struct clk *); int clk_register(struct clk *); void clk_unregister(struct clk *); -/* the exported API, in addition to clk_set_rate */ -/** - * clk_set_rate_ex - set the clock rate for a clock source, with additional parameter - * @clk: clock source - * @rate: desired clock rate in Hz - * @algo_id: algorithm id to be passed down to ops->set_rate - * - * Returns success (0) or negative errno. 
- */ -int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id); - #endif /* __ASM_MIPS_CLOCK_H */ diff --git a/trunk/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/trunk/arch/mips/include/asm/mach-ath79/ar71xx_regs.h index 1caa78ad06d5..dde504477fac 100644 --- a/trunk/arch/mips/include/asm/mach-ath79/ar71xx_regs.h +++ b/trunk/arch/mips/include/asm/mach-ath79/ar71xx_regs.h @@ -393,7 +393,8 @@ #define AR71XX_GPIO_REG_FUNC 0x28 #define AR71XX_GPIO_COUNT 16 -#define AR724X_GPIO_COUNT 18 +#define AR7240_GPIO_COUNT 18 +#define AR7241_GPIO_COUNT 20 #define AR913X_GPIO_COUNT 22 #define AR933X_GPIO_COUNT 30 #define AR934X_GPIO_COUNT 23 diff --git a/trunk/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h b/trunk/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h index 4476fa03bf36..6ddae926bf79 100644 --- a/trunk/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h +++ b/trunk/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h @@ -42,7 +42,6 @@ #define cpu_has_mips64r1 0 #define cpu_has_mips64r2 0 -#define cpu_has_dsp 0 #define cpu_has_mipsmt 0 #define cpu_has_64bits 0 diff --git a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h index 7d98dbe5d4b5..c9bae1362606 100644 --- a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h +++ b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h @@ -9,6 +9,8 @@ int __init bcm63xx_spi_register(void); struct bcm63xx_spi_pdata { unsigned int fifo_size; + unsigned int msg_type_shift; + unsigned int msg_ctl_width; int bus_num; int num_chipselect; u32 speed_hz; diff --git a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h index 4ccc2a748aff..61f2a2a5099d 100644 --- a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h +++ b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h @@ -1054,7 +1054,8 @@ #define SPI_6338_FILL_BYTE 0x07 #define SPI_6338_MSG_TAIL 0x09 #define SPI_6338_RX_TAIL 0x0b -#define SPI_6338_MSG_CTL 0x40 +#define SPI_6338_MSG_CTL 0x40 /* 8-bits register */ +#define SPI_6338_MSG_CTL_WIDTH 8 #define SPI_6338_MSG_DATA 0x41 #define SPI_6338_MSG_DATA_SIZE 0x3f #define SPI_6338_RX_DATA 0x80 @@ -1070,7 +1071,8 @@ #define SPI_6348_FILL_BYTE 0x07 #define SPI_6348_MSG_TAIL 0x09 #define SPI_6348_RX_TAIL 0x0b -#define SPI_6348_MSG_CTL 0x40 +#define SPI_6348_MSG_CTL 0x40 /* 8-bits register */ +#define SPI_6348_MSG_CTL_WIDTH 8 #define SPI_6348_MSG_DATA 0x41 #define SPI_6348_MSG_DATA_SIZE 0x3f #define SPI_6348_RX_DATA 0x80 @@ -1078,6 +1080,7 @@ /* BCM 6358 SPI core */ #define SPI_6358_MSG_CTL 0x00 /* 16-bits register */ +#define SPI_6358_MSG_CTL_WIDTH 16 #define SPI_6358_MSG_DATA 0x02 #define SPI_6358_MSG_DATA_SIZE 0x21e #define SPI_6358_RX_DATA 0x400 @@ -1094,6 +1097,7 @@ /* BCM 6358 SPI core */ #define SPI_6368_MSG_CTL 0x00 /* 16-bits register */ +#define SPI_6368_MSG_CTL_WIDTH 16 #define SPI_6368_MSG_DATA 0x02 #define SPI_6368_MSG_DATA_SIZE 0x21e #define SPI_6368_RX_DATA 0x400 @@ -1115,7 +1119,10 @@ #define SPI_HD_W 0x01 #define SPI_HD_R 0x02 #define SPI_BYTE_CNT_SHIFT 0 -#define SPI_MSG_TYPE_SHIFT 14 +#define SPI_6338_MSG_TYPE_SHIFT 6 +#define SPI_6348_MSG_TYPE_SHIFT 6 +#define SPI_6358_MSG_TYPE_SHIFT 14 +#define SPI_6368_MSG_TYPE_SHIFT 14 /* Command */ #define SPI_CMD_NOOP 0x00 diff --git a/trunk/arch/mips/include/asm/mach-cavium-octeon/irq.h b/trunk/arch/mips/include/asm/mach-cavium-octeon/irq.h index 418992042f6f..c22a3078bf11 100644 --- 
a/trunk/arch/mips/include/asm/mach-cavium-octeon/irq.h +++ b/trunk/arch/mips/include/asm/mach-cavium-octeon/irq.h @@ -21,14 +21,10 @@ enum octeon_irq { OCTEON_IRQ_TIMER, /* sources in CIU_INTX_EN0 */ OCTEON_IRQ_WORKQ0, - OCTEON_IRQ_GPIO0 = OCTEON_IRQ_WORKQ0 + 16, - OCTEON_IRQ_WDOG0 = OCTEON_IRQ_GPIO0 + 16, + OCTEON_IRQ_WDOG0 = OCTEON_IRQ_WORKQ0 + 16, OCTEON_IRQ_WDOG15 = OCTEON_IRQ_WDOG0 + 15, OCTEON_IRQ_MBOX0 = OCTEON_IRQ_WDOG0 + 16, OCTEON_IRQ_MBOX1, - OCTEON_IRQ_UART0, - OCTEON_IRQ_UART1, - OCTEON_IRQ_UART2, OCTEON_IRQ_PCI_INT0, OCTEON_IRQ_PCI_INT1, OCTEON_IRQ_PCI_INT2, @@ -38,8 +34,6 @@ enum octeon_irq { OCTEON_IRQ_PCI_MSI2, OCTEON_IRQ_PCI_MSI3, - OCTEON_IRQ_TWSI, - OCTEON_IRQ_TWSI2, OCTEON_IRQ_RML, OCTEON_IRQ_TIMER0, OCTEON_IRQ_TIMER1, @@ -47,8 +41,6 @@ enum octeon_irq { OCTEON_IRQ_TIMER3, OCTEON_IRQ_USB0, OCTEON_IRQ_USB1, - OCTEON_IRQ_MII0, - OCTEON_IRQ_MII1, OCTEON_IRQ_BOOTDMA, #ifndef CONFIG_PCI_MSI OCTEON_IRQ_LAST = 127 diff --git a/trunk/arch/mips/include/asm/mach-loongson/loongson.h b/trunk/arch/mips/include/asm/mach-loongson/loongson.h index 06367c37e1b2..5222a007bc21 100644 --- a/trunk/arch/mips/include/asm/mach-loongson/loongson.h +++ b/trunk/arch/mips/include/asm/mach-loongson/loongson.h @@ -245,7 +245,6 @@ static inline void do_perfcnt_IRQ(void) #ifdef CONFIG_CPU_SUPPORTS_CPUFREQ #include -extern void loongson2_cpu_wait(void); extern struct cpufreq_frequency_table loongson2_clockmod_table[]; /* Chip Config */ diff --git a/trunk/arch/mips/include/asm/module.h b/trunk/arch/mips/include/asm/module.h index 7531ecd654d6..dca8bce8c7ab 100644 --- a/trunk/arch/mips/include/asm/module.h +++ b/trunk/arch/mips/include/asm/module.h @@ -10,6 +10,7 @@ struct mod_arch_specific { struct list_head dbe_list; const struct exception_table_entry *dbe_start; const struct exception_table_entry *dbe_end; + struct mips_hi16 *r_mips_hi16_list; }; typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */ diff --git a/trunk/arch/mips/include/asm/r4k-timer.h b/trunk/arch/mips/include/asm/r4k-timer.h index a37d12b3b61c..afe9e0e03fe9 100644 --- a/trunk/arch/mips/include/asm/r4k-timer.h +++ b/trunk/arch/mips/include/asm/r4k-timer.h @@ -12,16 +12,16 @@ #ifdef CONFIG_SYNC_R4K -extern void synchronise_count_master(void); -extern void synchronise_count_slave(void); +extern void synchronise_count_master(int cpu); +extern void synchronise_count_slave(int cpu); #else -static inline void synchronise_count_master(void) +static inline void synchronise_count_master(int cpu) { } -static inline void synchronise_count_slave(void) +static inline void synchronise_count_slave(int cpu) { } diff --git a/trunk/arch/mips/kernel/cpufreq/Makefile b/trunk/arch/mips/kernel/cpufreq/Makefile index c3479a432efe..05a5715ee38c 100644 --- a/trunk/arch/mips/kernel/cpufreq/Makefile +++ b/trunk/arch/mips/kernel/cpufreq/Makefile @@ -2,4 +2,4 @@ # Makefile for the Linux/MIPS cpufreq. 
# -obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o loongson2_clock.o +obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o diff --git a/trunk/arch/mips/kernel/cpufreq/loongson2_cpufreq.c b/trunk/arch/mips/kernel/cpufreq/loongson2_cpufreq.c index ae5db206347c..e7c98e2b78b6 100644 --- a/trunk/arch/mips/kernel/cpufreq/loongson2_cpufreq.c +++ b/trunk/arch/mips/kernel/cpufreq/loongson2_cpufreq.c @@ -19,7 +19,7 @@ #include -#include +#include static uint nowait; @@ -181,6 +181,25 @@ static struct platform_driver platform_driver = { .id_table = platform_device_ids, }; +/* + * This is the simple version of Loongson-2 wait, Maybe we need do this in + * interrupt disabled context. + */ + +static DEFINE_SPINLOCK(loongson2_wait_lock); + +static void loongson2_cpu_wait(void) +{ + unsigned long flags; + u32 cpu_freq; + + spin_lock_irqsave(&loongson2_wait_lock, flags); + cpu_freq = LOONGSON_CHIPCFG0; + LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */ + LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */ + spin_unlock_irqrestore(&loongson2_wait_lock, flags); +} + static int __init cpufreq_init(void) { int ret; diff --git a/trunk/arch/mips/kernel/module.c b/trunk/arch/mips/kernel/module.c index a5066b1c3de3..4f8c3cba8c0c 100644 --- a/trunk/arch/mips/kernel/module.c +++ b/trunk/arch/mips/kernel/module.c @@ -39,8 +39,6 @@ struct mips_hi16 { Elf_Addr value; }; -static struct mips_hi16 *mips_hi16_list; - static LIST_HEAD(dbe_list); static DEFINE_SPINLOCK(dbe_lock); @@ -128,8 +126,8 @@ static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v) n->addr = (Elf_Addr *)location; n->value = v; - n->next = mips_hi16_list; - mips_hi16_list = n; + n->next = me->arch.r_mips_hi16_list; + me->arch.r_mips_hi16_list = n; return 0; } @@ -142,18 +140,28 @@ static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v) return 0; } +static void free_relocation_chain(struct mips_hi16 *l) +{ + struct mips_hi16 *next; + + while (l) { + next = l->next; + kfree(l); + l = next; + } +} + static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v) { unsigned long insnlo = *location; + struct mips_hi16 *l; Elf_Addr val, vallo; /* Sign extend the addend we extract from the lo insn. */ vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000; - if (mips_hi16_list != NULL) { - struct mips_hi16 *l; - - l = mips_hi16_list; + if (me->arch.r_mips_hi16_list != NULL) { + l = me->arch.r_mips_hi16_list; while (l != NULL) { struct mips_hi16 *next; unsigned long insn; @@ -188,7 +196,7 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v) l = next; } - mips_hi16_list = NULL; + me->arch.r_mips_hi16_list = NULL; } /* @@ -201,6 +209,9 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v) return 0; out_danger: + free_relocation_chain(l); + me->arch.r_mips_hi16_list = NULL; + pr_err("module %s: dangerous R_MIPS_LO16 REL relocation\n", me->name); return -ENOEXEC; @@ -273,6 +284,7 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab, pr_debug("Applying relocate section %u to %u\n", relsec, sechdrs[relsec].sh_info); + me->arch.r_mips_hi16_list = NULL; for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { /* This is where to make the change */ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr @@ -296,6 +308,19 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab, return res; } + /* + * Normally the hi16 list should be deallocated at this point. 
A + * malformed binary however could contain a series of R_MIPS_HI16 + * relocations not followed by a R_MIPS_LO16 relocation. In that + * case, free up the list and return an error. + */ + if (me->arch.r_mips_hi16_list) { + free_relocation_chain(me->arch.r_mips_hi16_list); + me->arch.r_mips_hi16_list = NULL; + + return -ENOEXEC; + } + return 0; } diff --git a/trunk/arch/mips/kernel/smp.c b/trunk/arch/mips/kernel/smp.c index 31637d8c8738..9005bf9fb859 100644 --- a/trunk/arch/mips/kernel/smp.c +++ b/trunk/arch/mips/kernel/smp.c @@ -130,7 +130,7 @@ asmlinkage __cpuinit void start_secondary(void) cpu_set(cpu, cpu_callin_map); - synchronise_count_slave(); + synchronise_count_slave(cpu); /* * irq will be enabled in ->smp_finish(), enabling it too early @@ -173,7 +173,6 @@ void smp_send_stop(void) void __init smp_cpus_done(unsigned int max_cpus) { mp_ops->cpus_done(); - synchronise_count_master(); } /* called from main before smp_init() */ @@ -206,6 +205,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) while (!cpu_isset(cpu, cpu_callin_map)) udelay(100); + synchronise_count_master(cpu); return 0; } diff --git a/trunk/arch/mips/kernel/sync-r4k.c b/trunk/arch/mips/kernel/sync-r4k.c index 842d55e411fd..7f1eca3858de 100644 --- a/trunk/arch/mips/kernel/sync-r4k.c +++ b/trunk/arch/mips/kernel/sync-r4k.c @@ -28,12 +28,11 @@ static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0); #define COUNTON 100 #define NR_LOOPS 5 -void __cpuinit synchronise_count_master(void) +void __cpuinit synchronise_count_master(int cpu) { int i; unsigned long flags; unsigned int initcount; - int nslaves; #ifdef CONFIG_MIPS_MT_SMTC /* @@ -43,8 +42,7 @@ void __cpuinit synchronise_count_master(void) return; #endif - printk(KERN_INFO "Synchronize counters across %u CPUs: ", - num_online_cpus()); + printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu); local_irq_save(flags); @@ -52,7 +50,7 @@ void __cpuinit synchronise_count_master(void) * Notify the slaves that it's time to start */ atomic_set(&count_reference, read_c0_count()); - atomic_set(&count_start_flag, 1); + atomic_set(&count_start_flag, cpu); smp_wmb(); /* Count will be initialised to current timer for all CPU's */ @@ -69,10 +67,9 @@ void __cpuinit synchronise_count_master(void) * two CPUs. 
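
An illustrative aside, not part of the patch: the surrounding sync-r4k.c hunks turn the counter synchronization from an all-CPUs rendezvous into a master-plus-one-slave handshake (count_start_flag now carries the slave's cpu number, and the start/stop counters only ever reach 2). A small userspace sketch of that two-party handshake pattern using C11 atomics; every name here is invented, and none of this is kernel code.

    /* build: cc -std=c11 -pthread sync_sketch.c */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int start_flag;   /* master publishes the slave's id here */
    static atomic_int arrived;      /* both parties bump this; 2 == rendezvous */

    static void *slave(void *arg)
    {
        int id = *(int *)arg;

        while (atomic_load(&start_flag) != id)  /* wait to be addressed */
            ;
        atomic_fetch_add(&arrived, 1);
        while (atomic_load(&arrived) != 2)      /* wait for the master */
            ;
        puts("slave: in sync");
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        int id = 1;

        pthread_create(&t, NULL, slave, &id);
        atomic_store(&start_flag, id);          /* kick off the handshake */
        atomic_fetch_add(&arrived, 1);
        while (atomic_load(&arrived) != 2)      /* wait for the slave */
            ;
        puts("master: in sync");
        pthread_join(t, NULL);
        return 0;
    }
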
*/ - nslaves = num_online_cpus()-1; for (i = 0; i < NR_LOOPS; i++) { - /* slaves loop on '!= ncpus' */ - while (atomic_read(&count_count_start) != nslaves) + /* slaves loop on '!= 2' */ + while (atomic_read(&count_count_start) != 1) mb(); atomic_set(&count_count_stop, 0); smp_wmb(); @@ -89,7 +86,7 @@ void __cpuinit synchronise_count_master(void) /* * Wait for all slaves to leave the synchronization point: */ - while (atomic_read(&count_count_stop) != nslaves) + while (atomic_read(&count_count_stop) != 1) mb(); atomic_set(&count_count_start, 0); smp_wmb(); @@ -97,6 +94,7 @@ void __cpuinit synchronise_count_master(void) } /* Arrange for an interrupt in a short while */ write_c0_compare(read_c0_count() + COUNTON); + atomic_set(&count_start_flag, 0); local_irq_restore(flags); @@ -108,11 +106,10 @@ void __cpuinit synchronise_count_master(void) printk("done.\n"); } -void __cpuinit synchronise_count_slave(void) +void __cpuinit synchronise_count_slave(int cpu) { int i; unsigned int initcount; - int ncpus; #ifdef CONFIG_MIPS_MT_SMTC /* @@ -127,16 +124,15 @@ void __cpuinit synchronise_count_slave(void) * so we first wait for the master to say everyone is ready */ - while (!atomic_read(&count_start_flag)) + while (atomic_read(&count_start_flag) != cpu) mb(); /* Count will be initialised to next expire for all CPU's */ initcount = atomic_read(&count_reference); - ncpus = num_online_cpus(); for (i = 0; i < NR_LOOPS; i++) { atomic_inc(&count_count_start); - while (atomic_read(&count_count_start) != ncpus) + while (atomic_read(&count_count_start) != 2) mb(); /* @@ -146,7 +142,7 @@ void __cpuinit synchronise_count_slave(void) write_c0_count(initcount); atomic_inc(&count_count_stop); - while (atomic_read(&count_count_stop) != ncpus) + while (atomic_read(&count_count_stop) != 2) mb(); } /* Arrange for an interrupt in a short while */ diff --git a/trunk/arch/mips/lantiq/clk.c b/trunk/arch/mips/lantiq/clk.c index d3bcc33f4699..ce2f129b081f 100644 --- a/trunk/arch/mips/lantiq/clk.c +++ b/trunk/arch/mips/lantiq/clk.c @@ -135,6 +135,11 @@ void clk_deactivate(struct clk *clk) } EXPORT_SYMBOL(clk_deactivate); +struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) +{ + return NULL; +} + static inline u32 get_counter_resolution(void) { u32 res; diff --git a/trunk/arch/mips/lantiq/prom.c b/trunk/arch/mips/lantiq/prom.c index d185e8477fdf..6cfd6117fbfd 100644 --- a/trunk/arch/mips/lantiq/prom.c +++ b/trunk/arch/mips/lantiq/prom.c @@ -8,7 +8,10 @@ #include #include +#include #include +#include + #include #include @@ -70,6 +73,25 @@ void __init plat_mem_setup(void) __dt_setup_arch(&__dtb_start); } +void __init device_tree_init(void) +{ + unsigned long base, size; + + if (!initial_boot_params) + return; + + base = virt_to_phys((void *)initial_boot_params); + size = be32_to_cpu(initial_boot_params->totalsize); + + /* Before we do anything, lets reserve the dt blob */ + reserve_bootmem(base, size, BOOTMEM_DEFAULT); + + unflatten_device_tree(); + + /* free the space reserved for the dt blob */ + free_bootmem(base, size); +} + void __init prom_init(void) { /* call the soc specific detetcion code and get it to fill soc_info */ diff --git a/trunk/arch/mips/lantiq/xway/sysctrl.c b/trunk/arch/mips/lantiq/xway/sysctrl.c index 83780f7c842b..befbb760ab76 100644 --- a/trunk/arch/mips/lantiq/xway/sysctrl.c +++ b/trunk/arch/mips/lantiq/xway/sysctrl.c @@ -20,10 +20,12 @@ /* clock control register */ #define CGU_IFCCR 0x0018 +#define CGU_IFCCR_VR9 0x0024 /* system clock register */ #define CGU_SYS 0x0010 /* pci control 
register */ #define CGU_PCICR 0x0034 +#define CGU_PCICR_VR9 0x0038 /* ephy configuration register */ #define CGU_EPHY 0x10 /* power control register */ @@ -80,6 +82,9 @@ static void __iomem *pmu_membase; void __iomem *ltq_cgu_membase; void __iomem *ltq_ebu_membase; +static u32 ifccr = CGU_IFCCR; +static u32 pcicr = CGU_PCICR; + /* legacy function kept alive to ease clkdev transition */ void ltq_pmu_enable(unsigned int module) { @@ -103,14 +108,14 @@ EXPORT_SYMBOL(ltq_pmu_disable); /* enable a hw clock */ static int cgu_enable(struct clk *clk) { - ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) | clk->bits, CGU_IFCCR); + ltq_cgu_w32(ltq_cgu_r32(ifccr) | clk->bits, ifccr); return 0; } /* disable a hw clock */ static void cgu_disable(struct clk *clk) { - ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) & ~clk->bits, CGU_IFCCR); + ltq_cgu_w32(ltq_cgu_r32(ifccr) & ~clk->bits, ifccr); } /* enable a clock gate */ @@ -138,22 +143,22 @@ static void pmu_disable(struct clk *clk) /* the pci enable helper */ static int pci_enable(struct clk *clk) { - unsigned int ifccr = ltq_cgu_r32(CGU_IFCCR); + unsigned int val = ltq_cgu_r32(ifccr); /* set bus clock speed */ if (of_machine_is_compatible("lantiq,ar9")) { - ifccr &= ~0x1f00000; + val &= ~0x1f00000; if (clk->rate == CLOCK_33M) - ifccr |= 0xe00000; + val |= 0xe00000; else - ifccr |= 0x700000; /* 62.5M */ + val |= 0x700000; /* 62.5M */ } else { - ifccr &= ~0xf00000; + val &= ~0xf00000; if (clk->rate == CLOCK_33M) - ifccr |= 0x800000; + val |= 0x800000; else - ifccr |= 0x400000; /* 62.5M */ + val |= 0x400000; /* 62.5M */ } - ltq_cgu_w32(ifccr, CGU_IFCCR); + ltq_cgu_w32(val, ifccr); pmu_enable(clk); return 0; } @@ -161,18 +166,16 @@ static int pci_enable(struct clk *clk) /* enable the external clock as a source */ static int pci_ext_enable(struct clk *clk) { - ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) & ~(1 << 16), - CGU_IFCCR); - ltq_cgu_w32((1 << 30), CGU_PCICR); + ltq_cgu_w32(ltq_cgu_r32(ifccr) & ~(1 << 16), ifccr); + ltq_cgu_w32((1 << 30), pcicr); return 0; } /* disable the external clock as a source */ static void pci_ext_disable(struct clk *clk) { - ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) | (1 << 16), - CGU_IFCCR); - ltq_cgu_w32((1 << 31) | (1 << 30), CGU_PCICR); + ltq_cgu_w32(ltq_cgu_r32(ifccr) | (1 << 16), ifccr); + ltq_cgu_w32((1 << 31) | (1 << 30), pcicr); } /* enable a clockout source */ @@ -184,11 +187,11 @@ static int clkout_enable(struct clk *clk) for (i = 0; i < 4; i++) { if (clk->rates[i] == clk->rate) { int shift = 14 - (2 * clk->module); - unsigned int ifccr = ltq_cgu_r32(CGU_IFCCR); + unsigned int val = ltq_cgu_r32(ifccr); - ifccr &= ~(3 << shift); - ifccr |= i << shift; - ltq_cgu_w32(ifccr, CGU_IFCCR); + val &= ~(3 << shift); + val |= i << shift; + ltq_cgu_w32(val, ifccr); return 0; } } @@ -336,8 +339,12 @@ void __init ltq_soc_init(void) clkdev_add_clkout(); /* add the soc dependent clocks */ - if (!of_machine_is_compatible("lantiq,vr9")) + if (of_machine_is_compatible("lantiq,vr9")) { + ifccr = CGU_IFCCR_VR9; + pcicr = CGU_PCICR_VR9; + } else { clkdev_add_pmu("1e180000.etop", NULL, 0, PMU_PPE); + } if (!of_machine_is_compatible("lantiq,ase")) { clkdev_add_pmu("1e100c00.serial", NULL, 0, PMU_ASC1); diff --git a/trunk/arch/mips/loongson/Kconfig b/trunk/arch/mips/loongson/Kconfig index aca93eed8779..263beb9322a8 100644 --- a/trunk/arch/mips/loongson/Kconfig +++ b/trunk/arch/mips/loongson/Kconfig @@ -41,6 +41,7 @@ config LEMOTE_MACH2F select CSRC_R4K if ! 
MIPS_EXTERNAL_TIMER select DMA_NONCOHERENT select GENERIC_ISA_DMA_SUPPORT_BROKEN + select HAVE_CLK select HW_HAS_PCI select I8259 select IRQ_CPU diff --git a/trunk/arch/mips/loongson/lemote-2f/Makefile b/trunk/arch/mips/loongson/lemote-2f/Makefile index 8699a53f0477..4f9eaa328a16 100644 --- a/trunk/arch/mips/loongson/lemote-2f/Makefile +++ b/trunk/arch/mips/loongson/lemote-2f/Makefile @@ -2,7 +2,7 @@ # Makefile for lemote loongson2f family machines # -obj-y += machtype.o irq.o reset.o ec_kb3310b.o +obj-y += clock.o machtype.o irq.o reset.o ec_kb3310b.o # # Suspend Support diff --git a/trunk/arch/mips/kernel/cpufreq/loongson2_clock.c b/trunk/arch/mips/loongson/lemote-2f/clock.c similarity index 76% rename from trunk/arch/mips/kernel/cpufreq/loongson2_clock.c rename to trunk/arch/mips/loongson/lemote-2f/clock.c index 5426779d9fdb..bc739d4bab2e 100644 --- a/trunk/arch/mips/kernel/cpufreq/loongson2_clock.c +++ b/trunk/arch/mips/loongson/lemote-2f/clock.c @@ -6,14 +6,17 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. */ - -#include +#include #include -#include +#include +#include +#include +#include +#include +#include #include - -#include +#include static LIST_HEAD(clock_list); static DEFINE_SPINLOCK(clock_lock); @@ -88,12 +91,6 @@ void clk_put(struct clk *clk) EXPORT_SYMBOL(clk_put); int clk_set_rate(struct clk *clk, unsigned long rate) -{ - return clk_set_rate_ex(clk, rate, 0); -} -EXPORT_SYMBOL_GPL(clk_set_rate); - -int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id) { int ret = 0; int regval; @@ -103,7 +100,7 @@ int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id) unsigned long flags; spin_lock_irqsave(&clock_lock, flags); - ret = clk->ops->set_rate(clk, rate, algo_id); + ret = clk->ops->set_rate(clk, rate, 0); spin_unlock_irqrestore(&clock_lock, flags); } @@ -129,7 +126,7 @@ int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id) return ret; } -EXPORT_SYMBOL_GPL(clk_set_rate_ex); +EXPORT_SYMBOL_GPL(clk_set_rate); long clk_round_rate(struct clk *clk, unsigned long rate) { @@ -146,26 +143,3 @@ long clk_round_rate(struct clk *clk, unsigned long rate) return rate; } EXPORT_SYMBOL_GPL(clk_round_rate); - -/* - * This is the simple version of Loongson-2 wait, Maybe we need do this in - * interrupt disabled content - */ - -DEFINE_SPINLOCK(loongson2_wait_lock); -void loongson2_cpu_wait(void) -{ - u32 cpu_freq; - unsigned long flags; - - spin_lock_irqsave(&loongson2_wait_lock, flags); - cpu_freq = LOONGSON_CHIPCFG0; - LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */ - LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */ - spin_unlock_irqrestore(&loongson2_wait_lock, flags); -} -EXPORT_SYMBOL_GPL(loongson2_cpu_wait); - -MODULE_AUTHOR("Yanhua "); -MODULE_DESCRIPTION("cpufreq driver for Loongson 2F"); -MODULE_LICENSE("GPL"); diff --git a/trunk/arch/mips/loongson1/Kconfig b/trunk/arch/mips/loongson1/Kconfig index 237fa214de9f..a9a14d6e81af 100644 --- a/trunk/arch/mips/loongson1/Kconfig +++ b/trunk/arch/mips/loongson1/Kconfig @@ -15,6 +15,7 @@ config LOONGSON1_LS1B select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_SUPPORTS_HIGHMEM select SYS_HAS_EARLY_PRINTK + select HAVE_CLK endchoice diff --git a/trunk/arch/mips/loongson1/common/clock.c b/trunk/arch/mips/loongson1/common/clock.c index 2d98fb030596..1bbbbec12085 100644 --- a/trunk/arch/mips/loongson1/common/clock.c +++ b/trunk/arch/mips/loongson1/common/clock.c @@ -38,12 +38,28 @@ struct clk *clk_get(struct device *dev, const char *name) } 
EXPORT_SYMBOL(clk_get); +int clk_enable(struct clk *clk) +{ + return 0; +} +EXPORT_SYMBOL(clk_enable); + +void clk_disable(struct clk *clk) +{ +} +EXPORT_SYMBOL(clk_disable); + unsigned long clk_get_rate(struct clk *clk) { return clk->rate; } EXPORT_SYMBOL(clk_get_rate); +void clk_put(struct clk *clk) +{ +} +EXPORT_SYMBOL(clk_put); + static void pll_clk_init(struct clk *clk) { u32 pll; diff --git a/trunk/arch/mips/mti-malta/malta-pci.c b/trunk/arch/mips/mti-malta/malta-pci.c index 284dea54faf5..2147cb34e705 100644 --- a/trunk/arch/mips/mti-malta/malta-pci.c +++ b/trunk/arch/mips/mti-malta/malta-pci.c @@ -252,16 +252,3 @@ void __init mips_pcibios_init(void) register_pci_controller(controller); } - -/* Enable PCI 2.1 compatibility in PIIX4 */ -static void __devinit quirk_dlcsetup(struct pci_dev *dev) -{ - u8 odlc, ndlc; - (void) pci_read_config_byte(dev, 0x82, &odlc); - /* Enable passive releases and delayed transaction */ - ndlc = odlc | 7; - (void) pci_write_config_byte(dev, 0x82, ndlc); -} - -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0, - quirk_dlcsetup); diff --git a/trunk/arch/mips/pci/pci-ar724x.c b/trunk/arch/mips/pci/pci-ar724x.c index 414a7459858d..86d77a666458 100644 --- a/trunk/arch/mips/pci/pci-ar724x.c +++ b/trunk/arch/mips/pci/pci-ar724x.c @@ -23,9 +23,12 @@ #define AR724X_PCI_MEM_BASE 0x10000000 #define AR724X_PCI_MEM_SIZE 0x08000000 +#define AR724X_PCI_REG_RESET 0x18 #define AR724X_PCI_REG_INT_STATUS 0x4c #define AR724X_PCI_REG_INT_MASK 0x50 +#define AR724X_PCI_RESET_LINK_UP BIT(0) + #define AR724X_PCI_INT_DEV0 BIT(14) #define AR724X_PCI_IRQ_COUNT 1 @@ -38,6 +41,15 @@ static void __iomem *ar724x_pci_ctrl_base; static u32 ar724x_pci_bar0_value; static bool ar724x_pci_bar0_is_cached; +static bool ar724x_pci_link_up; + +static inline bool ar724x_pci_check_link(void) +{ + u32 reset; + + reset = __raw_readl(ar724x_pci_ctrl_base + AR724X_PCI_REG_RESET); + return reset & AR724X_PCI_RESET_LINK_UP; +} static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, uint32_t *value) @@ -46,6 +58,9 @@ static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where, void __iomem *base; u32 data; + if (!ar724x_pci_link_up) + return PCIBIOS_DEVICE_NOT_FOUND; + if (devfn) return PCIBIOS_DEVICE_NOT_FOUND; @@ -96,6 +111,9 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where, u32 data; int s; + if (!ar724x_pci_link_up) + return PCIBIOS_DEVICE_NOT_FOUND; + if (devfn) return PCIBIOS_DEVICE_NOT_FOUND; @@ -280,6 +298,10 @@ int __init ar724x_pcibios_init(int irq) if (ar724x_pci_ctrl_base == NULL) goto err_unmap_devcfg; + ar724x_pci_link_up = ar724x_pci_check_link(); + if (!ar724x_pci_link_up) + pr_warn("ar724x: PCIe link is down\n"); + ar724x_pci_irq_init(irq); register_pci_controller(&ar724x_pci_controller); diff --git a/trunk/arch/mips/txx9/Kconfig b/trunk/arch/mips/txx9/Kconfig index 852ae4bb7a85..6d40bc783459 100644 --- a/trunk/arch/mips/txx9/Kconfig +++ b/trunk/arch/mips/txx9/Kconfig @@ -20,6 +20,7 @@ config MACH_TXX9 select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_SUPPORTS_BIG_ENDIAN + select HAVE_CLK config TOSHIBA_JMR3927 bool "Toshiba JMR-TX3927 board" diff --git a/trunk/arch/parisc/include/asm/atomic.h b/trunk/arch/parisc/include/asm/atomic.h index 6c6defc24619..af9cf30ed474 100644 --- a/trunk/arch/parisc/include/asm/atomic.h +++ b/trunk/arch/parisc/include/asm/atomic.h @@ -141,7 +141,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) 
#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0) -#define ATOMIC_INIT(i) ((atomic_t) { (i) }) +#define ATOMIC_INIT(i) { (i) } #define smp_mb__before_atomic_dec() smp_mb() #define smp_mb__after_atomic_dec() smp_mb() @@ -150,7 +150,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) #ifdef CONFIG_64BIT -#define ATOMIC64_INIT(i) ((atomic64_t) { (i) }) +#define ATOMIC64_INIT(i) { (i) } static __inline__ s64 __atomic64_add_return(s64 i, atomic64_t *v) diff --git a/trunk/arch/parisc/kernel/process.c b/trunk/arch/parisc/kernel/process.c index d4b94b395c16..2c05a9292a81 100644 --- a/trunk/arch/parisc/kernel/process.c +++ b/trunk/arch/parisc/kernel/process.c @@ -309,7 +309,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp, cregs->ksp = (unsigned long)stack + (pregs->gr[21] & (THREAD_SIZE - 1)); cregs->gr[30] = usp; - if (p->personality == PER_HPUX) { + if (personality(p->personality) == PER_HPUX) { #ifdef CONFIG_HPUX cregs->kpc = (unsigned long) &hpux_child_return; #else diff --git a/trunk/arch/parisc/kernel/sys_parisc.c b/trunk/arch/parisc/kernel/sys_parisc.c index c9b932260f47..7426e40699bd 100644 --- a/trunk/arch/parisc/kernel/sys_parisc.c +++ b/trunk/arch/parisc/kernel/sys_parisc.c @@ -225,12 +225,12 @@ long parisc_personality(unsigned long personality) long err; if (personality(current->personality) == PER_LINUX32 - && personality == PER_LINUX) - personality = PER_LINUX32; + && personality(personality) == PER_LINUX) + personality = (personality & ~PER_MASK) | PER_LINUX32; err = sys_personality(personality); - if (err == PER_LINUX32) - err = PER_LINUX; + if (personality(err) == PER_LINUX32) + err = (err & ~PER_MASK) | PER_LINUX; return err; } diff --git a/trunk/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi b/trunk/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi index 8d35d2c1f694..4f9c9f682ecf 100644 --- a/trunk/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi +++ b/trunk/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi @@ -345,6 +345,13 @@ /include/ "qoriq-duart-1.dtsi" /include/ "qoriq-gpio-0.dtsi" /include/ "qoriq-usb2-mph-0.dtsi" + usb@210000 { + compatible = "fsl-usb2-mph-v1.6", "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph"; + port0; + }; /include/ "qoriq-usb2-dr-0.dtsi" + usb@211000 { + compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr"; + }; /include/ "qoriq-sec4.0-0.dtsi" }; diff --git a/trunk/arch/powerpc/configs/85xx/p1023rds_defconfig b/trunk/arch/powerpc/configs/85xx/p1023rds_defconfig index f4337bacd0e7..26e541c4662b 100644 --- a/trunk/arch/powerpc/configs/85xx/p1023rds_defconfig +++ b/trunk/arch/powerpc/configs/85xx/p1023rds_defconfig @@ -6,28 +6,27 @@ CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_AUDIT=y -CONFIG_SPARSE_IRQ=y +CONFIG_IRQ_DOMAIN_DEBUG=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_KALLSYMS_ALL=y -CONFIG_KALLSYMS_EXTRA_PASS=y CONFIG_EMBEDDED=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y # CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +CONFIG_MAC_PARTITION=y CONFIG_P1023_RDS=y CONFIG_QUICC_ENGINE=y CONFIG_QE_GPIO=y CONFIG_CPM2=y -CONFIG_GPIO_MPC8XXX=y CONFIG_HIGHMEM=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=m CONFIG_MATH_EMULATION=y @@ -63,11 +62,11 @@ CONFIG_INET_ESP=y CONFIG_IPV6=y CONFIG_IP_SCTP=m 
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y CONFIG_PROC_DEVICETREE=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=131072 -CONFIG_MISC_DEVICES=y CONFIG_EEPROM_LEGACY=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y @@ -80,15 +79,14 @@ CONFIG_SATA_FSL=y CONFIG_SATA_SIL24=y CONFIG_NETDEVICES=y CONFIG_DUMMY=y +CONFIG_FS_ENET=y +CONFIG_FSL_PQ_MDIO=y +CONFIG_E1000E=y CONFIG_MARVELL_PHY=y CONFIG_DAVICOM_PHY=y CONFIG_CICADA_PHY=y CONFIG_VITESSE_PHY=y CONFIG_FIXED_PHY=y -CONFIG_NET_ETHERNET=y -CONFIG_FS_ENET=y -CONFIG_E1000E=y -CONFIG_FSL_PQ_MDIO=y CONFIG_INPUT_FF_MEMLESS=m # CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set @@ -98,16 +96,15 @@ CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=2 CONFIG_SERIAL_8250_RUNTIME_UARTS=2 -CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y CONFIG_SERIAL_8250_DETECT_IRQ=y CONFIG_SERIAL_8250_RSA=y CONFIG_SERIAL_QE=m -CONFIG_HW_RANDOM=y CONFIG_NVRAM=y CONFIG_I2C=y CONFIG_I2C_CPM=m CONFIG_I2C_MPC=y +CONFIG_GPIO_MPC8XXX=y # CONFIG_HWMON is not set CONFIG_VIDEO_OUTPUT_CONTROL=y CONFIG_SOUND=y @@ -123,7 +120,6 @@ CONFIG_DMADEVICES=y CONFIG_FSL_DMA=y # CONFIG_NET_DMA is not set CONFIG_STAGING=y -# CONFIG_STAGING_EXCLUDE_BUILD is not set CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set @@ -150,22 +146,15 @@ CONFIG_QNX4FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_NFSD=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_MAC_PARTITION=y CONFIG_CRC_T10DIF=y CONFIG_FRAME_WARN=8092 CONFIG_DEBUG_FS=y -CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y # CONFIG_DEBUG_BUGVERBOSE is not set CONFIG_DEBUG_INFO=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y -CONFIG_IRQ_DOMAIN_DEBUG=y CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_SHA512=y diff --git a/trunk/arch/powerpc/configs/corenet32_smp_defconfig b/trunk/arch/powerpc/configs/corenet32_smp_defconfig index cbb98c1234fd..8b3d57c1ebe8 100644 --- a/trunk/arch/powerpc/configs/corenet32_smp_defconfig +++ b/trunk/arch/powerpc/configs/corenet32_smp_defconfig @@ -6,8 +6,8 @@ CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_AUDIT=y -CONFIG_SPARSE_IRQ=y -CONFIG_RCU_TRACE=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 @@ -21,23 +21,22 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y # CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +CONFIG_MAC_PARTITION=y CONFIG_P2041_RDB=y CONFIG_P3041_DS=y CONFIG_P4080_DS=y CONFIG_P5020_DS=y CONFIG_HIGHMEM=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=m CONFIG_KEXEC=y CONFIG_IRQ_ALL_CPUS=y CONFIG_FORCE_MAX_ZONEORDER=13 -CONFIG_FSL_LBC=y CONFIG_PCI=y CONFIG_PCIEPORTBUS=y -CONFIG_PCI_MSI=y # CONFIG_PCIEASPM is not set +CONFIG_PCI_MSI=y CONFIG_RAPIDIO=y CONFIG_FSL_RIO=y CONFIG_NET=y @@ -70,6 +69,7 @@ CONFIG_INET_IPCOMP=y CONFIG_IPV6=y CONFIG_IP_SCTP=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CHAR=y @@ -77,17 +77,14 @@ CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_M25P80=y CONFIG_MTD_NAND=y -CONFIG_MTD_NAND_ECC=y -CONFIG_MTD_NAND_IDS=y -CONFIG_MTD_NAND_FSL_IFC=y CONFIG_MTD_NAND_FSL_ELBC=y -CONFIG_MTD_M25P80=y +CONFIG_MTD_NAND_FSL_IFC=y CONFIG_PROC_DEVICETREE=y CONFIG_BLK_DEV_LOOP=y 
CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=131072 -CONFIG_MISC_DEVICES=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y @@ -115,11 +112,9 @@ CONFIG_SERIO_LIBPS2=y CONFIG_PPC_EPAPR_HV_BYTECHAN=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y CONFIG_SERIAL_8250_DETECT_IRQ=y CONFIG_SERIAL_8250_RSA=y -CONFIG_HW_RANDOM=y CONFIG_NVRAM=y CONFIG_I2C=y CONFIG_I2C_CHARDEV=y @@ -132,7 +127,6 @@ CONFIG_SPI_FSL_ESPI=y CONFIG_VIDEO_OUTPUT_CONTROL=y CONFIG_USB_HID=m CONFIG_USB=y -CONFIG_USB_DEVICEFS=y CONFIG_USB_MON=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_FSL=y @@ -142,8 +136,6 @@ CONFIG_USB_OHCI_HCD_PPC_OF_LE=y CONFIG_USB_STORAGE=y CONFIG_MMC=y CONFIG_MMC_SDHCI=y -CONFIG_MMC_SDHCI_OF=y -CONFIG_MMC_SDHCI_OF_ESDHC=y CONFIG_EDAC=y CONFIG_EDAC_MM_EDAC=y CONFIG_EDAC_MPC85XX=y @@ -170,19 +162,16 @@ CONFIG_HUGETLBFS=y CONFIG_JFFS2_FS=y CONFIG_CRAMFS=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m -CONFIG_PARTITION_ADVANCED=y -CONFIG_MAC_PARTITION=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_UTF8=m CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_SHIRQ=y CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_INFO=y -CONFIG_SYSCTL_SYSCALL_CHECK=y +CONFIG_RCU_TRACE=y CONFIG_CRYPTO_NULL=y CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_MD4=y diff --git a/trunk/arch/powerpc/configs/corenet64_smp_defconfig b/trunk/arch/powerpc/configs/corenet64_smp_defconfig index dd89de8b0b7f..0516e22ca3de 100644 --- a/trunk/arch/powerpc/configs/corenet64_smp_defconfig +++ b/trunk/arch/powerpc/configs/corenet64_smp_defconfig @@ -56,6 +56,7 @@ CONFIG_INET_ESP=y CONFIG_IPV6=y CONFIG_IP_SCTP=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CHAR=y diff --git a/trunk/arch/powerpc/configs/g5_defconfig b/trunk/arch/powerpc/configs/g5_defconfig index 15130066e5e2..07b7f2af2dca 100644 --- a/trunk/arch/powerpc/configs/g5_defconfig +++ b/trunk/arch/powerpc/configs/g5_defconfig @@ -1,8 +1,10 @@ +CONFIG_PPC64=y +CONFIG_ALTIVEC=y +CONFIG_SMP=y +CONFIG_NR_CPUS=4 CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_BLK_DEV_INITRD=y @@ -13,15 +15,16 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_MAC_PARTITION=y -CONFIG_SMP=y -CONFIG_NR_CPUS=4 -CONFIG_KEXEC=y -# CONFIG_RELOCATABLE is not set +# CONFIG_PPC_PSERIES is not set CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_PMAC64=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_KEXEC=y +CONFIG_IRQ_ALL_CPUS=y +# CONFIG_MIGRATION is not set CONFIG_PCI_MSI=y CONFIG_NET=y CONFIG_PACKET=y @@ -49,6 +52,7 @@ CONFIG_NF_CT_NETLINK=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_IP_NF_QUEUE=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_PROC_DEVICETREE=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y @@ -56,6 +60,8 @@ CONFIG_BLK_DEV_RAM_SIZE=65536 CONFIG_CDROM_PKTCDVD=m CONFIG_IDE=y CONFIG_BLK_DEV_IDECD=y +CONFIG_BLK_DEV_IDE_PMAC=y +CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y @@ -79,24 +85,33 @@ CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_MIRROR=m CONFIG_DM_ZERO=m -CONFIG_MACINTOSH_DRIVERS=y +CONFIG_IEEE1394=y +CONFIG_IEEE1394_OHCI1394=y +CONFIG_IEEE1394_SBP2=m +CONFIG_IEEE1394_ETH1394=m +CONFIG_IEEE1394_RAWIO=y +CONFIG_IEEE1394_VIDEO1394=m +CONFIG_IEEE1394_DV1394=m +CONFIG_ADB_PMU=y 
+CONFIG_PMAC_SMU=y CONFIG_MAC_EMUMOUSEBTN=y +CONFIG_THERM_PM72=y +CONFIG_WINDFARM=y +CONFIG_WINDFARM_PM81=y +CONFIG_WINDFARM_PM91=y +CONFIG_WINDFARM_PM112=y +CONFIG_WINDFARM_PM121=y CONFIG_NETDEVICES=y -CONFIG_BONDING=m CONFIG_DUMMY=m -CONFIG_MII=y +CONFIG_BONDING=m CONFIG_TUN=m +CONFIG_NET_ETHERNET=y +CONFIG_MII=y +CONFIG_SUNGEM=y CONFIG_ACENIC=m CONFIG_ACENIC_OMIT_TIGON_I=y -CONFIG_TIGON3=y CONFIG_E1000=y -CONFIG_SUNGEM=y -CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPPOE=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m +CONFIG_TIGON3=y CONFIG_USB_CATC=m CONFIG_USB_KAWETH=m CONFIG_USB_PEGASUS=m @@ -106,24 +121,36 @@ CONFIG_USB_USBNET=m # CONFIG_USB_NET_NET1080 is not set # CONFIG_USB_NET_CDC_SUBSET is not set # CONFIG_USB_NET_ZAURUS is not set +CONFIG_PPP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPPOE=m # CONFIG_INPUT_MOUSEDEV_PSAUX is not set CONFIG_INPUT_JOYDEV=m CONFIG_INPUT_EVDEV=y +# CONFIG_KEYBOARD_ATKBD is not set # CONFIG_MOUSE_PS2 is not set +# CONFIG_SERIO_I8042 is not set # CONFIG_SERIO_SERPORT is not set -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_HW_RANDOM is not set CONFIG_GEN_RTC=y CONFIG_RAW_DRIVER=y CONFIG_I2C_CHARDEV=y # CONFIG_HWMON is not set -CONFIG_AGP=y -CONFIG_DRM=y -CONFIG_DRM_NOUVEAU=y +CONFIG_AGP=m +CONFIG_AGP_UNINORTH=m CONFIG_VIDEO_OUTPUT_CONTROL=m +CONFIG_FB=y CONFIG_FIRMWARE_EDID=y CONFIG_FB_TILEBLITTING=y +CONFIG_FB_OF=y +CONFIG_FB_NVIDIA=y +CONFIG_FB_NVIDIA_I2C=y CONFIG_FB_RADEON=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y CONFIG_SOUND=m CONFIG_SND=m @@ -131,7 +158,15 @@ CONFIG_SND_SEQUENCER=m CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m CONFIG_SND_SEQUENCER_OSS=y +CONFIG_SND_POWERMAC=m +CONFIG_SND_AOA=m +CONFIG_SND_AOA_FABRIC_LAYOUT=m +CONFIG_SND_AOA_ONYX=m +CONFIG_SND_AOA_TAS=m +CONFIG_SND_AOA_TOONIE=m CONFIG_SND_USB_AUDIO=m +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y CONFIG_HID_GYRATION=y CONFIG_LOGITECH_FF=y CONFIG_HID_PANTHERLORD=y @@ -139,12 +174,13 @@ CONFIG_HID_PETALYNX=y CONFIG_HID_SAMSUNG=y CONFIG_HID_SONY=y CONFIG_HID_SUNPLUS=y -CONFIG_HID_PID=y -CONFIG_USB_HIDDEV=y CONFIG_USB=y +CONFIG_USB_DEVICEFS=y CONFIG_USB_MON=y CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_HCD_PPC_OF is not set CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PPC_OF_BE=y CONFIG_USB_ACM=m CONFIG_USB_PRINTER=y CONFIG_USB_STORAGE=y @@ -208,6 +244,8 @@ CONFIG_REISERFS_FS_POSIX_ACL=y CONFIG_REISERFS_FS_SECURITY=y CONFIG_XFS_FS=m CONFIG_XFS_POSIX_ACL=y +CONFIG_INOTIFY=y +CONFIG_AUTOFS_FS=m CONFIG_ISO9660_FS=y CONFIG_JOLIET=y CONFIG_ZISOFS=y @@ -221,12 +259,14 @@ CONFIG_HFS_FS=m CONFIG_HFSPLUS_FS=m CONFIG_CRAMFS=y CONFIG_NFS_FS=y +CONFIG_NFS_V3=y CONFIG_NFS_V3_ACL=y CONFIG_NFS_V4=y CONFIG_NFSD=y CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y CONFIG_CIFS=m +CONFIG_PARTITION_ADVANCED=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_1250=y CONFIG_NLS_CODEPAGE_1251=y @@ -234,23 +274,29 @@ CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_ISO8859_15=y CONFIG_NLS_UTF8=y +CONFIG_CRC_T10DIF=y +CONFIG_LIBCRC32C=m CONFIG_MAGIC_SYSRQ=y -# CONFIG_UNUSED_SYMBOLS is not set CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_MUTEXES=y +# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_LATENCYTOP=y -CONFIG_STRICT_DEVMEM=y +CONFIG_SYSCTL_SYSCALL_CHECK=y +CONFIG_BOOTX_TEXT=y CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES=m 
CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m @@ -260,6 +306,3 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m # CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set -# CONFIG_VIRTUALIZATION is not set -CONFIG_CRC_T10DIF=y -CONFIG_LIBCRC32C=m diff --git a/trunk/arch/powerpc/configs/mpc83xx_defconfig b/trunk/arch/powerpc/configs/mpc83xx_defconfig index 5aac9a8bc53b..9352e4430c3b 100644 --- a/trunk/arch/powerpc/configs/mpc83xx_defconfig +++ b/trunk/arch/powerpc/configs/mpc83xx_defconfig @@ -2,12 +2,12 @@ CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y # CONFIG_PPC_CHRP is not set # CONFIG_PPC_PMAC is not set CONFIG_PPC_83xx=y @@ -25,7 +25,6 @@ CONFIG_ASP834x=y CONFIG_QUICC_ENGINE=y CONFIG_QE_GPIO=y CONFIG_MATH_EMULATION=y -CONFIG_SPARSE_IRQ=y CONFIG_PCI=y CONFIG_NET=y CONFIG_PACKET=y @@ -42,10 +41,9 @@ CONFIG_INET_ESP=y # CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_OF_PARTS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y @@ -64,15 +62,14 @@ CONFIG_ATA=y CONFIG_SATA_FSL=y CONFIG_SATA_SIL=y CONFIG_NETDEVICES=y +CONFIG_MII=y +CONFIG_UCC_GETH=y +CONFIG_GIANFAR=y CONFIG_MARVELL_PHY=y CONFIG_DAVICOM_PHY=y CONFIG_VITESSE_PHY=y CONFIG_ICPLUS_PHY=y CONFIG_FIXED_PHY=y -CONFIG_NET_ETHERNET=y -CONFIG_MII=y -CONFIG_GIANFAR=y -CONFIG_UCC_GETH=y CONFIG_INPUT_FF_MEMLESS=m # CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set @@ -112,17 +109,12 @@ CONFIG_RTC_DRV_DS1374=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -CONFIG_INOTIFY=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y -CONFIG_PARTITION_ADVANCED=y CONFIG_CRC_T10DIF=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_SHA256=y diff --git a/trunk/arch/powerpc/configs/mpc85xx_defconfig b/trunk/arch/powerpc/configs/mpc85xx_defconfig index 03ee911c4577..8b5bda27d248 100644 --- a/trunk/arch/powerpc/configs/mpc85xx_defconfig +++ b/trunk/arch/powerpc/configs/mpc85xx_defconfig @@ -5,7 +5,9 @@ CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_AUDIT=y -CONFIG_SPARSE_IRQ=y +CONFIG_IRQ_DOMAIN_DEBUG=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 @@ -17,6 +19,8 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y # CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +CONFIG_MAC_PARTITION=y CONFIG_MPC8540_ADS=y CONFIG_MPC8560_ADS=y CONFIG_MPC85xx_CDS=y @@ -40,8 +44,6 @@ CONFIG_SBC8548=y CONFIG_QUICC_ENGINE=y CONFIG_QE_GPIO=y CONFIG_HIGHMEM=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y CONFIG_BINFMT_MISC=m CONFIG_MATH_EMULATION=y CONFIG_FORCE_MAX_ZONEORDER=12 @@ -74,36 +76,25 @@ CONFIG_INET_ESP=y CONFIG_IPV6=y CONFIG_IP_SCTP=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y -CONFIG_MTD_CFI=y CONFIG_FTL=y -CONFIG_MTD_GEN_PROBE=y -CONFIG_MTD_MAP_BANK_WIDTH_1=y -CONFIG_MTD_MAP_BANK_WIDTH_2=y -CONFIG_MTD_MAP_BANK_WIDTH_4=y -CONFIG_MTD_CFI_I1=y 
-CONFIG_MTD_CFI_I2=y +CONFIG_MTD_CFI=y CONFIG_MTD_CFI_INTELEXT=y CONFIG_MTD_CFI_AMDSTD=y -CONFIG_MTD_CFI_UTIL=y CONFIG_MTD_PHYSMAP_OF=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_OF_PARTS=y +CONFIG_MTD_M25P80=y CONFIG_MTD_NAND=y CONFIG_MTD_NAND_FSL_ELBC=y CONFIG_MTD_NAND_FSL_IFC=y -CONFIG_MTD_NAND_IDS=y -CONFIG_MTD_NAND_ECC=y -CONFIG_MTD_M25P80=y CONFIG_PROC_DEVICETREE=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_NBD=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=131072 -CONFIG_MISC_DEVICES=y CONFIG_EEPROM_LEGACY=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y @@ -115,6 +106,7 @@ CONFIG_ATA=y CONFIG_SATA_AHCI=y CONFIG_SATA_FSL=y CONFIG_PATA_ALI=y +CONFIG_PATA_VIA=y CONFIG_NETDEVICES=y CONFIG_DUMMY=y CONFIG_FS_ENET=y @@ -134,7 +126,6 @@ CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=2 CONFIG_SERIAL_8250_RUNTIME_UARTS=2 -CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y CONFIG_SERIAL_8250_DETECT_IRQ=y CONFIG_SERIAL_8250_RSA=y @@ -183,7 +174,6 @@ CONFIG_HID_SAMSUNG=y CONFIG_HID_SONY=y CONFIG_HID_SUNPLUS=y CONFIG_USB=y -CONFIG_USB_DEVICEFS=y CONFIG_USB_MON=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_FSL=y @@ -229,18 +219,13 @@ CONFIG_QNX4FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_NFSD=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_MAC_PARTITION=y CONFIG_CRC_T10DIF=y CONFIG_DEBUG_FS=y CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_INFO=y -CONFIG_SYSCTL_SYSCALL_CHECK=y -CONFIG_IRQ_DOMAIN_DEBUG=y CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_SHA512=y diff --git a/trunk/arch/powerpc/configs/mpc85xx_smp_defconfig b/trunk/arch/powerpc/configs/mpc85xx_smp_defconfig index fdfa84dc908f..b0974e7e98ae 100644 --- a/trunk/arch/powerpc/configs/mpc85xx_smp_defconfig +++ b/trunk/arch/powerpc/configs/mpc85xx_smp_defconfig @@ -7,7 +7,9 @@ CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_AUDIT=y -CONFIG_SPARSE_IRQ=y +CONFIG_IRQ_DOMAIN_DEBUG=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 @@ -19,6 +21,8 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y # CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +CONFIG_MAC_PARTITION=y CONFIG_MPC8540_ADS=y CONFIG_MPC8560_ADS=y CONFIG_MPC85xx_CDS=y @@ -42,8 +46,6 @@ CONFIG_SBC8548=y CONFIG_QUICC_ENGINE=y CONFIG_QE_GPIO=y CONFIG_HIGHMEM=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y CONFIG_BINFMT_MISC=m CONFIG_MATH_EMULATION=y CONFIG_IRQ_ALL_CPUS=y @@ -77,36 +79,25 @@ CONFIG_INET_ESP=y CONFIG_IPV6=y CONFIG_IP_SCTP=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y -CONFIG_MTD_CFI=y CONFIG_FTL=y -CONFIG_MTD_GEN_PROBE=y -CONFIG_MTD_MAP_BANK_WIDTH_1=y -CONFIG_MTD_MAP_BANK_WIDTH_2=y -CONFIG_MTD_MAP_BANK_WIDTH_4=y -CONFIG_MTD_CFI_I1=y -CONFIG_MTD_CFI_I2=y +CONFIG_MTD_CFI=y CONFIG_MTD_CFI_INTELEXT=y CONFIG_MTD_CFI_AMDSTD=y -CONFIG_MTD_CFI_UTIL=y CONFIG_MTD_PHYSMAP_OF=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_OF_PARTS=y +CONFIG_MTD_M25P80=y CONFIG_MTD_NAND=y CONFIG_MTD_NAND_FSL_ELBC=y CONFIG_MTD_NAND_FSL_IFC=y -CONFIG_MTD_NAND_IDS=y -CONFIG_MTD_NAND_ECC=y -CONFIG_MTD_M25P80=y CONFIG_PROC_DEVICETREE=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_NBD=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=131072 -CONFIG_MISC_DEVICES=y CONFIG_EEPROM_LEGACY=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y @@ -137,7 +128,6 @@ CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=2 
CONFIG_SERIAL_8250_RUNTIME_UARTS=2 -CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y CONFIG_SERIAL_8250_DETECT_IRQ=y CONFIG_SERIAL_8250_RSA=y @@ -186,7 +176,6 @@ CONFIG_HID_SAMSUNG=y CONFIG_HID_SONY=y CONFIG_HID_SUNPLUS=y CONFIG_USB=y -CONFIG_USB_DEVICEFS=y CONFIG_USB_MON=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_FSL=y @@ -232,18 +221,13 @@ CONFIG_QNX4FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_NFSD=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_MAC_PARTITION=y CONFIG_CRC_T10DIF=y CONFIG_DEBUG_FS=y CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_INFO=y -CONFIG_SYSCTL_SYSCALL_CHECK=y -CONFIG_IRQ_DOMAIN_DEBUG=y CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_SHA512=y diff --git a/trunk/arch/powerpc/include/asm/cputable.h b/trunk/arch/powerpc/include/asm/cputable.h index 50d82c8a037f..b3c083de17ad 100644 --- a/trunk/arch/powerpc/include/asm/cputable.h +++ b/trunk/arch/powerpc/include/asm/cputable.h @@ -553,9 +553,7 @@ static inline int cpu_has_feature(unsigned long feature) & feature); } -#ifdef CONFIG_HAVE_HW_BREAKPOINT #define HBP_NUM 1 -#endif /* CONFIG_HAVE_HW_BREAKPOINT */ #endif /* !__ASSEMBLY__ */ diff --git a/trunk/arch/powerpc/include/asm/kvm_host.h b/trunk/arch/powerpc/include/asm/kvm_host.h index 50ea12fd7bf5..a8bf5c673a3c 100644 --- a/trunk/arch/powerpc/include/asm/kvm_host.h +++ b/trunk/arch/powerpc/include/asm/kvm_host.h @@ -33,6 +33,7 @@ #include #include #include +#include #define KVM_MAX_VCPUS NR_CPUS #define KVM_MAX_VCORES NR_CPUS diff --git a/trunk/arch/powerpc/include/asm/kvm_ppc.h b/trunk/arch/powerpc/include/asm/kvm_ppc.h index 0124937a23b9..e006f0bdea95 100644 --- a/trunk/arch/powerpc/include/asm/kvm_ppc.h +++ b/trunk/arch/powerpc/include/asm/kvm_ppc.h @@ -219,4 +219,16 @@ void kvmppc_claim_lpid(long lpid); void kvmppc_free_lpid(long lpid); void kvmppc_init_lpid(unsigned long nr_lpids); +static inline void kvmppc_mmu_flush_icache(pfn_t pfn) +{ + /* Clear i-cache for new pages */ + struct page *page; + page = pfn_to_page(pfn); + if (!test_bit(PG_arch_1, &page->flags)) { + flush_dcache_icache_page(page); + set_bit(PG_arch_1, &page->flags); + } +} + + #endif /* __POWERPC_KVM_PPC_H__ */ diff --git a/trunk/arch/powerpc/include/asm/mpic_msgr.h b/trunk/arch/powerpc/include/asm/mpic_msgr.h index 326d33ca55cd..d4f471fb1031 100644 --- a/trunk/arch/powerpc/include/asm/mpic_msgr.h +++ b/trunk/arch/powerpc/include/asm/mpic_msgr.h @@ -14,6 +14,7 @@ #include #include #include +#include struct mpic_msgr { u32 __iomem *base; diff --git a/trunk/arch/powerpc/include/asm/processor.h b/trunk/arch/powerpc/include/asm/processor.h index 53b6dfa83344..54b73a28c205 100644 --- a/trunk/arch/powerpc/include/asm/processor.h +++ b/trunk/arch/powerpc/include/asm/processor.h @@ -386,6 +386,7 @@ extern unsigned long cpuidle_disable; enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF}; extern int powersave_nap; /* set if nap mode can be used in idle loop */ +extern void power7_nap(void); #ifdef CONFIG_PSERIES_IDLE extern void update_smt_snooze_delay(int snooze); diff --git a/trunk/arch/powerpc/kernel/asm-offsets.c b/trunk/arch/powerpc/kernel/asm-offsets.c index 85b05c463fae..e8995727b1c1 100644 --- a/trunk/arch/powerpc/kernel/asm-offsets.c +++ b/trunk/arch/powerpc/kernel/asm-offsets.c @@ -76,6 +76,7 @@ int main(void) DEFINE(SIGSEGV, SIGSEGV); DEFINE(NMI_MASK, NMI_MASK); DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr)); + DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit)); #else 
DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); #endif /* CONFIG_PPC64 */ diff --git a/trunk/arch/powerpc/kernel/dbell.c b/trunk/arch/powerpc/kernel/dbell.c index 5b25c8060fd6..a892680668d8 100644 --- a/trunk/arch/powerpc/kernel/dbell.c +++ b/trunk/arch/powerpc/kernel/dbell.c @@ -28,6 +28,8 @@ void doorbell_setup_this_cpu(void) void doorbell_cause_ipi(int cpu, unsigned long data) { + /* Order previous accesses vs. msgsnd, which is treated as a store */ + mb(); ppc_msgsnd(PPC_DBELL, 0, data); } diff --git a/trunk/arch/powerpc/kernel/dma-iommu.c b/trunk/arch/powerpc/kernel/dma-iommu.c index 2d7bb8ced136..e4897523de41 100644 --- a/trunk/arch/powerpc/kernel/dma-iommu.c +++ b/trunk/arch/powerpc/kernel/dma-iommu.c @@ -83,11 +83,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask) return 0; } - if ((tbl->it_offset + tbl->it_size) > (mask >> IOMMU_PAGE_SHIFT)) { - dev_info(dev, "Warning: IOMMU window too big for device mask\n"); - dev_info(dev, "mask: 0x%08llx, table end: 0x%08lx\n", - mask, (tbl->it_offset + tbl->it_size) << - IOMMU_PAGE_SHIFT); + if (tbl->it_offset > (mask >> IOMMU_PAGE_SHIFT)) { + dev_info(dev, "Warning: IOMMU offset too big for device mask\n"); + dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n", + mask, tbl->it_offset << IOMMU_PAGE_SHIFT); return 0; } else return 1; diff --git a/trunk/arch/powerpc/kernel/entry_64.S b/trunk/arch/powerpc/kernel/entry_64.S index 4b01a25e29ef..b40e0b4815b3 100644 --- a/trunk/arch/powerpc/kernel/entry_64.S +++ b/trunk/arch/powerpc/kernel/entry_64.S @@ -370,6 +370,12 @@ _GLOBAL(ret_from_fork) li r3,0 b syscall_exit + .section ".toc","aw" +DSCR_DEFAULT: + .tc dscr_default[TC],dscr_default + + .section ".text" + /* * This routine switches between two different tasks. The process * state of one is saved on its kernel stack. 
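/*
 * Editor's sketch, not part of the patch: the dma-iommu.c hunk above stops
 * requiring that a device's DMA mask cover the whole IOMMU window and only
 * insists that the mask reach the window's starting offset.  Reduced to the
 * bare comparison (the helper name is invented):
 */
#include <linux/types.h>

static bool example_mask_reaches_window(u64 mask, unsigned long it_offset,
                                        unsigned int page_shift)
{
        /* highest IOMMU page number the device's mask can address */
        unsigned long max_page = mask >> page_shift;

        return it_offset <= max_page;
}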
Then the state @@ -509,9 +515,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) mr r1,r8 /* start using new stack pointer */ std r7,PACAKSAVE(r13) - ld r6,_CCR(r1) - mtcrf 0xFF,r6 - #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION ld r0,THREAD_VRSAVE(r4) @@ -520,14 +523,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_PPC64 BEGIN_FTR_SECTION + lwz r6,THREAD_DSCR_INHERIT(r4) + ld r7,DSCR_DEFAULT@toc(2) ld r0,THREAD_DSCR(r4) - cmpd r0,r25 - beq 1f + cmpwi r6,0 + bne 1f + ld r0,0(r7) +1: cmpd r0,r25 + beq 2f mtspr SPRN_DSCR,r0 -1: +2: END_FTR_SECTION_IFSET(CPU_FTR_DSCR) #endif + ld r6,_CCR(r1) + mtcrf 0xFF,r6 + /* r3-r13 are destroyed -- Cort */ REST_8GPRS(14, r1) REST_10GPRS(22, r1) diff --git a/trunk/arch/powerpc/kernel/exceptions-64s.S b/trunk/arch/powerpc/kernel/exceptions-64s.S index e894515e77bb..39aa97d3ff88 100644 --- a/trunk/arch/powerpc/kernel/exceptions-64s.S +++ b/trunk/arch/powerpc/kernel/exceptions-64s.S @@ -186,7 +186,7 @@ hardware_interrupt_hv: KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800) MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer) - MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer) + STD_EXCEPTION_HV(0x980, 0x982, hdecrementer) STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a) KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00) @@ -486,6 +486,7 @@ machine_check_common: STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ) STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt) + STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt) STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception) STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) diff --git a/trunk/arch/powerpc/kernel/hw_breakpoint.c b/trunk/arch/powerpc/kernel/hw_breakpoint.c index f3a82dde61db..956a4c496de9 100644 --- a/trunk/arch/powerpc/kernel/hw_breakpoint.c +++ b/trunk/arch/powerpc/kernel/hw_breakpoint.c @@ -253,7 +253,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args) /* Do not emulate user-space instructions, instead single-step them */ if (user_mode(regs)) { - bp->ctx->task->thread.last_hit_ubp = bp; + current->thread.last_hit_ubp = bp; regs->msr |= MSR_SE; goto out; } diff --git a/trunk/arch/powerpc/kernel/idle_power7.S b/trunk/arch/powerpc/kernel/idle_power7.S index 7140d838339e..e11863f4e595 100644 --- a/trunk/arch/powerpc/kernel/idle_power7.S +++ b/trunk/arch/powerpc/kernel/idle_power7.S @@ -28,7 +28,9 @@ _GLOBAL(power7_idle) lwz r4,ADDROFF(powersave_nap)(r3) cmpwi 0,r4,0 beqlr + /* fall through */ +_GLOBAL(power7_nap) /* NAP is a state loss, we create a regs frame on the * stack, fill it up with the state we care about and * stick a pointer to it in PACAR1. We really only diff --git a/trunk/arch/powerpc/kernel/kgdb.c b/trunk/arch/powerpc/kernel/kgdb.c index 782bd0a3c2f0..c470a40b29f5 100644 --- a/trunk/arch/powerpc/kernel/kgdb.c +++ b/trunk/arch/powerpc/kernel/kgdb.c @@ -25,6 +25,7 @@ #include #include #include +#include /* * This table contains the mapping between PowerPC hardware trap types, and @@ -101,6 +102,21 @@ static int computeSignal(unsigned int tt) return SIGHUP; /* default for things we don't know about */ } +/** + * + * kgdb_skipexception - Bail out of KGDB when we've been triggered. + * @exception: Exception vector number + * @regs: Current &struct pt_regs. + * + * On some architectures we need to skip a breakpoint exception when + * it occurs after a breakpoint has been removed. 
+ * + */ +int kgdb_skipexception(int exception, struct pt_regs *regs) +{ + return kgdb_isremovedbreak(regs->nip); +} + static int kgdb_call_nmi_hook(struct pt_regs *regs) { kgdb_nmicallback(raw_smp_processor_id(), regs); @@ -138,6 +154,8 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs) static int kgdb_singlestep(struct pt_regs *regs) { struct thread_info *thread_info, *exception_thread_info; + struct thread_info *backup_current_thread_info = \ + (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL); if (user_mode(regs)) return 0; @@ -155,13 +173,17 @@ static int kgdb_singlestep(struct pt_regs *regs) thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1)); exception_thread_info = current_thread_info(); - if (thread_info != exception_thread_info) + if (thread_info != exception_thread_info) { + /* Save the original current_thread_info. */ + memcpy(backup_current_thread_info, exception_thread_info, sizeof *thread_info); memcpy(exception_thread_info, thread_info, sizeof *thread_info); + } kgdb_handle_exception(0, SIGTRAP, 0, regs); if (thread_info != exception_thread_info) - memcpy(thread_info, exception_thread_info, sizeof *thread_info); + /* Restore current_thread_info lastly. */ + memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info); return 1; } @@ -410,7 +432,6 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code, #else linux_regs->msr |= MSR_SE; #endif - kgdb_single_step = 1; atomic_set(&kgdb_cpu_doing_single_step, raw_smp_processor_id()); } diff --git a/trunk/arch/powerpc/kernel/process.c b/trunk/arch/powerpc/kernel/process.c index 710f400476de..1a1f2ddfb581 100644 --- a/trunk/arch/powerpc/kernel/process.c +++ b/trunk/arch/powerpc/kernel/process.c @@ -802,16 +802,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, #endif /* CONFIG_PPC_STD_MMU_64 */ #ifdef CONFIG_PPC64 if (cpu_has_feature(CPU_FTR_DSCR)) { - if (current->thread.dscr_inherit) { - p->thread.dscr_inherit = 1; - p->thread.dscr = current->thread.dscr; - } else if (0 != dscr_default) { - p->thread.dscr_inherit = 1; - p->thread.dscr = dscr_default; - } else { - p->thread.dscr_inherit = 0; - p->thread.dscr = 0; - } + p->thread.dscr_inherit = current->thread.dscr_inherit; + p->thread.dscr = current->thread.dscr; } #endif diff --git a/trunk/arch/powerpc/kernel/smp.c b/trunk/arch/powerpc/kernel/smp.c index 0321007086f7..8d4214afc21d 100644 --- a/trunk/arch/powerpc/kernel/smp.c +++ b/trunk/arch/powerpc/kernel/smp.c @@ -198,8 +198,15 @@ void smp_muxed_ipi_message_pass(int cpu, int msg) struct cpu_messages *info = &per_cpu(ipi_message, cpu); char *message = (char *)&info->messages; + /* + * Order previous accesses before accesses in the IPI handler. + */ + smp_mb(); message[msg] = 1; - mb(); + /* + * cause_ipi functions are required to include a full barrier + * before doing whatever causes the IPI. 
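/*
 * Editor's sketch, not part of the patch: the doorbell, xics and
 * smp_muxed_ipi_message_pass() hunks in this series all apply the rule stated
 * in the comment above: publish the message first, then issue a full barrier,
 * then trigger the interrupt.  The generic shape, with an invented flag and
 * IPI primitive:
 */
#include <asm/barrier.h>

extern void example_cause_ipi(int cpu);         /* msgsnd/hcall wrapper, assumed */

static int example_ipi_pending;

static void example_send_ipi(int cpu)
{
        example_ipi_pending = 1;        /* make the work visible ...           */
        mb();                           /* ... and order it before the trigger */
        example_cause_ipi(cpu);
}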
+ */ smp_ops->cause_ipi(cpu, info->data); } @@ -211,7 +218,7 @@ irqreturn_t smp_ipi_demux(void) mb(); /* order any irq clear */ do { - all = xchg_local(&info->messages, 0); + all = xchg(&info->messages, 0); #ifdef __BIG_ENDIAN if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION))) diff --git a/trunk/arch/powerpc/kernel/syscalls.c b/trunk/arch/powerpc/kernel/syscalls.c index f2496f2faecc..4e3cc47f26b9 100644 --- a/trunk/arch/powerpc/kernel/syscalls.c +++ b/trunk/arch/powerpc/kernel/syscalls.c @@ -107,11 +107,11 @@ long ppc64_personality(unsigned long personality) long ret; if (personality(current->personality) == PER_LINUX32 - && personality == PER_LINUX) - personality = PER_LINUX32; + && personality(personality) == PER_LINUX) + personality = (personality & ~PER_MASK) | PER_LINUX32; ret = sys_personality(personality); - if (ret == PER_LINUX32) - ret = PER_LINUX; + if (personality(ret) == PER_LINUX32) + ret = (ret & ~PER_MASK) | PER_LINUX; return ret; } #endif diff --git a/trunk/arch/powerpc/kernel/sysfs.c b/trunk/arch/powerpc/kernel/sysfs.c index 3529446c2abd..8302af649219 100644 --- a/trunk/arch/powerpc/kernel/sysfs.c +++ b/trunk/arch/powerpc/kernel/sysfs.c @@ -194,6 +194,14 @@ static ssize_t show_dscr_default(struct device *dev, return sprintf(buf, "%lx\n", dscr_default); } +static void update_dscr(void *dummy) +{ + if (!current->thread.dscr_inherit) { + current->thread.dscr = dscr_default; + mtspr(SPRN_DSCR, dscr_default); + } +} + static ssize_t __used store_dscr_default(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -206,6 +214,8 @@ static ssize_t __used store_dscr_default(struct device *dev, return -EINVAL; dscr_default = val; + on_each_cpu(update_dscr, NULL, 1); + return count; } diff --git a/trunk/arch/powerpc/kernel/time.c b/trunk/arch/powerpc/kernel/time.c index be171ee73bf8..e49e93191b69 100644 --- a/trunk/arch/powerpc/kernel/time.c +++ b/trunk/arch/powerpc/kernel/time.c @@ -535,6 +535,15 @@ void timer_interrupt(struct pt_regs * regs) trace_timer_interrupt_exit(regs); } +/* + * Hypervisor decrementer interrupts shouldn't occur but are sometimes + * left pending on exit from a KVM guest. We don't need to do anything + * to clear them, as they are edge-triggered. 
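/*
 * Editor's sketch, not part of the patch: store_dscr_default() above uses
 * on_each_cpu() so a newly written default DSCR takes effect on every CPU at
 * once, while tasks that explicitly set their own DSCR (dscr_inherit) are
 * left alone.  The general shape of that pattern, with an invented value and
 * callback:
 */
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/reg.h>

static unsigned long example_new_default;

static void example_apply_on_this_cpu(void *unused)
{
        if (!current->thread.dscr_inherit)      /* task manages its own DSCR */
                mtspr(SPRN_DSCR, example_new_default);
}

static void example_set_default(unsigned long val)
{
        example_new_default = val;
        on_each_cpu(example_apply_on_this_cpu, NULL, 1);        /* 1 == wait for all CPUs */
}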
+ */ +void hdec_interrupt(struct pt_regs *regs) +{ +} + #ifdef CONFIG_SUSPEND static void generic_suspend_disable_irqs(void) { diff --git a/trunk/arch/powerpc/kernel/traps.c b/trunk/arch/powerpc/kernel/traps.c index 158972341a2d..ae0843fa7a61 100644 --- a/trunk/arch/powerpc/kernel/traps.c +++ b/trunk/arch/powerpc/kernel/traps.c @@ -972,8 +972,9 @@ static int emulate_instruction(struct pt_regs *regs) cpu_has_feature(CPU_FTR_DSCR)) { PPC_WARN_EMULATED(mtdscr, regs); rd = (instword >> 21) & 0x1f; - mtspr(SPRN_DSCR, regs->gpr[rd]); + current->thread.dscr = regs->gpr[rd]; current->thread.dscr_inherit = 1; + mtspr(SPRN_DSCR, current->thread.dscr); return 0; } #endif diff --git a/trunk/arch/powerpc/kvm/book3s_32_mmu_host.c b/trunk/arch/powerpc/kvm/book3s_32_mmu_host.c index f922c29bb234..837f13e7b6bf 100644 --- a/trunk/arch/powerpc/kvm/book3s_32_mmu_host.c +++ b/trunk/arch/powerpc/kvm/book3s_32_mmu_host.c @@ -211,6 +211,9 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) pteg1 |= PP_RWRX; } + if (orig_pte->may_execute) + kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT); + local_irq_disable(); if (pteg[rr]) { diff --git a/trunk/arch/powerpc/kvm/book3s_64_mmu_host.c b/trunk/arch/powerpc/kvm/book3s_64_mmu_host.c index 10fc8ec9d2a8..0688b6b39585 100644 --- a/trunk/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/trunk/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -126,6 +126,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) if (!orig_pte->may_execute) rflags |= HPTE_R_N; + else + kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT); hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M); diff --git a/trunk/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/trunk/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 5a84c8d3d040..44b72feaff7d 100644 --- a/trunk/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/trunk/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1421,13 +1421,13 @@ _GLOBAL(kvmppc_h_cede) sync /* order setting ceded vs. testing prodded */ lbz r5,VCPU_PRODDED(r3) cmpwi r5,0 - bne 1f + bne kvm_cede_prodded li r0,0 /* set trap to 0 to say hcall is handled */ stw r0,VCPU_TRAP(r3) li r0,H_SUCCESS std r0,VCPU_GPR(R3)(r3) BEGIN_FTR_SECTION - b 2f /* just send it up to host on 970 */ + b kvm_cede_exit /* just send it up to host on 970 */ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) /* @@ -1446,7 +1446,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) or r4,r4,r0 PPC_POPCNTW(R7,R4) cmpw r7,r8 - bge 2f + bge kvm_cede_exit stwcx. r4,0,r6 bne 31b li r0,1 @@ -1555,7 +1555,8 @@ kvm_end_cede: b hcall_real_fallback /* cede when already previously prodded case */ -1: li r0,0 +kvm_cede_prodded: + li r0,0 stb r0,VCPU_PRODDED(r3) sync /* order testing prodded vs. 
clearing ceded */ stb r0,VCPU_CEDED(r3) @@ -1563,7 +1564,8 @@ kvm_end_cede: blr /* we've ceded but we want to give control to the host */ -2: li r3,H_TOO_HARD +kvm_cede_exit: + li r3,H_TOO_HARD blr secondary_too_late: diff --git a/trunk/arch/powerpc/kvm/e500_tlb.c b/trunk/arch/powerpc/kvm/e500_tlb.c index c510fc961302..a2b66717813d 100644 --- a/trunk/arch/powerpc/kvm/e500_tlb.c +++ b/trunk/arch/powerpc/kvm/e500_tlb.c @@ -322,11 +322,11 @@ static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500) { if (vcpu_e500->g2h_tlb1_map) - memset(vcpu_e500->g2h_tlb1_map, - sizeof(u64) * vcpu_e500->gtlb_params[1].entries, 0); + memset(vcpu_e500->g2h_tlb1_map, 0, + sizeof(u64) * vcpu_e500->gtlb_params[1].entries); if (vcpu_e500->h2g_tlb1_rmap) - memset(vcpu_e500->h2g_tlb1_rmap, - sizeof(unsigned int) * host_tlb_params[1].entries, 0); + memset(vcpu_e500->h2g_tlb1_rmap, 0, + sizeof(unsigned int) * host_tlb_params[1].entries); } static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) @@ -539,6 +539,9 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, ref, gvaddr, stlbe); + + /* Clear i-cache for new pages */ + kvmppc_mmu_flush_icache(pfn); } /* XXX only map the one-one case, for now use TLB0 */ diff --git a/trunk/arch/powerpc/lib/code-patching.c b/trunk/arch/powerpc/lib/code-patching.c index dd223b3eb333..17e5b2364312 100644 --- a/trunk/arch/powerpc/lib/code-patching.c +++ b/trunk/arch/powerpc/lib/code-patching.c @@ -20,7 +20,7 @@ int patch_instruction(unsigned int *addr, unsigned int instr) { int err; - err = __put_user(instr, addr); + __put_user_size(instr, addr, 4, err); if (err) return err; asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (addr)); diff --git a/trunk/arch/powerpc/lib/copyuser_power7.S b/trunk/arch/powerpc/lib/copyuser_power7.S index f9ede7c6606e..0d24ff15f5f6 100644 --- a/trunk/arch/powerpc/lib/copyuser_power7.S +++ b/trunk/arch/powerpc/lib/copyuser_power7.S @@ -288,7 +288,7 @@ err1; stb r0,0(r3) std r0,16(r1) stdu r1,-STACKFRAMESIZE(r1) bl .enter_vmx_usercopy - cmpwi r3,0 + cmpwi cr1,r3,0 ld r0,STACKFRAMESIZE+16(r1) ld r3,STACKFRAMESIZE+48(r1) ld r4,STACKFRAMESIZE+56(r1) @@ -326,38 +326,7 @@ err1; stb r0,0(r3) dcbt r0,r8,0b01010 /* GO */ .machine pop - /* - * We prefetch both the source and destination using enhanced touch - * instructions. We use a stream ID of 0 for the load side and - * 1 for the store side. 
- */ - clrrdi r6,r4,7 - clrrdi r9,r3,7 - ori r9,r9,1 /* stream=1 */ - - srdi r7,r5,7 /* length in cachelines, capped at 0x3FF */ - cmpldi cr1,r7,0x3FF - ble cr1,1f - li r7,0x3FF -1: lis r0,0x0E00 /* depth=7 */ - sldi r7,r7,7 - or r7,r7,r0 - ori r10,r7,1 /* stream=1 */ - - lis r8,0x8000 /* GO=1 */ - clrldi r8,r8,32 - -.machine push -.machine "power4" - dcbt r0,r6,0b01000 - dcbt r0,r7,0b01010 - dcbtst r0,r9,0b01000 - dcbtst r0,r10,0b01010 - eieio - dcbt r0,r8,0b01010 /* GO */ -.machine pop - - beq .Lunwind_stack_nonvmx_copy + beq cr1,.Lunwind_stack_nonvmx_copy /* * If source and destination are not relatively aligned we use a diff --git a/trunk/arch/powerpc/lib/memcpy_power7.S b/trunk/arch/powerpc/lib/memcpy_power7.S index 0efdc51bc716..7ba6c96de778 100644 --- a/trunk/arch/powerpc/lib/memcpy_power7.S +++ b/trunk/arch/powerpc/lib/memcpy_power7.S @@ -222,7 +222,7 @@ _GLOBAL(memcpy_power7) std r0,16(r1) stdu r1,-STACKFRAMESIZE(r1) bl .enter_vmx_copy - cmpwi r3,0 + cmpwi cr1,r3,0 ld r0,STACKFRAMESIZE+16(r1) ld r3,STACKFRAMESIZE+48(r1) ld r4,STACKFRAMESIZE+56(r1) @@ -260,7 +260,7 @@ _GLOBAL(memcpy_power7) dcbt r0,r8,0b01010 /* GO */ .machine pop - beq .Lunwind_stack_nonvmx_copy + beq cr1,.Lunwind_stack_nonvmx_copy /* * If source and destination are not relatively aligned we use a diff --git a/trunk/arch/powerpc/mm/mem.c b/trunk/arch/powerpc/mm/mem.c index baaafde7d135..fbdad0e3929a 100644 --- a/trunk/arch/powerpc/mm/mem.c +++ b/trunk/arch/powerpc/mm/mem.c @@ -469,6 +469,7 @@ void flush_dcache_icache_page(struct page *page) __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT); #endif } +EXPORT_SYMBOL(flush_dcache_icache_page); void clear_user_page(void *page, unsigned long vaddr, struct page *pg) { diff --git a/trunk/arch/powerpc/mm/numa.c b/trunk/arch/powerpc/mm/numa.c index 39b159751c35..59213cfaeca9 100644 --- a/trunk/arch/powerpc/mm/numa.c +++ b/trunk/arch/powerpc/mm/numa.c @@ -1436,11 +1436,11 @@ static long vphn_get_associativity(unsigned long cpu, /* * Update the node maps and sysfs entries for each cpu whose home node - * has changed. + * has changed. Returns 1 when the topology has changed, and 0 otherwise. 
*/ int arch_update_cpu_topology(void) { - int cpu, nid, old_nid; + int cpu, nid, old_nid, changed = 0; unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0}; struct device *dev; @@ -1466,9 +1466,10 @@ int arch_update_cpu_topology(void) dev = get_cpu_device(cpu); if (dev) kobject_uevent(&dev->kobj, KOBJ_CHANGE); + changed = 1; } - return 1; + return changed; } static void topology_work_fn(struct work_struct *work) diff --git a/trunk/arch/powerpc/perf/core-book3s.c b/trunk/arch/powerpc/perf/core-book3s.c index 77b49ddda9d3..7cd2dbd6e4c4 100644 --- a/trunk/arch/powerpc/perf/core-book3s.c +++ b/trunk/arch/powerpc/perf/core-book3s.c @@ -1431,7 +1431,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (!event->hw.idx || is_limited_pmc(event->hw.idx)) continue; val = read_pmc(event->hw.idx); - if ((int)val < 0) { + if (pmc_overflow(val)) { /* event has overflowed */ found = 1; record_and_restart(event, val, regs); diff --git a/trunk/arch/powerpc/platforms/cell/spufs/inode.c b/trunk/arch/powerpc/platforms/cell/spufs/inode.c index d544d7816df3..dba1ce235da5 100644 --- a/trunk/arch/powerpc/platforms/cell/spufs/inode.c +++ b/trunk/arch/powerpc/platforms/cell/spufs/inode.c @@ -186,10 +186,13 @@ static void spufs_prune_dir(struct dentry *dir) static int spufs_rmdir(struct inode *parent, struct dentry *dir) { /* remove all entries */ + int res; spufs_prune_dir(dir); d_drop(dir); - - return simple_rmdir(parent, dir); + res = simple_rmdir(parent, dir); + /* We have to give up the mm_struct */ + spu_forget(SPUFS_I(dir->d_inode)->i_ctx); + return res; } static int spufs_fill_dir(struct dentry *dir, @@ -245,9 +248,6 @@ static int spufs_dir_close(struct inode *inode, struct file *file) mutex_unlock(&parent->i_mutex); WARN_ON(ret); - /* We have to give up the mm_struct */ - spu_forget(ctx); - return dcache_dir_close(inode, file); } @@ -450,28 +450,24 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, struct spu_context *neighbor; struct path path = {.mnt = mnt, .dentry = dentry}; - ret = -EPERM; if ((flags & SPU_CREATE_NOSCHED) && !capable(CAP_SYS_NICE)) - goto out_unlock; + return -EPERM; - ret = -EINVAL; if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE)) == SPU_CREATE_ISOLATE) - goto out_unlock; + return -EINVAL; - ret = -ENODEV; if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader) - goto out_unlock; + return -ENODEV; gang = NULL; neighbor = NULL; affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU); if (affinity) { gang = SPUFS_I(inode)->i_gang; - ret = -EINVAL; if (!gang) - goto out_unlock; + return -EINVAL; mutex_lock(&gang->aff_mutex); neighbor = spufs_assert_affinity(flags, gang, aff_filp); if (IS_ERR(neighbor)) { @@ -492,22 +488,12 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, } ret = spufs_context_open(&path); - if (ret < 0) { + if (ret < 0) WARN_ON(spufs_rmdir(inode, dentry)); - if (affinity) - mutex_unlock(&gang->aff_mutex); - mutex_unlock(&inode->i_mutex); - spu_forget(SPUFS_I(dentry->d_inode)->i_ctx); - goto out; - } out_aff_unlock: if (affinity) mutex_unlock(&gang->aff_mutex); -out_unlock: - mutex_unlock(&inode->i_mutex); -out: - dput(dentry); return ret; } @@ -580,18 +566,13 @@ static int spufs_create_gang(struct inode *inode, int ret; ret = spufs_mkgang(inode, dentry, mode & S_IRWXUGO); - if (ret) - goto out; - - ret = spufs_gang_open(&path); - if (ret < 0) { - int err = simple_rmdir(inode, dentry); - WARN_ON(err); + if (!ret) { + ret = spufs_gang_open(&path); + if (ret < 0) { + int err = simple_rmdir(inode, dentry); + 
WARN_ON(err); + } } - -out: - mutex_unlock(&inode->i_mutex); - dput(dentry); return ret; } @@ -601,40 +582,32 @@ static struct file_system_type spufs_type; long spufs_create(struct path *path, struct dentry *dentry, unsigned int flags, umode_t mode, struct file *filp) { + struct inode *dir = path->dentry->d_inode; int ret; - ret = -EINVAL; /* check if we are on spufs */ if (path->dentry->d_sb->s_type != &spufs_type) - goto out; + return -EINVAL; /* don't accept undefined flags */ if (flags & (~SPU_CREATE_FLAG_ALL)) - goto out; + return -EINVAL; /* only threads can be underneath a gang */ - if (path->dentry != path->dentry->d_sb->s_root) { - if ((flags & SPU_CREATE_GANG) || - !SPUFS_I(path->dentry->d_inode)->i_gang) - goto out; - } + if (path->dentry != path->dentry->d_sb->s_root) + if ((flags & SPU_CREATE_GANG) || !SPUFS_I(dir)->i_gang) + return -EINVAL; mode &= ~current_umask(); if (flags & SPU_CREATE_GANG) - ret = spufs_create_gang(path->dentry->d_inode, - dentry, path->mnt, mode); + ret = spufs_create_gang(dir, dentry, path->mnt, mode); else - ret = spufs_create_context(path->dentry->d_inode, - dentry, path->mnt, flags, mode, + ret = spufs_create_context(dir, dentry, path->mnt, flags, mode, filp); if (ret >= 0) - fsnotify_mkdir(path->dentry->d_inode, dentry); - return ret; + fsnotify_mkdir(dir, dentry); -out: - mutex_unlock(&path->dentry->d_inode->i_mutex); - dput(dentry); return ret; } diff --git a/trunk/arch/powerpc/platforms/cell/spufs/syscalls.c b/trunk/arch/powerpc/platforms/cell/spufs/syscalls.c index 5665dcc382c7..5b7d8ffbf890 100644 --- a/trunk/arch/powerpc/platforms/cell/spufs/syscalls.c +++ b/trunk/arch/powerpc/platforms/cell/spufs/syscalls.c @@ -70,7 +70,7 @@ static long do_spu_create(const char __user *pathname, unsigned int flags, ret = PTR_ERR(dentry); if (!IS_ERR(dentry)) { ret = spufs_create(&path, dentry, flags, mode, neighbor); - path_put(&path); + done_path_create(&path, dentry); } return ret; diff --git a/trunk/arch/powerpc/platforms/powernv/smp.c b/trunk/arch/powerpc/platforms/powernv/smp.c index 3ef46254c35b..7698b6e13c57 100644 --- a/trunk/arch/powerpc/platforms/powernv/smp.c +++ b/trunk/arch/powerpc/platforms/powernv/smp.c @@ -106,14 +106,6 @@ static void pnv_smp_cpu_kill_self(void) { unsigned int cpu; - /* If powersave_nap is enabled, use NAP mode, else just - * spin aimlessly - */ - if (!powersave_nap) { - generic_mach_cpu_die(); - return; - } - /* Standard hot unplug procedure */ local_irq_disable(); idle_task_exit(); @@ -128,7 +120,7 @@ static void pnv_smp_cpu_kill_self(void) */ mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1); while (!generic_check_cpu_restart(cpu)) { - power7_idle(); + power7_nap(); if (!generic_check_cpu_restart(cpu)) { DBG("CPU%d Unexpected exit while offline !\n", cpu); /* We may be getting an IPI, so we re-enable diff --git a/trunk/arch/powerpc/sysdev/fsl_pci.c b/trunk/arch/powerpc/sysdev/fsl_pci.c index a7b2a600d0a4..c37f46136321 100644 --- a/trunk/arch/powerpc/sysdev/fsl_pci.c +++ b/trunk/arch/powerpc/sysdev/fsl_pci.c @@ -465,7 +465,7 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary) iounmap(hose->cfg_data); iounmap(hose->cfg_addr); pcibios_free_controller(hose); - return 0; + return -ENODEV; } setup_pci_cmd(hose); @@ -827,6 +827,7 @@ struct device_node *fsl_pci_primary; void __devinit fsl_pci_init(void) { + int ret; struct device_node *node; struct pci_controller *hose; dma_addr_t max = 0xffffffff; @@ -855,10 +856,12 @@ void __devinit fsl_pci_init(void) if (!fsl_pci_primary) fsl_pci_primary = node; - 
fsl_add_bridge(node, fsl_pci_primary == node); - hose = pci_find_hose_for_OF_device(node); - max = min(max, hose->dma_window_base_cur + - hose->dma_window_size); + ret = fsl_add_bridge(node, fsl_pci_primary == node); + if (ret == 0) { + hose = pci_find_hose_for_OF_device(node); + max = min(max, hose->dma_window_base_cur + + hose->dma_window_size); + } } } diff --git a/trunk/arch/powerpc/sysdev/mpic_msgr.c b/trunk/arch/powerpc/sysdev/mpic_msgr.c index 483d8fa72e8b..e961f8c4a8f0 100644 --- a/trunk/arch/powerpc/sysdev/mpic_msgr.c +++ b/trunk/arch/powerpc/sysdev/mpic_msgr.c @@ -14,6 +14,9 @@ #include #include #include +#include +#include +#include #include #include #include diff --git a/trunk/arch/powerpc/sysdev/xics/icp-hv.c b/trunk/arch/powerpc/sysdev/xics/icp-hv.c index 14469cf9df68..df0fc5821469 100644 --- a/trunk/arch/powerpc/sysdev/xics/icp-hv.c +++ b/trunk/arch/powerpc/sysdev/xics/icp-hv.c @@ -65,7 +65,11 @@ static inline void icp_hv_set_xirr(unsigned int value) static inline void icp_hv_set_qirr(int n_cpu , u8 value) { int hw_cpu = get_hard_smp_processor_id(n_cpu); - long rc = plpar_hcall_norets(H_IPI, hw_cpu, value); + long rc; + + /* Make sure all previous accesses are ordered before IPI sending */ + mb(); + rc = plpar_hcall_norets(H_IPI, hw_cpu, value); if (rc != H_SUCCESS) { pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x " "returned %ld\n", __func__, n_cpu, hw_cpu, value, rc); diff --git a/trunk/arch/powerpc/xmon/xmon.c b/trunk/arch/powerpc/xmon/xmon.c index eab3492a45c5..9b49c65ee7a4 100644 --- a/trunk/arch/powerpc/xmon/xmon.c +++ b/trunk/arch/powerpc/xmon/xmon.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -894,13 +895,13 @@ cmds(struct pt_regs *excp) #endif default: printf("Unrecognized command: "); - do { + do { if (' ' < cmd && cmd <= '~') putchar(cmd); else printf("\\x%x", cmd); cmd = inchar(); - } while (cmd != '\n'); + } while (cmd != '\n'); printf(" (type ? for help)\n"); break; } @@ -1097,7 +1098,7 @@ static long check_bp_loc(unsigned long addr) return 1; } -static char *breakpoint_help_string = +static char *breakpoint_help_string = "Breakpoint command usage:\n" "b show breakpoints\n" "b [cnt] set breakpoint at given instr addr\n" @@ -1193,7 +1194,7 @@ bpt_cmds(void) default: termch = cmd; - cmd = skipbl(); + cmd = skipbl(); if (cmd == '?') { printf(breakpoint_help_string); break; @@ -1359,7 +1360,7 @@ static void xmon_show_stack(unsigned long sp, unsigned long lr, sp + REGS_OFFSET); break; } - printf("--- Exception: %lx %s at ", regs.trap, + printf("--- Exception: %lx %s at ", regs.trap, getvecname(TRAP(®s))); pc = regs.nip; lr = regs.link; @@ -1623,14 +1624,14 @@ static void super_regs(void) cmd = skipbl(); if (cmd == '\n') { - unsigned long sp, toc; + unsigned long sp, toc; asm("mr %0,1" : "=r" (sp) :); asm("mr %0,2" : "=r" (toc) :); printf("msr = "REG" sprg0= "REG"\n", mfmsr(), mfspr(SPRN_SPRG0)); printf("pvr = "REG" sprg1= "REG"\n", - mfspr(SPRN_PVR), mfspr(SPRN_SPRG1)); + mfspr(SPRN_PVR), mfspr(SPRN_SPRG1)); printf("dec = "REG" sprg2= "REG"\n", mfspr(SPRN_DEC), mfspr(SPRN_SPRG2)); printf("sp = "REG" sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3)); @@ -1783,7 +1784,7 @@ byterev(unsigned char *val, int size) static int brev; static int mnoread; -static char *memex_help_string = +static char *memex_help_string = "Memory examine command usage:\n" "m [addr] [flags] examine/change memory\n" " addr is optional. 
will start where left off.\n" @@ -1798,7 +1799,7 @@ static char *memex_help_string = "NOTE: flags are saved as defaults\n" ""; -static char *memex_subcmd_help_string = +static char *memex_subcmd_help_string = "Memory examine subcommands:\n" " hexval write this val to current location\n" " 'string' write chars from string to this location\n" @@ -2064,7 +2065,7 @@ prdump(unsigned long adrs, long ndump) nr = mread(adrs, temp, r); adrs += nr; for (m = 0; m < r; ++m) { - if ((m & (sizeof(long) - 1)) == 0 && m > 0) + if ((m & (sizeof(long) - 1)) == 0 && m > 0) putchar(' '); if (m < nr) printf("%.2x", temp[m]); @@ -2072,7 +2073,7 @@ prdump(unsigned long adrs, long ndump) printf("%s", fault_chars[fault_type]); } for (; m < 16; ++m) { - if ((m & (sizeof(long) - 1)) == 0) + if ((m & (sizeof(long) - 1)) == 0) putchar(' '); printf(" "); } @@ -2148,45 +2149,28 @@ print_address(unsigned long addr) void dump_log_buf(void) { - const unsigned long size = 128; - unsigned long end, addr; - unsigned char buf[size + 1]; - - addr = 0; - buf[size] = '\0'; - - if (setjmp(bus_error_jmp) != 0) { - printf("Unable to lookup symbol __log_buf!\n"); - return; - } - - catch_memory_errors = 1; - sync(); - addr = kallsyms_lookup_name("__log_buf"); - - if (! addr) - printf("Symbol __log_buf not found!\n"); - else { - end = addr + (1 << CONFIG_LOG_BUF_SHIFT); - while (addr < end) { - if (! mread(addr, buf, size)) { - printf("Can't read memory at address 0x%lx\n", addr); - break; - } - - printf("%s", buf); - - if (strlen(buf) < size) - break; - - addr += size; - } - } - - sync(); - /* wait a little while to see if we get a machine check */ - __delay(200); - catch_memory_errors = 0; + struct kmsg_dumper dumper = { .active = 1 }; + unsigned char buf[128]; + size_t len; + + if (setjmp(bus_error_jmp) != 0) { + printf("Error dumping printk buffer!\n"); + return; + } + + catch_memory_errors = 1; + sync(); + + kmsg_dump_rewind_nolock(&dumper); + while (kmsg_dump_get_line_nolock(&dumper, false, buf, sizeof(buf), &len)) { + buf[len] = '\0'; + printf("%s", buf); + } + + sync(); + /* wait a little while to see if we get a machine check */ + __delay(200); + catch_memory_errors = 0; } /* diff --git a/trunk/arch/s390/Kconfig b/trunk/arch/s390/Kconfig index 76de6b68487c..107610e01a29 100644 --- a/trunk/arch/s390/Kconfig +++ b/trunk/arch/s390/Kconfig @@ -124,6 +124,7 @@ config S390 select GENERIC_TIME_VSYSCALL select GENERIC_CLOCKEVENTS select KTIME_SCALAR if 32BIT + select HAVE_ARCH_SECCOMP_FILTER config SCHED_OMIT_FRAME_POINTER def_bool y diff --git a/trunk/arch/s390/include/asm/elf.h b/trunk/arch/s390/include/asm/elf.h index 32e8449640fa..9b94a160fe7f 100644 --- a/trunk/arch/s390/include/asm/elf.h +++ b/trunk/arch/s390/include/asm/elf.h @@ -180,7 +180,8 @@ extern char elf_platform[]; #define ELF_PLATFORM (elf_platform) #ifndef CONFIG_64BIT -#define SET_PERSONALITY(ex) set_personality(PER_LINUX) +#define SET_PERSONALITY(ex) \ + set_personality(PER_LINUX | (current->personality & (~PER_MASK))) #else /* CONFIG_64BIT */ #define SET_PERSONALITY(ex) \ do { \ diff --git a/trunk/arch/s390/include/asm/posix_types.h b/trunk/arch/s390/include/asm/posix_types.h index 7bcc14e395f0..bf2a2ad2f800 100644 --- a/trunk/arch/s390/include/asm/posix_types.h +++ b/trunk/arch/s390/include/asm/posix_types.h @@ -13,6 +13,7 @@ */ typedef unsigned long __kernel_size_t; +typedef long __kernel_ssize_t; #define __kernel_size_t __kernel_size_t typedef unsigned short __kernel_old_dev_t; @@ -25,7 +26,6 @@ typedef unsigned short __kernel_mode_t; typedef unsigned short 
__kernel_ipc_pid_t; typedef unsigned short __kernel_uid_t; typedef unsigned short __kernel_gid_t; -typedef int __kernel_ssize_t; typedef int __kernel_ptrdiff_t; #else /* __s390x__ */ @@ -35,7 +35,6 @@ typedef unsigned int __kernel_mode_t; typedef int __kernel_ipc_pid_t; typedef unsigned int __kernel_uid_t; typedef unsigned int __kernel_gid_t; -typedef long __kernel_ssize_t; typedef long __kernel_ptrdiff_t; typedef unsigned long __kernel_sigset_t; /* at least 32 bits */ diff --git a/trunk/arch/s390/include/asm/smp.h b/trunk/arch/s390/include/asm/smp.h index a0a8340daafa..ce26ac3cb162 100644 --- a/trunk/arch/s390/include/asm/smp.h +++ b/trunk/arch/s390/include/asm/smp.h @@ -44,6 +44,7 @@ static inline void smp_call_online_cpu(void (*func)(void *), void *data) } static inline int smp_find_processor_id(int address) { return 0; } +static inline int smp_store_status(int cpu) { return 0; } static inline int smp_vcpu_scheduled(int cpu) { return 1; } static inline void smp_yield_cpu(int cpu) { } static inline void smp_yield(void) { } diff --git a/trunk/arch/s390/include/asm/sparsemem.h b/trunk/arch/s390/include/asm/sparsemem.h index 0fb34027d3f6..a60d085ddb4d 100644 --- a/trunk/arch/s390/include/asm/sparsemem.h +++ b/trunk/arch/s390/include/asm/sparsemem.h @@ -4,13 +4,11 @@ #ifdef CONFIG_64BIT #define SECTION_SIZE_BITS 28 -#define MAX_PHYSADDR_BITS 46 #define MAX_PHYSMEM_BITS 46 #else #define SECTION_SIZE_BITS 25 -#define MAX_PHYSADDR_BITS 31 #define MAX_PHYSMEM_BITS 31 #endif /* CONFIG_64BIT */ diff --git a/trunk/arch/s390/include/asm/syscall.h b/trunk/arch/s390/include/asm/syscall.h index fb214dd9b7e0..fe7b99759e12 100644 --- a/trunk/arch/s390/include/asm/syscall.h +++ b/trunk/arch/s390/include/asm/syscall.h @@ -12,6 +12,7 @@ #ifndef _ASM_SYSCALL_H #define _ASM_SYSCALL_H 1 +#include #include #include #include @@ -87,4 +88,13 @@ static inline void syscall_set_arguments(struct task_struct *task, regs->orig_gpr2 = args[0]; } +static inline int syscall_get_arch(struct task_struct *task, + struct pt_regs *regs) +{ +#ifdef CONFIG_COMPAT + if (test_tsk_thread_flag(task, TIF_31BIT)) + return AUDIT_ARCH_S390; +#endif + return sizeof(long) == 8 ? 
AUDIT_ARCH_S390X : AUDIT_ARCH_S390; +} #endif /* _ASM_SYSCALL_H */ diff --git a/trunk/arch/s390/kernel/compat_linux.c b/trunk/arch/s390/kernel/compat_linux.c index d1225089a4bb..f606d935f495 100644 --- a/trunk/arch/s390/kernel/compat_linux.c +++ b/trunk/arch/s390/kernel/compat_linux.c @@ -620,7 +620,6 @@ asmlinkage unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg) return -EFAULT; if (a.offset & ~PAGE_MASK) return -EINVAL; - a.addr = (unsigned long) compat_ptr(a.addr); return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); } @@ -631,7 +630,6 @@ asmlinkage long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg) if (copy_from_user(&a, arg, sizeof(a))) return -EFAULT; - a.addr = (unsigned long) compat_ptr(a.addr); return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); } diff --git a/trunk/arch/s390/kernel/compat_wrapper.S b/trunk/arch/s390/kernel/compat_wrapper.S index e835d6d5b7fd..2d82cfcbce5b 100644 --- a/trunk/arch/s390/kernel/compat_wrapper.S +++ b/trunk/arch/s390/kernel/compat_wrapper.S @@ -1635,7 +1635,7 @@ ENTRY(compat_sys_process_vm_readv_wrapper) llgfr %r6,%r6 # unsigned long llgf %r0,164(%r15) # unsigned long stg %r0,160(%r15) - jg sys_process_vm_readv + jg compat_sys_process_vm_readv ENTRY(compat_sys_process_vm_writev_wrapper) lgfr %r2,%r2 # compat_pid_t @@ -1645,4 +1645,4 @@ ENTRY(compat_sys_process_vm_writev_wrapper) llgfr %r6,%r6 # unsigned long llgf %r0,164(%r15) # unsigned long stg %r0,160(%r15) - jg sys_process_vm_writev + jg compat_sys_process_vm_writev diff --git a/trunk/arch/s390/kernel/ptrace.c b/trunk/arch/s390/kernel/ptrace.c index f4eb37680b91..e4be113fbac6 100644 --- a/trunk/arch/s390/kernel/ptrace.c +++ b/trunk/arch/s390/kernel/ptrace.c @@ -719,7 +719,11 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) long ret = 0; /* Do the secure computing check first. */ - secure_computing_strict(regs->gprs[2]); + if (secure_computing(regs->gprs[2])) { + /* seccomp failures shouldn't expose any additional code. */ + ret = -1; + goto out; + } /* * The sysc_tracesys code in entry.S stored the system @@ -745,6 +749,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) regs->gprs[2], regs->orig_gpr2, regs->gprs[3], regs->gprs[4], regs->gprs[5]); +out: return ret ?: regs->gprs[2]; } diff --git a/trunk/arch/s390/kernel/sys_s390.c b/trunk/arch/s390/kernel/sys_s390.c index b4a29eee41b8..d0964d22adb5 100644 --- a/trunk/arch/s390/kernel/sys_s390.c +++ b/trunk/arch/s390/kernel/sys_s390.c @@ -81,11 +81,12 @@ SYSCALL_DEFINE1(s390_personality, unsigned int, personality) { unsigned int ret; - if (current->personality == PER_LINUX32 && personality == PER_LINUX) - personality = PER_LINUX32; + if (personality(current->personality) == PER_LINUX32 && + personality(personality) == PER_LINUX) + personality |= PER_LINUX32; ret = sys_personality(personality); - if (ret == PER_LINUX32) - ret = PER_LINUX; + if (personality(ret) == PER_LINUX32) + ret &= ~PER_LINUX32; return ret; } diff --git a/trunk/arch/sh/boards/Kconfig b/trunk/arch/sh/boards/Kconfig index 7048c03490d9..fb5805745ace 100644 --- a/trunk/arch/sh/boards/Kconfig +++ b/trunk/arch/sh/boards/Kconfig @@ -57,6 +57,7 @@ config SH_7724_SOLUTION_ENGINE depends on CPU_SUBTYPE_SH7724 select ARCH_REQUIRE_GPIOLIB select SND_SOC_AK4642 if SND_SIMPLE_CARD + select REGULATOR_FIXED_VOLTAGE if REGULATOR help Select 7724 SolutionEngine if configuring for a Hitachi SH7724 evaluation board. 
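
A note on the s390 seccomp wiring above: selecting HAVE_ARCH_SECCOMP_FILTER, providing syscall_get_arch(), and letting do_syscall_trace_enter() skip the syscall when secure_computing() returns non-zero are the three pieces mode-2 seccomp needs from an architecture. For illustration only (a hypothetical userspace test program, not part of this series, assuming a 64-bit s390x build), a filter that this enables could look like:

#include <stddef.h>
#include <stdio.h>
#include <errno.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/audit.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

int main(void)
{
	/* Deny getcwd() with EPERM, allow everything else. */
	struct sock_filter filter[] = {
		/* Check the audit arch word first; allow if it is unexpected. */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, arch)),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, AUDIT_ARCH_S390X, 1, 0),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
		/* Then filter on the syscall number. */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getcwd, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(filter) / sizeof(filter[0]),
		.filter = filter,
	};

	/* An unprivileged task must set NO_NEW_PRIVS before attaching a filter. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
	    prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) {
		perror("prctl");
		return 1;
	}
	return 0;
}
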
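The s390_personality() change above (and the identical sparc64_personality() fix further down) comes down to comparing only the base personality: current->personality also carries flag bits such as ADDR_NO_RANDOMIZE, so a raw == PER_LINUX32 test stops matching as soon as any flag is set, and assigning PER_LINUX32 outright would wipe those flags. A small standalone illustration, with constants mirrored from include/linux/personality.h only so the example compiles on its own:

#include <stdio.h>

/* Values copied here solely to keep the example self-contained. */
#define PER_MASK          0x00ff
#define PER_LINUX         0x0000
#define PER_LINUX32       0x0008
#define ADDR_NO_RANDOMIZE 0x0040000
#define personality(p)    ((p) & PER_MASK)	/* same as the kernel helper */

int main(void)
{
	unsigned int cur = PER_LINUX32 | ADDR_NO_RANDOMIZE;

	/* Old test: prints 0, the flag bit makes the full word differ. */
	printf("raw compare:    %d\n", cur == PER_LINUX32);
	/* New test: prints 1, flags are masked off before comparing. */
	printf("masked compare: %d\n", personality(cur) == PER_LINUX32);
	return 0;
}

This is also why the patch uses personality |= PER_LINUX32 rather than a plain assignment, and strips only the PER_LINUX32 bit from the return value: the caller's flag bits are preserved in both directions.
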
@@ -140,6 +141,7 @@ config SH_RSK bool "Renesas Starter Kit" depends on CPU_SUBTYPE_SH7201 || CPU_SUBTYPE_SH7203 || \ CPU_SUBTYPE_SH7264 || CPU_SUBTYPE_SH7269 + select REGULATOR_FIXED_VOLTAGE if REGULATOR help Select this option if configuring for any of the RSK+ MCU evaluation platforms. @@ -159,6 +161,7 @@ config SH_SDK7786 select NO_IOPORT if !PCI select ARCH_WANT_OPTIONAL_GPIOLIB select HAVE_SRAM_POOL + select REGULATOR_FIXED_VOLTAGE if REGULATOR help Select SDK7786 if configuring for a Renesas Technology Europe SH7786-65nm board. @@ -173,6 +176,7 @@ config SH_SH7757LCR bool "SH7757LCR" depends on CPU_SUBTYPE_SH7757 select ARCH_REQUIRE_GPIOLIB + select REGULATOR_FIXED_VOLTAGE if REGULATOR config SH_SH7785LCR bool "SH7785LCR" @@ -206,6 +210,7 @@ config SH_MIGOR bool "Migo-R" depends on CPU_SUBTYPE_SH7722 select ARCH_REQUIRE_GPIOLIB + select REGULATOR_FIXED_VOLTAGE if REGULATOR help Select Migo-R if configuring for the SH7722 Migo-R platform by Renesas System Solutions Asia Pte. Ltd. @@ -214,6 +219,7 @@ config SH_AP325RXA bool "AP-325RXA" depends on CPU_SUBTYPE_SH7723 select ARCH_REQUIRE_GPIOLIB + select REGULATOR_FIXED_VOLTAGE if REGULATOR help Renesas "AP-325RXA" support. Compatible with ALGO SYSTEM CO.,LTD. "AP-320A" @@ -222,6 +228,7 @@ config SH_KFR2R09 bool "KFR2R09" depends on CPU_SUBTYPE_SH7724 select ARCH_REQUIRE_GPIOLIB + select REGULATOR_FIXED_VOLTAGE if REGULATOR help "Kit For R2R for 2009" support. @@ -230,6 +237,7 @@ config SH_ECOVEC depends on CPU_SUBTYPE_SH7724 select ARCH_REQUIRE_GPIOLIB select SND_SOC_DA7210 if SND_SIMPLE_CARD + select REGULATOR_FIXED_VOLTAGE if REGULATOR help Renesas "R0P7724LC0011/21RL (EcoVec)" support. @@ -305,6 +313,7 @@ config SH_MAGIC_PANEL_R2 bool "Magic Panel R2" depends on CPU_SUBTYPE_SH7720 select ARCH_REQUIRE_GPIOLIB + select REGULATOR_FIXED_VOLTAGE if REGULATOR help Select Magic Panel R2 if configuring for Magic Panel R2. @@ -316,6 +325,7 @@ config SH_CAYMAN config SH_POLARIS bool "SMSC Polaris" select CPU_HAS_IPR_IRQ + select REGULATOR_FIXED_VOLTAGE if REGULATOR depends on CPU_SUBTYPE_SH7709 help Select if configuring for an SMSC Polaris development board @@ -323,6 +333,7 @@ config SH_POLARIS config SH_SH2007 bool "SH-2007 board" select NO_IOPORT + select REGULATOR_FIXED_VOLTAGE if REGULATOR depends on CPU_SUBTYPE_SH7780 help SH-2007 is a single-board computer based around SH7780 chip @@ -334,6 +345,7 @@ config SH_SH2007 config SH_APSH4A3A bool "AP-SH4A-3A" select SH_ALPHA_BOARD + select REGULATOR_FIXED_VOLTAGE if REGULATOR depends on CPU_SUBTYPE_SH7785 help Select AP-SH4A-3A if configuring for an ALPHAPROJECT AP-SH4A-3A. @@ -342,6 +354,7 @@ config SH_APSH4AD0A bool "AP-SH4AD-0A" select SH_ALPHA_BOARD select SYS_SUPPORTS_PCI + select REGULATOR_FIXED_VOLTAGE if REGULATOR depends on CPU_SUBTYPE_SH7786 help Select AP-SH4AD-0A if configuring for an ALPHAPROJECT AP-SH4AD-0A. 
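
The string of `select REGULATOR_FIXED_VOLTAGE if REGULATOR` additions in this Kconfig goes hand in hand with the board changes that follow: each board now registers either dummy supplies (for smsc911x) or a fixed 3.3V supply (for SDHI/MMCIF) so that, when CONFIG_REGULATOR is enabled, the consumer drivers' supply lookups still succeed even though the rails are not software-controlled. As a rough sketch of the consumer side being satisfied (assumed from the way smsc911x uses the bulk regulator API, not taken from this diff; example_probe and the device pointer are placeholders):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

/* Supply names match the REGULATOR_SUPPLY() entries the boards register. */
static struct regulator_bulk_data smsc_supplies[] = {
	{ .supply = "vdd33a" },
	{ .supply = "vddvario" },
};

static int example_probe(struct device *dev)
{
	int ret;

	/*
	 * With CONFIG_REGULATOR=y this fails unless something -- possibly a
	 * board-registered dummy fixed regulator -- provides these supplies
	 * for this device, which is why the boards below register them.
	 */
	ret = regulator_bulk_get(dev, ARRAY_SIZE(smsc_supplies), smsc_supplies);
	if (ret)
		return ret;

	return regulator_bulk_enable(ARRAY_SIZE(smsc_supplies), smsc_supplies);
}

regulator_register_fixed() and regulator_register_always_on(), used in the board files below, appear to be thin helpers from include/linux/regulator/fixed.h that instantiate a "reg-fixed-voltage" platform device carrying the given consumer supplies, so no per-board regulator driver code is needed.
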
diff --git a/trunk/arch/sh/boards/board-apsh4a3a.c b/trunk/arch/sh/boards/board-apsh4a3a.c index 2823619c6006..0a39c241628a 100644 --- a/trunk/arch/sh/boards/board-apsh4a3a.c +++ b/trunk/arch/sh/boards/board-apsh4a3a.c @@ -13,6 +13,8 @@ #include #include #include +#include +#include #include #include #include @@ -66,6 +68,12 @@ static struct platform_device nor_flash_device = { .resource = nor_flash_resources, }; +/* Dummy supplies, where voltage doesn't matter */ +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x"), + REGULATOR_SUPPLY("vdd33a", "smsc911x"), +}; + static struct resource smsc911x_resources[] = { [0] = { .name = "smsc911x-memory", @@ -105,6 +113,8 @@ static struct platform_device *apsh4a3a_devices[] __initdata = { static int __init apsh4a3a_devices_setup(void) { + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + return platform_add_devices(apsh4a3a_devices, ARRAY_SIZE(apsh4a3a_devices)); } diff --git a/trunk/arch/sh/boards/board-apsh4ad0a.c b/trunk/arch/sh/boards/board-apsh4ad0a.c index b4d6292a9247..92eac3a99187 100644 --- a/trunk/arch/sh/boards/board-apsh4ad0a.c +++ b/trunk/arch/sh/boards/board-apsh4ad0a.c @@ -12,12 +12,20 @@ #include #include #include +#include +#include #include #include #include #include #include +/* Dummy supplies, where voltage doesn't matter */ +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x"), + REGULATOR_SUPPLY("vdd33a", "smsc911x"), +}; + static struct resource smsc911x_resources[] = { [0] = { .name = "smsc911x-memory", @@ -56,6 +64,8 @@ static struct platform_device *apsh4ad0a_devices[] __initdata = { static int __init apsh4ad0a_devices_setup(void) { + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + return platform_add_devices(apsh4ad0a_devices, ARRAY_SIZE(apsh4ad0a_devices)); } diff --git a/trunk/arch/sh/boards/board-magicpanelr2.c b/trunk/arch/sh/boards/board-magicpanelr2.c index 90568f9de3a4..20500858b56c 100644 --- a/trunk/arch/sh/boards/board-magicpanelr2.c +++ b/trunk/arch/sh/boards/board-magicpanelr2.c @@ -14,6 +14,8 @@ #include #include #include +#include +#include #include #include #include @@ -24,6 +26,12 @@ #include #include +/* Dummy supplies, where voltage doesn't matter */ +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x"), + REGULATOR_SUPPLY("vdd33a", "smsc911x"), +}; + #define LAN9115_READY (__raw_readl(0xA8000084UL) & 0x00000001UL) /* Wait until reset finished. Timeout is 100ms. 
*/ @@ -348,6 +356,8 @@ static struct platform_device *mpr2_devices[] __initdata = { static int __init mpr2_devices_setup(void) { + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + return platform_add_devices(mpr2_devices, ARRAY_SIZE(mpr2_devices)); } device_initcall(mpr2_devices_setup); diff --git a/trunk/arch/sh/boards/board-polaris.c b/trunk/arch/sh/boards/board-polaris.c index 0978ae2e4847..37a08d094727 100644 --- a/trunk/arch/sh/boards/board-polaris.c +++ b/trunk/arch/sh/boards/board-polaris.c @@ -9,6 +9,8 @@ #include #include #include +#include +#include #include #include #include @@ -22,6 +24,12 @@ #define AREA5_WAIT_CTRL (0x1C00) #define WAIT_STATES_10 (0x7) +/* Dummy supplies, where voltage doesn't matter */ +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x.0"), + REGULATOR_SUPPLY("vdd33a", "smsc911x.0"), +}; + static struct resource smsc911x_resources[] = { [0] = { .name = "smsc911x-memory", @@ -88,6 +96,8 @@ static int __init polaris_initialise(void) printk(KERN_INFO "Configuring Polaris external bus\n"); + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + /* Configure area 5 with 2 wait states */ wcr = __raw_readw(WCR2); wcr &= (~AREA5_WAIT_CTRL); diff --git a/trunk/arch/sh/boards/board-sh2007.c b/trunk/arch/sh/boards/board-sh2007.c index b90b78f6a829..1980bb7e5780 100644 --- a/trunk/arch/sh/boards/board-sh2007.c +++ b/trunk/arch/sh/boards/board-sh2007.c @@ -6,6 +6,8 @@ */ #include #include +#include +#include #include #include #include @@ -13,6 +15,14 @@ #include #include +/* Dummy supplies, where voltage doesn't matter */ +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x.0"), + REGULATOR_SUPPLY("vdd33a", "smsc911x.0"), + REGULATOR_SUPPLY("vddvario", "smsc911x.1"), + REGULATOR_SUPPLY("vdd33a", "smsc911x.1"), +}; + struct smsc911x_platform_config smc911x_info = { .flags = SMSC911X_USE_32BIT, .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, @@ -98,6 +108,8 @@ static struct platform_device *sh2007_devices[] __initdata = { static int __init sh2007_io_init(void) { + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + platform_add_devices(sh2007_devices, ARRAY_SIZE(sh2007_devices)); return 0; } diff --git a/trunk/arch/sh/boards/board-sh7757lcr.c b/trunk/arch/sh/boards/board-sh7757lcr.c index 5087f8bb4cff..41f86702eb9f 100644 --- a/trunk/arch/sh/boards/board-sh7757lcr.c +++ b/trunk/arch/sh/boards/board-sh7757lcr.c @@ -12,6 +12,8 @@ #include #include #include +#include +#include #include #include #include @@ -199,6 +201,15 @@ static struct platform_device sh7757_eth_giga1_device = { }, }; +/* Fixed 3.3V regulator to be used by SDHI0, MMCIF */ +static struct regulator_consumer_supply fixed3v3_power_consumers[] = +{ + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"), +}; + /* SH_MMCIF */ static struct resource sh_mmcif_resources[] = { [0] = { @@ -329,6 +340,9 @@ static struct spi_board_info spi_board_info[] = { static int __init sh7757lcr_devices_setup(void) { + regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, + ARRAY_SIZE(fixed3v3_power_consumers), 3300000); + /* RGMII (PTA) */ gpio_request(GPIO_FN_ET0_MDC, NULL); gpio_request(GPIO_FN_ET0_MDIO, NULL); diff --git a/trunk/arch/sh/boards/mach-ap325rxa/setup.c b/trunk/arch/sh/boards/mach-ap325rxa/setup.c index 
f33ebf447073..9e963c1d1447 100644 --- a/trunk/arch/sh/boards/mach-ap325rxa/setup.c +++ b/trunk/arch/sh/boards/mach-ap325rxa/setup.c @@ -20,6 +20,8 @@ #include #include #include +#include +#include #include #include #include @@ -34,6 +36,12 @@ #include #include +/* Dummy supplies, where voltage doesn't matter */ +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x"), + REGULATOR_SUPPLY("vdd33a", "smsc911x"), +}; + static struct smsc911x_platform_config smsc911x_config = { .phy_interface = PHY_INTERFACE_MODE_MII, .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, @@ -423,6 +431,15 @@ static struct platform_device ceu_device = { }, }; +/* Fixed 3.3V regulators to be used by SDHI0, SDHI1 */ +static struct regulator_consumer_supply fixed3v3_power_consumers[] = +{ + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"), +}; + static struct resource sdhi0_cn3_resources[] = { [0] = { .name = "SDHI0", @@ -544,6 +561,10 @@ static int __init ap325rxa_devices_setup(void) &ap325rxa_sdram_leave_start, &ap325rxa_sdram_leave_end); + regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, + ARRAY_SIZE(fixed3v3_power_consumers), 3300000); + regulator_register_fixed(1, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + /* LD3 and LD4 LEDs */ gpio_request(GPIO_PTX5, NULL); /* RUN */ gpio_direction_output(GPIO_PTX5, 1); diff --git a/trunk/arch/sh/boards/mach-ecovec24/setup.c b/trunk/arch/sh/boards/mach-ecovec24/setup.c index 4158d70c0dea..64559e8af14b 100644 --- a/trunk/arch/sh/boards/mach-ecovec24/setup.c +++ b/trunk/arch/sh/boards/mach-ecovec24/setup.c @@ -19,6 +19,8 @@ #include #include #include +#include +#include #include #include #include @@ -242,9 +244,17 @@ static int usbhs_get_id(struct platform_device *pdev) return gpio_get_value(GPIO_PTB3); } +static void usbhs_phy_reset(struct platform_device *pdev) +{ + /* enable vbus if HOST */ + if (!gpio_get_value(GPIO_PTB3)) + gpio_set_value(GPIO_PTB5, 1); +} + static struct renesas_usbhs_platform_info usbhs_info = { .platform_callback = { .get_id = usbhs_get_id, + .phy_reset = usbhs_phy_reset, }, .driver_param = { .buswait_bwait = 4, @@ -518,10 +528,86 @@ static struct i2c_board_info ts_i2c_clients = { .irq = IRQ0, }; +static struct regulator_consumer_supply cn12_power_consumers[] = +{ + REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"), + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"), +}; + +static struct regulator_init_data cn12_power_init_data = { + .constraints = { + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = ARRAY_SIZE(cn12_power_consumers), + .consumer_supplies = cn12_power_consumers, +}; + +static struct fixed_voltage_config cn12_power_info = { + .supply_name = "CN12 SD/MMC Vdd", + .microvolts = 3300000, + .gpio = GPIO_PTB7, + .enable_high = 1, + .init_data = &cn12_power_init_data, +}; + +static struct platform_device cn12_power = { + .name = "reg-fixed-voltage", + .id = 0, + .dev = { + .platform_data = &cn12_power_info, + }, +}; + #if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE) /* SDHI0 */ +static struct regulator_consumer_supply sdhi0_power_consumers[] = +{ + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), +}; + +static struct regulator_init_data sdhi0_power_init_data = { + 
.constraints = { + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = ARRAY_SIZE(sdhi0_power_consumers), + .consumer_supplies = sdhi0_power_consumers, +}; + +static struct fixed_voltage_config sdhi0_power_info = { + .supply_name = "CN11 SD/MMC Vdd", + .microvolts = 3300000, + .gpio = GPIO_PTB6, + .enable_high = 1, + .init_data = &sdhi0_power_init_data, +}; + +static struct platform_device sdhi0_power = { + .name = "reg-fixed-voltage", + .id = 1, + .dev = { + .platform_data = &sdhi0_power_info, + }, +}; + static void sdhi0_set_pwr(struct platform_device *pdev, int state) { + static int power_gpio = -EINVAL; + + if (power_gpio < 0) { + int ret = gpio_request(GPIO_PTB6, NULL); + if (!ret) { + power_gpio = GPIO_PTB6; + gpio_direction_output(power_gpio, 0); + } + } + + /* + * Toggle the GPIO regardless, whether we managed to grab it above or + * the fixed regulator driver did. + */ gpio_set_value(GPIO_PTB6, state); } @@ -562,13 +648,27 @@ static struct platform_device sdhi0_device = { }, }; -#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) -/* SDHI1 */ -static void sdhi1_set_pwr(struct platform_device *pdev, int state) +static void cn12_set_pwr(struct platform_device *pdev, int state) { + static int power_gpio = -EINVAL; + + if (power_gpio < 0) { + int ret = gpio_request(GPIO_PTB7, NULL); + if (!ret) { + power_gpio = GPIO_PTB7; + gpio_direction_output(power_gpio, 0); + } + } + + /* + * Toggle the GPIO regardless, whether we managed to grab it above or + * the fixed regulator driver did. + */ gpio_set_value(GPIO_PTB7, state); } +#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) +/* SDHI1 */ static int sdhi1_get_cd(struct platform_device *pdev) { return !gpio_get_value(GPIO_PTW7); @@ -579,7 +679,7 @@ static struct sh_mobile_sdhi_info sdhi1_info = { .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX, .tmio_caps = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD | MMC_CAP_NEEDS_POLL, - .set_pwr = sdhi1_set_pwr, + .set_pwr = cn12_set_pwr, .get_cd = sdhi1_get_cd, }; @@ -899,14 +999,9 @@ static struct platform_device vou_device = { #if defined(CONFIG_MMC_SH_MMCIF) || defined(CONFIG_MMC_SH_MMCIF_MODULE) /* SH_MMCIF */ -static void mmcif_set_pwr(struct platform_device *pdev, int state) -{ - gpio_set_value(GPIO_PTB7, state); -} - static void mmcif_down_pwr(struct platform_device *pdev) { - gpio_set_value(GPIO_PTB7, 0); + cn12_set_pwr(pdev, 0); } static struct resource sh_mmcif_resources[] = { @@ -929,7 +1024,7 @@ static struct resource sh_mmcif_resources[] = { }; static struct sh_mmcif_plat_data sh_mmcif_plat = { - .set_pwr = mmcif_set_pwr, + .set_pwr = cn12_set_pwr, .down_pwr = mmcif_down_pwr, .sup_pclk = 0, /* SH7724: Max Pclk/2 */ .caps = MMC_CAP_4_BIT_DATA | @@ -960,7 +1055,9 @@ static struct platform_device *ecovec_devices[] __initdata = { &ceu0_device, &ceu1_device, &keysc_device, + &cn12_power, #if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE) + &sdhi0_power, &sdhi0_device, #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) &sdhi1_device, @@ -1258,8 +1355,6 @@ static int __init arch_setup(void) gpio_request(GPIO_FN_SDHI0D2, NULL); gpio_request(GPIO_FN_SDHI0D1, NULL); gpio_request(GPIO_FN_SDHI0D0, NULL); - gpio_request(GPIO_PTB6, NULL); - gpio_direction_output(GPIO_PTB6, 0); #else /* enable MSIOF0 on CN11 (needs DS2.4 set to OFF) */ gpio_request(GPIO_FN_MSIOF0_TXD, NULL); @@ -1288,8 +1383,6 @@ static int __init arch_setup(void) gpio_request(GPIO_FN_MMC_D0, NULL); gpio_request(GPIO_FN_MMC_CLK, NULL); 
gpio_request(GPIO_FN_MMC_CMD, NULL); - gpio_request(GPIO_PTB7, NULL); - gpio_direction_output(GPIO_PTB7, 0); cn12_enabled = true; #elif defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE) @@ -1301,8 +1394,6 @@ static int __init arch_setup(void) gpio_request(GPIO_FN_SDHI1D2, NULL); gpio_request(GPIO_FN_SDHI1D1, NULL); gpio_request(GPIO_FN_SDHI1D0, NULL); - gpio_request(GPIO_PTB7, NULL); - gpio_direction_output(GPIO_PTB7, 0); /* Card-detect, used on CN12 with SDHI1 */ gpio_request(GPIO_PTW7, NULL); diff --git a/trunk/arch/sh/boards/mach-kfr2r09/setup.c b/trunk/arch/sh/boards/mach-kfr2r09/setup.c index 43a179ce9afc..f2a4304fbe23 100644 --- a/trunk/arch/sh/boards/mach-kfr2r09/setup.c +++ b/trunk/arch/sh/boards/mach-kfr2r09/setup.c @@ -21,6 +21,8 @@ #include #include #include +#include +#include #include #include #include @@ -341,6 +343,13 @@ static struct platform_device kfr2r09_camera = { }, }; +/* Fixed 3.3V regulator to be used by SDHI0 */ +static struct regulator_consumer_supply fixed3v3_power_consumers[] = +{ + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), +}; + static struct resource kfr2r09_sh_sdhi0_resources[] = { [0] = { .name = "SDHI0", @@ -523,6 +532,9 @@ static int __init kfr2r09_devices_setup(void) &kfr2r09_sdram_leave_start, &kfr2r09_sdram_leave_end); + regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, + ARRAY_SIZE(fixed3v3_power_consumers), 3300000); + /* enable SCIF1 serial port for YC401 console support */ gpio_request(GPIO_FN_SCIF1_RXD, NULL); gpio_request(GPIO_FN_SCIF1_TXD, NULL); diff --git a/trunk/arch/sh/boards/mach-migor/setup.c b/trunk/arch/sh/boards/mach-migor/setup.c index a8a1ca741c85..8b73194ed2ce 100644 --- a/trunk/arch/sh/boards/mach-migor/setup.c +++ b/trunk/arch/sh/boards/mach-migor/setup.c @@ -17,6 +17,8 @@ #include #include #include +#include +#include #include #include #include @@ -386,6 +388,13 @@ static struct platform_device migor_ceu_device = { }, }; +/* Fixed 3.3V regulator to be used by SDHI0 */ +static struct regulator_consumer_supply fixed3v3_power_consumers[] = +{ + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), +}; + static struct resource sdhi_cn9_resources[] = { [0] = { .name = "SDHI", @@ -498,6 +507,10 @@ static int __init migor_devices_setup(void) &migor_sdram_enter_end, &migor_sdram_leave_start, &migor_sdram_leave_end); + + regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, + ARRAY_SIZE(fixed3v3_power_consumers), 3300000); + /* Let D11 LED show STATUS0 */ gpio_request(GPIO_FN_STATUS0, NULL); diff --git a/trunk/arch/sh/boards/mach-rsk/setup.c b/trunk/arch/sh/boards/mach-rsk/setup.c index 895f030070d3..2685ea03b064 100644 --- a/trunk/arch/sh/boards/mach-rsk/setup.c +++ b/trunk/arch/sh/boards/mach-rsk/setup.c @@ -16,9 +16,17 @@ #include #include #include +#include +#include #include #include +/* Dummy supplies, where voltage doesn't matter */ +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x"), + REGULATOR_SUPPLY("vdd33a", "smsc911x"), +}; + static const char *part_probes[] = { "cmdlinepart", NULL }; static struct mtd_partition rsk_partitions[] = { @@ -67,6 +75,8 @@ static struct platform_device *rsk_devices[] __initdata = { static int __init rsk_devices_setup(void) { + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + return platform_add_devices(rsk_devices, ARRAY_SIZE(rsk_devices)); } diff --git 
a/trunk/arch/sh/boards/mach-sdk7786/setup.c b/trunk/arch/sh/boards/mach-sdk7786/setup.c index 27a2314f50ac..c29268bfd34a 100644 --- a/trunk/arch/sh/boards/mach-sdk7786/setup.c +++ b/trunk/arch/sh/boards/mach-sdk7786/setup.c @@ -11,6 +11,8 @@ #include #include #include +#include +#include #include #include #include @@ -38,6 +40,12 @@ static struct platform_device heartbeat_device = { .resource = &heartbeat_resource, }; +/* Dummy supplies, where voltage doesn't matter */ +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x"), + REGULATOR_SUPPLY("vdd33a", "smsc911x"), +}; + static struct resource smsc911x_resources[] = { [0] = { .name = "smsc911x-memory", @@ -236,6 +244,8 @@ static void __init sdk7786_setup(char **cmdline_p) { pr_info("Renesas Technology Europe SDK7786 support:\n"); + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + sdk7786_fpga_init(); sdk7786_nmi_init(); diff --git a/trunk/arch/sh/boards/mach-se/7724/setup.c b/trunk/arch/sh/boards/mach-se/7724/setup.c index ffbf5bc7366b..35f6efa3ac0e 100644 --- a/trunk/arch/sh/boards/mach-se/7724/setup.c +++ b/trunk/arch/sh/boards/mach-se/7724/setup.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include #include #include @@ -454,6 +456,15 @@ static struct platform_device sh7724_usb1_gadget_device = { .resource = sh7724_usb1_gadget_resources, }; +/* Fixed 3.3V regulator to be used by SDHI0, SDHI1 */ +static struct regulator_consumer_supply fixed3v3_power_consumers[] = +{ + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"), +}; + static struct resource sdhi0_cn7_resources[] = { [0] = { .name = "SDHI0", @@ -684,6 +695,10 @@ static int __init devices_setup(void) &ms7724se_sdram_enter_end, &ms7724se_sdram_leave_start, &ms7724se_sdram_leave_end); + + regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, + ARRAY_SIZE(fixed3v3_power_consumers), 3300000); + /* Reset Release */ fpga_out = __raw_readw(FPGA_OUT); /* bit4: NTSC_PDN, bit5: NTSC_RESET */ diff --git a/trunk/arch/sh/drivers/dma/dma-sh.c b/trunk/arch/sh/drivers/dma/dma-sh.c index 4c171f13b0e8..b22565623142 100644 --- a/trunk/arch/sh/drivers/dma/dma-sh.c +++ b/trunk/arch/sh/drivers/dma/dma-sh.c @@ -335,7 +335,7 @@ static int dmae_irq_init(void) for (n = 0; n < NR_DMAE; n++) { int i = request_irq(get_dma_error_irq(n), dma_err, - IRQF_SHARED, dmae_name[n], NULL); + IRQF_SHARED, dmae_name[n], (void *)dmae_name[n]); if (unlikely(i < 0)) { printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]); return i; diff --git a/trunk/arch/sh/include/asm/sections.h b/trunk/arch/sh/include/asm/sections.h index 4a5350037c8f..1b6199740e98 100644 --- a/trunk/arch/sh/include/asm/sections.h +++ b/trunk/arch/sh/include/asm/sections.h @@ -6,7 +6,6 @@ extern long __nosave_begin, __nosave_end; extern long __machvec_start, __machvec_end; extern char __uncached_start, __uncached_end; -extern char _ebss[]; extern char __start_eh_frame[], __stop_eh_frame[]; #endif /* __ASM_SH_SECTIONS_H */ diff --git a/trunk/arch/sh/include/cpu-sh2a/cpu/sh7269.h b/trunk/arch/sh/include/cpu-sh2a/cpu/sh7269.h index 48d14498e774..2a0ca8780f0d 100644 --- a/trunk/arch/sh/include/cpu-sh2a/cpu/sh7269.h +++ b/trunk/arch/sh/include/cpu-sh2a/cpu/sh7269.h @@ -183,18 +183,30 @@ enum { GPIO_FN_DV_DATA1, GPIO_FN_DV_DATA0, GPIO_FN_LCD_CLK, GPIO_FN_LCD_EXTCLK, GPIO_FN_LCD_VSYNC, GPIO_FN_LCD_HSYNC, 
GPIO_FN_LCD_DE, - GPIO_FN_LCD_DATA23, GPIO_FN_LCD_DATA22, - GPIO_FN_LCD_DATA21, GPIO_FN_LCD_DATA20, - GPIO_FN_LCD_DATA19, GPIO_FN_LCD_DATA18, - GPIO_FN_LCD_DATA17, GPIO_FN_LCD_DATA16, - GPIO_FN_LCD_DATA15, GPIO_FN_LCD_DATA14, - GPIO_FN_LCD_DATA13, GPIO_FN_LCD_DATA12, - GPIO_FN_LCD_DATA11, GPIO_FN_LCD_DATA10, - GPIO_FN_LCD_DATA9, GPIO_FN_LCD_DATA8, - GPIO_FN_LCD_DATA7, GPIO_FN_LCD_DATA6, - GPIO_FN_LCD_DATA5, GPIO_FN_LCD_DATA4, - GPIO_FN_LCD_DATA3, GPIO_FN_LCD_DATA2, - GPIO_FN_LCD_DATA1, GPIO_FN_LCD_DATA0, + GPIO_FN_LCD_DATA23_PG23, GPIO_FN_LCD_DATA22_PG22, + GPIO_FN_LCD_DATA21_PG21, GPIO_FN_LCD_DATA20_PG20, + GPIO_FN_LCD_DATA19_PG19, GPIO_FN_LCD_DATA18_PG18, + GPIO_FN_LCD_DATA17_PG17, GPIO_FN_LCD_DATA16_PG16, + GPIO_FN_LCD_DATA15_PG15, GPIO_FN_LCD_DATA14_PG14, + GPIO_FN_LCD_DATA13_PG13, GPIO_FN_LCD_DATA12_PG12, + GPIO_FN_LCD_DATA11_PG11, GPIO_FN_LCD_DATA10_PG10, + GPIO_FN_LCD_DATA9_PG9, GPIO_FN_LCD_DATA8_PG8, + GPIO_FN_LCD_DATA7_PG7, GPIO_FN_LCD_DATA6_PG6, + GPIO_FN_LCD_DATA5_PG5, GPIO_FN_LCD_DATA4_PG4, + GPIO_FN_LCD_DATA3_PG3, GPIO_FN_LCD_DATA2_PG2, + GPIO_FN_LCD_DATA1_PG1, GPIO_FN_LCD_DATA0_PG0, + GPIO_FN_LCD_DATA23_PJ23, GPIO_FN_LCD_DATA22_PJ22, + GPIO_FN_LCD_DATA21_PJ21, GPIO_FN_LCD_DATA20_PJ20, + GPIO_FN_LCD_DATA19_PJ19, GPIO_FN_LCD_DATA18_PJ18, + GPIO_FN_LCD_DATA17_PJ17, GPIO_FN_LCD_DATA16_PJ16, + GPIO_FN_LCD_DATA15_PJ15, GPIO_FN_LCD_DATA14_PJ14, + GPIO_FN_LCD_DATA13_PJ13, GPIO_FN_LCD_DATA12_PJ12, + GPIO_FN_LCD_DATA11_PJ11, GPIO_FN_LCD_DATA10_PJ10, + GPIO_FN_LCD_DATA9_PJ9, GPIO_FN_LCD_DATA8_PJ8, + GPIO_FN_LCD_DATA7_PJ7, GPIO_FN_LCD_DATA6_PJ6, + GPIO_FN_LCD_DATA5_PJ5, GPIO_FN_LCD_DATA4_PJ4, + GPIO_FN_LCD_DATA3_PJ3, GPIO_FN_LCD_DATA2_PJ2, + GPIO_FN_LCD_DATA1_PJ1, GPIO_FN_LCD_DATA0_PJ0, GPIO_FN_LCD_M_DISP, }; diff --git a/trunk/arch/sh/include/cpu-sh4/cpu/sh7757.h b/trunk/arch/sh/include/cpu-sh4/cpu/sh7757.h index 41f9f8b9db73..5340f3bc1863 100644 --- a/trunk/arch/sh/include/cpu-sh4/cpu/sh7757.h +++ b/trunk/arch/sh/include/cpu-sh4/cpu/sh7757.h @@ -283,5 +283,7 @@ enum { SHDMA_SLAVE_RIIC8_RX, SHDMA_SLAVE_RIIC9_TX, SHDMA_SLAVE_RIIC9_RX, + SHDMA_SLAVE_RSPI_TX, + SHDMA_SLAVE_RSPI_RX, }; #endif /* __ASM_SH7757_H__ */ diff --git a/trunk/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c b/trunk/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c index f25127c46eca..039e4587dd9b 100644 --- a/trunk/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c +++ b/trunk/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c @@ -758,12 +758,22 @@ enum { DV_DATA3_MARK, DV_DATA2_MARK, DV_DATA1_MARK, DV_DATA0_MARK, LCD_CLK_MARK, LCD_EXTCLK_MARK, LCD_VSYNC_MARK, LCD_HSYNC_MARK, LCD_DE_MARK, - LCD_DATA23_MARK, LCD_DATA22_MARK, LCD_DATA21_MARK, LCD_DATA20_MARK, - LCD_DATA19_MARK, LCD_DATA18_MARK, LCD_DATA17_MARK, LCD_DATA16_MARK, - LCD_DATA15_MARK, LCD_DATA14_MARK, LCD_DATA13_MARK, LCD_DATA12_MARK, - LCD_DATA11_MARK, LCD_DATA10_MARK, LCD_DATA9_MARK, LCD_DATA8_MARK, - LCD_DATA7_MARK, LCD_DATA6_MARK, LCD_DATA5_MARK, LCD_DATA4_MARK, - LCD_DATA3_MARK, LCD_DATA2_MARK, LCD_DATA1_MARK, LCD_DATA0_MARK, + LCD_DATA23_PG23_MARK, LCD_DATA22_PG22_MARK, LCD_DATA21_PG21_MARK, + LCD_DATA20_PG20_MARK, LCD_DATA19_PG19_MARK, LCD_DATA18_PG18_MARK, + LCD_DATA17_PG17_MARK, LCD_DATA16_PG16_MARK, LCD_DATA15_PG15_MARK, + LCD_DATA14_PG14_MARK, LCD_DATA13_PG13_MARK, LCD_DATA12_PG12_MARK, + LCD_DATA11_PG11_MARK, LCD_DATA10_PG10_MARK, LCD_DATA9_PG9_MARK, + LCD_DATA8_PG8_MARK, LCD_DATA7_PG7_MARK, LCD_DATA6_PG6_MARK, + LCD_DATA5_PG5_MARK, LCD_DATA4_PG4_MARK, LCD_DATA3_PG3_MARK, + LCD_DATA2_PG2_MARK, LCD_DATA1_PG1_MARK, LCD_DATA0_PG0_MARK, + LCD_DATA23_PJ23_MARK, LCD_DATA22_PJ22_MARK, 
LCD_DATA21_PJ21_MARK, + LCD_DATA20_PJ20_MARK, LCD_DATA19_PJ19_MARK, LCD_DATA18_PJ18_MARK, + LCD_DATA17_PJ17_MARK, LCD_DATA16_PJ16_MARK, LCD_DATA15_PJ15_MARK, + LCD_DATA14_PJ14_MARK, LCD_DATA13_PJ13_MARK, LCD_DATA12_PJ12_MARK, + LCD_DATA11_PJ11_MARK, LCD_DATA10_PJ10_MARK, LCD_DATA9_PJ9_MARK, + LCD_DATA8_PJ8_MARK, LCD_DATA7_PJ7_MARK, LCD_DATA6_PJ6_MARK, + LCD_DATA5_PJ5_MARK, LCD_DATA4_PJ4_MARK, LCD_DATA3_PJ3_MARK, + LCD_DATA2_PJ2_MARK, LCD_DATA1_PJ1_MARK, LCD_DATA0_PJ0_MARK, LCD_TCON6_MARK, LCD_TCON5_MARK, LCD_TCON4_MARK, LCD_TCON3_MARK, LCD_TCON2_MARK, LCD_TCON1_MARK, LCD_TCON0_MARK, LCD_M_DISP_MARK, @@ -1036,6 +1046,7 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(PF1_DATA, PF1MD_000), PINMUX_DATA(BACK_MARK, PF1MD_001), + PINMUX_DATA(SSL10_MARK, PF1MD_011), PINMUX_DATA(TIOC4B_MARK, PF1MD_100), PINMUX_DATA(DACK0_MARK, PF1MD_101), @@ -1049,47 +1060,50 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(PG27_DATA, PG27MD_00), PINMUX_DATA(LCD_TCON2_MARK, PG27MD_10), PINMUX_DATA(LCD_EXTCLK_MARK, PG27MD_11), + PINMUX_DATA(LCD_DE_MARK, PG27MD_11), PINMUX_DATA(PG26_DATA, PG26MD_00), PINMUX_DATA(LCD_TCON1_MARK, PG26MD_10), + PINMUX_DATA(LCD_HSYNC_MARK, PG26MD_10), PINMUX_DATA(PG25_DATA, PG25MD_00), PINMUX_DATA(LCD_TCON0_MARK, PG25MD_10), + PINMUX_DATA(LCD_VSYNC_MARK, PG25MD_10), PINMUX_DATA(PG24_DATA, PG24MD_00), PINMUX_DATA(LCD_CLK_MARK, PG24MD_10), PINMUX_DATA(PG23_DATA, PG23MD_000), - PINMUX_DATA(LCD_DATA23_MARK, PG23MD_010), + PINMUX_DATA(LCD_DATA23_PG23_MARK, PG23MD_010), PINMUX_DATA(LCD_TCON6_MARK, PG23MD_011), PINMUX_DATA(TXD5_MARK, PG23MD_100), PINMUX_DATA(PG22_DATA, PG22MD_000), - PINMUX_DATA(LCD_DATA22_MARK, PG22MD_010), + PINMUX_DATA(LCD_DATA22_PG22_MARK, PG22MD_010), PINMUX_DATA(LCD_TCON5_MARK, PG22MD_011), PINMUX_DATA(RXD5_MARK, PG22MD_100), PINMUX_DATA(PG21_DATA, PG21MD_000), PINMUX_DATA(DV_DATA7_MARK, PG21MD_001), - PINMUX_DATA(LCD_DATA21_MARK, PG21MD_010), + PINMUX_DATA(LCD_DATA21_PG21_MARK, PG21MD_010), PINMUX_DATA(LCD_TCON4_MARK, PG21MD_011), PINMUX_DATA(TXD4_MARK, PG21MD_100), PINMUX_DATA(PG20_DATA, PG20MD_000), PINMUX_DATA(DV_DATA6_MARK, PG20MD_001), - PINMUX_DATA(LCD_DATA20_MARK, PG21MD_010), + PINMUX_DATA(LCD_DATA20_PG20_MARK, PG21MD_010), PINMUX_DATA(LCD_TCON3_MARK, PG20MD_011), PINMUX_DATA(RXD4_MARK, PG20MD_100), PINMUX_DATA(PG19_DATA, PG19MD_000), PINMUX_DATA(DV_DATA5_MARK, PG19MD_001), - PINMUX_DATA(LCD_DATA19_MARK, PG19MD_010), + PINMUX_DATA(LCD_DATA19_PG19_MARK, PG19MD_010), PINMUX_DATA(SPDIF_OUT_MARK, PG19MD_011), PINMUX_DATA(SCK5_MARK, PG19MD_100), PINMUX_DATA(PG18_DATA, PG18MD_000), PINMUX_DATA(DV_DATA4_MARK, PG18MD_001), - PINMUX_DATA(LCD_DATA18_MARK, PG18MD_010), + PINMUX_DATA(LCD_DATA18_PG18_MARK, PG18MD_010), PINMUX_DATA(SPDIF_IN_MARK, PG18MD_011), PINMUX_DATA(SCK4_MARK, PG18MD_100), @@ -1097,103 +1111,103 @@ static pinmux_enum_t pinmux_data[] = { // we're going with 2 bits PINMUX_DATA(PG17_DATA, PG17MD_00), PINMUX_DATA(WE3ICIOWRAHDQMUU_MARK, PG17MD_01), - PINMUX_DATA(LCD_DATA17_MARK, PG17MD_10), + PINMUX_DATA(LCD_DATA17_PG17_MARK, PG17MD_10), // TODO hardware manual has PG16 3 bits wide in reg picture and 2 bits in description // we're going with 2 bits PINMUX_DATA(PG16_DATA, PG16MD_00), PINMUX_DATA(WE2ICIORDDQMUL_MARK, PG16MD_01), - PINMUX_DATA(LCD_DATA16_MARK, PG16MD_10), + PINMUX_DATA(LCD_DATA16_PG16_MARK, PG16MD_10), PINMUX_DATA(PG15_DATA, PG15MD_00), PINMUX_DATA(D31_MARK, PG15MD_01), - PINMUX_DATA(LCD_DATA15_MARK, PG15MD_10), + PINMUX_DATA(LCD_DATA15_PG15_MARK, PG15MD_10), PINMUX_DATA(PINT7_PG_MARK, PG15MD_11), PINMUX_DATA(PG14_DATA, PG14MD_00), 
PINMUX_DATA(D30_MARK, PG14MD_01), - PINMUX_DATA(LCD_DATA14_MARK, PG14MD_10), + PINMUX_DATA(LCD_DATA14_PG14_MARK, PG14MD_10), PINMUX_DATA(PINT6_PG_MARK, PG14MD_11), PINMUX_DATA(PG13_DATA, PG13MD_00), PINMUX_DATA(D29_MARK, PG13MD_01), - PINMUX_DATA(LCD_DATA13_MARK, PG13MD_10), + PINMUX_DATA(LCD_DATA13_PG13_MARK, PG13MD_10), PINMUX_DATA(PINT5_PG_MARK, PG13MD_11), PINMUX_DATA(PG12_DATA, PG12MD_00), PINMUX_DATA(D28_MARK, PG12MD_01), - PINMUX_DATA(LCD_DATA12_MARK, PG12MD_10), + PINMUX_DATA(LCD_DATA12_PG12_MARK, PG12MD_10), PINMUX_DATA(PINT4_PG_MARK, PG12MD_11), PINMUX_DATA(PG11_DATA, PG11MD_000), PINMUX_DATA(D27_MARK, PG11MD_001), - PINMUX_DATA(LCD_DATA11_MARK, PG11MD_010), + PINMUX_DATA(LCD_DATA11_PG11_MARK, PG11MD_010), PINMUX_DATA(PINT3_PG_MARK, PG11MD_011), PINMUX_DATA(TIOC3D_MARK, PG11MD_100), PINMUX_DATA(PG10_DATA, PG10MD_000), PINMUX_DATA(D26_MARK, PG10MD_001), - PINMUX_DATA(LCD_DATA10_MARK, PG10MD_010), + PINMUX_DATA(LCD_DATA10_PG10_MARK, PG10MD_010), PINMUX_DATA(PINT2_PG_MARK, PG10MD_011), PINMUX_DATA(TIOC3C_MARK, PG10MD_100), PINMUX_DATA(PG9_DATA, PG9MD_000), PINMUX_DATA(D25_MARK, PG9MD_001), - PINMUX_DATA(LCD_DATA9_MARK, PG9MD_010), + PINMUX_DATA(LCD_DATA9_PG9_MARK, PG9MD_010), PINMUX_DATA(PINT1_PG_MARK, PG9MD_011), PINMUX_DATA(TIOC3B_MARK, PG9MD_100), PINMUX_DATA(PG8_DATA, PG8MD_000), PINMUX_DATA(D24_MARK, PG8MD_001), - PINMUX_DATA(LCD_DATA8_MARK, PG8MD_010), + PINMUX_DATA(LCD_DATA8_PG8_MARK, PG8MD_010), PINMUX_DATA(PINT0_PG_MARK, PG8MD_011), PINMUX_DATA(TIOC3A_MARK, PG8MD_100), PINMUX_DATA(PG7_DATA, PG7MD_000), PINMUX_DATA(D23_MARK, PG7MD_001), - PINMUX_DATA(LCD_DATA7_MARK, PG7MD_010), + PINMUX_DATA(LCD_DATA7_PG7_MARK, PG7MD_010), PINMUX_DATA(IRQ7_PG_MARK, PG7MD_011), PINMUX_DATA(TIOC2B_MARK, PG7MD_100), PINMUX_DATA(PG6_DATA, PG6MD_000), PINMUX_DATA(D22_MARK, PG6MD_001), - PINMUX_DATA(LCD_DATA6_MARK, PG6MD_010), + PINMUX_DATA(LCD_DATA6_PG6_MARK, PG6MD_010), PINMUX_DATA(IRQ6_PG_MARK, PG6MD_011), PINMUX_DATA(TIOC2A_MARK, PG6MD_100), PINMUX_DATA(PG5_DATA, PG5MD_000), PINMUX_DATA(D21_MARK, PG5MD_001), - PINMUX_DATA(LCD_DATA5_MARK, PG5MD_010), + PINMUX_DATA(LCD_DATA5_PG5_MARK, PG5MD_010), PINMUX_DATA(IRQ5_PG_MARK, PG5MD_011), PINMUX_DATA(TIOC1B_MARK, PG5MD_100), PINMUX_DATA(PG4_DATA, PG4MD_000), PINMUX_DATA(D20_MARK, PG4MD_001), - PINMUX_DATA(LCD_DATA4_MARK, PG4MD_010), + PINMUX_DATA(LCD_DATA4_PG4_MARK, PG4MD_010), PINMUX_DATA(IRQ4_PG_MARK, PG4MD_011), PINMUX_DATA(TIOC1A_MARK, PG4MD_100), PINMUX_DATA(PG3_DATA, PG3MD_000), PINMUX_DATA(D19_MARK, PG3MD_001), - PINMUX_DATA(LCD_DATA3_MARK, PG3MD_010), + PINMUX_DATA(LCD_DATA3_PG3_MARK, PG3MD_010), PINMUX_DATA(IRQ3_PG_MARK, PG3MD_011), PINMUX_DATA(TIOC0D_MARK, PG3MD_100), PINMUX_DATA(PG2_DATA, PG2MD_000), PINMUX_DATA(D18_MARK, PG2MD_001), - PINMUX_DATA(LCD_DATA2_MARK, PG2MD_010), + PINMUX_DATA(LCD_DATA2_PG2_MARK, PG2MD_010), PINMUX_DATA(IRQ2_PG_MARK, PG2MD_011), PINMUX_DATA(TIOC0C_MARK, PG2MD_100), PINMUX_DATA(PG1_DATA, PG1MD_000), PINMUX_DATA(D17_MARK, PG1MD_001), - PINMUX_DATA(LCD_DATA1_MARK, PG1MD_010), + PINMUX_DATA(LCD_DATA1_PG1_MARK, PG1MD_010), PINMUX_DATA(IRQ1_PG_MARK, PG1MD_011), PINMUX_DATA(TIOC0B_MARK, PG1MD_100), PINMUX_DATA(PG0_DATA, PG0MD_000), PINMUX_DATA(D16_MARK, PG0MD_001), - PINMUX_DATA(LCD_DATA0_MARK, PG0MD_010), + PINMUX_DATA(LCD_DATA0_PG0_MARK, PG0MD_010), PINMUX_DATA(IRQ0_PG_MARK, PG0MD_011), PINMUX_DATA(TIOC0A_MARK, PG0MD_100), @@ -1275,14 +1289,14 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(PJ23_DATA, PJ23MD_000), PINMUX_DATA(DV_DATA23_MARK, PJ23MD_001), - PINMUX_DATA(LCD_DATA23_MARK, PJ23MD_010), + 
PINMUX_DATA(LCD_DATA23_PJ23_MARK, PJ23MD_010), PINMUX_DATA(LCD_TCON6_MARK, PJ23MD_011), PINMUX_DATA(IRQ3_PJ_MARK, PJ23MD_100), PINMUX_DATA(CTX1_MARK, PJ23MD_101), PINMUX_DATA(PJ22_DATA, PJ22MD_000), PINMUX_DATA(DV_DATA22_MARK, PJ22MD_001), - PINMUX_DATA(LCD_DATA22_MARK, PJ22MD_010), + PINMUX_DATA(LCD_DATA22_PJ22_MARK, PJ22MD_010), PINMUX_DATA(LCD_TCON5_MARK, PJ22MD_011), PINMUX_DATA(IRQ2_PJ_MARK, PJ22MD_100), PINMUX_DATA(CRX1_MARK, PJ22MD_101), @@ -1290,14 +1304,14 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(PJ21_DATA, PJ21MD_000), PINMUX_DATA(DV_DATA21_MARK, PJ21MD_001), - PINMUX_DATA(LCD_DATA21_MARK, PJ21MD_010), + PINMUX_DATA(LCD_DATA21_PJ21_MARK, PJ21MD_010), PINMUX_DATA(LCD_TCON4_MARK, PJ21MD_011), PINMUX_DATA(IRQ1_PJ_MARK, PJ21MD_100), PINMUX_DATA(CTX2_MARK, PJ21MD_101), PINMUX_DATA(PJ20_DATA, PJ20MD_000), PINMUX_DATA(DV_DATA20_MARK, PJ20MD_001), - PINMUX_DATA(LCD_DATA20_MARK, PJ20MD_010), + PINMUX_DATA(LCD_DATA20_PJ20_MARK, PJ20MD_010), PINMUX_DATA(LCD_TCON3_MARK, PJ20MD_011), PINMUX_DATA(IRQ0_PJ_MARK, PJ20MD_100), PINMUX_DATA(CRX2_MARK, PJ20MD_101), @@ -1305,7 +1319,7 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(PJ19_DATA, PJ19MD_000), PINMUX_DATA(DV_DATA19_MARK, PJ19MD_001), - PINMUX_DATA(LCD_DATA19_MARK, PJ19MD_010), + PINMUX_DATA(LCD_DATA19_PJ19_MARK, PJ19MD_010), PINMUX_DATA(MISO0_PJ19_MARK, PJ19MD_011), PINMUX_DATA(TIOC0D_MARK, PJ19MD_100), PINMUX_DATA(SIOFRXD_MARK, PJ19MD_101), @@ -1313,126 +1327,126 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(PJ18_DATA, PJ18MD_000), PINMUX_DATA(DV_DATA18_MARK, PJ18MD_001), - PINMUX_DATA(LCD_DATA18_MARK, PJ18MD_010), + PINMUX_DATA(LCD_DATA18_PJ18_MARK, PJ18MD_010), PINMUX_DATA(MOSI0_PJ18_MARK, PJ18MD_011), PINMUX_DATA(TIOC0C_MARK, PJ18MD_100), PINMUX_DATA(SIOFTXD_MARK, PJ18MD_101), PINMUX_DATA(PJ17_DATA, PJ17MD_000), PINMUX_DATA(DV_DATA17_MARK, PJ17MD_001), - PINMUX_DATA(LCD_DATA17_MARK, PJ17MD_010), + PINMUX_DATA(LCD_DATA17_PJ17_MARK, PJ17MD_010), PINMUX_DATA(SSL00_PJ17_MARK, PJ17MD_011), PINMUX_DATA(TIOC0B_MARK, PJ17MD_100), PINMUX_DATA(SIOFSYNC_MARK, PJ17MD_101), PINMUX_DATA(PJ16_DATA, PJ16MD_000), PINMUX_DATA(DV_DATA16_MARK, PJ16MD_001), - PINMUX_DATA(LCD_DATA16_MARK, PJ16MD_010), + PINMUX_DATA(LCD_DATA16_PJ16_MARK, PJ16MD_010), PINMUX_DATA(RSPCK0_PJ16_MARK, PJ16MD_011), PINMUX_DATA(TIOC0A_MARK, PJ16MD_100), PINMUX_DATA(SIOFSCK_MARK, PJ16MD_101), PINMUX_DATA(PJ15_DATA, PJ15MD_000), PINMUX_DATA(DV_DATA15_MARK, PJ15MD_001), - PINMUX_DATA(LCD_DATA15_MARK, PJ15MD_010), + PINMUX_DATA(LCD_DATA15_PJ15_MARK, PJ15MD_010), PINMUX_DATA(PINT7_PJ_MARK, PJ15MD_011), PINMUX_DATA(PWM2H_MARK, PJ15MD_100), PINMUX_DATA(TXD7_MARK, PJ15MD_101), PINMUX_DATA(PJ14_DATA, PJ14MD_000), PINMUX_DATA(DV_DATA14_MARK, PJ14MD_001), - PINMUX_DATA(LCD_DATA14_MARK, PJ14MD_010), + PINMUX_DATA(LCD_DATA14_PJ14_MARK, PJ14MD_010), PINMUX_DATA(PINT6_PJ_MARK, PJ14MD_011), PINMUX_DATA(PWM2G_MARK, PJ14MD_100), PINMUX_DATA(TXD6_MARK, PJ14MD_101), PINMUX_DATA(PJ13_DATA, PJ13MD_000), PINMUX_DATA(DV_DATA13_MARK, PJ13MD_001), - PINMUX_DATA(LCD_DATA13_MARK, PJ13MD_010), + PINMUX_DATA(LCD_DATA13_PJ13_MARK, PJ13MD_010), PINMUX_DATA(PINT5_PJ_MARK, PJ13MD_011), PINMUX_DATA(PWM2F_MARK, PJ13MD_100), PINMUX_DATA(TXD5_MARK, PJ13MD_101), PINMUX_DATA(PJ12_DATA, PJ12MD_000), PINMUX_DATA(DV_DATA12_MARK, PJ12MD_001), - PINMUX_DATA(LCD_DATA12_MARK, PJ12MD_010), + PINMUX_DATA(LCD_DATA12_PJ12_MARK, PJ12MD_010), PINMUX_DATA(PINT4_PJ_MARK, PJ12MD_011), PINMUX_DATA(PWM2E_MARK, PJ12MD_100), PINMUX_DATA(SCK7_MARK, PJ12MD_101), PINMUX_DATA(PJ11_DATA, PJ11MD_000), 
PINMUX_DATA(DV_DATA11_MARK, PJ11MD_001), - PINMUX_DATA(LCD_DATA11_MARK, PJ11MD_010), + PINMUX_DATA(LCD_DATA11_PJ11_MARK, PJ11MD_010), PINMUX_DATA(PINT3_PJ_MARK, PJ11MD_011), PINMUX_DATA(PWM2D_MARK, PJ11MD_100), PINMUX_DATA(SCK6_MARK, PJ11MD_101), PINMUX_DATA(PJ10_DATA, PJ10MD_000), PINMUX_DATA(DV_DATA10_MARK, PJ10MD_001), - PINMUX_DATA(LCD_DATA10_MARK, PJ10MD_010), + PINMUX_DATA(LCD_DATA10_PJ10_MARK, PJ10MD_010), PINMUX_DATA(PINT2_PJ_MARK, PJ10MD_011), PINMUX_DATA(PWM2C_MARK, PJ10MD_100), PINMUX_DATA(SCK5_MARK, PJ10MD_101), PINMUX_DATA(PJ9_DATA, PJ9MD_000), PINMUX_DATA(DV_DATA9_MARK, PJ9MD_001), - PINMUX_DATA(LCD_DATA9_MARK, PJ9MD_010), + PINMUX_DATA(LCD_DATA9_PJ9_MARK, PJ9MD_010), PINMUX_DATA(PINT1_PJ_MARK, PJ9MD_011), PINMUX_DATA(PWM2B_MARK, PJ9MD_100), PINMUX_DATA(RTS5_MARK, PJ9MD_101), PINMUX_DATA(PJ8_DATA, PJ8MD_000), PINMUX_DATA(DV_DATA8_MARK, PJ8MD_001), - PINMUX_DATA(LCD_DATA8_MARK, PJ8MD_010), + PINMUX_DATA(LCD_DATA8_PJ8_MARK, PJ8MD_010), PINMUX_DATA(PINT0_PJ_MARK, PJ8MD_011), PINMUX_DATA(PWM2A_MARK, PJ8MD_100), PINMUX_DATA(CTS5_MARK, PJ8MD_101), PINMUX_DATA(PJ7_DATA, PJ7MD_000), PINMUX_DATA(DV_DATA7_MARK, PJ7MD_001), - PINMUX_DATA(LCD_DATA7_MARK, PJ7MD_010), + PINMUX_DATA(LCD_DATA7_PJ7_MARK, PJ7MD_010), PINMUX_DATA(SD_D2_MARK, PJ7MD_011), PINMUX_DATA(PWM1H_MARK, PJ7MD_100), PINMUX_DATA(PJ6_DATA, PJ6MD_000), PINMUX_DATA(DV_DATA6_MARK, PJ6MD_001), - PINMUX_DATA(LCD_DATA6_MARK, PJ6MD_010), + PINMUX_DATA(LCD_DATA6_PJ6_MARK, PJ6MD_010), PINMUX_DATA(SD_D3_MARK, PJ6MD_011), PINMUX_DATA(PWM1G_MARK, PJ6MD_100), PINMUX_DATA(PJ5_DATA, PJ5MD_000), PINMUX_DATA(DV_DATA5_MARK, PJ5MD_001), - PINMUX_DATA(LCD_DATA5_MARK, PJ5MD_010), + PINMUX_DATA(LCD_DATA5_PJ5_MARK, PJ5MD_010), PINMUX_DATA(SD_CMD_MARK, PJ5MD_011), PINMUX_DATA(PWM1F_MARK, PJ5MD_100), PINMUX_DATA(PJ4_DATA, PJ4MD_000), PINMUX_DATA(DV_DATA4_MARK, PJ4MD_001), - PINMUX_DATA(LCD_DATA4_MARK, PJ4MD_010), + PINMUX_DATA(LCD_DATA4_PJ4_MARK, PJ4MD_010), PINMUX_DATA(SD_CLK_MARK, PJ4MD_011), PINMUX_DATA(PWM1E_MARK, PJ4MD_100), PINMUX_DATA(PJ3_DATA, PJ3MD_000), PINMUX_DATA(DV_DATA3_MARK, PJ3MD_001), - PINMUX_DATA(LCD_DATA3_MARK, PJ3MD_010), + PINMUX_DATA(LCD_DATA3_PJ3_MARK, PJ3MD_010), PINMUX_DATA(SD_D0_MARK, PJ3MD_011), PINMUX_DATA(PWM1D_MARK, PJ3MD_100), PINMUX_DATA(PJ2_DATA, PJ2MD_000), PINMUX_DATA(DV_DATA2_MARK, PJ2MD_001), - PINMUX_DATA(LCD_DATA2_MARK, PJ2MD_010), + PINMUX_DATA(LCD_DATA2_PJ2_MARK, PJ2MD_010), PINMUX_DATA(SD_D1_MARK, PJ2MD_011), PINMUX_DATA(PWM1C_MARK, PJ2MD_100), PINMUX_DATA(PJ1_DATA, PJ1MD_000), PINMUX_DATA(DV_DATA1_MARK, PJ1MD_001), - PINMUX_DATA(LCD_DATA1_MARK, PJ1MD_010), + PINMUX_DATA(LCD_DATA1_PJ1_MARK, PJ1MD_010), PINMUX_DATA(SD_WP_MARK, PJ1MD_011), PINMUX_DATA(PWM1B_MARK, PJ1MD_100), PINMUX_DATA(PJ0_DATA, PJ0MD_000), PINMUX_DATA(DV_DATA0_MARK, PJ0MD_001), - PINMUX_DATA(LCD_DATA0_MARK, PJ0MD_010), + PINMUX_DATA(LCD_DATA0_PJ0_MARK, PJ0MD_010), PINMUX_DATA(SD_CD_MARK, PJ0MD_011), PINMUX_DATA(PWM1A_MARK, PJ0MD_100), }; @@ -1877,30 +1891,55 @@ static struct pinmux_gpio pinmux_gpios[] = { PINMUX_GPIO(GPIO_FN_LCD_HSYNC, LCD_HSYNC_MARK), PINMUX_GPIO(GPIO_FN_LCD_DE, LCD_DE_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA23, LCD_DATA23_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA22, LCD_DATA22_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA21, LCD_DATA21_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA20, LCD_DATA20_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA19, LCD_DATA19_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA18, LCD_DATA18_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA17, LCD_DATA17_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA16, LCD_DATA16_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA15, 
LCD_DATA15_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA14, LCD_DATA14_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA13, LCD_DATA13_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA12, LCD_DATA12_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA11, LCD_DATA11_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA10, LCD_DATA10_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA9, LCD_DATA9_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA8, LCD_DATA8_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA7, LCD_DATA7_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA6, LCD_DATA6_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA5, LCD_DATA5_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA4, LCD_DATA4_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA3, LCD_DATA3_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA2, LCD_DATA2_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA1, LCD_DATA1_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA0, LCD_DATA0_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA23_PG23, LCD_DATA23_PG23_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA22_PG22, LCD_DATA22_PG22_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA21_PG21, LCD_DATA21_PG21_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA20_PG20, LCD_DATA20_PG20_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA19_PG19, LCD_DATA19_PG19_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA18_PG18, LCD_DATA18_PG18_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA17_PG17, LCD_DATA17_PG17_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA16_PG16, LCD_DATA16_PG16_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA15_PG15, LCD_DATA15_PG15_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA14_PG14, LCD_DATA14_PG14_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA13_PG13, LCD_DATA13_PG13_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA12_PG12, LCD_DATA12_PG12_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA11_PG11, LCD_DATA11_PG11_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA10_PG10, LCD_DATA10_PG10_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA9_PG9, LCD_DATA9_PG9_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA8_PG8, LCD_DATA8_PG8_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA7_PG7, LCD_DATA7_PG7_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA6_PG6, LCD_DATA6_PG6_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA5_PG5, LCD_DATA5_PG5_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA4_PG4, LCD_DATA4_PG4_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA3_PG3, LCD_DATA3_PG3_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA2_PG2, LCD_DATA2_PG2_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA1_PG1, LCD_DATA1_PG1_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA0_PG0, LCD_DATA0_PG0_MARK), + + PINMUX_GPIO(GPIO_FN_LCD_DATA23_PJ23, LCD_DATA23_PJ23_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA22_PJ22, LCD_DATA22_PJ22_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA21_PJ21, LCD_DATA21_PJ21_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA20_PJ20, LCD_DATA20_PJ20_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA19_PJ19, LCD_DATA19_PJ19_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA18_PJ18, LCD_DATA18_PJ18_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA17_PJ17, LCD_DATA17_PJ17_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA16_PJ16, LCD_DATA16_PJ16_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA15_PJ15, LCD_DATA15_PJ15_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA14_PJ14, LCD_DATA14_PJ14_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA13_PJ13, LCD_DATA13_PJ13_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA12_PJ12, LCD_DATA12_PJ12_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA11_PJ11, LCD_DATA11_PJ11_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA10_PJ10, LCD_DATA10_PJ10_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA9_PJ9, LCD_DATA9_PJ9_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA8_PJ8, LCD_DATA8_PJ8_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA7_PJ7, LCD_DATA7_PJ7_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA6_PJ6, LCD_DATA6_PJ6_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA5_PJ5, LCD_DATA5_PJ5_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA4_PJ4, LCD_DATA4_PJ4_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA3_PJ3, LCD_DATA3_PJ3_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA2_PJ2, 
LCD_DATA2_PJ2_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA1_PJ1, LCD_DATA1_PJ1_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA0_PJ0, LCD_DATA0_PJ0_MARK), PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK), }; diff --git a/trunk/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/trunk/arch/sh/kernel/cpu/sh4a/clock-sh7724.c index c87e78f73234..5f30f805d2f2 100644 --- a/trunk/arch/sh/kernel/cpu/sh4a/clock-sh7724.c +++ b/trunk/arch/sh/kernel/cpu/sh4a/clock-sh7724.c @@ -334,8 +334,8 @@ static struct clk_lookup lookups[] = { CLKDEV_CON_ID("tpu0", &mstp_clks[HWBLK_TPU]), CLKDEV_CON_ID("irda0", &mstp_clks[HWBLK_IRDA]), CLKDEV_CON_ID("tsif0", &mstp_clks[HWBLK_TSIF]), - CLKDEV_CON_ID("usb1", &mstp_clks[HWBLK_USB1]), - CLKDEV_CON_ID("usb0", &mstp_clks[HWBLK_USB0]), + CLKDEV_DEV_ID("renesas_usbhs.1", &mstp_clks[HWBLK_USB1]), + CLKDEV_DEV_ID("renesas_usbhs.0", &mstp_clks[HWBLK_USB0]), CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]), CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[HWBLK_SDHI0]), CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[HWBLK_SDHI1]), diff --git a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7722.c index 65786c7f5ded..6a868b091c2d 100644 --- a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7722.c +++ b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7722.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7757.c index a7708425afa9..4a2f357f4df8 100644 --- a/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7757.c +++ b/trunk/arch/sh/kernel/cpu/sh4a/setup-sh7757.c @@ -216,6 +216,20 @@ static const struct sh_dmae_slave_config sh7757_dmae1_slaves[] = { TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x42, }, + { + .slave_id = SHDMA_SLAVE_RSPI_TX, + .addr = 0xfe480004, + .chcr = SM_INC | 0x800 | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_16BIT), + .mid_rid = 0xc1, + }, + { + .slave_id = SHDMA_SLAVE_RSPI_RX, + .addr = 0xfe480004, + .chcr = DM_INC | 0x800 | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_16BIT), + .mid_rid = 0xc2, + }, }; static const struct sh_dmae_slave_config sh7757_dmae2_slaves[] = { diff --git a/trunk/arch/sh/kernel/setup.c b/trunk/arch/sh/kernel/setup.c index 7b57bf1dc855..ebe7a7d97215 100644 --- a/trunk/arch/sh/kernel/setup.c +++ b/trunk/arch/sh/kernel/setup.c @@ -273,7 +273,7 @@ void __init setup_arch(char **cmdline_p) data_resource.start = virt_to_phys(_etext); data_resource.end = virt_to_phys(_edata)-1; bss_resource.start = virt_to_phys(__bss_start); - bss_resource.end = virt_to_phys(_ebss)-1; + bss_resource.end = virt_to_phys(__bss_stop)-1; #ifdef CONFIG_CMDLINE_OVERWRITE strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line)); diff --git a/trunk/arch/sh/kernel/sh_ksyms_32.c b/trunk/arch/sh/kernel/sh_ksyms_32.c index 3896f26efa4a..2a0a596ebf67 100644 --- a/trunk/arch/sh/kernel/sh_ksyms_32.c +++ b/trunk/arch/sh/kernel/sh_ksyms_32.c @@ -19,7 +19,6 @@ EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial_copy_generic); EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(__clear_user); -EXPORT_SYMBOL(_ebss); EXPORT_SYMBOL(empty_zero_page); #define DECLARE_EXPORT(name) \ diff --git a/trunk/arch/sh/kernel/vmlinux.lds.S b/trunk/arch/sh/kernel/vmlinux.lds.S index c98905f71e28..db88cbf9eafd 100644 --- a/trunk/arch/sh/kernel/vmlinux.lds.S +++ b/trunk/arch/sh/kernel/vmlinux.lds.S @@ -78,7 +78,6 @@ SECTIONS . = ALIGN(PAGE_SIZE); __init_end = .; BSS_SECTION(0, PAGE_SIZE, 4) - _ebss = .; /* uClinux MTD sucks */ _end = . 
; STABS_DEBUG diff --git a/trunk/arch/sh/lib/mcount.S b/trunk/arch/sh/lib/mcount.S index 84a57761f17e..60164e65d665 100644 --- a/trunk/arch/sh/lib/mcount.S +++ b/trunk/arch/sh/lib/mcount.S @@ -39,7 +39,7 @@ * * Make sure the stack pointer contains a valid address. Valid * addresses for kernel stacks are anywhere after the bss - * (after _ebss) and anywhere in init_thread_union (init_stack). + * (after __bss_stop) and anywhere in init_thread_union (init_stack). */ #define STACK_CHECK() \ mov #(THREAD_SIZE >> 10), r0; \ @@ -60,7 +60,7 @@ cmp/hi r2, r1; \ bf stack_panic; \ \ - /* If sp > _ebss then we're OK. */ \ + /* If sp > __bss_stop then we're OK. */ \ mov.l .L_ebss, r1; \ cmp/hi r1, r15; \ bt 1f; \ @@ -70,7 +70,7 @@ cmp/hs r1, r15; \ bf stack_panic; \ \ - /* If sp > init_stack && sp < _ebss, not OK. */ \ + /* If sp > init_stack && sp < __bss_stop, not OK. */ \ add r0, r1; \ cmp/hs r1, r15; \ bt stack_panic; \ @@ -292,8 +292,6 @@ stack_panic: nop .align 2 -.L_ebss: - .long _ebss .L_init_thread_union: .long init_thread_union .Lpanic: diff --git a/trunk/arch/sh/mm/fault.c b/trunk/arch/sh/mm/fault.c index 1fc25d85e515..3bdc1ad9a341 100644 --- a/trunk/arch/sh/mm/fault.c +++ b/trunk/arch/sh/mm/fault.c @@ -58,11 +58,15 @@ static void show_pte(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; - if (mm) + if (mm) { pgd = mm->pgd; - else + } else { pgd = get_TTB(); + if (unlikely(!pgd)) + pgd = swapper_pg_dir; + } + printk(KERN_ALERT "pgd = %p\n", pgd); pgd += pgd_index(addr); printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr, diff --git a/trunk/arch/sparc/kernel/sys_sparc_64.c b/trunk/arch/sparc/kernel/sys_sparc_64.c index 0dc1f5786081..11c6c9603e71 100644 --- a/trunk/arch/sparc/kernel/sys_sparc_64.c +++ b/trunk/arch/sparc/kernel/sys_sparc_64.c @@ -502,12 +502,12 @@ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality) { int ret; - if (current->personality == PER_LINUX32 && - personality == PER_LINUX) - personality = PER_LINUX32; + if (personality(current->personality) == PER_LINUX32 && + personality(personality) == PER_LINUX) + personality |= PER_LINUX32; ret = sys_personality(personality); - if (ret == PER_LINUX32) - ret = PER_LINUX; + if (personality(ret) == PER_LINUX32) + ret &= ~PER_LINUX32; return ret; } diff --git a/trunk/arch/sparc/mm/init_64.c b/trunk/arch/sparc/mm/init_64.c index 6026fdd1b2ed..d58edf5fefdb 100644 --- a/trunk/arch/sparc/mm/init_64.c +++ b/trunk/arch/sparc/mm/init_64.c @@ -2020,6 +2020,9 @@ EXPORT_SYMBOL(_PAGE_CACHE); #ifdef CONFIG_SPARSEMEM_VMEMMAP unsigned long vmemmap_table[VMEMMAP_SIZE]; +static long __meminitdata addr_start, addr_end; +static int __meminitdata node_start; + int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) { unsigned long vstart = (unsigned long) start; @@ -2050,15 +2053,30 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) *vmem_pp = pte_base | __pa(block); - printk(KERN_INFO "[%p-%p] page_structs=%lu " - "node=%d entry=%lu/%lu\n", start, block, nr, - node, - addr >> VMEMMAP_CHUNK_SHIFT, - VMEMMAP_SIZE); + /* check to see if we have contiguous blocks */ + if (addr_end != addr || node_start != node) { + if (addr_start) + printk(KERN_DEBUG " [%lx-%lx] on node %d\n", + addr_start, addr_end-1, node_start); + addr_start = addr; + node_start = node; + } + addr_end = addr + VMEMMAP_CHUNK; } } return 0; } + +void __meminit vmemmap_populate_print_last(void) +{ + if (addr_start) { + printk(KERN_DEBUG " [%lx-%lx] on node %d\n", + addr_start, addr_end-1, node_start); + addr_start = 0; + 
addr_end = 0; + node_start = 0; + } +} #endif /* CONFIG_SPARSEMEM_VMEMMAP */ static void prot_init_common(unsigned long page_none, diff --git a/trunk/arch/um/defconfig b/trunk/arch/um/defconfig index fec0d5d27460..08107a795062 100644 --- a/trunk/arch/um/defconfig +++ b/trunk/arch/um/defconfig @@ -163,7 +163,7 @@ CONFIG_CGROUP_SCHED=y CONFIG_FAIR_GROUP_SCHED=y # CONFIG_CFS_BANDWIDTH is not set # CONFIG_RT_GROUP_SCHED is not set -CONFIG_BLK_CGROUP=m +CONFIG_BLK_CGROUP=y # CONFIG_DEBUG_BLK_CGROUP is not set # CONFIG_CHECKPOINT_RESTORE is not set CONFIG_NAMESPACES=y diff --git a/trunk/arch/um/drivers/chan_kern.c b/trunk/arch/um/drivers/chan_kern.c index 45e248c2f43c..87eebfe03c61 100644 --- a/trunk/arch/um/drivers/chan_kern.c +++ b/trunk/arch/um/drivers/chan_kern.c @@ -150,9 +150,11 @@ void chan_enable_winch(struct chan *chan, struct tty_struct *tty) static void line_timer_cb(struct work_struct *work) { struct line *line = container_of(work, struct line, task.work); + struct tty_struct *tty = tty_port_tty_get(&line->port); if (!line->throttled) - chan_interrupt(line, line->tty, line->driver->read_irq); + chan_interrupt(line, tty, line->driver->read_irq); + tty_kref_put(tty); } int enable_chan(struct line *line) diff --git a/trunk/arch/um/drivers/line.c b/trunk/arch/um/drivers/line.c index ac9d25c8dc01..bbaf2c59830a 100644 --- a/trunk/arch/um/drivers/line.c +++ b/trunk/arch/um/drivers/line.c @@ -19,9 +19,11 @@ static irqreturn_t line_interrupt(int irq, void *data) { struct chan *chan = data; struct line *line = chan->line; + struct tty_struct *tty = tty_port_tty_get(&line->port); if (line) - chan_interrupt(line, line->tty, irq); + chan_interrupt(line, tty, irq); + tty_kref_put(tty); return IRQ_HANDLED; } @@ -219,92 +221,6 @@ void line_set_termios(struct tty_struct *tty, struct ktermios * old) /* nothing */ } -static const struct { - int cmd; - char *level; - char *name; -} tty_ioctls[] = { - /* don't print these, they flood the log ... 
*/ - { TCGETS, NULL, "TCGETS" }, - { TCSETS, NULL, "TCSETS" }, - { TCSETSW, NULL, "TCSETSW" }, - { TCFLSH, NULL, "TCFLSH" }, - { TCSBRK, NULL, "TCSBRK" }, - - /* general tty stuff */ - { TCSETSF, KERN_DEBUG, "TCSETSF" }, - { TCGETA, KERN_DEBUG, "TCGETA" }, - { TIOCMGET, KERN_DEBUG, "TIOCMGET" }, - { TCSBRKP, KERN_DEBUG, "TCSBRKP" }, - { TIOCMSET, KERN_DEBUG, "TIOCMSET" }, - - /* linux-specific ones */ - { TIOCLINUX, KERN_INFO, "TIOCLINUX" }, - { KDGKBMODE, KERN_INFO, "KDGKBMODE" }, - { KDGKBTYPE, KERN_INFO, "KDGKBTYPE" }, - { KDSIGACCEPT, KERN_INFO, "KDSIGACCEPT" }, -}; - -int line_ioctl(struct tty_struct *tty, unsigned int cmd, - unsigned long arg) -{ - int ret; - int i; - - ret = 0; - switch(cmd) { -#ifdef TIOCGETP - case TIOCGETP: - case TIOCSETP: - case TIOCSETN: -#endif -#ifdef TIOCGETC - case TIOCGETC: - case TIOCSETC: -#endif -#ifdef TIOCGLTC - case TIOCGLTC: - case TIOCSLTC: -#endif - /* Note: these are out of date as we now have TCGETS2 etc but this - whole lot should probably go away */ - case TCGETS: - case TCSETSF: - case TCSETSW: - case TCSETS: - case TCGETA: - case TCSETAF: - case TCSETAW: - case TCSETA: - case TCXONC: - case TCFLSH: - case TIOCOUTQ: - case TIOCINQ: - case TIOCGLCKTRMIOS: - case TIOCSLCKTRMIOS: - case TIOCPKT: - case TIOCGSOFTCAR: - case TIOCSSOFTCAR: - return -ENOIOCTLCMD; -#if 0 - case TCwhatever: - /* do something */ - break; -#endif - default: - for (i = 0; i < ARRAY_SIZE(tty_ioctls); i++) - if (cmd == tty_ioctls[i].cmd) - break; - if (i == ARRAY_SIZE(tty_ioctls)) { - printk(KERN_ERR "%s: %s: unknown ioctl: 0x%x\n", - __func__, tty->name, cmd); - } - ret = -ENOIOCTLCMD; - break; - } - return ret; -} - void line_throttle(struct tty_struct *tty) { struct line *line = tty->driver_data; @@ -333,7 +249,7 @@ static irqreturn_t line_write_interrupt(int irq, void *data) { struct chan *chan = data; struct line *line = chan->line; - struct tty_struct *tty = line->tty; + struct tty_struct *tty; int err; /* @@ -352,10 +268,13 @@ static irqreturn_t line_write_interrupt(int irq, void *data) } spin_unlock(&line->lock); + tty = tty_port_tty_get(&line->port); if (tty == NULL) return IRQ_NONE; tty_wakeup(tty); + tty_kref_put(tty); + return IRQ_HANDLED; } @@ -377,43 +296,14 @@ int line_setup_irq(int fd, int input, int output, struct line *line, void *data) return err; } -/* - * Normally, a driver like this can rely mostly on the tty layer - * locking, particularly when it comes to the driver structure. - * However, in this case, mconsole requests can come in "from the - * side", and race with opens and closes. - * - * mconsole config requests will want to be sure the device isn't in - * use, and get_config, open, and close will want a stable - * configuration. The checking and modification of the configuration - * is done under a spinlock. Checking whether the device is in use is - * line->tty->count > 1, also under the spinlock. - * - * line->count serves to decide whether the device should be enabled or - * disabled on the host. If it's equal to 0, then we are doing the - * first open or last close. Otherwise, open and close just return. 
- */ - -int line_open(struct line *lines, struct tty_struct *tty) +static int line_activate(struct tty_port *port, struct tty_struct *tty) { - struct line *line = &lines[tty->index]; - int err = -ENODEV; - - mutex_lock(&line->count_lock); - if (!line->valid) - goto out_unlock; - - err = 0; - if (line->count++) - goto out_unlock; - - BUG_ON(tty->driver_data); - tty->driver_data = line; - line->tty = tty; + int ret; + struct line *line = tty->driver_data; - err = enable_chan(line); - if (err) /* line_close() will be called by our caller */ - goto out_unlock; + ret = enable_chan(line); + if (ret) + return ret; if (!line->sigio) { chan_enable_winch(line->chan_out, tty); @@ -421,44 +311,60 @@ int line_open(struct line *lines, struct tty_struct *tty) } chan_window_size(line, &tty->winsize.ws_row, - &tty->winsize.ws_col); -out_unlock: - mutex_unlock(&line->count_lock); - return err; + &tty->winsize.ws_col); + + return 0; } -static void unregister_winch(struct tty_struct *tty); +static const struct tty_port_operations line_port_ops = { + .activate = line_activate, +}; -void line_close(struct tty_struct *tty, struct file * filp) +int line_open(struct tty_struct *tty, struct file *filp) { struct line *line = tty->driver_data; - /* - * If line_open fails (and tty->driver_data is never set), - * tty_open will call line_close. So just return in this case. - */ - if (line == NULL) - return; + return tty_port_open(&line->port, tty, filp); +} - /* We ignore the error anyway! */ - flush_buffer(line); +int line_install(struct tty_driver *driver, struct tty_struct *tty, + struct line *line) +{ + int ret; - mutex_lock(&line->count_lock); - BUG_ON(!line->valid); + ret = tty_standard_install(driver, tty); + if (ret) + return ret; - if (--line->count) - goto out_unlock; + tty->driver_data = line; - line->tty = NULL; - tty->driver_data = NULL; + return 0; +} + +static void unregister_winch(struct tty_struct *tty); + +void line_cleanup(struct tty_struct *tty) +{ + struct line *line = tty->driver_data; if (line->sigio) { unregister_winch(tty); line->sigio = 0; } +} + +void line_close(struct tty_struct *tty, struct file * filp) +{ + struct line *line = tty->driver_data; -out_unlock: - mutex_unlock(&line->count_lock); + tty_port_close(&line->port, tty, filp); +} + +void line_hangup(struct tty_struct *tty) +{ + struct line *line = tty->driver_data; + + tty_port_hangup(&line->port); } void close_lines(struct line *lines, int nlines) @@ -476,9 +382,7 @@ int setup_one_line(struct line *lines, int n, char *init, struct tty_driver *driver = line->driver->driver; int err = -EINVAL; - mutex_lock(&line->count_lock); - - if (line->count) { + if (line->port.count) { *error_out = "Device is already open"; goto out; } @@ -519,7 +423,6 @@ int setup_one_line(struct line *lines, int n, char *init, } } out: - mutex_unlock(&line->count_lock); return err; } @@ -607,13 +510,17 @@ int line_get_config(char *name, struct line *lines, unsigned int num, char *str, line = &lines[dev]; - mutex_lock(&line->count_lock); if (!line->valid) CONFIG_CHUNK(str, size, n, "none", 1); - else if (line->tty == NULL) - CONFIG_CHUNK(str, size, n, line->init_str, 1); - else n = chan_config_string(line, str, size, error_out); - mutex_unlock(&line->count_lock); + else { + struct tty_struct *tty = tty_port_tty_get(&line->port); + if (tty == NULL) { + CONFIG_CHUNK(str, size, n, line->init_str, 1); + } else { + n = chan_config_string(line, str, size, error_out); + tty_kref_put(tty); + } + } return n; } @@ -663,8 +570,9 @@ int register_lines(struct line_driver 
*line_driver, driver->init_termios = tty_std_termios; for (i = 0; i < nlines; i++) { + tty_port_init(&lines[i].port); + lines[i].port.ops = &line_port_ops; spin_lock_init(&lines[i].lock); - mutex_init(&lines[i].count_lock); lines[i].driver = line_driver; INIT_LIST_HEAD(&lines[i].chan_list); } diff --git a/trunk/arch/um/drivers/line.h b/trunk/arch/um/drivers/line.h index 0a1834719dba..bae95611e7ab 100644 --- a/trunk/arch/um/drivers/line.h +++ b/trunk/arch/um/drivers/line.h @@ -32,9 +32,7 @@ struct line_driver { }; struct line { - struct tty_struct *tty; - struct mutex count_lock; - unsigned long count; + struct tty_port port; int valid; char *init_str; @@ -59,7 +57,11 @@ struct line { }; extern void line_close(struct tty_struct *tty, struct file * filp); -extern int line_open(struct line *lines, struct tty_struct *tty); +extern int line_open(struct tty_struct *tty, struct file *filp); +extern int line_install(struct tty_driver *driver, struct tty_struct *tty, + struct line *line); +extern void line_cleanup(struct tty_struct *tty); +extern void line_hangup(struct tty_struct *tty); extern int line_setup(char **conf, unsigned nlines, char **def, char *init, char *name); extern int line_write(struct tty_struct *tty, const unsigned char *buf, @@ -70,8 +72,6 @@ extern int line_chars_in_buffer(struct tty_struct *tty); extern void line_flush_buffer(struct tty_struct *tty); extern void line_flush_chars(struct tty_struct *tty); extern int line_write_room(struct tty_struct *tty); -extern int line_ioctl(struct tty_struct *tty, unsigned int cmd, - unsigned long arg); extern void line_throttle(struct tty_struct *tty); extern void line_unthrottle(struct tty_struct *tty); diff --git a/trunk/arch/um/drivers/ssl.c b/trunk/arch/um/drivers/ssl.c index e09801a1327b..7e86f0070123 100644 --- a/trunk/arch/um/drivers/ssl.c +++ b/trunk/arch/um/drivers/ssl.c @@ -87,40 +87,13 @@ static int ssl_remove(int n, char **error_out) error_out); } -static int ssl_open(struct tty_struct *tty, struct file *filp) -{ - int err = line_open(serial_lines, tty); - - if (err) - printk(KERN_ERR "Failed to open serial line %d, err = %d\n", - tty->index, err); - - return err; -} - -#if 0 -static void ssl_flush_buffer(struct tty_struct *tty) -{ - return; -} - -static void ssl_stop(struct tty_struct *tty) -{ - printk(KERN_ERR "Someone should implement ssl_stop\n"); -} - -static void ssl_start(struct tty_struct *tty) -{ - printk(KERN_ERR "Someone should implement ssl_start\n"); -} - -void ssl_hangup(struct tty_struct *tty) +static int ssl_install(struct tty_driver *driver, struct tty_struct *tty) { + return line_install(driver, tty, &serial_lines[tty->index]); } -#endif static const struct tty_operations ssl_ops = { - .open = ssl_open, + .open = line_open, .close = line_close, .write = line_write, .put_char = line_put_char, @@ -129,14 +102,11 @@ static const struct tty_operations ssl_ops = { .flush_buffer = line_flush_buffer, .flush_chars = line_flush_chars, .set_termios = line_set_termios, - .ioctl = line_ioctl, .throttle = line_throttle, .unthrottle = line_unthrottle, -#if 0 - .stop = ssl_stop, - .start = ssl_start, - .hangup = ssl_hangup, -#endif + .install = ssl_install, + .cleanup = line_cleanup, + .hangup = line_hangup, }; /* Changed by ssl_init and referenced by ssl_exit, which are both serialized diff --git a/trunk/arch/um/drivers/stdio_console.c b/trunk/arch/um/drivers/stdio_console.c index 7663541c372e..929b99a261f3 100644 --- a/trunk/arch/um/drivers/stdio_console.c +++ b/trunk/arch/um/drivers/stdio_console.c @@ -89,21 +89,17 @@ 
static int con_remove(int n, char **error_out) return line_remove(vts, ARRAY_SIZE(vts), n, error_out); } -static int con_open(struct tty_struct *tty, struct file *filp) -{ - int err = line_open(vts, tty); - if (err) - printk(KERN_ERR "Failed to open console %d, err = %d\n", - tty->index, err); - - return err; -} - /* Set in an initcall, checked in an exitcall */ static int con_init_done = 0; +static int con_install(struct tty_driver *driver, struct tty_struct *tty) +{ + return line_install(driver, tty, &vts[tty->index]); +} + static const struct tty_operations console_ops = { - .open = con_open, + .open = line_open, + .install = con_install, .close = line_close, .write = line_write, .put_char = line_put_char, @@ -112,9 +108,10 @@ static const struct tty_operations console_ops = { .flush_buffer = line_flush_buffer, .flush_chars = line_flush_chars, .set_termios = line_set_termios, - .ioctl = line_ioctl, .throttle = line_throttle, .unthrottle = line_unthrottle, + .cleanup = line_cleanup, + .hangup = line_hangup, }; static void uml_console_write(struct console *console, const char *string, diff --git a/trunk/arch/um/drivers/ubd_kern.c b/trunk/arch/um/drivers/ubd_kern.c index 20505cafa299..0643e5bc9f41 100644 --- a/trunk/arch/um/drivers/ubd_kern.c +++ b/trunk/arch/um/drivers/ubd_kern.c @@ -514,7 +514,7 @@ static inline int ubd_file_size(struct ubd *ubd_dev, __u64 *size_out) goto out; } - fd = os_open_file(ubd_dev->file, global_openflags, 0); + fd = os_open_file(ubd_dev->file, of_read(OPENFLAGS()), 0); if (fd < 0) return fd; diff --git a/trunk/arch/um/include/asm/ptrace-generic.h b/trunk/arch/um/include/asm/ptrace-generic.h index e786a6a3ec5e..442f1d025dc2 100644 --- a/trunk/arch/um/include/asm/ptrace-generic.h +++ b/trunk/arch/um/include/asm/ptrace-generic.h @@ -37,6 +37,8 @@ extern int putreg(struct task_struct *child, int regno, unsigned long value); extern int arch_copy_tls(struct task_struct *new); extern void clear_flushed_tls(struct task_struct *task); +extern void syscall_trace_enter(struct pt_regs *regs); +extern void syscall_trace_leave(struct pt_regs *regs); #endif diff --git a/trunk/arch/um/include/shared/as-layout.h b/trunk/arch/um/include/shared/as-layout.h index 896e16602176..86daa5461815 100644 --- a/trunk/arch/um/include/shared/as-layout.h +++ b/trunk/arch/um/include/shared/as-layout.h @@ -60,7 +60,8 @@ extern unsigned long host_task_size; extern int linux_main(int argc, char **argv); -extern void (*sig_info[])(int, struct uml_pt_regs *); +struct siginfo; +extern void (*sig_info[])(int, struct siginfo *si, struct uml_pt_regs *); #endif diff --git a/trunk/arch/um/include/shared/irq_user.h b/trunk/arch/um/include/shared/irq_user.h index c6c784df2673..2b6d703925b5 100644 --- a/trunk/arch/um/include/shared/irq_user.h +++ b/trunk/arch/um/include/shared/irq_user.h @@ -20,7 +20,8 @@ struct irq_fd { enum { IRQ_READ, IRQ_WRITE }; -extern void sigio_handler(int sig, struct uml_pt_regs *regs); +struct siginfo; +extern void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs); extern void free_irq_by_fd(int fd); extern void reactivate_fd(int fd, int irqnum); extern void deactivate_fd(int fd, int irqnum); diff --git a/trunk/arch/um/include/shared/kern_util.h b/trunk/arch/um/include/shared/kern_util.h index 00965d06d2ca..af6b6dc868ba 100644 --- a/trunk/arch/um/include/shared/kern_util.h +++ b/trunk/arch/um/include/shared/kern_util.h @@ -9,6 +9,8 @@ #include "sysdep/ptrace.h" #include "sysdep/faultinfo.h" +struct siginfo; + extern int uml_exitcode; extern int ncpus; @@ 
-22,7 +24,7 @@ extern void free_stack(unsigned long stack, int order); extern int do_signal(void); extern void interrupt_end(void); -extern void relay_signal(int sig, struct uml_pt_regs *regs); +extern void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs); extern unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, struct uml_pt_regs *regs); @@ -33,9 +35,8 @@ extern unsigned int do_IRQ(int irq, struct uml_pt_regs *regs); extern int smp_sigio_handler(void); extern void initial_thread_cb(void (*proc)(void *), void *arg); extern int is_syscall(unsigned long addr); -extern void timer_handler(int sig, struct uml_pt_regs *regs); -extern void timer_handler(int sig, struct uml_pt_regs *regs); +extern void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs); extern int start_uml(void); extern void paging_init(void); @@ -59,9 +60,9 @@ extern unsigned long from_irq_stack(int nested); extern void syscall_trace(struct uml_pt_regs *regs, int entryexit); extern int singlestepping(void *t); -extern void segv_handler(int sig, struct uml_pt_regs *regs); -extern void bus_handler(int sig, struct uml_pt_regs *regs); -extern void winch(int sig, struct uml_pt_regs *regs); +extern void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs); +extern void bus_handler(int sig, struct siginfo *si, struct uml_pt_regs *regs); +extern void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs); extern void fatal_sigsegv(void) __attribute__ ((noreturn)); diff --git a/trunk/arch/um/kernel/irq.c b/trunk/arch/um/kernel/irq.c index 00506c3d5d6e..9883026f0730 100644 --- a/trunk/arch/um/kernel/irq.c +++ b/trunk/arch/um/kernel/irq.c @@ -30,7 +30,7 @@ static struct irq_fd **last_irq_ptr = &active_fds; extern void free_irqs(void); -void sigio_handler(int sig, struct uml_pt_regs *regs) +void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs) { struct irq_fd *irq_fd; int n; diff --git a/trunk/arch/um/kernel/process.c b/trunk/arch/um/kernel/process.c index ccb9a9d283f1..57fc7028714a 100644 --- a/trunk/arch/um/kernel/process.c +++ b/trunk/arch/um/kernel/process.c @@ -151,12 +151,10 @@ void new_thread_handler(void) * 0 if it just exits */ n = run_kernel_thread(fn, arg, ¤t->thread.exec_buf); - if (n == 1) { - /* Handle any immediate reschedules or signals */ - interrupt_end(); + if (n == 1) userspace(¤t->thread.regs.regs); - } - else do_exit(0); + else + do_exit(0); } /* Called magically, see new_thread_handler above */ @@ -175,9 +173,6 @@ void fork_handler(void) current->thread.prev_sched = NULL; - /* Handle any immediate reschedules or signals */ - interrupt_end(); - userspace(¤t->thread.regs.regs); } @@ -193,7 +188,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, if (current->thread.forking) { memcpy(&p->thread.regs.regs, ®s->regs, sizeof(p->thread.regs.regs)); - UPT_SET_SYSCALL_RETURN(&p->thread.regs.regs, 0); + PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0); if (sp != 0) REGS_SP(p->thread.regs.regs.gp) = sp; diff --git a/trunk/arch/um/kernel/ptrace.c b/trunk/arch/um/kernel/ptrace.c index 06b190390505..694d551c8899 100644 --- a/trunk/arch/um/kernel/ptrace.c +++ b/trunk/arch/um/kernel/ptrace.c @@ -3,11 +3,12 @@ * Licensed under the GPL */ -#include "linux/audit.h" -#include "linux/ptrace.h" -#include "linux/sched.h" -#include "asm/uaccess.h" -#include "skas_ptrace.h" +#include +#include +#include +#include +#include +#include @@ -162,48 +163,36 @@ static void send_sigtrap(struct task_struct *tsk, 
struct uml_pt_regs *regs, * XXX Check PT_DTRACE vs TIF_SINGLESTEP for singlestepping check and * PT_PTRACED vs TIF_SYSCALL_TRACE for syscall tracing check */ -void syscall_trace(struct uml_pt_regs *regs, int entryexit) +void syscall_trace_enter(struct pt_regs *regs) { - int is_singlestep = (current->ptrace & PT_DTRACE) && entryexit; - int tracesysgood; - - if (!entryexit) - audit_syscall_entry(HOST_AUDIT_ARCH, - UPT_SYSCALL_NR(regs), - UPT_SYSCALL_ARG1(regs), - UPT_SYSCALL_ARG2(regs), - UPT_SYSCALL_ARG3(regs), - UPT_SYSCALL_ARG4(regs)); - else - audit_syscall_exit(regs); - - /* Fake a debug trap */ - if (is_singlestep) - send_sigtrap(current, regs, 0); + audit_syscall_entry(HOST_AUDIT_ARCH, + UPT_SYSCALL_NR(®s->regs), + UPT_SYSCALL_ARG1(®s->regs), + UPT_SYSCALL_ARG2(®s->regs), + UPT_SYSCALL_ARG3(®s->regs), + UPT_SYSCALL_ARG4(®s->regs)); if (!test_thread_flag(TIF_SYSCALL_TRACE)) return; - if (!(current->ptrace & PT_PTRACED)) - return; + tracehook_report_syscall_entry(regs); +} - /* - * the 0x80 provides a way for the tracing parent to distinguish - * between a syscall stop and SIGTRAP delivery - */ - tracesysgood = (current->ptrace & PT_TRACESYSGOOD); - ptrace_notify(SIGTRAP | (tracesysgood ? 0x80 : 0)); +void syscall_trace_leave(struct pt_regs *regs) +{ + int ptraced = current->ptrace; - if (entryexit) /* force do_signal() --> is_syscall() */ - set_thread_flag(TIF_SIGPENDING); + audit_syscall_exit(regs); - /* - * this isn't the same as continuing with a signal, but it will do - * for normal use. strace only continues with a signal if the - * stopping signal is not SIGTRAP. -brl - */ - if (current->exit_code) { - send_sig(current->exit_code, current, 1); - current->exit_code = 0; - } + /* Fake a debug trap */ + if (ptraced & PT_DTRACE) + send_sigtrap(current, ®s->regs, 0); + + if (!test_thread_flag(TIF_SYSCALL_TRACE)) + return; + + tracehook_report_syscall_exit(regs, 0); + /* force do_signal() --> is_syscall() */ + if (ptraced & PT_PTRACED) + set_thread_flag(TIF_SIGPENDING); } diff --git a/trunk/arch/um/kernel/skas/syscall.c b/trunk/arch/um/kernel/skas/syscall.c index 05fbeb480e0b..86368a025a96 100644 --- a/trunk/arch/um/kernel/skas/syscall.c +++ b/trunk/arch/um/kernel/skas/syscall.c @@ -18,7 +18,7 @@ void handle_syscall(struct uml_pt_regs *r) long result; int syscall; - syscall_trace(r, 0); + syscall_trace_enter(regs); /* * This should go in the declaration of syscall, but when I do that, @@ -34,7 +34,7 @@ void handle_syscall(struct uml_pt_regs *r) result = -ENOSYS; else result = EXECUTE_SYSCALL(syscall, regs); - UPT_SET_SYSCALL_RETURN(r, result); + PT_REGS_SET_SYSCALL_RETURN(regs, result); - syscall_trace(r, 1); + syscall_trace_leave(regs); } diff --git a/trunk/arch/um/kernel/time.c b/trunk/arch/um/kernel/time.c index d1a23fb3190d..5f76d4ba151c 100644 --- a/trunk/arch/um/kernel/time.c +++ b/trunk/arch/um/kernel/time.c @@ -13,7 +13,7 @@ #include "kern_util.h" #include "os.h" -void timer_handler(int sig, struct uml_pt_regs *regs) +void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs) { unsigned long flags; diff --git a/trunk/arch/um/kernel/trap.c b/trunk/arch/um/kernel/trap.c index 3be60765c0e2..0353b98ae35a 100644 --- a/trunk/arch/um/kernel/trap.c +++ b/trunk/arch/um/kernel/trap.c @@ -172,7 +172,7 @@ void fatal_sigsegv(void) os_dump_core(); } -void segv_handler(int sig, struct uml_pt_regs *regs) +void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs) { struct faultinfo * fi = UPT_FAULTINFO(regs); @@ -258,8 +258,11 @@ unsigned long 
segv(struct faultinfo fi, unsigned long ip, int is_user, return 0; } -void relay_signal(int sig, struct uml_pt_regs *regs) +void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs) { + struct faultinfo *fi; + struct siginfo clean_si; + if (!UPT_IS_USER(regs)) { if (sig == SIGBUS) printk(KERN_ERR "Bus error - the host /dev/shm or /tmp " @@ -269,18 +272,40 @@ void relay_signal(int sig, struct uml_pt_regs *regs) arch_examine_signal(sig, regs); - current->thread.arch.faultinfo = *UPT_FAULTINFO(regs); - force_sig(sig, current); + memset(&clean_si, 0, sizeof(clean_si)); + clean_si.si_signo = si->si_signo; + clean_si.si_errno = si->si_errno; + clean_si.si_code = si->si_code; + switch (sig) { + case SIGILL: + case SIGFPE: + case SIGSEGV: + case SIGBUS: + case SIGTRAP: + fi = UPT_FAULTINFO(regs); + clean_si.si_addr = (void __user *) FAULT_ADDRESS(*fi); + current->thread.arch.faultinfo = *fi; +#ifdef __ARCH_SI_TRAPNO + clean_si.si_trapno = si->si_trapno; +#endif + break; + default: + printk(KERN_ERR "Attempted to relay unknown signal %d (si_code = %d)\n", + sig, si->si_code); + } + + force_sig_info(sig, &clean_si, current); } -void bus_handler(int sig, struct uml_pt_regs *regs) +void bus_handler(int sig, struct siginfo *si, struct uml_pt_regs *regs) { if (current->thread.fault_catcher != NULL) UML_LONGJMP(current->thread.fault_catcher, 1); - else relay_signal(sig, regs); + else + relay_signal(sig, si, regs); } -void winch(int sig, struct uml_pt_regs *regs) +void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs) { do_IRQ(WINCH_IRQ, regs); } diff --git a/trunk/arch/um/os-Linux/internal.h b/trunk/arch/um/os-Linux/internal.h index 2c3c3ecd8c01..0dc2c9f135f6 100644 --- a/trunk/arch/um/os-Linux/internal.h +++ b/trunk/arch/um/os-Linux/internal.h @@ -1 +1 @@ -void alarm_handler(int, mcontext_t *); +void alarm_handler(int sig, struct siginfo *unused_si, mcontext_t *mc); diff --git a/trunk/arch/um/os-Linux/signal.c b/trunk/arch/um/os-Linux/signal.c index 2d22f1fcd8e2..6366ce904b9b 100644 --- a/trunk/arch/um/os-Linux/signal.c +++ b/trunk/arch/um/os-Linux/signal.c @@ -13,8 +13,9 @@ #include "kern_util.h" #include "os.h" #include "sysdep/mcontext.h" +#include "internal.h" -void (*sig_info[NSIG])(int, struct uml_pt_regs *) = { +void (*sig_info[NSIG])(int, siginfo_t *, struct uml_pt_regs *) = { [SIGTRAP] = relay_signal, [SIGFPE] = relay_signal, [SIGILL] = relay_signal, @@ -24,7 +25,7 @@ void (*sig_info[NSIG])(int, struct uml_pt_regs *) = { [SIGIO] = sigio_handler, [SIGVTALRM] = timer_handler }; -static void sig_handler_common(int sig, mcontext_t *mc) +static void sig_handler_common(int sig, siginfo_t *si, mcontext_t *mc) { struct uml_pt_regs r; int save_errno = errno; @@ -40,7 +41,7 @@ static void sig_handler_common(int sig, mcontext_t *mc) if ((sig != SIGIO) && (sig != SIGWINCH) && (sig != SIGVTALRM)) unblock_signals(); - (*sig_info[sig])(sig, &r); + (*sig_info[sig])(sig, si, &r); errno = save_errno; } @@ -60,7 +61,7 @@ static void sig_handler_common(int sig, mcontext_t *mc) static int signals_enabled; static unsigned int signals_pending; -void sig_handler(int sig, mcontext_t *mc) +void sig_handler(int sig, siginfo_t *si, mcontext_t *mc) { int enabled; @@ -72,7 +73,7 @@ void sig_handler(int sig, mcontext_t *mc) block_signals(); - sig_handler_common(sig, mc); + sig_handler_common(sig, si, mc); set_signals(enabled); } @@ -85,10 +86,10 @@ static void real_alarm_handler(mcontext_t *mc) get_regs_from_mc(®s, mc); regs.is_user = 0; unblock_signals(); - timer_handler(SIGVTALRM, ®s); + 
timer_handler(SIGVTALRM, NULL, ®s); } -void alarm_handler(int sig, mcontext_t *mc) +void alarm_handler(int sig, struct siginfo *unused_si, mcontext_t *mc) { int enabled; @@ -119,7 +120,7 @@ void set_sigstack(void *sig_stack, int size) panic("enabling signal stack failed, errno = %d\n", errno); } -static void (*handlers[_NSIG])(int sig, mcontext_t *mc) = { +static void (*handlers[_NSIG])(int sig, siginfo_t *si, mcontext_t *mc) = { [SIGSEGV] = sig_handler, [SIGBUS] = sig_handler, [SIGILL] = sig_handler, @@ -132,7 +133,7 @@ static void (*handlers[_NSIG])(int sig, mcontext_t *mc) = { }; -static void hard_handler(int sig, siginfo_t *info, void *p) +static void hard_handler(int sig, siginfo_t *si, void *p) { struct ucontext *uc = p; mcontext_t *mc = &uc->uc_mcontext; @@ -161,7 +162,7 @@ static void hard_handler(int sig, siginfo_t *info, void *p) while ((sig = ffs(pending)) != 0){ sig--; pending &= ~(1 << sig); - (*handlers[sig])(sig, mc); + (*handlers[sig])(sig, si, mc); } /* @@ -273,9 +274,12 @@ void unblock_signals(void) * Deal with SIGIO first because the alarm handler might * schedule, leaving the pending SIGIO stranded until we come * back here. + * + * SIGIO's handler doesn't use siginfo or mcontext, + * so they can be NULL. */ if (save_pending & SIGIO_MASK) - sig_handler_common(SIGIO, NULL); + sig_handler_common(SIGIO, NULL, NULL); if (save_pending & SIGVTALRM_MASK) real_alarm_handler(NULL); diff --git a/trunk/arch/um/os-Linux/skas/process.c b/trunk/arch/um/os-Linux/skas/process.c index cd65727854eb..d93bb40499f7 100644 --- a/trunk/arch/um/os-Linux/skas/process.c +++ b/trunk/arch/um/os-Linux/skas/process.c @@ -346,6 +346,10 @@ void userspace(struct uml_pt_regs *regs) int err, status, op, pid = userspace_pid[0]; /* To prevent races if using_sysemu changes under us.*/ int local_using_sysemu; + siginfo_t si; + + /* Handle any immediate reschedules or signals */ + interrupt_end(); if (getitimer(ITIMER_VIRTUAL, &timer)) printk(UM_KERN_ERR "Failed to get itimer, errno = %d\n", errno); @@ -404,13 +408,17 @@ void userspace(struct uml_pt_regs *regs) if (WIFSTOPPED(status)) { int sig = WSTOPSIG(status); + + ptrace(PTRACE_GETSIGINFO, pid, 0, &si); + switch (sig) { case SIGSEGV: if (PTRACE_FULL_FAULTINFO || !ptrace_faultinfo) { get_skas_faultinfo(pid, ®s->faultinfo); - (*sig_info[SIGSEGV])(SIGSEGV, regs); + (*sig_info[SIGSEGV])(SIGSEGV, &si, + regs); } else handle_segv(pid, regs); break; @@ -418,14 +426,14 @@ void userspace(struct uml_pt_regs *regs) handle_trap(pid, regs, local_using_sysemu); break; case SIGTRAP: - relay_signal(SIGTRAP, regs); + relay_signal(SIGTRAP, &si, regs); break; case SIGVTALRM: now = os_nsecs(); if (now < nsecs) break; block_signals(); - (*sig_info[sig])(sig, regs); + (*sig_info[sig])(sig, &si, regs); unblock_signals(); nsecs = timer.it_value.tv_sec * UM_NSEC_PER_SEC + @@ -439,7 +447,7 @@ void userspace(struct uml_pt_regs *regs) case SIGFPE: case SIGWINCH: block_signals(); - (*sig_info[sig])(sig, regs); + (*sig_info[sig])(sig, &si, regs); unblock_signals(); break; default: diff --git a/trunk/arch/um/os-Linux/time.c b/trunk/arch/um/os-Linux/time.c index 910499d76a67..0748fe0c8a73 100644 --- a/trunk/arch/um/os-Linux/time.c +++ b/trunk/arch/um/os-Linux/time.c @@ -87,7 +87,7 @@ static int after_sleep_interval(struct timespec *ts) static void deliver_alarm(void) { - alarm_handler(SIGVTALRM, NULL); + alarm_handler(SIGVTALRM, NULL, NULL); } static unsigned long long sleep_time(unsigned long long nsecs) @@ -114,7 +114,7 @@ static void deliver_alarm(void) skew += this_tick - last_tick; 
while (skew >= one_tick) { - alarm_handler(SIGVTALRM, NULL); + alarm_handler(SIGVTALRM, NULL, NULL); skew -= one_tick; } diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig index ba2657c49217..8ec3a1aa4abd 100644 --- a/trunk/arch/x86/Kconfig +++ b/trunk/arch/x86/Kconfig @@ -1527,7 +1527,7 @@ config SECCOMP If unsure, say Y. Only embedded should say N here. config CC_STACKPROTECTOR - bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" + bool "Enable -fstack-protector buffer overflow detection" ---help--- This option turns on the -fstack-protector GCC feature. This feature puts, at the beginning of functions, a canary value on diff --git a/trunk/arch/x86/Makefile b/trunk/arch/x86/Makefile index b0c5276861ec..682e9c210baa 100644 --- a/trunk/arch/x86/Makefile +++ b/trunk/arch/x86/Makefile @@ -27,6 +27,10 @@ ifeq ($(CONFIG_X86_32),y) KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return + # Never want PIC in a 32-bit kernel, prevent breakage with GCC built + # with nonstandard options + KBUILD_CFLAGS += -fno-pic + # prevent gcc from keeping the stack 16 byte aligned KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2) diff --git a/trunk/arch/x86/boot/Makefile b/trunk/arch/x86/boot/Makefile index 5a747dd884db..f7535bedc33f 100644 --- a/trunk/arch/x86/boot/Makefile +++ b/trunk/arch/x86/boot/Makefile @@ -57,7 +57,7 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \ -Wall -Wstrict-prototypes \ -march=i386 -mregparm=3 \ -include $(srctree)/$(src)/code16gcc.h \ - -fno-strict-aliasing -fomit-frame-pointer \ + -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ $(call cc-option, -ffreestanding) \ $(call cc-option, -fno-toplevel-reorder,\ $(call cc-option, -fno-unit-at-a-time)) \ diff --git a/trunk/arch/x86/include/asm/mce.h b/trunk/arch/x86/include/asm/mce.h index 441520e4174f..a3ac52b29cbf 100644 --- a/trunk/arch/x86/include/asm/mce.h +++ b/trunk/arch/x86/include/asm/mce.h @@ -33,6 +33,14 @@ #define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ #define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ #define MCI_STATUS_AR (1ULL<<55) /* Action required */ +#define MCACOD 0xffff /* MCA Error Code */ + +/* Architecturally defined codes from SDM Vol. 
3B Chapter 15 */ +#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */ +#define MCACOD_SCRUBMSK 0xfff0 +#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */ +#define MCACOD_DATA 0x0134 /* Data Load */ +#define MCACOD_INSTR 0x0150 /* Instruction Fetch */ /* MCi_MISC register defines */ #define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f) diff --git a/trunk/arch/x86/include/asm/olpc.h b/trunk/arch/x86/include/asm/olpc.h index 87bdbca72f94..72f9adf6eca4 100644 --- a/trunk/arch/x86/include/asm/olpc.h +++ b/trunk/arch/x86/include/asm/olpc.h @@ -100,25 +100,6 @@ extern void olpc_xo1_pm_wakeup_clear(u16 value); extern int pci_olpc_init(void); -/* EC related functions */ - -extern int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen, - unsigned char *outbuf, size_t outlen); - -/* EC commands */ - -#define EC_FIRMWARE_REV 0x08 -#define EC_WRITE_SCI_MASK 0x1b -#define EC_WAKE_UP_WLAN 0x24 -#define EC_WLAN_LEAVE_RESET 0x25 -#define EC_READ_EB_MODE 0x2a -#define EC_SET_SCI_INHIBIT 0x32 -#define EC_SET_SCI_INHIBIT_RELEASE 0x34 -#define EC_WLAN_ENTER_RESET 0x35 -#define EC_WRITE_EXT_SCI_MASK 0x38 -#define EC_SCI_QUERY 0x84 -#define EC_EXT_SCI_QUERY 0x85 - /* SCI source values */ #define EC_SCI_SRC_EMPTY 0x00 diff --git a/trunk/arch/x86/include/asm/perf_event.h b/trunk/arch/x86/include/asm/perf_event.h index dab39350e51e..cb4e43bce98a 100644 --- a/trunk/arch/x86/include/asm/perf_event.h +++ b/trunk/arch/x86/include/asm/perf_event.h @@ -196,11 +196,16 @@ static inline u32 get_ibs_caps(void) { return 0; } extern void perf_events_lapic_init(void); /* - * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups. - * This flag is otherwise unused and ABI specified to be 0, so nobody should - * care what we do with it. + * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise + * unused and ABI specified to be 0, so nobody should care what we do with + * them. + * + * EXACT - the IP points to the exact instruction that triggered the + * event (HW bugs exempt). + * VM - original X86_VM_MASK; see set_linear_ip(). */ #define PERF_EFLAGS_EXACT (1UL << 3) +#define PERF_EFLAGS_VM (1UL << 5) struct pt_regs; extern unsigned long perf_instruction_pointer(struct pt_regs *regs); diff --git a/trunk/arch/x86/include/asm/spinlock.h b/trunk/arch/x86/include/asm/spinlock.h index b315a33867f2..33692eaabab5 100644 --- a/trunk/arch/x86/include/asm/spinlock.h +++ b/trunk/arch/x86/include/asm/spinlock.h @@ -12,8 +12,7 @@ * Simple spin lock operations. There are two variants, one clears IRQ's * on the local processor, one does not. * - * These are fair FIFO ticket locks, which are currently limited to 256 - * CPUs. + * These are fair FIFO ticket locks, which support up to 2^16 CPUs. 
* * (the type definitions are in asm/spinlock_types.h) */ diff --git a/trunk/arch/x86/kernel/acpi/sleep.c b/trunk/arch/x86/kernel/acpi/sleep.c index 95bf99de9058..1b8e5a03d942 100644 --- a/trunk/arch/x86/kernel/acpi/sleep.c +++ b/trunk/arch/x86/kernel/acpi/sleep.c @@ -25,10 +25,6 @@ unsigned long acpi_realmode_flags; static char temp_stack[4096]; #endif -asmlinkage void acpi_enter_s3(void) -{ - acpi_enter_sleep_state(3, wake_sleep_flags); -} /** * acpi_suspend_lowlevel - save kernel state * diff --git a/trunk/arch/x86/kernel/acpi/sleep.h b/trunk/arch/x86/kernel/acpi/sleep.h index 5653a5791ec9..67f59f8c6956 100644 --- a/trunk/arch/x86/kernel/acpi/sleep.h +++ b/trunk/arch/x86/kernel/acpi/sleep.h @@ -2,7 +2,6 @@ * Variables and functions used by the code in sleep.c */ -#include #include extern unsigned long saved_video_mode; @@ -11,7 +10,6 @@ extern long saved_magic; extern int wakeup_pmode_return; extern u8 wake_sleep_flags; -extern asmlinkage void acpi_enter_s3(void); extern unsigned long acpi_copy_wakeup_routine(unsigned long); extern void wakeup_long64(void); diff --git a/trunk/arch/x86/kernel/acpi/wakeup_32.S b/trunk/arch/x86/kernel/acpi/wakeup_32.S index 72610839f03b..13ab720573e3 100644 --- a/trunk/arch/x86/kernel/acpi/wakeup_32.S +++ b/trunk/arch/x86/kernel/acpi/wakeup_32.S @@ -74,7 +74,9 @@ restore_registers: ENTRY(do_suspend_lowlevel) call save_processor_state call save_registers - call acpi_enter_s3 + pushl $3 + call acpi_enter_sleep_state + addl $4, %esp # In case of S3 failure, we'll emerge here. Jump # to ret_point to recover diff --git a/trunk/arch/x86/kernel/acpi/wakeup_64.S b/trunk/arch/x86/kernel/acpi/wakeup_64.S index 014d1d28c397..8ea5164cbd04 100644 --- a/trunk/arch/x86/kernel/acpi/wakeup_64.S +++ b/trunk/arch/x86/kernel/acpi/wakeup_64.S @@ -71,7 +71,9 @@ ENTRY(do_suspend_lowlevel) movq %rsi, saved_rsi addq $8, %rsp - call acpi_enter_s3 + movl $3, %edi + xorl %eax, %eax + call acpi_enter_sleep_state /* in case something went wrong, restore the machine status and go on */ jmp resume_point diff --git a/trunk/arch/x86/kernel/alternative.c b/trunk/arch/x86/kernel/alternative.c index 931280ff8299..ced4534baed5 100644 --- a/trunk/arch/x86/kernel/alternative.c +++ b/trunk/arch/x86/kernel/alternative.c @@ -165,7 +165,7 @@ static const unsigned char * const k7_nops[ASM_NOP_MAX+2] = #endif #ifdef P6_NOP1 -static const unsigned char __initconst_or_module p6nops[] = +static const unsigned char p6nops[] = { P6_NOP1, P6_NOP2, @@ -224,7 +224,7 @@ void __init arch_init_ideal_nops(void) ideal_nops = intel_nops; #endif } - + break; default: #ifdef CONFIG_X86_64 ideal_nops = k8_nops; diff --git a/trunk/arch/x86/kernel/apic/io_apic.c b/trunk/arch/x86/kernel/apic/io_apic.c index 406eee784684..c265593ec2cd 100644 --- a/trunk/arch/x86/kernel/apic/io_apic.c +++ b/trunk/arch/x86/kernel/apic/io_apic.c @@ -1204,7 +1204,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg) BUG_ON(!cfg->vector); vector = cfg->vector; - for_each_cpu(cpu, cfg->domain) + for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) per_cpu(vector_irq, cpu)[vector] = -1; cfg->vector = 0; @@ -1212,7 +1212,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg) if (likely(!cfg->move_in_progress)) return; - for_each_cpu(cpu, cfg->old_domain) { + for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { if (per_cpu(vector_irq, cpu)[vector] != irq) @@ -1356,6 +1356,16 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg, if 
(!IO_APIC_IRQ(irq)) return; + /* + * For legacy irqs, cfg->domain starts with cpu 0. Now that IO-APIC + * can handle this irq and the apic driver is finialized at this point, + * update the cfg->domain. + */ + if (irq < legacy_pic->nr_legacy_irqs && + cpumask_equal(cfg->domain, cpumask_of(0))) + apic->vector_allocation_domain(0, cfg->domain, + apic->target_cpus()); + if (assign_irq_vector(irq, cfg, apic->target_cpus())) return; diff --git a/trunk/arch/x86/kernel/cpu/common.c b/trunk/arch/x86/kernel/cpu/common.c index 46d8786d655e..a5fbc3c5fccc 100644 --- a/trunk/arch/x86/kernel/cpu/common.c +++ b/trunk/arch/x86/kernel/cpu/common.c @@ -144,6 +144,8 @@ static int __init x86_xsave_setup(char *s) { setup_clear_cpu_cap(X86_FEATURE_XSAVE); setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); + setup_clear_cpu_cap(X86_FEATURE_AVX); + setup_clear_cpu_cap(X86_FEATURE_AVX2); return 1; } __setup("noxsave", x86_xsave_setup); diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce-severity.c b/trunk/arch/x86/kernel/cpu/mcheck/mce-severity.c index 413c2ced887c..13017626f9a8 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/mce-severity.c +++ b/trunk/arch/x86/kernel/cpu/mcheck/mce-severity.c @@ -55,13 +55,6 @@ static struct severity { #define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S) #define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR) #define MCI_ADDR (MCI_STATUS_ADDRV|MCI_STATUS_MISCV) -#define MCACOD 0xffff -/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */ -#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */ -#define MCACOD_SCRUBMSK 0xfff0 -#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */ -#define MCACOD_DATA 0x0134 /* Data Load */ -#define MCACOD_INSTR 0x0150 /* Instruction Fetch */ MCESEV( NO, "Invalid", diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce.c b/trunk/arch/x86/kernel/cpu/mcheck/mce.c index 5e095f873e3e..292d0258311c 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/mce.c +++ b/trunk/arch/x86/kernel/cpu/mcheck/mce.c @@ -103,6 +103,8 @@ DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { static DEFINE_PER_CPU(struct work_struct, mce_work); +static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs); + /* * CPU/chipset specific EDAC code can register a notifier call here to print * MCE errors in a human-readable form. @@ -650,14 +652,18 @@ EXPORT_SYMBOL_GPL(machine_check_poll); * Do a quick check if any of the events requires a panic. * This decides if we keep the events around or clear them. */ -static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp) +static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, + struct pt_regs *regs) { int i, ret = 0; for (i = 0; i < banks; i++) { m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i)); - if (m->status & MCI_STATUS_VAL) + if (m->status & MCI_STATUS_VAL) { __set_bit(i, validp); + if (quirk_no_way_out) + quirk_no_way_out(i, m, regs); + } if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY) ret = 1; } @@ -1040,7 +1046,7 @@ void do_machine_check(struct pt_regs *regs, long error_code) *final = m; memset(valid_banks, 0, sizeof(valid_banks)); - no_way_out = mce_no_way_out(&m, &msg, valid_banks); + no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs); barrier(); @@ -1418,6 +1424,34 @@ static void __mcheck_cpu_init_generic(void) } } +/* + * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and + * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM + * Vol 3B Table 15-20). 
But this confuses both the code that determines + * whether the machine check occurred in kernel or user mode, and also + * the severity assessment code. Pretend that EIPV was set, and take the + * ip/cs values from the pt_regs that mce_gather_info() ignored earlier. + */ +static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs) +{ + if (bank != 0) + return; + if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0) + return; + if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC| + MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV| + MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR| + MCACOD)) != + (MCI_STATUS_UC|MCI_STATUS_EN| + MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S| + MCI_STATUS_AR|MCACOD_INSTR)) + return; + + m->mcgstatus |= MCG_STATUS_EIPV; + m->ip = regs->ip; + m->cs = regs->cs; +} + /* Add per CPU specific workarounds here */ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) { @@ -1515,6 +1549,9 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) */ if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0) mce_bootlog = 0; + + if (c->x86 == 6 && c->x86_model == 45) + quirk_no_way_out = quirk_sandybridge_ifu; } if (monarch_timeout < 0) monarch_timeout = 0; diff --git a/trunk/arch/x86/kernel/cpu/perf_event.c b/trunk/arch/x86/kernel/cpu/perf_event.c index 29557aa06dda..915b876edd1e 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event.c +++ b/trunk/arch/x86/kernel/cpu/perf_event.c @@ -32,6 +32,8 @@ #include #include #include +#include +#include #include "perf_event.h" @@ -1738,6 +1740,29 @@ valid_user_frame(const void __user *fp, unsigned long size) return (__range_not_ok(fp, size, TASK_SIZE) == 0); } +static unsigned long get_segment_base(unsigned int segment) +{ + struct desc_struct *desc; + int idx = segment >> 3; + + if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) { + if (idx > LDT_ENTRIES) + return 0; + + if (idx > current->active_mm->context.size) + return 0; + + desc = current->active_mm->context.ldt; + } else { + if (idx > GDT_ENTRIES) + return 0; + + desc = __this_cpu_ptr(&gdt_page.gdt[0]); + } + + return get_desc_base(desc + idx); +} + #ifdef CONFIG_COMPAT #include @@ -1746,13 +1771,17 @@ static inline int perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) { /* 32-bit process in 64-bit kernel. */ + unsigned long ss_base, cs_base; struct stack_frame_ia32 frame; const void __user *fp; if (!test_thread_flag(TIF_IA32)) return 0; - fp = compat_ptr(regs->bp); + cs_base = get_segment_base(regs->cs); + ss_base = get_segment_base(regs->ss); + + fp = compat_ptr(ss_base + regs->bp); while (entry->nr < PERF_MAX_STACK_DEPTH) { unsigned long bytes; frame.next_frame = 0; @@ -1765,8 +1794,8 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) if (!valid_user_frame(fp, sizeof(frame))) break; - perf_callchain_store(entry, frame.return_address); - fp = compat_ptr(frame.next_frame); + perf_callchain_store(entry, cs_base + frame.return_address); + fp = compat_ptr(ss_base + frame.next_frame); } return 1; } @@ -1789,6 +1818,12 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) return; } + /* + * We don't know what to do with VM86 stacks.. ignore them for now. 
+ */ + if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM)) + return; + fp = (void __user *)regs->bp; perf_callchain_store(entry, regs->ip); @@ -1816,16 +1851,50 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) } } -unsigned long perf_instruction_pointer(struct pt_regs *regs) +/* + * Deal with code segment offsets for the various execution modes: + * + * VM86 - the good olde 16 bit days, where the linear address is + * 20 bits and we use regs->ip + 0x10 * regs->cs. + * + * IA32 - Where we need to look at GDT/LDT segment descriptor tables + * to figure out what the 32bit base address is. + * + * X32 - has TIF_X32 set, but is running in x86_64 + * + * X86_64 - CS,DS,SS,ES are all zero based. + */ +static unsigned long code_segment_base(struct pt_regs *regs) { - unsigned long ip; + /* + * If we are in VM86 mode, add the segment offset to convert to a + * linear address. + */ + if (regs->flags & X86_VM_MASK) + return 0x10 * regs->cs; + + /* + * For IA32 we look at the GDT/LDT segment base to convert the + * effective IP to a linear address. + */ +#ifdef CONFIG_X86_32 + if (user_mode(regs) && regs->cs != __USER_CS) + return get_segment_base(regs->cs); +#else + if (test_thread_flag(TIF_IA32)) { + if (user_mode(regs) && regs->cs != __USER32_CS) + return get_segment_base(regs->cs); + } +#endif + return 0; +} +unsigned long perf_instruction_pointer(struct pt_regs *regs) +{ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) - ip = perf_guest_cbs->get_guest_ip(); - else - ip = instruction_pointer(regs); + return perf_guest_cbs->get_guest_ip(); - return ip; + return regs->ip + code_segment_base(regs); } unsigned long perf_misc_flags(struct pt_regs *regs) @@ -1838,7 +1907,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs) else misc |= PERF_RECORD_MISC_GUEST_KERNEL; } else { - if (!kernel_ip(regs->ip)) + if (user_mode(regs)) misc |= PERF_RECORD_MISC_USER; else misc |= PERF_RECORD_MISC_KERNEL; diff --git a/trunk/arch/x86/kernel/cpu/perf_event.h b/trunk/arch/x86/kernel/cpu/perf_event.h index 821d53b696d1..6605a81ba339 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event.h +++ b/trunk/arch/x86/kernel/cpu/perf_event.h @@ -516,6 +516,26 @@ static inline bool kernel_ip(unsigned long ip) #endif } +/* + * Not all PMUs provide the right context information to place the reported IP + * into full context. Specifically segment registers are typically not + * supplied. + * + * Assuming the address is a linear address (it is for IBS), we fake the CS and + * vm86 mode using the known zero-based code segment and 'fix up' the registers + * to reflect this. + * + * Intel PEBS/LBR appear to typically provide the effective address, nothing + * much we can do about that but pray and treat it like a linear address. + */ +static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip) +{ + regs->cs = kernel_ip(ip) ? 
__KERNEL_CS : __USER_CS; + if (regs->flags & X86_VM_MASK) + regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK); + regs->ip = ip; +} + #ifdef CONFIG_CPU_SUP_AMD int amd_pmu_init(void); diff --git a/trunk/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/trunk/arch/x86/kernel/cpu/perf_event_amd_ibs.c index da9bcdcd9856..7bfb5bec8630 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_amd_ibs.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_amd_ibs.c @@ -13,6 +13,8 @@ #include +#include "perf_event.h" + static u32 ibs_caps; #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) @@ -536,7 +538,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs) if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) { regs.flags &= ~PERF_EFLAGS_EXACT; } else { - instruction_pointer_set(®s, ibs_data.regs[1]); + set_linear_ip(®s, ibs_data.regs[1]); regs.flags |= PERF_EFLAGS_EXACT; } diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel.c b/trunk/arch/x86/kernel/cpu/perf_event_intel.c index 382366977d4c..7f2739e03e79 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel.c @@ -1522,8 +1522,16 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr) arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL; arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask; arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask; + /* + * If PMU counter has PEBS enabled it is not enough to disable counter + * on a guest entry since PEBS memory write can overshoot guest entry + * and corrupt guest memory. Disabling PEBS solves the problem. + */ + arr[1].msr = MSR_IA32_PEBS_ENABLE; + arr[1].host = cpuc->pebs_enabled; + arr[1].guest = 0; - *nr = 1; + *nr = 2; return arr; } diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c b/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c index 629ae0b7ad90..e38d97bf4259 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -499,7 +499,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) * We sampled a branch insn, rewind using the LBR stack */ if (ip == to) { - regs->ip = from; + set_linear_ip(regs, from); return 1; } @@ -529,7 +529,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) } while (to < ip); if (to == ip) { - regs->ip = old_to; + set_linear_ip(regs, old_to); return 1; } @@ -569,7 +569,8 @@ static void __intel_pmu_pebs_event(struct perf_event *event, * A possible PERF_SAMPLE_REGS will have to transfer all regs. 
*/ regs = *iregs; - regs.ip = pebs->ip; + regs.flags = pebs->flags; + set_linear_ip(®s, pebs->ip); regs.bp = pebs->bp; regs.sp = pebs->sp; diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.c index 7563fda9f033..0a5571080e74 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -796,7 +796,6 @@ static struct intel_uncore_type *nhm_msr_uncores[] = { DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5"); DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7"); -DEFINE_UNCORE_FORMAT_ATTR(mm_cfg, mm_cfg, "config:63"); DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63"); DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63"); @@ -902,16 +901,21 @@ static struct attribute_group nhmex_uncore_cbox_format_group = { .attrs = nhmex_uncore_cbox_formats_attr, }; +/* msr offset for each instance of cbox */ +static unsigned nhmex_cbox_msr_offsets[] = { + 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0, +}; + static struct intel_uncore_type nhmex_uncore_cbox = { .name = "cbox", .num_counters = 6, - .num_boxes = 8, + .num_boxes = 10, .perf_ctr_bits = 48, .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0, .perf_ctr = NHMEX_C0_MSR_PMON_CTR0, .event_mask = NHMEX_PMON_RAW_EVENT_MASK, .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL, - .msr_offset = NHMEX_C_MSR_OFFSET, + .msr_offsets = nhmex_cbox_msr_offsets, .pair_ctr_ctl = 1, .ops = &nhmex_uncore_ops, .format_group = &nhmex_uncore_cbox_format_group @@ -1032,24 +1036,22 @@ static struct intel_uncore_type nhmex_uncore_bbox = { static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) { - struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; - struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + struct hw_perf_event_extra *reg2 = &hwc->branch_reg; - if (event->attr.config & NHMEX_S_PMON_MM_CFG_EN) { - reg1->config = event->attr.config1; - reg2->config = event->attr.config2; - } else { - reg1->config = ~0ULL; - reg2->config = ~0ULL; - } + /* only TO_R_PROG_EV event uses the match/mask register */ + if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) != + NHMEX_S_EVENT_TO_R_PROG_EV) + return 0; if (box->pmu->pmu_idx == 0) reg1->reg = NHMEX_S0_MSR_MM_CFG; else reg1->reg = NHMEX_S1_MSR_MM_CFG; - reg1->idx = 0; - + reg1->config = event->attr.config1; + reg2->config = event->attr.config2; return 0; } @@ -1059,8 +1061,8 @@ static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct per struct hw_perf_event_extra *reg1 = &hwc->extra_reg; struct hw_perf_event_extra *reg2 = &hwc->branch_reg; - wrmsrl(reg1->reg, 0); - if (reg1->config != ~0ULL || reg2->config != ~0ULL) { + if (reg1->idx != EXTRA_REG_NONE) { + wrmsrl(reg1->reg, 0); wrmsrl(reg1->reg + 1, reg1->config); wrmsrl(reg1->reg + 2, reg2->config); wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN); @@ -1074,7 +1076,6 @@ static struct attribute *nhmex_uncore_sbox_formats_attr[] = { &format_attr_edge.attr, &format_attr_inv.attr, &format_attr_thresh8.attr, - &format_attr_mm_cfg.attr, &format_attr_match.attr, &format_attr_mask.attr, NULL, @@ -1142,6 +1143,9 @@ static struct extra_reg nhmex_uncore_mbox_extra_regs[] = { EVENT_EXTRA_END }; +/* Nehalem-EX or Westmere-EX ? 
*/ +bool uncore_nhmex; + static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config) { struct intel_uncore_extra_reg *er; @@ -1171,18 +1175,29 @@ static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 return false; /* mask of the shared fields */ - mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK; + if (uncore_nhmex) + mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK; + else + mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK; er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; raw_spin_lock_irqsave(&er->lock, flags); /* add mask of the non-shared field if it's in use */ - if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) - mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) { + if (uncore_nhmex) + mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + else + mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + } if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) { atomic_add(1 << (idx * 8), &er->ref); - mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK | - NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + if (uncore_nhmex) + mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK | + NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + else + mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK | + WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); er->config &= ~mask; er->config |= (config & mask); ret = true; @@ -1216,7 +1231,10 @@ u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify) /* get the non-shared control bits and shift them */ idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; - config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + if (uncore_nhmex) + config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + else + config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); if (new_idx > orig_idx) { idx = new_idx - orig_idx; config <<= 3 * idx; @@ -1226,6 +1244,10 @@ u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify) } /* add the shared control bits back */ + if (uncore_nhmex) + config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; + else + config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; if (modify) { /* adjust the main event selector */ @@ -1264,7 +1286,8 @@ nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event } /* for the match/mask registers */ - if ((uncore_box_is_fake(box) || !reg2->alloc) && + if (reg2->idx != EXTRA_REG_NONE && + (uncore_box_is_fake(box) || !reg2->alloc) && !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config)) goto fail; @@ -1278,7 +1301,8 @@ nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) nhmex_mbox_alter_er(event, idx[0], true); reg1->alloc |= alloc; - reg2->alloc = 1; + if (reg2->idx != EXTRA_REG_NONE) + reg2->alloc = 1; } return NULL; fail: @@ -1342,9 +1366,6 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event struct extra_reg *er; unsigned msr; int reg_idx = 0; - - if (WARN_ON_ONCE(reg1->idx != -1)) - return -EINVAL; /* * The mbox events may require 2 extra MSRs at the most. 
But only * the lower 32 bits in these MSRs are significant, so we can use @@ -1355,11 +1376,6 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event continue; if (event->attr.config1 & ~er->valid_mask) return -EINVAL; - if (er->idx == __BITS_VALUE(reg1->idx, 0, 8) || - er->idx == __BITS_VALUE(reg1->idx, 1, 8)) - continue; - if (WARN_ON_ONCE(reg_idx >= 2)) - return -EINVAL; msr = er->msr + type->msr_offset * box->pmu->pmu_idx; if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff)) @@ -1368,6 +1384,8 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event /* always use the 32~63 bits to pass the PLD config */ if (er->idx == EXTRA_REG_NHMEX_M_PLD) reg_idx = 1; + else if (WARN_ON_ONCE(reg_idx > 0)) + return -EINVAL; reg1->idx &= ~(0xff << (reg_idx * 8)); reg1->reg &= ~(0xffff << (reg_idx * 16)); @@ -1376,17 +1394,21 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event reg1->config = event->attr.config1; reg_idx++; } - /* use config2 to pass the filter config */ - reg2->idx = EXTRA_REG_NHMEX_M_FILTER; - if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN) - reg2->config = event->attr.config2; - else - reg2->config = ~0ULL; - if (box->pmu->pmu_idx == 0) - reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG; - else - reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG; - + /* + * The mbox only provides ability to perform address matching + * for the PLD events. + */ + if (reg_idx == 2) { + reg2->idx = EXTRA_REG_NHMEX_M_FILTER; + if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN) + reg2->config = event->attr.config2; + else + reg2->config = ~0ULL; + if (box->pmu->pmu_idx == 0) + reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG; + else + reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG; + } return 0; } @@ -1422,34 +1444,36 @@ static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct per wrmsrl(__BITS_VALUE(reg1->reg, 1, 16), nhmex_mbox_shared_reg_config(box, idx)); - wrmsrl(reg2->reg, 0); - if (reg2->config != ~0ULL) { - wrmsrl(reg2->reg + 1, - reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK); - wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK & - (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT)); - wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN); + if (reg2->idx != EXTRA_REG_NONE) { + wrmsrl(reg2->reg, 0); + if (reg2->config != ~0ULL) { + wrmsrl(reg2->reg + 1, + reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK); + wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK & + (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT)); + wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN); + } } wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); } -DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3"); -DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5"); -DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6"); -DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7"); -DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13"); -DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21"); -DEFINE_UNCORE_FORMAT_ATTR(filter_cfg, filter_cfg, "config2:63"); -DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33"); -DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61"); -DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(pld, pld, 
"config1:32-63"); +DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3"); +DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5"); +DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6"); +DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7"); +DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13"); +DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21"); +DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63"); +DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33"); +DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61"); +DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63"); static struct attribute *nhmex_uncore_mbox_formats_attr[] = { &format_attr_count_mode.attr, @@ -1458,7 +1482,7 @@ static struct attribute *nhmex_uncore_mbox_formats_attr[] = { &format_attr_flag_mode.attr, &format_attr_inc_sel.attr, &format_attr_set_flag_sel.attr, - &format_attr_filter_cfg.attr, + &format_attr_filter_cfg_en.attr, &format_attr_filter_match.attr, &format_attr_filter_mask.attr, &format_attr_dsp.attr, @@ -1482,6 +1506,12 @@ static struct uncore_event_desc nhmex_uncore_mbox_events[] = { { /* end: all zeroes */ }, }; +static struct uncore_event_desc wsmex_uncore_mbox_events[] = { + INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"), + INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"), + { /* end: all zeroes */ }, +}; + static struct intel_uncore_ops nhmex_uncore_mbox_ops = { NHMEX_UNCORE_OPS_COMMON_INIT(), .enable_event = nhmex_mbox_msr_enable_event, @@ -1513,7 +1543,7 @@ void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event) struct hw_perf_event_extra *reg1 = &hwc->extra_reg; int port; - /* adjust the main event selector */ + /* adjust the main event selector and extra register index */ if (reg1->idx % 2) { reg1->idx--; hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; @@ -1522,29 +1552,17 @@ void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event) hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; } - /* adjust address or config of extra register */ + /* adjust extra register config */ port = reg1->idx / 6 + box->pmu->pmu_idx * 4; switch (reg1->idx % 6) { - case 0: - reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port); - break; - case 1: - reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port); - break; case 2: - /* the 8~15 bits to the 0~7 bits */ + /* shift the 8~15 bits to the 0~7 bits */ reg1->config >>= 8; break; case 3: - /* the 0~7 bits to the 8~15 bits */ + /* shift the 0~7 bits to the 8~15 bits */ reg1->config <<= 8; break; - case 4: - reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port); - break; - case 5: - reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port); - break; }; } @@ -1671,7 +1689,7 @@ static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event struct hw_perf_event *hwc = &event->hw; struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; - int port, idx; + int idx; idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >> NHMEX_R_PMON_CTL_EV_SEL_SHIFT; @@ -1681,27 +1699,11 @@ static int nhmex_rbox_hw_config(struct 
intel_uncore_box *box, struct perf_event reg1->idx = idx; reg1->config = event->attr.config1; - port = idx / 6 + box->pmu->pmu_idx * 4; - idx %= 6; - switch (idx) { - case 0: - reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port); - break; - case 1: - reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port); - break; - case 2: - case 3: - reg1->reg = NHMEX_R_MSR_PORTN_QLX_CFG(port); - break; + switch (idx % 6) { case 4: case 5: - if (idx == 4) - reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port); - else - reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port); - reg2->config = event->attr.config2; hwc->config |= event->attr.config & (~0ULL << 32); + reg2->config = event->attr.config2; break; }; return 0; @@ -1727,28 +1729,34 @@ static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct per struct hw_perf_event *hwc = &event->hw; struct hw_perf_event_extra *reg1 = &hwc->extra_reg; struct hw_perf_event_extra *reg2 = &hwc->branch_reg; - int idx, er_idx; + int idx, port; - idx = reg1->idx % 6; - er_idx = idx; - if (er_idx > 2) - er_idx--; - er_idx += (reg1->idx / 6) * 5; + idx = reg1->idx; + port = idx / 6 + box->pmu->pmu_idx * 4; - switch (idx) { + switch (idx % 6) { case 0: + wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config); + break; case 1: - wrmsrl(reg1->reg, reg1->config); + wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config); break; case 2: case 3: - wrmsrl(reg1->reg, nhmex_rbox_shared_reg_config(box, er_idx)); + wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port), + nhmex_rbox_shared_reg_config(box, 2 + (idx / 6) * 5)); break; case 4: + wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port), + hwc->config >> 32); + wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config); + wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config); + break; case 5: - wrmsrl(reg1->reg, reg1->config); - wrmsrl(reg1->reg + 1, hwc->config >> 32); - wrmsrl(reg1->reg + 2, reg2->config); + wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port), + hwc->config >> 32); + wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config); + wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config); break; }; @@ -1756,8 +1764,8 @@ static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct per (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK)); } -DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config:32-63"); -DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config1:0-63"); +DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63"); +DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63"); DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63"); DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15"); DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31"); @@ -2303,6 +2311,7 @@ int uncore_pmu_event_init(struct perf_event *event) event->hw.idx = -1; event->hw.last_tag = ~0ULL; event->hw.extra_reg.idx = EXTRA_REG_NONE; + event->hw.branch_reg.idx = EXTRA_REG_NONE; if (event->attr.config == UNCORE_FIXED_EVENT) { /* no fixed counter */ @@ -2373,7 +2382,7 @@ static void __init uncore_type_exit(struct intel_uncore_type *type) type->attr_groups[1] = NULL; } -static void uncore_types_exit(struct intel_uncore_type **types) +static void __init uncore_types_exit(struct intel_uncore_type **types) { int i; for (i = 0; types[i]; i++) @@ -2814,7 +2823,13 @@ static int __init uncore_cpu_init(void) snbep_uncore_cbox.num_boxes = max_cores; msr_uncores = snbep_msr_uncores; break; - case 46: + case 46: /* Nehalem-EX */ + uncore_nhmex = true; + case 47: /* Westmere-EX aka. 
Xeon E7 */ + if (!uncore_nhmex) + nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events; + if (nhmex_uncore_cbox.num_boxes > max_cores) + nhmex_uncore_cbox.num_boxes = max_cores; msr_uncores = nhmex_msr_uncores; break; default: diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.h index f3851892e077..5b81c1856aac 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.h @@ -5,7 +5,7 @@ #include "perf_event.h" #define UNCORE_PMU_NAME_LEN 32 -#define UNCORE_PMU_HRTIMER_INTERVAL (60 * NSEC_PER_SEC) +#define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC) #define UNCORE_FIXED_EVENT 0xff #define UNCORE_PMC_IDX_MAX_GENERIC 8 @@ -230,6 +230,7 @@ #define NHMEX_S1_MSR_MASK 0xe5a #define NHMEX_S_PMON_MM_CFG_EN (0x1ULL << 63) +#define NHMEX_S_EVENT_TO_R_PROG_EV 0 /* NHM-EX Mbox */ #define NHMEX_M0_MSR_GLOBAL_CTL 0xca0 @@ -275,18 +276,12 @@ NHMEX_M_PMON_CTL_INC_SEL_MASK | \ NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK) - -#define NHMEX_M_PMON_ZDP_CTL_FVC_FVID_MASK 0x1f -#define NHMEX_M_PMON_ZDP_CTL_FVC_BCMD_MASK (0x7 << 5) -#define NHMEX_M_PMON_ZDP_CTL_FVC_RSP_MASK (0x7 << 8) -#define NHMEX_M_PMON_ZDP_CTL_FVC_PBOX_INIT_ERR (1 << 23) -#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK \ - (NHMEX_M_PMON_ZDP_CTL_FVC_FVID_MASK | \ - NHMEX_M_PMON_ZDP_CTL_FVC_BCMD_MASK | \ - NHMEX_M_PMON_ZDP_CTL_FVC_RSP_MASK | \ - NHMEX_M_PMON_ZDP_CTL_FVC_PBOX_INIT_ERR) +#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 11) - 1) | (1 << 23)) #define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7 << (11 + 3 * (n))) +#define WSMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 12) - 1) | (1 << 24)) +#define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7 << (12 + 3 * (n))) + /* * use the 9~13 bits to select event If the 7th bit is not set, * otherwise use the 19~21 bits to select event. @@ -368,6 +363,7 @@ struct intel_uncore_type { unsigned num_shared_regs:8; unsigned single_fixed:1; unsigned pair_ctr_ctl:1; + unsigned *msr_offsets; struct event_constraint unconstrainted; struct event_constraint *constraints; struct intel_uncore_pmu *pmus; @@ -485,29 +481,31 @@ unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx) return idx * 8 + box->pmu->type->perf_ctr; } -static inline -unsigned uncore_msr_box_ctl(struct intel_uncore_box *box) +static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box) +{ + struct intel_uncore_pmu *pmu = box->pmu; + return pmu->type->msr_offsets ? 
+ pmu->type->msr_offsets[pmu->pmu_idx] : + pmu->type->msr_offset * pmu->pmu_idx; +} + +static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box) { if (!box->pmu->type->box_ctl) return 0; - return box->pmu->type->box_ctl + - box->pmu->type->msr_offset * box->pmu->pmu_idx; + return box->pmu->type->box_ctl + uncore_msr_box_offset(box); } -static inline -unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box) +static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box) { if (!box->pmu->type->fixed_ctl) return 0; - return box->pmu->type->fixed_ctl + - box->pmu->type->msr_offset * box->pmu->pmu_idx; + return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box); } -static inline -unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box) +static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box) { - return box->pmu->type->fixed_ctr + - box->pmu->type->msr_offset * box->pmu->pmu_idx; + return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box); } static inline @@ -515,7 +513,7 @@ unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx) { return box->pmu->type->event_ctl + (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + - box->pmu->type->msr_offset * box->pmu->pmu_idx; + uncore_msr_box_offset(box); } static inline @@ -523,7 +521,7 @@ unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx) { return box->pmu->type->perf_ctr + (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + - box->pmu->type->msr_offset * box->pmu->pmu_idx; + uncore_msr_box_offset(box); } static inline diff --git a/trunk/arch/x86/kernel/irq.c b/trunk/arch/x86/kernel/irq.c index 1f5f1d5d2a02..d44f7829968e 100644 --- a/trunk/arch/x86/kernel/irq.c +++ b/trunk/arch/x86/kernel/irq.c @@ -270,7 +270,7 @@ void fixup_irqs(void) if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { break_affinity = 1; - affinity = cpu_all_mask; + affinity = cpu_online_mask; } chip = irq_data_get_irq_chip(data); @@ -328,6 +328,7 @@ void fixup_irqs(void) chip->irq_retrigger(data); raw_spin_unlock(&desc->lock); } + __this_cpu_write(vector_irq[vector], -1); } } #endif diff --git a/trunk/arch/x86/kernel/kdebugfs.c b/trunk/arch/x86/kernel/kdebugfs.c index 1d5d31ea686b..dc1404bf8e4b 100644 --- a/trunk/arch/x86/kernel/kdebugfs.c +++ b/trunk/arch/x86/kernel/kdebugfs.c @@ -107,7 +107,7 @@ static int __init create_setup_data_nodes(struct dentry *parent) { struct setup_data_node *node; struct setup_data *data; - int error = -ENOMEM; + int error; struct dentry *d; struct page *pg; u64 pa_data; @@ -121,8 +121,10 @@ static int __init create_setup_data_nodes(struct dentry *parent) while (pa_data) { node = kmalloc(sizeof(*node), GFP_KERNEL); - if (!node) + if (!node) { + error = -ENOMEM; goto err_dir; + } pg = pfn_to_page((pa_data+sizeof(*data)-1) >> PAGE_SHIFT); if (PageHighMem(pg)) { diff --git a/trunk/arch/x86/kernel/microcode_amd.c b/trunk/arch/x86/kernel/microcode_amd.c index 8a2ce8fd41c0..82746f942cd8 100644 --- a/trunk/arch/x86/kernel/microcode_amd.c +++ b/trunk/arch/x86/kernel/microcode_amd.c @@ -143,11 +143,12 @@ static int get_matching_microcode(int cpu, const u8 *ucode_ptr, unsigned int *current_size) { struct microcode_header_amd *mc_hdr; - unsigned int actual_size; + unsigned int actual_size, patch_size; u16 equiv_cpu_id; /* size of the current patch we're staring at */ - *current_size = *(u32 *)(ucode_ptr + 4) + SECTION_HDR_SIZE; + patch_size = *(u32 *)(ucode_ptr + 4); + *current_size = patch_size + SECTION_HDR_SIZE; equiv_cpu_id = find_equiv_id(); if (!equiv_cpu_id) @@ -174,7 
+175,7 @@ static int get_matching_microcode(int cpu, const u8 *ucode_ptr, /* * now that the header looks sane, verify its size */ - actual_size = verify_ucode_size(cpu, *current_size, leftover_size); + actual_size = verify_ucode_size(cpu, patch_size, leftover_size); if (!actual_size) return 0; diff --git a/trunk/arch/x86/kvm/emulate.c b/trunk/arch/x86/kvm/emulate.c index 97d9a9914ba8..a3b57a27be88 100644 --- a/trunk/arch/x86/kvm/emulate.c +++ b/trunk/arch/x86/kvm/emulate.c @@ -475,13 +475,26 @@ register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg) return address_mask(ctxt, reg); } +static void masked_increment(ulong *reg, ulong mask, int inc) +{ + assign_masked(reg, *reg + inc, mask); +} + static inline void register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc) { + ulong mask; + if (ctxt->ad_bytes == sizeof(unsigned long)) - *reg += inc; + mask = ~0UL; else - *reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt)); + mask = ad_mask(ctxt); + masked_increment(reg, mask, inc); +} + +static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc) +{ + masked_increment(&ctxt->regs[VCPU_REGS_RSP], stack_mask(ctxt), inc); } static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) @@ -1522,8 +1535,8 @@ static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes) { struct segmented_address addr; - register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -bytes); - addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]); + rsp_increment(ctxt, -bytes); + addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt); addr.seg = VCPU_SREG_SS; return segmented_write(ctxt, addr, data, bytes); @@ -1542,13 +1555,13 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt, int rc; struct segmented_address addr; - addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]); + addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt); addr.seg = VCPU_SREG_SS; rc = segmented_read(ctxt, addr, dest, len); if (rc != X86EMUL_CONTINUE) return rc; - register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len); + rsp_increment(ctxt, len); return rc; } @@ -1688,8 +1701,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt) while (reg >= VCPU_REGS_RAX) { if (reg == VCPU_REGS_RSP) { - register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], - ctxt->op_bytes); + rsp_increment(ctxt, ctxt->op_bytes); --reg; } @@ -2825,7 +2837,7 @@ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; - register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val); + rsp_increment(ctxt, ctxt->src.val); return X86EMUL_CONTINUE; } diff --git a/trunk/arch/x86/kvm/i8259.c b/trunk/arch/x86/kvm/i8259.c index 1df8fb9e1d5d..9fc9aa7ac703 100644 --- a/trunk/arch/x86/kvm/i8259.c +++ b/trunk/arch/x86/kvm/i8259.c @@ -316,6 +316,11 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val) addr &= 1; if (addr == 0) { if (val & 0x10) { + u8 edge_irr = s->irr & ~s->elcr; + int i; + bool found = false; + struct kvm_vcpu *vcpu; + s->init4 = val & 1; s->last_irr = 0; s->irr &= s->elcr; @@ -333,6 +338,18 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val) if (val & 0x08) pr_pic_unimpl( "level sensitive irq not supported"); + + kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm) + if (kvm_apic_accept_pic_intr(vcpu)) { + found = true; + break; + } + + + if (found) + for (irq = 0; irq < PIC_NUM_PINS/2; irq++) + if (edge_irr & (1 << irq)) + pic_clear_isr(s, 
irq); } else if (val & 0x08) { if (val & 0x04) s->poll = 1; diff --git a/trunk/arch/x86/kvm/mmu.c b/trunk/arch/x86/kvm/mmu.c index 01ca00423938..7fbd0d273ea8 100644 --- a/trunk/arch/x86/kvm/mmu.c +++ b/trunk/arch/x86/kvm/mmu.c @@ -4112,17 +4112,22 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc) int idx; LIST_HEAD(invalid_list); + /* + * Never scan more than sc->nr_to_scan VM instances. + * Will not hit this condition practically since we do not try + * to shrink more than one VM and it is very unlikely to see + * !n_used_mmu_pages so many times. + */ + if (!nr_to_scan--) + break; /* * n_used_mmu_pages is accessed without holding kvm->mmu_lock * here. We may skip a VM instance errorneosly, but we do not * want to shrink a VM that only started to populate its MMU * anyway. */ - if (kvm->arch.n_used_mmu_pages > 0) { - if (!nr_to_scan--) - break; + if (!kvm->arch.n_used_mmu_pages) continue; - } idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); diff --git a/trunk/arch/x86/kvm/vmx.c b/trunk/arch/x86/kvm/vmx.c index c39b60707e02..b1eb202ee76a 100644 --- a/trunk/arch/x86/kvm/vmx.c +++ b/trunk/arch/x86/kvm/vmx.c @@ -1488,13 +1488,6 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx) loadsegment(ds, vmx->host_state.ds_sel); loadsegment(es, vmx->host_state.es_sel); } -#else - /* - * The sysexit path does not restore ds/es, so we must set them to - * a reasonable value ourselves. - */ - loadsegment(ds, __USER_DS); - loadsegment(es, __USER_DS); #endif reload_tss(); #ifdef CONFIG_X86_64 @@ -3626,6 +3619,7 @@ static void seg_setup(int seg) static int alloc_apic_access_page(struct kvm *kvm) { + struct page *page; struct kvm_userspace_memory_region kvm_userspace_mem; int r = 0; @@ -3640,7 +3634,13 @@ static int alloc_apic_access_page(struct kvm *kvm) if (r) goto out; - kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00); + page = gfn_to_page(kvm, 0xfee00); + if (is_error_page(page)) { + r = -EFAULT; + goto out; + } + + kvm->arch.apic_access_page = page; out: mutex_unlock(&kvm->slots_lock); return r; @@ -3648,6 +3648,7 @@ static int alloc_apic_access_page(struct kvm *kvm) static int alloc_identity_pagetable(struct kvm *kvm) { + struct page *page; struct kvm_userspace_memory_region kvm_userspace_mem; int r = 0; @@ -3663,8 +3664,13 @@ static int alloc_identity_pagetable(struct kvm *kvm) if (r) goto out; - kvm->arch.ept_identity_pagetable = gfn_to_page(kvm, - kvm->arch.ept_identity_map_addr >> PAGE_SHIFT); + page = gfn_to_page(kvm, kvm->arch.ept_identity_map_addr >> PAGE_SHIFT); + if (is_error_page(page)) { + r = -EFAULT; + goto out; + } + + kvm->arch.ept_identity_pagetable = page; out: mutex_unlock(&kvm->slots_lock); return r; @@ -6370,6 +6376,19 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) #endif ); +#ifndef CONFIG_X86_64 + /* + * The sysexit path does not restore ds/es, so we must set them to + * a reasonable value ourselves. + * + * We can't defer this to vmx_load_host_state() since that function + * may be executed in interrupt context, which saves and restores segments + * around it, nullifying its effect.
+ */ + loadsegment(ds, __USER_DS); + loadsegment(es, __USER_DS); +#endif + vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) | (1 << VCPU_EXREG_RFLAGS) | (1 << VCPU_EXREG_CPL) @@ -6569,7 +6588,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu) /* Exposing INVPCID only when PCID is exposed */ best = kvm_find_cpuid_entry(vcpu, 0x7, 0); if (vmx_invpcid_supported() && - best && (best->ecx & bit(X86_FEATURE_INVPCID)) && + best && (best->ebx & bit(X86_FEATURE_INVPCID)) && guest_cpuid_has_pcid(vcpu)) { exec_control |= SECONDARY_EXEC_ENABLE_INVPCID; vmcs_write32(SECONDARY_VM_EXEC_CONTROL, @@ -6579,7 +6598,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu) vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); if (best) - best->ecx &= ~bit(X86_FEATURE_INVPCID); + best->ebx &= ~bit(X86_FEATURE_INVPCID); } } diff --git a/trunk/arch/x86/kvm/x86.c b/trunk/arch/x86/kvm/x86.c index 59b59508ff07..2966c847d489 100644 --- a/trunk/arch/x86/kvm/x86.c +++ b/trunk/arch/x86/kvm/x86.c @@ -806,7 +806,7 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc); * kvm-specific. Those are put in the beginning of the list. */ -#define KVM_SAVE_MSRS_BEGIN 9 +#define KVM_SAVE_MSRS_BEGIN 10 static u32 msrs_to_save[] = { MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, @@ -925,6 +925,10 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) */ getboottime(&boot); + if (kvm->arch.kvmclock_offset) { + struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset); + boot = timespec_sub(boot, ts); + } wc.sec = boot.tv_sec; wc.nsec = boot.tv_nsec; wc.version = version; @@ -1996,6 +2000,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) case MSR_KVM_STEAL_TIME: data = vcpu->arch.st.msr_val; break; + case MSR_KVM_PV_EOI_EN: + data = vcpu->arch.pv_eoi.msr_val; + break; case MSR_IA32_P5_MC_ADDR: case MSR_IA32_P5_MC_TYPE: case MSR_IA32_MCG_CAP: @@ -5106,17 +5113,20 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static void vapic_enter(struct kvm_vcpu *vcpu) +static int vapic_enter(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; struct page *page; if (!apic || !apic->vapic_addr) - return; + return 0; page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); + if (is_error_page(page)) + return -EFAULT; vcpu->arch.apic->vapic_page = page; + return 0; } static void vapic_exit(struct kvm_vcpu *vcpu) @@ -5423,7 +5433,11 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) } vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - vapic_enter(vcpu); + r = vapic_enter(vcpu); + if (r) { + srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); + return r; + } r = 1; while (r > 0) { diff --git a/trunk/arch/x86/mm/hugetlbpage.c b/trunk/arch/x86/mm/hugetlbpage.c index f6679a7fb8ca..b91e48512425 100644 --- a/trunk/arch/x86/mm/hugetlbpage.c +++ b/trunk/arch/x86/mm/hugetlbpage.c @@ -56,9 +56,16 @@ static int vma_shareable(struct vm_area_struct *vma, unsigned long addr) } /* - * search for a shareable pmd page for hugetlb. + * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() + * and returns the corresponding pte. While this is not necessary for the + * !shared pmd case because we can allocate the pmd later as well, it makes the + * code much cleaner. pmd allocation is essential for the shared case because + * pud has to be populated inside the same i_mmap_mutex section - otherwise + * racing tasks could either miss the sharing (see huge_pte_offset) or select a + * bad pmd for sharing. 
*/ -static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) +static pte_t * +huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) { struct vm_area_struct *vma = find_vma(mm, addr); struct address_space *mapping = vma->vm_file->f_mapping; @@ -68,9 +75,10 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) struct vm_area_struct *svma; unsigned long saddr; pte_t *spte = NULL; + pte_t *pte; if (!vma_shareable(vma, addr)) - return; + return (pte_t *)pmd_alloc(mm, pud, addr); mutex_lock(&mapping->i_mmap_mutex); vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) { @@ -97,7 +105,9 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) put_page(virt_to_page(spte)); spin_unlock(&mm->page_table_lock); out: + pte = (pte_t *)pmd_alloc(mm, pud, addr); mutex_unlock(&mapping->i_mmap_mutex); + return pte; } /* @@ -142,8 +152,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, } else { BUG_ON(sz != PMD_SIZE); if (pud_none(*pud)) - huge_pmd_share(mm, addr, pud); - pte = (pte_t *) pmd_alloc(mm, pud, addr); + pte = huge_pmd_share(mm, addr, pud); + else + pte = (pte_t *)pmd_alloc(mm, pud, addr); } } BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); diff --git a/trunk/arch/x86/mm/pageattr.c b/trunk/arch/x86/mm/pageattr.c index 931930a96160..a718e0d23503 100644 --- a/trunk/arch/x86/mm/pageattr.c +++ b/trunk/arch/x86/mm/pageattr.c @@ -919,13 +919,11 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, /* * On success we use clflush, when the CPU supports it to - * avoid the wbindv. If the CPU does not support it, in the - * error case, and during early boot (for EFI) we fall back - * to cpa_flush_all (which uses wbinvd): + * avoid the wbindv. 
If the CPU does not support it and in the + error case we fall back to cpa_flush_all (which uses + wbinvd): */ - if (early_boot_irqs_disabled) - __cpa_flush_all((void *)(long)cache); - else if (!ret && cpu_has_clflush) { + if (!ret && cpu_has_clflush) { if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) { cpa_flush_array(addr, numpages, cache, cpa.flags, pages); diff --git a/trunk/arch/x86/mm/srat.c b/trunk/arch/x86/mm/srat.c index 4599c3e8bcb6..4ddf497ca65b 100644 --- a/trunk/arch/x86/mm/srat.c +++ b/trunk/arch/x86/mm/srat.c @@ -142,23 +142,23 @@ static inline int save_add_info(void) {return 0;} #endif /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */ -void __init +int __init acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) { u64 start, end; int node, pxm; if (srat_disabled()) - return; + return -1; if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) { bad_srat(); - return; + return -1; } if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0) - return; + return -1; if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info()) - return; + return -1; start = ma->base_address; end = start + ma->length; pxm = ma->proximity_domain; @@ -168,12 +168,12 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) if (node < 0) { printk(KERN_ERR "SRAT: Too many proximity domains.\n"); bad_srat(); - return; + return -1; } if (numa_add_memblk(node, start, end) < 0) { bad_srat(); - return; + return -1; } node_set(node, numa_nodes_parsed); @@ -181,6 +181,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n", node, pxm, (unsigned long long) start, (unsigned long long) end - 1); + return 0; } void __init acpi_numa_arch_fixup(void) {} diff --git a/trunk/arch/x86/platform/efi/efi.c b/trunk/arch/x86/platform/efi/efi.c index 2dc29f51e75a..92660edaa1e7 100644 --- a/trunk/arch/x86/platform/efi/efi.c +++ b/trunk/arch/x86/platform/efi/efi.c @@ -234,7 +234,22 @@ static efi_status_t __init phys_efi_set_virtual_address_map( return status; } -static int efi_set_rtc_mmss(unsigned long nowtime) +static efi_status_t __init phys_efi_get_time(efi_time_t *tm, + efi_time_cap_t *tc) +{ + unsigned long flags; + efi_status_t status; + + spin_lock_irqsave(&rtc_lock, flags); + efi_call_phys_prelog(); + status = efi_call_phys2(efi_phys.get_time, virt_to_phys(tm), + virt_to_phys(tc)); + efi_call_phys_epilog(); + spin_unlock_irqrestore(&rtc_lock, flags); + return status; +} + +int efi_set_rtc_mmss(unsigned long nowtime) { int real_seconds, real_minutes; efi_status_t status; @@ -263,7 +278,7 @@ static int efi_set_rtc_mmss(unsigned long nowtime) return 0; } -static unsigned long efi_get_time(void) +unsigned long efi_get_time(void) { efi_status_t status; efi_time_t eft; @@ -606,13 +621,18 @@ static int __init efi_runtime_init(void) } /* * We will only need *early* access to the following - * EFI runtime service before set_virtual_address_map + * two EFI runtime services before set_virtual_address_map * is invoked. */ + efi_phys.get_time = (efi_get_time_t *)runtime->get_time; efi_phys.set_virtual_address_map = (efi_set_virtual_address_map_t *) runtime->set_virtual_address_map; - + /* + * Make sure efi_get_time can be called before entering + * virtual mode.
+ */ + efi.get_time = phys_efi_get_time; early_iounmap(runtime, sizeof(efi_runtime_services_t)); return 0; @@ -700,10 +720,12 @@ void __init efi_init(void) efi_enabled = 0; return; } +#ifdef CONFIG_X86_32 if (efi_native) { x86_platform.get_wallclock = efi_get_time; x86_platform.set_wallclock = efi_set_rtc_mmss; } +#endif #if EFI_DEBUG print_efi_memmap(); diff --git a/trunk/arch/x86/platform/olpc/olpc-xo1-pm.c b/trunk/arch/x86/platform/olpc/olpc-xo1-pm.c index 0ce8616c88ae..d75582d1aa55 100644 --- a/trunk/arch/x86/platform/olpc/olpc-xo1-pm.c +++ b/trunk/arch/x86/platform/olpc/olpc-xo1-pm.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -51,16 +52,11 @@ EXPORT_SYMBOL_GPL(olpc_xo1_pm_wakeup_clear); static int xo1_power_state_enter(suspend_state_t pm_state) { unsigned long saved_sci_mask; - int r; /* Only STR is supported */ if (pm_state != PM_SUSPEND_MEM) return -EINVAL; - r = olpc_ec_cmd(EC_SET_SCI_INHIBIT, NULL, 0, NULL, 0); - if (r) - return r; - /* * Save SCI mask (this gets lost since PM1_EN is used as a mask for * wakeup events, which is not necessarily the same event set) @@ -76,16 +72,6 @@ static int xo1_power_state_enter(suspend_state_t pm_state) /* Restore SCI mask (using dword access to CS5536_PM1_EN) */ outl(saved_sci_mask, acpi_base + CS5536_PM1_STS); - /* Tell the EC to stop inhibiting SCIs */ - olpc_ec_cmd(EC_SET_SCI_INHIBIT_RELEASE, NULL, 0, NULL, 0); - - /* - * Tell the wireless module to restart USB communication. - * Must be done twice. - */ - olpc_ec_cmd(EC_WAKE_UP_WLAN, NULL, 0, NULL, 0); - olpc_ec_cmd(EC_WAKE_UP_WLAN, NULL, 0, NULL, 0); - return 0; } diff --git a/trunk/arch/x86/platform/olpc/olpc-xo1-sci.c b/trunk/arch/x86/platform/olpc/olpc-xo1-sci.c index 04b8c73659c5..63d4aa40956e 100644 --- a/trunk/arch/x86/platform/olpc/olpc-xo1-sci.c +++ b/trunk/arch/x86/platform/olpc/olpc-xo1-sci.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include diff --git a/trunk/arch/x86/platform/olpc/olpc-xo15-sci.c b/trunk/arch/x86/platform/olpc/olpc-xo15-sci.c index 599be499fdf7..2fdca25905ae 100644 --- a/trunk/arch/x86/platform/olpc/olpc-xo15-sci.c +++ b/trunk/arch/x86/platform/olpc/olpc-xo15-sci.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include diff --git a/trunk/arch/x86/platform/olpc/olpc.c b/trunk/arch/x86/platform/olpc/olpc.c index a4bee53c2e54..27376081ddec 100644 --- a/trunk/arch/x86/platform/olpc/olpc.c +++ b/trunk/arch/x86/platform/olpc/olpc.c @@ -14,14 +14,13 @@ #include #include #include -#include #include #include #include #include #include -#include #include +#include #include #include @@ -31,17 +30,6 @@ struct olpc_platform_t olpc_platform_info; EXPORT_SYMBOL_GPL(olpc_platform_info); -static DEFINE_SPINLOCK(ec_lock); - -/* debugfs interface to EC commands */ -#define EC_MAX_CMD_ARGS (5 + 1) /* cmd byte + 5 args */ -#define EC_MAX_CMD_REPLY (8) - -static struct dentry *ec_debugfs_dir; -static DEFINE_MUTEX(ec_debugfs_cmd_lock); -static unsigned char ec_debugfs_resp[EC_MAX_CMD_REPLY]; -static unsigned int ec_debugfs_resp_bytes; - /* EC event mask to be applied during suspend (defining wakeup sources). */ static u16 ec_wakeup_mask; @@ -125,16 +113,13 @@ static int __wait_on_obf(unsigned int line, unsigned int port, int desired) * . Unfortunately, while * OpenFirmware's source is available, the EC's is not. 
*/ -int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen, - unsigned char *outbuf, size_t outlen) +static int olpc_xo1_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf, + size_t outlen, void *arg) { - unsigned long flags; int ret = -EIO; int i; int restarts = 0; - spin_lock_irqsave(&ec_lock, flags); - /* Clear OBF */ for (i = 0; i < 10 && (obf_status(0x6c) == 1); i++) inb(0x68); @@ -198,10 +183,8 @@ int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen, ret = 0; err: - spin_unlock_irqrestore(&ec_lock, flags); return ret; } -EXPORT_SYMBOL_GPL(olpc_ec_cmd); void olpc_ec_wakeup_set(u16 value) { @@ -280,96 +263,6 @@ int olpc_ec_sci_query(u16 *sci_value) } EXPORT_SYMBOL_GPL(olpc_ec_sci_query); -static ssize_t ec_debugfs_cmd_write(struct file *file, const char __user *buf, - size_t size, loff_t *ppos) -{ - int i, m; - unsigned char ec_cmd[EC_MAX_CMD_ARGS]; - unsigned int ec_cmd_int[EC_MAX_CMD_ARGS]; - char cmdbuf[64]; - int ec_cmd_bytes; - - mutex_lock(&ec_debugfs_cmd_lock); - - size = simple_write_to_buffer(cmdbuf, sizeof(cmdbuf), ppos, buf, size); - - m = sscanf(cmdbuf, "%x:%u %x %x %x %x %x", &ec_cmd_int[0], - &ec_debugfs_resp_bytes, - &ec_cmd_int[1], &ec_cmd_int[2], &ec_cmd_int[3], - &ec_cmd_int[4], &ec_cmd_int[5]); - if (m < 2 || ec_debugfs_resp_bytes > EC_MAX_CMD_REPLY) { - /* reset to prevent overflow on read */ - ec_debugfs_resp_bytes = 0; - - printk(KERN_DEBUG "olpc-ec: bad ec cmd: " - "cmd:response-count [arg1 [arg2 ...]]\n"); - size = -EINVAL; - goto out; - } - - /* convert scanf'd ints to char */ - ec_cmd_bytes = m - 2; - for (i = 0; i <= ec_cmd_bytes; i++) - ec_cmd[i] = ec_cmd_int[i]; - - printk(KERN_DEBUG "olpc-ec: debugfs cmd 0x%02x with %d args " - "%02x %02x %02x %02x %02x, want %d returns\n", - ec_cmd[0], ec_cmd_bytes, ec_cmd[1], ec_cmd[2], ec_cmd[3], - ec_cmd[4], ec_cmd[5], ec_debugfs_resp_bytes); - - olpc_ec_cmd(ec_cmd[0], (ec_cmd_bytes == 0) ? 
NULL : &ec_cmd[1], - ec_cmd_bytes, ec_debugfs_resp, ec_debugfs_resp_bytes); - - printk(KERN_DEBUG "olpc-ec: response " - "%02x %02x %02x %02x %02x %02x %02x %02x (%d bytes expected)\n", - ec_debugfs_resp[0], ec_debugfs_resp[1], ec_debugfs_resp[2], - ec_debugfs_resp[3], ec_debugfs_resp[4], ec_debugfs_resp[5], - ec_debugfs_resp[6], ec_debugfs_resp[7], ec_debugfs_resp_bytes); - -out: - mutex_unlock(&ec_debugfs_cmd_lock); - return size; -} - -static ssize_t ec_debugfs_cmd_read(struct file *file, char __user *buf, - size_t size, loff_t *ppos) -{ - unsigned int i, r; - char *rp; - char respbuf[64]; - - mutex_lock(&ec_debugfs_cmd_lock); - rp = respbuf; - rp += sprintf(rp, "%02x", ec_debugfs_resp[0]); - for (i = 1; i < ec_debugfs_resp_bytes; i++) - rp += sprintf(rp, ", %02x", ec_debugfs_resp[i]); - mutex_unlock(&ec_debugfs_cmd_lock); - rp += sprintf(rp, "\n"); - - r = rp - respbuf; - return simple_read_from_buffer(buf, size, ppos, respbuf, r); -} - -static const struct file_operations ec_debugfs_genops = { - .write = ec_debugfs_cmd_write, - .read = ec_debugfs_cmd_read, -}; - -static void setup_debugfs(void) -{ - ec_debugfs_dir = debugfs_create_dir("olpc-ec", 0); - if (ec_debugfs_dir == ERR_PTR(-ENODEV)) - return; - - debugfs_create_file("cmd", 0600, ec_debugfs_dir, NULL, - &ec_debugfs_genops); -} - -static int olpc_ec_suspend(void) -{ - return olpc_ec_mask_write(ec_wakeup_mask); -} - static bool __init check_ofw_architecture(struct device_node *root) { const char *olpc_arch; @@ -424,8 +317,59 @@ static int __init add_xo1_platform_devices(void) return 0; } -static struct syscore_ops olpc_syscore_ops = { - .suspend = olpc_ec_suspend, +static int olpc_xo1_ec_probe(struct platform_device *pdev) +{ + /* get the EC revision */ + olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, + (unsigned char *) &olpc_platform_info.ecver, 1); + + /* EC version 0x5f adds support for wide SCI mask */ + if (olpc_platform_info.ecver >= 0x5f) + olpc_platform_info.flags |= OLPC_F_EC_WIDE_SCI; + + pr_info("OLPC board revision %s%X (EC=%x)\n", + ((olpc_platform_info.boardrev & 0xf) < 8) ? "pre" : "", + olpc_platform_info.boardrev >> 4, + olpc_platform_info.ecver); + + return 0; +} +static int olpc_xo1_ec_suspend(struct platform_device *pdev) +{ + olpc_ec_mask_write(ec_wakeup_mask); + + /* + * Squelch SCIs while suspended. This is a fix for + * . + */ + return olpc_ec_cmd(EC_SET_SCI_INHIBIT, NULL, 0, NULL, 0); +} + +static int olpc_xo1_ec_resume(struct platform_device *pdev) +{ + /* Tell the EC to stop inhibiting SCIs */ + olpc_ec_cmd(EC_SET_SCI_INHIBIT_RELEASE, NULL, 0, NULL, 0); + + /* + * Tell the wireless module to restart USB communication. + * Must be done twice. 
+ */ + olpc_ec_cmd(EC_WAKE_UP_WLAN, NULL, 0, NULL, 0); + olpc_ec_cmd(EC_WAKE_UP_WLAN, NULL, 0, NULL, 0); + + return 0; +} + +static struct olpc_ec_driver ec_xo1_driver = { + .probe = olpc_xo1_ec_probe, + .suspend = olpc_xo1_ec_suspend, + .resume = olpc_xo1_ec_resume, + .ec_cmd = olpc_xo1_ec_cmd, +}; + +static struct olpc_ec_driver ec_xo1_5_driver = { + .probe = olpc_xo1_ec_probe, + .ec_cmd = olpc_xo1_ec_cmd, }; static int __init olpc_init(void) @@ -435,16 +379,17 @@ static int __init olpc_init(void) if (!olpc_ofw_present() || !platform_detect()) return 0; - spin_lock_init(&ec_lock); + /* register the XO-1 and 1.5-specific EC handler */ + if (olpc_platform_info.boardrev < olpc_board_pre(0xd0)) /* XO-1 */ + olpc_ec_driver_register(&ec_xo1_driver, NULL); + else + olpc_ec_driver_register(&ec_xo1_5_driver, NULL); + platform_device_register_simple("olpc-ec", -1, NULL, 0); /* assume B1 and above models always have a DCON */ if (olpc_board_at_least(olpc_board(0xb1))) olpc_platform_info.flags |= OLPC_F_DCON; - /* get the EC revision */ - olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, - (unsigned char *) &olpc_platform_info.ecver, 1); - #ifdef CONFIG_PCI_OLPC /* If the VSA exists let it emulate PCI, if not emulate in kernel. * XO-1 only. */ @@ -452,14 +397,6 @@ static int __init olpc_init(void) !cs5535_has_vsa2()) x86_init.pci.arch_init = pci_olpc_init; #endif - /* EC version 0x5f adds support for wide SCI mask */ - if (olpc_platform_info.ecver >= 0x5f) - olpc_platform_info.flags |= OLPC_F_EC_WIDE_SCI; - - printk(KERN_INFO "OLPC board revision %s%X (EC=%x)\n", - ((olpc_platform_info.boardrev & 0xf) < 8) ? "pre" : "", - olpc_platform_info.boardrev >> 4, - olpc_platform_info.ecver); if (olpc_platform_info.boardrev < olpc_board_pre(0xd0)) { /* XO-1 */ r = add_xo1_platform_devices(); @@ -467,9 +404,6 @@ static int __init olpc_init(void) return r; } - register_syscore_ops(&olpc_syscore_ops); - setup_debugfs(); - return 0; } diff --git a/trunk/arch/x86/realmode/rm/Makefile b/trunk/arch/x86/realmode/rm/Makefile index b2d534cab25f..88692871823f 100644 --- a/trunk/arch/x86/realmode/rm/Makefile +++ b/trunk/arch/x86/realmode/rm/Makefile @@ -72,7 +72,7 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \ -Wall -Wstrict-prototypes \ -march=i386 -mregparm=3 \ -include $(srctree)/$(src)/../../boot/code16gcc.h \ - -fno-strict-aliasing -fomit-frame-pointer \ + -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ $(call cc-option, -ffreestanding) \ $(call cc-option, -fno-toplevel-reorder,\ $(call cc-option, -fno-unit-at-a-time)) \ diff --git a/trunk/arch/x86/syscalls/syscall_64.tbl b/trunk/arch/x86/syscalls/syscall_64.tbl index 51171aeff0dc..a582bfed95bb 100644 --- a/trunk/arch/x86/syscalls/syscall_64.tbl +++ b/trunk/arch/x86/syscalls/syscall_64.tbl @@ -60,8 +60,8 @@ 51 common getsockname sys_getsockname 52 common getpeername sys_getpeername 53 common socketpair sys_socketpair -54 common setsockopt sys_setsockopt -55 common getsockopt sys_getsockopt +54 64 setsockopt sys_setsockopt +55 64 getsockopt sys_getsockopt 56 common clone stub_clone 57 common fork stub_fork 58 common vfork stub_vfork @@ -318,7 +318,7 @@ 309 common getcpu sys_getcpu 310 64 process_vm_readv sys_process_vm_readv 311 64 process_vm_writev sys_process_vm_writev -312 64 kcmp sys_kcmp +312 common kcmp sys_kcmp # # x32-specific system call numbers start at 512 to avoid cache impact @@ -353,3 +353,5 @@ 538 x32 sendmmsg compat_sys_sendmmsg 539 x32 process_vm_readv compat_sys_process_vm_readv 540 x32 process_vm_writev 
compat_sys_process_vm_writev +541 x32 setsockopt compat_sys_setsockopt +542 x32 getsockopt compat_sys_getsockopt diff --git a/trunk/arch/x86/um/asm/ptrace.h b/trunk/arch/x86/um/asm/ptrace.h index 950dfb7b8417..e72cd0df5ba3 100644 --- a/trunk/arch/x86/um/asm/ptrace.h +++ b/trunk/arch/x86/um/asm/ptrace.h @@ -30,10 +30,10 @@ #define profile_pc(regs) PT_REGS_IP(regs) #define UPT_RESTART_SYSCALL(r) (UPT_IP(r) -= 2) -#define UPT_SET_SYSCALL_RETURN(r, res) (UPT_AX(r) = (res)) +#define PT_REGS_SET_SYSCALL_RETURN(r, res) (PT_REGS_AX(r) = (res)) -static inline long regs_return_value(struct uml_pt_regs *regs) +static inline long regs_return_value(struct pt_regs *regs) { - return UPT_AX(regs); + return PT_REGS_AX(regs); } #endif /* __UM_X86_PTRACE_H */ diff --git a/trunk/arch/x86/xen/enlighten.c b/trunk/arch/x86/xen/enlighten.c index bf4bda6d3e9a..9642d4a38602 100644 --- a/trunk/arch/x86/xen/enlighten.c +++ b/trunk/arch/x86/xen/enlighten.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include @@ -1470,130 +1469,38 @@ asmlinkage void __init xen_start_kernel(void) #endif } -#ifdef CONFIG_XEN_PVHVM -/* - * The pfn containing the shared_info is located somewhere in RAM. This - * will cause trouble if the current kernel is doing a kexec boot into a - * new kernel. The new kernel (and its startup code) can not know where - * the pfn is, so it can not reserve the page. The hypervisor will - * continue to update the pfn, and as a result memory corruption occours - * in the new kernel. - * - * One way to work around this issue is to allocate a page in the - * xen-platform pci device's BAR memory range. But pci init is done very - * late and the shared_info page is already in use very early to read - * the pvclock. So moving the pfn from RAM to MMIO is racy because some - * code paths on other vcpus could access the pfn during the small - * window when the old pfn is moved to the new pfn. There is even a - * small window were the old pfn is not backed by a mfn, and during that - * time all reads return -1. - * - * Because it is not known upfront where the MMIO region is located it - * can not be used right from the start in xen_hvm_init_shared_info. - * - * To minimise trouble the move of the pfn is done shortly before kexec. - * This does not eliminate the race because all vcpus are still online - * when the syscore_ops will be called. But hopefully there is no work - * pending at this point in time. Also the syscore_op is run last which - * reduces the risk further. - */ - -static struct shared_info *xen_hvm_shared_info; - -static void xen_hvm_connect_shared_info(unsigned long pfn) +void __ref xen_hvm_init_shared_info(void) { + int cpu; struct xen_add_to_physmap xatp; + static struct shared_info *shared_info_page = 0; + if (!shared_info_page) + shared_info_page = (struct shared_info *) + extend_brk(PAGE_SIZE, PAGE_SIZE); xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; - xatp.gpfn = pfn; + xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); -} -static void xen_hvm_set_shared_info(struct shared_info *sip) -{ - int cpu; - - HYPERVISOR_shared_info = sip; + HYPERVISOR_shared_info = (struct shared_info *)shared_info_page; /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info * page, we use it in the event channel upcall and in some pvclock * related functions. We don't need the vcpu_info placement * optimizations because we don't use any pv_mmu or pv_irq op on * HVM. 
- * When xen_hvm_set_shared_info is run at boot time only vcpu 0 is - * online but xen_hvm_set_shared_info is run at resume time too and + * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is + * online but xen_hvm_init_shared_info is run at resume time too and * in that case multiple vcpus might be online. */ for_each_online_cpu(cpu) { per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; } } -/* Reconnect the shared_info pfn to a mfn */ -void xen_hvm_resume_shared_info(void) -{ - xen_hvm_connect_shared_info(__pa(xen_hvm_shared_info) >> PAGE_SHIFT); -} - -#ifdef CONFIG_KEXEC -static struct shared_info *xen_hvm_shared_info_kexec; -static unsigned long xen_hvm_shared_info_pfn_kexec; - -/* Remember a pfn in MMIO space for kexec reboot */ -void __devinit xen_hvm_prepare_kexec(struct shared_info *sip, unsigned long pfn) -{ - xen_hvm_shared_info_kexec = sip; - xen_hvm_shared_info_pfn_kexec = pfn; -} - -static void xen_hvm_syscore_shutdown(void) -{ - struct xen_memory_reservation reservation = { - .domid = DOMID_SELF, - .nr_extents = 1, - }; - unsigned long prev_pfn; - int rc; - - if (!xen_hvm_shared_info_kexec) - return; - - prev_pfn = __pa(xen_hvm_shared_info) >> PAGE_SHIFT; - set_xen_guest_handle(reservation.extent_start, &prev_pfn); - - /* Move pfn to MMIO, disconnects previous pfn from mfn */ - xen_hvm_connect_shared_info(xen_hvm_shared_info_pfn_kexec); - - /* Update pointers, following hypercall is also a memory barrier */ - xen_hvm_set_shared_info(xen_hvm_shared_info_kexec); - - /* Allocate new mfn for previous pfn */ - do { - rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); - if (rc == 0) - msleep(123); - } while (rc == 0); - - /* Make sure the previous pfn is really connected to a (new) mfn */ - BUG_ON(rc != 1); -} - -static struct syscore_ops xen_hvm_syscore_ops = { - .shutdown = xen_hvm_syscore_shutdown, -}; -#endif - -/* Use a pfn in RAM, may move to MMIO before kexec. */ -static void __init xen_hvm_init_shared_info(void) -{ - /* Remember pointer for resume */ - xen_hvm_shared_info = extend_brk(PAGE_SIZE, PAGE_SIZE); - xen_hvm_connect_shared_info(__pa(xen_hvm_shared_info) >> PAGE_SHIFT); - xen_hvm_set_shared_info(xen_hvm_shared_info); -} - +#ifdef CONFIG_XEN_PVHVM static void __init init_hvm_pv_info(void) { int major, minor; @@ -1644,9 +1551,6 @@ static void __init xen_hvm_guest_init(void) init_hvm_pv_info(); xen_hvm_init_shared_info(); -#ifdef CONFIG_KEXEC - register_syscore_ops(&xen_hvm_syscore_ops); -#endif if (xen_feature(XENFEAT_hvm_callback_vector)) xen_have_vector_callback = 1; diff --git a/trunk/arch/x86/xen/mmu.c b/trunk/arch/x86/xen/mmu.c index b65a76133f4f..5141d808e751 100644 --- a/trunk/arch/x86/xen/mmu.c +++ b/trunk/arch/x86/xen/mmu.c @@ -1283,7 +1283,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus, cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask)); args->op.cmd = MMUEXT_TLB_FLUSH_MULTI; - if (start != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) { + if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) { args->op.cmd = MMUEXT_INVLPG_MULTI; args->op.arg1.linear_addr = start; } diff --git a/trunk/arch/x86/xen/p2m.c b/trunk/arch/x86/xen/p2m.c index 64effdc6da94..76ba0e97e530 100644 --- a/trunk/arch/x86/xen/p2m.c +++ b/trunk/arch/x86/xen/p2m.c @@ -194,6 +194,13 @@ RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID * boundary violation will require three middle nodes. 
*/ RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3); +/* When we populate back during bootup, the number of pages can vary. The + * max we have seen is 395979, but that does not mean it can't be more. + * Some machines can have 3GB I/O holes even. With early_can_reuse_p2m_middle + * it can re-use the Xen provided mfn_list array, so we only need to allocate at + * most three P2M top nodes. */ +RESERVE_BRK(p2m_populated, PAGE_SIZE * 3); + static inline unsigned p2m_top_index(unsigned long pfn) { BUG_ON(pfn >= MAX_P2M_PFN); @@ -570,12 +577,99 @@ static bool __init early_alloc_p2m(unsigned long pfn) } return true; } + +/* + * Skim over the P2M tree looking at pages that are either filled with + * INVALID_P2M_ENTRY or with 1:1 PFNs. If found, re-use that page and + * replace the P2M leaf with a p2m_missing or p2m_identity. + * Stick the old page in the new P2M tree location. + */ +bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_mfn) +{ + unsigned topidx; + unsigned mididx; + unsigned ident_pfns; + unsigned inv_pfns; + unsigned long *p2m; + unsigned long *mid_mfn_p; + unsigned idx; + unsigned long pfn; + + /* We only look when this entails a P2M middle layer */ + if (p2m_index(set_pfn)) + return false; + + for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) { + topidx = p2m_top_index(pfn); + + if (!p2m_top[topidx]) + continue; + + if (p2m_top[topidx] == p2m_mid_missing) + continue; + + mididx = p2m_mid_index(pfn); + p2m = p2m_top[topidx][mididx]; + if (!p2m) + continue; + + if ((p2m == p2m_missing) || (p2m == p2m_identity)) + continue; + + if ((unsigned long)p2m == INVALID_P2M_ENTRY) + continue; + + ident_pfns = 0; + inv_pfns = 0; + for (idx = 0; idx < P2M_PER_PAGE; idx++) { + /* IDENTITY_PFNs are 1:1 */ + if (p2m[idx] == IDENTITY_FRAME(pfn + idx)) + ident_pfns++; + else if (p2m[idx] == INVALID_P2M_ENTRY) + inv_pfns++; + else + break; + } + if ((ident_pfns == P2M_PER_PAGE) || (inv_pfns == P2M_PER_PAGE)) + goto found; + } + return false; +found: + /* Found one, replace old with p2m_identity or p2m_missing */ + p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing); + /* And the other for save/restore.. */ + mid_mfn_p = p2m_top_mfn_p[topidx]; + /* NOTE: Even if it is a p2m_identity it should still point to + * a page filled with INVALID_P2M_ENTRY entries. */ + mid_mfn_p[mididx] = virt_to_mfn(p2m_missing); + + /* Reset where we want to stick the old page in.
*/ + topidx = p2m_top_index(set_pfn); + mididx = p2m_mid_index(set_pfn); + + /* This shouldn't happen */ + if (WARN_ON(p2m_top[topidx] == p2m_mid_missing)) + early_alloc_p2m(set_pfn); + + if (WARN_ON(p2m_top[topidx][mididx] != p2m_missing)) + return false; + + p2m_init(p2m); + p2m_top[topidx][mididx] = p2m; + mid_mfn_p = p2m_top_mfn_p[topidx]; + mid_mfn_p[mididx] = virt_to_mfn(p2m); + + return true; +} bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn) { if (unlikely(!__set_phys_to_machine(pfn, mfn))) { if (!early_alloc_p2m(pfn)) return false; + if (early_can_reuse_p2m_middle(pfn, mfn)) + return __set_phys_to_machine(pfn, mfn); + if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/)) return false; diff --git a/trunk/arch/x86/xen/setup.c b/trunk/arch/x86/xen/setup.c index ead85576d54a..d11ca11d14fc 100644 --- a/trunk/arch/x86/xen/setup.c +++ b/trunk/arch/x86/xen/setup.c @@ -78,9 +78,16 @@ static void __init xen_add_extra_mem(u64 start, u64 size) memblock_reserve(start, size); xen_max_p2m_pfn = PFN_DOWN(start + size); + for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) { + unsigned long mfn = pfn_to_mfn(pfn); + + if (WARN(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn)) + continue; + WARN(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n", + pfn, mfn); - for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++) __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); + } } static unsigned long __init xen_do_chunk(unsigned long start, diff --git a/trunk/arch/x86/xen/suspend.c b/trunk/arch/x86/xen/suspend.c index ae8a00c39de4..45329c8c226e 100644 --- a/trunk/arch/x86/xen/suspend.c +++ b/trunk/arch/x86/xen/suspend.c @@ -30,7 +30,7 @@ void xen_arch_hvm_post_suspend(int suspend_cancelled) { #ifdef CONFIG_XEN_PVHVM int cpu; - xen_hvm_resume_shared_info(); + xen_hvm_init_shared_info(); xen_callback_vector(); xen_unplug_emulated_devices(); if (xen_feature(XENFEAT_hvm_safe_pvclock)) { diff --git a/trunk/arch/x86/xen/xen-ops.h b/trunk/arch/x86/xen/xen-ops.h index 1e4329e04e0f..202d4c150154 100644 --- a/trunk/arch/x86/xen/xen-ops.h +++ b/trunk/arch/x86/xen/xen-ops.h @@ -41,7 +41,7 @@ void xen_enable_syscall(void); void xen_vcpu_restore(void); void xen_callback_vector(void); -void xen_hvm_resume_shared_info(void); +void xen_hvm_init_shared_info(void); void xen_unplug_emulated_devices(void); void __init xen_build_dynamic_phys_to_machine(void); diff --git a/trunk/block/blk-cgroup.c b/trunk/block/blk-cgroup.c index e7dee617358e..f3b44a65fc7a 100644 --- a/trunk/block/blk-cgroup.c +++ b/trunk/block/blk-cgroup.c @@ -31,27 +31,6 @@ EXPORT_SYMBOL_GPL(blkcg_root); static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS]; -struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) -{ - return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id), - struct blkcg, css); -} -EXPORT_SYMBOL_GPL(cgroup_to_blkcg); - -static struct blkcg *task_blkcg(struct task_struct *tsk) -{ - return container_of(task_subsys_state(tsk, blkio_subsys_id), - struct blkcg, css); -} - -struct blkcg *bio_blkcg(struct bio *bio) -{ - if (bio && bio->bi_css) - return container_of(bio->bi_css, struct blkcg, css); - return task_blkcg(current); -} -EXPORT_SYMBOL_GPL(bio_blkcg); - static bool blkcg_policy_enabled(struct request_queue *q, const struct blkcg_policy *pol) { @@ -84,6 +63,7 @@ static void blkg_free(struct blkcg_gq *blkg) kfree(pd); } + blk_exit_rl(&blkg->rl); kfree(blkg); } @@ -91,16 +71,18 @@ static void blkg_free(struct blkcg_gq *blkg) * blkg_alloc - allocate a 
blkg * @blkcg: block cgroup the new blkg is associated with * @q: request_queue the new blkg is associated with + * @gfp_mask: allocation mask to use * * Allocate a new blkg assocating @blkcg and @q. */ -static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q) +static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q, + gfp_t gfp_mask) { struct blkcg_gq *blkg; int i; /* alloc and init base part */ - blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node); + blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node); if (!blkg) return NULL; @@ -109,6 +91,13 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q) blkg->blkcg = blkcg; blkg->refcnt = 1; + /* root blkg uses @q->root_rl, init rl only for !root blkgs */ + if (blkcg != &blkcg_root) { + if (blk_init_rl(&blkg->rl, q, gfp_mask)) + goto err_free; + blkg->rl.blkg = blkg; + } + for (i = 0; i < BLKCG_MAX_POLS; i++) { struct blkcg_policy *pol = blkcg_policy[i]; struct blkg_policy_data *pd; @@ -117,11 +106,9 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q) continue; /* alloc per-policy data and attach it to blkg */ - pd = kzalloc_node(pol->pd_size, GFP_ATOMIC, q->node); - if (!pd) { - blkg_free(blkg); - return NULL; - } + pd = kzalloc_node(pol->pd_size, gfp_mask, q->node); + if (!pd) + goto err_free; blkg->pd[i] = pd; pd->blkg = blkg; @@ -132,6 +119,10 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q) } return blkg; + +err_free: + blkg_free(blkg); + return NULL; } static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, @@ -175,9 +166,13 @@ struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q) } EXPORT_SYMBOL_GPL(blkg_lookup); +/* + * If @new_blkg is %NULL, this function tries to allocate a new one as + * necessary using %GFP_ATOMIC. @new_blkg is always consumed on return. + */ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, - struct request_queue *q) - __releases(q->queue_lock) __acquires(q->queue_lock) + struct request_queue *q, + struct blkcg_gq *new_blkg) { struct blkcg_gq *blkg; int ret; @@ -189,24 +184,26 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, blkg = __blkg_lookup(blkcg, q); if (blkg) { rcu_assign_pointer(blkcg->blkg_hint, blkg); - return blkg; + goto out_free; } /* blkg holds a reference to blkcg */ - if (!css_tryget(&blkcg->css)) - return ERR_PTR(-EINVAL); + if (!css_tryget(&blkcg->css)) { + blkg = ERR_PTR(-EINVAL); + goto out_free; + } /* allocate */ - ret = -ENOMEM; - blkg = blkg_alloc(blkcg, q); - if (unlikely(!blkg)) - goto err_put; + if (!new_blkg) { + new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC); + if (unlikely(!new_blkg)) { + blkg = ERR_PTR(-ENOMEM); + goto out_put; + } + } + blkg = new_blkg; /* insert */ - ret = radix_tree_preload(GFP_ATOMIC); - if (ret) - goto err_free; - spin_lock(&blkcg->lock); ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg); if (likely(!ret)) { @@ -215,15 +212,15 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, } spin_unlock(&blkcg->lock); - radix_tree_preload_end(); - if (!ret) return blkg; -err_free: - blkg_free(blkg); -err_put: + + blkg = ERR_PTR(ret); +out_put: css_put(&blkcg->css); - return ERR_PTR(ret); +out_free: + blkg_free(new_blkg); + return blkg; } struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, @@ -235,7 +232,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, */ if (unlikely(blk_queue_bypass(q))) return ERR_PTR(blk_queue_dead(q) ? 
-EINVAL : -EBUSY); - return __blkg_lookup_create(blkcg, q); + return __blkg_lookup_create(blkcg, q, NULL); } EXPORT_SYMBOL_GPL(blkg_lookup_create); @@ -313,6 +310,38 @@ void __blkg_release(struct blkcg_gq *blkg) } EXPORT_SYMBOL_GPL(__blkg_release); +/* + * The next function used by blk_queue_for_each_rl(). It's a bit tricky + * because the root blkg uses @q->root_rl instead of its own rl. + */ +struct request_list *__blk_queue_next_rl(struct request_list *rl, + struct request_queue *q) +{ + struct list_head *ent; + struct blkcg_gq *blkg; + + /* + * Determine the current blkg list_head. The first entry is + * root_rl which is off @q->blkg_list and mapped to the head. + */ + if (rl == &q->root_rl) { + ent = &q->blkg_list; + } else { + blkg = container_of(rl, struct blkcg_gq, rl); + ent = &blkg->q_node; + } + + /* walk to the next list_head, skip root blkcg */ + ent = ent->next; + if (ent == &q->root_blkg->q_node) + ent = ent->next; + if (ent == &q->blkg_list) + return NULL; + + blkg = container_of(ent, struct blkcg_gq, q_node); + return &blkg->rl; +} + static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val) { @@ -734,24 +763,36 @@ int blkcg_activate_policy(struct request_queue *q, struct blkcg_gq *blkg; struct blkg_policy_data *pd, *n; int cnt = 0, ret; + bool preloaded; if (blkcg_policy_enabled(q, pol)) return 0; + /* preallocations for root blkg */ + blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL); + if (!blkg) + return -ENOMEM; + + preloaded = !radix_tree_preload(GFP_KERNEL); + blk_queue_bypass_start(q); /* make sure the root blkg exists and count the existing blkgs */ spin_lock_irq(q->queue_lock); rcu_read_lock(); - blkg = __blkg_lookup_create(&blkcg_root, q); + blkg = __blkg_lookup_create(&blkcg_root, q, blkg); rcu_read_unlock(); + if (preloaded) + radix_tree_preload_end(); + if (IS_ERR(blkg)) { ret = PTR_ERR(blkg); goto out_unlock; } q->root_blkg = blkg; + q->root_rl.blkg = blkg; list_for_each_entry(blkg, &q->blkg_list, q_node) cnt++; diff --git a/trunk/block/blk-cgroup.h b/trunk/block/blk-cgroup.h index 8ac457ce7783..24597309e23d 100644 --- a/trunk/block/blk-cgroup.h +++ b/trunk/block/blk-cgroup.h @@ -17,6 +17,7 @@ #include #include #include +#include /* Max limits for throttle policy */ #define THROTL_IOPS_MAX UINT_MAX @@ -93,6 +94,8 @@ struct blkcg_gq { struct list_head q_node; struct hlist_node blkcg_node; struct blkcg *blkcg; + /* request allocation list for this blkcg-q pair */ + struct request_list rl; /* reference count */ int refcnt; @@ -120,8 +123,6 @@ struct blkcg_policy { extern struct blkcg blkcg_root; -struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup); -struct blkcg *bio_blkcg(struct bio *bio); struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q); struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, struct request_queue *q); @@ -160,6 +161,25 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, void blkg_conf_finish(struct blkg_conf_ctx *ctx); +static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) +{ + return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id), + struct blkcg, css); +} + +static inline struct blkcg *task_blkcg(struct task_struct *tsk) +{ + return container_of(task_subsys_state(tsk, blkio_subsys_id), + struct blkcg, css); +} + +static inline struct blkcg *bio_blkcg(struct bio *bio) +{ + if (bio && bio->bi_css) + return container_of(bio->bi_css, struct blkcg, css); + return task_blkcg(current); +} + /** * blkg_to_pdata - get policy private data * @blkg: blkg of 
interest @@ -233,6 +253,95 @@ static inline void blkg_put(struct blkcg_gq *blkg) __blkg_release(blkg); } +/** + * blk_get_rl - get request_list to use + * @q: request_queue of interest + * @bio: bio which will be attached to the allocated request (may be %NULL) + * + * The caller wants to allocate a request from @q to use for @bio. Find + * the request_list to use and obtain a reference on it. Should be called + * under queue_lock. This function is guaranteed to return non-%NULL + * request_list. + */ +static inline struct request_list *blk_get_rl(struct request_queue *q, + struct bio *bio) +{ + struct blkcg *blkcg; + struct blkcg_gq *blkg; + + rcu_read_lock(); + + blkcg = bio_blkcg(bio); + + /* bypass blkg lookup and use @q->root_rl directly for root */ + if (blkcg == &blkcg_root) + goto root_rl; + + /* + * Try to use blkg->rl. blkg lookup may fail under memory pressure + * or if either the blkcg or queue is going away. Fall back to + * root_rl in such cases. + */ + blkg = blkg_lookup_create(blkcg, q); + if (unlikely(IS_ERR(blkg))) + goto root_rl; + + blkg_get(blkg); + rcu_read_unlock(); + return &blkg->rl; +root_rl: + rcu_read_unlock(); + return &q->root_rl; +} + +/** + * blk_put_rl - put request_list + * @rl: request_list to put + * + * Put the reference acquired by blk_get_rl(). Should be called under + * queue_lock. + */ +static inline void blk_put_rl(struct request_list *rl) +{ + /* root_rl may not have blkg set */ + if (rl->blkg && rl->blkg->blkcg != &blkcg_root) + blkg_put(rl->blkg); +} + +/** + * blk_rq_set_rl - associate a request with a request_list + * @rq: request of interest + * @rl: target request_list + * + * Associate @rq with @rl so that accounting and freeing can know the + * request_list @rq came from. + */ +static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) +{ + rq->rl = rl; +} + +/** + * blk_rq_rl - return the request_list a request came from + * @rq: request of interest + * + * Return the request_list @rq is allocated from. + */ +static inline struct request_list *blk_rq_rl(struct request *rq) +{ + return rq->rl; +} + +struct request_list *__blk_queue_next_rl(struct request_list *rl, + struct request_queue *q); +/** + * blk_queue_for_each_rl - iterate through all request_lists of a request_queue + * + * Should be used under queue_lock. 
+ */ +#define blk_queue_for_each_rl(rl, q) \ + for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q))) + /** * blkg_stat_add - add a value to a blkg_stat * @stat: target blkg_stat @@ -351,6 +460,7 @@ static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat) #else /* CONFIG_BLK_CGROUP */ struct cgroup; +struct blkcg; struct blkg_policy_data { }; @@ -361,8 +471,6 @@ struct blkcg_gq { struct blkcg_policy { }; -static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; } -static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; } static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; } static inline int blkcg_init_queue(struct request_queue *q) { return 0; } static inline void blkcg_drain_queue(struct request_queue *q) { } @@ -374,6 +482,9 @@ static inline int blkcg_activate_policy(struct request_queue *q, static inline void blkcg_deactivate_policy(struct request_queue *q, const struct blkcg_policy *pol) { } +static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; } +static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; } + static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, struct blkcg_policy *pol) { return NULL; } static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; } @@ -381,5 +492,14 @@ static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; } static inline void blkg_get(struct blkcg_gq *blkg) { } static inline void blkg_put(struct blkcg_gq *blkg) { } +static inline struct request_list *blk_get_rl(struct request_queue *q, + struct bio *bio) { return &q->root_rl; } +static inline void blk_put_rl(struct request_list *rl) { } +static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { } +static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; } + +#define blk_queue_for_each_rl(rl, q) \ + for ((rl) = &(q)->root_rl; (rl); (rl) = NULL) + #endif /* CONFIG_BLK_CGROUP */ #endif /* _BLK_CGROUP_H */ diff --git a/trunk/block/blk-core.c b/trunk/block/blk-core.c index 93eb3e4f88ce..4b4dbdfbca89 100644 --- a/trunk/block/blk-core.c +++ b/trunk/block/blk-core.c @@ -387,7 +387,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all) if (!list_empty(&q->queue_head) && q->request_fn) __blk_run_queue(q); - drain |= q->rq.elvpriv; + drain |= q->nr_rqs_elvpriv; /* * Unfortunately, requests are queued at and tracked from @@ -397,7 +397,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all) if (drain_all) { drain |= !list_empty(&q->queue_head); for (i = 0; i < 2; i++) { - drain |= q->rq.count[i]; + drain |= q->nr_rqs[i]; drain |= q->in_flight[i]; drain |= !list_empty(&q->flush_queue[i]); } @@ -416,9 +416,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all) * left with hung waiters. We need to wake up those waiters. 
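A minimal sketch of how the new request_list helpers are meant to pair up, mirroring what __get_request() and __blk_put_request() in blk-core.c do; the function names are illustrative, and the real allocator drops queue_lock around the sleeping mempool allocation:

	/* queue_lock held; blk_get_rl() is guaranteed to return non-NULL */
	static struct request *rl_alloc_sketch(struct request_queue *q,
					       struct bio *bio, gfp_t gfp_mask)
	{
		struct request_list *rl = blk_get_rl(q, bio);
		struct request *rq = mempool_alloc(rl->rq_pool, gfp_mask);

		if (!rq) {
			blk_put_rl(rl);		/* drop the reference on failure */
			return NULL;
		}
		blk_rq_init(q, rq);
		blk_rq_set_rl(rq, rl);		/* remember which rl rq came from */
		return rq;
	}

	/* queue_lock held */
	static void rl_free_sketch(struct request *rq)
	{
		struct request_list *rl = blk_rq_rl(rq);

		mempool_free(rq, rl->rq_pool);
		blk_put_rl(rl);
	}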
*/ if (q->request_fn) { + struct request_list *rl; + spin_lock_irq(q->queue_lock); - for (i = 0; i < ARRAY_SIZE(q->rq.wait); i++) - wake_up_all(&q->rq.wait[i]); + + blk_queue_for_each_rl(rl, q) + for (i = 0; i < ARRAY_SIZE(rl->wait); i++) + wake_up_all(&rl->wait[i]); + spin_unlock_irq(q->queue_lock); } } @@ -517,28 +522,33 @@ void blk_cleanup_queue(struct request_queue *q) } EXPORT_SYMBOL(blk_cleanup_queue); -static int blk_init_free_list(struct request_queue *q) +int blk_init_rl(struct request_list *rl, struct request_queue *q, + gfp_t gfp_mask) { - struct request_list *rl = &q->rq; - if (unlikely(rl->rq_pool)) return 0; + rl->q = q; rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; - rl->elvpriv = 0; init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, - mempool_free_slab, request_cachep, q->node); - + mempool_free_slab, request_cachep, + gfp_mask, q->node); if (!rl->rq_pool) return -ENOMEM; return 0; } +void blk_exit_rl(struct request_list *rl) +{ + if (rl->rq_pool) + mempool_destroy(rl->rq_pool); +} + struct request_queue *blk_alloc_queue(gfp_t gfp_mask) { return blk_alloc_queue_node(gfp_mask, -1); @@ -680,7 +690,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, if (!q) return NULL; - if (blk_init_free_list(q)) + if (blk_init_rl(&q->root_rl, q, GFP_KERNEL)) return NULL; q->request_fn = rfn; @@ -722,15 +732,15 @@ bool blk_get_queue(struct request_queue *q) } EXPORT_SYMBOL(blk_get_queue); -static inline void blk_free_request(struct request_queue *q, struct request *rq) +static inline void blk_free_request(struct request_list *rl, struct request *rq) { if (rq->cmd_flags & REQ_ELVPRIV) { - elv_put_request(q, rq); + elv_put_request(rl->q, rq); if (rq->elv.icq) put_io_context(rq->elv.icq->ioc); } - mempool_free(rq, q->rq.rq_pool); + mempool_free(rq, rl->rq_pool); } /* @@ -767,18 +777,23 @@ static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) ioc->last_waited = jiffies; } -static void __freed_request(struct request_queue *q, int sync) +static void __freed_request(struct request_list *rl, int sync) { - struct request_list *rl = &q->rq; + struct request_queue *q = rl->q; - if (rl->count[sync] < queue_congestion_off_threshold(q)) + /* + * bdi isn't aware of blkcg yet. As all async IOs end up root + * blkcg anyway, just use root blkcg state. + */ + if (rl == &q->root_rl && + rl->count[sync] < queue_congestion_off_threshold(q)) blk_clear_queue_congested(q, sync); if (rl->count[sync] + 1 <= q->nr_requests) { if (waitqueue_active(&rl->wait[sync])) wake_up(&rl->wait[sync]); - blk_clear_queue_full(q, sync); + blk_clear_rl_full(rl, sync); } } @@ -786,19 +801,20 @@ static void __freed_request(struct request_queue *q, int sync) * A request has just been released. Account for it, update the full and * congestion status, wake up any waiters. Called under q->queue_lock. 
*/ -static void freed_request(struct request_queue *q, unsigned int flags) +static void freed_request(struct request_list *rl, unsigned int flags) { - struct request_list *rl = &q->rq; + struct request_queue *q = rl->q; int sync = rw_is_sync(flags); + q->nr_rqs[sync]--; rl->count[sync]--; if (flags & REQ_ELVPRIV) - rl->elvpriv--; + q->nr_rqs_elvpriv--; - __freed_request(q, sync); + __freed_request(rl, sync); if (unlikely(rl->starved[sync ^ 1])) - __freed_request(q, sync ^ 1); + __freed_request(rl, sync ^ 1); } /* @@ -837,8 +853,8 @@ static struct io_context *rq_ioc(struct bio *bio) } /** - * get_request - get a free request - * @q: request_queue to allocate request from + * __get_request - get a free request + * @rl: request list to allocate from * @rw_flags: RW and SYNC flags * @bio: bio to allocate request for (can be %NULL) * @gfp_mask: allocation mask @@ -850,20 +866,16 @@ static struct io_context *rq_ioc(struct bio *bio) * Returns %NULL on failure, with @q->queue_lock held. * Returns !%NULL on success, with @q->queue_lock *not held*. */ -static struct request *get_request(struct request_queue *q, int rw_flags, - struct bio *bio, gfp_t gfp_mask) +static struct request *__get_request(struct request_list *rl, int rw_flags, + struct bio *bio, gfp_t gfp_mask) { + struct request_queue *q = rl->q; struct request *rq; - struct request_list *rl = &q->rq; - struct elevator_type *et; - struct io_context *ioc; + struct elevator_type *et = q->elevator->type; + struct io_context *ioc = rq_ioc(bio); struct io_cq *icq = NULL; const bool is_sync = rw_is_sync(rw_flags) != 0; - bool retried = false; int may_queue; -retry: - et = q->elevator->type; - ioc = rq_ioc(bio); if (unlikely(blk_queue_dead(q))) return NULL; @@ -874,29 +886,15 @@ static struct request *get_request(struct request_queue *q, int rw_flags, if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { if (rl->count[is_sync]+1 >= q->nr_requests) { - /* - * We want ioc to record batching state. If it's - * not already there, creating a new one requires - * dropping queue_lock, which in turn requires - * retesting conditions to avoid queue hang. - */ - if (!ioc && !retried) { - spin_unlock_irq(q->queue_lock); - create_io_context(gfp_mask, q->node); - spin_lock_irq(q->queue_lock); - retried = true; - goto retry; - } - /* * The queue will fill after this allocation, so set * it as full, and mark this process as "batching". * This process will be allowed to complete a batch of * requests, others will be blocked. */ - if (!blk_queue_full(q, is_sync)) { + if (!blk_rl_full(rl, is_sync)) { ioc_set_batching(q, ioc); - blk_set_queue_full(q, is_sync); + blk_set_rl_full(rl, is_sync); } else { if (may_queue != ELV_MQUEUE_MUST && !ioc_batching(q, ioc)) { @@ -909,7 +907,12 @@ static struct request *get_request(struct request_queue *q, int rw_flags, } } } - blk_set_queue_congested(q, is_sync); + /* + * bdi isn't aware of blkcg yet. As all async IOs end up + * root blkcg anyway, just use root blkcg state. 
+ */ + if (rl == &q->root_rl) + blk_set_queue_congested(q, is_sync); } /* @@ -920,6 +923,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags, if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) return NULL; + q->nr_rqs[is_sync]++; rl->count[is_sync]++; rl->starved[is_sync] = 0; @@ -935,7 +939,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags, */ if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) { rw_flags |= REQ_ELVPRIV; - rl->elvpriv++; + q->nr_rqs_elvpriv++; if (et->icq_cache && ioc) icq = ioc_lookup_icq(ioc, q); } @@ -945,22 +949,19 @@ static struct request *get_request(struct request_queue *q, int rw_flags, spin_unlock_irq(q->queue_lock); /* allocate and init request */ - rq = mempool_alloc(q->rq.rq_pool, gfp_mask); + rq = mempool_alloc(rl->rq_pool, gfp_mask); if (!rq) goto fail_alloc; blk_rq_init(q, rq); + blk_rq_set_rl(rq, rl); rq->cmd_flags = rw_flags | REQ_ALLOCED; /* init elvpriv */ if (rw_flags & REQ_ELVPRIV) { if (unlikely(et->icq_cache && !icq)) { - create_io_context(gfp_mask, q->node); - ioc = rq_ioc(bio); - if (!ioc) - goto fail_elvpriv; - - icq = ioc_create_icq(ioc, q, gfp_mask); + if (ioc) + icq = ioc_create_icq(ioc, q, gfp_mask); if (!icq) goto fail_elvpriv; } @@ -1000,7 +1001,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags, rq->elv.icq = NULL; spin_lock_irq(q->queue_lock); - rl->elvpriv--; + q->nr_rqs_elvpriv--; spin_unlock_irq(q->queue_lock); goto out; @@ -1013,7 +1014,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags, * queue, but this is pretty rare. */ spin_lock_irq(q->queue_lock); - freed_request(q, rw_flags); + freed_request(rl, rw_flags); /* * in the very unlikely event that allocation failed and no @@ -1029,56 +1030,58 @@ static struct request *get_request(struct request_queue *q, int rw_flags, } /** - * get_request_wait - get a free request with retry + * get_request - get a free request * @q: request_queue to allocate request from * @rw_flags: RW and SYNC flags * @bio: bio to allocate request for (can be %NULL) + * @gfp_mask: allocation mask * - * Get a free request from @q. This function keeps retrying under memory - * pressure and fails iff @q is dead. + * Get a free request from @q. If %__GFP_WAIT is set in @gfp_mask, this + * function keeps retrying under memory pressure and fails iff @q is dead. * * Must be callled with @q->queue_lock held and, * Returns %NULL on failure, with @q->queue_lock held. * Returns !%NULL on success, with @q->queue_lock *not held*. 
*/ -static struct request *get_request_wait(struct request_queue *q, int rw_flags, - struct bio *bio) +static struct request *get_request(struct request_queue *q, int rw_flags, + struct bio *bio, gfp_t gfp_mask) { const bool is_sync = rw_is_sync(rw_flags) != 0; + DEFINE_WAIT(wait); + struct request_list *rl; struct request *rq; - rq = get_request(q, rw_flags, bio, GFP_NOIO); - while (!rq) { - DEFINE_WAIT(wait); - struct request_list *rl = &q->rq; - - if (unlikely(blk_queue_dead(q))) - return NULL; + rl = blk_get_rl(q, bio); /* transferred to @rq on success */ +retry: + rq = __get_request(rl, rw_flags, bio, gfp_mask); + if (rq) + return rq; - prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, - TASK_UNINTERRUPTIBLE); + if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q))) { + blk_put_rl(rl); + return NULL; + } - trace_block_sleeprq(q, bio, rw_flags & 1); + /* wait on @rl and retry */ + prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, + TASK_UNINTERRUPTIBLE); - spin_unlock_irq(q->queue_lock); - io_schedule(); + trace_block_sleeprq(q, bio, rw_flags & 1); - /* - * After sleeping, we become a "batching" process and - * will be able to allocate at least one request, and - * up to a big batch of them for a small period time. - * See ioc_batching, ioc_set_batching - */ - create_io_context(GFP_NOIO, q->node); - ioc_set_batching(q, current->io_context); + spin_unlock_irq(q->queue_lock); + io_schedule(); - spin_lock_irq(q->queue_lock); - finish_wait(&rl->wait[is_sync], &wait); + /* + * After sleeping, we become a "batching" process and will be able + * to allocate at least one request, and up to a big batch of them + * for a small period time. See ioc_batching, ioc_set_batching + */ + ioc_set_batching(q, current->io_context); - rq = get_request(q, rw_flags, bio, GFP_NOIO); - }; + spin_lock_irq(q->queue_lock); + finish_wait(&rl->wait[is_sync], &wait); - return rq; + goto retry; } struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) @@ -1087,11 +1090,11 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) BUG_ON(rw != READ && rw != WRITE); + /* create ioc upfront */ + create_io_context(gfp_mask, q->node); + spin_lock_irq(q->queue_lock); - if (gfp_mask & __GFP_WAIT) - rq = get_request_wait(q, rw, NULL); - else - rq = get_request(q, rw, NULL, gfp_mask); + rq = get_request(q, rw, NULL, gfp_mask); if (!rq) spin_unlock_irq(q->queue_lock); /* q->queue_lock is unlocked at this point */ @@ -1248,12 +1251,14 @@ void __blk_put_request(struct request_queue *q, struct request *req) */ if (req->cmd_flags & REQ_ALLOCED) { unsigned int flags = req->cmd_flags; + struct request_list *rl = blk_rq_rl(req); BUG_ON(!list_empty(&req->queuelist)); BUG_ON(!hlist_unhashed(&req->hash)); - blk_free_request(q, req); - freed_request(q, flags); + blk_free_request(rl, req); + freed_request(rl, flags); + blk_put_rl(rl); } } EXPORT_SYMBOL_GPL(__blk_put_request); @@ -1481,7 +1486,7 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio) * Grab a free request. This is might sleep but can not fail. * Returns with the queue unlocked. */ - req = get_request_wait(q, rw_flags, bio); + req = get_request(q, rw_flags, bio, GFP_NOIO); if (unlikely(!req)) { bio_endio(bio, -ENODEV); /* @q is dead */ goto out_unlock; @@ -1702,6 +1707,14 @@ generic_make_request_checks(struct bio *bio) goto end_io; } + /* + * Various block parts want %current->io_context and lazy ioc + * allocation ends up trading a lot of pain for a small amount of + * memory. Just allocate it upfront. 
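With get_request_wait() folded into get_request(), blocking behaviour is now selected purely by the gfp mask. A hedged sketch of what that means for a blk_get_request() caller (the function name, error value and comments are illustrative):

	static int probe_sketch(struct request_queue *q)
	{
		struct request *rq;

		/* __GFP_WAIT set: retries under memory pressure and fails
		 * only if the queue is dead */
		rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!rq)
			return -ENODEV;

		/* ... fill in and issue rq ... */

		blk_put_request(rq);
		return 0;
	}

A GFP_ATOMIC call takes the same path but returns NULL immediately instead of sleeping on the request_list waitqueue.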
This may fail and block + * layer knows how to live with it. + */ + create_io_context(GFP_ATOMIC, q->node); + if (blk_throtl_bio(q, bio)) return false; /* throttled, will be resubmitted later */ @@ -2896,23 +2909,47 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth, } -static void flush_plug_callbacks(struct blk_plug *plug) +static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) { LIST_HEAD(callbacks); - if (list_empty(&plug->cb_list)) - return; + while (!list_empty(&plug->cb_list)) { + list_splice_init(&plug->cb_list, &callbacks); - list_splice_init(&plug->cb_list, &callbacks); - - while (!list_empty(&callbacks)) { - struct blk_plug_cb *cb = list_first_entry(&callbacks, + while (!list_empty(&callbacks)) { + struct blk_plug_cb *cb = list_first_entry(&callbacks, struct blk_plug_cb, list); - list_del(&cb->list); - cb->callback(cb); + list_del(&cb->list); + cb->callback(cb, from_schedule); + } + } +} + +struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data, + int size) +{ + struct blk_plug *plug = current->plug; + struct blk_plug_cb *cb; + + if (!plug) + return NULL; + + list_for_each_entry(cb, &plug->cb_list, list) + if (cb->callback == unplug && cb->data == data) + return cb; + + /* Not currently on the callback list */ + BUG_ON(size < sizeof(*cb)); + cb = kzalloc(size, GFP_ATOMIC); + if (cb) { + cb->data = data; + cb->callback = unplug; + list_add(&cb->list, &plug->cb_list); } + return cb; } +EXPORT_SYMBOL(blk_check_plugged); void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) { @@ -2924,7 +2961,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) BUG_ON(plug->magic != PLUG_MAGIC); - flush_plug_callbacks(plug); + flush_plug_callbacks(plug, from_schedule); if (list_empty(&plug->list)) return; diff --git a/trunk/block/blk-ioc.c b/trunk/block/blk-ioc.c index 893b8007c657..fab4cdd3f7bb 100644 --- a/trunk/block/blk-ioc.c +++ b/trunk/block/blk-ioc.c @@ -244,6 +244,7 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node) /* initialize */ atomic_long_set(&ioc->refcount, 1); + atomic_set(&ioc->nr_tasks, 1); atomic_set(&ioc->active_ref, 1); spin_lock_init(&ioc->lock); INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH); diff --git a/trunk/block/blk-lib.c b/trunk/block/blk-lib.c index 2b461b496a78..19cc761cacb2 100644 --- a/trunk/block/blk-lib.c +++ b/trunk/block/blk-lib.c @@ -44,6 +44,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, struct request_queue *q = bdev_get_queue(bdev); int type = REQ_WRITE | REQ_DISCARD; unsigned int max_discard_sectors; + unsigned int granularity, alignment, mask; struct bio_batch bb; struct bio *bio; int ret = 0; @@ -54,18 +55,20 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, if (!blk_queue_discard(q)) return -EOPNOTSUPP; + /* Zero-sector (unknown) and one-sector granularities are the same. */ + granularity = max(q->limits.discard_granularity >> 9, 1U); + mask = granularity - 1; + alignment = (bdev_discard_alignment(bdev) >> 9) & mask; + /* * Ensure that max_discard_sectors is of the proper - * granularity + * granularity, so that requests stay aligned after a split. */ max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9); + max_discard_sectors = round_down(max_discard_sectors, granularity); if (unlikely(!max_discard_sectors)) { /* Avoid infinite loop below. Being cautious never hurts. 
*/ return -EOPNOTSUPP; - } else if (q->limits.discard_granularity) { - unsigned int disc_sects = q->limits.discard_granularity >> 9; - - max_discard_sectors &= ~(disc_sects - 1); } if (flags & BLKDEV_DISCARD_SECURE) { @@ -79,25 +82,37 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, bb.wait = &wait; while (nr_sects) { + unsigned int req_sects; + sector_t end_sect; + bio = bio_alloc(gfp_mask, 1); if (!bio) { ret = -ENOMEM; break; } + req_sects = min_t(sector_t, nr_sects, max_discard_sectors); + + /* + * If splitting a request, and the next starting sector would be + * misaligned, stop the discard at the previous aligned sector. + */ + end_sect = sector + req_sects; + if (req_sects < nr_sects && (end_sect & mask) != alignment) { + end_sect = + round_down(end_sect - alignment, granularity) + + alignment; + req_sects = end_sect - sector; + } + bio->bi_sector = sector; bio->bi_end_io = bio_batch_end_io; bio->bi_bdev = bdev; bio->bi_private = &bb; - if (nr_sects > max_discard_sectors) { - bio->bi_size = max_discard_sectors << 9; - nr_sects -= max_discard_sectors; - sector += max_discard_sectors; - } else { - bio->bi_size = nr_sects << 9; - nr_sects = 0; - } + bio->bi_size = req_sects << 9; + nr_sects -= req_sects; + sector = end_sect; atomic_inc(&bb.done); submit_bio(type, bio); diff --git a/trunk/block/blk-merge.c b/trunk/block/blk-merge.c index 160035f54882..e76279e41162 100644 --- a/trunk/block/blk-merge.c +++ b/trunk/block/blk-merge.c @@ -110,6 +110,49 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, return 0; } +static void +__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec, + struct scatterlist *sglist, struct bio_vec **bvprv, + struct scatterlist **sg, int *nsegs, int *cluster) +{ + + int nbytes = bvec->bv_len; + + if (*bvprv && *cluster) { + if ((*sg)->length + nbytes > queue_max_segment_size(q)) + goto new_segment; + + if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec)) + goto new_segment; + if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec)) + goto new_segment; + + (*sg)->length += nbytes; + } else { +new_segment: + if (!*sg) + *sg = sglist; + else { + /* + * If the driver previously mapped a shorter + * list, we could see a termination bit + * prematurely unless it fully inits the sg + * table on each mapping. We KNOW that there + * must be more entries here or the driver + * would be buggy, so force clear the + * termination bit to avoid doing a full + * sg_init_table() in drivers for each command. + */ + (*sg)->page_link &= ~0x02; + *sg = sg_next(*sg); + } + + sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset); + (*nsegs)++; + } + *bvprv = bvec; +} + /* * map a request to scatterlist, return number of sg entries setup. Caller * must make sure sg can hold rq->nr_phys_segments entries @@ -131,41 +174,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, bvprv = NULL; sg = NULL; rq_for_each_segment(bvec, rq, iter) { - int nbytes = bvec->bv_len; - - if (bvprv && cluster) { - if (sg->length + nbytes > queue_max_segment_size(q)) - goto new_segment; - - if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) - goto new_segment; - if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec)) - goto new_segment; - - sg->length += nbytes; - } else { -new_segment: - if (!sg) - sg = sglist; - else { - /* - * If the driver previously mapped a shorter - * list, we could see a termination bit - * prematurely unless it fully inits the sg - * table on each mapping. 
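The discard realignment arithmetic above is easier to follow with concrete numbers; a worked example with illustrative values (1 MiB discard granularity, zero discard alignment):

	/*
	 * discard_granularity = 1 MiB  =>  granularity = 2048 sectors,
	 * mask = 2047, alignment = 0.  Suppose sector = 1000, nr_sects = 10000
	 * and max_discard_sectors rounds down to 4096:
	 *
	 *   req_sects = min(10000, 4096)           = 4096
	 *   end_sect  = 1000 + 4096                = 5096, and 5096 & 2047 = 1000
	 *   end_sect  = round_down(5096 - 0, 2048) = 4096
	 *   req_sects = 4096 - 1000                = 3096
	 *
	 * so the first bio stops at sector 4096 and every following chunk
	 * starts on a granularity boundary.
	 */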
We KNOW that there - * must be more entries here or the driver - * would be buggy, so force clear the - * termination bit to avoid doing a full - * sg_init_table() in drivers for each command. - */ - sg->page_link &= ~0x02; - sg = sg_next(sg); - } - - sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset); - nsegs++; - } - bvprv = bvec; + __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg, + &nsegs, &cluster); } /* segments in rq */ @@ -199,6 +209,43 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, } EXPORT_SYMBOL(blk_rq_map_sg); +/** + * blk_bio_map_sg - map a bio to a scatterlist + * @q: request_queue in question + * @bio: bio being mapped + * @sglist: scatterlist being mapped + * + * Note: + * Caller must make sure sg can hold bio->bi_phys_segments entries + * + * Will return the number of sg entries setup + */ +int blk_bio_map_sg(struct request_queue *q, struct bio *bio, + struct scatterlist *sglist) +{ + struct bio_vec *bvec, *bvprv; + struct scatterlist *sg; + int nsegs, cluster; + unsigned long i; + + nsegs = 0; + cluster = blk_queue_cluster(q); + + bvprv = NULL; + sg = NULL; + bio_for_each_segment(bvec, bio, i) { + __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg, + &nsegs, &cluster); + } /* segments in bio */ + + if (sg) + sg_mark_end(sg); + + BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments); + return nsegs; +} +EXPORT_SYMBOL(blk_bio_map_sg); + static inline int ll_new_hw_segment(struct request_queue *q, struct request *req, struct bio *bio) diff --git a/trunk/block/blk-settings.c b/trunk/block/blk-settings.c index d3234fc494ad..565a6786032f 100644 --- a/trunk/block/blk-settings.c +++ b/trunk/block/blk-settings.c @@ -143,8 +143,7 @@ void blk_set_stacking_limits(struct queue_limits *lim) lim->discard_zeroes_data = 1; lim->max_segments = USHRT_MAX; lim->max_hw_sectors = UINT_MAX; - - lim->max_sectors = BLK_DEF_MAX_SECTORS; + lim->max_sectors = UINT_MAX; } EXPORT_SYMBOL(blk_set_stacking_limits); diff --git a/trunk/block/blk-sysfs.c b/trunk/block/blk-sysfs.c index aa41b47c22d2..9628b291f960 100644 --- a/trunk/block/blk-sysfs.c +++ b/trunk/block/blk-sysfs.c @@ -40,7 +40,7 @@ static ssize_t queue_requests_show(struct request_queue *q, char *page) static ssize_t queue_requests_store(struct request_queue *q, const char *page, size_t count) { - struct request_list *rl = &q->rq; + struct request_list *rl; unsigned long nr; int ret; @@ -55,6 +55,9 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count) q->nr_requests = nr; blk_queue_congestion_threshold(q); + /* congestion isn't cgroup aware and follows root blkcg for now */ + rl = &q->root_rl; + if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q)) blk_set_queue_congested(q, BLK_RW_SYNC); else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q)) @@ -65,19 +68,22 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count) else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q)) blk_clear_queue_congested(q, BLK_RW_ASYNC); - if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { - blk_set_queue_full(q, BLK_RW_SYNC); - } else { - blk_clear_queue_full(q, BLK_RW_SYNC); - wake_up(&rl->wait[BLK_RW_SYNC]); + blk_queue_for_each_rl(rl, q) { + if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { + blk_set_rl_full(rl, BLK_RW_SYNC); + } else { + blk_clear_rl_full(rl, BLK_RW_SYNC); + wake_up(&rl->wait[BLK_RW_SYNC]); + } + + if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { + blk_set_rl_full(rl, BLK_RW_ASYNC); + } else { + blk_clear_rl_full(rl, BLK_RW_ASYNC); 
+ wake_up(&rl->wait[BLK_RW_ASYNC]); + } } - if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { - blk_set_queue_full(q, BLK_RW_ASYNC); - } else { - blk_clear_queue_full(q, BLK_RW_ASYNC); - wake_up(&rl->wait[BLK_RW_ASYNC]); - } spin_unlock_irq(q->queue_lock); return ret; } @@ -476,7 +482,6 @@ static void blk_release_queue(struct kobject *kobj) { struct request_queue *q = container_of(kobj, struct request_queue, kobj); - struct request_list *rl = &q->rq; blk_sync_queue(q); @@ -489,8 +494,7 @@ static void blk_release_queue(struct kobject *kobj) elevator_exit(q->elevator); } - if (rl->rq_pool) - mempool_destroy(rl->rq_pool); + blk_exit_rl(&q->root_rl); if (q->queue_tags) __blk_queue_free_tags(q); diff --git a/trunk/block/blk-throttle.c b/trunk/block/blk-throttle.c index 5b0659512047..e287c19908c8 100644 --- a/trunk/block/blk-throttle.c +++ b/trunk/block/blk-throttle.c @@ -1123,9 +1123,6 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio) goto out; } - /* bio_associate_current() needs ioc, try creating */ - create_io_context(GFP_ATOMIC, q->node); - /* * A throtl_grp pointer retrieved under rcu can be used to access * basic fields like stats and io rates. If a group has no rules, diff --git a/trunk/block/blk.h b/trunk/block/blk.h index 85f6ae42f7d3..2a0ea32d249f 100644 --- a/trunk/block/blk.h +++ b/trunk/block/blk.h @@ -18,6 +18,9 @@ static inline void __blk_get_queue(struct request_queue *q) kobject_get(&q->kobj); } +int blk_init_rl(struct request_list *rl, struct request_queue *q, + gfp_t gfp_mask); +void blk_exit_rl(struct request_list *rl); void init_request_from_bio(struct request *req, struct bio *bio); void blk_rq_bio_prep(struct request_queue *q, struct request *rq, struct bio *bio); @@ -33,7 +36,6 @@ bool __blk_end_bidi_request(struct request *rq, int error, void blk_rq_timed_out_timer(unsigned long data); void blk_delete_timer(struct request *); void blk_add_timer(struct request *); -void __generic_unplug_device(struct request_queue *); /* * Internal atomic flags for request handling diff --git a/trunk/block/bsg-lib.c b/trunk/block/bsg-lib.c index 7ad49c88f6b1..deee61fbb741 100644 --- a/trunk/block/bsg-lib.c +++ b/trunk/block/bsg-lib.c @@ -243,56 +243,3 @@ int bsg_setup_queue(struct device *dev, struct request_queue *q, return 0; } EXPORT_SYMBOL_GPL(bsg_setup_queue); - -/** - * bsg_remove_queue - Deletes the bsg dev from the q - * @q: the request_queue that is to be torn down. - * - * Notes: - * Before unregistering the queue empty any requests that are blocked - */ -void bsg_remove_queue(struct request_queue *q) -{ - struct request *req; /* block request */ - int counts; /* totals for request_list count and starved */ - - if (!q) - return; - - /* Stop taking in new requests */ - spin_lock_irq(q->queue_lock); - blk_stop_queue(q); - - /* drain all requests in the queue */ - while (1) { - /* need the lock to fetch a request - * this may fetch the same reqeust as the previous pass - */ - req = blk_fetch_request(q); - /* save requests in use and starved */ - counts = q->rq.count[0] + q->rq.count[1] + - q->rq.starved[0] + q->rq.starved[1]; - spin_unlock_irq(q->queue_lock); - /* any requests still outstanding? */ - if (counts == 0) - break; - - /* This may be the same req as the previous iteration, - * always send the blk_end_request_all after a prefetch. - * It is not okay to not end the request because the - * prefetch started the request. 
- */ - if (req) { - /* return -ENXIO to indicate that this queue is - * going away - */ - req->errors = -ENXIO; - blk_end_request_all(req, -ENXIO); - } - - msleep(200); /* allow bsg to possibly finish */ - spin_lock_irq(q->queue_lock); - } - bsg_unregister_queue(q); -} -EXPORT_SYMBOL_GPL(bsg_remove_queue); diff --git a/trunk/block/genhd.c b/trunk/block/genhd.c index 9cf5583c90ff..d839723303c8 100644 --- a/trunk/block/genhd.c +++ b/trunk/block/genhd.c @@ -154,7 +154,7 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter) part = rcu_dereference(ptbl->part[piter->idx]); if (!part) continue; - if (!part->nr_sects && + if (!part_nr_sects_read(part) && !(piter->flags & DISK_PITER_INCL_EMPTY) && !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 && piter->idx == 0)) @@ -191,7 +191,7 @@ EXPORT_SYMBOL_GPL(disk_part_iter_exit); static inline int sector_in_part(struct hd_struct *part, sector_t sector) { return part->start_sect <= sector && - sector < part->start_sect + part->nr_sects; + sector < part->start_sect + part_nr_sects_read(part); } /** @@ -769,8 +769,8 @@ void __init printk_all_partitions(void) printk("%s%s %10llu %s %s", is_part0 ? "" : " ", bdevt_str(part_devt(part), devt_buf), - (unsigned long long)part->nr_sects >> 1, - disk_name(disk, part->partno, name_buf), + (unsigned long long)part_nr_sects_read(part) >> 1 + , disk_name(disk, part->partno, name_buf), uuid_buf); if (is_part0) { if (disk->driverfs_dev != NULL && @@ -835,7 +835,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v) static void *show_partition_start(struct seq_file *seqf, loff_t *pos) { - static void *p; + void *p; p = disk_seqf_start(seqf, pos); if (!IS_ERR_OR_NULL(p) && !*pos) @@ -862,7 +862,7 @@ static int show_partition(struct seq_file *seqf, void *v) while ((part = disk_part_iter_next(&piter))) seq_printf(seqf, "%4d %7d %10llu %s\n", MAJOR(part_devt(part)), MINOR(part_devt(part)), - (unsigned long long)part->nr_sects >> 1, + (unsigned long long)part_nr_sects_read(part) >> 1, disk_name(sgp, part->partno, buf)); disk_part_iter_exit(&piter); @@ -1268,6 +1268,16 @@ struct gendisk *alloc_disk_node(int minors, int node_id) } disk->part_tbl->part[0] = &disk->part0; + /* + * set_capacity() and get_capacity() currently don't use + * seqcounter to read/update the part0->nr_sects. Still init + * the counter as we can read the sectors in IO submission + * patch using seqence counters. + * + * TODO: Ideally set_capacity() and get_capacity() should be + * converted to make use of bd_mutex and sequence counters. 
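part_nr_sects_read() itself is not part of this hunk; on 32-bit SMP configurations it presumably follows the standard seqcount read-side loop, roughly along these lines (a sketch, not the actual definition):

	static inline sector_t nr_sects_read_sketch(struct hd_struct *part)
	{
		sector_t nr_sects;
		unsigned seq;

		do {
			seq = read_seqcount_begin(&part->nr_sects_seq);
			nr_sects = part->nr_sects;
		} while (read_seqcount_retry(&part->nr_sects_seq, seq));

		return nr_sects;
	}

part_nr_sects_write() would bump the same counter around the store, which is why both alloc_disk_node() and add_partition() now initialize nr_sects_seq.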
+ */ + seqcount_init(&disk->part0.nr_sects_seq); hd_ref_init(&disk->part0); disk->minors = minors; diff --git a/trunk/block/ioctl.c b/trunk/block/ioctl.c index ba15b2dbfb98..4476e0e85d16 100644 --- a/trunk/block/ioctl.c +++ b/trunk/block/ioctl.c @@ -13,7 +13,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user { struct block_device *bdevp; struct gendisk *disk; - struct hd_struct *part; + struct hd_struct *part, *lpart; struct blkpg_ioctl_arg a; struct blkpg_partition p; struct disk_part_iter piter; @@ -36,8 +36,8 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user case BLKPG_ADD_PARTITION: start = p.start >> 9; length = p.length >> 9; - /* check for fit in a hd_struct */ - if (sizeof(sector_t) == sizeof(long) && + /* check for fit in a hd_struct */ + if (sizeof(sector_t) == sizeof(long) && sizeof(long long) > sizeof(long)) { long pstart = start, plength = length; if (pstart != start || plength != length @@ -91,6 +91,59 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user mutex_unlock(&bdevp->bd_mutex); bdput(bdevp); + return 0; + case BLKPG_RESIZE_PARTITION: + start = p.start >> 9; + /* new length of partition in bytes */ + length = p.length >> 9; + /* check for fit in a hd_struct */ + if (sizeof(sector_t) == sizeof(long) && + sizeof(long long) > sizeof(long)) { + long pstart = start, plength = length; + if (pstart != start || plength != length + || pstart < 0 || plength < 0) + return -EINVAL; + } + part = disk_get_part(disk, partno); + if (!part) + return -ENXIO; + bdevp = bdget(part_devt(part)); + if (!bdevp) { + disk_put_part(part); + return -ENOMEM; + } + mutex_lock(&bdevp->bd_mutex); + mutex_lock_nested(&bdev->bd_mutex, 1); + if (start != part->start_sect) { + mutex_unlock(&bdevp->bd_mutex); + mutex_unlock(&bdev->bd_mutex); + bdput(bdevp); + disk_put_part(part); + return -EINVAL; + } + /* overlap? 
*/ + disk_part_iter_init(&piter, disk, + DISK_PITER_INCL_EMPTY); + while ((lpart = disk_part_iter_next(&piter))) { + if (lpart->partno != partno && + !(start + length <= lpart->start_sect || + start >= lpart->start_sect + lpart->nr_sects) + ) { + disk_part_iter_exit(&piter); + mutex_unlock(&bdevp->bd_mutex); + mutex_unlock(&bdev->bd_mutex); + bdput(bdevp); + disk_put_part(part); + return -EBUSY; + } + } + disk_part_iter_exit(&piter); + part_nr_sects_write(part, (sector_t)length); + i_size_write(bdevp->bd_inode, p.length); + mutex_unlock(&bdevp->bd_mutex); + mutex_unlock(&bdev->bd_mutex); + bdput(bdevp); + disk_put_part(part); return 0; default: return -EINVAL; diff --git a/trunk/block/partition-generic.c b/trunk/block/partition-generic.c index 6df5d6928a44..f1d14519cc04 100644 --- a/trunk/block/partition-generic.c +++ b/trunk/block/partition-generic.c @@ -84,7 +84,7 @@ ssize_t part_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hd_struct *p = dev_to_part(dev); - return sprintf(buf, "%llu\n",(unsigned long long)p->nr_sects); + return sprintf(buf, "%llu\n",(unsigned long long)part_nr_sects_read(p)); } static ssize_t part_ro_show(struct device *dev, @@ -294,6 +294,8 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno, err = -ENOMEM; goto out_free; } + + seqcount_init(&p->nr_sects_seq); pdev = part_to_dev(p); p->start_sect = start; diff --git a/trunk/crypto/authenc.c b/trunk/crypto/authenc.c index 5ef7ba6b6a76..d0583a4489e6 100644 --- a/trunk/crypto/authenc.c +++ b/trunk/crypto/authenc.c @@ -336,7 +336,7 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv, cryptlen += ivsize; } - if (sg_is_last(assoc)) { + if (req->assoclen && sg_is_last(assoc)) { authenc_ahash_fn = crypto_authenc_ahash; sg_init_table(asg, 2); sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); @@ -490,7 +490,7 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, cryptlen += ivsize; } - if (sg_is_last(assoc)) { + if (req->assoclen && sg_is_last(assoc)) { authenc_ahash_fn = crypto_authenc_ahash; sg_init_table(asg, 2); sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); diff --git a/trunk/drivers/acpi/ac.c b/trunk/drivers/acpi/ac.c index ac7034129f3f..d5fdd36190cc 100644 --- a/trunk/drivers/acpi/ac.c +++ b/trunk/drivers/acpi/ac.c @@ -69,7 +69,9 @@ static const struct acpi_device_id ac_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, ac_device_ids); +#ifdef CONFIG_PM_SLEEP static int acpi_ac_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); static struct acpi_driver acpi_ac_driver = { @@ -313,6 +315,7 @@ static int acpi_ac_add(struct acpi_device *device) return result; } +#ifdef CONFIG_PM_SLEEP static int acpi_ac_resume(struct device *dev) { struct acpi_ac *ac; @@ -332,6 +335,7 @@ static int acpi_ac_resume(struct device *dev) kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); return 0; } +#endif static int acpi_ac_remove(struct acpi_device *device, int type) { diff --git a/trunk/drivers/acpi/acpica/achware.h b/trunk/drivers/acpi/acpica/achware.h index 5ccb99ae3a6f..5de4ec72766d 100644 --- a/trunk/drivers/acpi/acpica/achware.h +++ b/trunk/drivers/acpi/acpica/achware.h @@ -83,22 +83,22 @@ acpi_status acpi_hw_clear_acpi_status(void); /* * hwsleep - sleep/wake support (Legacy sleep registers) */ -acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags); +acpi_status acpi_hw_legacy_sleep(u8 sleep_state); -acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags); +acpi_status 
acpi_hw_legacy_wake_prep(u8 sleep_state); -acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags); +acpi_status acpi_hw_legacy_wake(u8 sleep_state); /* * hwesleep - sleep/wake support (Extended FADT-V5 sleep registers) */ void acpi_hw_execute_sleep_method(char *method_name, u32 integer_argument); -acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags); +acpi_status acpi_hw_extended_sleep(u8 sleep_state); -acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags); +acpi_status acpi_hw_extended_wake_prep(u8 sleep_state); -acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags); +acpi_status acpi_hw_extended_wake(u8 sleep_state); /* * hwvalid - Port I/O with validation diff --git a/trunk/drivers/acpi/acpica/hwesleep.c b/trunk/drivers/acpi/acpica/hwesleep.c index 48518dac5342..94996f9ae3ad 100644 --- a/trunk/drivers/acpi/acpica/hwesleep.c +++ b/trunk/drivers/acpi/acpica/hwesleep.c @@ -90,7 +90,6 @@ void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument) * FUNCTION: acpi_hw_extended_sleep * * PARAMETERS: sleep_state - Which sleep state to enter - * flags - ACPI_EXECUTE_GTS to run optional method * * RETURN: Status * @@ -100,7 +99,7 @@ void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument) * ******************************************************************************/ -acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags) +acpi_status acpi_hw_extended_sleep(u8 sleep_state) { acpi_status status; u8 sleep_type_value; @@ -125,12 +124,6 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags) acpi_gbl_system_awake_and_running = FALSE; - /* Optionally execute _GTS (Going To Sleep) */ - - if (flags & ACPI_EXECUTE_GTS) { - acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state); - } - /* Flush caches, as per ACPI specification */ ACPI_FLUSH_CPU_CACHE(); @@ -172,7 +165,6 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags) * FUNCTION: acpi_hw_extended_wake_prep * * PARAMETERS: sleep_state - Which sleep state we just exited - * flags - ACPI_EXECUTE_BFS to run optional method * * RETURN: Status * @@ -181,7 +173,7 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags) * ******************************************************************************/ -acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags) +acpi_status acpi_hw_extended_wake_prep(u8 sleep_state) { acpi_status status; u8 sleep_type_value; @@ -200,11 +192,6 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags) &acpi_gbl_FADT.sleep_control); } - /* Optionally execute _BFS (Back From Sleep) */ - - if (flags & ACPI_EXECUTE_BFS) { - acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state); - } return_ACPI_STATUS(AE_OK); } @@ -222,7 +209,7 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags) * ******************************************************************************/ -acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags) +acpi_status acpi_hw_extended_wake(u8 sleep_state) { ACPI_FUNCTION_TRACE(hw_extended_wake); diff --git a/trunk/drivers/acpi/acpica/hwsleep.c b/trunk/drivers/acpi/acpica/hwsleep.c index 9960fe9ef533..3fddde056a5e 100644 --- a/trunk/drivers/acpi/acpica/hwsleep.c +++ b/trunk/drivers/acpi/acpica/hwsleep.c @@ -56,7 +56,6 @@ ACPI_MODULE_NAME("hwsleep") * FUNCTION: acpi_hw_legacy_sleep * * PARAMETERS: sleep_state - Which sleep state to enter - * flags - ACPI_EXECUTE_GTS to run optional method * * RETURN: Status * @@ -64,7 +63,7 @@ ACPI_MODULE_NAME("hwsleep") * THIS FUNCTION 
MUST BE CALLED WITH INTERRUPTS DISABLED * ******************************************************************************/ -acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags) +acpi_status acpi_hw_legacy_sleep(u8 sleep_state) { struct acpi_bit_register_info *sleep_type_reg_info; struct acpi_bit_register_info *sleep_enable_reg_info; @@ -110,12 +109,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags) return_ACPI_STATUS(status); } - /* Optionally execute _GTS (Going To Sleep) */ - - if (flags & ACPI_EXECUTE_GTS) { - acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state); - } - /* Get current value of PM1A control */ status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL, @@ -214,7 +207,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags) * FUNCTION: acpi_hw_legacy_wake_prep * * PARAMETERS: sleep_state - Which sleep state we just exited - * flags - ACPI_EXECUTE_BFS to run optional method * * RETURN: Status * @@ -224,7 +216,7 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags) * ******************************************************************************/ -acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags) +acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state) { acpi_status status; struct acpi_bit_register_info *sleep_type_reg_info; @@ -275,11 +267,6 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags) } } - /* Optionally execute _BFS (Back From Sleep) */ - - if (flags & ACPI_EXECUTE_BFS) { - acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state); - } return_ACPI_STATUS(status); } @@ -288,7 +275,6 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags) * FUNCTION: acpi_hw_legacy_wake * * PARAMETERS: sleep_state - Which sleep state we just exited - * flags - Reserved, set to zero * * RETURN: Status * @@ -297,7 +283,7 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags) * ******************************************************************************/ -acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags) +acpi_status acpi_hw_legacy_wake(u8 sleep_state) { acpi_status status; diff --git a/trunk/drivers/acpi/acpica/hwxfsleep.c b/trunk/drivers/acpi/acpica/hwxfsleep.c index f8684bfe7907..1f165a750ae2 100644 --- a/trunk/drivers/acpi/acpica/hwxfsleep.c +++ b/trunk/drivers/acpi/acpica/hwxfsleep.c @@ -50,7 +50,7 @@ ACPI_MODULE_NAME("hwxfsleep") /* Local prototypes */ static acpi_status -acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id); +acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id); /* * Dispatch table used to efficiently branch to the various sleep @@ -235,7 +235,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios) * ******************************************************************************/ static acpi_status -acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id) +acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id) { acpi_status status; struct acpi_sleep_functions *sleep_functions = @@ -248,11 +248,11 @@ acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id) * use the extended sleep registers */ if (acpi_gbl_reduced_hardware || acpi_gbl_FADT.sleep_control.address) { - status = sleep_functions->extended_function(sleep_state, flags); + status = sleep_functions->extended_function(sleep_state); } else { /* Legacy sleep */ - status = sleep_functions->legacy_function(sleep_state, flags); + status = sleep_functions->legacy_function(sleep_state); } return (status); @@ -262,7 +262,7 @@ acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 
function_id) * For the case where reduced-hardware-only code is being generated, * we know that only the extended sleep registers are available */ - status = sleep_functions->extended_function(sleep_state, flags); + status = sleep_functions->extended_function(sleep_state); return (status); #endif /* !ACPI_REDUCED_HARDWARE */ @@ -349,7 +349,6 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep) * FUNCTION: acpi_enter_sleep_state * * PARAMETERS: sleep_state - Which sleep state to enter - * flags - ACPI_EXECUTE_GTS to run optional method * * RETURN: Status * @@ -357,7 +356,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep) * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED * ******************************************************************************/ -acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags) +acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state) { acpi_status status; @@ -371,7 +370,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags) } status = - acpi_hw_sleep_dispatch(sleep_state, flags, ACPI_SLEEP_FUNCTION_ID); + acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID); return_ACPI_STATUS(status); } @@ -391,14 +390,14 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state) * Called with interrupts DISABLED. * ******************************************************************************/ -acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags) +acpi_status acpi_leave_sleep_state_prep(u8 sleep_state) { acpi_status status; ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep); status = - acpi_hw_sleep_dispatch(sleep_state, flags, + acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_PREP_FUNCTION_ID); return_ACPI_STATUS(status); } @@ -423,8 +422,7 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state) ACPI_FUNCTION_TRACE(acpi_leave_sleep_state); - - status = acpi_hw_sleep_dispatch(sleep_state, 0, ACPI_WAKE_FUNCTION_ID); + status = acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_FUNCTION_ID); return_ACPI_STATUS(status); } diff --git a/trunk/drivers/acpi/acpica/tbxface.c b/trunk/drivers/acpi/acpica/tbxface.c index ea4c6d52605a..29e51bc01383 100644 --- a/trunk/drivers/acpi/acpica/tbxface.c +++ b/trunk/drivers/acpi/acpica/tbxface.c @@ -387,6 +387,7 @@ acpi_get_table_with_size(char *signature, return (AE_NOT_FOUND); } +ACPI_EXPORT_SYMBOL(acpi_get_table_with_size) acpi_status acpi_get_table(char *signature, diff --git a/trunk/drivers/acpi/battery.c b/trunk/drivers/acpi/battery.c index ff2c876ec412..45e3e1759fb8 100644 --- a/trunk/drivers/acpi/battery.c +++ b/trunk/drivers/acpi/battery.c @@ -1052,6 +1052,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type) return 0; } +#ifdef CONFIG_PM_SLEEP /* this is needed to learn about changes made in suspended state */ static int acpi_battery_resume(struct device *dev) { @@ -1068,6 +1069,7 @@ static int acpi_battery_resume(struct device *dev) acpi_battery_update(battery); return 0; } +#endif static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume); diff --git a/trunk/drivers/acpi/button.c b/trunk/drivers/acpi/button.c index 79d4c22f7a6d..314a3b84bbc7 100644 --- a/trunk/drivers/acpi/button.c +++ b/trunk/drivers/acpi/button.c @@ -78,7 +78,9 @@ static int acpi_button_add(struct acpi_device *device); static int acpi_button_remove(struct acpi_device *device, int type); static void acpi_button_notify(struct acpi_device *device, u32 event); +#ifdef CONFIG_PM_SLEEP static int acpi_button_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, 
acpi_button_resume); static struct acpi_driver acpi_button_driver = { @@ -310,6 +312,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event) } } +#ifdef CONFIG_PM_SLEEP static int acpi_button_resume(struct device *dev) { struct acpi_device *device = to_acpi_device(dev); @@ -319,6 +322,7 @@ static int acpi_button_resume(struct device *dev) return acpi_lid_send_state(device); return 0; } +#endif static int acpi_button_add(struct acpi_device *device) { diff --git a/trunk/drivers/acpi/fan.c b/trunk/drivers/acpi/fan.c index 669d9ee80d16..bc36a476f1ab 100644 --- a/trunk/drivers/acpi/fan.c +++ b/trunk/drivers/acpi/fan.c @@ -53,8 +53,10 @@ static const struct acpi_device_id fan_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, fan_device_ids); +#ifdef CONFIG_PM_SLEEP static int acpi_fan_suspend(struct device *dev); static int acpi_fan_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume); static struct acpi_driver acpi_fan_driver = { @@ -184,6 +186,7 @@ static int acpi_fan_remove(struct acpi_device *device, int type) return 0; } +#ifdef CONFIG_PM_SLEEP static int acpi_fan_suspend(struct device *dev) { if (!dev) @@ -207,6 +210,7 @@ static int acpi_fan_resume(struct device *dev) return result; } +#endif static int __init acpi_fan_init(void) { diff --git a/trunk/drivers/acpi/numa.c b/trunk/drivers/acpi/numa.c index e56f3be7b07d..cb31298ca684 100644 --- a/trunk/drivers/acpi/numa.c +++ b/trunk/drivers/acpi/numa.c @@ -237,6 +237,8 @@ acpi_parse_processor_affinity(struct acpi_subtable_header *header, return 0; } +static int __initdata parsed_numa_memblks; + static int __init acpi_parse_memory_affinity(struct acpi_subtable_header * header, const unsigned long end) @@ -250,8 +252,8 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header, acpi_table_print_srat_entry(header); /* let architecture-dependent part to do it */ - acpi_numa_memory_affinity_init(memory_affinity); - + if (!acpi_numa_memory_affinity_init(memory_affinity)) + parsed_numa_memblks++; return 0; } @@ -304,8 +306,10 @@ int __init acpi_numa_init(void) acpi_numa_arch_fixup(); - if (cnt <= 0) - return cnt ?: -ENOENT; + if (cnt < 0) + return cnt; + else if (!parsed_numa_memblks) + return -ENOENT; return 0; } diff --git a/trunk/drivers/acpi/pci_root.c b/trunk/drivers/acpi/pci_root.c index ec54014c321c..72a2c98bc429 100644 --- a/trunk/drivers/acpi/pci_root.c +++ b/trunk/drivers/acpi/pci_root.c @@ -573,8 +573,15 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) OSC_CLOCK_PWR_CAPABILITY_SUPPORT; if (pci_msi_enabled()) flags |= OSC_MSI_SUPPORT; - if (flags != base_flags) - acpi_pci_osc_support(root, flags); + if (flags != base_flags) { + status = acpi_pci_osc_support(root, flags); + if (ACPI_FAILURE(status)) { + dev_info(root->bus->bridge, "ACPI _OSC support " + "notification failed, disabling PCIe ASPM\n"); + pcie_no_aspm(); + flags = base_flags; + } + } if (!pcie_ports_disabled && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) { diff --git a/trunk/drivers/acpi/power.c b/trunk/drivers/acpi/power.c index 215ecd097408..fc1803414629 100644 --- a/trunk/drivers/acpi/power.c +++ b/trunk/drivers/acpi/power.c @@ -67,7 +67,9 @@ static const struct acpi_device_id power_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, power_device_ids); +#ifdef CONFIG_PM_SLEEP static int acpi_power_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(acpi_power_pm, NULL, acpi_power_resume); static struct acpi_driver acpi_power_driver = { @@ -775,6 +777,7 @@ 
static int acpi_power_remove(struct acpi_device *device, int type) return 0; } +#ifdef CONFIG_PM_SLEEP static int acpi_power_resume(struct device *dev) { int result = 0, state; @@ -803,6 +806,7 @@ static int acpi_power_resume(struct device *dev) return result; } +#endif int __init acpi_power_init(void) { diff --git a/trunk/drivers/acpi/processor_driver.c b/trunk/drivers/acpi/processor_driver.c index ff8e04f2fab4..bfc31cb0dd3e 100644 --- a/trunk/drivers/acpi/processor_driver.c +++ b/trunk/drivers/acpi/processor_driver.c @@ -437,7 +437,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, /* Normal CPU soft online event */ } else { acpi_processor_ppc_has_changed(pr, 0); - acpi_processor_cst_has_changed(pr); + acpi_processor_hotplug(pr); acpi_processor_reevaluate_tstate(pr, action); acpi_processor_tstate_has_changed(pr); } diff --git a/trunk/drivers/acpi/sbs.c b/trunk/drivers/acpi/sbs.c index c0b9aa5faf4c..ff0740e0a9c2 100644 --- a/trunk/drivers/acpi/sbs.c +++ b/trunk/drivers/acpi/sbs.c @@ -988,6 +988,7 @@ static void acpi_sbs_rmdirs(void) #endif } +#ifdef CONFIG_PM_SLEEP static int acpi_sbs_resume(struct device *dev) { struct acpi_sbs *sbs; @@ -997,6 +998,7 @@ static int acpi_sbs_resume(struct device *dev) acpi_sbs_callback(sbs); return 0; } +#endif static SIMPLE_DEV_PM_OPS(acpi_sbs_pm, NULL, acpi_sbs_resume); diff --git a/trunk/drivers/acpi/sleep.c b/trunk/drivers/acpi/sleep.c index 7a7a9c929247..fdcdbb652915 100644 --- a/trunk/drivers/acpi/sleep.c +++ b/trunk/drivers/acpi/sleep.c @@ -28,36 +28,7 @@ #include "internal.h" #include "sleep.h" -u8 wake_sleep_flags = ACPI_NO_OPTIONAL_METHODS; -static unsigned int gts, bfs; -static int set_param_wake_flag(const char *val, struct kernel_param *kp) -{ - int ret = param_set_int(val, kp); - - if (ret) - return ret; - - if (kp->arg == (const char *)>s) { - if (gts) - wake_sleep_flags |= ACPI_EXECUTE_GTS; - else - wake_sleep_flags &= ~ACPI_EXECUTE_GTS; - } - if (kp->arg == (const char *)&bfs) { - if (bfs) - wake_sleep_flags |= ACPI_EXECUTE_BFS; - else - wake_sleep_flags &= ~ACPI_EXECUTE_BFS; - } - return ret; -} -module_param_call(gts, set_param_wake_flag, param_get_int, >s, 0644); -module_param_call(bfs, set_param_wake_flag, param_get_int, &bfs, 0644); -MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend."); -MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".); - static u8 sleep_states[ACPI_S_STATE_COUNT]; -static bool pwr_btn_event_pending; static void acpi_sleep_tts_switch(u32 acpi_state) { @@ -110,6 +81,7 @@ static int acpi_sleep_prepare(u32 acpi_state) #ifdef CONFIG_ACPI_SLEEP static u32 acpi_target_sleep_state = ACPI_STATE_S0; +static bool pwr_btn_event_pending; /* * The ACPI specification wants us to save NVS memory regions during hibernation @@ -305,7 +277,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state) switch (acpi_state) { case ACPI_STATE_S1: barrier(); - status = acpi_enter_sleep_state(acpi_state, wake_sleep_flags); + status = acpi_enter_sleep_state(acpi_state); break; case ACPI_STATE_S3: @@ -319,8 +291,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state) /* This violates the spec but is required for bug compatibility. 
*/ acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); - /* Reprogram control registers and execute _BFS */ - acpi_leave_sleep_state_prep(acpi_state, wake_sleep_flags); + /* Reprogram control registers */ + acpi_leave_sleep_state_prep(acpi_state); /* ACPI 3.0 specs (P62) says that it's the responsibility * of the OSPM to clear the status bit [ implying that the @@ -603,9 +575,9 @@ static int acpi_hibernation_enter(void) ACPI_FLUSH_CPU_CACHE(); /* This shouldn't return. If it returns, we have a problem */ - status = acpi_enter_sleep_state(ACPI_STATE_S4, wake_sleep_flags); - /* Reprogram control registers and execute _BFS */ - acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags); + status = acpi_enter_sleep_state(ACPI_STATE_S4); + /* Reprogram control registers */ + acpi_leave_sleep_state_prep(ACPI_STATE_S4); return ACPI_SUCCESS(status) ? 0 : -EFAULT; } @@ -617,8 +589,8 @@ static void acpi_hibernation_leave(void) * enable it here. */ acpi_enable(); - /* Reprogram control registers and execute _BFS */ - acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags); + /* Reprogram control registers */ + acpi_leave_sleep_state_prep(ACPI_STATE_S4); /* Check the hardware signature */ if (facs && s4_hardware_signature != facs->hardware_signature) { printk(KERN_EMERG "ACPI: Hardware changed while hibernated, " @@ -892,33 +864,7 @@ static void acpi_power_off(void) /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */ printk(KERN_DEBUG "%s called\n", __func__); local_irq_disable(); - acpi_enter_sleep_state(ACPI_STATE_S5, wake_sleep_flags); -} - -/* - * ACPI 2.0 created the optional _GTS and _BFS, - * but industry adoption has been neither rapid nor broad. - * - * Linux gets into trouble when it executes poorly validated - * paths through the BIOS, so disable _GTS and _BFS by default, - * but do speak up and offer the option to enable them. - */ -static void __init acpi_gts_bfs_check(void) -{ - acpi_handle dummy; - - if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__GTS, &dummy))) - { - printk(KERN_NOTICE PREFIX "BIOS offers _GTS\n"); - printk(KERN_NOTICE PREFIX "If \"acpi.gts=1\" improves suspend, " - "please notify linux-acpi@vger.kernel.org\n"); - } - if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__BFS, &dummy))) - { - printk(KERN_NOTICE PREFIX "BIOS offers _BFS\n"); - printk(KERN_NOTICE PREFIX "If \"acpi.bfs=1\" improves resume, " - "please notify linux-acpi@vger.kernel.org\n"); - } + acpi_enter_sleep_state(ACPI_STATE_S5); } int __init acpi_sleep_init(void) @@ -979,6 +925,5 @@ int __init acpi_sleep_init(void) * object can also be evaluated when the system enters S5. 
*/ register_reboot_notifier(&tts_notifier); - acpi_gts_bfs_check(); return 0; } diff --git a/trunk/drivers/acpi/sysfs.c b/trunk/drivers/acpi/sysfs.c index 240a24400976..7c3f98ba4afe 100644 --- a/trunk/drivers/acpi/sysfs.c +++ b/trunk/drivers/acpi/sysfs.c @@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp) { int result = 0; - if (!strncmp(val, "enable", strlen("enable"))) { + if (!strncmp(val, "enable", sizeof("enable") - 1)) { result = acpi_debug_trace(trace_method_name, trace_debug_level, trace_debug_layer, 0); if (result) @@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp) goto exit; } - if (!strncmp(val, "disable", strlen("disable"))) { + if (!strncmp(val, "disable", sizeof("disable") - 1)) { int name = 0; result = acpi_debug_trace((char *)&name, trace_debug_level, trace_debug_layer, 0); diff --git a/trunk/drivers/acpi/thermal.c b/trunk/drivers/acpi/thermal.c index 9fe90e9fecb5..edda74a43406 100644 --- a/trunk/drivers/acpi/thermal.c +++ b/trunk/drivers/acpi/thermal.c @@ -106,7 +106,9 @@ static const struct acpi_device_id thermal_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, thermal_device_ids); +#ifdef CONFIG_PM_SLEEP static int acpi_thermal_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume); static struct acpi_driver acpi_thermal_driver = { @@ -1041,6 +1043,7 @@ static int acpi_thermal_remove(struct acpi_device *device, int type) return 0; } +#ifdef CONFIG_PM_SLEEP static int acpi_thermal_resume(struct device *dev) { struct acpi_thermal *tz; @@ -1075,6 +1078,7 @@ static int acpi_thermal_resume(struct device *dev) return AE_OK; } +#endif static int thermal_act(const struct dmi_system_id *d) { diff --git a/trunk/drivers/ata/Kconfig b/trunk/drivers/ata/Kconfig index 2be8ef1d3093..27cecd313e75 100644 --- a/trunk/drivers/ata/Kconfig +++ b/trunk/drivers/ata/Kconfig @@ -115,7 +115,7 @@ config SATA_SIL24 If unsure, say N. 
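Several hunks in this part of the series (acpi power, sbs and thermal above; tpm_tis further down) wrap their resume callbacks in #ifdef CONFIG_PM_SLEEP. The reason is that SIMPLE_DEV_PM_OPS() only references the suspend/resume callbacks when CONFIG_PM_SLEEP is enabled, so an unguarded static callback becomes a "defined but not used" warning on !CONFIG_PM_SLEEP builds. A minimal sketch of the idiom, not part of the patch itself; the foo_* names are invented for illustration:

#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
/* Referenced by SIMPLE_DEV_PM_OPS() only when CONFIG_PM_SLEEP is set,
 * so the definition is guarded the same way to keep !CONFIG_PM_SLEEP
 * builds warning-free. */
static int foo_resume(struct device *dev)
{
	/* restore device state here */
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(foo_pm_ops, NULL, foo_resume);

/* A driver would then point at the ops table as usual, e.g.
 *	.driver = { .name = "foo", .pm = &foo_pm_ops },
 */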
config ATA_SFF - bool "ATA SFF support" + bool "ATA SFF support (for legacy IDE and PATA)" default y help This option adds support for ATA controllers with SFF diff --git a/trunk/drivers/ata/ahci.c b/trunk/drivers/ata/ahci.c index 062e6a1a248f..50d5dea0ff59 100644 --- a/trunk/drivers/ata/ahci.c +++ b/trunk/drivers/ata/ahci.c @@ -256,6 +256,14 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */ { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */ { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */ + { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */ + { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */ + { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c05), board_ahci }, /* Lynx Point-LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c06), board_ahci }, /* Lynx Point-LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c07), board_ahci }, /* Lynx Point-LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci }, /* Lynx Point-LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci }, /* Lynx Point-LP RAID */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, diff --git a/trunk/drivers/ata/ahci.h b/trunk/drivers/ata/ahci.h index c2594ddf25b0..57eb1c212a4c 100644 --- a/trunk/drivers/ata/ahci.h +++ b/trunk/drivers/ata/ahci.h @@ -320,6 +320,7 @@ extern struct device_attribute *ahci_sdev_attrs[]; extern struct ata_port_operations ahci_ops; extern struct ata_port_operations ahci_pmp_retry_srst_ops; +unsigned int ahci_dev_classify(struct ata_port *ap); void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, u32 opts); void ahci_save_initial_config(struct device *dev, diff --git a/trunk/drivers/ata/ata_piix.c b/trunk/drivers/ata/ata_piix.c index 3c809bfbccf5..ef773e12af79 100644 --- a/trunk/drivers/ata/ata_piix.c +++ b/trunk/drivers/ata/ata_piix.c @@ -329,6 +329,14 @@ static const struct pci_device_id piix_pci_tbl[] = { { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (Lynx Point) */ { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (Lynx Point-LP) */ + { 0x8086, 0x9c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, + /* SATA Controller IDE (Lynx Point-LP) */ + { 0x8086, 0x9c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, + /* SATA Controller IDE (Lynx Point-LP) */ + { 0x8086, 0x9c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (Lynx Point-LP) */ + { 0x8086, 0x9c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (DH89xxCC) */ { 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, { } /* terminate list */ diff --git a/trunk/drivers/ata/libahci.c b/trunk/drivers/ata/libahci.c index f9eaa82311a9..555c07afa05b 100644 --- a/trunk/drivers/ata/libahci.c +++ b/trunk/drivers/ata/libahci.c @@ -1139,7 +1139,7 @@ static void ahci_dev_config(struct ata_device *dev) } } -static unsigned int ahci_dev_classify(struct ata_port *ap) +unsigned int ahci_dev_classify(struct ata_port *ap) { void __iomem *port_mmio = ahci_port_base(ap); struct ata_taskfile tf; @@ -1153,6 +1153,7 @@ static unsigned int ahci_dev_classify(struct ata_port *ap) return ata_dev_classify(&tf); } +EXPORT_SYMBOL_GPL(ahci_dev_classify); void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, u32 opts) diff --git a/trunk/drivers/ata/libata-acpi.c 
b/trunk/drivers/ata/libata-acpi.c index 902b5a457170..fd9ecf74e631 100644 --- a/trunk/drivers/ata/libata-acpi.c +++ b/trunk/drivers/ata/libata-acpi.c @@ -60,17 +60,7 @@ acpi_handle ata_ap_acpi_handle(struct ata_port *ap) if (ap->flags & ATA_FLAG_ACPI_SATA) return NULL; - /* - * If acpi bind operation has already happened, we can get the handle - * for the port by checking the corresponding scsi_host device's - * firmware node, otherwise we will need to find out the handle from - * its parent's acpi node. - */ - if (ap->scsi_host) - return DEVICE_ACPI_HANDLE(&ap->scsi_host->shost_gendev); - else - return acpi_get_child(DEVICE_ACPI_HANDLE(ap->host->dev), - ap->port_no); + return acpi_get_child(DEVICE_ACPI_HANDLE(ap->host->dev), ap->port_no); } EXPORT_SYMBOL(ata_ap_acpi_handle); @@ -1101,6 +1091,9 @@ static int ata_acpi_bind_host(struct ata_port *ap, acpi_handle *handle) if (!*handle) return -ENODEV; + if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0) + ap->pflags |= ATA_PFLAG_INIT_GTM_VALID; + return 0; } diff --git a/trunk/drivers/ata/libata-core.c b/trunk/drivers/ata/libata-core.c index fadd5866d40f..8e1039c8e159 100644 --- a/trunk/drivers/ata/libata-core.c +++ b/trunk/drivers/ata/libata-core.c @@ -4062,7 +4062,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, - { "2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, + { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, /* Odd clown on sil3726/4726 PMPs */ { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, @@ -4128,6 +4128,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* Devices that do not need bridging limits applied */ { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, }, + { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, }, /* Devices which aren't very happy with higher link speeds */ { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, }, diff --git a/trunk/drivers/ata/pata_atiixp.c b/trunk/drivers/ata/pata_atiixp.c index 361c75cea57b..24e51056ac26 100644 --- a/trunk/drivers/ata/pata_atiixp.c +++ b/trunk/drivers/ata/pata_atiixp.c @@ -20,6 +20,7 @@ #include #include #include +#include #define DRV_NAME "pata_atiixp" #define DRV_VERSION "0.4.6" @@ -33,11 +34,26 @@ enum { ATIIXP_IDE_UDMA_MODE = 0x56 }; +static const struct dmi_system_id attixp_cable_override_dmi_table[] = { + { + /* Board has onboard PATA<->SATA converters */ + .ident = "MSI E350DM-E33", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "MSI"), + DMI_MATCH(DMI_BOARD_NAME, "E350DM-E33(MS-7720)"), + }, + }, + { } +}; + static int atiixp_cable_detect(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u8 udma; + if (dmi_check_system(attixp_cable_override_dmi_table)) + return ATA_CBL_PATA40_SHORT; + /* Hack from drivers/ide/pci. 
Really we want to know how to do the raw detection not play follow the bios mode guess */ pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma); diff --git a/trunk/drivers/ata/sata_mv.c b/trunk/drivers/ata/sata_mv.c index 24712adf69df..311be18d3f03 100644 --- a/trunk/drivers/ata/sata_mv.c +++ b/trunk/drivers/ata/sata_mv.c @@ -65,6 +65,8 @@ #include #include #include +#include +#include #include #include #include @@ -4026,7 +4028,7 @@ static int mv_platform_probe(struct platform_device *pdev) struct ata_host *host; struct mv_host_priv *hpriv; struct resource *res; - int n_ports = 0; + int n_ports = 0, irq = 0; int rc; #if defined(CONFIG_HAVE_CLK) int port; @@ -4050,8 +4052,14 @@ static int mv_platform_probe(struct platform_device *pdev) return -EINVAL; /* allocate host */ - mv_platform_data = pdev->dev.platform_data; - n_ports = mv_platform_data->n_ports; + if (pdev->dev.of_node) { + of_property_read_u32(pdev->dev.of_node, "nr-ports", &n_ports); + irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + } else { + mv_platform_data = pdev->dev.platform_data; + n_ports = mv_platform_data->n_ports; + irq = platform_get_irq(pdev, 0); + } host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); @@ -4109,8 +4117,7 @@ static int mv_platform_probe(struct platform_device *pdev) dev_info(&pdev->dev, "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH, host->n_ports); - rc = ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt, - IRQF_SHARED, &mv6_sht); + rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht); if (!rc) return 0; @@ -4205,15 +4212,24 @@ static int mv_platform_resume(struct platform_device *pdev) #define mv_platform_resume NULL #endif +#ifdef CONFIG_OF +static struct of_device_id mv_sata_dt_ids[] __devinitdata = { + { .compatible = "marvell,orion-sata", }, + {}, +}; +MODULE_DEVICE_TABLE(of, mv_sata_dt_ids); +#endif + static struct platform_driver mv_platform_driver = { - .probe = mv_platform_probe, - .remove = __devexit_p(mv_platform_remove), - .suspend = mv_platform_suspend, - .resume = mv_platform_resume, - .driver = { - .name = DRV_NAME, - .owner = THIS_MODULE, - }, + .probe = mv_platform_probe, + .remove = __devexit_p(mv_platform_remove), + .suspend = mv_platform_suspend, + .resume = mv_platform_resume, + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(mv_sata_dt_ids), + }, }; diff --git a/trunk/drivers/atm/iphase.c b/trunk/drivers/atm/iphase.c index d4386019af5d..96cce6d53195 100644 --- a/trunk/drivers/atm/iphase.c +++ b/trunk/drivers/atm/iphase.c @@ -2362,7 +2362,7 @@ static int __devinit ia_init(struct atm_dev *dev) { printk(DEV_LABEL " (itf %d): can't set up page mapping\n", dev->number); - return error; + return -ENOMEM; } IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n", dev->number, iadev->pci->revision, base, iadev->irq);) diff --git a/trunk/drivers/base/core.c b/trunk/drivers/base/core.c index f338037a4f3d..5e6e00bc1652 100644 --- a/trunk/drivers/base/core.c +++ b/trunk/drivers/base/core.c @@ -1865,6 +1865,7 @@ int __dev_printk(const char *level, const struct device *dev, struct va_format *vaf) { char dict[128]; + const char *level_extra = ""; size_t dictlen = 0; const char *subsys; @@ -1911,10 +1912,14 @@ int __dev_printk(const char *level, const struct device *dev, "DEVICE=+%s:%s", subsys, dev_name(dev)); } skip: + if (level[2]) + level_extra = &level[2]; /* skip past KERN_SOH "L" */ + return printk_emit(0, level[1] - '0', 
dictlen ? dict : NULL, dictlen, - "%s %s: %pV", - dev_driver_string(dev), dev_name(dev), vaf); + "%s %s: %s%pV", + dev_driver_string(dev), dev_name(dev), + level_extra, vaf); } EXPORT_SYMBOL(__dev_printk); diff --git a/trunk/drivers/base/devtmpfs.c b/trunk/drivers/base/devtmpfs.c index d91a3a0b2325..deb4a456cf83 100644 --- a/trunk/drivers/base/devtmpfs.c +++ b/trunk/drivers/base/devtmpfs.c @@ -156,9 +156,7 @@ static int dev_mkdir(const char *name, umode_t mode) if (!err) /* mark as kernel-created inode */ dentry->d_inode->i_private = &thread; - dput(dentry); - mutex_unlock(&path.dentry->d_inode->i_mutex); - path_put(&path); + done_path_create(&path, dentry); return err; } @@ -218,10 +216,7 @@ static int handle_create(const char *nodename, umode_t mode, struct device *dev) /* mark as kernel-created inode */ dentry->d_inode->i_private = &thread; } - dput(dentry); - - mutex_unlock(&path.dentry->d_inode->i_mutex); - path_put(&path); + done_path_create(&path, dentry); return err; } diff --git a/trunk/drivers/base/dma-contiguous.c b/trunk/drivers/base/dma-contiguous.c index 78efb0306a44..34d94c762a1e 100644 --- a/trunk/drivers/base/dma-contiguous.c +++ b/trunk/drivers/base/dma-contiguous.c @@ -250,7 +250,7 @@ int __init dma_declare_contiguous(struct device *dev, unsigned long size, return -EINVAL; /* Sanitise input arguments */ - alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order); + alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order); base = ALIGN(base, alignment); size = ALIGN(size, alignment); limit &= ~(alignment - 1); diff --git a/trunk/drivers/base/power/clock_ops.c b/trunk/drivers/base/power/clock_ops.c index 869d7ff2227f..eb78e9640c4a 100644 --- a/trunk/drivers/base/power/clock_ops.c +++ b/trunk/drivers/base/power/clock_ops.c @@ -169,8 +169,7 @@ void pm_clk_init(struct device *dev) */ int pm_clk_create(struct device *dev) { - int ret = dev_pm_get_subsys_data(dev); - return ret < 0 ? ret : 0; + return dev_pm_get_subsys_data(dev); } /** diff --git a/trunk/drivers/base/power/common.c b/trunk/drivers/base/power/common.c index a14085cc613f..39c32529b833 100644 --- a/trunk/drivers/base/power/common.c +++ b/trunk/drivers/base/power/common.c @@ -24,7 +24,6 @@ int dev_pm_get_subsys_data(struct device *dev) { struct pm_subsys_data *psd; - int ret = 0; psd = kzalloc(sizeof(*psd), GFP_KERNEL); if (!psd) @@ -40,7 +39,6 @@ int dev_pm_get_subsys_data(struct device *dev) dev->power.subsys_data = psd; pm_clk_init(dev); psd = NULL; - ret = 1; } spin_unlock_irq(&dev->power.lock); @@ -48,7 +46,7 @@ int dev_pm_get_subsys_data(struct device *dev) /* kfree() verifies that its argument is nonzero. */ kfree(psd); - return ret; + return 0; } EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data); diff --git a/trunk/drivers/base/power/runtime.c b/trunk/drivers/base/power/runtime.c index 59894873a3b3..7d9c1cb1c39a 100644 --- a/trunk/drivers/base/power/runtime.c +++ b/trunk/drivers/base/power/runtime.c @@ -147,6 +147,8 @@ static int rpm_check_suspend_allowed(struct device *dev) || (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME)) retval = -EAGAIN; + else if (__dev_pm_qos_read_value(dev) < 0) + retval = -EPERM; else if (dev->power.runtime_status == RPM_SUSPENDED) retval = 1; @@ -388,7 +390,6 @@ static int rpm_suspend(struct device *dev, int rpmflags) goto repeat; } - dev->power.deferred_resume = false; if (dev->power.no_callbacks) goto no_callback; /* Assume success. 
*/ @@ -403,12 +404,6 @@ static int rpm_suspend(struct device *dev, int rpmflags) goto out; } - if (__dev_pm_qos_read_value(dev) < 0) { - /* Negative PM QoS constraint means "never suspend". */ - retval = -EPERM; - goto out; - } - __update_runtime_status(dev, RPM_SUSPENDING); if (dev->pm_domain) @@ -440,6 +435,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) wake_up_all(&dev->power.wait_queue); if (dev->power.deferred_resume) { + dev->power.deferred_resume = false; rpm_resume(dev, 0); retval = -EAGAIN; goto out; @@ -584,6 +580,7 @@ static int rpm_resume(struct device *dev, int rpmflags) || dev->parent->power.runtime_status == RPM_ACTIVE) { atomic_inc(&dev->parent->power.child_count); spin_unlock(&dev->parent->power.lock); + retval = 1; goto no_callback; /* Assume success. */ } spin_unlock(&dev->parent->power.lock); @@ -664,7 +661,7 @@ static int rpm_resume(struct device *dev, int rpmflags) } wake_up_all(&dev->power.wait_queue); - if (!retval) + if (retval >= 0) rpm_idle(dev, RPM_ASYNC); out: diff --git a/trunk/drivers/bcma/host_pci.c b/trunk/drivers/bcma/host_pci.c index 11b32d2642df..a6e5672c67e7 100644 --- a/trunk/drivers/bcma/host_pci.c +++ b/trunk/drivers/bcma/host_pci.c @@ -272,6 +272,7 @@ static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = { { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, { 0, }, }; diff --git a/trunk/drivers/bcma/sprom.c b/trunk/drivers/bcma/sprom.c index 26823d97fd9f..9ea4627dc0c2 100644 --- a/trunk/drivers/bcma/sprom.c +++ b/trunk/drivers/bcma/sprom.c @@ -507,7 +507,9 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus) /* for these chips OTP is always available */ present = true; break; - + case BCMA_CHIP_ID_BCM43228: + present = chip_status & BCMA_CC_CHIPST_43228_OTP_PRESENT; + break; default: present = false; break; diff --git a/trunk/drivers/block/cciss_scsi.c b/trunk/drivers/block/cciss_scsi.c index acda773b3720..38aa6dda6b81 100644 --- a/trunk/drivers/block/cciss_scsi.c +++ b/trunk/drivers/block/cciss_scsi.c @@ -763,16 +763,7 @@ static void complete_scsi_command(CommandList_struct *c, int timeout, { case CMD_TARGET_STATUS: /* Pass it up to the upper layers... */ - if( ei->ScsiStatus) - { -#if 0 - printk(KERN_WARNING "cciss: cmd %p " - "has SCSI Status = %x\n", - c, ei->ScsiStatus); -#endif - cmd->result |= (ei->ScsiStatus << 1); - } - else { /* scsi status is zero??? How??? 
*/ + if (!ei->ScsiStatus) { /* Ordinarily, this case should never happen, but there is a bug in some released firmware revisions that allows it to happen diff --git a/trunk/drivers/block/drbd/drbd_actlog.c b/trunk/drivers/block/drbd/drbd_actlog.c index e54e31b02b88..3fbef018ce55 100644 --- a/trunk/drivers/block/drbd/drbd_actlog.c +++ b/trunk/drivers/block/drbd/drbd_actlog.c @@ -411,7 +411,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused) + mdev->ldev->md.al_offset + mdev->al_tr_pos; if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) - drbd_chk_io_error(mdev, 1, true); + drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR); if (++mdev->al_tr_pos > div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT)) @@ -876,7 +876,11 @@ int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size, unsigned int enr, count = 0; struct lc_element *e; - if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { + /* this should be an empty REQ_FLUSH */ + if (size == 0) + return 0; + + if (size < 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { dev_err(DEV, "sector: %llus, size: %d\n", (unsigned long long)sector, size); return 0; diff --git a/trunk/drivers/block/drbd/drbd_bitmap.c b/trunk/drivers/block/drbd/drbd_bitmap.c index fcb956bb4b4c..d84566496746 100644 --- a/trunk/drivers/block/drbd/drbd_bitmap.c +++ b/trunk/drivers/block/drbd/drbd_bitmap.c @@ -889,6 +889,7 @@ struct bm_aio_ctx { unsigned int done; unsigned flags; #define BM_AIO_COPY_PAGES 1 +#define BM_WRITE_ALL_PAGES 2 int error; struct kref kref; }; @@ -1059,7 +1060,8 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx) break; if (rw & WRITE) { - if (bm_test_page_unchanged(b->bm_pages[i])) { + if (!(flags & BM_WRITE_ALL_PAGES) && + bm_test_page_unchanged(b->bm_pages[i])) { dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i); continue; } @@ -1096,7 +1098,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w if (ctx->error) { dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n"); - drbd_chk_io_error(mdev, 1, true); + drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR); err = -EIO; /* ctx->error ? */ } @@ -1140,6 +1142,17 @@ int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local) return bm_rw(mdev, WRITE, 0, 0); } +/** + * drbd_bm_write_all() - Write the whole bitmap to its on disk location. + * @mdev: DRBD device. + * + * Will write all pages. + */ +int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local) +{ + return bm_rw(mdev, WRITE, BM_WRITE_ALL_PAGES, 0); +} + /** * drbd_bm_lazy_write_out() - Write bitmap pages 0 to @upper_idx-1, if they have changed. * @mdev: DRBD device. @@ -1212,7 +1225,7 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done); if (ctx->error) - drbd_chk_io_error(mdev, 1, true); + drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR); /* that should force detach, so the in memory bitmap will be * gone in a moment as well. 
*/ diff --git a/trunk/drivers/block/drbd/drbd_int.h b/trunk/drivers/block/drbd/drbd_int.h index 02f013a073a7..b953cc7c9c00 100644 --- a/trunk/drivers/block/drbd/drbd_int.h +++ b/trunk/drivers/block/drbd/drbd_int.h @@ -813,7 +813,6 @@ enum { SIGNAL_ASENDER, /* whether asender wants to be interrupted */ SEND_PING, /* whether asender should send a ping asap */ - UNPLUG_QUEUED, /* only relevant with kernel 2.4 */ UNPLUG_REMOTE, /* sending a "UnplugRemote" could help */ MD_DIRTY, /* current uuids and flags not yet on disk */ DISCARD_CONCURRENT, /* Set on one node, cleared on the peer! */ @@ -824,7 +823,6 @@ enum { CRASHED_PRIMARY, /* This node was a crashed primary. * Gets cleared when the state.conn * goes into C_CONNECTED state. */ - NO_BARRIER_SUPP, /* underlying block device doesn't implement barriers */ CONSIDER_RESYNC, MD_NO_FUA, /* Users wants us to not use FUA/FLUSH on meta data dev */ @@ -834,6 +832,7 @@ enum { BITMAP_IO_QUEUED, /* Started bitmap IO */ GO_DISKLESS, /* Disk is being detached, on io-error or admin request. */ WAS_IO_ERROR, /* Local disk failed returned IO error */ + FORCE_DETACH, /* Force-detach from local disk, aborting any pending local IO */ RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */ NET_CONGESTED, /* The data socket is congested */ @@ -851,6 +850,13 @@ enum { AL_SUSPENDED, /* Activity logging is currently suspended. */ AHEAD_TO_SYNC_SOURCE, /* Ahead -> SyncSource queued */ STATE_SENT, /* Do not change state/UUIDs while this is set */ + + CALLBACK_PENDING, /* Whether we have a call_usermodehelper(, UMH_WAIT_PROC) + * pending, from drbd worker context. + * If set, bdi_write_congested() returns true, + * so shrink_page_list() would not recurse into, + * and potentially deadlock on, this drbd worker. + */ }; struct drbd_bitmap; /* opaque for drbd_conf */ @@ -1130,8 +1136,8 @@ struct drbd_conf { int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */ int rs_planed; /* resync sectors already planned */ atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */ - int peer_max_bio_size; - int local_max_bio_size; + unsigned int peer_max_bio_size; + unsigned int local_max_bio_size; }; static inline struct drbd_conf *minor_to_mdev(unsigned int minor) @@ -1435,9 +1441,9 @@ struct bm_extent { * hash table. 
*/ #define HT_SHIFT 8 #define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT)) -#define DRBD_MAX_BIO_SIZE_SAFE (1 << 12) /* Works always = 4k */ +#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */ -#define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */ +#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* The old header only allows packets up to 32Kib data */ /* Number of elements in the app_reads_hash */ #define APP_R_HSIZE 15 @@ -1463,6 +1469,7 @@ extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr); extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local); extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local); extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local); +extern int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local); extern int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local); extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr); @@ -1840,12 +1847,20 @@ static inline int drbd_request_state(struct drbd_conf *mdev, return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED); } +enum drbd_force_detach_flags { + DRBD_IO_ERROR, + DRBD_META_IO_ERROR, + DRBD_FORCE_DETACH, +}; + #define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__) -static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, const char *where) +static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, + enum drbd_force_detach_flags forcedetach, + const char *where) { switch (mdev->ldev->dc.on_io_error) { case EP_PASS_ON: - if (!forcedetach) { + if (forcedetach == DRBD_IO_ERROR) { if (__ratelimit(&drbd_ratelimit_state)) dev_err(DEV, "Local IO failed in %s.\n", where); if (mdev->state.disk > D_INCONSISTENT) @@ -1856,6 +1871,8 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, case EP_DETACH: case EP_CALL_HELPER: set_bit(WAS_IO_ERROR, &mdev->flags); + if (forcedetach == DRBD_FORCE_DETACH) + set_bit(FORCE_DETACH, &mdev->flags); if (mdev->state.disk > D_FAILED) { _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL); dev_err(DEV, @@ -1875,7 +1892,7 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, */ #define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__) static inline void drbd_chk_io_error_(struct drbd_conf *mdev, - int error, int forcedetach, const char *where) + int error, enum drbd_force_detach_flags forcedetach, const char *where) { if (error) { unsigned long flags; @@ -2405,15 +2422,17 @@ static inline void dec_ap_bio(struct drbd_conf *mdev) int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt); D_ASSERT(ap_bio >= 0); + + if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) { + if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags)) + drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w); + } + /* this currently does wake_up for every dec_ap_bio! * maybe rather introduce some type of hysteresis? * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? 
*/ if (ap_bio < mxb) wake_up(&mdev->misc_wait); - if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) { - if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags)) - drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w); - } } static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val) diff --git a/trunk/drivers/block/drbd/drbd_main.c b/trunk/drivers/block/drbd/drbd_main.c index 920ede2829d6..f93a0320e952 100644 --- a/trunk/drivers/block/drbd/drbd_main.c +++ b/trunk/drivers/block/drbd/drbd_main.c @@ -79,6 +79,7 @@ static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused); static void md_sync_timer_fn(unsigned long data); static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused); static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused); +static void _tl_clear(struct drbd_conf *mdev); MODULE_AUTHOR("Philipp Reisner , " "Lars Ellenberg "); @@ -432,19 +433,10 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what) /* Actions operating on the disk state, also want to work on requests that got barrier acked. */ - switch (what) { - case fail_frozen_disk_io: - case restart_frozen_disk_io: - list_for_each_safe(le, tle, &mdev->barrier_acked_requests) { - req = list_entry(le, struct drbd_request, tl_requests); - _req_mod(req, what); - } - case connection_lost_while_pending: - case resend: - break; - default: - dev_err(DEV, "what = %d in _tl_restart()\n", what); + list_for_each_safe(le, tle, &mdev->barrier_acked_requests) { + req = list_entry(le, struct drbd_request, tl_requests); + _req_mod(req, what); } } @@ -458,12 +450,17 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what) * receiver thread and the worker thread. */ void tl_clear(struct drbd_conf *mdev) +{ + spin_lock_irq(&mdev->req_lock); + _tl_clear(mdev); + spin_unlock_irq(&mdev->req_lock); +} + +static void _tl_clear(struct drbd_conf *mdev) { struct list_head *le, *tle; struct drbd_request *r; - spin_lock_irq(&mdev->req_lock); - _tl_restart(mdev, connection_lost_while_pending); /* we expect this list to be empty. */ @@ -482,7 +479,6 @@ void tl_clear(struct drbd_conf *mdev) memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *)); - spin_unlock_irq(&mdev->req_lock); } void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what) @@ -1476,12 +1472,12 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, if (ns.susp_fen) { /* case1: The outdate peer handler is successful: */ if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) { - tl_clear(mdev); if (test_bit(NEW_CUR_UUID, &mdev->flags)) { drbd_uuid_new_current(mdev); clear_bit(NEW_CUR_UUID, &mdev->flags); } spin_lock_irq(&mdev->req_lock); + _tl_clear(mdev); _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL); spin_unlock_irq(&mdev->req_lock); } @@ -1514,6 +1510,13 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, /* Do not change the order of the if above and the two below... */ if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */ + /* we probably will start a resync soon. + * make sure those things are properly reset. 
*/ + mdev->rs_total = 0; + mdev->rs_failed = 0; + atomic_set(&mdev->rs_pending_cnt, 0); + drbd_rs_cancel_all(mdev); + drbd_send_uuids(mdev); drbd_send_state(mdev, ns); } @@ -1630,9 +1633,24 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, eh = mdev->ldev->dc.on_io_error; was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags); - /* Immediately allow completion of all application IO, that waits - for completion from the local disk. */ - tl_abort_disk_io(mdev); + if (was_io_error && eh == EP_CALL_HELPER) + drbd_khelper(mdev, "local-io-error"); + + /* Immediately allow completion of all application IO, + * that waits for completion from the local disk, + * if this was a force-detach due to disk_timeout + * or administrator request (drbdsetup detach --force). + * Do NOT abort otherwise. + * Aborting local requests may cause serious problems, + * if requests are completed to upper layers already, + * and then later the already submitted local bio completes. + * This can cause DMA into former bio pages that meanwhile + * have been re-used for other things. + * So aborting local requests may cause crashes, + * or even worse, silent data corruption. + */ + if (test_and_clear_bit(FORCE_DETACH, &mdev->flags)) + tl_abort_disk_io(mdev); /* current state still has to be D_FAILED, * there is only one way out: to D_DISKLESS, @@ -1653,9 +1671,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, drbd_md_sync(mdev); } put_ldev(mdev); - - if (was_io_error && eh == EP_CALL_HELPER) - drbd_khelper(mdev, "local-io-error"); } /* second half of local IO error, failure to attach, @@ -1669,10 +1684,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, "ASSERT FAILED: disk is %s while going diskless\n", drbd_disk_str(mdev->state.disk)); - mdev->rs_total = 0; - mdev->rs_failed = 0; - atomic_set(&mdev->rs_pending_cnt, 0); - if (ns.conn >= C_CONNECTED) drbd_send_state(mdev, ns); @@ -2194,7 +2205,8 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl { struct p_sizes p; sector_t d_size, u_size; - int q_order_type, max_bio_size; + int q_order_type; + unsigned int max_bio_size; int ok; if (get_ldev_if_state(mdev, D_NEGOTIATING)) { @@ -2203,7 +2215,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl u_size = mdev->ldev->dc.disk_size; q_order_type = drbd_queue_order_type(mdev); max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9; - max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE); + max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE); put_ldev(mdev); } else { d_size = 0; @@ -2214,7 +2226,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl /* Never allow old drbd (up to 8.3.7) to see more than 32KiB */ if (mdev->agreed_pro_version <= 94) - max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET); + max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET); p.d_size = cpu_to_be64(d_size); p.u_size = cpu_to_be64(u_size); @@ -3521,9 +3533,9 @@ static void drbd_cleanup(void) } /** - * drbd_congested() - Callback for pdflush + * drbd_congested() - Callback for the flusher thread * @congested_data: User data - * @bdi_bits: Bits pdflush is currently interested in + * @bdi_bits: Bits the BDI flusher thread is currently interested in * * Returns 1<flags)) { + r |= (1 << BDI_async_congested); + /* Without good local data, we would need to read from remote, + * and that would need the worker thread as well, 
which is + * currently blocked waiting for that usermode helper to + * finish. + */ + if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) + r |= (1 << BDI_sync_congested); + else + put_ldev(mdev); + r &= bdi_bits; + reason = 'c'; + goto out; + } + if (get_ldev(mdev)) { q = bdev_get_queue(mdev->ldev->backing_bdev); r = bdi_congested(&q->backing_dev_info, bdi_bits); @@ -3604,6 +3632,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor) q->backing_dev_info.congested_data = mdev; blk_queue_make_request(q, drbd_make_request); + blk_queue_flush(q, REQ_FLUSH | REQ_FUA); /* Setting the max_hw_sectors to an odd value of 8kibyte here This triggers a max_bio_size message upon first attach or connect */ blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8); @@ -3870,7 +3899,7 @@ void drbd_md_sync(struct drbd_conf *mdev) if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) { /* this was a try anyways ... */ dev_err(DEV, "meta data update failed!\n"); - drbd_chk_io_error(mdev, 1, true); + drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR); } /* Update mdev->ldev->md.la_size_sect, @@ -3950,9 +3979,9 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) spin_lock_irq(&mdev->req_lock); if (mdev->state.conn < C_CONNECTED) { - int peer; + unsigned int peer; peer = be32_to_cpu(buffer->la_peer_max_bio_size); - peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE); + peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE); mdev->peer_max_bio_size = peer; } spin_unlock_irq(&mdev->req_lock); diff --git a/trunk/drivers/block/drbd/drbd_nl.c b/trunk/drivers/block/drbd/drbd_nl.c index 6d4de6a72e80..edb490aad8b4 100644 --- a/trunk/drivers/block/drbd/drbd_nl.c +++ b/trunk/drivers/block/drbd/drbd_nl.c @@ -147,6 +147,9 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd) char *argv[] = {usermode_helper, cmd, mb, NULL }; int ret; + if (current == mdev->worker.task) + set_bit(CALLBACK_PENDING, &mdev->flags); + snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev)); if (get_net_conf(mdev)) { @@ -189,6 +192,9 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd) usermode_helper, cmd, mb, (ret >> 8) & 0xff, ret); + if (current == mdev->worker.task) + clear_bit(CALLBACK_PENDING, &mdev->flags); + if (ret < 0) /* Ignore any ERRNOs we got. */ ret = 0; @@ -668,8 +674,8 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds la_size_changed && md_moved ? "size changed and md moved" : la_size_changed ? "size changed" : "md moved"); /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */ - err = drbd_bitmap_io(mdev, &drbd_bm_write, - "size changed", BM_LOCKED_MASK); + err = drbd_bitmap_io(mdev, md_moved ? 
&drbd_bm_write_all : &drbd_bm_write, + "size changed", BM_LOCKED_MASK); if (err) { rv = dev_size_error; goto out; @@ -795,8 +801,8 @@ static int drbd_check_al_size(struct drbd_conf *mdev) static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size) { struct request_queue * const q = mdev->rq_queue; - int max_hw_sectors = max_bio_size >> 9; - int max_segments = 0; + unsigned int max_hw_sectors = max_bio_size >> 9; + unsigned int max_segments = 0; if (get_ldev_if_state(mdev, D_ATTACHING)) { struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; @@ -829,7 +835,7 @@ static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev) { - int now, new, local, peer; + unsigned int now, new, local, peer; now = queue_max_hw_sectors(mdev->rq_queue) << 9; local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */ @@ -840,13 +846,14 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev) mdev->local_max_bio_size = local; put_ldev(mdev); } + local = min(local, DRBD_MAX_BIO_SIZE); /* We may ignore peer limits if the peer is modern enough. Because new from 8.3.8 onwards the peer can use multiple BIOs for a single peer_request */ if (mdev->state.conn >= C_CONNECTED) { if (mdev->agreed_pro_version < 94) { - peer = min_t(int, mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET); + peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET); /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */ } else if (mdev->agreed_pro_version == 94) peer = DRBD_MAX_SIZE_H80_PACKET; @@ -854,10 +861,10 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev) peer = DRBD_MAX_BIO_SIZE; } - new = min_t(int, local, peer); + new = min(local, peer); if (mdev->state.role == R_PRIMARY && new < now) - dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now); + dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now); if (new != now) dev_info(DEV, "max BIO size = %u\n", new); @@ -950,6 +957,14 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp * to realize a "hot spare" feature (not that I'd recommend that) */ wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt)); + /* make sure there is no leftover from previous force-detach attempts */ + clear_bit(FORCE_DETACH, &mdev->flags); + + /* and no leftover from previously aborted resync or verify, either */ + mdev->rs_total = 0; + mdev->rs_failed = 0; + atomic_set(&mdev->rs_pending_cnt, 0); + /* allocation not in the IO path, cqueue thread context */ nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL); if (!nbc) { @@ -1345,6 +1360,7 @@ static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, } if (dt.detach_force) { + set_bit(FORCE_DETACH, &mdev->flags); drbd_force_state(mdev, NS(disk, D_FAILED)); reply->ret_code = SS_SUCCESS; goto out; @@ -1962,9 +1978,11 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl int retcode; /* If there is still bitmap IO pending, probably because of a previous - * resync just being finished, wait for it before requesting a new resync. */ + * resync just being finished, wait for it before requesting a new resync. + * Also wait for it's after_state_ch(). 
*/ drbd_suspend_io(mdev); wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); + drbd_flush_workqueue(mdev); retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED); @@ -2003,9 +2021,11 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re int retcode; /* If there is still bitmap IO pending, probably because of a previous - * resync just being finished, wait for it before requesting a new resync. */ + * resync just being finished, wait for it before requesting a new resync. + * Also wait for it's after_state_ch(). */ drbd_suspend_io(mdev); wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); + drbd_flush_workqueue(mdev); retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED); diff --git a/trunk/drivers/block/drbd/drbd_proc.c b/trunk/drivers/block/drbd/drbd_proc.c index 869bada2ed06..5496104f90b9 100644 --- a/trunk/drivers/block/drbd/drbd_proc.c +++ b/trunk/drivers/block/drbd/drbd_proc.c @@ -245,6 +245,9 @@ static int drbd_seq_show(struct seq_file *seq, void *v) mdev->state.role == R_SECONDARY) { seq_printf(seq, "%2d: cs:Unconfigured\n", i); } else { + /* reset mdev->congestion_reason */ + bdi_rw_congested(&mdev->rq_queue->backing_dev_info); + seq_printf(seq, "%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n" " ns:%u nr:%u dw:%u dr:%u al:%u bm:%u " diff --git a/trunk/drivers/block/drbd/drbd_receiver.c b/trunk/drivers/block/drbd/drbd_receiver.c index ea4836e0ae98..c74ca2df7431 100644 --- a/trunk/drivers/block/drbd/drbd_receiver.c +++ b/trunk/drivers/block/drbd/drbd_receiver.c @@ -277,6 +277,9 @@ static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net) atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use; int i; + if (page == NULL) + return; + if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count) i = page_chain_free(page); else { @@ -316,7 +319,7 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev, gfp_t gfp_mask) __must_hold(local) { struct drbd_epoch_entry *e; - struct page *page; + struct page *page = NULL; unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT; if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE)) @@ -329,9 +332,11 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev, return NULL; } - page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT)); - if (!page) - goto fail; + if (data_size) { + page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT)); + if (!page) + goto fail; + } INIT_HLIST_NODE(&e->collision); e->epoch = NULL; @@ -1270,7 +1275,6 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __ data_size -= dgs; - ERR_IF(data_size == 0) return NULL; ERR_IF(data_size & 0x1ff) return NULL; ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL; @@ -1291,6 +1295,9 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __ if (!e) return NULL; + if (!data_size) + return e; + ds = data_size; page = e->pages; page_chain_for_each(page) { @@ -1715,6 +1722,10 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned dp_flags = be32_to_cpu(p->dp_flags); rw |= wire_flags_to_bio(mdev, dp_flags); + if (e->pages == NULL) { + D_ASSERT(e->size == 0); + D_ASSERT(dp_flags & DP_FLUSH); + } if (dp_flags & DP_MAY_SET_IN_SYNC) e->flags |= EE_MAY_SET_IN_SYNC; @@ -3801,11 +3812,18 @@ void drbd_free_tl_hash(struct drbd_conf *mdev) mdev->ee_hash = NULL; mdev->ee_hash_s = 0; - /* paranoia code */ - for (h = mdev->tl_hash; h < mdev->tl_hash + 
mdev->tl_hash_s; h++) - if (h->first) - dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n", - (int)(h - mdev->tl_hash), h->first); + /* We may not have had the chance to wait for all locally pending + * application requests. The hlist_add_fake() prevents access after + * free on master bio completion. */ + for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++) { + struct drbd_request *req; + struct hlist_node *pos, *n; + hlist_for_each_entry_safe(req, pos, n, h, collision) { + hlist_del_init(&req->collision); + hlist_add_fake(&req->collision); + } + } + kfree(mdev->tl_hash); mdev->tl_hash = NULL; mdev->tl_hash_s = 0; diff --git a/trunk/drivers/block/drbd/drbd_req.c b/trunk/drivers/block/drbd/drbd_req.c index 8e93a6ac9bb6..01b2ac641c7b 100644 --- a/trunk/drivers/block/drbd/drbd_req.c +++ b/trunk/drivers/block/drbd/drbd_req.c @@ -455,7 +455,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, req->rq_state |= RQ_LOCAL_COMPLETED; req->rq_state &= ~RQ_LOCAL_PENDING; - __drbd_chk_io_error(mdev, false); + __drbd_chk_io_error(mdev, DRBD_IO_ERROR); _req_may_be_done_not_susp(req, m); break; @@ -477,7 +477,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, break; } - __drbd_chk_io_error(mdev, false); + __drbd_chk_io_error(mdev, DRBD_IO_ERROR); goto_queue_for_net_read: @@ -695,6 +695,12 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, break; case resend: + /* Simply complete (local only) READs. */ + if (!(req->rq_state & RQ_WRITE) && !req->w.cb) { + _req_may_be_done(req, m); + break; + } + /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK before the connection loss (B&C only); only P_BARRIER_ACK was missing. Trowing them out of the TL here by pretending we got a BARRIER_ACK @@ -834,7 +840,15 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns req->private_bio = NULL; } if (rw == WRITE) { - remote = 1; + /* Need to replicate writes. Unless it is an empty flush, + * which is better mapped to a DRBD P_BARRIER packet, + * also for drbd wire protocol compatibility reasons. */ + if (unlikely(size == 0)) { + /* The only size==0 bios we expect are empty flushes. */ + D_ASSERT(bio->bi_rw & REQ_FLUSH); + remote = 0; + } else + remote = 1; } else { /* READ || READA */ if (local) { @@ -870,8 +884,11 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns * extent. This waits for any resync activity in the corresponding * resync extent to finish, and, if necessary, pulls in the target * extent into the activity log, which involves further disk io because - * of transactional on-disk meta data updates. */ - if (rw == WRITE && local && !test_bit(AL_SUSPENDED, &mdev->flags)) { + * of transactional on-disk meta data updates. + * Empty flushes don't need to go into the activity log, they can only + * flush data for pending writes which are already in there. */ + if (rw == WRITE && local && size + && !test_bit(AL_SUSPENDED, &mdev->flags)) { req->rq_state |= RQ_IN_ACT_LOG; drbd_al_begin_io(mdev, sector); } @@ -994,7 +1011,10 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns if (rw == WRITE && _req_conflicts(req)) goto fail_conflicting; - list_add_tail(&req->tl_requests, &mdev->newest_tle->requests); + /* no point in adding empty flushes to the transfer log, + * they are mapped to drbd barriers already. 
*/ + if (likely(size!=0)) + list_add_tail(&req->tl_requests, &mdev->newest_tle->requests); /* NOTE remote first: to get the concurrent write detection right, * we must register the request before start of local IO. */ @@ -1014,6 +1034,14 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) maybe_pull_ahead(mdev); + /* If this was a flush, queue a drbd barrier/start a new epoch. + * Unless the current epoch was empty anyways, or we are not currently + * replicating, in which case there is no point. */ + if (unlikely(bio->bi_rw & REQ_FLUSH) + && mdev->newest_tle->n_writes + && drbd_should_do_remote(mdev->state)) + queue_barrier(mdev); + spin_unlock_irq(&mdev->req_lock); kfree(b); /* if someone else has beaten us to it... */ @@ -1111,13 +1139,12 @@ void drbd_make_request(struct request_queue *q, struct bio *bio) /* * what we "blindly" assume: */ - D_ASSERT(bio->bi_size > 0); D_ASSERT((bio->bi_size & 0x1ff) == 0); /* to make some things easier, force alignment of requests within the * granularity of our hash tables */ s_enr = bio->bi_sector >> HT_SHIFT; - e_enr = (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT; + e_enr = bio->bi_size ? (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT : s_enr; if (likely(s_enr == e_enr)) { do { @@ -1275,7 +1302,7 @@ void request_timer_fn(unsigned long data) time_after(now, req->start_time + dt) && !time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) { dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n"); - __drbd_chk_io_error(mdev, 1); + __drbd_chk_io_error(mdev, DRBD_FORCE_DETACH); } nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et; spin_unlock_irq(&mdev->req_lock); diff --git a/trunk/drivers/block/drbd/drbd_worker.c b/trunk/drivers/block/drbd/drbd_worker.c index 620c70ff2231..6bce2cc179d4 100644 --- a/trunk/drivers/block/drbd/drbd_worker.c +++ b/trunk/drivers/block/drbd/drbd_worker.c @@ -111,7 +111,7 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local) if (list_empty(&mdev->read_ee)) wake_up(&mdev->ee_wait); if (test_bit(__EE_WAS_ERROR, &e->flags)) - __drbd_chk_io_error(mdev, false); + __drbd_chk_io_error(mdev, DRBD_IO_ERROR); spin_unlock_irqrestore(&mdev->req_lock, flags); drbd_queue_work(&mdev->data.work, &e->w); @@ -154,7 +154,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo : list_empty(&mdev->active_ee); if (test_bit(__EE_WAS_ERROR, &e->flags)) - __drbd_chk_io_error(mdev, false); + __drbd_chk_io_error(mdev, DRBD_IO_ERROR); spin_unlock_irqrestore(&mdev->req_lock, flags); if (is_syncer_req) @@ -1501,14 +1501,6 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) return; } - if (mdev->state.conn < C_AHEAD) { - /* In case a previous resync run was aborted by an IO error/detach on the peer. */ - drbd_rs_cancel_all(mdev); - /* This should be done when we abort the resync. We definitely do not - want to have this for connections going back and forth between - Ahead/Behind and SyncSource/SyncTarget */ - } - if (side == C_SYNC_TARGET) { /* Since application IO was locked out during C_WF_BITMAP_T and C_WF_SYNC_UUID we are still unmodified. 
Before going to C_SYNC_TARGET diff --git a/trunk/drivers/block/floppy.c b/trunk/drivers/block/floppy.c index 553f43a90953..a7d6347aaa79 100644 --- a/trunk/drivers/block/floppy.c +++ b/trunk/drivers/block/floppy.c @@ -191,6 +191,7 @@ static int print_unex = 1; #include #include #include +#include /* * PS/2 floppies have much slower step rates than regular floppies. @@ -2516,8 +2517,7 @@ static int make_raw_rw_request(void) set_fdc((long)current_req->rq_disk->private_data); raw_cmd = &default_raw_cmd; - raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_DISK | - FD_RAW_NEED_SEEK; + raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK; raw_cmd->cmd_count = NR_RW; if (rq_data_dir(current_req) == READ) { raw_cmd->flags |= FD_RAW_READ; @@ -4123,7 +4123,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data) return get_disk(disks[drive]); } -static int __init floppy_init(void) +static int __init do_floppy_init(void) { int i, unit, drive; int err, dr; @@ -4338,6 +4338,24 @@ static int __init floppy_init(void) return err; } +#ifndef MODULE +static __init void floppy_async_init(void *data, async_cookie_t cookie) +{ + do_floppy_init(); +} +#endif + +static int __init floppy_init(void) +{ +#ifdef MODULE + return do_floppy_init(); +#else + /* Don't hold up the bootup by the floppy initialization */ + async_schedule(floppy_async_init, NULL); + return 0; +#endif +} + static const struct io_region { int offset; int size; diff --git a/trunk/drivers/block/nbd.c b/trunk/drivers/block/nbd.c index 76bc96fd01c8..d07c9f7fded6 100644 --- a/trunk/drivers/block/nbd.c +++ b/trunk/drivers/block/nbd.c @@ -485,7 +485,7 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req) nbd_end_request(req); } else { spin_lock(&nbd->queue_lock); - list_add(&req->queuelist, &nbd->queue_head); + list_add_tail(&req->queuelist, &nbd->queue_head); spin_unlock(&nbd->queue_lock); } diff --git a/trunk/drivers/block/umem.c b/trunk/drivers/block/umem.c index 9a72277a31df..eb0d8216f557 100644 --- a/trunk/drivers/block/umem.c +++ b/trunk/drivers/block/umem.c @@ -513,42 +513,19 @@ static void process_page(unsigned long data) } } -struct mm_plug_cb { - struct blk_plug_cb cb; - struct cardinfo *card; -}; - -static void mm_unplug(struct blk_plug_cb *cb) +static void mm_unplug(struct blk_plug_cb *cb, bool from_schedule) { - struct mm_plug_cb *mmcb = container_of(cb, struct mm_plug_cb, cb); + struct cardinfo *card = cb->data; - spin_lock_irq(&mmcb->card->lock); - activate(mmcb->card); - spin_unlock_irq(&mmcb->card->lock); - kfree(mmcb); + spin_lock_irq(&card->lock); + activate(card); + spin_unlock_irq(&card->lock); + kfree(cb); } static int mm_check_plugged(struct cardinfo *card) { - struct blk_plug *plug = current->plug; - struct mm_plug_cb *mmcb; - - if (!plug) - return 0; - - list_for_each_entry(mmcb, &plug->cb_list, cb.list) { - if (mmcb->cb.callback == mm_unplug && mmcb->card == card) - return 1; - } - /* Not currently on the callback list */ - mmcb = kmalloc(sizeof(*mmcb), GFP_ATOMIC); - if (!mmcb) - return 0; - - mmcb->card = card; - mmcb->cb.callback = mm_unplug; - list_add(&mmcb->cb.list, &plug->cb_list); - return 1; + return !!blk_check_plugged(mm_unplug, card, sizeof(struct blk_plug_cb)); } static void mm_make_request(struct request_queue *q, struct bio *bio) diff --git a/trunk/drivers/bluetooth/ath3k.c b/trunk/drivers/bluetooth/ath3k.c index 10308cd8a7ed..11f36e502136 100644 --- a/trunk/drivers/bluetooth/ath3k.c +++ b/trunk/drivers/bluetooth/ath3k.c @@ -79,6 +79,7 @@ static 
struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x13d3, 0x3362) }, { USB_DEVICE(0x0CF3, 0xE004) }, { USB_DEVICE(0x0930, 0x0219) }, + { USB_DEVICE(0x0489, 0xe057) }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xE02C) }, @@ -104,6 +105,7 @@ static struct usb_device_id ath3k_blist_tbl[] = { { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU22 with sflash firmware */ { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 }, diff --git a/trunk/drivers/bluetooth/btusb.c b/trunk/drivers/bluetooth/btusb.c index e27221411036..cef3bac1a543 100644 --- a/trunk/drivers/bluetooth/btusb.c +++ b/trunk/drivers/bluetooth/btusb.c @@ -98,6 +98,7 @@ static struct usb_device_id btusb_table[] = { { USB_DEVICE(0x0a5c, 0x21e6) }, { USB_DEVICE(0x0a5c, 0x21e8) }, { USB_DEVICE(0x0a5c, 0x21f3) }, + { USB_DEVICE(0x0a5c, 0x21f4) }, { USB_DEVICE(0x413c, 0x8197) }, /* Foxconn - Hon Hai */ @@ -133,6 +134,7 @@ static struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, diff --git a/trunk/drivers/char/agp/intel-agp.h b/trunk/drivers/char/agp/intel-agp.h index 57226424690c..6ec0fff79bc2 100644 --- a/trunk/drivers/char/agp/intel-agp.h +++ b/trunk/drivers/char/agp/intel-agp.h @@ -64,6 +64,7 @@ #define I830_PTE_SYSTEM_CACHED 0x00000006 /* GT PTE cache control fields */ #define GEN6_PTE_UNCACHED 0x00000002 +#define HSW_PTE_UNCACHED 0x00000000 #define GEN6_PTE_LLC 0x00000004 #define GEN6_PTE_LLC_MLC 0x00000006 #define GEN6_PTE_GFDT 0x00000008 @@ -239,16 +240,45 @@ #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A #define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */ #define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30 -#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */ +#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */ #define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402 #define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412 -#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */ +#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG 0x0422 +#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */ #define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406 #define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416 -#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */ +#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG 0x0426 +#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */ #define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a #define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a -#define PCI_DEVICE_ID_INTEL_HASWELL_SDV 0x0c16 /* SDV */ -#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04 +#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG 0x042a +#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG 0x0C02 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG 0x0C12 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG 0x0C22 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG 0x0C06 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG 0x0C16 +#define 
PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG 0x0C26 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG 0x0C0A +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG 0x0C1A +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG 0x0C2A +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG 0x0A02 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG 0x0A12 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG 0x0A22 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG 0x0A06 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG 0x0A16 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG 0x0A26 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG 0x0A0A +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG 0x0A1A +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG 0x0A2A +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG 0x0D12 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG 0x0D22 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG 0x0D32 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG 0x0D16 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG 0x0D26 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG 0x0D36 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG 0x0D1A +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG 0x0D2A +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG 0x0D3A #endif diff --git a/trunk/drivers/char/agp/intel-gtt.c b/trunk/drivers/char/agp/intel-gtt.c index 9ed92ef5829b..58e32f7c3229 100644 --- a/trunk/drivers/char/agp/intel-gtt.c +++ b/trunk/drivers/char/agp/intel-gtt.c @@ -1156,6 +1156,30 @@ static bool gen6_check_flags(unsigned int flags) return true; } +static void haswell_write_entry(dma_addr_t addr, unsigned int entry, + unsigned int flags) +{ + unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT; + unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT; + u32 pte_flags; + + if (type_mask == AGP_USER_MEMORY) + pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID; + else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) { + pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID; + if (gfdt) + pte_flags |= GEN6_PTE_GFDT; + } else { /* set 'normal'/'cached' to LLC by default */ + pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; + if (gfdt) + pte_flags |= GEN6_PTE_GFDT; + } + + /* gen6 has bit11-4 for physical addr bit39-32 */ + addr |= (addr >> 28) & 0xff0; + writel(addr | pte_flags, intel_private.gtt + entry); +} + static void gen6_write_entry(dma_addr_t addr, unsigned int entry, unsigned int flags) { @@ -1382,6 +1406,15 @@ static const struct intel_gtt_driver sandybridge_gtt_driver = { .check_flags = gen6_check_flags, .chipset_flush = i9xx_chipset_flush, }; +static const struct intel_gtt_driver haswell_gtt_driver = { + .gen = 6, + .setup = i9xx_setup, + .cleanup = gen6_cleanup, + .write_entry = haswell_write_entry, + .dma_mask_size = 40, + .check_flags = gen6_check_flags, + .chipset_flush = i9xx_chipset_flush, +}; static const struct intel_gtt_driver valleyview_gtt_driver = { .gen = 7, .setup = i9xx_setup, @@ -1499,19 +1532,77 @@ static const struct intel_gtt_driver_description { { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG, "ValleyView", &valleyview_gtt_driver }, { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG, - "Haswell", &sandybridge_gtt_driver }, + "Haswell", &haswell_gtt_driver }, { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG, - "Haswell", &sandybridge_gtt_driver }, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG, - "Haswell", &sandybridge_gtt_driver }, + "Haswell", &haswell_gtt_driver }, { 
PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG, - "Haswell", &sandybridge_gtt_driver }, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG, - "Haswell", &sandybridge_gtt_driver }, + "Haswell", &haswell_gtt_driver }, { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG, - "Haswell", &sandybridge_gtt_driver }, - { PCI_DEVICE_ID_INTEL_HASWELL_SDV, - "Haswell", &sandybridge_gtt_driver }, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, { 0, NULL, NULL } }; diff --git a/trunk/drivers/char/hw_random/omap-rng.c b/trunk/drivers/char/hw_random/omap-rng.c index d706bd0e9e80..4fbdceb6f773 100644 --- a/trunk/drivers/char/hw_random/omap-rng.c +++ b/trunk/drivers/char/hw_random/omap-rng.c @@ -160,7 +160,7 @@ static int __exit omap_rng_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int omap_rng_suspend(struct device *dev) { diff --git a/trunk/drivers/char/tpm/tpm_tis.c b/trunk/drivers/char/tpm/tpm_tis.c index 89682fa8801e..c4be3519a587 100644 --- a/trunk/drivers/char/tpm/tpm_tis.c +++ b/trunk/drivers/char/tpm/tpm_tis.c @@ -807,6 +807,7 @@ module_param_string(hid, 
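A note on the intel-gtt hunk above: haswell_write_entry() differs from the Sandybridge path only in the PTE type encoding (the new HSW_PTE_UNCACHED is 0 where GEN6_PTE_UNCACHED is 0x2) and reuses the gen6 trick of folding physical address bits 39:32 into PTE bits 11:4 before the low 32 bits are written out. The user-space sketch below models just that packing step; it omits the MMIO writel(), the macros are copied from the intel-agp.h hunk in this series, and the address value is purely illustrative.

	#include <stdint.h>
	#include <stdio.h>

	#define I810_PTE_VALID		0x00000001
	#define HSW_PTE_UNCACHED	0x00000000	/* Haswell encodes "uncached" as 0 */

	/* Model of the address packing done by haswell_write_entry()/gen6:
	 * the GTT PTE is 32 bits wide, so bits 39:32 of the DMA address are
	 * folded into PTE bits 11:4 before the low 32 bits are kept. */
	static uint32_t hsw_pack_pte(uint64_t addr, uint32_t pte_flags)
	{
		addr |= (addr >> 28) & 0xff0;	/* bits 39:32 -> bits 11:4 */
		return (uint32_t)addr | pte_flags;
	}

	int main(void)
	{
		uint64_t addr = 0x123456000ULL;	/* illustrative 36-bit address */

		/* bit 32 of the address lands in PTE bit 4: result 0x23456011 */
		printf("PTE = 0x%08x\n",
		       hsw_pack_pte(addr, HSW_PTE_UNCACHED | I810_PTE_VALID));
		return 0;
	}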
tpm_pnp_tbl[TIS_HID_USR_IDX].id, MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe"); #endif +#ifdef CONFIG_PM_SLEEP static int tpm_tis_resume(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); @@ -816,6 +817,7 @@ static int tpm_tis_resume(struct device *dev) return tpm_pm_resume(dev); } +#endif static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume); diff --git a/trunk/drivers/clocksource/cs5535-clockevt.c b/trunk/drivers/clocksource/cs5535-clockevt.c index 540795cd0760..d9279385304d 100644 --- a/trunk/drivers/clocksource/cs5535-clockevt.c +++ b/trunk/drivers/clocksource/cs5535-clockevt.c @@ -53,7 +53,7 @@ static struct cs5535_mfgpt_timer *cs5535_event_clock; #define MFGPT_PERIODIC (MFGPT_HZ / HZ) /* - * The MFPGT timers on the CS5536 provide us with suitable timers to use + * The MFGPT timers on the CS5536 provide us with suitable timers to use * as clock event sources - not as good as a HPET or APIC, but certainly * better than the PIT. This isn't a general purpose MFGPT driver, but * a simplified one designed specifically to act as a clock event source. @@ -144,7 +144,7 @@ static int __init cs5535_mfgpt_init(void) timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING); if (!timer) { - printk(KERN_ERR DRV_NAME ": Could not allocate MFPGT timer\n"); + printk(KERN_ERR DRV_NAME ": Could not allocate MFGPT timer\n"); return -ENODEV; } cs5535_event_clock = timer; diff --git a/trunk/drivers/cpufreq/omap-cpufreq.c b/trunk/drivers/cpufreq/omap-cpufreq.c index 17fa04d08be9..b47034e650a5 100644 --- a/trunk/drivers/cpufreq/omap-cpufreq.c +++ b/trunk/drivers/cpufreq/omap-cpufreq.c @@ -218,7 +218,7 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu); - if (atomic_inc_return(&freq_table_users) == 1) + if (!freq_table) result = opp_init_cpufreq_table(mpu_dev, &freq_table); if (result) { @@ -227,6 +227,8 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) goto fail_ck; } + atomic_inc_return(&freq_table_users); + result = cpufreq_frequency_table_cpuinfo(policy, freq_table); if (result) goto fail_table; diff --git a/trunk/drivers/cpufreq/pcc-cpufreq.c b/trunk/drivers/cpufreq/pcc-cpufreq.c index cdc02ac8f41a..503996a94a6a 100644 --- a/trunk/drivers/cpufreq/pcc-cpufreq.c +++ b/trunk/drivers/cpufreq/pcc-cpufreq.c @@ -454,6 +454,7 @@ static int __init pcc_cpufreq_probe(void) mem_resource->address_length); if (pcch_virt_addr == NULL) { pr_debug("probe: could not map shared mem region\n"); + ret = -ENOMEM; goto out_free; } pcch_hdr = pcch_virt_addr; diff --git a/trunk/drivers/cpuidle/coupled.c b/trunk/drivers/cpuidle/coupled.c index 2c9bf2692232..3265844839bf 100644 --- a/trunk/drivers/cpuidle/coupled.c +++ b/trunk/drivers/cpuidle/coupled.c @@ -678,10 +678,22 @@ static int cpuidle_coupled_cpu_notify(struct notifier_block *nb, int cpu = (unsigned long)hcpu; struct cpuidle_device *dev; + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_UP_PREPARE: + case CPU_DOWN_PREPARE: + case CPU_ONLINE: + case CPU_DEAD: + case CPU_UP_CANCELED: + case CPU_DOWN_FAILED: + break; + default: + return NOTIFY_OK; + } + mutex_lock(&cpuidle_lock); dev = per_cpu(cpuidle_devices, cpu); - if (!dev->coupled) + if (!dev || !dev->coupled) goto out; switch (action & ~CPU_TASKS_FROZEN) { diff --git a/trunk/drivers/crypto/caam/jr.c b/trunk/drivers/crypto/caam/jr.c index 53c8c51d5881..93d14070141a 100644 --- a/trunk/drivers/crypto/caam/jr.c +++ 
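The omap-rng and tpm_tis hunks above apply the same convention: system-sleep callbacks are guarded by CONFIG_PM_SLEEP rather than CONFIG_PM, so they are only compiled when sleep support is actually configured, and SIMPLE_DEV_PM_OPS() expands its sleep hooks to nothing in the !CONFIG_PM_SLEEP case, so the guarded functions are never referenced there and no "defined but not used" warnings appear. A minimal driver-side sketch of the pattern follows; the foo_* names are hypothetical, not from the patch.

	#include <linux/device.h>
	#include <linux/pm.h>

	#ifdef CONFIG_PM_SLEEP
	static int foo_suspend(struct device *dev)
	{
		/* quiesce the hardware before the system sleeps */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* restore hardware state on wakeup */
		return 0;
	}
	#endif

	/* SET_SYSTEM_SLEEP_PM_OPS() inside this macro compiles away without
	 * CONFIG_PM_SLEEP, so the dev_pm_ops itself needs no ifdef. */
	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);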
b/trunk/drivers/crypto/caam/jr.c @@ -63,7 +63,7 @@ static void caam_jr_dequeue(unsigned long devarg) head = ACCESS_ONCE(jrp->head); - spin_lock_bh(&jrp->outlock); + spin_lock(&jrp->outlock); sw_idx = tail = jrp->tail; hw_idx = jrp->out_ring_read_index; @@ -115,7 +115,7 @@ static void caam_jr_dequeue(unsigned long devarg) jrp->tail = tail; } - spin_unlock_bh(&jrp->outlock); + spin_unlock(&jrp->outlock); /* Finally, execute user's callback */ usercall(dev, userdesc, userstatus, userarg); @@ -236,14 +236,14 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, return -EIO; } - spin_lock(&jrp->inplock); + spin_lock_bh(&jrp->inplock); head = jrp->head; tail = ACCESS_ONCE(jrp->tail); if (!rd_reg32(&jrp->rregs->inpring_avail) || CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { - spin_unlock(&jrp->inplock); + spin_unlock_bh(&jrp->inplock); dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE); return -EBUSY; } @@ -265,7 +265,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, wr_reg32(&jrp->rregs->inpring_jobadd, 1); - spin_unlock(&jrp->inplock); + spin_unlock_bh(&jrp->inplock); return 0; } diff --git a/trunk/drivers/crypto/caam/key_gen.c b/trunk/drivers/crypto/caam/key_gen.c index 002888185f17..d216cd3cc569 100644 --- a/trunk/drivers/crypto/caam/key_gen.c +++ b/trunk/drivers/crypto/caam/key_gen.c @@ -120,3 +120,4 @@ u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, return ret; } +EXPORT_SYMBOL(gen_split_key); diff --git a/trunk/drivers/crypto/hifn_795x.c b/trunk/drivers/crypto/hifn_795x.c index c9c4befb5a8d..df14358d7fa1 100644 --- a/trunk/drivers/crypto/hifn_795x.c +++ b/trunk/drivers/crypto/hifn_795x.c @@ -821,8 +821,8 @@ static int hifn_register_rng(struct hifn_device *dev) /* * We must wait at least 256 Pk_clk cycles between two reads of the rng. */ - dev->rng_wait_time = DIV_ROUND_UP(NSEC_PER_SEC, dev->pk_clk_freq) * - 256; + dev->rng_wait_time = DIV_ROUND_UP_ULL(NSEC_PER_SEC, + dev->pk_clk_freq) * 256; dev->rng.name = dev->name; dev->rng.data_present = hifn_rng_data_present, diff --git a/trunk/drivers/dma/Kconfig b/trunk/drivers/dma/Kconfig index d45cf1bcbde5..d06ea2950dd9 100644 --- a/trunk/drivers/dma/Kconfig +++ b/trunk/drivers/dma/Kconfig @@ -53,6 +53,7 @@ config AMBA_PL08X bool "ARM PrimeCell PL080 or PL081 support" depends on ARM_AMBA && EXPERIMENTAL select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS help Platform has a PL08x DMAC device which can provide DMA engine support @@ -269,6 +270,7 @@ config DMA_SA11X0 tristate "SA-11x0 DMA support" depends on ARCH_SA1100 select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS help Support the DMA engine found on Intel StrongARM SA-1100 and SA-1110 SoCs. This DMA engine can only be used with on-chip @@ -284,9 +286,18 @@ config MMP_TDMA Say Y here if you enabled MMP ADMA, otherwise say N. 
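On the caam/jr.c change above: caam_jr_dequeue() runs from a tasklet, i.e. already in bottom-half context, so its output-ring lock is now taken with plain spin_lock(), while caam_jr_enqueue() can run in process context and now takes the input-ring lock with spin_lock_bh(). The fragment below is only a generic sketch of that locking convention with hypothetical names; it is not the caam code, and the caam paths use two separate ring locks.

	#include <linux/interrupt.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(ring_lock);

	/* Process context: disable bottom halves locally while holding the
	 * lock, otherwise the tasklet below could deadlock on this CPU. */
	static void producer_enqueue(void)
	{
		spin_lock_bh(&ring_lock);
		/* ... add an entry to the ring ... */
		spin_unlock_bh(&ring_lock);
	}

	/* Tasklet (softirq) context: bottom halves are already disabled here,
	 * so the plain lock/unlock forms are sufficient. */
	static void consumer_tasklet(unsigned long data)
	{
		spin_lock(&ring_lock);
		/* ... pop completed entries from the ring ... */
		spin_unlock(&ring_lock);
	}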
+config DMA_OMAP + tristate "OMAP DMA support" + depends on ARCH_OMAP + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + config DMA_ENGINE bool +config DMA_VIRTUAL_CHANNELS + tristate + comment "DMA Clients" depends on DMA_ENGINE diff --git a/trunk/drivers/dma/Makefile b/trunk/drivers/dma/Makefile index 640356add0a3..4cf6b128ab9a 100644 --- a/trunk/drivers/dma/Makefile +++ b/trunk/drivers/dma/Makefile @@ -2,6 +2,7 @@ ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG obj-$(CONFIG_DMA_ENGINE) += dmaengine.o +obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o obj-$(CONFIG_NET_DMA) += iovlock.o obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o obj-$(CONFIG_DMATEST) += dmatest.o @@ -30,3 +31,4 @@ obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o +obj-$(CONFIG_DMA_OMAP) += omap-dma.o diff --git a/trunk/drivers/dma/amba-pl08x.c b/trunk/drivers/dma/amba-pl08x.c index 49ecbbb8932d..6fbeebb9486f 100644 --- a/trunk/drivers/dma/amba-pl08x.c +++ b/trunk/drivers/dma/amba-pl08x.c @@ -86,10 +86,12 @@ #include #include "dmaengine.h" +#include "virt-dma.h" #define DRIVER_NAME "pl08xdmac" static struct amba_driver pl08x_amba_driver; +struct pl08x_driver_data; /** * struct vendor_data - vendor-specific config parameters for PL08x derivatives @@ -118,6 +120,123 @@ struct pl08x_lli { u32 cctl; }; +/** + * struct pl08x_bus_data - information of source or destination + * busses for a transfer + * @addr: current address + * @maxwidth: the maximum width of a transfer on this bus + * @buswidth: the width of this bus in bytes: 1, 2 or 4 + */ +struct pl08x_bus_data { + dma_addr_t addr; + u8 maxwidth; + u8 buswidth; +}; + +/** + * struct pl08x_phy_chan - holder for the physical channels + * @id: physical index to this channel + * @lock: a lock to use when altering an instance of this struct + * @serving: the virtual channel currently being served by this physical + * channel + * @locked: channel unavailable for the system, e.g. dedicated to secure + * world + */ +struct pl08x_phy_chan { + unsigned int id; + void __iomem *base; + spinlock_t lock; + struct pl08x_dma_chan *serving; + bool locked; +}; + +/** + * struct pl08x_sg - structure containing data per sg + * @src_addr: src address of sg + * @dst_addr: dst address of sg + * @len: transfer len in bytes + * @node: node for txd's dsg_list + */ +struct pl08x_sg { + dma_addr_t src_addr; + dma_addr_t dst_addr; + size_t len; + struct list_head node; +}; + +/** + * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor + * @vd: virtual DMA descriptor + * @dsg_list: list of children sg's + * @llis_bus: DMA memory address (physical) start for the LLIs + * @llis_va: virtual memory address start for the LLIs + * @cctl: control reg values for current txd + * @ccfg: config reg values for current txd + * @done: this marks completed descriptors, which should not have their + * mux released. + */ +struct pl08x_txd { + struct virt_dma_desc vd; + struct list_head dsg_list; + dma_addr_t llis_bus; + struct pl08x_lli *llis_va; + /* Default cctl value for LLIs */ + u32 cctl; + /* + * Settings to be put into the physical channel when we + * trigger this txd. Other registers are in llis_va[0]. 
+ */ + u32 ccfg; + bool done; +}; + +/** + * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel + * states + * @PL08X_CHAN_IDLE: the channel is idle + * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport + * channel and is running a transfer on it + * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport + * channel, but the transfer is currently paused + * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport + * channel to become available (only pertains to memcpy channels) + */ +enum pl08x_dma_chan_state { + PL08X_CHAN_IDLE, + PL08X_CHAN_RUNNING, + PL08X_CHAN_PAUSED, + PL08X_CHAN_WAITING, +}; + +/** + * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel + * @vc: wrappped virtual channel + * @phychan: the physical channel utilized by this channel, if there is one + * @name: name of channel + * @cd: channel platform data + * @runtime_addr: address for RX/TX according to the runtime config + * @at: active transaction on this channel + * @lock: a lock for this channel data + * @host: a pointer to the host (internal use) + * @state: whether the channel is idle, paused, running etc + * @slave: whether this channel is a device (slave) or for memcpy + * @signal: the physical DMA request signal which this channel is using + * @mux_use: count of descriptors using this DMA request signal setting + */ +struct pl08x_dma_chan { + struct virt_dma_chan vc; + struct pl08x_phy_chan *phychan; + const char *name; + const struct pl08x_channel_data *cd; + struct dma_slave_config cfg; + struct pl08x_txd *at; + struct pl08x_driver_data *host; + enum pl08x_dma_chan_state state; + bool slave; + int signal; + unsigned mux_use; +}; + /** * struct pl08x_driver_data - the local state holder for the PL08x * @slave: slave engine for this instance @@ -128,7 +247,6 @@ struct pl08x_lli { * @pd: platform data passed in from the platform/machine * @phy_chans: array of data for the physical channels * @pool: a pool for the LLI descriptors - * @pool_ctr: counter of LLIs in the pool * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI * fetches * @mem_buses: set to indicate memory transfers on AHB2. @@ -143,10 +261,8 @@ struct pl08x_driver_data { struct pl08x_platform_data *pd; struct pl08x_phy_chan *phy_chans; struct dma_pool *pool; - int pool_ctr; u8 lli_buses; u8 mem_buses; - spinlock_t lock; }; /* @@ -162,12 +278,51 @@ struct pl08x_driver_data { static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan) { - return container_of(chan, struct pl08x_dma_chan, chan); + return container_of(chan, struct pl08x_dma_chan, vc.chan); } static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx) { - return container_of(tx, struct pl08x_txd, tx); + return container_of(tx, struct pl08x_txd, vd.tx); +} + +/* + * Mux handling. + * + * This gives us the DMA request input to the PL08x primecell which the + * peripheral described by the channel data will be routed to, possibly + * via a board/SoC specific external MUX. One important point to note + * here is that this does not depend on the physical channel. 
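Both this amba-pl08x conversion and the new omap-dma driver later in the series follow the same virt-dma client pattern: the driver embeds struct virt_dma_chan in its channel and struct virt_dma_desc in its descriptor, recovers its own types with container_of(), supplies a desc_free callback, and registers channels with vchan_init(). A stripped-down sketch of that skeleton is shown below, with hypothetical foo_* names, assuming the virt-dma.h helpers added by this series; prepare callbacks then return vchan_tx_prep(&fc->vc, &d->vd, flags), exactly as the drivers here do.

	#include <linux/dmaengine.h>
	#include <linux/slab.h>

	#include "virt-dma.h"

	struct foo_desc {
		struct virt_dma_desc vd;	/* the dma_async_tx_descriptor lives in vd.tx */
		size_t len;			/* driver-specific transfer description */
	};

	struct foo_chan {
		struct virt_dma_chan vc;	/* wraps struct dma_chan */
		struct foo_desc *at;		/* descriptor currently on the hardware */
	};

	static inline struct foo_chan *to_foo_chan(struct dma_chan *chan)
	{
		return container_of(chan, struct foo_chan, vc.chan);
	}

	static inline struct foo_desc *to_foo_desc(struct dma_async_tx_descriptor *tx)
	{
		return container_of(tx, struct foo_desc, vd.tx);
	}

	/* Called by the virt-dma core once a descriptor is complete or discarded. */
	static void foo_desc_free(struct virt_dma_desc *vd)
	{
		kfree(container_of(vd, struct foo_desc, vd));
	}

	static void foo_init_channel(struct foo_chan *fc, struct dma_device *dmadev)
	{
		fc->vc.desc_free = foo_desc_free;
		vchan_init(&fc->vc, dmadev);	/* adds fc->vc.chan to dmadev->channels */
	}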
+ */ +static int pl08x_request_mux(struct pl08x_dma_chan *plchan) +{ + const struct pl08x_platform_data *pd = plchan->host->pd; + int ret; + + if (plchan->mux_use++ == 0 && pd->get_signal) { + ret = pd->get_signal(plchan->cd); + if (ret < 0) { + plchan->mux_use = 0; + return ret; + } + + plchan->signal = ret; + } + return 0; +} + +static void pl08x_release_mux(struct pl08x_dma_chan *plchan) +{ + const struct pl08x_platform_data *pd = plchan->host->pd; + + if (plchan->signal >= 0) { + WARN_ON(plchan->mux_use == 0); + + if (--plchan->mux_use == 0 && pd->put_signal) { + pd->put_signal(plchan->cd, plchan->signal); + plchan->signal = -1; + } + } } /* @@ -189,20 +344,25 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch) * been set when the LLIs were constructed. Poke them into the hardware * and start the transfer. */ -static void pl08x_start_txd(struct pl08x_dma_chan *plchan, - struct pl08x_txd *txd) +static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan) { struct pl08x_driver_data *pl08x = plchan->host; struct pl08x_phy_chan *phychan = plchan->phychan; - struct pl08x_lli *lli = &txd->llis_va[0]; + struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc); + struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); + struct pl08x_lli *lli; u32 val; + list_del(&txd->vd.node); + plchan->at = txd; /* Wait for channel inactive */ while (pl08x_phy_channel_busy(phychan)) cpu_relax(); + lli = &txd->llis_va[0]; + dev_vdbg(&pl08x->adev->dev, "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, " "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n", @@ -311,10 +471,8 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) { struct pl08x_phy_chan *ch; struct pl08x_txd *txd; - unsigned long flags; size_t bytes = 0; - spin_lock_irqsave(&plchan->lock, flags); ch = plchan->phychan; txd = plchan->at; @@ -354,18 +512,6 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) } } - /* Sum up all queued transactions */ - if (!list_empty(&plchan->pend_list)) { - struct pl08x_txd *txdi; - list_for_each_entry(txdi, &plchan->pend_list, node) { - struct pl08x_sg *dsg; - list_for_each_entry(dsg, &txd->dsg_list, node) - bytes += dsg->len; - } - } - - spin_unlock_irqrestore(&plchan->lock, flags); - return bytes; } @@ -391,7 +537,6 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, if (!ch->locked && !ch->serving) { ch->serving = virt_chan; - ch->signal = -1; spin_unlock_irqrestore(&ch->lock, flags); break; } @@ -404,25 +549,114 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, return NULL; } - pm_runtime_get_sync(&pl08x->adev->dev); return ch; } +/* Mark the physical channel as free. Note, this write is atomic. */ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, struct pl08x_phy_chan *ch) { - unsigned long flags; + ch->serving = NULL; +} + +/* + * Try to allocate a physical channel. When successful, assign it to + * this virtual channel, and initiate the next descriptor. The + * virtual channel lock must be held at this point. 
+ */ +static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan) +{ + struct pl08x_driver_data *pl08x = plchan->host; + struct pl08x_phy_chan *ch; - spin_lock_irqsave(&ch->lock, flags); + ch = pl08x_get_phy_channel(pl08x, plchan); + if (!ch) { + dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name); + plchan->state = PL08X_CHAN_WAITING; + return; + } - /* Stop the channel and clear its interrupts */ - pl08x_terminate_phy_chan(pl08x, ch); + dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n", + ch->id, plchan->name); - pm_runtime_put(&pl08x->adev->dev); + plchan->phychan = ch; + plchan->state = PL08X_CHAN_RUNNING; + pl08x_start_next_txd(plchan); +} - /* Mark it as free */ - ch->serving = NULL; - spin_unlock_irqrestore(&ch->lock, flags); +static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch, + struct pl08x_dma_chan *plchan) +{ + struct pl08x_driver_data *pl08x = plchan->host; + + dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n", + ch->id, plchan->name); + + /* + * We do this without taking the lock; we're really only concerned + * about whether this pointer is NULL or not, and we're guaranteed + * that this will only be called when it _already_ is non-NULL. + */ + ch->serving = plchan; + plchan->phychan = ch; + plchan->state = PL08X_CHAN_RUNNING; + pl08x_start_next_txd(plchan); +} + +/* + * Free a physical DMA channel, potentially reallocating it to another + * virtual channel if we have any pending. + */ +static void pl08x_phy_free(struct pl08x_dma_chan *plchan) +{ + struct pl08x_driver_data *pl08x = plchan->host; + struct pl08x_dma_chan *p, *next; + + retry: + next = NULL; + + /* Find a waiting virtual channel for the next transfer. */ + list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node) + if (p->state == PL08X_CHAN_WAITING) { + next = p; + break; + } + + if (!next) { + list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node) + if (p->state == PL08X_CHAN_WAITING) { + next = p; + break; + } + } + + /* Ensure that the physical channel is stopped */ + pl08x_terminate_phy_chan(pl08x, plchan->phychan); + + if (next) { + bool success; + + /* + * Eww. We know this isn't going to deadlock + * but lockdep probably doesn't. + */ + spin_lock(&next->vc.lock); + /* Re-check the state now that we have the lock */ + success = next->state == PL08X_CHAN_WAITING; + if (success) + pl08x_phy_reassign_start(plchan->phychan, next); + spin_unlock(&next->vc.lock); + + /* If the state changed, try to find another channel */ + if (!success) + goto retry; + } else { + /* No more jobs, so free up the physical channel */ + pl08x_put_phy_channel(pl08x, plchan->phychan); + } + + plchan->phychan = NULL; + plchan->state = PL08X_CHAN_IDLE; } /* @@ -585,8 +819,6 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, return 0; } - pl08x->pool_ctr++; - bd.txd = txd; bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? 
PL080_LLI_LM_AHB2 : 0; cctl = txd->cctl; @@ -802,18 +1034,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, return num_llis; } -/* You should call this with the struct pl08x lock held */ static void pl08x_free_txd(struct pl08x_driver_data *pl08x, struct pl08x_txd *txd) { struct pl08x_sg *dsg, *_dsg; - /* Free the LLI */ if (txd->llis_va) dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus); - pl08x->pool_ctr--; - list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) { list_del(&dsg->node); kfree(dsg); @@ -822,133 +1050,75 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x, kfree(txd); } -static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, - struct pl08x_dma_chan *plchan) +static void pl08x_unmap_buffers(struct pl08x_txd *txd) { - struct pl08x_txd *txdi = NULL; - struct pl08x_txd *next; - - if (!list_empty(&plchan->pend_list)) { - list_for_each_entry_safe(txdi, - next, &plchan->pend_list, node) { - list_del(&txdi->node); - pl08x_free_txd(pl08x, txdi); + struct device *dev = txd->vd.tx.chan->device->dev; + struct pl08x_sg *dsg; + + if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) + list_for_each_entry(dsg, &txd->dsg_list, node) + dma_unmap_single(dev, dsg->src_addr, dsg->len, + DMA_TO_DEVICE); + else { + list_for_each_entry(dsg, &txd->dsg_list, node) + dma_unmap_page(dev, dsg->src_addr, dsg->len, + DMA_TO_DEVICE); } } + if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) + list_for_each_entry(dsg, &txd->dsg_list, node) + dma_unmap_single(dev, dsg->dst_addr, dsg->len, + DMA_FROM_DEVICE); + else + list_for_each_entry(dsg, &txd->dsg_list, node) + dma_unmap_page(dev, dsg->dst_addr, dsg->len, + DMA_FROM_DEVICE); + } } -/* - * The DMA ENGINE API - */ -static int pl08x_alloc_chan_resources(struct dma_chan *chan) +static void pl08x_desc_free(struct virt_dma_desc *vd) { - return 0; -} + struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); + struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); -static void pl08x_free_chan_resources(struct dma_chan *chan) -{ + if (!plchan->slave) + pl08x_unmap_buffers(txd); + + if (!txd->done) + pl08x_release_mux(plchan); + + pl08x_free_txd(plchan->host, txd); } -/* - * This should be called with the channel plchan->lock held - */ -static int prep_phy_channel(struct pl08x_dma_chan *plchan, - struct pl08x_txd *txd) +static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, + struct pl08x_dma_chan *plchan) { - struct pl08x_driver_data *pl08x = plchan->host; - struct pl08x_phy_chan *ch; - int ret; - - /* Check if we already have a channel */ - if (plchan->phychan) { - ch = plchan->phychan; - goto got_channel; - } + LIST_HEAD(head); + struct pl08x_txd *txd; - ch = pl08x_get_phy_channel(pl08x, plchan); - if (!ch) { - /* No physical channel available, cope with it */ - dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name); - return -EBUSY; - } + vchan_get_all_descriptors(&plchan->vc, &head); - /* - * OK we have a physical channel: for memcpy() this is all we - * need, but for slaves the physical signals may be muxed! - * Can the platform allow us to use this channel? 
- */ - if (plchan->slave && pl08x->pd->get_signal) { - ret = pl08x->pd->get_signal(plchan); - if (ret < 0) { - dev_dbg(&pl08x->adev->dev, - "unable to use physical channel %d for transfer on %s due to platform restrictions\n", - ch->id, plchan->name); - /* Release physical channel & return */ - pl08x_put_phy_channel(pl08x, ch); - return -EBUSY; - } - ch->signal = ret; + while (!list_empty(&head)) { + txd = list_first_entry(&head, struct pl08x_txd, vd.node); + list_del(&txd->vd.node); + pl08x_desc_free(&txd->vd); } - - plchan->phychan = ch; - dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n", - ch->id, - ch->signal, - plchan->name); - -got_channel: - /* Assign the flow control signal to this channel */ - if (txd->direction == DMA_MEM_TO_DEV) - txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT; - else if (txd->direction == DMA_DEV_TO_MEM) - txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT; - - plchan->phychan_hold++; - - return 0; } -static void release_phy_channel(struct pl08x_dma_chan *plchan) +/* + * The DMA ENGINE API + */ +static int pl08x_alloc_chan_resources(struct dma_chan *chan) { - struct pl08x_driver_data *pl08x = plchan->host; - - if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) { - pl08x->pd->put_signal(plchan); - plchan->phychan->signal = -1; - } - pl08x_put_phy_channel(pl08x, plchan->phychan); - plchan->phychan = NULL; + return 0; } -static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx) +static void pl08x_free_chan_resources(struct dma_chan *chan) { - struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan); - struct pl08x_txd *txd = to_pl08x_txd(tx); - unsigned long flags; - dma_cookie_t cookie; - - spin_lock_irqsave(&plchan->lock, flags); - cookie = dma_cookie_assign(tx); - - /* Put this onto the pending list */ - list_add_tail(&txd->node, &plchan->pend_list); - - /* - * If there was no physical channel available for this memcpy, - * stack the request up and indicate that the channel is waiting - * for a free physical channel. - */ - if (!plchan->slave && !plchan->phychan) { - /* Do this memcpy whenever there is a channel ready */ - plchan->state = PL08X_CHAN_WAITING; - plchan->waiting = txd; - } else { - plchan->phychan_hold--; - } - - spin_unlock_irqrestore(&plchan->lock, flags); - - return cookie; + /* Ensure all queued descriptors are freed */ + vchan_free_chan_resources(to_virt_chan(chan)); } static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( @@ -968,23 +1138,53 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); + struct virt_dma_desc *vd; + unsigned long flags; enum dma_status ret; + size_t bytes = 0; ret = dma_cookie_status(chan, cookie, txstate); if (ret == DMA_SUCCESS) return ret; + /* + * There's no point calculating the residue if there's + * no txstate to store the value. 
+ */ + if (!txstate) { + if (plchan->state == PL08X_CHAN_PAUSED) + ret = DMA_PAUSED; + return ret; + } + + spin_lock_irqsave(&plchan->vc.lock, flags); + ret = dma_cookie_status(chan, cookie, txstate); + if (ret != DMA_SUCCESS) { + vd = vchan_find_desc(&plchan->vc, cookie); + if (vd) { + /* On the issued list, so hasn't been processed yet */ + struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); + struct pl08x_sg *dsg; + + list_for_each_entry(dsg, &txd->dsg_list, node) + bytes += dsg->len; + } else { + bytes = pl08x_getbytes_chan(plchan); + } + } + spin_unlock_irqrestore(&plchan->vc.lock, flags); + /* * This cookie not complete yet * Get number of bytes left in the active transactions and queue */ - dma_set_residue(txstate, pl08x_getbytes_chan(plchan)); + dma_set_residue(txstate, bytes); - if (plchan->state == PL08X_CHAN_PAUSED) - return DMA_PAUSED; + if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS) + ret = DMA_PAUSED; /* Whether waiting or running, we're in progress */ - return DMA_IN_PROGRESS; + return ret; } /* PrimeCell DMA extension */ @@ -1080,38 +1280,14 @@ static u32 pl08x_burst(u32 maxburst) return burst_sizes[i].reg; } -static int dma_set_runtime_config(struct dma_chan *chan, - struct dma_slave_config *config) +static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan, + enum dma_slave_buswidth addr_width, u32 maxburst) { - struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); - struct pl08x_driver_data *pl08x = plchan->host; - enum dma_slave_buswidth addr_width; - u32 width, burst, maxburst; - u32 cctl = 0; - - if (!plchan->slave) - return -EINVAL; - - /* Transfer direction */ - plchan->runtime_direction = config->direction; - if (config->direction == DMA_MEM_TO_DEV) { - addr_width = config->dst_addr_width; - maxburst = config->dst_maxburst; - } else if (config->direction == DMA_DEV_TO_MEM) { - addr_width = config->src_addr_width; - maxburst = config->src_maxburst; - } else { - dev_err(&pl08x->adev->dev, - "bad runtime_config: alien transfer direction\n"); - return -EINVAL; - } + u32 width, burst, cctl = 0; width = pl08x_width(addr_width); - if (width == ~0) { - dev_err(&pl08x->adev->dev, - "bad runtime_config: alien address width\n"); - return -EINVAL; - } + if (width == ~0) + return ~0; cctl |= width << PL080_CONTROL_SWIDTH_SHIFT; cctl |= width << PL080_CONTROL_DWIDTH_SHIFT; @@ -1128,28 +1304,23 @@ static int dma_set_runtime_config(struct dma_chan *chan, cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; - plchan->device_fc = config->device_fc; + return pl08x_cctl(cctl); +} - if (plchan->runtime_direction == DMA_DEV_TO_MEM) { - plchan->src_addr = config->src_addr; - plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR | - pl08x_select_bus(plchan->cd->periph_buses, - pl08x->mem_buses); - } else { - plchan->dst_addr = config->dst_addr; - plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR | - pl08x_select_bus(pl08x->mem_buses, - plchan->cd->periph_buses); - } +static int dma_set_runtime_config(struct dma_chan *chan, + struct dma_slave_config *config) +{ + struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); - dev_dbg(&pl08x->adev->dev, - "configured channel %s (%s) for %s, data width %d, " - "maxburst %d words, LE, CCTL=0x%08x\n", - dma_chan_name(chan), plchan->name, - (config->direction == DMA_DEV_TO_MEM) ? 
"RX" : "TX", - addr_width, - maxburst, - cctl); + if (!plchan->slave) + return -EINVAL; + + /* Reject definitely invalid configurations */ + if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || + config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) + return -EINVAL; + + plchan->cfg = *config; return 0; } @@ -1163,95 +1334,19 @@ static void pl08x_issue_pending(struct dma_chan *chan) struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); unsigned long flags; - spin_lock_irqsave(&plchan->lock, flags); - /* Something is already active, or we're waiting for a channel... */ - if (plchan->at || plchan->state == PL08X_CHAN_WAITING) { - spin_unlock_irqrestore(&plchan->lock, flags); - return; - } - - /* Take the first element in the queue and execute it */ - if (!list_empty(&plchan->pend_list)) { - struct pl08x_txd *next; - - next = list_first_entry(&plchan->pend_list, - struct pl08x_txd, - node); - list_del(&next->node); - plchan->state = PL08X_CHAN_RUNNING; - - pl08x_start_txd(plchan, next); + spin_lock_irqsave(&plchan->vc.lock, flags); + if (vchan_issue_pending(&plchan->vc)) { + if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING) + pl08x_phy_alloc_and_start(plchan); } - - spin_unlock_irqrestore(&plchan->lock, flags); -} - -static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, - struct pl08x_txd *txd) -{ - struct pl08x_driver_data *pl08x = plchan->host; - unsigned long flags; - int num_llis, ret; - - num_llis = pl08x_fill_llis_for_desc(pl08x, txd); - if (!num_llis) { - spin_lock_irqsave(&plchan->lock, flags); - pl08x_free_txd(pl08x, txd); - spin_unlock_irqrestore(&plchan->lock, flags); - return -EINVAL; - } - - spin_lock_irqsave(&plchan->lock, flags); - - /* - * See if we already have a physical channel allocated, - * else this is the time to try to get one. - */ - ret = prep_phy_channel(plchan, txd); - if (ret) { - /* - * No physical channel was available. - * - * memcpy transfers can be sorted out at submission time. - * - * Slave transfers may have been denied due to platform - * channel muxing restrictions. Since there is no guarantee - * that this will ever be resolved, and the signal must be - * acquired AFTER acquiring the physical channel, we will let - * them be NACK:ed with -EBUSY here. The drivers can retry - * the prep() call if they are eager on doing this using DMA. - */ - if (plchan->slave) { - pl08x_free_txd_list(pl08x, plchan); - pl08x_free_txd(pl08x, txd); - spin_unlock_irqrestore(&plchan->lock, flags); - return -EBUSY; - } - } else - /* - * Else we're all set, paused and ready to roll, status - * will switch to PL08X_CHAN_RUNNING when we call - * issue_pending(). If there is something running on the - * channel already we don't change its state. 
- */ - if (plchan->state == PL08X_CHAN_IDLE) - plchan->state = PL08X_CHAN_PAUSED; - - spin_unlock_irqrestore(&plchan->lock, flags); - - return 0; + spin_unlock_irqrestore(&plchan->vc.lock, flags); } -static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, - unsigned long flags) +static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan) { struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT); if (txd) { - dma_async_tx_descriptor_init(&txd->tx, &plchan->chan); - txd->tx.flags = flags; - txd->tx.tx_submit = pl08x_tx_submit; - INIT_LIST_HEAD(&txd->node); INIT_LIST_HEAD(&txd->dsg_list); /* Always enable error and terminal interrupts */ @@ -1274,7 +1369,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( struct pl08x_sg *dsg; int ret; - txd = pl08x_get_txd(plchan, flags); + txd = pl08x_get_txd(plchan); if (!txd) { dev_err(&pl08x->adev->dev, "%s no memory for descriptor\n", __func__); @@ -1290,14 +1385,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( } list_add_tail(&dsg->node, &txd->dsg_list); - txd->direction = DMA_NONE; dsg->src_addr = src; dsg->dst_addr = dest; dsg->len = len; /* Set platform data for m2m */ txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; - txd->cctl = pl08x->pd->memcpy_channel.cctl & + txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy & ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); /* Both to be incremented or the code will break */ @@ -1307,11 +1401,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( txd->cctl |= pl08x_select_bus(pl08x->mem_buses, pl08x->mem_buses); - ret = pl08x_prep_channel_resources(plchan, txd); - if (ret) + ret = pl08x_fill_llis_for_desc(plchan->host, txd); + if (!ret) { + pl08x_free_txd(pl08x, txd); return NULL; + } - return &txd->tx; + return vchan_tx_prep(&plchan->vc, &txd->vd, flags); } static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( @@ -1324,36 +1420,40 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( struct pl08x_txd *txd; struct pl08x_sg *dsg; struct scatterlist *sg; + enum dma_slave_buswidth addr_width; dma_addr_t slave_addr; int ret, tmp; + u8 src_buses, dst_buses; + u32 maxburst, cctl; dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", __func__, sg_dma_len(sgl), plchan->name); - txd = pl08x_get_txd(plchan, flags); + txd = pl08x_get_txd(plchan); if (!txd) { dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); return NULL; } - if (direction != plchan->runtime_direction) - dev_err(&pl08x->adev->dev, "%s DMA setup does not match " - "the direction configured for the PrimeCell\n", - __func__); - /* * Set up addresses, the PrimeCell configured address * will take precedence since this may configure the * channel target address dynamically at runtime. 
*/ - txd->direction = direction; - if (direction == DMA_MEM_TO_DEV) { - txd->cctl = plchan->dst_cctl; - slave_addr = plchan->dst_addr; + cctl = PL080_CONTROL_SRC_INCR; + slave_addr = plchan->cfg.dst_addr; + addr_width = plchan->cfg.dst_addr_width; + maxburst = plchan->cfg.dst_maxburst; + src_buses = pl08x->mem_buses; + dst_buses = plchan->cd->periph_buses; } else if (direction == DMA_DEV_TO_MEM) { - txd->cctl = plchan->src_cctl; - slave_addr = plchan->src_addr; + cctl = PL080_CONTROL_DST_INCR; + slave_addr = plchan->cfg.src_addr; + addr_width = plchan->cfg.src_addr_width; + maxburst = plchan->cfg.src_maxburst; + src_buses = plchan->cd->periph_buses; + dst_buses = pl08x->mem_buses; } else { pl08x_free_txd(pl08x, txd); dev_err(&pl08x->adev->dev, @@ -1361,7 +1461,17 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( return NULL; } - if (plchan->device_fc) + cctl |= pl08x_get_cctl(plchan, addr_width, maxburst); + if (cctl == ~0) { + pl08x_free_txd(pl08x, txd); + dev_err(&pl08x->adev->dev, + "DMA slave configuration botched?\n"); + return NULL; + } + + txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses); + + if (plchan->cfg.device_fc) tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER : PL080_FLOW_PER2MEM_PER; else @@ -1370,9 +1480,28 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT; + ret = pl08x_request_mux(plchan); + if (ret < 0) { + pl08x_free_txd(pl08x, txd); + dev_dbg(&pl08x->adev->dev, + "unable to mux for transfer on %s due to platform restrictions\n", + plchan->name); + return NULL; + } + + dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n", + plchan->signal, plchan->name); + + /* Assign the flow control signal to this channel */ + if (direction == DMA_MEM_TO_DEV) + txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT; + else + txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT; + for_each_sg(sgl, sg, sg_len, tmp) { dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); if (!dsg) { + pl08x_release_mux(plchan); pl08x_free_txd(pl08x, txd); dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n", __func__); @@ -1390,11 +1519,14 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( } } - ret = pl08x_prep_channel_resources(plchan, txd); - if (ret) + ret = pl08x_fill_llis_for_desc(plchan->host, txd); + if (!ret) { + pl08x_release_mux(plchan); + pl08x_free_txd(pl08x, txd); return NULL; + } - return &txd->tx; + return vchan_tx_prep(&plchan->vc, &txd->vd, flags); } static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, @@ -1415,9 +1547,9 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, * Anything succeeds on channels with no physical allocation and * no queued transfers. 
*/ - spin_lock_irqsave(&plchan->lock, flags); + spin_lock_irqsave(&plchan->vc.lock, flags); if (!plchan->phychan && !plchan->at) { - spin_unlock_irqrestore(&plchan->lock, flags); + spin_unlock_irqrestore(&plchan->vc.lock, flags); return 0; } @@ -1426,18 +1558,15 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, plchan->state = PL08X_CHAN_IDLE; if (plchan->phychan) { - pl08x_terminate_phy_chan(pl08x, plchan->phychan); - /* * Mark physical channel as free and free any slave * signal */ - release_phy_channel(plchan); - plchan->phychan_hold = 0; + pl08x_phy_free(plchan); } /* Dequeue jobs and free LLIs */ if (plchan->at) { - pl08x_free_txd(pl08x, plchan->at); + pl08x_desc_free(&plchan->at->vd); plchan->at = NULL; } /* Dequeue jobs not yet fired as well */ @@ -1457,7 +1586,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, break; } - spin_unlock_irqrestore(&plchan->lock, flags); + spin_unlock_irqrestore(&plchan->vc.lock, flags); return ret; } @@ -1494,123 +1623,6 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG); } -static void pl08x_unmap_buffers(struct pl08x_txd *txd) -{ - struct device *dev = txd->tx.chan->device->dev; - struct pl08x_sg *dsg; - - if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_single(dev, dsg->src_addr, dsg->len, - DMA_TO_DEVICE); - else { - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_page(dev, dsg->src_addr, dsg->len, - DMA_TO_DEVICE); - } - } - if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_single(dev, dsg->dst_addr, dsg->len, - DMA_FROM_DEVICE); - else - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_page(dev, dsg->dst_addr, dsg->len, - DMA_FROM_DEVICE); - } -} - -static void pl08x_tasklet(unsigned long data) -{ - struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; - struct pl08x_driver_data *pl08x = plchan->host; - struct pl08x_txd *txd; - unsigned long flags; - - spin_lock_irqsave(&plchan->lock, flags); - - txd = plchan->at; - plchan->at = NULL; - - if (txd) { - /* Update last completed */ - dma_cookie_complete(&txd->tx); - } - - /* If a new descriptor is queued, set it up plchan->at is NULL here */ - if (!list_empty(&plchan->pend_list)) { - struct pl08x_txd *next; - - next = list_first_entry(&plchan->pend_list, - struct pl08x_txd, - node); - list_del(&next->node); - - pl08x_start_txd(plchan, next); - } else if (plchan->phychan_hold) { - /* - * This channel is still in use - we have a new txd being - * prepared and will soon be queued. Don't give up the - * physical channel. - */ - } else { - struct pl08x_dma_chan *waiting = NULL; - - /* - * No more jobs, so free up the physical channel - * Free any allocated signal on slave transfers too - */ - release_phy_channel(plchan); - plchan->state = PL08X_CHAN_IDLE; - - /* - * And NOW before anyone else can grab that free:d up - * physical channel, see if there is some memcpy pending - * that seriously needs to start because of being stacked - * up while we were choking the physical channels with data. 
- */ - list_for_each_entry(waiting, &pl08x->memcpy.channels, - chan.device_node) { - if (waiting->state == PL08X_CHAN_WAITING && - waiting->waiting != NULL) { - int ret; - - /* This should REALLY not fail now */ - ret = prep_phy_channel(waiting, - waiting->waiting); - BUG_ON(ret); - waiting->phychan_hold--; - waiting->state = PL08X_CHAN_RUNNING; - waiting->waiting = NULL; - pl08x_issue_pending(&waiting->chan); - break; - } - } - } - - spin_unlock_irqrestore(&plchan->lock, flags); - - if (txd) { - dma_async_tx_callback callback = txd->tx.callback; - void *callback_param = txd->tx.callback_param; - - /* Don't try to unmap buffers on slave channels */ - if (!plchan->slave) - pl08x_unmap_buffers(txd); - - /* Free the descriptor */ - spin_lock_irqsave(&plchan->lock, flags); - pl08x_free_txd(pl08x, txd); - spin_unlock_irqrestore(&plchan->lock, flags); - - /* Callback to signal completion */ - if (callback) - callback(callback_param); - } -} - static irqreturn_t pl08x_irq(int irq, void *dev) { struct pl08x_driver_data *pl08x = dev; @@ -1635,6 +1647,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev) /* Locate physical channel */ struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; struct pl08x_dma_chan *plchan = phychan->serving; + struct pl08x_txd *tx; if (!plchan) { dev_err(&pl08x->adev->dev, @@ -1643,8 +1656,29 @@ static irqreturn_t pl08x_irq(int irq, void *dev) continue; } - /* Schedule tasklet on this channel */ - tasklet_schedule(&plchan->tasklet); + spin_lock(&plchan->vc.lock); + tx = plchan->at; + if (tx) { + plchan->at = NULL; + /* + * This descriptor is done, release its mux + * reservation. + */ + pl08x_release_mux(plchan); + tx->done = true; + vchan_cookie_complete(&tx->vd); + + /* + * And start the next descriptor (if any), + * otherwise free this channel. 
+ */ + if (vchan_next_desc(&plchan->vc)) + pl08x_start_next_txd(plchan); + else + pl08x_phy_free(plchan); + } + spin_unlock(&plchan->vc.lock); + mask |= (1 << i); } } @@ -1654,16 +1688,10 @@ static irqreturn_t pl08x_irq(int irq, void *dev) static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) { - u32 cctl = pl08x_cctl(chan->cd->cctl); - chan->slave = true; chan->name = chan->cd->bus_id; - chan->src_addr = chan->cd->addr; - chan->dst_addr = chan->cd->addr; - chan->src_cctl = cctl | PL080_CONTROL_DST_INCR | - pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses); - chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR | - pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses); + chan->cfg.src_addr = chan->cd->addr; + chan->cfg.dst_addr = chan->cd->addr; } /* @@ -1693,6 +1721,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, chan->host = pl08x; chan->state = PL08X_CHAN_IDLE; + chan->signal = -1; if (slave) { chan->cd = &pl08x->pd->slave_channels[i]; @@ -1705,26 +1734,12 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, return -ENOMEM; } } - if (chan->cd->circular_buffer) { - dev_err(&pl08x->adev->dev, - "channel %s: circular buffers not supported\n", - chan->name); - kfree(chan); - continue; - } dev_dbg(&pl08x->adev->dev, "initialize virtual channel \"%s\"\n", chan->name); - chan->chan.device = dmadev; - dma_cookie_init(&chan->chan); - - spin_lock_init(&chan->lock); - INIT_LIST_HEAD(&chan->pend_list); - tasklet_init(&chan->tasklet, pl08x_tasklet, - (unsigned long) chan); - - list_add_tail(&chan->chan.device_node, &dmadev->channels); + chan->vc.desc_free = pl08x_desc_free; + vchan_init(&chan->vc, dmadev); } dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", i, slave ? 
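The reworked pl08x_irq() above shows the virt-dma completion idiom: under the channel lock the driver detaches the finished descriptor, calls vchan_cookie_complete(), and then either programs the next issued descriptor or releases the physical channel; the client callback and the descriptor free are deferred to the virt-dma tasklet rather than run from the hard IRQ. A generic sketch of that handler shape, with hypothetical foo_* names and assuming the virt-dma helpers from this series:

	#include <linux/interrupt.h>

	#include "virt-dma.h"

	struct foo_chan {
		struct virt_dma_chan vc;
		struct virt_dma_desc *at;	/* descriptor currently on the hardware */
	};

	static irqreturn_t foo_irq(int irq, void *data)
	{
		struct foo_chan *fc = data;

		spin_lock(&fc->vc.lock);
		if (fc->at) {
			struct virt_dma_desc *vd = fc->at;

			fc->at = NULL;
			vchan_cookie_complete(vd);	/* callback/free run later in the vc tasklet */

			/* hand the hardware the next issued descriptor, if any */
			fc->at = vchan_next_desc(&fc->vc);
			if (fc->at) {
				list_del(&fc->at->node);
				/* ... program the controller from fc->at ... */
			}
		}
		spin_unlock(&fc->vc.lock);

		return IRQ_HANDLED;
	}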
"slave" : "memcpy"); @@ -1737,8 +1752,8 @@ static void pl08x_free_virtual_channels(struct dma_device *dmadev) struct pl08x_dma_chan *next; list_for_each_entry_safe(chan, - next, &dmadev->channels, chan.device_node) { - list_del(&chan->chan.device_node); + next, &dmadev->channels, vc.chan.device_node) { + list_del(&chan->vc.chan.device_node); kfree(chan); } } @@ -1791,7 +1806,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) seq_printf(s, "\nPL08x virtual memcpy channels:\n"); seq_printf(s, "CHANNEL:\tSTATE:\n"); seq_printf(s, "--------\t------\n"); - list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { + list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) { seq_printf(s, "%s\t\t%s\n", chan->name, pl08x_state_str(chan->state)); } @@ -1799,7 +1814,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) seq_printf(s, "\nPL08x virtual slave channels:\n"); seq_printf(s, "CHANNEL:\tSTATE:\n"); seq_printf(s, "--------\t------\n"); - list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { + list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) { seq_printf(s, "%s\t\t%s\n", chan->name, pl08x_state_str(chan->state)); } @@ -1851,9 +1866,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) goto out_no_pl08x; } - pm_runtime_set_active(&adev->dev); - pm_runtime_enable(&adev->dev); - /* Initialize memcpy engine */ dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); pl08x->memcpy.dev = &adev->dev; @@ -1903,8 +1915,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) goto out_no_lli_pool; } - spin_lock_init(&pl08x->lock); - pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); if (!pl08x->base) { ret = -ENOMEM; @@ -1942,7 +1952,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) ch->id = i; ch->base = pl08x->base + PL080_Cx_BASE(i); spin_lock_init(&ch->lock); - ch->signal = -1; /* * Nomadik variants can have channels that are locked @@ -2007,7 +2016,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) amba_part(adev), amba_rev(adev), (unsigned long long)adev->res.start, adev->irq[0]); - pm_runtime_put(&adev->dev); return 0; out_no_slave_reg: @@ -2026,9 +2034,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) dma_pool_destroy(pl08x->pool); out_no_lli_pool: out_no_platdata: - pm_runtime_put(&adev->dev); - pm_runtime_disable(&adev->dev); - kfree(pl08x); out_no_pl08x: amba_release_regions(adev); diff --git a/trunk/drivers/dma/imx-dma.c b/trunk/drivers/dma/imx-dma.c index fcfeb3cd8d31..5084975d793c 100644 --- a/trunk/drivers/dma/imx-dma.c +++ b/trunk/drivers/dma/imx-dma.c @@ -172,7 +172,8 @@ struct imxdma_engine { struct device_dma_parameters dma_parms; struct dma_device dma_device; void __iomem *base; - struct clk *dma_clk; + struct clk *dma_ahb; + struct clk *dma_ipg; spinlock_t lock; struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS]; struct imxdma_channel channel[IMX_DMA_CHANNELS]; @@ -976,10 +977,20 @@ static int __init imxdma_probe(struct platform_device *pdev) return 0; } - imxdma->dma_clk = clk_get(NULL, "dma"); - if (IS_ERR(imxdma->dma_clk)) - return PTR_ERR(imxdma->dma_clk); - clk_enable(imxdma->dma_clk); + imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg"); + if (IS_ERR(imxdma->dma_ipg)) { + ret = PTR_ERR(imxdma->dma_ipg); + goto err_clk; + } + + imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb"); + if (IS_ERR(imxdma->dma_ahb)) { + ret = 
PTR_ERR(imxdma->dma_ahb); + goto err_clk; + } + + clk_prepare_enable(imxdma->dma_ipg); + clk_prepare_enable(imxdma->dma_ahb); /* reset DMA module */ imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR); @@ -988,16 +999,14 @@ static int __init imxdma_probe(struct platform_device *pdev) ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma); if (ret) { dev_warn(imxdma->dev, "Can't register IRQ for DMA\n"); - kfree(imxdma); - return ret; + goto err_enable; } ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma); if (ret) { dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n"); free_irq(MX1_DMA_INT, NULL); - kfree(imxdma); - return ret; + goto err_enable; } } @@ -1094,7 +1103,10 @@ static int __init imxdma_probe(struct platform_device *pdev) free_irq(MX1_DMA_INT, NULL); free_irq(MX1_DMA_ERR, NULL); } - +err_enable: + clk_disable_unprepare(imxdma->dma_ipg); + clk_disable_unprepare(imxdma->dma_ahb); +err_clk: kfree(imxdma); return ret; } @@ -1114,7 +1126,9 @@ static int __exit imxdma_remove(struct platform_device *pdev) free_irq(MX1_DMA_ERR, NULL); } - kfree(imxdma); + clk_disable_unprepare(imxdma->dma_ipg); + clk_disable_unprepare(imxdma->dma_ahb); + kfree(imxdma); return 0; } diff --git a/trunk/drivers/dma/omap-dma.c b/trunk/drivers/dma/omap-dma.c new file mode 100644 index 000000000000..ae0561826137 --- /dev/null +++ b/trunk/drivers/dma/omap-dma.c @@ -0,0 +1,669 @@ +/* + * OMAP DMAengine support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "virt-dma.h" +#include + +struct omap_dmadev { + struct dma_device ddev; + spinlock_t lock; + struct tasklet_struct task; + struct list_head pending; +}; + +struct omap_chan { + struct virt_dma_chan vc; + struct list_head node; + + struct dma_slave_config cfg; + unsigned dma_sig; + bool cyclic; + + int dma_ch; + struct omap_desc *desc; + unsigned sgidx; +}; + +struct omap_sg { + dma_addr_t addr; + uint32_t en; /* number of elements (24-bit) */ + uint32_t fn; /* number of frames (16-bit) */ +}; + +struct omap_desc { + struct virt_dma_desc vd; + enum dma_transfer_direction dir; + dma_addr_t dev_addr; + + int16_t fi; /* for OMAP_DMA_SYNC_PACKET */ + uint8_t es; /* OMAP_DMA_DATA_TYPE_xxx */ + uint8_t sync_mode; /* OMAP_DMA_SYNC_xxx */ + uint8_t sync_type; /* OMAP_DMA_xxx_SYNC* */ + uint8_t periph_port; /* Peripheral port */ + + unsigned sglen; + struct omap_sg sg[0]; +}; + +static const unsigned es_bytes[] = { + [OMAP_DMA_DATA_TYPE_S8] = 1, + [OMAP_DMA_DATA_TYPE_S16] = 2, + [OMAP_DMA_DATA_TYPE_S32] = 4, +}; + +static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d) +{ + return container_of(d, struct omap_dmadev, ddev); +} + +static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c) +{ + return container_of(c, struct omap_chan, vc.chan); +} + +static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t) +{ + return container_of(t, struct omap_desc, vd.tx); +} + +static void omap_dma_desc_free(struct virt_dma_desc *vd) +{ + kfree(container_of(vd, struct omap_desc, vd)); +} + +static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d, + unsigned idx) +{ + struct omap_sg *sg = d->sg + idx; + + if (d->dir == DMA_DEV_TO_MEM) + omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF, + OMAP_DMA_AMODE_POST_INC, 
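Back in the imx-dma hunk above, the driver now takes two named clocks ("ipg" and "ahb") via devm_clk_get() and brackets them with clk_prepare_enable()/clk_disable_unprepare(), unwinding on the error path. A condensed sketch of that probe-time pattern is below; the foo_* name is hypothetical and, unlike the patch, the sketch also checks the enable return codes.

	#include <linux/clk.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct clk *ipg, *ahb;
		int ret;

		ipg = devm_clk_get(&pdev->dev, "ipg");
		if (IS_ERR(ipg))
			return PTR_ERR(ipg);

		ahb = devm_clk_get(&pdev->dev, "ahb");
		if (IS_ERR(ahb))
			return PTR_ERR(ahb);

		/* prepare+enable in one call; both must be undone on failure */
		ret = clk_prepare_enable(ipg);
		if (ret)
			return ret;
		ret = clk_prepare_enable(ahb);
		if (ret)
			goto err_ipg;

		/* ... register the DMA device here ... */
		return 0;

	err_ipg:
		clk_disable_unprepare(ipg);
		return ret;
	}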
sg->addr, 0, 0); + else + omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF, + OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0); + + omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn, + d->sync_mode, c->dma_sig, d->sync_type); + + omap_start_dma(c->dma_ch); +} + +static void omap_dma_start_desc(struct omap_chan *c) +{ + struct virt_dma_desc *vd = vchan_next_desc(&c->vc); + struct omap_desc *d; + + if (!vd) { + c->desc = NULL; + return; + } + + list_del(&vd->node); + + c->desc = d = to_omap_dma_desc(&vd->tx); + c->sgidx = 0; + + if (d->dir == DMA_DEV_TO_MEM) + omap_set_dma_src_params(c->dma_ch, d->periph_port, + OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi); + else + omap_set_dma_dest_params(c->dma_ch, d->periph_port, + OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi); + + omap_dma_start_sg(c, d, 0); +} + +static void omap_dma_callback(int ch, u16 status, void *data) +{ + struct omap_chan *c = data; + struct omap_desc *d; + unsigned long flags; + + spin_lock_irqsave(&c->vc.lock, flags); + d = c->desc; + if (d) { + if (!c->cyclic) { + if (++c->sgidx < d->sglen) { + omap_dma_start_sg(c, d, c->sgidx); + } else { + omap_dma_start_desc(c); + vchan_cookie_complete(&d->vd); + } + } else { + vchan_cyclic_callback(&d->vd); + } + } + spin_unlock_irqrestore(&c->vc.lock, flags); +} + +/* + * This callback schedules all pending channels. We could be more + * clever here by postponing allocation of the real DMA channels to + * this point, and freeing them when our virtual channel becomes idle. + * + * We would then need to deal with 'all channels in-use' + */ +static void omap_dma_sched(unsigned long data) +{ + struct omap_dmadev *d = (struct omap_dmadev *)data; + LIST_HEAD(head); + + spin_lock_irq(&d->lock); + list_splice_tail_init(&d->pending, &head); + spin_unlock_irq(&d->lock); + + while (!list_empty(&head)) { + struct omap_chan *c = list_first_entry(&head, + struct omap_chan, node); + + spin_lock_irq(&c->vc.lock); + list_del_init(&c->node); + omap_dma_start_desc(c); + spin_unlock_irq(&c->vc.lock); + } +} + +static int omap_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + + dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig); + + return omap_request_dma(c->dma_sig, "DMA engine", + omap_dma_callback, c, &c->dma_ch); +} + +static void omap_dma_free_chan_resources(struct dma_chan *chan) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + + vchan_free_chan_resources(&c->vc); + omap_free_dma(c->dma_ch); + + dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig); +} + +static size_t omap_dma_sg_size(struct omap_sg *sg) +{ + return sg->en * sg->fn; +} + +static size_t omap_dma_desc_size(struct omap_desc *d) +{ + unsigned i; + size_t size; + + for (size = i = 0; i < d->sglen; i++) + size += omap_dma_sg_size(&d->sg[i]); + + return size * es_bytes[d->es]; +} + +static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr) +{ + unsigned i; + size_t size, es_size = es_bytes[d->es]; + + for (size = i = 0; i < d->sglen; i++) { + size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size; + + if (size) + size += this_size; + else if (addr >= d->sg[i].addr && + addr < d->sg[i].addr + this_size) + size += d->sg[i].addr + this_size - addr; + } + return size; +} + +static enum dma_status omap_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *txstate) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + struct virt_dma_desc *vd; + enum dma_status ret; + unsigned long flags; + 
+ ret = dma_cookie_status(chan, cookie, txstate); + if (ret == DMA_SUCCESS || !txstate) + return ret; + + spin_lock_irqsave(&c->vc.lock, flags); + vd = vchan_find_desc(&c->vc, cookie); + if (vd) { + txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx)); + } else if (c->desc && c->desc->vd.tx.cookie == cookie) { + struct omap_desc *d = c->desc; + dma_addr_t pos; + + if (d->dir == DMA_MEM_TO_DEV) + pos = omap_get_dma_src_pos(c->dma_ch); + else if (d->dir == DMA_DEV_TO_MEM) + pos = omap_get_dma_dst_pos(c->dma_ch); + else + pos = 0; + + txstate->residue = omap_dma_desc_size_pos(d, pos); + } else { + txstate->residue = 0; + } + spin_unlock_irqrestore(&c->vc.lock, flags); + + return ret; +} + +static void omap_dma_issue_pending(struct dma_chan *chan) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&c->vc.lock, flags); + if (vchan_issue_pending(&c->vc) && !c->desc) { + struct omap_dmadev *d = to_omap_dma_dev(chan->device); + spin_lock(&d->lock); + if (list_empty(&c->node)) + list_add_tail(&c->node, &d->pending); + spin_unlock(&d->lock); + tasklet_schedule(&d->task); + } + spin_unlock_irqrestore(&c->vc.lock, flags); +} + +static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen, + enum dma_transfer_direction dir, unsigned long tx_flags, void *context) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + enum dma_slave_buswidth dev_width; + struct scatterlist *sgent; + struct omap_desc *d; + dma_addr_t dev_addr; + unsigned i, j = 0, es, en, frame_bytes, sync_type; + u32 burst; + + if (dir == DMA_DEV_TO_MEM) { + dev_addr = c->cfg.src_addr; + dev_width = c->cfg.src_addr_width; + burst = c->cfg.src_maxburst; + sync_type = OMAP_DMA_SRC_SYNC; + } else if (dir == DMA_MEM_TO_DEV) { + dev_addr = c->cfg.dst_addr; + dev_width = c->cfg.dst_addr_width; + burst = c->cfg.dst_maxburst; + sync_type = OMAP_DMA_DST_SYNC; + } else { + dev_err(chan->device->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + /* Bus width translates to the element size (ES) */ + switch (dev_width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + es = OMAP_DMA_DATA_TYPE_S8; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + es = OMAP_DMA_DATA_TYPE_S16; + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + es = OMAP_DMA_DATA_TYPE_S32; + break; + default: /* not reached */ + return NULL; + } + + /* Now allocate and setup the descriptor. */ + d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC); + if (!d) + return NULL; + + d->dir = dir; + d->dev_addr = dev_addr; + d->es = es; + d->sync_mode = OMAP_DMA_SYNC_FRAME; + d->sync_type = sync_type; + d->periph_port = OMAP_DMA_PORT_TIPB; + + /* + * Build our scatterlist entries: each contains the address, + * the number of elements (EN) in each frame, and the number of + * frames (FN). Number of bytes for this entry = ES * EN * FN. + * + * Burst size translates to number of elements with frame sync. + * Note: DMA engine defines burst to be the number of dev-width + * transfers. 
+ */ + en = burst; + frame_bytes = es_bytes[es] * en; + for_each_sg(sgl, sgent, sglen, i) { + d->sg[j].addr = sg_dma_address(sgent); + d->sg[j].en = en; + d->sg[j].fn = sg_dma_len(sgent) / frame_bytes; + j++; + } + + d->sglen = j; + + return vchan_tx_prep(&c->vc, &d->vd, tx_flags); +} + +static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction dir, void *context) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + enum dma_slave_buswidth dev_width; + struct omap_desc *d; + dma_addr_t dev_addr; + unsigned es, sync_type; + u32 burst; + + if (dir == DMA_DEV_TO_MEM) { + dev_addr = c->cfg.src_addr; + dev_width = c->cfg.src_addr_width; + burst = c->cfg.src_maxburst; + sync_type = OMAP_DMA_SRC_SYNC; + } else if (dir == DMA_MEM_TO_DEV) { + dev_addr = c->cfg.dst_addr; + dev_width = c->cfg.dst_addr_width; + burst = c->cfg.dst_maxburst; + sync_type = OMAP_DMA_DST_SYNC; + } else { + dev_err(chan->device->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + /* Bus width translates to the element size (ES) */ + switch (dev_width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + es = OMAP_DMA_DATA_TYPE_S8; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + es = OMAP_DMA_DATA_TYPE_S16; + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + es = OMAP_DMA_DATA_TYPE_S32; + break; + default: /* not reached */ + return NULL; + } + + /* Now allocate and setup the descriptor. */ + d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC); + if (!d) + return NULL; + + d->dir = dir; + d->dev_addr = dev_addr; + d->fi = burst; + d->es = es; + d->sync_mode = OMAP_DMA_SYNC_PACKET; + d->sync_type = sync_type; + d->periph_port = OMAP_DMA_PORT_MPUI; + d->sg[0].addr = buf_addr; + d->sg[0].en = period_len / es_bytes[es]; + d->sg[0].fn = buf_len / period_len; + d->sglen = 1; + + if (!c->cyclic) { + c->cyclic = true; + omap_dma_link_lch(c->dma_ch, c->dma_ch); + omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ); + omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ); + } + + if (!cpu_class_is_omap1()) { + omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16); + omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16); + } + + return vchan_tx_prep(&c->vc, &d->vd, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); +} + +static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg) +{ + if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || + cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) + return -EINVAL; + + memcpy(&c->cfg, cfg, sizeof(c->cfg)); + + return 0; +} + +static int omap_dma_terminate_all(struct omap_chan *c) +{ + struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device); + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&c->vc.lock, flags); + + /* Prevent this channel being scheduled */ + spin_lock(&d->lock); + list_del_init(&c->node); + spin_unlock(&d->lock); + + /* + * Stop DMA activity: we assume the callback will not be called + * after omap_stop_dma() returns (even if it does, it will see + * c->desc is NULL and exit.) 
+ */ + if (c->desc) { + c->desc = NULL; + omap_stop_dma(c->dma_ch); + } + + if (c->cyclic) { + c->cyclic = false; + omap_dma_unlink_lch(c->dma_ch, c->dma_ch); + } + + vchan_get_all_descriptors(&c->vc, &head); + spin_unlock_irqrestore(&c->vc.lock, flags); + vchan_dma_desc_free_list(&c->vc, &head); + + return 0; +} + +static int omap_dma_pause(struct omap_chan *c) +{ + /* FIXME: not supported by platform private API */ + return -EINVAL; +} + +static int omap_dma_resume(struct omap_chan *c) +{ + /* FIXME: not supported by platform private API */ + return -EINVAL; +} + +static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + int ret; + + switch (cmd) { + case DMA_SLAVE_CONFIG: + ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg); + break; + + case DMA_TERMINATE_ALL: + ret = omap_dma_terminate_all(c); + break; + + case DMA_PAUSE: + ret = omap_dma_pause(c); + break; + + case DMA_RESUME: + ret = omap_dma_resume(c); + break; + + default: + ret = -ENXIO; + break; + } + + return ret; +} + +static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig) +{ + struct omap_chan *c; + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return -ENOMEM; + + c->dma_sig = dma_sig; + c->vc.desc_free = omap_dma_desc_free; + vchan_init(&c->vc, &od->ddev); + INIT_LIST_HEAD(&c->node); + + od->ddev.chancnt++; + + return 0; +} + +static void omap_dma_free(struct omap_dmadev *od) +{ + tasklet_kill(&od->task); + while (!list_empty(&od->ddev.channels)) { + struct omap_chan *c = list_first_entry(&od->ddev.channels, + struct omap_chan, vc.chan.device_node); + + list_del(&c->vc.chan.device_node); + tasklet_kill(&c->vc.task); + kfree(c); + } + kfree(od); +} + +static int omap_dma_probe(struct platform_device *pdev) +{ + struct omap_dmadev *od; + int rc, i; + + od = kzalloc(sizeof(*od), GFP_KERNEL); + if (!od) + return -ENOMEM; + + dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); + dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); + od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources; + od->ddev.device_free_chan_resources = omap_dma_free_chan_resources; + od->ddev.device_tx_status = omap_dma_tx_status; + od->ddev.device_issue_pending = omap_dma_issue_pending; + od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; + od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; + od->ddev.device_control = omap_dma_control; + od->ddev.dev = &pdev->dev; + INIT_LIST_HEAD(&od->ddev.channels); + INIT_LIST_HEAD(&od->pending); + spin_lock_init(&od->lock); + + tasklet_init(&od->task, omap_dma_sched, (unsigned long)od); + + for (i = 0; i < 127; i++) { + rc = omap_dma_chan_init(od, i); + if (rc) { + omap_dma_free(od); + return rc; + } + } + + rc = dma_async_device_register(&od->ddev); + if (rc) { + pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n", + rc); + omap_dma_free(od); + } else { + platform_set_drvdata(pdev, od); + } + + dev_info(&pdev->dev, "OMAP DMA engine driver\n"); + + return rc; +} + +static int omap_dma_remove(struct platform_device *pdev) +{ + struct omap_dmadev *od = platform_get_drvdata(pdev); + + dma_async_device_unregister(&od->ddev); + omap_dma_free(od); + + return 0; +} + +static struct platform_driver omap_dma_driver = { + .probe = omap_dma_probe, + .remove = omap_dma_remove, + .driver = { + .name = "omap-dma-engine", + .owner = THIS_MODULE, + }, +}; + +bool omap_dma_filter_fn(struct dma_chan *chan, void *param) +{ + if (chan->device->dev->driver == &omap_dma_driver.driver) { + struct 
omap_chan *c = to_omap_dma_chan(chan); + unsigned req = *(unsigned *)param; + + return req == c->dma_sig; + } + return false; +} +EXPORT_SYMBOL_GPL(omap_dma_filter_fn); + +static struct platform_device *pdev; + +static const struct platform_device_info omap_dma_dev_info = { + .name = "omap-dma-engine", + .id = -1, + .dma_mask = DMA_BIT_MASK(32), +}; + +static int omap_dma_init(void) +{ + int rc = platform_driver_register(&omap_dma_driver); + + if (rc == 0) { + pdev = platform_device_register_full(&omap_dma_dev_info); + if (IS_ERR(pdev)) { + platform_driver_unregister(&omap_dma_driver); + rc = PTR_ERR(pdev); + } + } + return rc; +} +subsys_initcall(omap_dma_init); + +static void __exit omap_dma_exit(void) +{ + platform_device_unregister(pdev); + platform_driver_unregister(&omap_dma_driver); +} +module_exit(omap_dma_exit); + +MODULE_AUTHOR("Russell King"); +MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/dma/sa11x0-dma.c b/trunk/drivers/dma/sa11x0-dma.c index ec78ccef9132..f5a73606217e 100644 --- a/trunk/drivers/dma/sa11x0-dma.c +++ b/trunk/drivers/dma/sa11x0-dma.c @@ -21,6 +21,8 @@ #include #include +#include "virt-dma.h" + #define NR_PHY_CHAN 6 #define DMA_ALIGN 3 #define DMA_MAX_SIZE 0x1fff @@ -72,12 +74,13 @@ struct sa11x0_dma_sg { }; struct sa11x0_dma_desc { - struct dma_async_tx_descriptor tx; + struct virt_dma_desc vd; + u32 ddar; size_t size; + unsigned period; + bool cyclic; - /* maybe protected by c->lock */ - struct list_head node; unsigned sglen; struct sa11x0_dma_sg sg[0]; }; @@ -85,15 +88,11 @@ struct sa11x0_dma_desc { struct sa11x0_dma_phy; struct sa11x0_dma_chan { - struct dma_chan chan; - spinlock_t lock; - dma_cookie_t lc; + struct virt_dma_chan vc; - /* protected by c->lock */ + /* protected by c->vc.lock */ struct sa11x0_dma_phy *phy; enum dma_status status; - struct list_head desc_submitted; - struct list_head desc_issued; /* protected by d->lock */ struct list_head node; @@ -109,7 +108,7 @@ struct sa11x0_dma_phy { struct sa11x0_dma_chan *vchan; - /* Protected by c->lock */ + /* Protected by c->vc.lock */ unsigned sg_load; struct sa11x0_dma_desc *txd_load; unsigned sg_done; @@ -127,13 +126,12 @@ struct sa11x0_dma_dev { spinlock_t lock; struct tasklet_struct task; struct list_head chan_pending; - struct list_head desc_complete; struct sa11x0_dma_phy phy[NR_PHY_CHAN]; }; static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan) { - return container_of(chan, struct sa11x0_dma_chan, chan); + return container_of(chan, struct sa11x0_dma_chan, vc.chan); } static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev) @@ -141,27 +139,26 @@ static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev) return container_of(dmadev, struct sa11x0_dma_dev, slave); } -static struct sa11x0_dma_desc *to_sa11x0_dma_tx(struct dma_async_tx_descriptor *tx) +static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c) { - return container_of(tx, struct sa11x0_dma_desc, tx); + struct virt_dma_desc *vd = vchan_next_desc(&c->vc); + + return vd ? 
container_of(vd, struct sa11x0_dma_desc, vd) : NULL; } -static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c) +static void sa11x0_dma_free_desc(struct virt_dma_desc *vd) { - if (list_empty(&c->desc_issued)) - return NULL; - - return list_first_entry(&c->desc_issued, struct sa11x0_dma_desc, node); + kfree(container_of(vd, struct sa11x0_dma_desc, vd)); } static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd) { - list_del(&txd->node); + list_del(&txd->vd.node); p->txd_load = txd; p->sg_load = 0; dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n", - p->num, txd, txd->tx.cookie, txd->ddar); + p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar); } static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p, @@ -183,19 +180,24 @@ static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p, return; if (p->sg_load == txd->sglen) { - struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c); + if (!txd->cyclic) { + struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c); - /* - * We have reached the end of the current descriptor. - * Peek at the next descriptor, and if compatible with - * the current, start processing it. - */ - if (txn && txn->ddar == txd->ddar) { - txd = txn; - sa11x0_dma_start_desc(p, txn); + /* + * We have reached the end of the current descriptor. + * Peek at the next descriptor, and if compatible with + * the current, start processing it. + */ + if (txn && txn->ddar == txd->ddar) { + txd = txn; + sa11x0_dma_start_desc(p, txn); + } else { + p->txd_load = NULL; + return; + } } else { - p->txd_load = NULL; - return; + /* Cyclic: reset back to beginning */ + p->sg_load = 0; } } @@ -229,21 +231,21 @@ static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd = p->txd_done; if (++p->sg_done == txd->sglen) { - struct sa11x0_dma_dev *d = p->dev; - - dev_vdbg(d->slave.dev, "pchan %u: txd %p[%x]: completed\n", - p->num, p->txd_done, p->txd_done->tx.cookie); - - c->lc = txd->tx.cookie; + if (!txd->cyclic) { + vchan_cookie_complete(&txd->vd); - spin_lock(&d->lock); - list_add_tail(&txd->node, &d->desc_complete); - spin_unlock(&d->lock); + p->sg_done = 0; + p->txd_done = p->txd_load; - p->sg_done = 0; - p->txd_done = p->txd_load; + if (!p->txd_done) + tasklet_schedule(&p->dev->task); + } else { + if ((p->sg_done % txd->period) == 0) + vchan_cyclic_callback(&txd->vd); - tasklet_schedule(&d->task); + /* Cyclic: reset back to beginning */ + p->sg_done = 0; + } } sa11x0_dma_start_sg(p, c); @@ -280,7 +282,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id) if (c) { unsigned long flags; - spin_lock_irqsave(&c->lock, flags); + spin_lock_irqsave(&c->vc.lock, flags); /* * Now that we're holding the lock, check that the vchan * really is associated with this pchan before touching the @@ -294,7 +296,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id) if (dcsr & DCSR_DONEB) sa11x0_dma_complete(p, c); } - spin_unlock_irqrestore(&c->lock, flags); + spin_unlock_irqrestore(&c->vc.lock, flags); } return IRQ_HANDLED; @@ -332,28 +334,15 @@ static void sa11x0_dma_tasklet(unsigned long arg) struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg; struct sa11x0_dma_phy *p; struct sa11x0_dma_chan *c; - struct sa11x0_dma_desc *txd, *txn; - LIST_HEAD(head); unsigned pch, pch_alloc = 0; dev_dbg(d->slave.dev, "tasklet enter\n"); - /* Get the completed tx descriptors */ - spin_lock_irq(&d->lock); - list_splice_init(&d->desc_complete, &head); - spin_unlock_irq(&d->lock); - - 
list_for_each_entry(txd, &head, node) { - c = to_sa11x0_dma_chan(txd->tx.chan); - - dev_dbg(d->slave.dev, "vchan %p: txd %p[%x] completed\n", - c, txd, txd->tx.cookie); - - spin_lock_irq(&c->lock); + list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) { + spin_lock_irq(&c->vc.lock); p = c->phy; - if (p) { - if (!p->txd_done) - sa11x0_dma_start_txd(c); + if (p && !p->txd_done) { + sa11x0_dma_start_txd(c); if (!p->txd_done) { /* No current txd associated with this channel */ dev_dbg(d->slave.dev, "pchan %u: free\n", p->num); @@ -363,7 +352,7 @@ static void sa11x0_dma_tasklet(unsigned long arg) p->vchan = NULL; } } - spin_unlock_irq(&c->lock); + spin_unlock_irq(&c->vc.lock); } spin_lock_irq(&d->lock); @@ -380,7 +369,7 @@ static void sa11x0_dma_tasklet(unsigned long arg) /* Mark this channel allocated */ p->vchan = c; - dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, c); + dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc); } } spin_unlock_irq(&d->lock); @@ -390,42 +379,18 @@ static void sa11x0_dma_tasklet(unsigned long arg) p = &d->phy[pch]; c = p->vchan; - spin_lock_irq(&c->lock); + spin_lock_irq(&c->vc.lock); c->phy = p; sa11x0_dma_start_txd(c); - spin_unlock_irq(&c->lock); + spin_unlock_irq(&c->vc.lock); } } - /* Now free the completed tx descriptor, and call their callbacks */ - list_for_each_entry_safe(txd, txn, &head, node) { - dma_async_tx_callback callback = txd->tx.callback; - void *callback_param = txd->tx.callback_param; - - dev_dbg(d->slave.dev, "txd %p[%x]: callback and free\n", - txd, txd->tx.cookie); - - kfree(txd); - - if (callback) - callback(callback_param); - } - dev_dbg(d->slave.dev, "tasklet exit\n"); } -static void sa11x0_dma_desc_free(struct sa11x0_dma_dev *d, struct list_head *head) -{ - struct sa11x0_dma_desc *txd, *txn; - - list_for_each_entry_safe(txd, txn, head, node) { - dev_dbg(d->slave.dev, "txd %p: freeing\n", txd); - kfree(txd); - } -} - static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan) { return 0; @@ -436,18 +401,12 @@ static void sa11x0_dma_free_chan_resources(struct dma_chan *chan) struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); unsigned long flags; - LIST_HEAD(head); - spin_lock_irqsave(&c->lock, flags); - spin_lock(&d->lock); + spin_lock_irqsave(&d->lock, flags); list_del_init(&c->node); - spin_unlock(&d->lock); - - list_splice_tail_init(&c->desc_submitted, &head); - list_splice_tail_init(&c->desc_issued, &head); - spin_unlock_irqrestore(&c->lock, flags); + spin_unlock_irqrestore(&d->lock, flags); - sa11x0_dma_desc_free(d, &head); + vchan_free_chan_resources(&c->vc); } static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p) @@ -472,33 +431,47 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); struct sa11x0_dma_phy *p; - struct sa11x0_dma_desc *txd; - dma_cookie_t last_used, last_complete; + struct virt_dma_desc *vd; unsigned long flags; enum dma_status ret; - size_t bytes = 0; - - last_used = c->chan.cookie; - last_complete = c->lc; - ret = dma_async_is_complete(cookie, last_complete, last_used); - if (ret == DMA_SUCCESS) { - dma_set_tx_state(state, last_complete, last_used, 0); + ret = dma_cookie_status(&c->vc.chan, cookie, state); + if (ret == DMA_SUCCESS) return ret; - } - spin_lock_irqsave(&c->lock, flags); + if (!state) + return c->status; + + spin_lock_irqsave(&c->vc.lock, flags); p = c->phy; - ret = 
c->status; - if (p) { - dma_addr_t addr = sa11x0_dma_pos(p); - dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr); + /* + * If the cookie is on our issue queue, then the residue is + * its total size. + */ + vd = vchan_find_desc(&c->vc, cookie); + if (vd) { + state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size; + } else if (!p) { + state->residue = 0; + } else { + struct sa11x0_dma_desc *txd; + size_t bytes = 0; - txd = p->txd_done; + if (p->txd_done && p->txd_done->vd.tx.cookie == cookie) + txd = p->txd_done; + else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie) + txd = p->txd_load; + else + txd = NULL; + + ret = c->status; if (txd) { + dma_addr_t addr = sa11x0_dma_pos(p); unsigned i; + dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr); + for (i = 0; i < txd->sglen; i++) { dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n", i, txd->sg[i].addr, txd->sg[i].len); @@ -521,17 +494,11 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, bytes += txd->sg[i].len; } } - if (txd != p->txd_load && p->txd_load) - bytes += p->txd_load->size; - } - list_for_each_entry(txd, &c->desc_issued, node) { - bytes += txd->size; + state->residue = bytes; } - spin_unlock_irqrestore(&c->lock, flags); - - dma_set_tx_state(state, last_complete, last_used, bytes); + spin_unlock_irqrestore(&c->vc.lock, flags); - dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes); + dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue); return ret; } @@ -547,40 +514,20 @@ static void sa11x0_dma_issue_pending(struct dma_chan *chan) struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); unsigned long flags; - spin_lock_irqsave(&c->lock, flags); - list_splice_tail_init(&c->desc_submitted, &c->desc_issued); - if (!list_empty(&c->desc_issued)) { - spin_lock(&d->lock); - if (!c->phy && list_empty(&c->node)) { - list_add_tail(&c->node, &d->chan_pending); - tasklet_schedule(&d->task); - dev_dbg(d->slave.dev, "vchan %p: issued\n", c); + spin_lock_irqsave(&c->vc.lock, flags); + if (vchan_issue_pending(&c->vc)) { + if (!c->phy) { + spin_lock(&d->lock); + if (list_empty(&c->node)) { + list_add_tail(&c->node, &d->chan_pending); + tasklet_schedule(&d->task); + dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc); + } + spin_unlock(&d->lock); } - spin_unlock(&d->lock); } else - dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", c); - spin_unlock_irqrestore(&c->lock, flags); -} - -static dma_cookie_t sa11x0_dma_tx_submit(struct dma_async_tx_descriptor *tx) -{ - struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(tx->chan); - struct sa11x0_dma_desc *txd = to_sa11x0_dma_tx(tx); - unsigned long flags; - - spin_lock_irqsave(&c->lock, flags); - c->chan.cookie += 1; - if (c->chan.cookie < 0) - c->chan.cookie = 1; - txd->tx.cookie = c->chan.cookie; - - list_add_tail(&txd->node, &c->desc_submitted); - spin_unlock_irqrestore(&c->lock, flags); - - dev_dbg(tx->chan->device->dev, "vchan %p: txd %p[%x]: submitted\n", - c, txd, txd->tx.cookie); - - return txd->tx.cookie; + dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc); + spin_unlock_irqrestore(&c->vc.lock, flags); } static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( @@ -596,7 +543,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( /* SA11x0 channels can only operate in their native direction */ if (dir != (c->ddar & DDAR_RW ? 
DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n", - c, c->ddar, dir); + &c->vc, c->ddar, dir); return NULL; } @@ -612,14 +559,14 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1; if (addr & DMA_ALIGN) { dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n", - c, addr); + &c->vc, addr); return NULL; } } txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC); if (!txd) { - dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", c); + dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc); return NULL; } @@ -655,17 +602,73 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( } while (len); } - dma_async_tx_descriptor_init(&txd->tx, &c->chan); - txd->tx.flags = flags; - txd->tx.tx_submit = sa11x0_dma_tx_submit; txd->ddar = c->ddar; txd->size = size; txd->sglen = j; dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n", - c, txd, txd->size, txd->sglen); + &c->vc, &txd->vd, txd->size, txd->sglen); - return &txd->tx; + return vchan_tx_prep(&c->vc, &txd->vd, flags); +} + +static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period, + enum dma_transfer_direction dir, void *context) +{ + struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); + struct sa11x0_dma_desc *txd; + unsigned i, j, k, sglen, sgperiod; + + /* SA11x0 channels can only operate in their native direction */ + if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { + dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n", + &c->vc, c->ddar, dir); + return NULL; + } + + sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN); + sglen = size * sgperiod / period; + + /* Do not allow zero-sized txds */ + if (sglen == 0) + return NULL; + + txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC); + if (!txd) { + dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc); + return NULL; + } + + for (i = k = 0; i < size / period; i++) { + size_t tlen, len = period; + + for (j = 0; j < sgperiod; j++, k++) { + tlen = len; + + if (tlen > DMA_MAX_SIZE) { + unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN); + tlen = (tlen / mult) & ~DMA_ALIGN; + } + + txd->sg[k].addr = addr; + txd->sg[k].len = tlen; + addr += tlen; + len -= tlen; + } + + WARN_ON(len != 0); + } + + WARN_ON(k != sglen); + + txd->ddar = c->ddar; + txd->size = size; + txd->sglen = sglen; + txd->cyclic = 1; + txd->period = sgperiod; + + return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); } static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg) @@ -695,8 +698,8 @@ static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_c if (maxburst == 8) ddar |= DDAR_BS; - dev_dbg(c->chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n", - c, addr, width, maxburst); + dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n", + &c->vc, addr, width, maxburst); c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6; @@ -718,16 +721,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg); case DMA_TERMINATE_ALL: - dev_dbg(d->slave.dev, "vchan %p: terminate all\n", c); + dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); /* Clear 
the tx descriptor lists */ - spin_lock_irqsave(&c->lock, flags); - list_splice_tail_init(&c->desc_submitted, &head); - list_splice_tail_init(&c->desc_issued, &head); + spin_lock_irqsave(&c->vc.lock, flags); + vchan_get_all_descriptors(&c->vc, &head); p = c->phy; if (p) { - struct sa11x0_dma_desc *txd, *txn; - dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num); /* vchan is assigned to a pchan - stop the channel */ writel(DCSR_RUN | DCSR_IE | @@ -735,17 +735,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, DCSR_STRTB | DCSR_DONEB, p->base + DMA_DCSR_C); - list_for_each_entry_safe(txd, txn, &d->desc_complete, node) - if (txd->tx.chan == &c->chan) - list_move(&txd->node, &head); - if (p->txd_load) { if (p->txd_load != p->txd_done) - list_add_tail(&p->txd_load->node, &head); + list_add_tail(&p->txd_load->vd.node, &head); p->txd_load = NULL; } if (p->txd_done) { - list_add_tail(&p->txd_done->node, &head); + list_add_tail(&p->txd_done->vd.node, &head); p->txd_done = NULL; } c->phy = NULL; @@ -754,14 +750,14 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, spin_unlock(&d->lock); tasklet_schedule(&d->task); } - spin_unlock_irqrestore(&c->lock, flags); - sa11x0_dma_desc_free(d, &head); + spin_unlock_irqrestore(&c->vc.lock, flags); + vchan_dma_desc_free_list(&c->vc, &head); ret = 0; break; case DMA_PAUSE: - dev_dbg(d->slave.dev, "vchan %p: pause\n", c); - spin_lock_irqsave(&c->lock, flags); + dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); + spin_lock_irqsave(&c->vc.lock, flags); if (c->status == DMA_IN_PROGRESS) { c->status = DMA_PAUSED; @@ -774,26 +770,26 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, spin_unlock(&d->lock); } } - spin_unlock_irqrestore(&c->lock, flags); + spin_unlock_irqrestore(&c->vc.lock, flags); ret = 0; break; case DMA_RESUME: - dev_dbg(d->slave.dev, "vchan %p: resume\n", c); - spin_lock_irqsave(&c->lock, flags); + dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); + spin_lock_irqsave(&c->vc.lock, flags); if (c->status == DMA_PAUSED) { c->status = DMA_IN_PROGRESS; p = c->phy; if (p) { writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S); - } else if (!list_empty(&c->desc_issued)) { + } else if (!list_empty(&c->vc.desc_issued)) { spin_lock(&d->lock); list_add_tail(&c->node, &d->chan_pending); spin_unlock(&d->lock); } } - spin_unlock_irqrestore(&c->lock, flags); + spin_unlock_irqrestore(&c->vc.lock, flags); ret = 0; break; @@ -853,15 +849,13 @@ static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev, return -ENOMEM; } - c->chan.device = dmadev; c->status = DMA_IN_PROGRESS; c->ddar = chan_desc[i].ddar; c->name = chan_desc[i].name; - spin_lock_init(&c->lock); - INIT_LIST_HEAD(&c->desc_submitted); - INIT_LIST_HEAD(&c->desc_issued); INIT_LIST_HEAD(&c->node); - list_add_tail(&c->chan.device_node, &dmadev->channels); + + c->vc.desc_free = sa11x0_dma_free_desc; + vchan_init(&c->vc, dmadev); } return dma_async_device_register(dmadev); @@ -890,8 +884,9 @@ static void sa11x0_dma_free_channels(struct dma_device *dmadev) { struct sa11x0_dma_chan *c, *cn; - list_for_each_entry_safe(c, cn, &dmadev->channels, chan.device_node) { - list_del(&c->chan.device_node); + list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) { + list_del(&c->vc.chan.device_node); + tasklet_kill(&c->vc.task); kfree(c); } } @@ -915,7 +910,6 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev) spin_lock_init(&d->lock); INIT_LIST_HEAD(&d->chan_pending); - 
INIT_LIST_HEAD(&d->desc_complete); d->base = ioremap(res->start, resource_size(res)); if (!d->base) { @@ -947,7 +941,9 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev) } dma_cap_set(DMA_SLAVE, d->slave.cap_mask); + dma_cap_set(DMA_CYCLIC, d->slave.cap_mask); d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg; + d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic; ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev); if (ret) { dev_warn(d->slave.dev, "failed to register slave async device: %d\n", diff --git a/trunk/drivers/dma/sh/shdma-base.c b/trunk/drivers/dma/sh/shdma-base.c index 27f5c781fd73..f4cd946d259d 100644 --- a/trunk/drivers/dma/sh/shdma-base.c +++ b/trunk/drivers/dma/sh/shdma-base.c @@ -483,6 +483,7 @@ static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan, new->mark = DESC_PREPARED; new->async_tx.flags = flags; new->direction = direction; + new->partial = 0; *len -= copy_size; if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV) @@ -644,6 +645,14 @@ static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, case DMA_TERMINATE_ALL: spin_lock_irqsave(&schan->chan_lock, flags); ops->halt_channel(schan); + + if (ops->get_partial && !list_empty(&schan->ld_queue)) { + /* Record partial transfer */ + struct shdma_desc *desc = list_first_entry(&schan->ld_queue, + struct shdma_desc, node); + desc->partial = ops->get_partial(schan, desc); + } + spin_unlock_irqrestore(&schan->chan_lock, flags); shdma_chan_ld_cleanup(schan, true); diff --git a/trunk/drivers/dma/sh/shdma.c b/trunk/drivers/dma/sh/shdma.c index 027c9be97654..f41bcc5267fd 100644 --- a/trunk/drivers/dma/sh/shdma.c +++ b/trunk/drivers/dma/sh/shdma.c @@ -381,6 +381,17 @@ static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq) return true; } +static size_t sh_dmae_get_partial(struct shdma_chan *schan, + struct shdma_desc *sdesc) +{ + struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, + shdma_chan); + struct sh_dmae_desc *sh_desc = container_of(sdesc, + struct sh_dmae_desc, shdma_desc); + return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << + sh_chan->xmit_shift; +} + /* Called from error IRQ or NMI */ static bool sh_dmae_reset(struct sh_dmae_device *shdev) { @@ -632,6 +643,7 @@ static const struct shdma_ops sh_dmae_shdma_ops = { .start_xfer = sh_dmae_start_xfer, .embedded_desc = sh_dmae_embedded_desc, .chan_irq = sh_dmae_chan_irq, + .get_partial = sh_dmae_get_partial, }; static int __devinit sh_dmae_probe(struct platform_device *pdev) diff --git a/trunk/drivers/dma/tegra20-apb-dma.c b/trunk/drivers/dma/tegra20-apb-dma.c index d52dbc6c54ab..24acd711e032 100644 --- a/trunk/drivers/dma/tegra20-apb-dma.c +++ b/trunk/drivers/dma/tegra20-apb-dma.c @@ -1119,15 +1119,21 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( static int tegra_dma_alloc_chan_resources(struct dma_chan *dc) { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + struct tegra_dma *tdma = tdc->tdma; + int ret; dma_cookie_init(&tdc->dma_chan); tdc->config_init = false; - return 0; + ret = clk_prepare_enable(tdma->dma_clk); + if (ret < 0) + dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret); + return ret; } static void tegra_dma_free_chan_resources(struct dma_chan *dc) { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + struct tegra_dma *tdma = tdc->tdma; struct tegra_dma_desc *dma_desc; struct tegra_dma_sg_req *sg_req; @@ -1163,6 +1169,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc) list_del(&sg_req->node); 
kfree(sg_req); } + clk_disable_unprepare(tdma->dma_clk); } /* Tegra20 specific DMA controller information */ @@ -1255,6 +1262,13 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev) } } + /* Enable clock before accessing registers */ + ret = clk_prepare_enable(tdma->dma_clk); + if (ret < 0) { + dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret); + goto err_pm_disable; + } + /* Reset DMA controller */ tegra_periph_reset_assert(tdma->dma_clk); udelay(2); @@ -1265,6 +1279,8 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev) tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0); tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul); + clk_disable_unprepare(tdma->dma_clk); + INIT_LIST_HEAD(&tdma->dma_dev.channels); for (i = 0; i < cdata->nr_channels; i++) { struct tegra_dma_channel *tdc = &tdma->channels[i]; diff --git a/trunk/drivers/dma/virt-dma.c b/trunk/drivers/dma/virt-dma.c new file mode 100644 index 000000000000..6f80432a3f0a --- /dev/null +++ b/trunk/drivers/dma/virt-dma.c @@ -0,0 +1,123 @@ +/* + * Virtual DMA channel support for DMAengine + * + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include + +#include "virt-dma.h" + +static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx) +{ + return container_of(tx, struct virt_dma_desc, tx); +} + +dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct virt_dma_chan *vc = to_virt_chan(tx->chan); + struct virt_dma_desc *vd = to_virt_desc(tx); + unsigned long flags; + dma_cookie_t cookie; + + spin_lock_irqsave(&vc->lock, flags); + cookie = dma_cookie_assign(tx); + + list_add_tail(&vd->node, &vc->desc_submitted); + spin_unlock_irqrestore(&vc->lock, flags); + + dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", + vc, vd, cookie); + + return cookie; +} +EXPORT_SYMBOL_GPL(vchan_tx_submit); + +struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc, + dma_cookie_t cookie) +{ + struct virt_dma_desc *vd; + + list_for_each_entry(vd, &vc->desc_issued, node) + if (vd->tx.cookie == cookie) + return vd; + + return NULL; +} +EXPORT_SYMBOL_GPL(vchan_find_desc); + +/* + * This tasklet handles the completion of a DMA descriptor by + * calling its callback and freeing it. 
+ */ +static void vchan_complete(unsigned long arg) +{ + struct virt_dma_chan *vc = (struct virt_dma_chan *)arg; + struct virt_dma_desc *vd; + dma_async_tx_callback cb = NULL; + void *cb_data = NULL; + LIST_HEAD(head); + + spin_lock_irq(&vc->lock); + list_splice_tail_init(&vc->desc_completed, &head); + vd = vc->cyclic; + if (vd) { + vc->cyclic = NULL; + cb = vd->tx.callback; + cb_data = vd->tx.callback_param; + } + spin_unlock_irq(&vc->lock); + + if (cb) + cb(cb_data); + + while (!list_empty(&head)) { + vd = list_first_entry(&head, struct virt_dma_desc, node); + cb = vd->tx.callback; + cb_data = vd->tx.callback_param; + + list_del(&vd->node); + + vc->desc_free(vd); + + if (cb) + cb(cb_data); + } +} + +void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) +{ + while (!list_empty(head)) { + struct virt_dma_desc *vd = list_first_entry(head, + struct virt_dma_desc, node); + list_del(&vd->node); + dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); + vc->desc_free(vd); + } +} +EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); + +void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) +{ + dma_cookie_init(&vc->chan); + + spin_lock_init(&vc->lock); + INIT_LIST_HEAD(&vc->desc_submitted); + INIT_LIST_HEAD(&vc->desc_issued); + INIT_LIST_HEAD(&vc->desc_completed); + + tasklet_init(&vc->task, vchan_complete, (unsigned long)vc); + + vc->chan.device = dmadev; + list_add_tail(&vc->chan.device_node, &dmadev->channels); +} +EXPORT_SYMBOL_GPL(vchan_init); + +MODULE_AUTHOR("Russell King"); +MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/dma/virt-dma.h b/trunk/drivers/dma/virt-dma.h new file mode 100644 index 000000000000..85c19d63f9fb --- /dev/null +++ b/trunk/drivers/dma/virt-dma.h @@ -0,0 +1,152 @@ +/* + * Virtual DMA channel support for DMAengine + * + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef VIRT_DMA_H +#define VIRT_DMA_H + +#include +#include + +#include "dmaengine.h" + +struct virt_dma_desc { + struct dma_async_tx_descriptor tx; + /* protected by vc.lock */ + struct list_head node; +}; + +struct virt_dma_chan { + struct dma_chan chan; + struct tasklet_struct task; + void (*desc_free)(struct virt_dma_desc *); + + spinlock_t lock; + + /* protected by vc.lock */ + struct list_head desc_submitted; + struct list_head desc_issued; + struct list_head desc_completed; + + struct virt_dma_desc *cyclic; +}; + +static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan) +{ + return container_of(chan, struct virt_dma_chan, chan); +} + +void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head); +void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev); +struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t); + +/** + * vchan_tx_prep - prepare a descriptor + * vc: virtual channel allocating this descriptor + * vd: virtual descriptor to prepare + * tx_flags: flags argument passed in to prepare function + */ +static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc, + struct virt_dma_desc *vd, unsigned long tx_flags) +{ + extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); + + dma_async_tx_descriptor_init(&vd->tx, &vc->chan); + vd->tx.flags = tx_flags; + vd->tx.tx_submit = vchan_tx_submit; + + return &vd->tx; +} + +/** + * vchan_issue_pending - move submitted descriptors to issued list + * vc: virtual channel to update + * + * vc.lock must be held by caller + */ +static inline bool vchan_issue_pending(struct virt_dma_chan *vc) +{ + list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued); + return !list_empty(&vc->desc_issued); +} + +/** + * vchan_cookie_complete - report completion of a descriptor + * vd: virtual descriptor to update + * + * vc.lock must be held by caller + */ +static inline void vchan_cookie_complete(struct virt_dma_desc *vd) +{ + struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); + + dma_cookie_complete(&vd->tx); + dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n", + vd, vd->tx.cookie); + list_add_tail(&vd->node, &vc->desc_completed); + + tasklet_schedule(&vc->task); +} + +/** + * vchan_cyclic_callback - report the completion of a period + * vd: virtual descriptor + */ +static inline void vchan_cyclic_callback(struct virt_dma_desc *vd) +{ + struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); + + vc->cyclic = vd; + tasklet_schedule(&vc->task); +} + +/** + * vchan_next_desc - peek at the next descriptor to be processed + * vc: virtual channel to obtain descriptor from + * + * vc.lock must be held by caller + */ +static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) +{ + if (list_empty(&vc->desc_issued)) + return NULL; + + return list_first_entry(&vc->desc_issued, struct virt_dma_desc, node); +} + +/** + * vchan_get_all_descriptors - obtain all submitted and issued descriptors + * vc: virtual channel to get descriptors from + * head: list of descriptors found + * + * vc.lock must be held by caller + * + * Removes all submitted and issued descriptors from internal lists, and + * provides a list of all descriptors found + */ +static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, + struct list_head *head) +{ + list_splice_tail_init(&vc->desc_submitted, head); + list_splice_tail_init(&vc->desc_issued, head); + list_splice_tail_init(&vc->desc_completed, head); +} + +static inline void 
vchan_free_chan_resources(struct virt_dma_chan *vc) +{ + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&vc->lock, flags); + vchan_get_all_descriptors(vc, &head); + spin_unlock_irqrestore(&vc->lock, flags); + + vchan_dma_desc_free_list(vc, &head); +} + +#endif diff --git a/trunk/drivers/extcon/extcon_gpio.c b/trunk/drivers/extcon/extcon_gpio.c index fe3db45fa83c..3cc152e690b0 100644 --- a/trunk/drivers/extcon/extcon_gpio.c +++ b/trunk/drivers/extcon/extcon_gpio.c @@ -107,7 +107,8 @@ static int __devinit gpio_extcon_probe(struct platform_device *pdev) if (ret < 0) return ret; - ret = gpio_request_one(extcon_data->gpio, GPIOF_DIR_IN, pdev->name); + ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN, + pdev->name); if (ret < 0) goto err; diff --git a/trunk/drivers/gpio/Kconfig b/trunk/drivers/gpio/Kconfig index b16c8a72a2e2..ba7926f5c099 100644 --- a/trunk/drivers/gpio/Kconfig +++ b/trunk/drivers/gpio/Kconfig @@ -294,7 +294,7 @@ config GPIO_MAX732X_IRQ config GPIO_MC9S08DZ60 bool "MX35 3DS BOARD MC9S08DZ60 GPIO functions" - depends on I2C && MACH_MX35_3DS + depends on I2C=y && MACH_MX35_3DS help Select this to enable the MC9S08DZ60 GPIO driver diff --git a/trunk/drivers/gpio/gpio-em.c b/trunk/drivers/gpio/gpio-em.c index 150d9768811d..ec48ed512628 100644 --- a/trunk/drivers/gpio/gpio-em.c +++ b/trunk/drivers/gpio/gpio-em.c @@ -247,9 +247,9 @@ static int __devinit em_gio_irq_domain_init(struct em_gio_priv *p) p->irq_base = irq_alloc_descs(pdata->irq_base, 0, pdata->number_of_pins, numa_node_id()); - if (IS_ERR_VALUE(p->irq_base)) { + if (p->irq_base < 0) { dev_err(&pdev->dev, "cannot get irq_desc\n"); - return -ENXIO; + return p->irq_base; } pr_debug("gio: hw base = %d, nr = %d, sw base = %d\n", pdata->gpio_base, pdata->number_of_pins, p->irq_base); @@ -266,7 +266,7 @@ static int __devinit em_gio_irq_domain_init(struct em_gio_priv *p) return 0; } -static void __devexit em_gio_irq_domain_cleanup(struct em_gio_priv *p) +static void em_gio_irq_domain_cleanup(struct em_gio_priv *p) { struct gpio_em_config *pdata = p->pdev->dev.platform_data; diff --git a/trunk/drivers/gpio/gpio-langwell.c b/trunk/drivers/gpio/gpio-langwell.c index a1c8754f52cf..202a99207b7d 100644 --- a/trunk/drivers/gpio/gpio-langwell.c +++ b/trunk/drivers/gpio/gpio-langwell.c @@ -339,7 +339,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev, resource_size_t start, len; struct lnw_gpio *lnw; u32 gpio_base; - int retval = 0; + int retval; int ngpio = id->driver_data; retval = pci_enable_device(pdev); @@ -357,6 +357,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev, base = ioremap_nocache(start, len); if (!base) { dev_err(&pdev->dev, "error mapping bar1\n"); + retval = -EFAULT; goto err3; } gpio_base = *((u32 *)base + 1); @@ -381,8 +382,10 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev, lnw->domain = irq_domain_add_linear(pdev->dev.of_node, ngpio, &lnw_gpio_irq_ops, lnw); - if (!lnw->domain) + if (!lnw->domain) { + retval = -ENOMEM; goto err3; + } lnw->reg_base = base; lnw->chip.label = dev_name(&pdev->dev); diff --git a/trunk/drivers/gpio/gpio-msic.c b/trunk/drivers/gpio/gpio-msic.c index 71a838f44501..b38986285868 100644 --- a/trunk/drivers/gpio/gpio-msic.c +++ b/trunk/drivers/gpio/gpio-msic.c @@ -99,7 +99,7 @@ static int msic_gpio_to_oreg(unsigned offset) if (offset < 20) return INTEL_MSIC_GPIO0HV0CTLO - offset + 16; - return INTEL_MSIC_GPIO1HV0CTLO + offset + 20; + return INTEL_MSIC_GPIO1HV0CTLO - offset + 20; } static int msic_gpio_direction_input(struct 
gpio_chip *chip, unsigned offset) diff --git a/trunk/drivers/gpio/gpio-mxc.c b/trunk/drivers/gpio/gpio-mxc.c index 4db460b6ecf7..80f44bb64a87 100644 --- a/trunk/drivers/gpio/gpio-mxc.c +++ b/trunk/drivers/gpio/gpio-mxc.c @@ -465,9 +465,8 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev) goto out_iounmap; port->bgc.gc.to_irq = mxc_gpio_to_irq; - port->bgc.gc.base = pdev->id * 32; - port->bgc.dir = port->bgc.read_reg(port->bgc.reg_dir); - port->bgc.data = port->bgc.read_reg(port->bgc.reg_set); + port->bgc.gc.base = (pdev->id < 0) ? of_alias_get_id(np, "gpio") * 32 : + pdev->id * 32; err = gpiochip_add(&port->bgc.gc); if (err) diff --git a/trunk/drivers/gpio/gpio-pxa.c b/trunk/drivers/gpio/gpio-pxa.c index 58a6a63a6ece..9cac88a65f78 100644 --- a/trunk/drivers/gpio/gpio-pxa.c +++ b/trunk/drivers/gpio/gpio-pxa.c @@ -62,6 +62,7 @@ int pxa_last_gpio; #ifdef CONFIG_OF static struct irq_domain *domain; +static struct device_node *pxa_gpio_of_node; #endif struct pxa_gpio_chip { @@ -277,6 +278,24 @@ static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value) (value ? GPSR_OFFSET : GPCR_OFFSET)); } +#ifdef CONFIG_OF_GPIO +static int pxa_gpio_of_xlate(struct gpio_chip *gc, + const struct of_phandle_args *gpiospec, + u32 *flags) +{ + if (gpiospec->args[0] > pxa_last_gpio) + return -EINVAL; + + if (gc != &pxa_gpio_chips[gpiospec->args[0] / 32].chip) + return -EINVAL; + + if (flags) + *flags = gpiospec->args[1]; + + return gpiospec->args[0] % 32; +} +#endif + static int __devinit pxa_init_gpio_chip(int gpio_end, int (*set_wake)(unsigned int, unsigned int)) { @@ -304,6 +323,11 @@ static int __devinit pxa_init_gpio_chip(int gpio_end, c->get = pxa_gpio_get; c->set = pxa_gpio_set; c->to_irq = pxa_gpio_to_irq; +#ifdef CONFIG_OF_GPIO + c->of_node = pxa_gpio_of_node; + c->of_xlate = pxa_gpio_of_xlate; + c->of_gpio_n_cells = 2; +#endif /* number of GPIOs on last bank may be less than 32 */ c->ngpio = (gpio + 31 > gpio_end) ? 
(gpio_end - gpio + 1) : 32; @@ -488,6 +512,7 @@ static int pxa_gpio_nums(void) return count; } +#ifdef CONFIG_OF static struct of_device_id pxa_gpio_dt_ids[] = { { .compatible = "mrvl,pxa-gpio" }, { .compatible = "mrvl,mmp-gpio", .data = (void *)MMP_GPIO }, @@ -505,9 +530,9 @@ static int pxa_irq_domain_map(struct irq_domain *d, unsigned int irq, const struct irq_domain_ops pxa_irq_domain_ops = { .map = pxa_irq_domain_map, + .xlate = irq_domain_xlate_twocell, }; -#ifdef CONFIG_OF static int __devinit pxa_gpio_probe_dt(struct platform_device *pdev) { int ret, nr_banks, nr_gpios, irq_base; @@ -545,6 +570,7 @@ static int __devinit pxa_gpio_probe_dt(struct platform_device *pdev) } domain = irq_domain_add_legacy(np, nr_gpios, irq_base, 0, &pxa_irq_domain_ops, NULL); + pxa_gpio_of_node = np; return 0; err: iounmap(gpio_reg_base); @@ -653,7 +679,7 @@ static struct platform_driver pxa_gpio_driver = { .probe = pxa_gpio_probe, .driver = { .name = "pxa-gpio", - .of_match_table = pxa_gpio_dt_ids, + .of_match_table = of_match_ptr(pxa_gpio_dt_ids), }, }; diff --git a/trunk/drivers/gpio/gpio-rdc321x.c b/trunk/drivers/gpio/gpio-rdc321x.c index e97016af6443..b62d443e9a59 100644 --- a/trunk/drivers/gpio/gpio-rdc321x.c +++ b/trunk/drivers/gpio/gpio-rdc321x.c @@ -170,6 +170,7 @@ static int __devinit rdc321x_gpio_probe(struct platform_device *pdev) rdc321x_gpio_dev->reg2_data_base = r->start + 0x4; rdc321x_gpio_dev->chip.label = "rdc321x-gpio"; + rdc321x_gpio_dev->chip.owner = THIS_MODULE; rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input; rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config; rdc321x_gpio_dev->chip.get = rdc_gpio_get_value; diff --git a/trunk/drivers/gpio/gpio-samsung.c b/trunk/drivers/gpio/gpio-samsung.c index 92f7b2bb79d4..ba126cc04073 100644 --- a/trunk/drivers/gpio/gpio-samsung.c +++ b/trunk/drivers/gpio/gpio-samsung.c @@ -2452,12 +2452,6 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = { .ngpio = EXYNOS5_GPIO_C3_NR, .label = "GPC3", }, - }, { - .chip = { - .base = EXYNOS5_GPC4(0), - .ngpio = EXYNOS5_GPIO_C4_NR, - .label = "GPC4", - }, }, { .chip = { .base = EXYNOS5_GPD0(0), @@ -2512,6 +2506,12 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = { .ngpio = EXYNOS5_GPIO_Y6_NR, .label = "GPY6", }, + }, { + .chip = { + .base = EXYNOS5_GPC4(0), + .ngpio = EXYNOS5_GPIO_C4_NR, + .label = "GPC4", + }, }, { .config = &samsung_gpio_cfgs[9], .irq_base = IRQ_EINT(0), @@ -2836,7 +2836,7 @@ static __init void exynos5_gpiolib_init(void) } /* need to set base address for gpc4 */ - exynos5_gpios_1[11].base = gpio_base1 + 0x2E0; + exynos5_gpios_1[20].base = gpio_base1 + 0x2E0; /* need to set base address for gpx */ chip = &exynos5_gpios_1[21]; diff --git a/trunk/drivers/gpio/gpio-sch.c b/trunk/drivers/gpio/gpio-sch.c index 424dce8e3f30..8707d4572a06 100644 --- a/trunk/drivers/gpio/gpio-sch.c +++ b/trunk/drivers/gpio/gpio-sch.c @@ -241,7 +241,8 @@ static int __devinit sch_gpio_probe(struct platform_device *pdev) break; default: - return -ENODEV; + err = -ENODEV; + goto err_sch_gpio_core; } sch_gpio_core.dev = &pdev->dev; diff --git a/trunk/drivers/gpio/gpiolib-of.c b/trunk/drivers/gpio/gpiolib-of.c index a18c4aa68b1e..f1a45997aea8 100644 --- a/trunk/drivers/gpio/gpiolib-of.c +++ b/trunk/drivers/gpio/gpiolib-of.c @@ -82,7 +82,7 @@ int of_get_named_gpio_flags(struct device_node *np, const char *propname, gpiochip_find(&gg_data, of_gpiochip_find_and_xlate); of_node_put(gg_data.gpiospec.np); - pr_debug("%s exited with status %d\n", __func__, ret); + pr_debug("%s exited with status 
%d\n", __func__, gg_data.out_gpio); return gg_data.out_gpio; } EXPORT_SYMBOL(of_get_named_gpio_flags); diff --git a/trunk/drivers/gpu/drm/Kconfig b/trunk/drivers/gpu/drm/Kconfig index 23120c00a881..90e28081712d 100644 --- a/trunk/drivers/gpu/drm/Kconfig +++ b/trunk/drivers/gpu/drm/Kconfig @@ -22,6 +22,7 @@ menuconfig DRM config DRM_USB tristate depends on DRM + depends on USB_ARCH_HAS_HCD select USB config DRM_KMS_HELPER diff --git a/trunk/drivers/gpu/drm/ast/ast_drv.c b/trunk/drivers/gpu/drm/ast/ast_drv.c index d0c4574ef49c..36164806b9d4 100644 --- a/trunk/drivers/gpu/drm/ast/ast_drv.c +++ b/trunk/drivers/gpu/drm/ast/ast_drv.c @@ -193,6 +193,9 @@ static const struct file_operations ast_fops = { .mmap = ast_mmap, .poll = drm_poll, .fasync = drm_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif .read = drm_read, }; diff --git a/trunk/drivers/gpu/drm/ast/ast_mode.c b/trunk/drivers/gpu/drm/ast/ast_mode.c index 7282c081fb53..a712cafcfa1d 100644 --- a/trunk/drivers/gpu/drm/ast/ast_mode.c +++ b/trunk/drivers/gpu/drm/ast/ast_mode.c @@ -841,7 +841,7 @@ int ast_cursor_init(struct drm_device *dev) ast->cursor_cache = obj; ast->cursor_cache_gpu_addr = gpu_addr; - DRM_ERROR("pinned cursor cache at %llx\n", ast->cursor_cache_gpu_addr); + DRM_DEBUG_KMS("pinned cursor cache at %llx\n", ast->cursor_cache_gpu_addr); return 0; fail: return ret; diff --git a/trunk/drivers/gpu/drm/cirrus/cirrus_drv.c b/trunk/drivers/gpu/drm/cirrus/cirrus_drv.c index 7053140c6596..b83a2d7ddd1a 100644 --- a/trunk/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/trunk/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -74,6 +74,9 @@ static const struct file_operations cirrus_driver_fops = { .unlocked_ioctl = drm_ioctl, .mmap = cirrus_mmap, .poll = drm_poll, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif .fasync = drm_fasync, }; static struct drm_driver driver = { diff --git a/trunk/drivers/gpu/drm/drm_crtc.c b/trunk/drivers/gpu/drm/drm_crtc.c index 08a7aa722d6b..6fbfc244748f 100644 --- a/trunk/drivers/gpu/drm/drm_crtc.c +++ b/trunk/drivers/gpu/drm/drm_crtc.c @@ -1981,7 +1981,7 @@ int drm_mode_cursor_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; - if (!req->flags) + if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags)) return -EINVAL; mutex_lock(&dev->mode_config.mutex); diff --git a/trunk/drivers/gpu/drm/drm_edid.c b/trunk/drivers/gpu/drm/drm_edid.c index a8743c399e83..b7ee230572b7 100644 --- a/trunk/drivers/gpu/drm/drm_edid.c +++ b/trunk/drivers/gpu/drm/drm_edid.c @@ -87,6 +87,9 @@ static struct edid_quirk { int product_id; u32 quirks; } edid_quirk_list[] = { + /* ASUS VW222S */ + { "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING }, + /* Acer AL1706 */ { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 }, /* Acer F51 */ diff --git a/trunk/drivers/gpu/drm/drm_edid_load.c b/trunk/drivers/gpu/drm/drm_edid_load.c index 66d4a28ad5a2..0303935d10e2 100644 --- a/trunk/drivers/gpu/drm/drm_edid_load.c +++ b/trunk/drivers/gpu/drm/drm_edid_load.c @@ -119,7 +119,7 @@ static int edid_load(struct drm_connector *connector, char *name, { const struct firmware *fw; struct platform_device *pdev; - u8 *fwdata = NULL, *edid; + u8 *fwdata = NULL, *edid, *new_edid; int fwsize, expected; int builtin = 0, err = 0; int i, valid_extensions = 0; @@ -195,12 +195,14 @@ static int edid_load(struct drm_connector *connector, char *name, "\"%s\" for connector \"%s\"\n", valid_extensions, edid[0x7e], name, connector_name); edid[0x7e] = valid_extensions; - edid = krealloc(edid, 
(valid_extensions + 1) * EDID_LENGTH, + new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); - if (edid == NULL) { + if (new_edid == NULL) { err = -ENOMEM; + kfree(edid); goto relfw_out; } + edid = new_edid; } connector->display_info.raw_edid = edid; diff --git a/trunk/drivers/gpu/drm/drm_modes.c b/trunk/drivers/gpu/drm/drm_modes.c index b7adb4a967fd..28637c181b15 100644 --- a/trunk/drivers/gpu/drm/drm_modes.c +++ b/trunk/drivers/gpu/drm/drm_modes.c @@ -706,9 +706,6 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags) p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal); p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay); p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal); - - p->crtc_hadjusted = false; - p->crtc_vadjusted = false; } EXPORT_SYMBOL(drm_mode_set_crtcinfo); diff --git a/trunk/drivers/gpu/drm/drm_proc.c b/trunk/drivers/gpu/drm/drm_proc.c index 371c695322d9..da457b18eaaf 100644 --- a/trunk/drivers/gpu/drm/drm_proc.c +++ b/trunk/drivers/gpu/drm/drm_proc.c @@ -89,7 +89,7 @@ static const struct file_operations drm_proc_fops = { * Create a given set of proc files represented by an array of * gdm_proc_lists in the given root directory. */ -int drm_proc_create_files(struct drm_info_list *files, int count, +static int drm_proc_create_files(struct drm_info_list *files, int count, struct proc_dir_entry *root, struct drm_minor *minor) { struct drm_device *dev = minor->dev; @@ -172,7 +172,7 @@ int drm_proc_init(struct drm_minor *minor, int minor_id, return 0; } -int drm_proc_remove_files(struct drm_info_list *files, int count, +static int drm_proc_remove_files(struct drm_info_list *files, int count, struct drm_minor *minor) { struct list_head *pos, *q; diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c index ebacec6f1e48..6345abe9fdee 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -241,6 +241,9 @@ static const struct file_operations exynos_drm_driver_fops = { .poll = drm_poll, .read = drm_read, .unlocked_ioctl = drm_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif .release = drm_release, }; diff --git a/trunk/drivers/gpu/drm/gma500/psb_intel_display.c b/trunk/drivers/gpu/drm/gma500/psb_intel_display.c index 30dc22a7156c..8033526bb53b 100644 --- a/trunk/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/trunk/drivers/gpu/drm/gma500/psb_intel_display.c @@ -1362,6 +1362,9 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe, (struct drm_connector **) (psb_intel_crtc + 1); psb_intel_crtc->mode_set.num_connectors = 0; psb_intel_cursor_init(dev, psb_intel_crtc); + + /* Set to true so that the pipe is forced off on initial config. 
*/ + psb_intel_crtc->active = true; } int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, diff --git a/trunk/drivers/gpu/drm/i810/i810_dma.c b/trunk/drivers/gpu/drm/i810/i810_dma.c index 57d892eaaa6e..463ec6871fe9 100644 --- a/trunk/drivers/gpu/drm/i810/i810_dma.c +++ b/trunk/drivers/gpu/drm/i810/i810_dma.c @@ -115,6 +115,9 @@ static const struct file_operations i810_buffer_fops = { .unlocked_ioctl = drm_ioctl, .mmap = i810_mmap_buffers, .fasync = drm_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif .llseek = noop_llseek, }; diff --git a/trunk/drivers/gpu/drm/i810/i810_drv.c b/trunk/drivers/gpu/drm/i810/i810_drv.c index f9924ad04d09..48cfcca2b350 100644 --- a/trunk/drivers/gpu/drm/i810/i810_drv.c +++ b/trunk/drivers/gpu/drm/i810/i810_drv.c @@ -51,6 +51,9 @@ static const struct file_operations i810_driver_fops = { .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif .llseek = noop_llseek, }; diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.c b/trunk/drivers/gpu/drm/i915/i915_drv.c index ed22612bc847..a24ffbe97c01 100644 --- a/trunk/drivers/gpu/drm/i915/i915_drv.c +++ b/trunk/drivers/gpu/drm/i915/i915_drv.c @@ -346,11 +346,40 @@ static const struct pci_device_id pciidlist[] = { /* aka */ INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */ INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */ + INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */ INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */ INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */ + INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */ INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */ INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */ - INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */ + INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */ + INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */ + INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */ + INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */ + INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */ + INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */ + INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */ + INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */ + INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */ + INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */ + INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */ + INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */ + INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */ + INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */ + INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */ + INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */ + INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */ + INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */ + INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */ + INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */ + INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */ + INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* 
CRW GT2 desktop */ + INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */ + INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */ + INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */ + INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */ + INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */ + INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */ INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info), INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info), INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info), diff --git a/trunk/drivers/gpu/drm/i915/i915_gem.c b/trunk/drivers/gpu/drm/i915/i915_gem.c index 5c4657a54f97..489e2b162b27 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem.c @@ -2365,6 +2365,10 @@ int i915_gpu_idle(struct drm_device *dev) /* Flush everything onto the inactive list. */ for_each_ring(ring, dev_priv, i) { + ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID); + if (ret) + return ret; + ret = i915_ring_idle(ring); if (ret) return ret; @@ -2372,10 +2376,6 @@ int i915_gpu_idle(struct drm_device *dev) /* Is the device fubar? */ if (WARN_ON(!list_empty(&ring->gpu_write_list))) return -EBUSY; - - ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID); - if (ret) - return ret; } return 0; diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_context.c b/trunk/drivers/gpu/drm/i915/i915_gem_context.c index da8b01fb1bf8..a9d58d72bb4d 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_context.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_context.c @@ -451,7 +451,6 @@ int i915_switch_context(struct intel_ring_buffer *ring, struct drm_i915_file_private *file_priv = NULL; struct i915_hw_context *to; struct drm_i915_gem_object *from_obj = ring->last_context_obj; - int ret; if (dev_priv->hw_contexts_disabled) return 0; diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 5af631e788c8..ff2819ea0813 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -291,6 +291,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, target_i915_obj = to_intel_bo(target_obj); target_offset = target_i915_obj->gtt_offset; + /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and + * pipe_control writes because the gpu doesn't properly redirect them + * through the ppgtt for non_secure batchbuffers. */ + if (unlikely(IS_GEN6(dev) && + reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && + !target_i915_obj->has_global_gtt_mapping)) { + i915_gem_gtt_bind_object(target_i915_obj, + target_i915_obj->cache_level); + } + /* The target buffer should have appeared before us in the * exec_object list, so it should have a GTT space bound by now. */ @@ -399,16 +409,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, io_mapping_unmap_atomic(reloc_page); } - /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and - * pipe_control writes because the gpu doesn't properly redirect them - * through the ppgtt for non_secure batchbuffers. 
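
Several hunks in this series (ast, cirrus, exynos, i810 above, and more of the same below) add the identical two lines to a driver's file_operations. As a hedged illustration only, with a made-up structure name and a trimmed callback set, the shape of that table is roughly:

#include <drm/drmP.h>

/* Without .compat_ioctl wired to drm_compat_ioctl(), DRM ioctls issued by
 * 32-bit userspace on a 64-bit kernel are rejected instead of translated. */
static const struct file_operations example_drm_fops = {
        .owner          = THIS_MODULE,
        .open           = drm_open,
        .release        = drm_release,
        .unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = drm_compat_ioctl,
#endif
        .mmap           = drm_mmap,
        .poll           = drm_poll,
        .fasync         = drm_fasync,
        .llseek         = noop_llseek,
};
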
*/ - if (unlikely(IS_GEN6(dev) && - reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && - !target_i915_obj->has_global_gtt_mapping)) { - i915_gem_gtt_bind_object(target_i915_obj, - target_i915_obj->cache_level); - } - /* and update the user's relocation entry */ reloc->presumed_offset = target_offset; diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c b/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c index 9fd25a435536..60815b861ec2 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -72,7 +72,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 * entries. For aliasing ppgtt support we just steal them at the end for * now. */ - first_pd_entry_in_global_pt = 512*1024 - I915_PPGTT_PD_ENTRIES; + first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES; ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); if (!ppgtt) @@ -261,7 +261,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, pte_flags |= GEN6_PTE_CACHE_LLC; break; case I915_CACHE_NONE: - pte_flags |= GEN6_PTE_UNCACHED; + if (IS_HASWELL(dev)) + pte_flags |= HSW_PTE_UNCACHED; + else + pte_flags |= GEN6_PTE_UNCACHED; break; default: BUG(); @@ -361,7 +364,8 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - if (dev_priv->mm.gtt->needs_dmar) + /* don't map imported dma buf objects */ + if (dev_priv->mm.gtt->needs_dmar && !obj->sg_table) return intel_gtt_map_memory(obj->pages, obj->base.size >> PAGE_SHIFT, &obj->sg_list, diff --git a/trunk/drivers/gpu/drm/i915/i915_reg.h b/trunk/drivers/gpu/drm/i915/i915_reg.h index acc99b21e0b6..28725ce5b82c 100644 --- a/trunk/drivers/gpu/drm/i915/i915_reg.h +++ b/trunk/drivers/gpu/drm/i915/i915_reg.h @@ -115,6 +115,7 @@ #define GEN6_PTE_VALID (1 << 0) #define GEN6_PTE_UNCACHED (1 << 1) +#define HSW_PTE_UNCACHED (0) #define GEN6_PTE_CACHE_LLC (2 << 1) #define GEN6_PTE_CACHE_LLC_MLC (3 << 1) #define GEN6_PTE_CACHE_BITS (3 << 1) diff --git a/trunk/drivers/gpu/drm/i915/i915_sysfs.c b/trunk/drivers/gpu/drm/i915/i915_sysfs.c index 2f5388af8df9..7631807a2788 100644 --- a/trunk/drivers/gpu/drm/i915/i915_sysfs.c +++ b/trunk/drivers/gpu/drm/i915/i915_sysfs.c @@ -32,6 +32,7 @@ #include "intel_drv.h" #include "i915_drv.h" +#ifdef CONFIG_PM static u32 calc_residency(struct drm_device *dev, const u32 reg) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -224,3 +225,14 @@ void i915_teardown_sysfs(struct drm_device *dev) device_remove_bin_file(&dev->primary->kdev, &dpf_attrs); sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group); } +#else +void i915_setup_sysfs(struct drm_device *dev) +{ + return; +} + +void i915_teardown_sysfs(struct drm_device *dev) +{ + return; +} +#endif /* CONFIG_PM */ diff --git a/trunk/drivers/gpu/drm/i915/intel_crt.c b/trunk/drivers/gpu/drm/i915/intel_crt.c index 7ed4a41c3965..23bdc8cd1458 100644 --- a/trunk/drivers/gpu/drm/i915/intel_crt.c +++ b/trunk/drivers/gpu/drm/i915/intel_crt.c @@ -326,6 +326,36 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) return ret; } +static struct edid *intel_crt_get_edid(struct drm_connector *connector, + struct i2c_adapter *i2c) +{ + struct edid *edid; + + edid = drm_get_edid(connector, i2c); + + if (!edid && !intel_gmbus_is_forced_bit(i2c)) { + DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n"); + 
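
The i915_sysfs.c hunk above follows a common kernel idiom: when a feature depends on a config option, provide empty stubs so callers never need their own #ifdef. A hedged, generic sketch with invented function names:

struct drm_device;      /* only a pointer is needed here */

#ifdef CONFIG_PM
void example_setup_sysfs(struct drm_device *dev)
{
        /* register the PM-related sysfs attributes */
}

void example_teardown_sysfs(struct drm_device *dev)
{
        /* remove them again */
}
#else
/* Callers never need to know whether CONFIG_PM is set. */
void example_setup_sysfs(struct drm_device *dev) {}
void example_teardown_sysfs(struct drm_device *dev) {}
#endif /* CONFIG_PM */
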
intel_gmbus_force_bit(i2c, true); + edid = drm_get_edid(connector, i2c); + intel_gmbus_force_bit(i2c, false); + } + + return edid; +} + +/* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */ +static int intel_crt_ddc_get_modes(struct drm_connector *connector, + struct i2c_adapter *adapter) +{ + struct edid *edid; + + edid = intel_crt_get_edid(connector, adapter); + if (!edid) + return 0; + + return intel_connector_update_modes(connector, edid); +} + static bool intel_crt_detect_ddc(struct drm_connector *connector) { struct intel_crt *crt = intel_attached_crt(connector); @@ -336,7 +366,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG); i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin); - edid = drm_get_edid(connector, i2c); + edid = intel_crt_get_edid(connector, i2c); if (edid) { bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL; @@ -544,13 +574,13 @@ static int intel_crt_get_modes(struct drm_connector *connector) struct i2c_adapter *i2c; i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin); - ret = intel_ddc_get_modes(connector, i2c); + ret = intel_crt_ddc_get_modes(connector, i2c); if (ret || !IS_G4X(dev)) return ret; /* Try to probe digital port for output in DVI-I -> VGA mode. */ i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB); - return intel_ddc_get_modes(connector, i2c); + return intel_crt_ddc_get_modes(connector, i2c); } static int intel_crt_set_property(struct drm_connector *connector, diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c index f6159765f1eb..2dfa6cf4886b 100644 --- a/trunk/drivers/gpu/drm/i915/intel_display.c +++ b/trunk/drivers/gpu/drm/i915/intel_display.c @@ -869,6 +869,7 @@ intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc, unsigned long bestppm, ppm, absppm; int dotclk, flag; + flag = 0; dotclk = target * 1000; bestppm = 1000000; ppm = absppm = 0; @@ -1383,7 +1384,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, enum pipe pipe, int reg) { u32 val = I915_READ(reg); - WARN(hdmi_pipe_enabled(dev_priv, val, pipe), + WARN(hdmi_pipe_enabled(dev_priv, pipe, val), "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", reg, pipe_name(pipe)); @@ -1403,13 +1404,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, reg = PCH_ADPA; val = I915_READ(reg); - WARN(adpa_pipe_enabled(dev_priv, val, pipe), + WARN(adpa_pipe_enabled(dev_priv, pipe, val), "PCH VGA enabled on transcoder %c, should be disabled\n", pipe_name(pipe)); reg = PCH_LVDS; val = I915_READ(reg); - WARN(lvds_pipe_enabled(dev_priv, val, pipe), + WARN(lvds_pipe_enabled(dev_priv, pipe, val), "PCH LVDS enabled on transcoder %c, should be disabled\n", pipe_name(pipe)); @@ -1871,7 +1872,7 @@ static void disable_pch_hdmi(struct drm_i915_private *dev_priv, enum pipe pipe, int reg) { u32 val = I915_READ(reg); - if (hdmi_pipe_enabled(dev_priv, val, pipe)) { + if (hdmi_pipe_enabled(dev_priv, pipe, val)) { DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", reg, pipe); I915_WRITE(reg, val & ~PORT_ENABLE); @@ -1893,12 +1894,12 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv, reg = PCH_ADPA; val = I915_READ(reg); - if (adpa_pipe_enabled(dev_priv, val, pipe)) + if (adpa_pipe_enabled(dev_priv, pipe, val)) I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); reg = PCH_LVDS; val = I915_READ(reg); - if (lvds_pipe_enabled(dev_priv, val, pipe)) { + if 
(lvds_pipe_enabled(dev_priv, pipe, val)) { DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val); I915_WRITE(reg, val & ~LVDS_PORT_EN); POSTING_READ(reg); @@ -3753,17 +3754,6 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, continue; } - if (intel_encoder->type == INTEL_OUTPUT_EDP) { - /* Use VBT settings if we have an eDP panel */ - unsigned int edp_bpc = dev_priv->edp.bpp / 3; - - if (edp_bpc < display_bpc) { - DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); - display_bpc = edp_bpc; - } - continue; - } - /* Not one of the known troublemakers, check the EDID */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { diff --git a/trunk/drivers/gpu/drm/i915/intel_dp.c b/trunk/drivers/gpu/drm/i915/intel_dp.c index 0a56b9ab0f58..a6c426afaa7a 100644 --- a/trunk/drivers/gpu/drm/i915/intel_dp.c +++ b/trunk/drivers/gpu/drm/i915/intel_dp.c @@ -1174,10 +1174,14 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp) WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); pp = ironlake_get_pp_control(dev_priv); - pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE); + /* We need to switch off panel power _and_ force vdd, for otherwise some + * panels get very unhappy and cease to work. */ + pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); + intel_dp->want_panel_vdd = false; + ironlake_wait_panel_off(intel_dp); } @@ -1287,11 +1291,9 @@ static void intel_dp_prepare(struct drm_encoder *encoder) * ensure that we have vdd while we switch off the panel. */ ironlake_edp_panel_vdd_on(intel_dp); ironlake_edp_backlight_off(intel_dp); - ironlake_edp_panel_off(intel_dp); - intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + ironlake_edp_panel_off(intel_dp); intel_dp_link_down(intel_dp); - ironlake_edp_panel_vdd_off(intel_dp, false); } static void intel_dp_commit(struct drm_encoder *encoder) @@ -1326,11 +1328,9 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) /* Switching the panel off requires vdd. 
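
The assert_pch_* and disable_pch_* hunks above fix swapped arguments: the helpers expect (dev_priv, pipe, val) but were being called with val and pipe reversed, and since both are plain u32 values the compiler cannot object. A hedged toy example of that failure mode, none of it driver code:

#include <linux/types.h>

static bool pipe_bit_set(u32 pipe, u32 val)
{
        return (val >> pipe) & 1;
}

static void argument_order_demo(void)
{
        u32 pipe = 1, val = 0x2;
        bool intended = pipe_bit_set(pipe, val);        /* true */
        bool swapped  = pipe_bit_set(val, pipe);        /* false, but compiles cleanly */

        (void)intended;
        (void)swapped;
}
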
*/ ironlake_edp_panel_vdd_on(intel_dp); ironlake_edp_backlight_off(intel_dp); - ironlake_edp_panel_off(intel_dp); - intel_dp_sink_dpms(intel_dp, mode); + ironlake_edp_panel_off(intel_dp); intel_dp_link_down(intel_dp); - ironlake_edp_panel_vdd_off(intel_dp, false); if (is_cpu_edp(intel_dp)) ironlake_edp_pll_off(encoder); diff --git a/trunk/drivers/gpu/drm/i915/intel_drv.h b/trunk/drivers/gpu/drm/i915/intel_drv.h index 84353559441c..cd54cf88a28f 100644 --- a/trunk/drivers/gpu/drm/i915/intel_drv.h +++ b/trunk/drivers/gpu/drm/i915/intel_drv.h @@ -46,15 +46,16 @@ }) #define wait_for_atomic_us(COND, US) ({ \ - int i, ret__ = -ETIMEDOUT; \ - for (i = 0; i < (US); i++) { \ - if ((COND)) { \ - ret__ = 0; \ - break; \ - } \ - udelay(1); \ - } \ - ret__; \ + unsigned long timeout__ = jiffies + usecs_to_jiffies(US); \ + int ret__ = 0; \ + while (!(COND)) { \ + if (time_after(jiffies, timeout__)) { \ + ret__ = -ETIMEDOUT; \ + break; \ + } \ + cpu_relax(); \ + } \ + ret__; \ }) #define wait_for(COND, MS) _wait_for(COND, MS, 1) @@ -341,6 +342,8 @@ struct intel_fbc_work { int interval; }; +int intel_connector_update_modes(struct drm_connector *connector, + struct edid *edid); int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); extern void intel_attach_force_audio_property(struct drm_connector *connector); @@ -380,7 +383,6 @@ extern void intel_pch_panel_fitting(struct drm_device *dev, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); extern u32 intel_panel_get_max_backlight(struct drm_device *dev); -extern u32 intel_panel_get_backlight(struct drm_device *dev); extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); extern int intel_panel_setup_backlight(struct drm_device *dev); extern void intel_panel_enable_backlight(struct drm_device *dev, diff --git a/trunk/drivers/gpu/drm/i915/intel_i2c.c b/trunk/drivers/gpu/drm/i915/intel_i2c.c index 1991a4408cf9..b9755f6378d8 100644 --- a/trunk/drivers/gpu/drm/i915/intel_i2c.c +++ b/trunk/drivers/gpu/drm/i915/intel_i2c.c @@ -486,9 +486,6 @@ int intel_setup_gmbus(struct drm_device *dev) bus->dev_priv = dev_priv; bus->adapter.algo = &gmbus_algorithm; - ret = i2c_add_adapter(&bus->adapter); - if (ret) - goto err; /* By default use a conservative clock rate */ bus->reg0 = port | GMBUS_RATE_100KHZ; @@ -498,6 +495,10 @@ int intel_setup_gmbus(struct drm_device *dev) bus->force_bit = true; intel_gpio_setup(bus, port); + + ret = i2c_add_adapter(&bus->adapter); + if (ret) + goto err; } intel_i2c_reset(dev_priv->dev); @@ -540,9 +541,6 @@ void intel_teardown_gmbus(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int i; - if (dev_priv->gmbus == NULL) - return; - for (i = 0; i < GMBUS_NUM_PORTS; i++) { struct intel_gmbus *bus = &dev_priv->gmbus[i]; i2c_del_adapter(&bus->adapter); diff --git a/trunk/drivers/gpu/drm/i915/intel_lvds.c b/trunk/drivers/gpu/drm/i915/intel_lvds.c index e05c0d3e3440..e9a6f6aaed85 100644 --- a/trunk/drivers/gpu/drm/i915/intel_lvds.c +++ b/trunk/drivers/gpu/drm/i915/intel_lvds.c @@ -780,6 +780,14 @@ static const struct dmi_system_id intel_no_lvds[] = { DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"), }, }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Gigabyte GA-D525TUD", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), + DMI_MATCH(DMI_BOARD_NAME, "D525TUD"), + }, + }, { } /* terminating entry */ }; diff --git a/trunk/drivers/gpu/drm/i915/intel_modes.c b/trunk/drivers/gpu/drm/i915/intel_modes.c index 
45848b9b670b..29b72593fbb2 100644 --- a/trunk/drivers/gpu/drm/i915/intel_modes.c +++ b/trunk/drivers/gpu/drm/i915/intel_modes.c @@ -32,6 +32,25 @@ #include "intel_drv.h" #include "i915_drv.h" +/** + * intel_connector_update_modes - update connector from edid + * @connector: DRM connector device to use + * @edid: previously read EDID information + */ +int intel_connector_update_modes(struct drm_connector *connector, + struct edid *edid) +{ + int ret; + + drm_mode_connector_update_edid_property(connector, edid); + ret = drm_add_edid_modes(connector, edid); + drm_edid_to_eld(connector, edid); + connector->display_info.raw_edid = NULL; + kfree(edid); + + return ret; +} + /** * intel_ddc_get_modes - get modelist from monitor * @connector: DRM connector device to use @@ -43,18 +62,12 @@ int intel_ddc_get_modes(struct drm_connector *connector, struct i2c_adapter *adapter) { struct edid *edid; - int ret = 0; edid = drm_get_edid(connector, adapter); - if (edid) { - drm_mode_connector_update_edid_property(connector, edid); - ret = drm_add_edid_modes(connector, edid); - drm_edid_to_eld(connector, edid); - connector->display_info.raw_edid = NULL; - kfree(edid); - } + if (!edid) + return 0; - return ret; + return intel_connector_update_modes(connector, edid); } static const struct drm_prop_enum_list force_audio_names[] = { diff --git a/trunk/drivers/gpu/drm/i915/intel_panel.c b/trunk/drivers/gpu/drm/i915/intel_panel.c index 10c7d39034e1..3df4f5fa892a 100644 --- a/trunk/drivers/gpu/drm/i915/intel_panel.c +++ b/trunk/drivers/gpu/drm/i915/intel_panel.c @@ -213,7 +213,7 @@ static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val) return val; } -u32 intel_panel_get_backlight(struct drm_device *dev) +static u32 intel_panel_get_backlight(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 val; @@ -311,9 +311,6 @@ void intel_panel_enable_backlight(struct drm_device *dev, if (dev_priv->backlight_level == 0) dev_priv->backlight_level = intel_panel_get_max_backlight(dev); - dev_priv->backlight_enabled = true; - intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); - if (INTEL_INFO(dev)->gen >= 4) { uint32_t reg, tmp; @@ -326,7 +323,7 @@ void intel_panel_enable_backlight(struct drm_device *dev, * we don't track the backlight dpms state, hence check whether * we have to do anything first. */ if (tmp & BLM_PWM_ENABLE) - return; + goto set_level; if (dev_priv->num_pipe == 3) tmp &= ~BLM_PIPE_SELECT_IVB; @@ -347,6 +344,14 @@ void intel_panel_enable_backlight(struct drm_device *dev, I915_WRITE(BLC_PWM_PCH_CTL1, tmp); } } + +set_level: + /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1. + * BLC_PWM_CPU_CTL may be cleared to zero automatically when these + * registers are set. 
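
The wait_for_atomic_us rework in intel_drv.h earlier in this diff replaces a counted udelay(1) loop with a poll against a real deadline. A hedged sketch of that general pattern, with an illustrative register poll rather than the macro itself:

#include <linux/jiffies.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <asm/processor.h>

static int poll_bit_set(void __iomem *reg, u32 mask, unsigned int timeout_us)
{
        unsigned long deadline = jiffies + usecs_to_jiffies(timeout_us);

        while (!(readl(reg) & mask)) {
                if (time_after(jiffies, deadline))
                        return -ETIMEDOUT;
                cpu_relax();
        }
        return 0;
}
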
+ */ + dev_priv->backlight_enabled = true; + intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); } static void intel_panel_init_backlight(struct drm_device *dev) diff --git a/trunk/drivers/gpu/drm/i915/intel_pm.c b/trunk/drivers/gpu/drm/i915/intel_pm.c index 94aabcaa3a67..1881c8c83f0e 100644 --- a/trunk/drivers/gpu/drm/i915/intel_pm.c +++ b/trunk/drivers/gpu/drm/i915/intel_pm.c @@ -2441,17 +2441,10 @@ static void gen6_enable_rps(struct drm_device *dev) dev_priv->max_delay << 24 | dev_priv->min_delay << 16); - if (IS_HASWELL(dev)) { - I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); - I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); - I915_WRITE(GEN6_RP_UP_EI, 66000); - I915_WRITE(GEN6_RP_DOWN_EI, 350000); - } else { - I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000); - I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000); - I915_WRITE(GEN6_RP_UP_EI, 100000); - I915_WRITE(GEN6_RP_DOWN_EI, 5000000); - } + I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); + I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); + I915_WRITE(GEN6_RP_UP_EI, 66000); + I915_WRITE(GEN6_RP_DOWN_EI, 350000); I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); I915_WRITE(GEN6_RP_CONTROL, @@ -3963,6 +3956,7 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) DRM_ERROR("Force wake wait timed out\n"); I915_WRITE_NOTRACE(FORCEWAKE, 1); + POSTING_READ(FORCEWAKE); if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) DRM_ERROR("Force wake wait timed out\n"); @@ -3983,6 +3977,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) DRM_ERROR("Force wake wait timed out\n"); I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); + POSTING_READ(FORCEWAKE_MT); if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) DRM_ERROR("Force wake wait timed out\n"); @@ -4018,14 +4013,14 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE, 0); - /* The below doubles as a POSTING_READ */ + POSTING_READ(FORCEWAKE); gen6_gt_check_fifodbg(dev_priv); } static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1)); - /* The below doubles as a POSTING_READ */ + POSTING_READ(FORCEWAKE_MT); gen6_gt_check_fifodbg(dev_priv); } diff --git a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c index bf0195a96d53..e2a73b38abe9 100644 --- a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -227,31 +227,36 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring, * number of bits based on the write domains has little performance * impact. */ - flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; - flags |= PIPE_CONTROL_TLB_INVALIDATE; - flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; - flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; - /* - * Ensure that any following seqno writes only happen when the render - * cache is indeed flushed (but only if the caller actually wants that). - */ - if (flush_domains) + if (flush_domains) { + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + /* + * Ensure that any following seqno writes only happen + * when the render cache is indeed flushed. 
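
The POSTING_READ() additions to the forcewake helpers above rest on a standard MMIO rule: writes may sit in a posting buffer, so a write that a following poll depends on should be flushed by reading back from the same device first. A hedged, generic sketch with made-up register offsets, not i915 registers:

#include <linux/io.h>
#include <asm/processor.h>

#define REQ_REG  0x100  /* illustrative offsets only */
#define ACK_REG  0x104

static void request_and_wait(void __iomem *mmio)
{
        writel(1, mmio + REQ_REG);
        (void)readl(mmio + REQ_REG);            /* posting read: push the write out */

        while (!(readl(mmio + ACK_REG) & 1))    /* only now is it safe to poll the ack */
                cpu_relax();
}
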
+ */ flags |= PIPE_CONTROL_CS_STALL; + } + if (invalidate_domains) { + flags |= PIPE_CONTROL_TLB_INVALIDATE; + flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; + /* + * TLB invalidate requires a post-sync write. + */ + flags |= PIPE_CONTROL_QW_WRITE; + } - ret = intel_ring_begin(ring, 6); + ret = intel_ring_begin(ring, 4); if (ret) return ret; - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); intel_ring_emit(ring, flags); intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, 0); /* lower dword */ - intel_ring_emit(ring, 0); /* uppwer dword */ - intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, 0); intel_ring_advance(ring); return 0; @@ -289,8 +294,6 @@ static int init_ring_common(struct intel_ring_buffer *ring) I915_WRITE_HEAD(ring, 0); ring->write_tail(ring, 0); - /* Initialize the ring. */ - I915_WRITE_START(ring, obj->gtt_offset); head = I915_READ_HEAD(ring) & HEAD_ADDR; /* G45 ring initialization fails to reset head to zero */ @@ -316,6 +319,11 @@ static int init_ring_common(struct intel_ring_buffer *ring) } } + /* Initialize the ring. This must happen _after_ we've cleared the ring + * registers with the above sequence (the readback of the HEAD registers + * also enforces ordering), otherwise the hw might lose the new ring + * register values. */ + I915_WRITE_START(ring, obj->gtt_offset); I915_WRITE_CTL(ring, ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID); diff --git a/trunk/drivers/gpu/drm/i915/intel_sdvo.c b/trunk/drivers/gpu/drm/i915/intel_sdvo.c index 26a6a4d0d078..d81bb0bf2885 100644 --- a/trunk/drivers/gpu/drm/i915/intel_sdvo.c +++ b/trunk/drivers/gpu/drm/i915/intel_sdvo.c @@ -444,13 +444,16 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, struct i2c_msg *msgs; int i, ret = true; + /* Would be simpler to allocate both in one go ? 
*/ buf = (u8 *)kzalloc(args_len * 2 + 2, GFP_KERNEL); if (!buf) return false; msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL); - if (!msgs) + if (!msgs) { + kfree(buf); return false; + } intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len); @@ -1689,6 +1692,7 @@ static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector) edid = intel_sdvo_get_edid(connector); if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL) has_audio = drm_detect_monitor_audio(edid); + kfree(edid); return has_audio; } diff --git a/trunk/drivers/gpu/drm/i915/intel_sprite.c b/trunk/drivers/gpu/drm/i915/intel_sprite.c index cc8df4de2d92..7644f31a3778 100644 --- a/trunk/drivers/gpu/drm/i915/intel_sprite.c +++ b/trunk/drivers/gpu/drm/i915/intel_sprite.c @@ -60,11 +60,11 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, switch (fb->pixel_format) { case DRM_FORMAT_XBGR8888: - sprctl |= SPRITE_FORMAT_RGBX888; + sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; pixel_size = 4; break; case DRM_FORMAT_XRGB8888: - sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; + sprctl |= SPRITE_FORMAT_RGBX888; pixel_size = 4; break; case DRM_FORMAT_YUYV: diff --git a/trunk/drivers/gpu/drm/mgag200/mgag200_drv.c b/trunk/drivers/gpu/drm/mgag200/mgag200_drv.c index ea1024d79974..e5f145d2cb3b 100644 --- a/trunk/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/trunk/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -84,6 +84,9 @@ static const struct file_operations mgag200_driver_fops = { .mmap = mgag200_mmap, .poll = drm_poll, .fasync = drm_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif .read = drm_read, }; diff --git a/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c b/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c index a4d7c500c97b..b69642d5d850 100644 --- a/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -468,10 +468,11 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock) { unsigned int vcomax, vcomin, pllreffreq; unsigned int delta, tmpdelta; - unsigned int testr, testn, testm, testo; + int testr, testn, testm, testo; unsigned int p, m, n; - unsigned int computed; + unsigned int computed, vco; int tmp; + const unsigned int m_div_val[] = { 1, 2, 4, 8 }; m = n = p = 0; vcomax = 1488000; @@ -490,12 +491,13 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock) if (delta == 0) break; for (testo = 5; testo < 33; testo++) { - computed = pllreffreq * (testn + 1) / + vco = pllreffreq * (testn + 1) / (testr + 1); - if (computed < vcomin) + if (vco < vcomin) continue; - if (computed > vcomax) + if (vco > vcomax) continue; + computed = vco / (m_div_val[testm] * (testo + 1)); if (computed > clock) tmpdelta = computed - clock; else diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_acpi.c b/trunk/drivers/gpu/drm/nouveau/nouveau_acpi.c index fc841e87b343..26ebffebe710 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -211,11 +211,6 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state); } -static int nouveau_dsm_init(void) -{ - return 0; -} - static int nouveau_dsm_get_client_id(struct pci_dev *pdev) { /* easy option one - intel vendor ID means Integrated */ @@ -232,7 +227,6 @@ static int nouveau_dsm_get_client_id(struct pci_dev *pdev) static struct vga_switcheroo_handler nouveau_dsm_handler = { .switchto = nouveau_dsm_switchto, .power_state = 
nouveau_dsm_power_state, - .init = nouveau_dsm_init, .get_client_id = nouveau_dsm_get_client_id, }; diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_i2c.c b/trunk/drivers/gpu/drm/nouveau/nouveau_i2c.c index 77e564667b5c..240cf962c999 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_i2c.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_i2c.c @@ -229,7 +229,7 @@ nouveau_i2c_init(struct drm_device *dev) } break; case 6: /* NV50- DP AUX */ - port->drive = entry[0]; + port->drive = entry[0] & 0x0f; port->sense = port->drive; port->adapter.algo = &nouveau_dp_i2c_algo; break; diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_state.c b/trunk/drivers/gpu/drm/nouveau/nouveau_state.c index 1cdfd6e757ce..c61014442aa9 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_state.c @@ -731,15 +731,16 @@ nouveau_card_init(struct drm_device *dev) case 0xa3: case 0xa5: case 0xa8: - case 0xaf: nva3_copy_create(dev); break; } break; case NV_C0: - nvc0_copy_create(dev, 1); + if (!(nv_rd32(dev, 0x022500) & 0x00000200)) + nvc0_copy_create(dev, 1); case NV_D0: - nvc0_copy_create(dev, 0); + if (!(nv_rd32(dev, 0x022500) & 0x00000100)) + nvc0_copy_create(dev, 0); break; default: break; diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_gpio.c b/trunk/drivers/gpu/drm/nouveau/nv50_gpio.c index f429e6a8ca7a..f03490534893 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv50_gpio.c +++ b/trunk/drivers/gpu/drm/nouveau/nv50_gpio.c @@ -115,6 +115,9 @@ nv50_gpio_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + /* initialise gpios and routing to vbios defaults */ + nouveau_gpio_reset(dev); + /* disable, and ack any pending gpio interrupts */ nv_wr32(dev, 0xe050, 0x00000000); nv_wr32(dev, 0xe054, 0xffffffff); diff --git a/trunk/drivers/gpu/drm/nouveau/nv84_fifo.c b/trunk/drivers/gpu/drm/nouveau/nv84_fifo.c index cc82d799fc3b..c564c5e4c30a 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv84_fifo.c +++ b/trunk/drivers/gpu/drm/nouveau/nv84_fifo.c @@ -117,17 +117,22 @@ nv84_fifo_context_del(struct nouveau_channel *chan, int engine) struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; unsigned long flags; + u32 save; /* remove channel from playlist, will context switch if active */ spin_lock_irqsave(&dev_priv->context_switch_lock, flags); nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000); nv50_fifo_playlist_update(dev); + save = nv_mask(dev, 0x002520, 0x0000003f, 0x15); + /* tell any engines on this channel to unload their contexts */ nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12); if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id); + nv_wr32(dev, 0x002520, save); + nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000); spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); @@ -184,10 +189,13 @@ nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv84_fifo_priv *priv = nv_engine(dev, engine); int i; + u32 save; /* set playlist length to zero, fifo will unload context */ nv_wr32(dev, 0x0032ec, 0); + save = nv_mask(dev, 0x002520, 0x0000003f, 0x15); + /* tell all connected engines to unload their contexts */ for (i = 0; i < priv->base.channels; i++) { struct nouveau_channel *chan = dev_priv->channels.ptr[i]; @@ -199,6 +207,7 @@ nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend) } } + nv_wr32(dev, 0x002520, save); nv_wr32(dev, 0x002140, 0); 
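
Two earlier hunks share a theme of undoing partial work on every path: intel_sdvo_write_cmd now frees buf when the msgs allocation fails, and the nv84_fifo code above saves and restores register 0x002520 around the context unload. For the allocation side, the kernel's usual shape is goto-based unwinding; a hedged sketch with invented names:

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>

static int build_and_send(size_t len)
{
        u8 *hdr, *payload;
        int ret = 0;

        hdr = kzalloc(len, GFP_KERNEL);
        if (!hdr)
                return -ENOMEM;

        payload = kzalloc(len * 2, GFP_KERNEL);
        if (!payload) {
                ret = -ENOMEM;
                goto out_free_hdr;      /* hdr must not leak on this path */
        }

        /* ... use hdr and payload ... */

        kfree(payload);
out_free_hdr:
        kfree(hdr);
        return ret;
}
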
return 0; } diff --git a/trunk/drivers/gpu/drm/nouveau/nvc0_pm.c b/trunk/drivers/gpu/drm/nouveau/nvc0_pm.c index 7c95c44e2887..4e712b10ebdb 100644 --- a/trunk/drivers/gpu/drm/nouveau/nvc0_pm.c +++ b/trunk/drivers/gpu/drm/nouveau/nvc0_pm.c @@ -557,7 +557,7 @@ prog_mem(struct drm_device *dev, struct nvc0_pm_state *info) nouveau_mem_exec(&exec, info->perflvl); if (dev_priv->chipset < 0xd0) - nv_wr32(dev, 0x611200, 0x00003300); + nv_wr32(dev, 0x611200, 0x00003330); else nv_wr32(dev, 0x62c000, 0x03030300); } diff --git a/trunk/drivers/gpu/drm/nouveau/nvd0_display.c b/trunk/drivers/gpu/drm/nouveau/nvd0_display.c index d0d60e1e7f95..8a2fc89b7763 100644 --- a/trunk/drivers/gpu/drm/nouveau/nvd0_display.c +++ b/trunk/drivers/gpu/drm/nouveau/nvd0_display.c @@ -790,7 +790,7 @@ nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); int ch = EVO_CURS(nv_crtc->index); - evo_piow(crtc->dev, ch, 0x0084, (y << 16) | x); + evo_piow(crtc->dev, ch, 0x0084, (y << 16) | (x & 0xffff)); evo_piow(crtc->dev, ch, 0x0080, 0x00000000); return 0; } @@ -1510,10 +1510,10 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode, case OUTPUT_DP: if (nv_connector->base.display_info.bpc == 6) { nv_encoder->dp.datarate = mode->clock * 18 / 8; - syncs |= 0x00000140; + syncs |= 0x00000002 << 6; } else { nv_encoder->dp.datarate = mode->clock * 24 / 8; - syncs |= 0x00000180; + syncs |= 0x00000005 << 6; } if (nv_encoder->dcb->sorconf.link & 1) diff --git a/trunk/drivers/gpu/drm/nouveau/nve0_fifo.c b/trunk/drivers/gpu/drm/nouveau/nve0_fifo.c index 1855ecbd843b..e98d144e6eb9 100644 --- a/trunk/drivers/gpu/drm/nouveau/nve0_fifo.c +++ b/trunk/drivers/gpu/drm/nouveau/nve0_fifo.c @@ -294,6 +294,25 @@ nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit) printk(" on channel 0x%010llx\n", (u64)inst << 12); } +static int +nve0_fifo_page_flip(struct drm_device *dev, u32 chid) +{ + struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO); + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = NULL; + unsigned long flags; + int ret = -EINVAL; + + spin_lock_irqsave(&dev_priv->channels.lock, flags); + if (likely(chid >= 0 && chid < priv->base.channels)) { + chan = dev_priv->channels.ptr[chid]; + if (likely(chan)) + ret = nouveau_finish_page_flip(chan, NULL); + } + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); + return ret; +} + static void nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit) { @@ -303,11 +322,21 @@ nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit) u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f; u32 subc = (addr & 0x00070000); u32 mthd = (addr & 0x00003ffc); + u32 show = stat; + + if (stat & 0x00200000) { + if (mthd == 0x0054) { + if (!nve0_fifo_page_flip(dev, chid)) + show &= ~0x00200000; + } + } - NV_INFO(dev, "PSUBFIFO %d:", unit); - nouveau_bitfield_print(nve0_fifo_subfifo_intr, stat); - NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n", - unit, chid, subc, mthd, data); + if (show) { + NV_INFO(dev, "PFIFO%d:", unit); + nouveau_bitfield_print(nve0_fifo_subfifo_intr, show); + NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n", + unit, chid, subc, mthd, data); + } nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008); nv_wr32(dev, 0x040108 + (unit * 0x2000), stat); diff --git a/trunk/drivers/gpu/drm/radeon/atombios_crtc.c b/trunk/drivers/gpu/drm/radeon/atombios_crtc.c index 9e6f76fec527..2817101fb167 100644 --- 
a/trunk/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/trunk/drivers/gpu/drm/radeon/atombios_crtc.c @@ -258,8 +258,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) radeon_crtc->enabled = true; /* adjust pm to dpms changes BEFORE enabling crtcs */ radeon_pm_compute_clocks(rdev); - /* disable crtc pair power gating before programming */ - if (ASIC_IS_DCE6(rdev)) + if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) atombios_powergate_crtc(crtc, ATOM_DISABLE); atombios_enable_crtc(crtc, ATOM_ENABLE); if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) @@ -278,25 +277,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); atombios_enable_crtc(crtc, ATOM_DISABLE); radeon_crtc->enabled = false; - /* power gating is per-pair */ - if (ASIC_IS_DCE6(rdev)) { - struct drm_crtc *other_crtc; - struct radeon_crtc *other_radeon_crtc; - list_for_each_entry(other_crtc, &rdev->ddev->mode_config.crtc_list, head) { - other_radeon_crtc = to_radeon_crtc(other_crtc); - if (((radeon_crtc->crtc_id == 0) && (other_radeon_crtc->crtc_id == 1)) || - ((radeon_crtc->crtc_id == 1) && (other_radeon_crtc->crtc_id == 0)) || - ((radeon_crtc->crtc_id == 2) && (other_radeon_crtc->crtc_id == 3)) || - ((radeon_crtc->crtc_id == 3) && (other_radeon_crtc->crtc_id == 2)) || - ((radeon_crtc->crtc_id == 4) && (other_radeon_crtc->crtc_id == 5)) || - ((radeon_crtc->crtc_id == 5) && (other_radeon_crtc->crtc_id == 4))) { - /* if both crtcs in the pair are off, enable power gating */ - if (other_radeon_crtc->enabled == false) - atombios_powergate_crtc(crtc, ATOM_ENABLE); - break; - } - } - } + if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) + atombios_powergate_crtc(crtc, ATOM_ENABLE); /* adjust pm to dpms changes AFTER disabling crtcs */ radeon_pm_compute_clocks(rdev); break; @@ -444,11 +426,28 @@ union atom_enable_ss { static void atombios_crtc_program_ss(struct radeon_device *rdev, int enable, int pll_id, + int crtc_id, struct radeon_atom_ss *ss) { + unsigned i; int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL); union atom_enable_ss args; + if (!enable) { + for (i = 0; i < rdev->num_crtc; i++) { + if (rdev->mode_info.crtcs[i] && + rdev->mode_info.crtcs[i]->enabled && + i != crtc_id && + pll_id == rdev->mode_info.crtcs[i]->pll_id) { + /* one other crtc is using this pll don't turn + * off spread spectrum as it might turn off + * display on active crtc + */ + return; + } + } + } + memset(&args, 0, sizeof(args)); if (ASIC_IS_DCE5(rdev)) { @@ -1028,7 +1027,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, &ref_div, &post_div); - atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, &ss); + atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss); atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, encoder_mode, radeon_encoder->encoder_id, mode->clock, @@ -1051,7 +1050,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode ss.step = step_size; } - atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, &ss); + atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss); } } @@ -1531,12 +1530,12 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) * crtc virtual pixel clock. 
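
The atombios_crtc_program_ss() change above (and the atombios_crtc_disable() change a little further on) both guard a shared PLL: before disabling spread spectrum or the PLL itself, they check whether another active CRTC still drives it. A hedged sketch of that check with simplified stand-in types, not radeon code:

#include <linux/types.h>

struct crtc_state {
        bool enabled;
        int pll_id;
};

/* Report whether any other active CRTC still uses pll_id; if so, the
 * caller must leave the PLL (and its spread spectrum) alone. */
static bool pll_has_other_user(const struct crtc_state *crtcs, int num_crtc,
                               int self, int pll_id)
{
        int i;

        for (i = 0; i < num_crtc; i++) {
                if (i == self)
                        continue;
                if (crtcs[i].enabled && crtcs[i].pll_id == pll_id)
                        return true;
        }
        return false;
}
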
*/ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) { - if (ASIC_IS_DCE5(rdev)) - return ATOM_DCPLL; + if (rdev->clock.dp_extclk) + return ATOM_PPLL_INVALID; else if (ASIC_IS_DCE6(rdev)) return ATOM_PPLL0; - else if (rdev->clock.dp_extclk) - return ATOM_PPLL_INVALID; + else if (ASIC_IS_DCE5(rdev)) + return ATOM_DCPLL; } } } @@ -1572,11 +1571,11 @@ void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev) ASIC_INTERNAL_SS_ON_DCPLL, rdev->clock.default_dispclk); if (ss_enabled) - atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, &ss); + atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, -1, &ss); /* XXX: DCE5, make sure voltage, dispclk is high enough */ atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk); if (ss_enabled) - atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, &ss); + atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, -1, &ss); } } @@ -1635,18 +1634,28 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, static void atombios_crtc_prepare(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + radeon_crtc->in_mode_set = true; /* pick pll */ radeon_crtc->pll_id = radeon_atom_pick_pll(crtc); + /* disable crtc pair power gating before programming */ + if (ASIC_IS_DCE6(rdev)) + atombios_powergate_crtc(crtc, ATOM_DISABLE); + atombios_lock_crtc(crtc, ATOM_ENABLE); atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); } static void atombios_crtc_commit(struct drm_crtc *crtc) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON); atombios_lock_crtc(crtc, ATOM_DISABLE); + radeon_crtc->in_mode_set = false; } static void atombios_crtc_disable(struct drm_crtc *crtc) @@ -1655,9 +1664,22 @@ static void atombios_crtc_disable(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_atom_ss ss; + int i; atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + for (i = 0; i < rdev->num_crtc; i++) { + if (rdev->mode_info.crtcs[i] && + rdev->mode_info.crtcs[i]->enabled && + i != radeon_crtc->crtc_id && + radeon_crtc->pll_id == rdev->mode_info.crtcs[i]->pll_id) { + /* one other crtc is using this pll don't turn + * off the pll + */ + goto done; + } + } + switch (radeon_crtc->pll_id) { case ATOM_PPLL1: case ATOM_PPLL2: @@ -1674,6 +1696,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc) default: break; } +done: radeon_crtc->pll_id = -1; } diff --git a/trunk/drivers/gpu/drm/radeon/atombios_dp.c b/trunk/drivers/gpu/drm/radeon/atombios_dp.c index 7712cf5ab33b..3623b98ed3fe 100644 --- a/trunk/drivers/gpu/drm/radeon/atombios_dp.c +++ b/trunk/drivers/gpu/drm/radeon/atombios_dp.c @@ -577,30 +577,25 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder, struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; + u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector); + u8 tmp; if (!ASIC_IS_DCE4(rdev)) return panel_mode; - if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == - ENCODER_OBJECT_ID_NUTMEG) - panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; - else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == - ENCODER_OBJECT_ID_TRAVIS) { - u8 id[6]; - int i; - for (i = 0; i < 6; i++) - id[i] = radeon_read_dpcd_reg(radeon_connector, 0x503 + i); - if (id[0] == 
0x73 && - id[1] == 0x69 && - id[2] == 0x76 && - id[3] == 0x61 && - id[4] == 0x72 && - id[5] == 0x54) + if (dp_bridge != ENCODER_OBJECT_ID_NONE) { + /* DP bridge chips */ + tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); + if (tmp & 1) + panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; + else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) || + (dp_bridge == ENCODER_OBJECT_ID_TRAVIS)) panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; else - panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; + panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { - u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); + /* eDP */ + tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); if (tmp & 1) panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; } diff --git a/trunk/drivers/gpu/drm/radeon/atombios_encoders.c b/trunk/drivers/gpu/drm/radeon/atombios_encoders.c index f9bc27fe269a..6e8803a1170c 100644 --- a/trunk/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/trunk/drivers/gpu/drm/radeon/atombios_encoders.c @@ -1379,6 +1379,8 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); struct radeon_connector *radeon_connector = NULL; struct radeon_connector_atom_dig *radeon_dig_connector = NULL; @@ -1390,19 +1392,37 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) switch (mode) { case DRM_MODE_DPMS_ON: - /* some early dce3.2 boards have a bug in their transmitter control table */ - if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730) || - ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { - if (ASIC_IS_DCE6(rdev)) { - /* It seems we need to call ATOM_ENCODER_CMD_SETUP again - * before reenabling encoder on DPMS ON, otherwise we never - * get picture - */ - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { + if (!connector) + dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; + else + dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector); + + /* setup and enable the encoder */ + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); + atombios_dig_encoder_setup(encoder, + ATOM_ENCODER_CMD_SETUP_PANEL_MODE, + dig->panel_mode); + if (ext_encoder) { + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) + atombios_external_encoder_setup(encoder, ext_encoder, + EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); } atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); - } else { + } else if (ASIC_IS_DCE4(rdev)) { + /* setup and enable the encoder */ + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); + /* enable the transmitter */ + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); + } else { + /* setup and enable the encoder and transmitter */ + atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); + /* some early dce3.2 boards have a bug 
in their transmitter control table */ + if ((rdev->family != CHIP_RV710) || (rdev->family != CHIP_RV730)) + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); } if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { @@ -1420,10 +1440,19 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { + /* disable the transmitter */ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - else + } else if (ASIC_IS_DCE4(rdev)) { + /* disable the transmitter */ + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); + } else { + /* disable the encoder and transmitter */ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); + atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); + } if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { if (ASIC_IS_DCE4(rdev)) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); @@ -1740,13 +1769,34 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_encoder *test_encoder; - struct radeon_encoder_atom_dig *dig; + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; uint32_t dig_enc_in_use = 0; - /* DCE4/5 */ - if (ASIC_IS_DCE4(rdev)) { - dig = radeon_encoder->enc_priv; - if (ASIC_IS_DCE41(rdev)) { + if (ASIC_IS_DCE6(rdev)) { + /* DCE6 */ + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + if (dig->linkb) + return 1; + else + return 0; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + if (dig->linkb) + return 3; + else + return 2; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + if (dig->linkb) + return 5; + else + return 4; + break; + } + } else if (ASIC_IS_DCE4(rdev)) { + /* DCE4/5 */ + if (ASIC_IS_DCE41(rdev) && !ASIC_IS_DCE61(rdev)) { /* ontario follows DCE4 */ if (rdev->family == CHIP_PALM) { if (dig->linkb) @@ -1848,10 +1898,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); radeon_encoder->pixel_clock = adjusted_mode->clock; + /* need to call this here rather than in prepare() since we need some crtc info */ + radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) { if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) atombios_yuv_setup(encoder, true); @@ -1870,38 +1922,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); - struct radeon_encoder_atom_dig *dig = 
radeon_encoder->enc_priv; - - if (!connector) - dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; - else - dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector); - - /* setup and enable the encoder */ - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); - atombios_dig_encoder_setup(encoder, - ATOM_ENCODER_CMD_SETUP_PANEL_MODE, - dig->panel_mode); - } else if (ASIC_IS_DCE4(rdev)) { - /* disable the transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - /* setup and enable the encoder */ - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); - - /* enable the transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); - } else { - /* disable the encoder and transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); - - /* setup and enable the encoder and transmitter */ - atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); - } + /* handled in dpms */ break; case ENCODER_OBJECT_ID_INTERNAL_DDI: case ENCODER_OBJECT_ID_INTERNAL_DVO1: @@ -1922,14 +1943,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, break; } - if (ext_encoder) { - if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) - atombios_external_encoder_setup(encoder, ext_encoder, - EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); - else - atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); - } - atombios_apply_encoder_quirks(encoder, adjusted_mode); if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { @@ -2116,7 +2129,6 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) } radeon_atom_output_lock(encoder, true); - radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); if (connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); @@ -2137,6 +2149,7 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) static void radeon_atom_encoder_commit(struct drm_encoder *encoder) { + /* need to call this here as we need the crtc set up */ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON); radeon_atom_output_lock(encoder, false); } @@ -2177,14 +2190,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - if (ASIC_IS_DCE4(rdev)) - /* disable the transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - else { - /* disable the encoder and transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); - } + /* handled in dpms */ break; case ENCODER_OBJECT_ID_INTERNAL_DDI: case ENCODER_OBJECT_ID_INTERNAL_DVO1: diff --git a/trunk/drivers/gpu/drm/radeon/evergreen.c b/trunk/drivers/gpu/drm/radeon/evergreen.c index e585a3b947eb..e93b80a6d4e9 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreen.c +++ b/trunk/drivers/gpu/drm/radeon/evergreen.c @@ -1229,24 +1229,8 @@ void evergreen_agp_enable(struct radeon_device *rdev) void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) { - save->vga_control[0] = RREG32(D1VGA_CONTROL); - save->vga_control[1] = RREG32(D2VGA_CONTROL); save->vga_render_control = 
RREG32(VGA_RENDER_CONTROL); save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); - save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET); - save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); - if (rdev->num_crtc >= 4) { - save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL); - save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL); - save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET); - save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); - } - if (rdev->num_crtc >= 6) { - save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL); - save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL); - save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET); - save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); - } /* Stop all video */ WREG32(VGA_RENDER_CONTROL, 0); @@ -1357,47 +1341,6 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s /* Unlock host access */ WREG32(VGA_HDP_CONTROL, save->vga_hdp_control); mdelay(1); - /* Restore video state */ - WREG32(D1VGA_CONTROL, save->vga_control[0]); - WREG32(D2VGA_CONTROL, save->vga_control[1]); - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]); - WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]); - WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]); - } - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); - } - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]); - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]); - } - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); - } WREG32(VGA_RENDER_CONTROL, save->vga_render_control); } @@ -1986,10 +1929,18 @@ static void evergreen_gpu_init(struct radeon_device *rdev) if (rdev->flags & RADEON_IS_IGP) rdev->config.evergreen.tile_config |= 1 << 4; else { - if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) - rdev->config.evergreen.tile_config |= 1 << 
4; - else + switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) { + case 0: /* four banks */ rdev->config.evergreen.tile_config |= 0 << 4; + break; + case 1: /* eight banks */ + rdev->config.evergreen.tile_config |= 1 << 4; + break; + case 2: /* sixteen banks */ + default: + rdev->config.evergreen.tile_config |= 2 << 4; + break; + } } rdev->config.evergreen.tile_config |= 0 << 8; rdev->config.evergreen.tile_config |= diff --git a/trunk/drivers/gpu/drm/radeon/evergreen_cs.c b/trunk/drivers/gpu/drm/radeon/evergreen_cs.c index c16554122ccd..e44a62a07fe3 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/trunk/drivers/gpu/drm/radeon/evergreen_cs.c @@ -788,6 +788,13 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p, case V_030000_SQ_TEX_DIM_1D_ARRAY: case V_030000_SQ_TEX_DIM_2D_ARRAY: depth = 1; + break; + case V_030000_SQ_TEX_DIM_2D_MSAA: + case V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA: + surf.nsamples = 1 << llevel; + llevel = 0; + depth = 1; + break; case V_030000_SQ_TEX_DIM_3D: break; default: @@ -961,13 +968,15 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p) if (track->db_dirty) { /* Check stencil buffer */ - if (G_028800_STENCIL_ENABLE(track->db_depth_control)) { + if (G_028044_FORMAT(track->db_s_info) != V_028044_STENCIL_INVALID && + G_028800_STENCIL_ENABLE(track->db_depth_control)) { r = evergreen_cs_track_validate_stencil(p); if (r) return r; } /* Check depth buffer */ - if (G_028800_Z_ENABLE(track->db_depth_control)) { + if (G_028040_FORMAT(track->db_z_info) != V_028040_Z_INVALID && + G_028800_Z_ENABLE(track->db_depth_control)) { r = evergreen_cs_track_validate_depth(p); if (r) return r; diff --git a/trunk/drivers/gpu/drm/radeon/evergreend.h b/trunk/drivers/gpu/drm/radeon/evergreend.h index d3bd098e4e19..79347855d9bf 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreend.h +++ b/trunk/drivers/gpu/drm/radeon/evergreend.h @@ -1277,6 +1277,8 @@ #define S_028044_FORMAT(x) (((x) & 0x1) << 0) #define G_028044_FORMAT(x) (((x) >> 0) & 0x1) #define C_028044_FORMAT 0xFFFFFFFE +#define V_028044_STENCIL_INVALID 0 +#define V_028044_STENCIL_8 1 #define G_028044_TILE_SPLIT(x) (((x) >> 8) & 0x7) #define DB_Z_READ_BASE 0x28048 #define DB_STENCIL_READ_BASE 0x2804c diff --git a/trunk/drivers/gpu/drm/radeon/ni.c b/trunk/drivers/gpu/drm/radeon/ni.c index 9945d86d9001..853800e8582f 100644 --- a/trunk/drivers/gpu/drm/radeon/ni.c +++ b/trunk/drivers/gpu/drm/radeon/ni.c @@ -574,10 +574,18 @@ static void cayman_gpu_init(struct radeon_device *rdev) if (rdev->flags & RADEON_IS_IGP) rdev->config.cayman.tile_config |= 1 << 4; else { - if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) - rdev->config.cayman.tile_config |= 1 << 4; - else + switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) { + case 0: /* four banks */ rdev->config.cayman.tile_config |= 0 << 4; + break; + case 1: /* eight banks */ + rdev->config.cayman.tile_config |= 1 << 4; + break; + case 2: /* sixteen banks */ + default: + rdev->config.cayman.tile_config |= 2 << 4; + break; + } } rdev->config.cayman.tile_config |= ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; diff --git a/trunk/drivers/gpu/drm/radeon/r600.c b/trunk/drivers/gpu/drm/radeon/r600.c index 637280f541a3..d79c639ae739 100644 --- a/trunk/drivers/gpu/drm/radeon/r600.c +++ b/trunk/drivers/gpu/drm/radeon/r600.c @@ -3789,3 +3789,23 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev) WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); } } + +/** + * r600_get_gpu_clock - 
return GPU clock counter snapshot + * + * @rdev: radeon_device pointer + * + * Fetches a GPU clock counter snapshot (R6xx-cayman). + * Returns the 64 bit clock counter snapshot. + */ +uint64_t r600_get_gpu_clock(struct radeon_device *rdev) +{ + uint64_t clock; + + mutex_lock(&rdev->gpu_clock_mutex); + WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1); + clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) | + ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + mutex_unlock(&rdev->gpu_clock_mutex); + return clock; +} diff --git a/trunk/drivers/gpu/drm/radeon/r600_cs.c b/trunk/drivers/gpu/drm/radeon/r600_cs.c index ca87f7afaf23..f37676d7f217 100644 --- a/trunk/drivers/gpu/drm/radeon/r600_cs.c +++ b/trunk/drivers/gpu/drm/radeon/r600_cs.c @@ -47,18 +47,23 @@ struct r600_cs_track { u32 npipes; /* value we track */ u32 sq_config; + u32 log_nsamples; u32 nsamples; u32 cb_color_base_last[8]; struct radeon_bo *cb_color_bo[8]; u64 cb_color_bo_mc[8]; - u32 cb_color_bo_offset[8]; - struct radeon_bo *cb_color_frag_bo[8]; /* unused */ - struct radeon_bo *cb_color_tile_bo[8]; /* unused */ + u64 cb_color_bo_offset[8]; + struct radeon_bo *cb_color_frag_bo[8]; + u64 cb_color_frag_offset[8]; + struct radeon_bo *cb_color_tile_bo[8]; + u64 cb_color_tile_offset[8]; + u32 cb_color_mask[8]; u32 cb_color_info[8]; u32 cb_color_view[8]; u32 cb_color_size_idx[8]; /* unused */ u32 cb_target_mask; u32 cb_shader_mask; /* unused */ + bool is_resolve; u32 cb_color_size[8]; u32 vgt_strmout_en; u32 vgt_strmout_buffer_en; @@ -311,7 +316,15 @@ static void r600_cs_track_init(struct r600_cs_track *track) track->cb_color_bo[i] = NULL; track->cb_color_bo_offset[i] = 0xFFFFFFFF; track->cb_color_bo_mc[i] = 0xFFFFFFFF; - } + track->cb_color_frag_bo[i] = NULL; + track->cb_color_frag_offset[i] = 0xFFFFFFFF; + track->cb_color_tile_bo[i] = NULL; + track->cb_color_tile_offset[i] = 0xFFFFFFFF; + track->cb_color_mask[i] = 0xFFFFFFFF; + } + track->is_resolve = false; + track->nsamples = 16; + track->log_nsamples = 4; track->cb_target_mask = 0xFFFFFFFF; track->cb_shader_mask = 0xFFFFFFFF; track->cb_dirty = true; @@ -348,11 +361,9 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) volatile u32 *ib = p->ib.ptr; unsigned array_mode; u32 format; + /* When resolve is used, the second colorbuffer has always 1 sample. */ + unsigned nsamples = track->is_resolve && i == 1 ? 
1 : track->nsamples; - if (G_0280A0_TILE_MODE(track->cb_color_info[i])) { - dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n"); - return -EINVAL; - } size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i]; format = G_0280A0_FORMAT(track->cb_color_info[i]); if (!r600_fmt_is_valid_color(format)) { @@ -375,7 +386,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) array_check.group_size = track->group_size; array_check.nbanks = track->nbanks; array_check.npipes = track->npipes; - array_check.nsamples = track->nsamples; + array_check.nsamples = nsamples; array_check.blocksize = r600_fmt_get_blocksize(format); if (r600_get_array_mode_alignment(&array_check, &pitch_align, &height_align, &depth_align, &base_align)) { @@ -420,7 +431,8 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) } /* check offset */ - tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * r600_fmt_get_blocksize(format); + tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * + r600_fmt_get_blocksize(format) * nsamples; switch (array_mode) { default: case V_0280A0_ARRAY_LINEAR_GENERAL: @@ -441,7 +453,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) * broken userspace. */ } else { - dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big (%d %d) (%d %d %d)\n", + dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n", __func__, i, array_mode, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]), @@ -458,6 +470,51 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) | S_028060_SLICE_TILE_MAX(slice_tile_max - 1); ib[track->cb_color_size_idx[i]] = tmp; + + /* FMASK/CMASK */ + switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) { + case V_0280A0_TILE_DISABLE: + break; + case V_0280A0_FRAG_ENABLE: + if (track->nsamples > 1) { + uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]); + /* the tile size is 8x8, but the size is in units of bits. + * for bytes, do just * 8. */ + uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1); + + if (bytes + track->cb_color_frag_offset[i] > + radeon_bo_size(track->cb_color_frag_bo[i])) { + dev_warn(p->dev, "%s FMASK_TILE_MAX too large " + "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n", + __func__, tile_max, bytes, + track->cb_color_frag_offset[i], + radeon_bo_size(track->cb_color_frag_bo[i])); + return -EINVAL; + } + } + /* fall through */ + case V_0280A0_CLEAR_ENABLE: + { + uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]); + /* One block = 128x128 pixels, one 8x8 tile has 4 bits.. + * (128*128) / (8*8) / 2 = 128 bytes per block. 
*/ + uint32_t bytes = (block_max + 1) * 128; + + if (bytes + track->cb_color_tile_offset[i] > + radeon_bo_size(track->cb_color_tile_bo[i])) { + dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large " + "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n", + __func__, block_max, bytes, + track->cb_color_tile_offset[i], + radeon_bo_size(track->cb_color_tile_bo[i])); + return -EINVAL; + } + break; + } + default: + dev_warn(p->dev, "%s invalid tile mode\n", __func__); + return -EINVAL; + } return 0; } @@ -566,7 +623,7 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p) ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; - tmp = ntiles * bpe * 64 * nviews; + tmp = ntiles * bpe * 64 * nviews * track->nsamples; if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n", array_mode, @@ -746,6 +803,12 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) */ if (track->cb_dirty) { tmp = track->cb_target_mask; + + /* We must check both colorbuffers for RESOLVE. */ + if (track->is_resolve) { + tmp |= 0xff; + } + for (i = 0; i < 8; i++) { if ((tmp >> (i * 4)) & 0xF) { /* at least one component is enabled */ @@ -764,8 +827,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) } /* Check depth buffer */ - if (track->db_dirty && (G_028800_STENCIL_ENABLE(track->db_depth_control) || - G_028800_Z_ENABLE(track->db_depth_control))) { + if (track->db_dirty && + G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID && + (G_028800_STENCIL_ENABLE(track->db_depth_control) || + G_028800_Z_ENABLE(track->db_depth_control))) { r = r600_cs_track_validate_db(p); if (r) return r; @@ -1229,9 +1294,15 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) break; case R_028C04_PA_SC_AA_CONFIG: tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx)); + track->log_nsamples = tmp; track->nsamples = 1 << tmp; track->cb_dirty = true; break; + case R_028808_CB_COLOR_CONTROL: + tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx)); + track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX; + track->cb_dirty = true; + break; case R_0280A0_CB_COLOR0_INFO: case R_0280A4_CB_COLOR1_INFO: case R_0280A8_CB_COLOR2_INFO: @@ -1310,16 +1381,21 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg); return -EINVAL; } - ib[idx] = track->cb_color_base_last[tmp]; track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp]; + track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp]; + ib[idx] = track->cb_color_base_last[tmp]; } else { r = r600_cs_packet_next_reloc(p, &reloc); if (r) { dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); return -EINVAL; } - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); track->cb_color_frag_bo[tmp] = reloc->robj; + track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8; + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + } + if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { + track->cb_dirty = true; } break; case R_0280C0_CB_COLOR0_TILE: @@ -1336,16 +1412,35 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) dev_err(p->dev, "Broken old userspace ? 
no cb_color0_base supplied before trying to write 0x%08X\n", reg); return -EINVAL; } - ib[idx] = track->cb_color_base_last[tmp]; track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp]; + track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp]; + ib[idx] = track->cb_color_base_last[tmp]; } else { r = r600_cs_packet_next_reloc(p, &reloc); if (r) { dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); return -EINVAL; } - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); track->cb_color_tile_bo[tmp] = reloc->robj; + track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8; + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + } + if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { + track->cb_dirty = true; + } + break; + case R_028100_CB_COLOR0_MASK: + case R_028104_CB_COLOR1_MASK: + case R_028108_CB_COLOR2_MASK: + case R_02810C_CB_COLOR3_MASK: + case R_028110_CB_COLOR4_MASK: + case R_028114_CB_COLOR5_MASK: + case R_028118_CB_COLOR6_MASK: + case R_02811C_CB_COLOR7_MASK: + tmp = (reg - R_028100_CB_COLOR0_MASK) / 4; + track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx); + if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { + track->cb_dirty = true; } break; case CB_COLOR0_BASE: @@ -1490,7 +1585,7 @@ unsigned r600_mip_minify(unsigned size, unsigned level) } static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel, - unsigned w0, unsigned h0, unsigned d0, unsigned format, + unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format, unsigned block_align, unsigned height_align, unsigned base_align, unsigned *l0_size, unsigned *mipmap_size) { @@ -1518,7 +1613,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel, depth = r600_mip_minify(d0, i); - size = nbx * nby * blocksize; + size = nbx * nby * blocksize * nsamples; if (nfaces) size *= nfaces; else @@ -1557,13 +1652,14 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, u32 tiling_flags) { struct r600_cs_track *track = p->track; - u32 nfaces, llevel, blevel, w0, h0, d0; - u32 word0, word1, l0_size, mipmap_size, word2, word3; + u32 dim, nfaces, llevel, blevel, w0, h0, d0; + u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5; u32 height_align, pitch, pitch_align, depth_align; - u32 array, barray, larray; + u32 barray, larray; u64 base_align; struct array_mode_checker array_check; u32 format; + bool is_array; /* on legacy kernel we don't perform advanced check */ if (p->rdev == NULL) @@ -1581,12 +1677,28 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); } word1 = radeon_get_ib_value(p, idx + 1); + word2 = radeon_get_ib_value(p, idx + 2) << 8; + word3 = radeon_get_ib_value(p, idx + 3) << 8; + word4 = radeon_get_ib_value(p, idx + 4); + word5 = radeon_get_ib_value(p, idx + 5); + dim = G_038000_DIM(word0); w0 = G_038000_TEX_WIDTH(word0) + 1; + pitch = (G_038000_PITCH(word0) + 1) * 8; h0 = G_038004_TEX_HEIGHT(word1) + 1; d0 = G_038004_TEX_DEPTH(word1); + format = G_038004_DATA_FORMAT(word1); + blevel = G_038010_BASE_LEVEL(word4); + llevel = G_038014_LAST_LEVEL(word5); + /* pitch in texels */ + array_check.array_mode = G_038000_TILE_MODE(word0); + array_check.group_size = track->group_size; + array_check.nbanks = track->nbanks; + array_check.npipes = track->npipes; + array_check.nsamples = 1; + array_check.blocksize = r600_fmt_get_blocksize(format); nfaces = 1; - array = 0; - switch (G_038000_DIM(word0)) { + is_array = 
false; + switch (dim) { case V_038000_SQ_TEX_DIM_1D: case V_038000_SQ_TEX_DIM_2D: case V_038000_SQ_TEX_DIM_3D: @@ -1599,29 +1711,25 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, break; case V_038000_SQ_TEX_DIM_1D_ARRAY: case V_038000_SQ_TEX_DIM_2D_ARRAY: - array = 1; + is_array = true; break; - case V_038000_SQ_TEX_DIM_2D_MSAA: case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA: + is_array = true; + /* fall through */ + case V_038000_SQ_TEX_DIM_2D_MSAA: + array_check.nsamples = 1 << llevel; + llevel = 0; + break; default: dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0)); return -EINVAL; } - format = G_038004_DATA_FORMAT(word1); if (!r600_fmt_is_valid_texture(format, p->family)) { dev_warn(p->dev, "%s:%d texture invalid format %d\n", __func__, __LINE__, format); return -EINVAL; } - /* pitch in texels */ - pitch = (G_038000_PITCH(word0) + 1) * 8; - array_check.array_mode = G_038000_TILE_MODE(word0); - array_check.group_size = track->group_size; - array_check.nbanks = track->nbanks; - array_check.npipes = track->npipes; - array_check.nsamples = 1; - array_check.blocksize = r600_fmt_get_blocksize(format); if (r600_get_array_mode_alignment(&array_check, &pitch_align, &height_align, &depth_align, &base_align)) { dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n", @@ -1647,24 +1755,17 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, return -EINVAL; } - word2 = radeon_get_ib_value(p, idx + 2) << 8; - word3 = radeon_get_ib_value(p, idx + 3) << 8; - - word0 = radeon_get_ib_value(p, idx + 4); - word1 = radeon_get_ib_value(p, idx + 5); - blevel = G_038010_BASE_LEVEL(word0); - llevel = G_038014_LAST_LEVEL(word1); if (blevel > llevel) { dev_warn(p->dev, "texture blevel %d > llevel %d\n", blevel, llevel); } - if (array == 1) { - barray = G_038014_BASE_ARRAY(word1); - larray = G_038014_LAST_ARRAY(word1); + if (is_array) { + barray = G_038014_BASE_ARRAY(word5); + larray = G_038014_LAST_ARRAY(word5); nfaces = larray - barray + 1; } - r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, format, + r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format, pitch_align, height_align, base_align, &l0_size, &mipmap_size); /* using get ib will give us the offset into the texture bo */ @@ -1677,7 +1778,6 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, return -EINVAL; } /* using get ib will give us the offset into the mipmap bo */ - word3 = radeon_get_ib_value(p, idx + 3) << 8; if ((mipmap_size + word3) > radeon_bo_size(mipmap)) { /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/ diff --git a/trunk/drivers/gpu/drm/radeon/r600d.h b/trunk/drivers/gpu/drm/radeon/r600d.h index 4b116ae75fc2..fa6f37099ba9 100644 --- a/trunk/drivers/gpu/drm/radeon/r600d.h +++ b/trunk/drivers/gpu/drm/radeon/r600d.h @@ -66,6 +66,14 @@ #define CC_RB_BACKEND_DISABLE 0x98F4 #define BACKEND_DISABLE(x) ((x) << 16) +#define R_028808_CB_COLOR_CONTROL 0x28808 +#define S_028808_SPECIAL_OP(x) (((x) & 0x7) << 4) +#define G_028808_SPECIAL_OP(x) (((x) >> 4) & 0x7) +#define C_028808_SPECIAL_OP 0xFFFFFF8F +#define V_028808_SPECIAL_NORMAL 0x00 +#define V_028808_SPECIAL_DISABLE 0x01 +#define V_028808_SPECIAL_RESOLVE_BOX 0x07 + #define CB_COLOR0_BASE 0x28040 #define CB_COLOR1_BASE 0x28044 #define CB_COLOR2_BASE 0x28048 @@ -92,6 +100,20 @@ #define R_028094_CB_COLOR5_VIEW 0x028094 #define 
R_028098_CB_COLOR6_VIEW 0x028098 #define R_02809C_CB_COLOR7_VIEW 0x02809C +#define R_028100_CB_COLOR0_MASK 0x028100 +#define S_028100_CMASK_BLOCK_MAX(x) (((x) & 0xFFF) << 0) +#define G_028100_CMASK_BLOCK_MAX(x) (((x) >> 0) & 0xFFF) +#define C_028100_CMASK_BLOCK_MAX 0xFFFFF000 +#define S_028100_FMASK_TILE_MAX(x) (((x) & 0xFFFFF) << 12) +#define G_028100_FMASK_TILE_MAX(x) (((x) >> 12) & 0xFFFFF) +#define C_028100_FMASK_TILE_MAX 0x00000FFF +#define R_028104_CB_COLOR1_MASK 0x028104 +#define R_028108_CB_COLOR2_MASK 0x028108 +#define R_02810C_CB_COLOR3_MASK 0x02810C +#define R_028110_CB_COLOR4_MASK 0x028110 +#define R_028114_CB_COLOR5_MASK 0x028114 +#define R_028118_CB_COLOR6_MASK 0x028118 +#define R_02811C_CB_COLOR7_MASK 0x02811C #define CB_COLOR0_INFO 0x280a0 # define CB_FORMAT(x) ((x) << 2) # define CB_ARRAY_MODE(x) ((x) << 8) @@ -602,6 +624,9 @@ #define RLC_HB_WPTR 0x3f1c #define RLC_HB_WPTR_LSB_ADDR 0x3f14 #define RLC_HB_WPTR_MSB_ADDR 0x3f18 +#define RLC_GPU_CLOCK_COUNT_LSB 0x3f38 +#define RLC_GPU_CLOCK_COUNT_MSB 0x3f3c +#define RLC_CAPTURE_GPU_CLOCK_COUNT 0x3f40 #define RLC_MC_CNTL 0x3f44 #define RLC_UCODE_CNTL 0x3f48 #define RLC_UCODE_ADDR 0x3f2c @@ -1397,6 +1422,9 @@ #define S_0280A0_TILE_MODE(x) (((x) & 0x3) << 18) #define G_0280A0_TILE_MODE(x) (((x) >> 18) & 0x3) #define C_0280A0_TILE_MODE 0xFFF3FFFF +#define V_0280A0_TILE_DISABLE 0 +#define V_0280A0_CLEAR_ENABLE 1 +#define V_0280A0_FRAG_ENABLE 2 #define S_0280A0_BLEND_CLAMP(x) (((x) & 0x1) << 20) #define G_0280A0_BLEND_CLAMP(x) (((x) >> 20) & 0x1) #define C_0280A0_BLEND_CLAMP 0xFFEFFFFF diff --git a/trunk/drivers/gpu/drm/radeon/radeon.h b/trunk/drivers/gpu/drm/radeon/radeon.h index 5431af292408..59a15315ae9f 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon.h +++ b/trunk/drivers/gpu/drm/radeon/radeon.h @@ -142,21 +142,6 @@ struct radeon_device; /* * BIOS. 
*/ -#define ATRM_BIOS_PAGE 4096 - -#if defined(CONFIG_VGA_SWITCHEROO) -bool radeon_atrm_supported(struct pci_dev *pdev); -int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len); -#else -static inline bool radeon_atrm_supported(struct pci_dev *pdev) -{ - return false; -} - -static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){ - return -EINVAL; -} -#endif bool radeon_get_bios(struct radeon_device *rdev); /* @@ -300,6 +285,7 @@ struct radeon_bo_va { uint64_t soffset; uint64_t eoffset; uint32_t flags; + struct radeon_fence *fence; bool valid; }; @@ -1533,6 +1519,7 @@ struct radeon_device { unsigned debugfs_count; /* virtual memory */ struct radeon_vm_manager vm_manager; + struct mutex gpu_clock_mutex; }; int radeon_device_init(struct radeon_device *rdev, @@ -1733,11 +1720,11 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v); #define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev)) #define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev)) #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev)) -#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pflip.pre_page_flip((rdev), (crtc)) -#define radeon_page_flip(rdev, crtc, base) rdev->asic->pflip.page_flip((rdev), (crtc), (base)) -#define radeon_post_page_flip(rdev, crtc) rdev->asic->pflip.post_page_flip((rdev), (crtc)) -#define radeon_wait_for_vblank(rdev, crtc) rdev->asic->display.wait_for_vblank((rdev), (crtc)) -#define radeon_mc_wait_for_idle(rdev) rdev->asic->mc_wait_for_idle((rdev)) +#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc)) +#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base)) +#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc)) +#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc)) +#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev)) /* Common functions */ /* AGP */ diff --git a/trunk/drivers/gpu/drm/radeon/radeon_asic.h b/trunk/drivers/gpu/drm/radeon/radeon_asic.h index f4af24310438..18c38d14c8cd 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_asic.h +++ b/trunk/drivers/gpu/drm/radeon/radeon_asic.h @@ -255,13 +255,10 @@ extern int rs690_mc_wait_for_idle(struct radeon_device *rdev); * rv515 */ struct rv515_mc_save { - u32 d1vga_control; - u32 d2vga_control; u32 vga_render_control; u32 vga_hdp_control; - u32 d1crtc_control; - u32 d2crtc_control; }; + int rv515_init(struct radeon_device *rdev); void rv515_fini(struct radeon_device *rdev); uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg); @@ -371,6 +368,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev, unsigned num_gpu_pages, struct radeon_sa_bo *vb); int r600_mc_wait_for_idle(struct radeon_device *rdev); +uint64_t r600_get_gpu_clock(struct radeon_device *rdev); /* * rv770,rv730,rv710,rv740 @@ -389,11 +387,10 @@ void r700_cp_fini(struct radeon_device *rdev); * evergreen */ struct evergreen_mc_save { - u32 vga_control[6]; u32 vga_render_control; u32 vga_hdp_control; - u32 crtc_control[6]; }; + void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev); int evergreen_init(struct radeon_device *rdev); void evergreen_fini(struct radeon_device *rdev); @@ -472,5 +469,6 @@ int si_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id); void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm); void si_vm_tlb_flush(struct radeon_device *rdev, struct 
radeon_vm *vm); int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); +uint64_t si_get_gpu_clock(struct radeon_device *rdev); #endif diff --git a/trunk/drivers/gpu/drm/radeon/radeon_atombios.c b/trunk/drivers/gpu/drm/radeon/radeon_atombios.c index b1e3820df363..d67d4f3eb6f4 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_atombios.c @@ -452,7 +452,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, } /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */ - if ((dev->pdev->device == 0x9802) && + if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) && (dev->pdev->subsystem_vendor == 0x1734) && (dev->pdev->subsystem_device == 0x11bd)) { if (*connector_type == DRM_MODE_CONNECTOR_VGA) { @@ -1263,6 +1263,8 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) union igp_info { struct _ATOM_INTEGRATED_SYSTEM_INFO info; struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; }; bool radeon_atombios_sideport_present(struct radeon_device *rdev) @@ -1390,27 +1392,50 @@ static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev, struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); u16 data_offset, size; - struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info; + union igp_info *igp_info; u8 frev, crev; u16 percentage = 0, rate = 0; /* get any igp specific overrides */ if (atom_parse_data_header(mode_info->atom_context, index, &size, &frev, &crev, &data_offset)) { - igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *) + igp_info = (union igp_info *) (mode_info->atom_context->bios + data_offset); - switch (id) { - case ASIC_INTERNAL_SS_ON_TMDS: - percentage = le16_to_cpu(igp_info->usDVISSPercentage); - rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz); + switch (crev) { + case 6: + switch (id) { + case ASIC_INTERNAL_SS_ON_TMDS: + percentage = le16_to_cpu(igp_info->info_6.usDVISSPercentage); + rate = le16_to_cpu(igp_info->info_6.usDVISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_HDMI: + percentage = le16_to_cpu(igp_info->info_6.usHDMISSPercentage); + rate = le16_to_cpu(igp_info->info_6.usHDMISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_LVDS: + percentage = le16_to_cpu(igp_info->info_6.usLvdsSSPercentage); + rate = le16_to_cpu(igp_info->info_6.usLvdsSSpreadRateIn10Hz); + break; + } break; - case ASIC_INTERNAL_SS_ON_HDMI: - percentage = le16_to_cpu(igp_info->usHDMISSPercentage); - rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz); + case 7: + switch (id) { + case ASIC_INTERNAL_SS_ON_TMDS: + percentage = le16_to_cpu(igp_info->info_7.usDVISSPercentage); + rate = le16_to_cpu(igp_info->info_7.usDVISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_HDMI: + percentage = le16_to_cpu(igp_info->info_7.usHDMISSPercentage); + rate = le16_to_cpu(igp_info->info_7.usHDMISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_LVDS: + percentage = le16_to_cpu(igp_info->info_7.usLvdsSSPercentage); + rate = le16_to_cpu(igp_info->info_7.usLvdsSSpreadRateIn10Hz); + break; + } break; - case ASIC_INTERNAL_SS_ON_LVDS: - percentage = le16_to_cpu(igp_info->usLvdsSSPercentage); - rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz); + default: + DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); break; } if (percentage) diff --git a/trunk/drivers/gpu/drm/radeon/radeon_atpx_handler.c 
b/trunk/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 98724fcb0088..2a2cf0b88a28 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -30,57 +30,8 @@ static struct radeon_atpx_priv { /* handle for device - and atpx */ acpi_handle dhandle; acpi_handle atpx_handle; - acpi_handle atrm_handle; } radeon_atpx_priv; -/* retrieve the ROM in 4k blocks */ -static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios, - int offset, int len) -{ - acpi_status status; - union acpi_object atrm_arg_elements[2], *obj; - struct acpi_object_list atrm_arg; - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; - - atrm_arg.count = 2; - atrm_arg.pointer = &atrm_arg_elements[0]; - - atrm_arg_elements[0].type = ACPI_TYPE_INTEGER; - atrm_arg_elements[0].integer.value = offset; - - atrm_arg_elements[1].type = ACPI_TYPE_INTEGER; - atrm_arg_elements[1].integer.value = len; - - status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer); - if (ACPI_FAILURE(status)) { - printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status)); - return -ENODEV; - } - - obj = (union acpi_object *)buffer.pointer; - memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length); - len = obj->buffer.length; - kfree(buffer.pointer); - return len; -} - -bool radeon_atrm_supported(struct pci_dev *pdev) -{ - /* get the discrete ROM only via ATRM */ - if (!radeon_atpx_priv.atpx_detected) - return false; - - if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) - return false; - return true; -} - - -int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len) -{ - return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len); -} - static int radeon_atpx_get_version(acpi_handle handle) { acpi_status status; @@ -198,7 +149,7 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id, static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) { - acpi_handle dhandle, atpx_handle, atrm_handle; + acpi_handle dhandle, atpx_handle; acpi_status status; dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); @@ -209,13 +160,8 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) if (ACPI_FAILURE(status)) return false; - status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); - if (ACPI_FAILURE(status)) - return false; - radeon_atpx_priv.dhandle = dhandle; radeon_atpx_priv.atpx_handle = atpx_handle; - radeon_atpx_priv.atrm_handle = atrm_handle; return true; } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_bios.c b/trunk/drivers/gpu/drm/radeon/radeon_bios.c index 501f4881e5aa..d306cc8fdeaa 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_bios.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_bios.c @@ -32,6 +32,7 @@ #include #include +#include /* * BIOS. */ @@ -98,16 +99,81 @@ static bool radeon_read_bios(struct radeon_device *rdev) return true; } +#ifdef CONFIG_ACPI /* ATRM is used to get the BIOS on the discrete cards in * dual-gpu systems. */ +/* retrieve the ROM in 4k blocks */ +#define ATRM_BIOS_PAGE 4096 +/** + * radeon_atrm_call - fetch a chunk of the vbios + * + * @atrm_handle: acpi ATRM handle + * @bios: vbios image pointer + * @offset: offset of vbios image data to fetch + * @len: length of vbios image data to fetch + * + * Executes ATRM to fetch a chunk of the discrete + * vbios image on PX systems (all asics). + * Returns the length of the buffer fetched. 
+ */ +static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios, + int offset, int len) +{ + acpi_status status; + union acpi_object atrm_arg_elements[2], *obj; + struct acpi_object_list atrm_arg; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; + + atrm_arg.count = 2; + atrm_arg.pointer = &atrm_arg_elements[0]; + + atrm_arg_elements[0].type = ACPI_TYPE_INTEGER; + atrm_arg_elements[0].integer.value = offset; + + atrm_arg_elements[1].type = ACPI_TYPE_INTEGER; + atrm_arg_elements[1].integer.value = len; + + status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer); + if (ACPI_FAILURE(status)) { + printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status)); + return -ENODEV; + } + + obj = (union acpi_object *)buffer.pointer; + memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length); + len = obj->buffer.length; + kfree(buffer.pointer); + return len; +} + static bool radeon_atrm_get_bios(struct radeon_device *rdev) { int ret; int size = 256 * 1024; int i; + struct pci_dev *pdev = NULL; + acpi_handle dhandle, atrm_handle; + acpi_status status; + bool found = false; + + /* ATRM is for the discrete card only */ + if (rdev->flags & RADEON_IS_IGP) + return false; + + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { + dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + if (!dhandle) + continue; + + status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); + if (!ACPI_FAILURE(status)) { + found = true; + break; + } + } - if (!radeon_atrm_supported(rdev->pdev)) + if (!found) return false; rdev->bios = kmalloc(size, GFP_KERNEL); @@ -117,9 +183,10 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) } for (i = 0; i < size / ATRM_BIOS_PAGE; i++) { - ret = radeon_atrm_get_bios_chunk(rdev->bios, - (i * ATRM_BIOS_PAGE), - ATRM_BIOS_PAGE); + ret = radeon_atrm_call(atrm_handle, + rdev->bios, + (i * ATRM_BIOS_PAGE), + ATRM_BIOS_PAGE); if (ret < ATRM_BIOS_PAGE) break; } @@ -130,6 +197,12 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) } return true; } +#else +static inline bool radeon_atrm_get_bios(struct radeon_device *rdev) +{ + return false; +} +#endif static bool ni_read_disabled_bios(struct radeon_device *rdev) { @@ -476,6 +549,61 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev) return legacy_read_disabled_bios(rdev); } +#ifdef CONFIG_ACPI +static bool radeon_acpi_vfct_bios(struct radeon_device *rdev) +{ + bool ret = false; + struct acpi_table_header *hdr; + acpi_size tbl_size; + UEFI_ACPI_VFCT *vfct; + GOP_VBIOS_CONTENT *vbios; + VFCT_IMAGE_HEADER *vhdr; + + if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size))) + return false; + if (tbl_size < sizeof(UEFI_ACPI_VFCT)) { + DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n"); + goto out_unmap; + } + + vfct = (UEFI_ACPI_VFCT *)hdr; + if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) { + DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n"); + goto out_unmap; + } + + vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset); + vhdr = &vbios->VbiosHeader; + DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n", + vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction, + vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength); + + if (vhdr->PCIBus != rdev->pdev->bus->number || + vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) || + vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) || + vhdr->VendorID != rdev->pdev->vendor || + vhdr->DeviceID != rdev->pdev->device) { + 
DRM_INFO("ACPI VFCT table is not for this card\n"); + goto out_unmap; + }; + + if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) { + DRM_ERROR("ACPI VFCT image truncated\n"); + goto out_unmap; + } + + rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL); + ret = !!rdev->bios; + +out_unmap: + return ret; +} +#else +static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev) +{ + return false; +} +#endif bool radeon_get_bios(struct radeon_device *rdev) { @@ -483,6 +611,8 @@ bool radeon_get_bios(struct radeon_device *rdev) uint16_t tmp; r = radeon_atrm_get_bios(rdev); + if (r == false) + r = radeon_acpi_vfct_bios(rdev); if (r == false) r = igp_read_bios_from_vram(rdev); if (r == false) diff --git a/trunk/drivers/gpu/drm/radeon/radeon_combios.c b/trunk/drivers/gpu/drm/radeon/radeon_combios.c index 576f4f6919f2..f75247d42ffd 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_combios.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_combios.c @@ -719,6 +719,34 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde return i2c; } +static struct radeon_i2c_bus_rec radeon_combios_get_i2c_info_from_table(struct radeon_device *rdev) +{ + struct drm_device *dev = rdev->ddev; + struct radeon_i2c_bus_rec i2c; + u16 offset; + u8 id, blocks, clk, data; + int i; + + i2c.valid = false; + + offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE); + if (offset) { + blocks = RBIOS8(offset + 2); + for (i = 0; i < blocks; i++) { + id = RBIOS8(offset + 3 + (i * 5) + 0); + if (id == 136) { + clk = RBIOS8(offset + 3 + (i * 5) + 3); + data = RBIOS8(offset + 3 + (i * 5) + 4); + /* gpiopad */ + i2c = combios_setup_i2c_bus(rdev, DDC_MONID, + (1 << clk), (1 << data)); + break; + } + } + } + return i2c; +} + void radeon_combios_i2c_init(struct radeon_device *rdev) { struct drm_device *dev = rdev->ddev; @@ -755,30 +783,14 @@ void radeon_combios_i2c_init(struct radeon_device *rdev) } else if (rdev->family == CHIP_RS300 || rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) { - u16 offset; - u8 id, blocks, clk, data; - int i; - /* 0x68 */ i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID"); - offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE); - if (offset) { - blocks = RBIOS8(offset + 2); - for (i = 0; i < blocks; i++) { - id = RBIOS8(offset + 3 + (i * 5) + 0); - if (id == 136) { - clk = RBIOS8(offset + 3 + (i * 5) + 3); - data = RBIOS8(offset + 3 + (i * 5) + 4); - /* gpiopad */ - i2c = combios_setup_i2c_bus(rdev, DDC_MONID, - (1 << clk), (1 << data)); - rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK"); - break; - } - } - } + /* gpiopad */ + i2c = radeon_combios_get_i2c_info_from_table(rdev); + if (i2c.valid) + rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK"); } else if ((rdev->family == CHIP_R200) || (rdev->family >= CHIP_R300)) { /* 0x68 */ @@ -2321,7 +2333,10 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) connector = (tmp >> 12) & 0xf; ddc_type = (tmp >> 8) & 0xf; - ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0); + if (ddc_type == 5) + ddc_i2c = radeon_combios_get_i2c_info_from_table(rdev); + else + ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0); switch (connector) { case CONNECTOR_PROPRIETARY_LEGACY: diff --git a/trunk/drivers/gpu/drm/radeon/radeon_cs.c b/trunk/drivers/gpu/drm/radeon/radeon_cs.c index 8a4c49ef0cc4..b4a0db24f4dd 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_cs.c 
+++ b/trunk/drivers/gpu/drm/radeon/radeon_cs.c @@ -278,6 +278,30 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) return 0; } +static void radeon_bo_vm_fence_va(struct radeon_cs_parser *parser, + struct radeon_fence *fence) +{ + struct radeon_fpriv *fpriv = parser->filp->driver_priv; + struct radeon_vm *vm = &fpriv->vm; + struct radeon_bo_list *lobj; + + if (parser->chunk_ib_idx == -1) { + return; + } + if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) { + return; + } + + list_for_each_entry(lobj, &parser->validated, tv.head) { + struct radeon_bo_va *bo_va; + struct radeon_bo *rbo = lobj->bo; + + bo_va = radeon_bo_va(rbo, vm); + radeon_fence_unref(&bo_va->fence); + bo_va->fence = radeon_fence_ref(fence); + } +} + /** * cs_parser_fini() - clean parser states * @parser: parser structure holding parsing context. @@ -290,11 +314,14 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) { unsigned i; - if (!error) + if (!error) { + /* fence all bo va before ttm_eu_fence_buffer_objects so bo are still reserved */ + radeon_bo_vm_fence_va(parser, parser->ib.fence); ttm_eu_fence_buffer_objects(&parser->validated, parser->ib.fence); - else + } else { ttm_eu_backoff_reservation(&parser->validated); + } if (parser->relocs != NULL) { for (i = 0; i < parser->nrelocs; i++) { @@ -388,7 +415,6 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, if (parser->chunk_ib_idx == -1) return 0; - if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) return 0; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_cursor.c b/trunk/drivers/gpu/drm/radeon/radeon_cursor.c index 711e95ad39bf..8794744cdf1a 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_cursor.c @@ -67,7 +67,8 @@ static void radeon_hide_cursor(struct drm_crtc *crtc) if (ASIC_IS_DCE4(rdev)) { WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset); - WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT)); + WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) | + EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2)); } else if (ASIC_IS_AVIVO(rdev)) { WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); @@ -94,7 +95,8 @@ static void radeon_show_cursor(struct drm_crtc *crtc) if (ASIC_IS_DCE4(rdev)) { WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset); WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN | - EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT)); + EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) | + EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2)); } else if (ASIC_IS_AVIVO(rdev)) { WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN | diff --git a/trunk/drivers/gpu/drm/radeon/radeon_device.c b/trunk/drivers/gpu/drm/radeon/radeon_device.c index 742af8244e89..7a3daebd732d 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_device.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_device.c @@ -1009,6 +1009,7 @@ int radeon_device_init(struct radeon_device *rdev, atomic_set(&rdev->ih.lock, 0); mutex_init(&rdev->gem.mutex); mutex_init(&rdev->pm.mutex); + mutex_init(&rdev->gpu_clock_mutex); init_rwsem(&rdev->pm.mclk_lock); init_rwsem(&rdev->exclusive_lock); init_waitqueue_head(&rdev->irq.vblank_queue); @@ -1050,7 +1051,7 @@ int radeon_device_init(struct radeon_device *rdev, if (rdev->flags & 
RADEON_IS_AGP) rdev->need_dma32 = true; if ((rdev->flags & RADEON_IS_PCI) && - (rdev->family < CHIP_RS400)) + (rdev->family <= CHIP_RS740)) rdev->need_dma32 = true; dma_bits = rdev->need_dma32 ? 32 : 40; @@ -1345,12 +1346,15 @@ int radeon_gpu_reset(struct radeon_device *rdev) for (i = 0; i < RADEON_NUM_RINGS; ++i) { radeon_ring_restore(rdev, &rdev->ring[i], ring_sizes[i], ring_data[i]); + ring_sizes[i] = 0; + ring_data[i] = NULL; } r = radeon_ib_ring_tests(rdev); if (r) { dev_err(rdev->dev, "ib ring test failed (%d).\n", r); if (saved) { + saved = false; radeon_suspend(rdev); goto retry; } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_drv.c b/trunk/drivers/gpu/drm/radeon/radeon_drv.c index dcea6f01ae4e..8c593ea82c41 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_drv.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_drv.c @@ -59,9 +59,14 @@ * 2.15.0 - add max_pipes query * 2.16.0 - fix evergreen 2D tiled surface calculation * 2.17.0 - add STRMOUT_BASE_UPDATE for r7xx + * 2.18.0 - r600-eg: allow "invalid" DB formats + * 2.19.0 - r600-eg: MSAA textures + * 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query + * 2.21.0 - r600-r700: FMASK and CMASK + * 2.22.0 - r600 only: RESOLVE_BOX allowed */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 17 +#define KMS_DRIVER_MINOR 22 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_gart.c b/trunk/drivers/gpu/drm/radeon/radeon_gart.c index b3720054614d..bb3b7fe05ccd 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_gart.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_gart.c @@ -814,7 +814,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev, return -EINVAL; } - if (bo_va->valid) + if (bo_va->valid && mem) return 0; ngpu_pages = radeon_bo_ngpu_pages(bo); @@ -859,11 +859,27 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev, struct radeon_bo *bo) { struct radeon_bo_va *bo_va; + int r; bo_va = radeon_bo_va(bo, vm); if (bo_va == NULL) return 0; + /* wait for va use to end */ + while (bo_va->fence) { + r = radeon_fence_wait(bo_va->fence, false); + if (r) { + DRM_ERROR("error while waiting for fence: %d\n", r); + } + if (r == -EDEADLK) { + r = radeon_gpu_reset(rdev); + if (!r) + continue; + } + break; + } + radeon_fence_unref(&bo_va->fence); + mutex_lock(&rdev->vm_manager.lock); mutex_lock(&vm->mutex); radeon_vm_bo_update_pte(rdev, vm, bo, NULL); @@ -934,7 +950,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) } /** - * radeon_vm_init - tear down a vm instance + * radeon_vm_fini - tear down a vm instance * * @rdev: radeon_device pointer * @vm: requested vm @@ -952,12 +968,15 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) radeon_vm_unbind_locked(rdev, vm); mutex_unlock(&rdev->vm_manager.lock); - /* remove all bo */ + /* remove all bo at this point non are busy any more because unbind + * waited for the last vm fence to signal + */ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); if (!r) { bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm); list_del_init(&bo_va->bo_list); list_del_init(&bo_va->vm_list); + radeon_fence_unref(&bo_va->fence); radeon_bo_unreserve(rdev->ring_tmp_bo.bo); kfree(bo_va); } @@ -969,6 +988,7 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) r = radeon_bo_reserve(bo_va->bo, false); if (!r) { list_del_init(&bo_va->bo_list); + radeon_fence_unref(&bo_va->fence); radeon_bo_unreserve(bo_va->bo); kfree(bo_va); } diff 
--git a/trunk/drivers/gpu/drm/radeon/radeon_gem.c b/trunk/drivers/gpu/drm/radeon/radeon_gem.c index 84d045245739..1b57b0058ad6 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_gem.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_gem.c @@ -134,25 +134,16 @@ void radeon_gem_object_close(struct drm_gem_object *obj, struct radeon_device *rdev = rbo->rdev; struct radeon_fpriv *fpriv = file_priv->driver_priv; struct radeon_vm *vm = &fpriv->vm; - struct radeon_bo_va *bo_va, *tmp; if (rdev->family < CHIP_CAYMAN) { return; } if (radeon_bo_reserve(rbo, false)) { + dev_err(rdev->dev, "leaking bo va because we fail to reserve bo\n"); return; } - list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) { - if (bo_va->vm == vm) { - /* remove from this vm address space */ - mutex_lock(&vm->mutex); - list_del(&bo_va->vm_list); - mutex_unlock(&vm->mutex); - list_del(&bo_va->bo_list); - kfree(bo_va); - } - } + radeon_vm_bo_rmv(rdev, vm, rbo); radeon_bo_unreserve(rbo); } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_kms.c b/trunk/drivers/gpu/drm/radeon/radeon_kms.c index 1d73f16b5d97..414b4acf6947 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_kms.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_kms.c @@ -29,6 +29,7 @@ #include "drm_sarea.h" #include "radeon.h" #include "radeon_drm.h" +#include "radeon_asic.h" #include #include @@ -167,17 +168,39 @@ static void radeon_set_filp_rights(struct drm_device *dev, int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct radeon_device *rdev = dev->dev_private; - struct drm_radeon_info *info; + struct drm_radeon_info *info = data; struct radeon_mode_info *minfo = &rdev->mode_info; - uint32_t *value_ptr; - uint32_t value; + uint32_t value, *value_ptr; + uint64_t value64, *value_ptr64; struct drm_crtc *crtc; int i, found; - info = data; + /* TIMESTAMP is a 64-bit value, needs special handling. 
*/ + if (info->request == RADEON_INFO_TIMESTAMP) { + if (rdev->family >= CHIP_R600) { + value_ptr64 = (uint64_t*)((unsigned long)info->value); + if (rdev->family >= CHIP_TAHITI) { + value64 = si_get_gpu_clock(rdev); + } else { + value64 = r600_get_gpu_clock(rdev); + } + + if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) { + DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__); + return -EFAULT; + } + return 0; + } else { + DRM_DEBUG_KMS("timestamp is r6xx+ only!\n"); + return -EINVAL; + } + } + value_ptr = (uint32_t *)((unsigned long)info->value); - if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) + if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) { + DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); return -EFAULT; + } switch (info->request) { case RADEON_INFO_DEVICE_ID: @@ -337,7 +360,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return -EINVAL; } if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) { - DRM_ERROR("copy_to_user\n"); + DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__); return -EFAULT; } return 0; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index d5fd615897ec..94b4a1c12893 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -1025,9 +1025,11 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc, static void radeon_crtc_prepare(struct drm_crtc *crtc) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct drm_crtc *crtci; + radeon_crtc->in_mode_set = true; /* * The hardware wedges sometimes if you reconfigure one CRTC * whilst another is running (see fdo bug #24611). 
@@ -1038,6 +1040,7 @@ static void radeon_crtc_prepare(struct drm_crtc *crtc) static void radeon_crtc_commit(struct drm_crtc *crtc) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct drm_crtc *crtci; @@ -1048,6 +1051,7 @@ static void radeon_crtc_commit(struct drm_crtc *crtc) if (crtci->enabled) radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON); } + radeon_crtc->in_mode_set = false; } static const struct drm_crtc_helper_funcs legacy_helper_funcs = { diff --git a/trunk/drivers/gpu/drm/radeon/radeon_mode.h b/trunk/drivers/gpu/drm/radeon/radeon_mode.h index f380d59c5763..d56978949f34 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_mode.h +++ b/trunk/drivers/gpu/drm/radeon/radeon_mode.h @@ -275,6 +275,7 @@ struct radeon_crtc { u16 lut_r[256], lut_g[256], lut_b[256]; bool enabled; bool can_tile; + bool in_mode_set; uint32_t crtc_offset; struct drm_gem_object *cursor_bo; uint64_t cursor_addr; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_object.c b/trunk/drivers/gpu/drm/radeon/radeon_object.c index 1f1a4c803c1d..9024e7222839 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_object.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_object.c @@ -52,11 +52,7 @@ void radeon_bo_clear_va(struct radeon_bo *bo) list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) { /* remove from all vm address space */ - mutex_lock(&bo_va->vm->mutex); - list_del(&bo_va->vm_list); - mutex_unlock(&bo_va->vm->mutex); - list_del(&bo_va->bo_list); - kfree(bo_va); + radeon_vm_bo_rmv(bo->rdev, bo_va->vm, bo); } } @@ -136,6 +132,7 @@ int radeon_bo_create(struct radeon_device *rdev, acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size, sizeof(struct radeon_bo)); +retry: bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); if (bo == NULL) return -ENOMEM; @@ -149,8 +146,6 @@ int radeon_bo_create(struct radeon_device *rdev, bo->surface_reg = -1; INIT_LIST_HEAD(&bo->list); INIT_LIST_HEAD(&bo->va); - -retry: radeon_ttm_placement_from_domain(bo, domain); /* Kernel allocation are uninterruptible */ down_read(&rdev->pm.mclk_lock); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_ring.c b/trunk/drivers/gpu/drm/radeon/radeon_ring.c index ec79b3750430..43c431a2686d 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_ring.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_ring.c @@ -706,6 +706,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig if (radeon_debugfs_ring_init(rdev, ring)) { DRM_ERROR("Failed to register debugfs file for rings !\n"); } + radeon_ring_lockup_update(ring); return 0; } diff --git a/trunk/drivers/gpu/drm/radeon/reg_srcs/r600 b/trunk/drivers/gpu/drm/radeon/reg_srcs/r600 index 5e659b034d9a..20bfbda7b3f1 100644 --- a/trunk/drivers/gpu/drm/radeon/reg_srcs/r600 +++ b/trunk/drivers/gpu/drm/radeon/reg_srcs/r600 @@ -744,15 +744,6 @@ r600 0x9400 0x00028C38 CB_CLRCMP_DST 0x00028C3C CB_CLRCMP_MSK 0x00028C34 CB_CLRCMP_SRC -0x00028100 CB_COLOR0_MASK -0x00028104 CB_COLOR1_MASK -0x00028108 CB_COLOR2_MASK -0x0002810C CB_COLOR3_MASK -0x00028110 CB_COLOR4_MASK -0x00028114 CB_COLOR5_MASK -0x00028118 CB_COLOR6_MASK -0x0002811C CB_COLOR7_MASK -0x00028808 CB_COLOR_CONTROL 0x0002842C CB_FOG_BLUE 0x00028428 CB_FOG_GREEN 0x00028424 CB_FOG_RED diff --git a/trunk/drivers/gpu/drm/radeon/rv515.c b/trunk/drivers/gpu/drm/radeon/rv515.c index a12fbcc8ccb6..aa8ef491ef3c 100644 --- a/trunk/drivers/gpu/drm/radeon/rv515.c +++ b/trunk/drivers/gpu/drm/radeon/rv515.c @@ -281,12 +281,8 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev) void rv515_mc_stop(struct radeon_device 
*rdev, struct rv515_mc_save *save) { - save->d1vga_control = RREG32(R_000330_D1VGA_CONTROL); - save->d2vga_control = RREG32(R_000338_D2VGA_CONTROL); save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL); save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL); - save->d1crtc_control = RREG32(R_006080_D1CRTC_CONTROL); - save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL); /* Stop all video */ WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); @@ -311,15 +307,6 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) /* Unlock host access */ WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control); mdelay(1); - /* Restore video state */ - WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control); - WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control); - WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1); - WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1); - WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control); - WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control); - WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0); - WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control); } diff --git a/trunk/drivers/gpu/drm/radeon/si.c b/trunk/drivers/gpu/drm/radeon/si.c index c053f8193771..0139e227e3c7 100644 --- a/trunk/drivers/gpu/drm/radeon/si.c +++ b/trunk/drivers/gpu/drm/radeon/si.c @@ -1639,11 +1639,19 @@ static void si_gpu_init(struct radeon_device *rdev) /* XXX what about 12? */ rdev->config.si.tile_config |= (3 << 0); break; - } - if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) - rdev->config.si.tile_config |= 1 << 4; - else + } + switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) { + case 0: /* four banks */ rdev->config.si.tile_config |= 0 << 4; + break; + case 1: /* eight banks */ + rdev->config.si.tile_config |= 1 << 4; + break; + case 2: /* sixteen banks */ + default: + rdev->config.si.tile_config |= 2 << 4; + break; + } rdev->config.si.tile_config |= ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; rdev->config.si.tile_config |= @@ -3960,3 +3968,22 @@ void si_fini(struct radeon_device *rdev) rdev->bios = NULL; } +/** + * si_get_gpu_clock - return GPU clock counter snapshot + * + * @rdev: radeon_device pointer + * + * Fetches a GPU clock counter snapshot (SI). + * Returns the 64 bit clock counter snapshot. 
+ */ +uint64_t si_get_gpu_clock(struct radeon_device *rdev) +{ + uint64_t clock; + + mutex_lock(&rdev->gpu_clock_mutex); + WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1); + clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) | + ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + mutex_unlock(&rdev->gpu_clock_mutex); + return clock; +} diff --git a/trunk/drivers/gpu/drm/radeon/sid.h b/trunk/drivers/gpu/drm/radeon/sid.h index 7869089e8761..ef4815c27b1c 100644 --- a/trunk/drivers/gpu/drm/radeon/sid.h +++ b/trunk/drivers/gpu/drm/radeon/sid.h @@ -698,6 +698,9 @@ #define RLC_UCODE_ADDR 0xC32C #define RLC_UCODE_DATA 0xC330 +#define RLC_GPU_CLOCK_COUNT_LSB 0xC338 +#define RLC_GPU_CLOCK_COUNT_MSB 0xC33C +#define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC340 #define RLC_MC_CNTL 0xC344 #define RLC_UCODE_CNTL 0xC348 diff --git a/trunk/drivers/gpu/drm/savage/savage_drv.c b/trunk/drivers/gpu/drm/savage/savage_drv.c index d31d4cca9a4c..c5a164337bd5 100644 --- a/trunk/drivers/gpu/drm/savage/savage_drv.c +++ b/trunk/drivers/gpu/drm/savage/savage_drv.c @@ -43,6 +43,9 @@ static const struct file_operations savage_driver_fops = { .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif .llseek = noop_llseek, }; diff --git a/trunk/drivers/gpu/drm/sis/sis_drv.c b/trunk/drivers/gpu/drm/sis/sis_drv.c index 7f119870147c..867dc03000e6 100644 --- a/trunk/drivers/gpu/drm/sis/sis_drv.c +++ b/trunk/drivers/gpu/drm/sis/sis_drv.c @@ -74,6 +74,9 @@ static const struct file_operations sis_driver_fops = { .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif .llseek = noop_llseek, }; diff --git a/trunk/drivers/gpu/drm/tdfx/tdfx_drv.c b/trunk/drivers/gpu/drm/tdfx/tdfx_drv.c index 90f6b13acfac..a7f4d6bd1330 100644 --- a/trunk/drivers/gpu/drm/tdfx/tdfx_drv.c +++ b/trunk/drivers/gpu/drm/tdfx/tdfx_drv.c @@ -49,6 +49,9 @@ static const struct file_operations tdfx_driver_fops = { .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif .llseek = noop_llseek, }; diff --git a/trunk/drivers/gpu/drm/udl/Kconfig b/trunk/drivers/gpu/drm/udl/Kconfig index 0b5e096d39a6..56e0bf31d425 100644 --- a/trunk/drivers/gpu/drm/udl/Kconfig +++ b/trunk/drivers/gpu/drm/udl/Kconfig @@ -1,6 +1,7 @@ config DRM_UDL tristate "DisplayLink" depends on DRM && EXPERIMENTAL + depends on USB_ARCH_HAS_HCD select DRM_USB select FB_SYS_FILLRECT select FB_SYS_COPYAREA diff --git a/trunk/drivers/gpu/drm/udl/udl_drv.c b/trunk/drivers/gpu/drm/udl/udl_drv.c index 6e52069894b3..9f84128505bb 100644 --- a/trunk/drivers/gpu/drm/udl/udl_drv.c +++ b/trunk/drivers/gpu/drm/udl/udl_drv.c @@ -66,6 +66,9 @@ static const struct file_operations udl_driver_fops = { .unlocked_ioctl = drm_ioctl, .release = drm_release, .fasync = drm_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif .llseek = noop_llseek, }; diff --git a/trunk/drivers/gpu/drm/udl/udl_gem.c b/trunk/drivers/gpu/drm/udl/udl_gem.c index 7bd65bdd15a8..291ecc145585 100644 --- a/trunk/drivers/gpu/drm/udl/udl_gem.c +++ b/trunk/drivers/gpu/drm/udl/udl_gem.c @@ -308,7 +308,7 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev, /* need to attach */ attach = dma_buf_attach(dma_buf, dev->dev); if (IS_ERR(attach)) - return ERR_PTR(PTR_ERR(attach)); + return ERR_CAST(attach); sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); if (IS_ERR(sg)) { diff --git a/trunk/drivers/gpu/drm/udl/udl_modeset.c 
b/trunk/drivers/gpu/drm/udl/udl_modeset.c index f5dd89e891de..9159d48d1dfd 100644 --- a/trunk/drivers/gpu/drm/udl/udl_modeset.c +++ b/trunk/drivers/gpu/drm/udl/udl_modeset.c @@ -354,8 +354,7 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc, static void udl_crtc_disable(struct drm_crtc *crtc) { - - + udl_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); } static void udl_crtc_destroy(struct drm_crtc *crtc) diff --git a/trunk/drivers/gpu/drm/via/via_drv.c b/trunk/drivers/gpu/drm/via/via_drv.c index e927b4c052f5..af1b914b17e3 100644 --- a/trunk/drivers/gpu/drm/via/via_drv.c +++ b/trunk/drivers/gpu/drm/via/via_drv.c @@ -65,6 +65,9 @@ static const struct file_operations via_driver_fops = { .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif .llseek = noop_llseek, }; diff --git a/trunk/drivers/gpu/drm/vmwgfx/Kconfig b/trunk/drivers/gpu/drm/vmwgfx/Kconfig index 794ff67c5701..b71bcd0bfbbf 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/Kconfig +++ b/trunk/drivers/gpu/drm/vmwgfx/Kconfig @@ -12,3 +12,11 @@ config DRM_VMWGFX This is a KMS enabled DRM driver for the VMware SVGA2 virtual hardware. The compiled module will be called "vmwgfx.ko". + +config DRM_VMWGFX_FBCON + depends on DRM_VMWGFX + bool "Enable framebuffer console under vmwgfx by default" + help + Choose this option if you are shipping a new vmwgfx + userspace driver that supports using the kernel driver. + diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 4d9edead01ac..33bfc2033009 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -182,8 +182,9 @@ static struct pci_device_id vmw_pci_id_list[] = { {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII}, {0, 0, 0} }; +MODULE_DEVICE_TABLE(pci, vmw_pci_id_list); -static int enable_fbdev; +static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON); static int vmw_probe(struct pci_dev *, const struct pci_device_id *); static void vmw_master_init(struct vmw_master *); diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 6b0078ffa763..c50724bd30f6 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1688,15 +1688,19 @@ int vmw_du_page_flip(struct drm_crtc *crtc, struct vmw_private *dev_priv = vmw_priv(crtc->dev); struct drm_framebuffer *old_fb = crtc->fb; struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb); - struct drm_file *file_priv = event->base.file_priv; + struct drm_file *file_priv ; struct vmw_fence_obj *fence = NULL; struct drm_clip_rect clips; int ret; + if (event == NULL) + return -EINVAL; + /* require ScreenObject support for page flipping */ if (!dev_priv->sou_priv) return -ENOSYS; + file_priv = event->base.file_priv; if (!vmw_kms_screen_object_flippable(dev_priv, crtc)) return -EINVAL; diff --git a/trunk/drivers/gpu/vga/vga_switcheroo.c b/trunk/drivers/gpu/vga/vga_switcheroo.c index 5b3c7d135dc9..e25cf31faab2 100644 --- a/trunk/drivers/gpu/vga/vga_switcheroo.c +++ b/trunk/drivers/gpu/vga/vga_switcheroo.c @@ -70,27 +70,12 @@ static struct vgasr_priv vgasr_priv = { .clients = LIST_HEAD_INIT(vgasr_priv.clients), }; -int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) -{ - mutex_lock(&vgasr_mutex); - if (vgasr_priv.handler) { - mutex_unlock(&vgasr_mutex); - return -EINVAL; - } - - vgasr_priv.handler = handler; - mutex_unlock(&vgasr_mutex); - return 0; -} 
-EXPORT_SYMBOL(vga_switcheroo_register_handler); - -void vga_switcheroo_unregister_handler(void) +static bool vga_switcheroo_ready(void) { - mutex_lock(&vgasr_mutex); - vgasr_priv.handler = NULL; - mutex_unlock(&vgasr_mutex); + /* we're ready if we get two clients + handler */ + return !vgasr_priv.active && + vgasr_priv.registered_clients == 2 && vgasr_priv.handler; } -EXPORT_SYMBOL(vga_switcheroo_unregister_handler); static void vga_switcheroo_enable(void) { @@ -98,7 +83,8 @@ static void vga_switcheroo_enable(void) struct vga_switcheroo_client *client; /* call the handler to init */ - vgasr_priv.handler->init(); + if (vgasr_priv.handler->init) + vgasr_priv.handler->init(); list_for_each_entry(client, &vgasr_priv.clients, list) { if (client->id != -1) @@ -113,6 +99,37 @@ static void vga_switcheroo_enable(void) vgasr_priv.active = true; } +int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) +{ + mutex_lock(&vgasr_mutex); + if (vgasr_priv.handler) { + mutex_unlock(&vgasr_mutex); + return -EINVAL; + } + + vgasr_priv.handler = handler; + if (vga_switcheroo_ready()) { + printk(KERN_INFO "vga_switcheroo: enabled\n"); + vga_switcheroo_enable(); + } + mutex_unlock(&vgasr_mutex); + return 0; +} +EXPORT_SYMBOL(vga_switcheroo_register_handler); + +void vga_switcheroo_unregister_handler(void) +{ + mutex_lock(&vgasr_mutex); + vgasr_priv.handler = NULL; + if (vgasr_priv.active) { + pr_info("vga_switcheroo: disabled\n"); + vga_switcheroo_debugfs_fini(&vgasr_priv); + vgasr_priv.active = false; + } + mutex_unlock(&vgasr_mutex); +} +EXPORT_SYMBOL(vga_switcheroo_unregister_handler); + static int register_client(struct pci_dev *pdev, const struct vga_switcheroo_client_ops *ops, int id, bool active) @@ -134,9 +151,7 @@ static int register_client(struct pci_dev *pdev, if (client_is_vga(client)) vgasr_priv.registered_clients++; - /* if we get two clients + handler */ - if (!vgasr_priv.active && - vgasr_priv.registered_clients == 2 && vgasr_priv.handler) { + if (vga_switcheroo_ready()) { printk(KERN_INFO "vga_switcheroo: enabled\n"); vga_switcheroo_enable(); } diff --git a/trunk/drivers/hid/hid-core.c b/trunk/drivers/hid/hid-core.c index 60ea284407ce..8bcd168fffae 100644 --- a/trunk/drivers/hid/hid-core.c +++ b/trunk/drivers/hid/hid-core.c @@ -996,7 +996,8 @@ static void hid_process_event(struct hid_device *hid, struct hid_field *field, struct hid_driver *hdrv = hid->driver; int ret; - hid_dump_input(hid, usage, value); + if (!list_empty(&hid->debug_list)) + hid_dump_input(hid, usage, value); if (hdrv && hdrv->event && hid_match_usage(hid, usage)) { ret = hdrv->event(hid, field, usage, value); @@ -1558,7 +1559,9 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) }, - { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) }, +#if IS_ENABLED(CONFIG_HID_LENOVO_TPKBD) + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) }, +#endif { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) }, @@ -1624,7 +1627,6 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, { 
HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) }, - { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) }, diff --git a/trunk/drivers/hid/hid-logitech-dj.c b/trunk/drivers/hid/hid-logitech-dj.c index 0f9c146fc00d..4d524b5f52f5 100644 --- a/trunk/drivers/hid/hid-logitech-dj.c +++ b/trunk/drivers/hid/hid-logitech-dj.c @@ -439,7 +439,7 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev) struct dj_report *dj_report; int retval; - dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL); + dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); if (!dj_report) return -ENOMEM; dj_report->report_id = REPORT_ID_DJ_SHORT; @@ -456,7 +456,7 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, struct dj_report *dj_report; int retval; - dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL); + dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); if (!dj_report) return -ENOMEM; dj_report->report_id = REPORT_ID_DJ_SHORT; diff --git a/trunk/drivers/hid/usbhid/hid-quirks.c b/trunk/drivers/hid/usbhid/hid-quirks.c index 903eef3d3e10..991e85c7325c 100644 --- a/trunk/drivers/hid/usbhid/hid-quirks.c +++ b/trunk/drivers/hid/usbhid/hid-quirks.c @@ -70,6 +70,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS }, diff --git a/trunk/drivers/hwmon/asus_atk0110.c b/trunk/drivers/hwmon/asus_atk0110.c index 351d1f4593e7..4ee578948723 100644 --- a/trunk/drivers/hwmon/asus_atk0110.c +++ b/trunk/drivers/hwmon/asus_atk0110.c @@ -34,6 +34,12 @@ static const struct dmi_system_id __initconst atk_force_new_if[] = { .matches = { DMI_MATCH(DMI_BOARD_NAME, "SABERTOOTH X58") } + }, { + /* Old interface reads the same sensor for fan0 and fan1 */ + .ident = "Asus M5A78L", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "M5A78L") + } }, { } }; diff --git a/trunk/drivers/hwmon/coretemp.c b/trunk/drivers/hwmon/coretemp.c index faa16f80db9c..0fa356fe82cc 100644 --- a/trunk/drivers/hwmon/coretemp.c +++ b/trunk/drivers/hwmon/coretemp.c @@ -196,7 +196,7 @@ struct tjmax { int tjmax; }; -static struct tjmax __cpuinitconst tjmax_table[] = { +static const struct tjmax __cpuinitconst tjmax_table[] = { { "CPU D410", 100000 }, { "CPU D425", 100000 }, { "CPU D510", 100000 }, diff --git a/trunk/drivers/hwmon/w83627hf.c b/trunk/drivers/hwmon/w83627hf.c index ab4825205a9d..5b1a6a666441 100644 --- a/trunk/drivers/hwmon/w83627hf.c +++ b/trunk/drivers/hwmon/w83627hf.c @@ -1206,7 +1206,7 @@ static int __init w83627hf_find(int sioaddr, unsigned short *addr, int err = -ENODEV; u16 val; - static const __initdata char *names[] = { + static __initconst char *const names[] = { "W83627HF", "W83627THF", "W83697HF", diff --git 
a/trunk/drivers/i2c/busses/Kconfig b/trunk/drivers/i2c/busses/Kconfig index b4aaa1bd6728..970a1612e795 100644 --- a/trunk/drivers/i2c/busses/Kconfig +++ b/trunk/drivers/i2c/busses/Kconfig @@ -104,6 +104,7 @@ config I2C_I801 DH89xxCC (PCH) Panther Point (PCH) Lynx Point (PCH) + Lynx Point-LP (PCH) This driver can also be built as a module. If so, the module will be called i2c-i801. @@ -354,9 +355,13 @@ config I2C_DAVINCI devices such as DaVinci NIC. For details please see http://www.ti.com/davinci +config I2C_DESIGNWARE_CORE + tristate + config I2C_DESIGNWARE_PLATFORM tristate "Synopsys DesignWare Platform" depends on HAVE_CLK + select I2C_DESIGNWARE_CORE help If you say yes to this option, support will be included for the Synopsys DesignWare I2C adapter. Only master mode is supported. @@ -367,6 +372,7 @@ config I2C_DESIGNWARE_PLATFORM config I2C_DESIGNWARE_PCI tristate "Synopsys DesignWare PCI" depends on PCI + select I2C_DESIGNWARE_CORE help If you say yes to this option, support will be included for the Synopsys DesignWare I2C adapter. Only master mode is supported. diff --git a/trunk/drivers/i2c/busses/Makefile b/trunk/drivers/i2c/busses/Makefile index ce3c2be7fb40..37c4182cc98b 100644 --- a/trunk/drivers/i2c/busses/Makefile +++ b/trunk/drivers/i2c/busses/Makefile @@ -33,10 +33,11 @@ obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o obj-$(CONFIG_I2C_CPM) += i2c-cpm.o obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o +obj-$(CONFIG_I2C_DESIGNWARE_CORE) += i2c-designware-core.o obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM) += i2c-designware-platform.o -i2c-designware-platform-objs := i2c-designware-platdrv.o i2c-designware-core.o +i2c-designware-platform-objs := i2c-designware-platdrv.o obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o -i2c-designware-pci-objs := i2c-designware-pcidrv.o i2c-designware-core.o +i2c-designware-pci-objs := i2c-designware-pcidrv.o obj-$(CONFIG_I2C_EG20T) += i2c-eg20t.o obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o obj-$(CONFIG_I2C_HIGHLANDER) += i2c-highlander.o diff --git a/trunk/drivers/i2c/busses/i2c-designware-core.c b/trunk/drivers/i2c/busses/i2c-designware-core.c index 1e48bec80edf..7b8ebbefb581 100644 --- a/trunk/drivers/i2c/busses/i2c-designware-core.c +++ b/trunk/drivers/i2c/busses/i2c-designware-core.c @@ -25,6 +25,7 @@ * ---------------------------------------------------------------------------- * */ +#include #include #include #include @@ -316,6 +317,7 @@ int i2c_dw_init(struct dw_i2c_dev *dev) dw_writel(dev, dev->master_cfg , DW_IC_CON); return 0; } +EXPORT_SYMBOL_GPL(i2c_dw_init); /* * Waiting for bus not busy @@ -568,12 +570,14 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) return ret; } +EXPORT_SYMBOL_GPL(i2c_dw_xfer); u32 i2c_dw_func(struct i2c_adapter *adap) { struct dw_i2c_dev *dev = i2c_get_adapdata(adap); return dev->functionality; } +EXPORT_SYMBOL_GPL(i2c_dw_func); static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev) { @@ -678,17 +682,20 @@ irqreturn_t i2c_dw_isr(int this_irq, void *dev_id) return IRQ_HANDLED; } +EXPORT_SYMBOL_GPL(i2c_dw_isr); void i2c_dw_enable(struct dw_i2c_dev *dev) { /* Enable the adapter */ dw_writel(dev, 1, DW_IC_ENABLE); } +EXPORT_SYMBOL_GPL(i2c_dw_enable); u32 i2c_dw_is_enabled(struct dw_i2c_dev *dev) { return dw_readl(dev, DW_IC_ENABLE); } +EXPORT_SYMBOL_GPL(i2c_dw_is_enabled); void i2c_dw_disable(struct dw_i2c_dev *dev) { @@ -699,18 +706,22 @@ void i2c_dw_disable(struct dw_i2c_dev *dev) dw_writel(dev, 0, DW_IC_INTR_MASK); dw_readl(dev, DW_IC_CLR_INTR); } 
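With the DesignWare core carved out into its own module (I2C_DESIGNWARE_CORE selected by both the platform and PCI glue), a bus-glue driver only needs to point its i2c_algorithm at the exported helpers. A minimal sketch, with the struct name invented for illustration; only i2c_dw_xfer() and i2c_dw_func() come from the hunks above:

/* Sketch of how a glue driver consumes the newly exported core helpers. */
static const struct i2c_algorithm i2c_dw_algo_sketch = {
	.master_xfer	= i2c_dw_xfer,
	.functionality	= i2c_dw_func,
};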
+EXPORT_SYMBOL_GPL(i2c_dw_disable); void i2c_dw_clear_int(struct dw_i2c_dev *dev) { dw_readl(dev, DW_IC_CLR_INTR); } +EXPORT_SYMBOL_GPL(i2c_dw_clear_int); void i2c_dw_disable_int(struct dw_i2c_dev *dev) { dw_writel(dev, 0, DW_IC_INTR_MASK); } +EXPORT_SYMBOL_GPL(i2c_dw_disable_int); u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev) { return dw_readl(dev, DW_IC_COMP_PARAM_1); } +EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param); diff --git a/trunk/drivers/i2c/busses/i2c-diolan-u2c.c b/trunk/drivers/i2c/busses/i2c-diolan-u2c.c index aedb94f34bf7..dae3ddfe7619 100644 --- a/trunk/drivers/i2c/busses/i2c-diolan-u2c.c +++ b/trunk/drivers/i2c/busses/i2c-diolan-u2c.c @@ -405,6 +405,7 @@ static int diolan_usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, } } } + ret = num; abort: sret = diolan_i2c_stop(dev); if (sret < 0 && ret >= 0) diff --git a/trunk/drivers/i2c/busses/i2c-i801.c b/trunk/drivers/i2c/busses/i2c-i801.c index 898dcf9c7ade..33e9b0c09af2 100644 --- a/trunk/drivers/i2c/busses/i2c-i801.c +++ b/trunk/drivers/i2c/busses/i2c-i801.c @@ -52,6 +52,7 @@ DH89xxCC (PCH) 0x2330 32 hard yes yes yes Panther Point (PCH) 0x1e22 32 hard yes yes yes Lynx Point (PCH) 0x8c22 32 hard yes yes yes + Lynx Point-LP (PCH) 0x9c22 32 hard yes yes yes Features supported by this driver: Software PEC no @@ -155,6 +156,7 @@ #define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330 #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22 +#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22 struct i801_priv { struct i2c_adapter adapter; @@ -771,6 +773,7 @@ static DEFINE_PCI_DEVICE_TABLE(i801_ids) = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS) }, { 0, } }; diff --git a/trunk/drivers/i2c/busses/i2c-nomadik.c b/trunk/drivers/i2c/busses/i2c-nomadik.c index 5e6f1eed4f83..61b00edacb08 100644 --- a/trunk/drivers/i2c/busses/i2c-nomadik.c +++ b/trunk/drivers/i2c/busses/i2c-nomadik.c @@ -350,10 +350,6 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev) i2c_clk = clk_get_rate(dev->clk); - /* fallback to std. mode if machine has not provided it */ - if (dev->cfg.clk_freq == 0) - dev->cfg.clk_freq = 100000; - /* * The spec says, in case of std. mode the divider is * 2 whereas it is 3 for fast and fastplus mode of @@ -911,20 +907,32 @@ static const struct i2c_algorithm nmk_i2c_algo = { .functionality = nmk_i2c_functionality }; +static struct nmk_i2c_controller u8500_i2c = { + /* + * Slave data setup time; 250ns, 100ns, and 10ns, which + * is 14, 6 and 2 respectively for a 48Mhz i2c clock. + */ + .slsu = 0xe, + .tft = 1, /* Tx FIFO threshold */ + .rft = 8, /* Rx FIFO threshold */ + .clk_freq = 400000, /* fast mode operation */ + .timeout = 200, /* Slave response timeout(ms) */ + .sm = I2C_FREQ_MODE_FAST, +}; + static atomic_t adapter_id = ATOMIC_INIT(0); static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) { int ret = 0; - struct nmk_i2c_controller *pdata = - adev->dev.platform_data; + struct nmk_i2c_controller *pdata = adev->dev.platform_data; struct nmk_i2c_dev *dev; struct i2c_adapter *adap; - if (!pdata) { - dev_warn(&adev->dev, "no platform data\n"); - return -ENODEV; - } + if (!pdata) + /* No i2c configuration found, using the default. 
*/ + pdata = &u8500_i2c; + dev = kzalloc(sizeof(struct nmk_i2c_dev), GFP_KERNEL); if (!dev) { dev_err(&adev->dev, "cannot allocate memory\n"); diff --git a/trunk/drivers/i2c/busses/i2c-omap.c b/trunk/drivers/i2c/busses/i2c-omap.c index 6849635b268a..5d19a49803c1 100644 --- a/trunk/drivers/i2c/busses/i2c-omap.c +++ b/trunk/drivers/i2c/busses/i2c-omap.c @@ -584,7 +584,7 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) r = pm_runtime_get_sync(dev->dev); if (IS_ERR_VALUE(r)) - return r; + goto out; r = omap_i2c_wait_for_bb(dev); if (r < 0) diff --git a/trunk/drivers/i2c/busses/i2c-tegra.c b/trunk/drivers/i2c/busses/i2c-tegra.c index 66eb53fac202..9a08c57bc936 100644 --- a/trunk/drivers/i2c/busses/i2c-tegra.c +++ b/trunk/drivers/i2c/busses/i2c-tegra.c @@ -712,7 +712,7 @@ static int __devexit tegra_i2c_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int tegra_i2c_suspend(struct device *dev) { struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); diff --git a/trunk/drivers/i2c/i2c-core.c b/trunk/drivers/i2c/i2c-core.c index 2efa56c5ff2c..2091ae8f539a 100644 --- a/trunk/drivers/i2c/i2c-core.c +++ b/trunk/drivers/i2c/i2c-core.c @@ -636,6 +636,22 @@ static void i2c_adapter_dev_release(struct device *dev) complete(&adap->dev_released); } +/* + * This function is only needed for mutex_lock_nested, so it is never + * called unless locking correctness checking is enabled. Thus we + * make it inline to avoid a compiler warning. That's what gcc ends up + * doing anyway. + */ +static inline unsigned int i2c_adapter_depth(struct i2c_adapter *adapter) +{ + unsigned int depth = 0; + + while ((adapter = i2c_parent_is_i2c_adapter(adapter))) + depth++; + + return depth; +} + /* * Let users instantiate I2C devices through sysfs. 
This can be used when * platform initialization code doesn't contain the proper data for @@ -726,7 +742,8 @@ i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr, /* Make sure the device was added through sysfs */ res = -ENOENT; - mutex_lock(&adap->userspace_clients_lock); + mutex_lock_nested(&adap->userspace_clients_lock, + i2c_adapter_depth(adap)); list_for_each_entry_safe(client, next, &adap->userspace_clients, detected) { if (client->addr == addr) { @@ -1073,7 +1090,8 @@ int i2c_del_adapter(struct i2c_adapter *adap) return res; /* Remove devices instantiated from sysfs */ - mutex_lock(&adap->userspace_clients_lock); + mutex_lock_nested(&adap->userspace_clients_lock, + i2c_adapter_depth(adap)); list_for_each_entry_safe(client, next, &adap->userspace_clients, detected) { dev_dbg(&adap->dev, "Removing %s at 0x%x\n", client->name, diff --git a/trunk/drivers/ide/ide-pm.c b/trunk/drivers/ide/ide-pm.c index 92406097efeb..8d1e32d7cd97 100644 --- a/trunk/drivers/ide/ide-pm.c +++ b/trunk/drivers/ide/ide-pm.c @@ -4,7 +4,7 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg) { - ide_drive_t *drive = dev_get_drvdata(dev); + ide_drive_t *drive = to_ide_device(dev); ide_drive_t *pair = ide_get_pair_dev(drive); ide_hwif_t *hwif = drive->hwif; struct request *rq; @@ -40,7 +40,7 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg) int generic_ide_resume(struct device *dev) { - ide_drive_t *drive = dev_get_drvdata(dev); + ide_drive_t *drive = to_ide_device(dev); ide_drive_t *pair = ide_get_pair_dev(drive); ide_hwif_t *hwif = drive->hwif; struct request *rq; diff --git a/trunk/drivers/idle/intel_idle.c b/trunk/drivers/idle/intel_idle.c index f559088869f6..e8726177d103 100644 --- a/trunk/drivers/idle/intel_idle.c +++ b/trunk/drivers/idle/intel_idle.c @@ -606,8 +606,9 @@ static int __init intel_idle_init(void) intel_idle_cpuidle_driver_init(); retval = cpuidle_register_driver(&intel_idle_driver); if (retval) { + struct cpuidle_driver *drv = cpuidle_get_driver(); printk(KERN_DEBUG PREFIX "intel_idle yielding to %s", - cpuidle_get_driver()->name); + drv ? 
drv->name : "none"); return retval; } diff --git a/trunk/drivers/iio/frequency/adf4350.c b/trunk/drivers/iio/frequency/adf4350.c index 59fbb3ae40e7..e35bb8f6fe75 100644 --- a/trunk/drivers/iio/frequency/adf4350.c +++ b/trunk/drivers/iio/frequency/adf4350.c @@ -129,7 +129,7 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq) { struct adf4350_platform_data *pdata = st->pdata; u64 tmp; - u32 div_gcd, prescaler; + u32 div_gcd, prescaler, chspc; u16 mdiv, r_cnt = 0; u8 band_sel_div; @@ -158,14 +158,20 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq) if (pdata->ref_div_factor) r_cnt = pdata->ref_div_factor - 1; - do { - r_cnt = adf4350_tune_r_cnt(st, r_cnt); + chspc = st->chspc; - st->r1_mod = st->fpfd / st->chspc; - while (st->r1_mod > ADF4350_MAX_MODULUS) { - r_cnt = adf4350_tune_r_cnt(st, r_cnt); - st->r1_mod = st->fpfd / st->chspc; - } + do { + do { + do { + r_cnt = adf4350_tune_r_cnt(st, r_cnt); + st->r1_mod = st->fpfd / chspc; + if (r_cnt > ADF4350_MAX_R_CNT) { + /* try higher spacing values */ + chspc++; + r_cnt = 0; + } + } while ((st->r1_mod > ADF4350_MAX_MODULUS) && r_cnt); + } while (r_cnt == 0); tmp = freq * (u64)st->r1_mod + (st->fpfd > 1); do_div(tmp, st->fpfd); /* Div round closest (n + d/2)/d */ @@ -194,7 +200,7 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq) st->regs[ADF4350_REG0] = ADF4350_REG0_INT(st->r0_int) | ADF4350_REG0_FRACT(st->r0_fract); - st->regs[ADF4350_REG1] = ADF4350_REG1_PHASE(0) | + st->regs[ADF4350_REG1] = ADF4350_REG1_PHASE(1) | ADF4350_REG1_MOD(st->r1_mod) | prescaler; diff --git a/trunk/drivers/iio/light/adjd_s311.c b/trunk/drivers/iio/light/adjd_s311.c index 1cbb449b319a..9a99f43094f0 100644 --- a/trunk/drivers/iio/light/adjd_s311.c +++ b/trunk/drivers/iio/light/adjd_s311.c @@ -271,9 +271,10 @@ static int adjd_s311_update_scan_mode(struct iio_dev *indio_dev, const unsigned long *scan_mask) { struct adjd_s311_data *data = iio_priv(indio_dev); - data->buffer = krealloc(data->buffer, indio_dev->scan_bytes, - GFP_KERNEL); - if (!data->buffer) + + kfree(data->buffer); + data->buffer = kmalloc(indio_dev->scan_bytes, GFP_KERNEL); + if (data->buffer == NULL) return -ENOMEM; return 0; diff --git a/trunk/drivers/iio/light/lm3533-als.c b/trunk/drivers/iio/light/lm3533-als.c index c3e7bac13123..e45712a921ce 100644 --- a/trunk/drivers/iio/light/lm3533-als.c +++ b/trunk/drivers/iio/light/lm3533-als.c @@ -404,7 +404,7 @@ static int lm3533_als_get_hysteresis(struct iio_dev *indio_dev, unsigned nr, return ret; } -static int show_thresh_either_en(struct device *dev, +static ssize_t show_thresh_either_en(struct device *dev, struct device_attribute *attr, char *buf) { @@ -424,7 +424,7 @@ static int show_thresh_either_en(struct device *dev, return scnprintf(buf, PAGE_SIZE, "%u\n", enable); } -static int store_thresh_either_en(struct device *dev, +static ssize_t store_thresh_either_en(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { diff --git a/trunk/drivers/infiniband/core/ucma.c b/trunk/drivers/infiniband/core/ucma.c index 6bf850422895..055ed59838dc 100644 --- a/trunk/drivers/infiniband/core/ucma.c +++ b/trunk/drivers/infiniband/core/ucma.c @@ -267,6 +267,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id, if (!uevent) return event->event == RDMA_CM_EVENT_CONNECT_REQUEST; + mutex_lock(&ctx->file->mut); uevent->cm_id = cm_id; ucma_set_event_context(ctx, event, uevent); uevent->resp.event = event->event; @@ -277,7 +278,6 @@ static int 
ucma_event_handler(struct rdma_cm_id *cm_id, ucma_copy_conn_event(&uevent->resp.param.conn, &event->param.conn); - mutex_lock(&ctx->file->mut); if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) { if (!ctx->backlog) { ret = -ENOMEM; diff --git a/trunk/drivers/infiniband/hw/amso1100/c2_rnic.c b/trunk/drivers/infiniband/hw/amso1100/c2_rnic.c index 8c81992fa6db..e4a73158fc7f 100644 --- a/trunk/drivers/infiniband/hw/amso1100/c2_rnic.c +++ b/trunk/drivers/infiniband/hw/amso1100/c2_rnic.c @@ -439,7 +439,7 @@ static int c2_rnic_close(struct c2_dev *c2dev) /* * Called by c2_probe to initialize the RNIC. This principally - * involves initalizing the various limits and resouce pools that + * involves initializing the various limits and resource pools that * comprise the RNIC instance. */ int __devinit c2_rnic_init(struct c2_dev *c2dev) diff --git a/trunk/drivers/infiniband/hw/cxgb3/iwch_cm.c b/trunk/drivers/infiniband/hw/cxgb3/iwch_cm.c index 77b6b182778a..aaf88ef9409c 100644 --- a/trunk/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/trunk/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -1680,7 +1680,7 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) * T3A does 3 things when a TERM is received: * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet * 2) generate an async event on the QP with the TERMINATE opcode - * 3) post a TERMINATE opcde cqe into the associated CQ. + * 3) post a TERMINATE opcode cqe into the associated CQ. * * For (1), we save the message in the qp for later consumer consumption. * For (2), we move the QP into TERMINATE, post a QP event and disconnect. diff --git a/trunk/drivers/infiniband/hw/mlx4/mad.c b/trunk/drivers/infiniband/hw/mlx4/mad.c index c27141fef1ab..9c2ae7efd00f 100644 --- a/trunk/drivers/infiniband/hw/mlx4/mad.c +++ b/trunk/drivers/infiniband/hw/mlx4/mad.c @@ -125,6 +125,7 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl) { struct ib_ah *new_ah; struct ib_ah_attr ah_attr; + unsigned long flags; if (!dev->send_agent[port_num - 1][0]) return; @@ -139,11 +140,11 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl) if (IS_ERR(new_ah)) return; - spin_lock(&dev->sm_lock); + spin_lock_irqsave(&dev->sm_lock, flags); if (dev->sm_ah[port_num - 1]) ib_destroy_ah(dev->sm_ah[port_num - 1]); dev->sm_ah[port_num - 1] = new_ah; - spin_unlock(&dev->sm_lock); + spin_unlock_irqrestore(&dev->sm_lock, flags); } /* @@ -197,13 +198,15 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad, static void node_desc_override(struct ib_device *dev, struct ib_mad *mad) { + unsigned long flags; + if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP && mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) { - spin_lock(&to_mdev(dev)->sm_lock); + spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags); memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64); - spin_unlock(&to_mdev(dev)->sm_lock); + spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags); } } @@ -213,6 +216,7 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma struct ib_mad_send_buf *send_buf; struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn]; int ret; + unsigned long flags; if (agent) { send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, @@ -225,13 +229,13 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma * wrong 
following the IB spec strictly, but we know * it's OK for our devices). */ - spin_lock(&dev->sm_lock); + spin_lock_irqsave(&dev->sm_lock, flags); memcpy(send_buf->mad, mad, sizeof *mad); if ((send_buf->ah = dev->sm_ah[port_num - 1])) ret = ib_post_send_mad(send_buf, NULL); else ret = -EINVAL; - spin_unlock(&dev->sm_lock); + spin_unlock_irqrestore(&dev->sm_lock, flags); if (ret) ib_free_send_mad(send_buf); diff --git a/trunk/drivers/infiniband/hw/mlx4/main.c b/trunk/drivers/infiniband/hw/mlx4/main.c index fe2088cfa6ee..cc05579ebce7 100644 --- a/trunk/drivers/infiniband/hw/mlx4/main.c +++ b/trunk/drivers/infiniband/hw/mlx4/main.c @@ -423,6 +423,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask, struct ib_device_modify *props) { struct mlx4_cmd_mailbox *mailbox; + unsigned long flags; if (mask & ~IB_DEVICE_MODIFY_NODE_DESC) return -EOPNOTSUPP; @@ -430,9 +431,9 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask, if (!(mask & IB_DEVICE_MODIFY_NODE_DESC)) return 0; - spin_lock(&to_mdev(ibdev)->sm_lock); + spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags); memcpy(ibdev->node_desc, props->node_desc, 64); - spin_unlock(&to_mdev(ibdev)->sm_lock); + spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags); /* * If possible, pass node desc to FW, so it can generate diff --git a/trunk/drivers/infiniband/hw/mlx4/qp.c b/trunk/drivers/infiniband/hw/mlx4/qp.c index a6d8ea060ea8..f585eddef4b7 100644 --- a/trunk/drivers/infiniband/hw/mlx4/qp.c +++ b/trunk/drivers/infiniband/hw/mlx4/qp.c @@ -1407,6 +1407,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, struct mlx4_wqe_mlx_seg *mlx = wqe; struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); + struct net_device *ndev; union ib_gid sgid; u16 pkey; int send_size; @@ -1483,7 +1484,10 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6); /* FIXME: cache smac value? 
*/ - smac = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]->dev_addr; + ndev = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]; + if (!ndev) + return -ENODEV; + smac = ndev->dev_addr; memcpy(sqp->ud_header.eth.smac_h, smac, 6); if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6)) mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); diff --git a/trunk/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/trunk/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 5a044526e4f4..c4e0131f1b57 100644 --- a/trunk/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/trunk/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -161,7 +161,7 @@ static void ocrdma_add_default_sgid(struct ocrdma_dev *dev) ocrdma_get_guid(dev, &sgid->raw[8]); } -#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#if IS_ENABLED(CONFIG_VLAN_8021Q) static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev) { struct net_device *netdev, *tmp; @@ -202,14 +202,13 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev) return 0; } -#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_VLAN_8021Q) +#if IS_ENABLED(CONFIG_IPV6) static int ocrdma_inet6addr_event(struct notifier_block *notifier, unsigned long event, void *ptr) { struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; - struct net_device *event_netdev = ifa->idev->dev; - struct net_device *netdev = NULL; + struct net_device *netdev = ifa->idev->dev; struct ib_event gid_event; struct ocrdma_dev *dev; bool found = false; @@ -217,11 +216,12 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier, bool is_vlan = false; u16 vid = 0; - netdev = vlan_dev_real_dev(event_netdev); - if (netdev != event_netdev) { - is_vlan = true; - vid = vlan_dev_vlan_id(event_netdev); + is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN; + if (is_vlan) { + vid = vlan_dev_vlan_id(netdev); + netdev = vlan_dev_real_dev(netdev); } + rcu_read_lock(); list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) { if (dev->nic_info.netdev == netdev) { diff --git a/trunk/drivers/infiniband/hw/qib/qib_iba7322.c b/trunk/drivers/infiniband/hw/qib/qib_iba7322.c index 0d7280af99bc..3f6b21e9dc11 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/trunk/drivers/infiniband/hw/qib/qib_iba7322.c @@ -6346,8 +6346,10 @@ static int qib_init_7322_variables(struct qib_devdata *dd) dd->piobcnt4k * dd->align4k; dd->piovl15base = ioremap_nocache(vl15off, NUM_VL15_BUFS * dd->align4k); - if (!dd->piovl15base) + if (!dd->piovl15base) { + ret = -ENOMEM; goto bail; + } } qib_7322_set_baseaddrs(dd); /* set chip access pointers now */ diff --git a/trunk/drivers/infiniband/hw/qib/qib_sd7220.c b/trunk/drivers/infiniband/hw/qib/qib_sd7220.c index a322d5171a2c..50a8a0d4fe67 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_sd7220.c +++ b/trunk/drivers/infiniband/hw/qib/qib_sd7220.c @@ -372,7 +372,7 @@ static void qib_sd_trimdone_monitor(struct qib_devdata *dd, /* Read CTRL reg for each channel to check TRIMDONE */ if (baduns & (1 << chn)) { qib_dev_err(dd, - "Reseting TRIMDONE on chn %d (%s)\n", + "Resetting TRIMDONE on chn %d (%s)\n", chn, where); ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(chn), 0x10, 0x10); diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/trunk/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 95ecf4eadf5f..24683fda8e21 100644 --- a/trunk/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -1271,12 +1271,15 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path void 
ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx) { struct ipoib_dev_priv *priv = netdev_priv(tx->dev); + unsigned long flags; if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { + spin_lock_irqsave(&priv->lock, flags); list_move(&tx->list, &priv->cm.reap_list); queue_work(ipoib_workqueue, &priv->cm.reap_task); ipoib_dbg(priv, "Reap connection for gid %pI6\n", tx->neigh->daddr + 4); tx->neigh = NULL; + spin_unlock_irqrestore(&priv->lock, flags); } } diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c b/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c index 97920b77a5d0..3e2085a3ee47 100644 --- a/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1052,7 +1052,7 @@ void ipoib_neigh_free(struct ipoib_neigh *neigh) for (n = rcu_dereference_protected(*np, lockdep_is_held(&ntbl->rwlock)); n != NULL; - n = rcu_dereference_protected(neigh->hnext, + n = rcu_dereference_protected(*np, lockdep_is_held(&ntbl->rwlock))) { if (n == neigh) { /* found */ diff --git a/trunk/drivers/infiniband/ulp/srp/ib_srp.c b/trunk/drivers/infiniband/ulp/srp/ib_srp.c index bcbf22ee0aa7..1b5b0c730054 100644 --- a/trunk/drivers/infiniband/ulp/srp/ib_srp.c +++ b/trunk/drivers/infiniband/ulp/srp/ib_srp.c @@ -586,24 +586,62 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd, scmnd->sc_data_direction); } -static void srp_remove_req(struct srp_target_port *target, - struct srp_request *req, s32 req_lim_delta) +/** + * srp_claim_req - Take ownership of the scmnd associated with a request. + * @target: SRP target port. + * @req: SRP request. + * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take + * ownership of @req->scmnd if it equals @scmnd. + * + * Return value: + * Either NULL or a pointer to the SCSI command the caller became owner of. + */ +static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target, + struct srp_request *req, + struct scsi_cmnd *scmnd) +{ + unsigned long flags; + + spin_lock_irqsave(&target->lock, flags); + if (!scmnd) { + scmnd = req->scmnd; + req->scmnd = NULL; + } else if (req->scmnd == scmnd) { + req->scmnd = NULL; + } else { + scmnd = NULL; + } + spin_unlock_irqrestore(&target->lock, flags); + + return scmnd; +} + +/** + * srp_free_req() - Unmap data and add request to the free request list. 
+ */ +static void srp_free_req(struct srp_target_port *target, + struct srp_request *req, struct scsi_cmnd *scmnd, + s32 req_lim_delta) { unsigned long flags; - srp_unmap_data(req->scmnd, target, req); + srp_unmap_data(scmnd, target, req); + spin_lock_irqsave(&target->lock, flags); target->req_lim += req_lim_delta; - req->scmnd = NULL; list_add_tail(&req->list, &target->free_reqs); spin_unlock_irqrestore(&target->lock, flags); } static void srp_reset_req(struct srp_target_port *target, struct srp_request *req) { - req->scmnd->result = DID_RESET << 16; - req->scmnd->scsi_done(req->scmnd); - srp_remove_req(target, req, 0); + struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL); + + if (scmnd) { + scmnd->result = DID_RESET << 16; + scmnd->scsi_done(scmnd); + srp_free_req(target, req, scmnd, 0); + } } static int srp_reconnect_target(struct srp_target_port *target) @@ -1073,11 +1111,18 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) complete(&target->tsk_mgmt_done); } else { req = &target->req_ring[rsp->tag]; - scmnd = req->scmnd; - if (!scmnd) + scmnd = srp_claim_req(target, req, NULL); + if (!scmnd) { shost_printk(KERN_ERR, target->scsi_host, "Null scmnd for RSP w/tag %016llx\n", (unsigned long long) rsp->tag); + + spin_lock_irqsave(&target->lock, flags); + target->req_lim += be32_to_cpu(rsp->req_lim_delta); + spin_unlock_irqrestore(&target->lock, flags); + + return; + } scmnd->result = rsp->status; if (rsp->flags & SRP_RSP_FLAG_SNSVALID) { @@ -1092,7 +1137,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER)) scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); - srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta)); + srp_free_req(target, req, scmnd, + be32_to_cpu(rsp->req_lim_delta)); + scmnd->host_scribble = NULL; scmnd->scsi_done(scmnd); } @@ -1631,25 +1678,17 @@ static int srp_abort(struct scsi_cmnd *scmnd) { struct srp_target_port *target = host_to_target(scmnd->device->host); struct srp_request *req = (struct srp_request *) scmnd->host_scribble; - int ret = SUCCESS; shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); - if (!req || target->qp_in_error) + if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd)) return FAILED; - if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, - SRP_TSK_ABORT_TASK)) - return FAILED; - - if (req->scmnd) { - if (!target->tsk_mgmt_status) { - srp_remove_req(target, req, 0); - scmnd->result = DID_ABORT << 16; - } else - ret = FAILED; - } + srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, + SRP_TSK_ABORT_TASK); + srp_free_req(target, req, scmnd, 0); + scmnd->result = DID_ABORT << 16; - return ret; + return SUCCESS; } static int srp_reset_device(struct scsi_cmnd *scmnd) diff --git a/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c b/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c index 7a0ce8d42887..9e1449f8c6a2 100644 --- a/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -1469,7 +1469,7 @@ static void srpt_handle_send_comp(struct srpt_rdma_ch *ch, * * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping * the data that has been transferred via IB RDMA had to be postponed until the - * check_stop_free() callback. None of this is nessecary anymore and needs to + * check_stop_free() callback. None of this is necessary anymore and needs to * be cleaned up. 
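The srp_claim_req()/srp_free_req() split above exists so that scsi_done() runs exactly once even when a response, an abort and a reset race for the same request. A generic sketch of the idiom, not SRP-specific and with invented names: whichever path claims the command under the lock is the only one allowed to complete it.

/* Take ownership under the lock, complete outside it; only one caller wins. */
static struct scsi_cmnd *claim_once_sketch(spinlock_t *lock,
					   struct scsi_cmnd **slot)
{
	struct scsi_cmnd *cmd;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	cmd = *slot;
	*slot = NULL;
	spin_unlock_irqrestore(lock, flags);

	return cmd;
}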
*/ static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch, diff --git a/trunk/drivers/input/keyboard/imx_keypad.c b/trunk/drivers/input/keyboard/imx_keypad.c index ff4c0a87a25f..ce68e361558c 100644 --- a/trunk/drivers/input/keyboard/imx_keypad.c +++ b/trunk/drivers/input/keyboard/imx_keypad.c @@ -358,6 +358,7 @@ static void imx_keypad_inhibit(struct imx_keypad *keypad) /* Inhibit KDI and KRI interrupts. */ reg_val = readw(keypad->mmio_base + KPSR); reg_val &= ~(KBD_STAT_KRIE | KBD_STAT_KDIE); + reg_val |= KBD_STAT_KPKR | KBD_STAT_KPKD; writew(reg_val, keypad->mmio_base + KPSR); /* Colums as open drain and disable all rows */ @@ -515,7 +516,9 @@ static int __devinit imx_keypad_probe(struct platform_device *pdev) input_set_drvdata(input_dev, keypad); /* Ensure that the keypad will stay dormant until opened */ + clk_enable(keypad->clk); imx_keypad_inhibit(keypad); + clk_disable(keypad->clk); error = request_irq(irq, imx_keypad_irq_handler, 0, pdev->name, keypad); diff --git a/trunk/drivers/input/serio/i8042-x86ia64io.h b/trunk/drivers/input/serio/i8042-x86ia64io.h index 5ec774d6c82b..6918773ce024 100644 --- a/trunk/drivers/input/serio/i8042-x86ia64io.h +++ b/trunk/drivers/input/serio/i8042-x86ia64io.h @@ -176,6 +176,20 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"), }, }, + { + /* Gigabyte T1005 - defines wrong chassis type ("Other") */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "T1005"), + }, + }, + { + /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"), + }, + }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), diff --git a/trunk/drivers/input/tablet/wacom_wac.c b/trunk/drivers/input/tablet/wacom_wac.c index 002041975de9..532d067a9e07 100644 --- a/trunk/drivers/input/tablet/wacom_wac.c +++ b/trunk/drivers/input/tablet/wacom_wac.c @@ -1848,7 +1848,10 @@ static const struct wacom_features wacom_features_0x2A = { "Wacom Intuos5 M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047, 63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xF4 = - { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, + { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, + 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; +static const struct wacom_features wacom_features_0xF8 = + { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0x3F = { "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023, @@ -2091,6 +2094,7 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0xEF) }, { USB_DEVICE_WACOM(0x47) }, { USB_DEVICE_WACOM(0xF4) }, + { USB_DEVICE_WACOM(0xF8) }, { USB_DEVICE_WACOM(0xFA) }, { USB_DEVICE_LENOVO(0x6004) }, { } diff --git a/trunk/drivers/input/touchscreen/edt-ft5x06.c b/trunk/drivers/input/touchscreen/edt-ft5x06.c index 9afc777a40a7..b06a5e3a665e 100644 --- a/trunk/drivers/input/touchscreen/edt-ft5x06.c +++ b/trunk/drivers/input/touchscreen/edt-ft5x06.c @@ -602,6 +602,7 @@ edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata) { if (tsdata->debug_dir) debugfs_remove_recursive(tsdata->debug_dir); + kfree(tsdata->raw_buffer); } #else @@ -843,7 +844,6 @@ static int __devexit edt_ft5x06_ts_remove(struct i2c_client *client) if 
(gpio_is_valid(pdata->reset_pin)) gpio_free(pdata->reset_pin); - kfree(tsdata->raw_buffer); kfree(tsdata); return 0; diff --git a/trunk/drivers/input/touchscreen/eeti_ts.c b/trunk/drivers/input/touchscreen/eeti_ts.c index 503c7096ed36..908407efc672 100644 --- a/trunk/drivers/input/touchscreen/eeti_ts.c +++ b/trunk/drivers/input/touchscreen/eeti_ts.c @@ -48,7 +48,7 @@ struct eeti_ts_priv { struct input_dev *input; struct work_struct work; struct mutex mutex; - int irq, irq_active_high; + int irq_gpio, irq, irq_active_high; }; #define EETI_TS_BITDEPTH (11) @@ -62,7 +62,7 @@ struct eeti_ts_priv { static inline int eeti_ts_irq_active(struct eeti_ts_priv *priv) { - return gpio_get_value(irq_to_gpio(priv->irq)) == priv->irq_active_high; + return gpio_get_value(priv->irq_gpio) == priv->irq_active_high; } static void eeti_ts_read(struct work_struct *work) @@ -157,7 +157,7 @@ static void eeti_ts_close(struct input_dev *dev) static int __devinit eeti_ts_probe(struct i2c_client *client, const struct i2c_device_id *idp) { - struct eeti_ts_platform_data *pdata; + struct eeti_ts_platform_data *pdata = client->dev.platform_data; struct eeti_ts_priv *priv; struct input_dev *input; unsigned int irq_flags; @@ -199,9 +199,12 @@ static int __devinit eeti_ts_probe(struct i2c_client *client, priv->client = client; priv->input = input; - priv->irq = client->irq; + priv->irq_gpio = pdata->irq_gpio; + priv->irq = gpio_to_irq(pdata->irq_gpio); - pdata = client->dev.platform_data; + err = gpio_request_one(pdata->irq_gpio, GPIOF_IN, client->name); + if (err < 0) + goto err1; if (pdata) priv->irq_active_high = pdata->irq_active_high; @@ -215,13 +218,13 @@ static int __devinit eeti_ts_probe(struct i2c_client *client, err = input_register_device(input); if (err) - goto err1; + goto err2; err = request_irq(priv->irq, eeti_ts_isr, irq_flags, client->name, priv); if (err) { dev_err(&client->dev, "Unable to request touchscreen IRQ.\n"); - goto err2; + goto err3; } /* @@ -233,9 +236,11 @@ static int __devinit eeti_ts_probe(struct i2c_client *client, device_init_wakeup(&client->dev, 0); return 0; -err2: +err3: input_unregister_device(input); input = NULL; /* so we dont try to free it below */ +err2: + gpio_free(pdata->irq_gpio); err1: input_free_device(input); kfree(priv); diff --git a/trunk/drivers/iommu/amd_iommu.c b/trunk/drivers/iommu/amd_iommu.c index 6d1cbdfc9b2a..b64502dfa9f4 100644 --- a/trunk/drivers/iommu/amd_iommu.c +++ b/trunk/drivers/iommu/amd_iommu.c @@ -296,8 +296,13 @@ static int iommu_init_device(struct device *dev) } else dma_pdev = pci_dev_get(pdev); + /* Account for quirked devices */ swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev)); + /* + * If it's a multifunction device that does not support our + * required ACS flags, add to the same group as function 0. + */ if (dma_pdev->multifunction && !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) swap_pci_ref(&dma_pdev, @@ -305,14 +310,28 @@ static int iommu_init_device(struct device *dev) PCI_DEVFN(PCI_SLOT(dma_pdev->devfn), 0))); + /* + * Devices on the root bus go through the iommu. If that's not us, + * find the next upstream device and test ACS up to the root bus. + * Finding the next device may require skipping virtual buses. 
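The same upstream walk is duplicated in the intel-iommu hunk further down; stripped of the group bookkeeping it amounts to the following (helper name invented for illustration):

/* Find the first real upstream bridge, skipping buses whose ->self is NULL
 * (e.g. SR-IOV virtual buses), and give up once the root bus is reached. */
static struct pci_dev *upstream_bridge_sketch(struct pci_bus *bus)
{
	while (!bus->self) {
		if (pci_is_root_bus(bus))
			return NULL;	/* device hangs off the root bus */
		bus = bus->parent;
	}

	return bus->self;
}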
+ */ while (!pci_is_root_bus(dma_pdev->bus)) { - if (pci_acs_path_enabled(dma_pdev->bus->self, - NULL, REQ_ACS_FLAGS)) + struct pci_bus *bus = dma_pdev->bus; + + while (!bus->self) { + if (!pci_is_root_bus(bus)) + bus = bus->parent; + else + goto root_bus; + } + + if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) break; - swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self)); + swap_pci_ref(&dma_pdev, pci_dev_get(bus->self)); } +root_bus: group = iommu_group_get(&dma_pdev->dev); pci_dev_put(dma_pdev); if (!group) { diff --git a/trunk/drivers/iommu/amd_iommu_init.c b/trunk/drivers/iommu/amd_iommu_init.c index 500e7f15f5c2..18a89b760aaa 100644 --- a/trunk/drivers/iommu/amd_iommu_init.c +++ b/trunk/drivers/iommu/amd_iommu_init.c @@ -1111,7 +1111,7 @@ static void print_iommu_info(void) if (iommu->cap & (1 << IOMMU_CAP_EFR)) { pr_info("AMD-Vi: Extended features: "); - for (i = 0; ARRAY_SIZE(feat_str); ++i) { + for (i = 0; i < ARRAY_SIZE(feat_str); ++i) { if (iommu_feature(iommu, (1ULL << i))) pr_cont(" %s", feat_str[i]); } @@ -1131,9 +1131,6 @@ static int __init amd_iommu_init_pci(void) break; } - /* Make sure ACS will be enabled */ - pci_request_acs(); - ret = amd_iommu_init_devices(); print_iommu_info(); @@ -1652,6 +1649,9 @@ static bool detect_ivrs(void) early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size); + /* Make sure ACS will be enabled during PCI probe */ + pci_request_acs(); + return true; } diff --git a/trunk/drivers/iommu/exynos-iommu.c b/trunk/drivers/iommu/exynos-iommu.c index 45350ff5e93c..80bad32aa463 100644 --- a/trunk/drivers/iommu/exynos-iommu.c +++ b/trunk/drivers/iommu/exynos-iommu.c @@ -732,9 +732,9 @@ static int exynos_iommu_domain_init(struct iommu_domain *domain) spin_lock_init(&priv->pgtablelock); INIT_LIST_HEAD(&priv->clients); - dom->geometry.aperture_start = 0; - dom->geometry.aperture_end = ~0UL; - dom->geometry.force_aperture = true; + domain->geometry.aperture_start = 0; + domain->geometry.aperture_end = ~0UL; + domain->geometry.force_aperture = true; domain->priv = priv; return 0; diff --git a/trunk/drivers/iommu/intel-iommu.c b/trunk/drivers/iommu/intel-iommu.c index 7469b5346643..2297ec193eb4 100644 --- a/trunk/drivers/iommu/intel-iommu.c +++ b/trunk/drivers/iommu/intel-iommu.c @@ -2008,6 +2008,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) if (!drhd) { printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n", pci_name(pdev)); + free_domain_mem(domain); return NULL; } iommu = drhd->iommu; @@ -4124,8 +4125,13 @@ static int intel_iommu_add_device(struct device *dev) } else dma_pdev = pci_dev_get(pdev); + /* Account for quirked devices */ swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev)); + /* + * If it's a multifunction device that does not support our + * required ACS flags, add to the same group as function 0. + */ if (dma_pdev->multifunction && !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) swap_pci_ref(&dma_pdev, @@ -4133,14 +4139,28 @@ static int intel_iommu_add_device(struct device *dev) PCI_DEVFN(PCI_SLOT(dma_pdev->devfn), 0))); + /* + * Devices on the root bus go through the iommu. If that's not us, + * find the next upstream device and test ACS up to the root bus. + * Finding the next device may require skipping virtual buses. 
+ */ while (!pci_is_root_bus(dma_pdev->bus)) { - if (pci_acs_path_enabled(dma_pdev->bus->self, - NULL, REQ_ACS_FLAGS)) + struct pci_bus *bus = dma_pdev->bus; + + while (!bus->self) { + if (!pci_is_root_bus(bus)) + bus = bus->parent; + else + goto root_bus; + } + + if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) break; - swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self)); + swap_pci_ref(&dma_pdev, pci_dev_get(bus->self)); } +root_bus: group = iommu_group_get(&dma_pdev->dev); pci_dev_put(dma_pdev); if (!group) { diff --git a/trunk/drivers/iommu/intel_irq_remapping.c b/trunk/drivers/iommu/intel_irq_remapping.c index e0b18f3ae9a8..af8904de1d44 100644 --- a/trunk/drivers/iommu/intel_irq_remapping.c +++ b/trunk/drivers/iommu/intel_irq_remapping.c @@ -736,6 +736,7 @@ int __init parse_ioapics_under_ir(void) { struct dmar_drhd_unit *drhd; int ir_supported = 0; + int ioapic_idx; for_each_drhd_unit(drhd) { struct intel_iommu *iommu = drhd->iommu; @@ -748,13 +749,20 @@ int __init parse_ioapics_under_ir(void) } } - if (ir_supported && ir_ioapic_num != nr_ioapics) { - printk(KERN_WARNING - "Not all IO-APIC's listed under remapping hardware\n"); - return -1; + if (!ir_supported) + return 0; + + for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) { + int ioapic_id = mpc_ioapic_id(ioapic_idx); + if (!map_ioapic_to_ir(ioapic_id)) { + pr_err(FW_BUG "ioapic %d has no mapping iommu, " + "interrupt remapping will be disabled\n", + ioapic_id); + return -1; + } } - return ir_supported; + return 1; } int __init ir_dev_scope_init(void) diff --git a/trunk/drivers/iommu/tegra-smmu.c b/trunk/drivers/iommu/tegra-smmu.c index 4ba325ab6262..2a4bb36bc688 100644 --- a/trunk/drivers/iommu/tegra-smmu.c +++ b/trunk/drivers/iommu/tegra-smmu.c @@ -799,14 +799,14 @@ static void smmu_iommu_detach_dev(struct iommu_domain *domain, goto out; } } - dev_err(smmu->dev, "Couldn't find %s\n", dev_name(c->dev)); + dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev)); out: spin_unlock(&as->client_lock); } static int smmu_iommu_domain_init(struct iommu_domain *domain) { - int i, err = -ENODEV; + int i, err = -EAGAIN; unsigned long flags; struct smmu_as *as; struct smmu_device *smmu = smmu_handle; @@ -814,11 +814,14 @@ static int smmu_iommu_domain_init(struct iommu_domain *domain) /* Look for a free AS with lock held */ for (i = 0; i < smmu->num_as; i++) { as = &smmu->as[i]; - if (!as->pdir_page) { - err = alloc_pdir(as); - if (!err) - goto found; - } + + if (as->pdir_page) + continue; + + err = alloc_pdir(as); + if (!err) + goto found; + if (err != -EAGAIN) break; } diff --git a/trunk/drivers/isdn/isdnloop/isdnloop.c b/trunk/drivers/isdn/isdnloop/isdnloop.c index 5405ec644db3..baf2686aa8eb 100644 --- a/trunk/drivers/isdn/isdnloop/isdnloop.c +++ b/trunk/drivers/isdn/isdnloop/isdnloop.c @@ -16,7 +16,6 @@ #include #include "isdnloop.h" -static char *revision = "$Revision: 1.11.6.7 $"; static char *isdnloop_id = "loop0"; MODULE_DESCRIPTION("ISDN4Linux: Pseudo Driver that simulates an ISDN card"); @@ -1494,17 +1493,6 @@ isdnloop_addcard(char *id1) static int __init isdnloop_init(void) { - char *p; - char rev[10]; - - if ((p = strchr(revision, ':'))) { - strcpy(rev, p + 1); - p = strchr(rev, '$'); - *p = 0; - } else - strcpy(rev, " ??? 
"); - printk(KERN_NOTICE "isdnloop-ISDN-driver Rev%s\n", rev); - if (isdnloop_id) return (isdnloop_addcard(isdnloop_id)); diff --git a/trunk/drivers/isdn/mISDN/layer2.c b/trunk/drivers/isdn/mISDN/layer2.c index 0dc8abca1407..949cabb88f1c 100644 --- a/trunk/drivers/isdn/mISDN/layer2.c +++ b/trunk/drivers/isdn/mISDN/layer2.c @@ -2222,7 +2222,7 @@ create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, int tei, InitWin(l2); l2->l2m.fsm = &l2fsm; if (test_bit(FLG_LAPB, &l2->flag) || - test_bit(FLG_PTP, &l2->flag) || + test_bit(FLG_FIXED_TEI, &l2->flag) || test_bit(FLG_LAPD_NET, &l2->flag)) l2->l2m.state = ST_L2_4; else diff --git a/trunk/drivers/leds/led-triggers.c b/trunk/drivers/leds/led-triggers.c index 6157cbbf4113..363975b3c925 100644 --- a/trunk/drivers/leds/led-triggers.c +++ b/trunk/drivers/leds/led-triggers.c @@ -224,7 +224,7 @@ void led_trigger_event(struct led_trigger *trig, struct led_classdev *led_cdev; led_cdev = list_entry(entry, struct led_classdev, trig_list); - led_set_brightness(led_cdev, brightness); + __led_set_brightness(led_cdev, brightness); } read_unlock(&trig->leddev_list_lock); } diff --git a/trunk/drivers/leds/leds-lp8788.c b/trunk/drivers/leds/leds-lp8788.c index 53bd136f1ef0..0ade6ebfc914 100644 --- a/trunk/drivers/leds/leds-lp8788.c +++ b/trunk/drivers/leds/leds-lp8788.c @@ -63,7 +63,7 @@ static int lp8788_led_init_device(struct lp8788_led *led, /* scale configuration */ addr = LP8788_ISINK_CTRL; mask = 1 << (cfg->num + LP8788_ISINK_SCALE_OFFSET); - val = cfg->scale << cfg->num; + val = cfg->scale << (cfg->num + LP8788_ISINK_SCALE_OFFSET); ret = lp8788_update_bits(led->lp, addr, mask, val); if (ret) return ret; diff --git a/trunk/drivers/leds/leds-renesas-tpu.c b/trunk/drivers/leds/leds-renesas-tpu.c index 9ee12c28059a..771ea067e680 100644 --- a/trunk/drivers/leds/leds-renesas-tpu.c +++ b/trunk/drivers/leds/leds-renesas-tpu.c @@ -247,7 +247,7 @@ static int __devinit r_tpu_probe(struct platform_device *pdev) if (!cfg) { dev_err(&pdev->dev, "missing platform data\n"); - goto err0; + return -ENODEV; } p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL); diff --git a/trunk/drivers/md/Kconfig b/trunk/drivers/md/Kconfig index 1eee45b69b71..d949b781f6f8 100644 --- a/trunk/drivers/md/Kconfig +++ b/trunk/drivers/md/Kconfig @@ -268,13 +268,14 @@ config DM_MIRROR needed for live data migration tools such as 'pvmove'. 
config DM_RAID - tristate "RAID 1/4/5/6 target" + tristate "RAID 1/4/5/6/10 target" depends on BLK_DEV_DM select MD_RAID1 + select MD_RAID10 select MD_RAID456 select BLK_DEV_MD ---help--- - A dm target that supports RAID1, RAID4, RAID5 and RAID6 mappings + A dm target that supports RAID1, RAID10, RAID4, RAID5 and RAID6 mappings A RAID-5 set of N drives with a capacity of C MB per drive provides the capacity of C * (N - 1) MB, and protects against a failure diff --git a/trunk/drivers/md/bitmap.c b/trunk/drivers/md/bitmap.c index 15dbe03117e4..94e7f6ba2e11 100644 --- a/trunk/drivers/md/bitmap.c +++ b/trunk/drivers/md/bitmap.c @@ -1305,7 +1305,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect prepare_to_wait(&bitmap->overflow_wait, &__wait, TASK_UNINTERRUPTIBLE); spin_unlock_irq(&bitmap->counts.lock); - io_schedule(); + schedule(); finish_wait(&bitmap->overflow_wait, &__wait); continue; } diff --git a/trunk/drivers/md/dm-raid.c b/trunk/drivers/md/dm-raid.c index f2f29c526544..982e3e390c45 100644 --- a/trunk/drivers/md/dm-raid.c +++ b/trunk/drivers/md/dm-raid.c @@ -11,6 +11,7 @@ #include "md.h" #include "raid1.h" #include "raid5.h" +#include "raid10.h" #include "bitmap.h" #include @@ -52,7 +53,10 @@ struct raid_dev { #define DMPF_MAX_RECOVERY_RATE 0x20 #define DMPF_MAX_WRITE_BEHIND 0x40 #define DMPF_STRIPE_CACHE 0x80 -#define DMPF_REGION_SIZE 0X100 +#define DMPF_REGION_SIZE 0x100 +#define DMPF_RAID10_COPIES 0x200 +#define DMPF_RAID10_FORMAT 0x400 + struct raid_set { struct dm_target *ti; @@ -76,6 +80,7 @@ static struct raid_type { const unsigned algorithm; /* RAID algorithm. */ } raid_types[] = { {"raid1", "RAID1 (mirroring)", 0, 2, 1, 0 /* NONE */}, + {"raid10", "RAID10 (striped mirrors)", 0, 2, 10, UINT_MAX /* Varies */}, {"raid4", "RAID4 (dedicated parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, {"raid5_la", "RAID5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC}, {"raid5_ra", "RAID5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC}, @@ -86,6 +91,17 @@ static struct raid_type { {"raid6_nc", "RAID6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE} }; +static unsigned raid10_md_layout_to_copies(int layout) +{ + return layout & 0xFF; +} + +static int raid10_format_to_md_layout(char *format, unsigned copies) +{ + /* 1 "far" copy, and 'copies' "near" copies */ + return (1 << 8) | (copies & 0xFF); +} + static struct raid_type *get_raid_type(char *name) { int i; @@ -339,10 +355,16 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size) * [max_write_behind ] See '-write-behind=' (man mdadm) * [stripe_cache ] Stripe cache size for higher RAIDs * [region_size ] Defines granularity of bitmap + * + * RAID10-only options: + * [raid10_copies <# copies>] Number of copies. (Default: 2) + * [raid10_format ] Layout algorithm. (Default: near) */ static int parse_raid_params(struct raid_set *rs, char **argv, unsigned num_raid_params) { + char *raid10_format = "near"; + unsigned raid10_copies = 2; unsigned i, rebuild_cnt = 0; unsigned long value, region_size = 0; sector_t sectors_per_dev = rs->ti->len; @@ -416,11 +438,28 @@ static int parse_raid_params(struct raid_set *rs, char **argv, } key = argv[i++]; + + /* Parameters that take a string value are checked here. 
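A worked example of the MD layout word handled by the two raid10 helpers above, assuming the default "near" format (the example function itself is illustrative, not part of the patch): two copies encode as 0x102, with the near-copy count in the low byte and a single "far" copy in the next byte.

static unsigned raid10_layout_example(void)
{
	char fmt[] = "near";
	int layout = raid10_format_to_md_layout(fmt, 2);	/* (1 << 8) | 2 == 0x102 */

	return raid10_md_layout_to_copies(layout);		/* 2 */
}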
*/ + if (!strcasecmp(key, "raid10_format")) { + if (rs->raid_type->level != 10) { + rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type"; + return -EINVAL; + } + if (strcmp("near", argv[i])) { + rs->ti->error = "Invalid 'raid10_format' value given"; + return -EINVAL; + } + raid10_format = argv[i]; + rs->print_flags |= DMPF_RAID10_FORMAT; + continue; + } + if (strict_strtoul(argv[i], 10, &value) < 0) { rs->ti->error = "Bad numerical argument given in raid params"; return -EINVAL; } + /* Parameters that take a numeric value are checked here */ if (!strcasecmp(key, "rebuild")) { rebuild_cnt++; @@ -439,6 +478,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv, return -EINVAL; } break; + case 10: default: DMERR("The rebuild parameter is not supported for %s", rs->raid_type->name); rs->ti->error = "Rebuild not supported for this RAID type"; @@ -495,7 +535,8 @@ static int parse_raid_params(struct raid_set *rs, char **argv, */ value /= 2; - if (rs->raid_type->level < 5) { + if ((rs->raid_type->level != 5) && + (rs->raid_type->level != 6)) { rs->ti->error = "Inappropriate argument: stripe_cache"; return -EINVAL; } @@ -520,6 +561,14 @@ static int parse_raid_params(struct raid_set *rs, char **argv, } else if (!strcasecmp(key, "region_size")) { rs->print_flags |= DMPF_REGION_SIZE; region_size = value; + } else if (!strcasecmp(key, "raid10_copies") && + (rs->raid_type->level == 10)) { + if ((value < 2) || (value > 0xFF)) { + rs->ti->error = "Bad value for 'raid10_copies'"; + return -EINVAL; + } + rs->print_flags |= DMPF_RAID10_COPIES; + raid10_copies = value; } else { DMERR("Unable to parse RAID parameter: %s", key); rs->ti->error = "Unable to parse RAID parameters"; @@ -538,8 +587,22 @@ static int parse_raid_params(struct raid_set *rs, char **argv, if (dm_set_target_max_io_len(rs->ti, max_io_len)) return -EINVAL; - if ((rs->raid_type->level > 1) && - sector_div(sectors_per_dev, (rs->md.raid_disks - rs->raid_type->parity_devs))) { + if (rs->raid_type->level == 10) { + if (raid10_copies > rs->md.raid_disks) { + rs->ti->error = "Not enough devices to satisfy specification"; + return -EINVAL; + } + + /* (Len * #mirrors) / #devices */ + sectors_per_dev = rs->ti->len * raid10_copies; + sector_div(sectors_per_dev, rs->md.raid_disks); + + rs->md.layout = raid10_format_to_md_layout(raid10_format, + raid10_copies); + rs->md.new_layout = rs->md.layout; + } else if ((rs->raid_type->level > 1) && + sector_div(sectors_per_dev, + (rs->md.raid_disks - rs->raid_type->parity_devs))) { rs->ti->error = "Target length not divisible by number of data devices"; return -EINVAL; } @@ -566,6 +629,9 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits) if (rs->raid_type->level == 1) return md_raid1_congested(&rs->md, bits); + if (rs->raid_type->level == 10) + return md_raid10_congested(&rs->md, bits); + return md_raid5_congested(&rs->md, bits); } @@ -884,6 +950,9 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) case 6: redundancy = rs->raid_type->parity_devs; break; + case 10: + redundancy = raid10_md_layout_to_copies(mddev->layout) - 1; + break; default: ti->error = "Unknown RAID type"; return -EINVAL; @@ -1049,12 +1118,19 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv) goto bad; } + if (ti->len != rs->md.array_sectors) { + ti->error = "Array size does not match requested target length"; + ret = -EINVAL; + goto size_mismatch; + } rs->callbacks.congested_fn = raid_is_congested; 
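As an aside to the dm-raid hunks above: the new raid10 support packs the geometry into md's layout word (the low byte holds the number of "near" copies, the next byte a single "far" copy) and sizes each member as (target length * copies) / raid_disks. A small standalone sketch of that arithmetic, not part of the patch and using an example geometry:

#include <stdio.h>
#include <stdint.h>

/* Low byte of the md layout holds the number of "near" copies. */
static unsigned int layout_to_copies(int layout)
{
	return layout & 0xFF;
}

/* One "far" copy in bits 8-15, 'copies' "near" copies in bits 0-7,
 * mirroring the raid10_format_to_md_layout() helper in the hunk above. */
static int copies_to_layout(unsigned int copies)
{
	return (1 << 8) | (copies & 0xFF);
}

int main(void)
{
	uint64_t target_len = 1048576;		/* target length in sectors (example) */
	unsigned int raid_disks = 4, copies = 2;

	int layout = copies_to_layout(copies);
	/* Each member must hold (len * #mirrors) / #devices sectors. */
	uint64_t sectors_per_dev = target_len * copies / raid_disks;

	printf("layout=0x%x copies=%u sectors_per_dev=%llu\n",
	       layout, layout_to_copies(layout),
	       (unsigned long long)sectors_per_dev);
	return 0;
}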
dm_table_add_target_callbacks(ti->table, &rs->callbacks); mddev_suspend(&rs->md); return 0; +size_mismatch: + md_stop(&rs->md); bad: context_free(rs); @@ -1203,6 +1279,13 @@ static int raid_status(struct dm_target *ti, status_type_t type, DMEMIT(" region_size %lu", rs->md.bitmap_info.chunksize >> 9); + if (rs->print_flags & DMPF_RAID10_COPIES) + DMEMIT(" raid10_copies %u", + raid10_md_layout_to_copies(rs->md.layout)); + + if (rs->print_flags & DMPF_RAID10_FORMAT) + DMEMIT(" raid10_format near"); + DMEMIT(" %d", rs->md.raid_disks); for (i = 0; i < rs->md.raid_disks; i++) { if (rs->dev[i].meta_dev) @@ -1277,7 +1360,7 @@ static void raid_resume(struct dm_target *ti) static struct target_type raid_target = { .name = "raid", - .version = {1, 2, 0}, + .version = {1, 3, 0}, .module = THIS_MODULE, .ctr = raid_ctr, .dtr = raid_dtr, @@ -1304,6 +1387,8 @@ module_init(dm_raid_init); module_exit(dm_raid_exit); MODULE_DESCRIPTION(DM_NAME " raid4/5/6 target"); +MODULE_ALIAS("dm-raid1"); +MODULE_ALIAS("dm-raid10"); MODULE_ALIAS("dm-raid4"); MODULE_ALIAS("dm-raid5"); MODULE_ALIAS("dm-raid6"); diff --git a/trunk/drivers/md/md.c b/trunk/drivers/md/md.c index d5ab4493c8be..3f6203a4c7ea 100644 --- a/trunk/drivers/md/md.c +++ b/trunk/drivers/md/md.c @@ -498,61 +498,13 @@ void md_flush_request(struct mddev *mddev, struct bio *bio) } EXPORT_SYMBOL(md_flush_request); -/* Support for plugging. - * This mirrors the plugging support in request_queue, but does not - * require having a whole queue or request structures. - * We allocate an md_plug_cb for each md device and each thread it gets - * plugged on. This links tot the private plug_handle structure in the - * personality data where we keep a count of the number of outstanding - * plugs so other code can see if a plug is active. - */ -struct md_plug_cb { - struct blk_plug_cb cb; - struct mddev *mddev; -}; - -static void plugger_unplug(struct blk_plug_cb *cb) +void md_unplug(struct blk_plug_cb *cb, bool from_schedule) { - struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb); - if (atomic_dec_and_test(&mdcb->mddev->plug_cnt)) - md_wakeup_thread(mdcb->mddev->thread); - kfree(mdcb); -} - -/* Check that an unplug wakeup will come shortly. 
- * If not, wakeup the md thread immediately - */ -int mddev_check_plugged(struct mddev *mddev) -{ - struct blk_plug *plug = current->plug; - struct md_plug_cb *mdcb; - - if (!plug) - return 0; - - list_for_each_entry(mdcb, &plug->cb_list, cb.list) { - if (mdcb->cb.callback == plugger_unplug && - mdcb->mddev == mddev) { - /* Already on the list, move to top */ - if (mdcb != list_first_entry(&plug->cb_list, - struct md_plug_cb, - cb.list)) - list_move(&mdcb->cb.list, &plug->cb_list); - return 1; - } - } - /* Not currently on the callback list */ - mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC); - if (!mdcb) - return 0; - - mdcb->mddev = mddev; - mdcb->cb.callback = plugger_unplug; - atomic_inc(&mddev->plug_cnt); - list_add(&mdcb->cb.list, &plug->cb_list); - return 1; + struct mddev *mddev = cb->data; + md_wakeup_thread(mddev->thread); + kfree(cb); } -EXPORT_SYMBOL_GPL(mddev_check_plugged); +EXPORT_SYMBOL(md_unplug); static inline struct mddev *mddev_get(struct mddev *mddev) { @@ -602,7 +554,6 @@ void mddev_init(struct mddev *mddev) atomic_set(&mddev->active, 1); atomic_set(&mddev->openers, 0); atomic_set(&mddev->active_io, 0); - atomic_set(&mddev->plug_cnt, 0); spin_lock_init(&mddev->write_lock); atomic_set(&mddev->flush_pending, 0); init_waitqueue_head(&mddev->sb_wait); @@ -1157,8 +1108,11 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor ret = 0; } rdev->sectors = rdev->sb_start; - /* Limit to 4TB as metadata cannot record more than that */ - if (rdev->sectors >= (2ULL << 32)) + /* Limit to 4TB as metadata cannot record more than that. + * (not needed for Linear and RAID0 as metadata doesn't + * record this size) + */ + if (rdev->sectors >= (2ULL << 32) && sb->level >= 1) rdev->sectors = (2ULL << 32) - 2; if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) @@ -1449,7 +1403,7 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) /* Limit to 4TB as metadata cannot record more than that. * 4TB == 2^32 KB, or 2*2^32 sectors. */ - if (num_sectors >= (2ULL << 32)) + if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) num_sectors = (2ULL << 32) - 2; md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, rdev->sb_page); @@ -3942,17 +3896,13 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) break; case clear: /* stopping an active array */ - if (atomic_read(&mddev->openers) > 0) - return -EBUSY; err = do_md_stop(mddev, 0, NULL); break; case inactive: /* stopping an active array */ - if (mddev->pers) { - if (atomic_read(&mddev->openers) > 0) - return -EBUSY; + if (mddev->pers) err = do_md_stop(mddev, 2, NULL); - } else + else err = 0; /* already inactive */ break; case suspended: diff --git a/trunk/drivers/md/md.h b/trunk/drivers/md/md.h index 7b4a3c318cae..f385b038589d 100644 --- a/trunk/drivers/md/md.h +++ b/trunk/drivers/md/md.h @@ -266,9 +266,6 @@ struct mddev { int new_chunk_sectors; int reshape_backwards; - atomic_t plug_cnt; /* If device is expecting - * more bios soon. 
- */ struct md_thread *thread; /* management thread */ struct md_thread *sync_thread; /* doing resync or reconstruct */ sector_t curr_resync; /* last block scheduled */ @@ -630,6 +627,12 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask, struct mddev *mddev); extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, struct mddev *mddev); -extern int mddev_check_plugged(struct mddev *mddev); extern void md_trim_bio(struct bio *bio, int offset, int size); + +extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule); +static inline int mddev_check_plugged(struct mddev *mddev) +{ + return !!blk_check_plugged(md_unplug, mddev, + sizeof(struct blk_plug_cb)); +} #endif /* _MD_MD_H */ diff --git a/trunk/drivers/md/raid1.c b/trunk/drivers/md/raid1.c index cacd008d6864..611b5f797618 100644 --- a/trunk/drivers/md/raid1.c +++ b/trunk/drivers/md/raid1.c @@ -46,6 +46,20 @@ */ #define NR_RAID1_BIOS 256 +/* when we get a read error on a read-only array, we redirect to another + * device without failing the first device, or trying to over-write to + * correct the read error. To keep track of bad blocks on a per-bio + * level, we store IO_BLOCKED in the appropriate 'bios' pointer + */ +#define IO_BLOCKED ((struct bio *)1) +/* When we successfully write to a known bad-block, we need to remove the + * bad-block marking which must be done from process context. So we record + * the success by setting devs[n].bio to IO_MADE_GOOD + */ +#define IO_MADE_GOOD ((struct bio *)2) + +#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) + /* When there are this many requests queue to be written by * the raid1 thread, we become 'congested' to provide back-pressure * for writeback. @@ -483,12 +497,14 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect const sector_t this_sector = r1_bio->sector; int sectors; int best_good_sectors; - int start_disk; - int best_disk; - int i; + int best_disk, best_dist_disk, best_pending_disk; + int has_nonrot_disk; + int disk; sector_t best_dist; + unsigned int min_pending; struct md_rdev *rdev; int choose_first; + int choose_next_idle; rcu_read_lock(); /* @@ -499,26 +515,26 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect retry: sectors = r1_bio->sectors; best_disk = -1; + best_dist_disk = -1; best_dist = MaxSector; + best_pending_disk = -1; + min_pending = UINT_MAX; best_good_sectors = 0; + has_nonrot_disk = 0; + choose_next_idle = 0; if (conf->mddev->recovery_cp < MaxSector && - (this_sector + sectors >= conf->next_resync)) { + (this_sector + sectors >= conf->next_resync)) choose_first = 1; - start_disk = 0; - } else { + else choose_first = 0; - start_disk = conf->last_used; - } - for (i = 0 ; i < conf->raid_disks * 2 ; i++) { + for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { sector_t dist; sector_t first_bad; int bad_sectors; - - int disk = start_disk + i; - if (disk >= conf->raid_disks * 2) - disk -= conf->raid_disks * 2; + unsigned int pending; + bool nonrot; rdev = rcu_dereference(conf->mirrors[disk].rdev); if (r1_bio->bios[disk] == IO_BLOCKED @@ -577,22 +593,77 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect } else best_good_sectors = sectors; + nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev)); + has_nonrot_disk |= nonrot; + pending = atomic_read(&rdev->nr_pending); dist = abs(this_sector - conf->mirrors[disk].head_position); - if (choose_first - /* Don't change to another disk for sequential reads */ - || conf->next_seq_sect == 
this_sector - || dist == 0 - /* If device is idle, use it */ - || atomic_read(&rdev->nr_pending) == 0) { + if (choose_first) { + best_disk = disk; + break; + } + /* Don't change to another disk for sequential reads */ + if (conf->mirrors[disk].next_seq_sect == this_sector + || dist == 0) { + int opt_iosize = bdev_io_opt(rdev->bdev) >> 9; + struct raid1_info *mirror = &conf->mirrors[disk]; + + best_disk = disk; + /* + * If buffered sequential IO size exceeds optimal + * iosize, check if there is idle disk. If yes, choose + * the idle disk. read_balance could already choose an + * idle disk before noticing it's a sequential IO in + * this disk. This doesn't matter because this disk + * will idle, next time it will be utilized after the + * first disk has IO size exceeds optimal iosize. In + * this way, iosize of the first disk will be optimal + * iosize at least. iosize of the second disk might be + * small, but not a big deal since when the second disk + * starts IO, the first disk is likely still busy. + */ + if (nonrot && opt_iosize > 0 && + mirror->seq_start != MaxSector && + mirror->next_seq_sect > opt_iosize && + mirror->next_seq_sect - opt_iosize >= + mirror->seq_start) { + choose_next_idle = 1; + continue; + } + break; + } + /* If device is idle, use it */ + if (pending == 0) { best_disk = disk; break; } + + if (choose_next_idle) + continue; + + if (min_pending > pending) { + min_pending = pending; + best_pending_disk = disk; + } + if (dist < best_dist) { best_dist = dist; - best_disk = disk; + best_dist_disk = disk; } } + /* + * If all disks are rotational, choose the closest disk. If any disk is + * non-rotational, choose the disk with less pending request even the + * disk is rotational, which might/might not be optimal for raids with + * mixed ratation/non-rotational disks depending on workload. + */ + if (best_disk == -1) { + if (has_nonrot_disk) + best_disk = best_pending_disk; + else + best_disk = best_dist_disk; + } + if (best_disk >= 0) { rdev = rcu_dereference(conf->mirrors[best_disk].rdev); if (!rdev) @@ -606,8 +677,11 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect goto retry; } sectors = best_good_sectors; - conf->next_seq_sect = this_sector + sectors; - conf->last_used = best_disk; + + if (conf->mirrors[best_disk].next_seq_sect != this_sector) + conf->mirrors[best_disk].seq_start = this_sector; + + conf->mirrors[best_disk].next_seq_sect = this_sector + sectors; } rcu_read_unlock(); *max_sectors = sectors; @@ -870,10 +944,48 @@ static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio) pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); } +struct raid1_plug_cb { + struct blk_plug_cb cb; + struct bio_list pending; + int pending_cnt; +}; + +static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule) +{ + struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb, + cb); + struct mddev *mddev = plug->cb.data; + struct r1conf *conf = mddev->private; + struct bio *bio; + + if (from_schedule) { + spin_lock_irq(&conf->device_lock); + bio_list_merge(&conf->pending_bio_list, &plug->pending); + conf->pending_count += plug->pending_cnt; + spin_unlock_irq(&conf->device_lock); + md_wakeup_thread(mddev->thread); + kfree(plug); + return; + } + + /* we aren't scheduling, so we can do the write-out directly. 
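To summarize the reworked read_balance() above: sequential reads stick to the mirror that served the previous chunk, an idle mirror is taken next, and the final fallback is the closest head position when every member is rotational, or the smallest pending count when any member is non-rotational. A compressed userspace sketch of that ordering follows; it is illustrative only, the opt_iosize refinement is omitted and the per-mirror bookkeeping is simplified:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

struct mirror {
	long long head_position;	/* last serviced sector */
	long long next_seq_sect;	/* sector a sequential stream would hit next */
	unsigned int pending;		/* in-flight requests */
	int nonrot;			/* non-rotational (SSD) */
};

/* Pick a mirror for a read at 'sector' in the same order as the patched
 * read_balance(): sequential > idle > closest (all rotational) or least
 * pending (any non-rotational member). */
static int pick_mirror(struct mirror *m, int n, long long sector)
{
	int best_dist_disk = -1, best_pending_disk = -1;
	long long best_dist = LLONG_MAX;
	unsigned int min_pending = UINT_MAX;
	int has_nonrot = 0, i;

	for (i = 0; i < n; i++) {
		long long dist = llabs(sector - m[i].head_position);

		has_nonrot |= m[i].nonrot;
		if (m[i].next_seq_sect == sector || dist == 0)
			return i;		/* keep sequential reads here */
		if (m[i].pending == 0)
			return i;		/* idle disk wins */
		if (m[i].pending < min_pending) {
			min_pending = m[i].pending;
			best_pending_disk = i;
		}
		if (dist < best_dist) {
			best_dist = dist;
			best_dist_disk = i;
		}
	}
	return has_nonrot ? best_pending_disk : best_dist_disk;
}

int main(void)
{
	struct mirror m[2] = {
		{ .head_position = 1000, .next_seq_sect = 5000, .pending = 3, .nonrot = 0 },
		{ .head_position = 9000, .next_seq_sect = 0,    .pending = 1, .nonrot = 1 },
	};

	printf("chose mirror %d\n", pick_mirror(m, 2, 5000));
	return 0;
}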
*/ + bio = bio_list_get(&plug->pending); + bitmap_unplug(mddev->bitmap); + wake_up(&conf->wait_barrier); + + while (bio) { /* submit pending writes */ + struct bio *next = bio->bi_next; + bio->bi_next = NULL; + generic_make_request(bio); + bio = next; + } + kfree(plug); +} + static void make_request(struct mddev *mddev, struct bio * bio) { struct r1conf *conf = mddev->private; - struct mirror_info *mirror; + struct raid1_info *mirror; struct r1bio *r1_bio; struct bio *read_bio; int i, disks; @@ -883,6 +995,8 @@ static void make_request(struct mddev *mddev, struct bio * bio) const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); struct md_rdev *blocked_rdev; + struct blk_plug_cb *cb; + struct raid1_plug_cb *plug = NULL; int first_clone; int sectors_handled; int max_sectors; @@ -1185,11 +1299,22 @@ static void make_request(struct mddev *mddev, struct bio * bio) mbio->bi_private = r1_bio; atomic_inc(&r1_bio->remaining); + + cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug)); + if (cb) + plug = container_of(cb, struct raid1_plug_cb, cb); + else + plug = NULL; spin_lock_irqsave(&conf->device_lock, flags); - bio_list_add(&conf->pending_bio_list, mbio); - conf->pending_count++; + if (plug) { + bio_list_add(&plug->pending, mbio); + plug->pending_cnt++; + } else { + bio_list_add(&conf->pending_bio_list, mbio); + conf->pending_count++; + } spin_unlock_irqrestore(&conf->device_lock, flags); - if (!mddev_check_plugged(mddev)) + if (!plug) md_wakeup_thread(mddev->thread); } /* Mustn't call r1_bio_write_done before this next test, @@ -1364,7 +1489,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) struct r1conf *conf = mddev->private; int err = -EEXIST; int mirror = 0; - struct mirror_info *p; + struct raid1_info *p; int first = 0; int last = conf->raid_disks - 1; struct request_queue *q = bdev_get_queue(rdev->bdev); @@ -1433,7 +1558,7 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) struct r1conf *conf = mddev->private; int err = 0; int number = rdev->raid_disk; - struct mirror_info *p = conf->mirrors+ number; + struct raid1_info *p = conf->mirrors + number; if (rdev != p->rdev) p = conf->mirrors + conf->raid_disks + number; @@ -2173,8 +2298,7 @@ static void raid1d(struct mddev *mddev) blk_start_plug(&plug); for (;;) { - if (atomic_read(&mddev->plug_cnt) == 0) - flush_pending_writes(conf); + flush_pending_writes(conf); spin_lock_irqsave(&conf->device_lock, flags); if (list_empty(head)) { @@ -2371,6 +2495,18 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp bio->bi_rw = READ; bio->bi_end_io = end_sync_read; read_targets++; + } else if (!test_bit(WriteErrorSeen, &rdev->flags) && + test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && + !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { + /* + * The device is suitable for reading (InSync), + * but has bad block(s) here. Let's try to correct them, + * if we are doing resync or repair. Otherwise, leave + * this device alone for this sync request. 
+ */ + bio->bi_rw = WRITE; + bio->bi_end_io = end_sync_write; + write_targets++; } } if (bio->bi_end_io) { @@ -2428,7 +2564,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp /* There is nowhere to write, so all non-sync * drives must be failed - so we are finished */ - sector_t rv = max_sector - sector_nr; + sector_t rv; + if (min_bad > 0) + max_sector = sector_nr + min_bad; + rv = max_sector - sector_nr; *skipped = 1; put_buf(r1_bio); return rv; @@ -2521,7 +2660,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) { struct r1conf *conf; int i; - struct mirror_info *disk; + struct raid1_info *disk; struct md_rdev *rdev; int err = -ENOMEM; @@ -2529,7 +2668,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) if (!conf) goto abort; - conf->mirrors = kzalloc(sizeof(struct mirror_info) + conf->mirrors = kzalloc(sizeof(struct raid1_info) * mddev->raid_disks * 2, GFP_KERNEL); if (!conf->mirrors) @@ -2572,6 +2711,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) mddev->merge_check_needed = 1; disk->head_position = 0; + disk->seq_start = MaxSector; } conf->raid_disks = mddev->raid_disks; conf->mddev = mddev; @@ -2585,7 +2725,6 @@ static struct r1conf *setup_conf(struct mddev *mddev) conf->recovery_disabled = mddev->recovery_disabled - 1; err = -EIO; - conf->last_used = -1; for (i = 0; i < conf->raid_disks * 2; i++) { disk = conf->mirrors + i; @@ -2611,19 +2750,9 @@ static struct r1conf *setup_conf(struct mddev *mddev) if (disk->rdev && (disk->rdev->saved_raid_disk < 0)) conf->fullsync = 1; - } else if (conf->last_used < 0) - /* - * The first working device is used as a - * starting point to read balancing. - */ - conf->last_used = i; + } } - if (conf->last_used < 0) { - printk(KERN_ERR "md/raid1:%s: no operational mirrors\n", - mdname(mddev)); - goto abort; - } err = -ENOMEM; conf->thread = md_register_thread(raid1d, mddev, "raid1"); if (!conf->thread) { @@ -2798,7 +2927,7 @@ static int raid1_reshape(struct mddev *mddev) */ mempool_t *newpool, *oldpool; struct pool_info *newpoolinfo; - struct mirror_info *newmirrors; + struct raid1_info *newmirrors; struct r1conf *conf = mddev->private; int cnt, raid_disks; unsigned long flags; @@ -2841,7 +2970,7 @@ static int raid1_reshape(struct mddev *mddev) kfree(newpoolinfo); return -ENOMEM; } - newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks * 2, + newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2, GFP_KERNEL); if (!newmirrors) { kfree(newpoolinfo); @@ -2880,7 +3009,6 @@ static int raid1_reshape(struct mddev *mddev) conf->raid_disks = mddev->raid_disks = raid_disks; mddev->delta_disks = 0; - conf->last_used = 0; /* just make sure it is in-range */ lower_barrier(conf); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); diff --git a/trunk/drivers/md/raid1.h b/trunk/drivers/md/raid1.h index 80ded139314c..0ff3715fb7eb 100644 --- a/trunk/drivers/md/raid1.h +++ b/trunk/drivers/md/raid1.h @@ -1,9 +1,15 @@ #ifndef _RAID1_H #define _RAID1_H -struct mirror_info { +struct raid1_info { struct md_rdev *rdev; sector_t head_position; + + /* When choose the best device for a read (read_balance()) + * we try to keep sequential reads one the same device + */ + sector_t next_seq_sect; + sector_t seq_start; }; /* @@ -24,17 +30,11 @@ struct pool_info { struct r1conf { struct mddev *mddev; - struct mirror_info *mirrors; /* twice 'raid_disks' to + struct raid1_info *mirrors; /* twice 'raid_disks' to * allow for replacements. 
*/ int raid_disks; - /* When choose the best device for a read (read_balance()) - * we try to keep sequential reads one the same device - * using 'last_used' and 'next_seq_sect' - */ - int last_used; - sector_t next_seq_sect; /* During resync, read_balancing is only allowed on the part * of the array that has been resynced. 'next_resync' tells us * where that is. @@ -135,20 +135,6 @@ struct r1bio { /* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/ }; -/* when we get a read error on a read-only array, we redirect to another - * device without failing the first device, or trying to over-write to - * correct the read error. To keep track of bad blocks on a per-bio - * level, we store IO_BLOCKED in the appropriate 'bios' pointer - */ -#define IO_BLOCKED ((struct bio *)1) -/* When we successfully write to a known bad-block, we need to remove the - * bad-block marking which must be done from process context. So we record - * the success by setting bios[n] to IO_MADE_GOOD - */ -#define IO_MADE_GOOD ((struct bio *)2) - -#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) - /* bits for r1bio.state */ #define R1BIO_Uptodate 0 #define R1BIO_IsSync 1 diff --git a/trunk/drivers/md/raid10.c b/trunk/drivers/md/raid10.c index 8da6282254c3..1c2eb38f3c51 100644 --- a/trunk/drivers/md/raid10.c +++ b/trunk/drivers/md/raid10.c @@ -60,7 +60,21 @@ */ #define NR_RAID10_BIOS 256 -/* When there are this many requests queue to be written by +/* when we get a read error on a read-only array, we redirect to another + * device without failing the first device, or trying to over-write to + * correct the read error. To keep track of bad blocks on a per-bio + * level, we store IO_BLOCKED in the appropriate 'bios' pointer + */ +#define IO_BLOCKED ((struct bio *)1) +/* When we successfully write to a known bad-block, we need to remove the + * bad-block marking which must be done from process context. So we record + * the success by setting devs[n].bio to IO_MADE_GOOD + */ +#define IO_MADE_GOOD ((struct bio *)2) + +#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) + +/* When there are this many requests queued to be written by * the raid10 thread, we become 'congested' to provide back-pressure * for writeback. 
*/ @@ -645,7 +659,11 @@ static int raid10_mergeable_bvec(struct request_queue *q, max = biovec->bv_len; if (mddev->merge_check_needed) { - struct r10bio r10_bio; + struct { + struct r10bio r10_bio; + struct r10dev devs[conf->copies]; + } on_stack; + struct r10bio *r10_bio = &on_stack.r10_bio; int s; if (conf->reshape_progress != MaxSector) { /* Cannot give any guidance during reshape */ @@ -653,18 +671,18 @@ static int raid10_mergeable_bvec(struct request_queue *q, return biovec->bv_len; return 0; } - r10_bio.sector = sector; - raid10_find_phys(conf, &r10_bio); + r10_bio->sector = sector; + raid10_find_phys(conf, r10_bio); rcu_read_lock(); for (s = 0; s < conf->copies; s++) { - int disk = r10_bio.devs[s].devnum; + int disk = r10_bio->devs[s].devnum; struct md_rdev *rdev = rcu_dereference( conf->mirrors[disk].rdev); if (rdev && !test_bit(Faulty, &rdev->flags)) { struct request_queue *q = bdev_get_queue(rdev->bdev); if (q->merge_bvec_fn) { - bvm->bi_sector = r10_bio.devs[s].addr + bvm->bi_sector = r10_bio->devs[s].addr + rdev->data_offset; bvm->bi_bdev = rdev->bdev; max = min(max, q->merge_bvec_fn( @@ -676,7 +694,7 @@ static int raid10_mergeable_bvec(struct request_queue *q, struct request_queue *q = bdev_get_queue(rdev->bdev); if (q->merge_bvec_fn) { - bvm->bi_sector = r10_bio.devs[s].addr + bvm->bi_sector = r10_bio->devs[s].addr + rdev->data_offset; bvm->bi_bdev = rdev->bdev; max = min(max, q->merge_bvec_fn( @@ -717,7 +735,7 @@ static struct md_rdev *read_balance(struct r10conf *conf, int sectors = r10_bio->sectors; int best_good_sectors; sector_t new_distance, best_dist; - struct md_rdev *rdev, *best_rdev; + struct md_rdev *best_rdev, *rdev = NULL; int do_balance; int best_slot; struct geom *geo = &conf->geo; @@ -839,9 +857,8 @@ static struct md_rdev *read_balance(struct r10conf *conf, return rdev; } -static int raid10_congested(void *data, int bits) +int md_raid10_congested(struct mddev *mddev, int bits) { - struct mddev *mddev = data; struct r10conf *conf = mddev->private; int i, ret = 0; @@ -849,8 +866,6 @@ static int raid10_congested(void *data, int bits) conf->pending_count >= max_queued_requests) return 1; - if (mddev_congested(mddev, bits)) - return 1; rcu_read_lock(); for (i = 0; (i < conf->geo.raid_disks || i < conf->prev.raid_disks) @@ -866,6 +881,15 @@ static int raid10_congested(void *data, int bits) rcu_read_unlock(); return ret; } +EXPORT_SYMBOL_GPL(md_raid10_congested); + +static int raid10_congested(void *data, int bits) +{ + struct mddev *mddev = data; + + return mddev_congested(mddev, bits) || + md_raid10_congested(mddev, bits); +} static void flush_pending_writes(struct r10conf *conf) { @@ -1546,7 +1570,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev) static void print_conf(struct r10conf *conf) { int i; - struct mirror_info *tmp; + struct raid10_info *tmp; printk(KERN_DEBUG "RAID10 conf printout:\n"); if (!conf) { @@ -1580,7 +1604,7 @@ static int raid10_spare_active(struct mddev *mddev) { int i; struct r10conf *conf = mddev->private; - struct mirror_info *tmp; + struct raid10_info *tmp; int count = 0; unsigned long flags; @@ -1655,7 +1679,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) else mirror = first; for ( ; mirror <= last ; mirror++) { - struct mirror_info *p = &conf->mirrors[mirror]; + struct raid10_info *p = &conf->mirrors[mirror]; if (p->recovery_disabled == mddev->recovery_disabled) continue; if (p->rdev) { @@ -1709,7 +1733,7 @@ static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev) int err = 0; 
int number = rdev->raid_disk; struct md_rdev **rdevp; - struct mirror_info *p = conf->mirrors + number; + struct raid10_info *p = conf->mirrors + number; print_conf(conf); if (rdev == p->rdev) @@ -2660,8 +2684,7 @@ static void raid10d(struct mddev *mddev) blk_start_plug(&plug); for (;;) { - if (atomic_read(&mddev->plug_cnt) == 0) - flush_pending_writes(conf); + flush_pending_writes(conf); spin_lock_irqsave(&conf->device_lock, flags); if (list_empty(head)) { @@ -2876,7 +2899,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, sector_t sect; int must_sync; int any_working; - struct mirror_info *mirror = &conf->mirrors[i]; + struct raid10_info *mirror = &conf->mirrors[i]; if ((mirror->rdev == NULL || test_bit(In_sync, &mirror->rdev->flags)) @@ -3388,7 +3411,7 @@ static struct r10conf *setup_conf(struct mddev *mddev) goto out; /* FIXME calc properly */ - conf->mirrors = kzalloc(sizeof(struct mirror_info)*(mddev->raid_disks + + conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks + max(0,mddev->delta_disks)), GFP_KERNEL); if (!conf->mirrors) @@ -3452,7 +3475,7 @@ static int run(struct mddev *mddev) { struct r10conf *conf; int i, disk_idx, chunk_size; - struct mirror_info *disk; + struct raid10_info *disk; struct md_rdev *rdev; sector_t size; sector_t min_offset_diff = 0; @@ -3472,12 +3495,14 @@ static int run(struct mddev *mddev) conf->thread = NULL; chunk_size = mddev->chunk_sectors << 9; - blk_queue_io_min(mddev->queue, chunk_size); - if (conf->geo.raid_disks % conf->geo.near_copies) - blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); - else - blk_queue_io_opt(mddev->queue, chunk_size * - (conf->geo.raid_disks / conf->geo.near_copies)); + if (mddev->queue) { + blk_queue_io_min(mddev->queue, chunk_size); + if (conf->geo.raid_disks % conf->geo.near_copies) + blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); + else + blk_queue_io_opt(mddev->queue, chunk_size * + (conf->geo.raid_disks / conf->geo.near_copies)); + } rdev_for_each(rdev, mddev) { long long diff; @@ -3511,8 +3536,9 @@ static int run(struct mddev *mddev) if (first || diff < min_offset_diff) min_offset_diff = diff; - disk_stack_limits(mddev->gendisk, rdev->bdev, - rdev->data_offset << 9); + if (mddev->gendisk) + disk_stack_limits(mddev->gendisk, rdev->bdev, + rdev->data_offset << 9); disk->head_position = 0; } @@ -3575,22 +3601,22 @@ static int run(struct mddev *mddev) md_set_array_sectors(mddev, size); mddev->resync_max_sectors = size; - mddev->queue->backing_dev_info.congested_fn = raid10_congested; - mddev->queue->backing_dev_info.congested_data = mddev; - - /* Calculate max read-ahead size. - * We need to readahead at least twice a whole stripe.... - * maybe... - */ - { + if (mddev->queue) { int stripe = conf->geo.raid_disks * ((mddev->chunk_sectors << 9) / PAGE_SIZE); + mddev->queue->backing_dev_info.congested_fn = raid10_congested; + mddev->queue->backing_dev_info.congested_data = mddev; + + /* Calculate max read-ahead size. + * We need to readahead at least twice a whole stripe.... + * maybe... 
+ */ stripe /= conf->geo.near_copies; if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) mddev->queue->backing_dev_info.ra_pages = 2 * stripe; + blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); } - blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); if (md_integrity_register(mddev)) goto out_free_conf; @@ -3641,7 +3667,10 @@ static int stop(struct mddev *mddev) lower_barrier(conf); md_unregister_thread(&mddev->thread); - blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ + if (mddev->queue) + /* the unplug fn references 'conf'*/ + blk_sync_queue(mddev->queue); + if (conf->r10bio_pool) mempool_destroy(conf->r10bio_pool); kfree(conf->mirrors); @@ -3805,7 +3834,7 @@ static int raid10_check_reshape(struct mddev *mddev) if (mddev->delta_disks > 0) { /* allocate new 'mirrors' list */ conf->mirrors_new = kzalloc( - sizeof(struct mirror_info) + sizeof(struct raid10_info) *(mddev->raid_disks + mddev->delta_disks), GFP_KERNEL); @@ -3930,7 +3959,7 @@ static int raid10_start_reshape(struct mddev *mddev) spin_lock_irq(&conf->device_lock); if (conf->mirrors_new) { memcpy(conf->mirrors_new, conf->mirrors, - sizeof(struct mirror_info)*conf->prev.raid_disks); + sizeof(struct raid10_info)*conf->prev.raid_disks); smp_mb(); kfree(conf->mirrors_old); /* FIXME and elsewhere */ conf->mirrors_old = conf->mirrors; @@ -4389,14 +4418,18 @@ static int handle_reshape_read_error(struct mddev *mddev, { /* Use sync reads to get the blocks from somewhere else */ int sectors = r10_bio->sectors; - struct r10bio r10b; struct r10conf *conf = mddev->private; + struct { + struct r10bio r10_bio; + struct r10dev devs[conf->copies]; + } on_stack; + struct r10bio *r10b = &on_stack.r10_bio; int slot = 0; int idx = 0; struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec; - r10b.sector = r10_bio->sector; - __raid10_find_phys(&conf->prev, &r10b); + r10b->sector = r10_bio->sector; + __raid10_find_phys(&conf->prev, r10b); while (sectors) { int s = sectors; @@ -4407,7 +4440,7 @@ static int handle_reshape_read_error(struct mddev *mddev, s = PAGE_SIZE >> 9; while (!success) { - int d = r10b.devs[slot].devnum; + int d = r10b->devs[slot].devnum; struct md_rdev *rdev = conf->mirrors[d].rdev; sector_t addr; if (rdev == NULL || @@ -4415,7 +4448,7 @@ static int handle_reshape_read_error(struct mddev *mddev, !test_bit(In_sync, &rdev->flags)) goto failed; - addr = r10b.devs[slot].addr + idx * PAGE_SIZE; + addr = r10b->devs[slot].addr + idx * PAGE_SIZE; success = sync_page_io(rdev, addr, s << 9, diff --git a/trunk/drivers/md/raid10.h b/trunk/drivers/md/raid10.h index 135b1b0a1554..1054cf602345 100644 --- a/trunk/drivers/md/raid10.h +++ b/trunk/drivers/md/raid10.h @@ -1,7 +1,7 @@ #ifndef _RAID10_H #define _RAID10_H -struct mirror_info { +struct raid10_info { struct md_rdev *rdev, *replacement; sector_t head_position; int recovery_disabled; /* matches @@ -13,8 +13,8 @@ struct mirror_info { struct r10conf { struct mddev *mddev; - struct mirror_info *mirrors; - struct mirror_info *mirrors_new, *mirrors_old; + struct raid10_info *mirrors; + struct raid10_info *mirrors_new, *mirrors_old; spinlock_t device_lock; /* geometry */ @@ -110,7 +110,7 @@ struct r10bio { * We choose the number when they are allocated. * We sometimes need an extra bio to write to the replacement. 
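A note on the r10bio changes above: r10bio carries a trailing devs[] array sized by conf->copies, so every user has to reserve that space itself, either on the stack through the anonymous on_stack wrapper used in raid10_mergeable_bvec() and handle_reshape_read_error() (a GCC extension) or in the allocation. A small userspace sketch of the heap variant using a standard C flexible array member, with invented names for illustration:

#include <stdio.h>
#include <stdlib.h>

struct dev_slot {
	long long addr;
	int devnum;
};

/* Header followed by a flexible array, mirroring how r10bio ends in a
 * devs[] array and is always given room for 'copies' entries. */
struct r10_like {
	long long sector;
	int copies;
	struct dev_slot devs[];		/* C99 flexible array member */
};

static struct r10_like *alloc_r10_like(int copies)
{
	struct r10_like *r = malloc(sizeof(*r) + copies * sizeof(r->devs[0]));

	if (r)
		r->copies = copies;
	return r;
}

int main(void)
{
	struct r10_like *r = alloc_r10_like(3);

	if (!r)
		return 1;
	for (int i = 0; i < r->copies; i++)
		r->devs[i].devnum = i;
	printf("allocated %d trailing slots\n", r->copies);
	free(r);
	return 0;
}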
*/ - struct { + struct r10dev { struct bio *bio; union { struct bio *repl_bio; /* used for resync and @@ -123,20 +123,6 @@ struct r10bio { } devs[0]; }; -/* when we get a read error on a read-only array, we redirect to another - * device without failing the first device, or trying to over-write to - * correct the read error. To keep track of bad blocks on a per-bio - * level, we store IO_BLOCKED in the appropriate 'bios' pointer - */ -#define IO_BLOCKED ((struct bio*)1) -/* When we successfully write to a known bad-block, we need to remove the - * bad-block marking which must be done from process context. So we record - * the success by setting devs[n].bio to IO_MADE_GOOD - */ -#define IO_MADE_GOOD ((struct bio *)2) - -#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) - /* bits for r10bio.state */ enum r10bio_state { R10BIO_Uptodate, @@ -159,4 +145,7 @@ enum r10bio_state { */ R10BIO_Previous, }; + +extern int md_raid10_congested(struct mddev *mddev, int bits); + #endif diff --git a/trunk/drivers/md/raid5.c b/trunk/drivers/md/raid5.c index 04348d76bb30..adda94df5eb2 100644 --- a/trunk/drivers/md/raid5.c +++ b/trunk/drivers/md/raid5.c @@ -99,34 +99,40 @@ static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector) * We maintain a biased count of active stripes in the bottom 16 bits of * bi_phys_segments, and a count of processed stripes in the upper 16 bits */ -static inline int raid5_bi_phys_segments(struct bio *bio) +static inline int raid5_bi_processed_stripes(struct bio *bio) { - return bio->bi_phys_segments & 0xffff; + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + return (atomic_read(segments) >> 16) & 0xffff; } -static inline int raid5_bi_hw_segments(struct bio *bio) +static inline int raid5_dec_bi_active_stripes(struct bio *bio) { - return (bio->bi_phys_segments >> 16) & 0xffff; + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + return atomic_sub_return(1, segments) & 0xffff; } -static inline int raid5_dec_bi_phys_segments(struct bio *bio) +static inline void raid5_inc_bi_active_stripes(struct bio *bio) { - --bio->bi_phys_segments; - return raid5_bi_phys_segments(bio); + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + atomic_inc(segments); } -static inline int raid5_dec_bi_hw_segments(struct bio *bio) +static inline void raid5_set_bi_processed_stripes(struct bio *bio, + unsigned int cnt) { - unsigned short val = raid5_bi_hw_segments(bio); + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + int old, new; - --val; - bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio); - return val; + do { + old = atomic_read(segments); + new = (old & 0xffff) | (cnt << 16); + } while (atomic_cmpxchg(segments, old, new) != old); } -static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt) +static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt) { - bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16); + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + atomic_set(segments, cnt); } /* Find first data disk in a raid6 stripe */ @@ -190,49 +196,56 @@ static int stripe_operations_active(struct stripe_head *sh) test_bit(STRIPE_COMPUTE_RUN, &sh->state); } -static void __release_stripe(struct r5conf *conf, struct stripe_head *sh) +static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh) { - if (atomic_dec_and_test(&sh->count)) { - BUG_ON(!list_empty(&sh->lru)); - BUG_ON(atomic_read(&conf->active_stripes)==0); - if (test_bit(STRIPE_HANDLE, &sh->state)) { - if 
(test_bit(STRIPE_DELAYED, &sh->state) && - !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) - list_add_tail(&sh->lru, &conf->delayed_list); - else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && - sh->bm_seq - conf->seq_write > 0) - list_add_tail(&sh->lru, &conf->bitmap_list); - else { - clear_bit(STRIPE_DELAYED, &sh->state); - clear_bit(STRIPE_BIT_DELAY, &sh->state); - list_add_tail(&sh->lru, &conf->handle_list); - } - md_wakeup_thread(conf->mddev->thread); - } else { - BUG_ON(stripe_operations_active(sh)); - if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) - if (atomic_dec_return(&conf->preread_active_stripes) - < IO_THRESHOLD) - md_wakeup_thread(conf->mddev->thread); - atomic_dec(&conf->active_stripes); - if (!test_bit(STRIPE_EXPANDING, &sh->state)) { - list_add_tail(&sh->lru, &conf->inactive_list); - wake_up(&conf->wait_for_stripe); - if (conf->retry_read_aligned) - md_wakeup_thread(conf->mddev->thread); - } + BUG_ON(!list_empty(&sh->lru)); + BUG_ON(atomic_read(&conf->active_stripes)==0); + if (test_bit(STRIPE_HANDLE, &sh->state)) { + if (test_bit(STRIPE_DELAYED, &sh->state) && + !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) + list_add_tail(&sh->lru, &conf->delayed_list); + else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && + sh->bm_seq - conf->seq_write > 0) + list_add_tail(&sh->lru, &conf->bitmap_list); + else { + clear_bit(STRIPE_DELAYED, &sh->state); + clear_bit(STRIPE_BIT_DELAY, &sh->state); + list_add_tail(&sh->lru, &conf->handle_list); + } + md_wakeup_thread(conf->mddev->thread); + } else { + BUG_ON(stripe_operations_active(sh)); + if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) + if (atomic_dec_return(&conf->preread_active_stripes) + < IO_THRESHOLD) + md_wakeup_thread(conf->mddev->thread); + atomic_dec(&conf->active_stripes); + if (!test_bit(STRIPE_EXPANDING, &sh->state)) { + list_add_tail(&sh->lru, &conf->inactive_list); + wake_up(&conf->wait_for_stripe); + if (conf->retry_read_aligned) + md_wakeup_thread(conf->mddev->thread); } } } +static void __release_stripe(struct r5conf *conf, struct stripe_head *sh) +{ + if (atomic_dec_and_test(&sh->count)) + do_release_stripe(conf, sh); +} + static void release_stripe(struct stripe_head *sh) { struct r5conf *conf = sh->raid_conf; unsigned long flags; - spin_lock_irqsave(&conf->device_lock, flags); - __release_stripe(conf, sh); - spin_unlock_irqrestore(&conf->device_lock, flags); + local_irq_save(flags); + if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) { + do_release_stripe(conf, sh); + spin_unlock(&conf->device_lock); + } + local_irq_restore(flags); } static inline void remove_hash(struct stripe_head *sh) @@ -471,7 +484,8 @@ get_active_stripe(struct r5conf *conf, sector_t sector, } else { if (atomic_read(&sh->count)) { BUG_ON(!list_empty(&sh->lru) - && !test_bit(STRIPE_EXPANDING, &sh->state)); + && !test_bit(STRIPE_EXPANDING, &sh->state) + && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)); } else { if (!test_bit(STRIPE_HANDLE, &sh->state)) atomic_inc(&conf->active_stripes); @@ -640,6 +654,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) else bi->bi_sector = (sh->sector + rdev->data_offset); + if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) + bi->bi_rw |= REQ_FLUSH; + bi->bi_flags = 1 << BIO_UPTODATE; bi->bi_idx = 0; bi->bi_io_vec[0].bv_len = STRIPE_SIZE; @@ -749,14 +766,12 @@ static void ops_complete_biofill(void *stripe_head_ref) { struct stripe_head *sh = stripe_head_ref; struct bio *return_bi = NULL; - struct r5conf *conf = sh->raid_conf; int i; pr_debug("%s: stripe %llu\n", 
__func__, (unsigned long long)sh->sector); /* clear completed biofills */ - spin_lock_irq(&conf->device_lock); for (i = sh->disks; i--; ) { struct r5dev *dev = &sh->dev[i]; @@ -774,7 +789,7 @@ static void ops_complete_biofill(void *stripe_head_ref) while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { rbi2 = r5_next_bio(rbi, dev->sector); - if (!raid5_dec_bi_phys_segments(rbi)) { + if (!raid5_dec_bi_active_stripes(rbi)) { rbi->bi_next = return_bi; return_bi = rbi; } @@ -782,7 +797,6 @@ static void ops_complete_biofill(void *stripe_head_ref) } } } - spin_unlock_irq(&conf->device_lock); clear_bit(STRIPE_BIOFILL_RUN, &sh->state); return_io(return_bi); @@ -794,7 +808,6 @@ static void ops_complete_biofill(void *stripe_head_ref) static void ops_run_biofill(struct stripe_head *sh) { struct dma_async_tx_descriptor *tx = NULL; - struct r5conf *conf = sh->raid_conf; struct async_submit_ctl submit; int i; @@ -805,10 +818,10 @@ static void ops_run_biofill(struct stripe_head *sh) struct r5dev *dev = &sh->dev[i]; if (test_bit(R5_Wantfill, &dev->flags)) { struct bio *rbi; - spin_lock_irq(&conf->device_lock); + spin_lock_irq(&sh->stripe_lock); dev->read = rbi = dev->toread; dev->toread = NULL; - spin_unlock_irq(&conf->device_lock); + spin_unlock_irq(&sh->stripe_lock); while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { tx = async_copy_data(0, rbi, dev->page, @@ -1144,12 +1157,12 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) { struct bio *wbi; - spin_lock_irq(&sh->raid_conf->device_lock); + spin_lock_irq(&sh->stripe_lock); chosen = dev->towrite; dev->towrite = NULL; BUG_ON(dev->written); wbi = dev->written = chosen; - spin_unlock_irq(&sh->raid_conf->device_lock); + spin_unlock_irq(&sh->stripe_lock); while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) { @@ -1454,6 +1467,8 @@ static int grow_one_stripe(struct r5conf *conf) init_waitqueue_head(&sh->ops.wait_for_ops); #endif + spin_lock_init(&sh->stripe_lock); + if (grow_buffers(sh)) { shrink_buffers(sh); kmem_cache_free(conf->slab_cache, sh); @@ -1739,7 +1754,9 @@ static void raid5_end_read_request(struct bio * bi, int error) atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); clear_bit(R5_ReadError, &sh->dev[i].flags); clear_bit(R5_ReWrite, &sh->dev[i].flags); - } + } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) + clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); + if (atomic_read(&rdev->read_errors)) atomic_set(&rdev->read_errors, 0); } else { @@ -1784,7 +1801,11 @@ static void raid5_end_read_request(struct bio * bi, int error) else retry = 1; if (retry) - set_bit(R5_ReadError, &sh->dev[i].flags); + if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { + set_bit(R5_ReadError, &sh->dev[i].flags); + clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); + } else + set_bit(R5_ReadNoMerge, &sh->dev[i].flags); else { clear_bit(R5_ReadError, &sh->dev[i].flags); clear_bit(R5_ReWrite, &sh->dev[i].flags); @@ -2340,11 +2361,18 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in (unsigned long long)bi->bi_sector, (unsigned long long)sh->sector); - - spin_lock_irq(&conf->device_lock); + /* + * If several bio share a stripe. The bio bi_phys_segments acts as a + * reference count to avoid race. The reference count should already be + * increased before this function is called (for example, in + * make_request()), so other bio sharing this stripe will not free the + * stripe. 
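The raid5 helpers above pack two 16-bit counters into the 32-bit bi_phys_segments word: the low half counts the stripes a bio is still active on, the high half records how many stripes have been processed, and the high half is rewritten with a compare-and-swap loop. A small C11 userspace sketch of the same packing, illustrative only:

#include <stdio.h>
#include <stdatomic.h>

/* Low 16 bits: active count.  High 16 bits: processed count. */
static unsigned int active_count(atomic_uint *w)
{
	return atomic_load(w) & 0xffff;
}

static unsigned int dec_active(atomic_uint *w)
{
	return (atomic_fetch_sub(w, 1) - 1) & 0xffff;
}

/* Replace only the high half, leaving the low half untouched, using a
 * compare-and-swap loop like raid5_set_bi_processed_stripes() above. */
static void set_processed(atomic_uint *w, unsigned int cnt)
{
	unsigned int old = atomic_load(w), new;

	do {
		new = (old & 0xffff) | (cnt << 16);
	} while (!atomic_compare_exchange_weak(w, &old, new));
}

int main(void)
{
	atomic_uint word = 3;		/* three active stripes, none processed */

	set_processed(&word, 2);
	printf("active=%u processed=%u\n",
	       active_count(&word), atomic_load(&word) >> 16);
	printf("after dec: active=%u\n", dec_active(&word));
	return 0;
}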
If a stripe is owned by one stripe, the stripe lock will + * protect it. + */ + spin_lock_irq(&sh->stripe_lock); if (forwrite) { bip = &sh->dev[dd_idx].towrite; - if (*bip == NULL && sh->dev[dd_idx].written == NULL) + if (*bip == NULL) firstwrite = 1; } else bip = &sh->dev[dd_idx].toread; @@ -2360,7 +2388,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in if (*bip) bi->bi_next = *bip; *bip = bi; - bi->bi_phys_segments++; + raid5_inc_bi_active_stripes(bi); if (forwrite) { /* check if page is covered */ @@ -2375,7 +2403,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); } - spin_unlock_irq(&conf->device_lock); + spin_unlock_irq(&sh->stripe_lock); pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", (unsigned long long)(*bip)->bi_sector, @@ -2391,7 +2419,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in overlap: set_bit(R5_Overlap, &sh->dev[dd_idx].flags); - spin_unlock_irq(&conf->device_lock); + spin_unlock_irq(&sh->stripe_lock); return 0; } @@ -2441,10 +2469,11 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, rdev_dec_pending(rdev, conf->mddev); } } - spin_lock_irq(&conf->device_lock); + spin_lock_irq(&sh->stripe_lock); /* fail all writes first */ bi = sh->dev[i].towrite; sh->dev[i].towrite = NULL; + spin_unlock_irq(&sh->stripe_lock); if (bi) { s->to_write--; bitmap_end = 1; @@ -2457,13 +2486,17 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, sh->dev[i].sector + STRIPE_SECTORS) { struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); clear_bit(BIO_UPTODATE, &bi->bi_flags); - if (!raid5_dec_bi_phys_segments(bi)) { + if (!raid5_dec_bi_active_stripes(bi)) { md_write_end(conf->mddev); bi->bi_next = *return_bi; *return_bi = bi; } bi = nextbi; } + if (bitmap_end) + bitmap_endwrite(conf->mddev->bitmap, sh->sector, + STRIPE_SECTORS, 0, 0); + bitmap_end = 0; /* and fail all 'written' */ bi = sh->dev[i].written; sh->dev[i].written = NULL; @@ -2472,7 +2505,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, sh->dev[i].sector + STRIPE_SECTORS) { struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); clear_bit(BIO_UPTODATE, &bi->bi_flags); - if (!raid5_dec_bi_phys_segments(bi)) { + if (!raid5_dec_bi_active_stripes(bi)) { md_write_end(conf->mddev); bi->bi_next = *return_bi; *return_bi = bi; @@ -2496,14 +2529,13 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); clear_bit(BIO_UPTODATE, &bi->bi_flags); - if (!raid5_dec_bi_phys_segments(bi)) { + if (!raid5_dec_bi_active_stripes(bi)) { bi->bi_next = *return_bi; *return_bi = bi; } bi = nextbi; } } - spin_unlock_irq(&conf->device_lock); if (bitmap_end) bitmap_endwrite(conf->mddev->bitmap, sh->sector, STRIPE_SECTORS, 0, 0); @@ -2707,30 +2739,23 @@ static void handle_stripe_clean_event(struct r5conf *conf, test_bit(R5_UPTODATE, &dev->flags)) { /* We can return any write requests */ struct bio *wbi, *wbi2; - int bitmap_end = 0; pr_debug("Return write for disc %d\n", i); - spin_lock_irq(&conf->device_lock); wbi = dev->written; dev->written = NULL; while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) { wbi2 = r5_next_bio(wbi, dev->sector); - if (!raid5_dec_bi_phys_segments(wbi)) { + if (!raid5_dec_bi_active_stripes(wbi)) { md_write_end(conf->mddev); wbi->bi_next = *return_bi; *return_bi = wbi; } wbi = wbi2; } - if 
(dev->towrite == NULL) - bitmap_end = 1; - spin_unlock_irq(&conf->device_lock); - if (bitmap_end) - bitmap_endwrite(conf->mddev->bitmap, - sh->sector, - STRIPE_SECTORS, + bitmap_endwrite(conf->mddev->bitmap, sh->sector, + STRIPE_SECTORS, !test_bit(STRIPE_DEGRADED, &sh->state), - 0); + 0); } } @@ -3182,7 +3207,6 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) /* Now to look around and see what can be done */ rcu_read_lock(); - spin_lock_irq(&conf->device_lock); for (i=disks; i--; ) { struct md_rdev *rdev; sector_t first_bad; @@ -3328,7 +3352,6 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) do_recovery = 1; } } - spin_unlock_irq(&conf->device_lock); if (test_bit(STRIPE_SYNCING, &sh->state)) { /* If there is a failed device being replaced, * we must be recovering. @@ -3791,7 +3814,7 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf) * this sets the active strip count to 1 and the processed * strip count to zero (upper 8 bits) */ - bi->bi_phys_segments = 1; /* biased count of active stripes */ + raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */ } return bi; @@ -3988,6 +4011,62 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf) return sh; } +struct raid5_plug_cb { + struct blk_plug_cb cb; + struct list_head list; +}; + +static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) +{ + struct raid5_plug_cb *cb = container_of( + blk_cb, struct raid5_plug_cb, cb); + struct stripe_head *sh; + struct mddev *mddev = cb->cb.data; + struct r5conf *conf = mddev->private; + + if (cb->list.next && !list_empty(&cb->list)) { + spin_lock_irq(&conf->device_lock); + while (!list_empty(&cb->list)) { + sh = list_first_entry(&cb->list, struct stripe_head, lru); + list_del_init(&sh->lru); + /* + * avoid race release_stripe_plug() sees + * STRIPE_ON_UNPLUG_LIST clear but the stripe + * is still in our list + */ + smp_mb__before_clear_bit(); + clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); + __release_stripe(conf, sh); + } + spin_unlock_irq(&conf->device_lock); + } + kfree(cb); +} + +static void release_stripe_plug(struct mddev *mddev, + struct stripe_head *sh) +{ + struct blk_plug_cb *blk_cb = blk_check_plugged( + raid5_unplug, mddev, + sizeof(struct raid5_plug_cb)); + struct raid5_plug_cb *cb; + + if (!blk_cb) { + release_stripe(sh); + return; + } + + cb = container_of(blk_cb, struct raid5_plug_cb, cb); + + if (cb->list.next == NULL) + INIT_LIST_HEAD(&cb->list); + + if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) + list_add_tail(&sh->lru, &cb->list); + else + release_stripe(sh); +} + static void make_request(struct mddev *mddev, struct bio * bi) { struct r5conf *conf = mddev->private; @@ -4113,11 +4192,10 @@ static void make_request(struct mddev *mddev, struct bio * bi) finish_wait(&conf->wait_for_overlap, &w); set_bit(STRIPE_HANDLE, &sh->state); clear_bit(STRIPE_DELAYED, &sh->state); - if ((bi->bi_rw & REQ_SYNC) && + if ((bi->bi_rw & REQ_NOIDLE) && !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) atomic_inc(&conf->preread_active_stripes); - mddev_check_plugged(mddev); - release_stripe(sh); + release_stripe_plug(mddev, sh); } else { /* cannot get stripe for read-ahead, just give-up */ clear_bit(BIO_UPTODATE, &bi->bi_flags); @@ -4126,9 +4204,7 @@ static void make_request(struct mddev *mddev, struct bio * bi) } } - spin_lock_irq(&conf->device_lock); - remaining = raid5_dec_bi_phys_segments(bi); - spin_unlock_irq(&conf->device_lock); + remaining = 
raid5_dec_bi_active_stripes(bi); if (remaining == 0) { if ( rw == WRITE ) @@ -4484,7 +4560,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) sector += STRIPE_SECTORS, scnt++) { - if (scnt < raid5_bi_hw_segments(raid_bio)) + if (scnt < raid5_bi_processed_stripes(raid_bio)) /* already done this stripe */ continue; @@ -4492,25 +4568,24 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) if (!sh) { /* failed to get a stripe - must wait */ - raid5_set_bi_hw_segments(raid_bio, scnt); + raid5_set_bi_processed_stripes(raid_bio, scnt); conf->retry_read_aligned = raid_bio; return handled; } if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) { release_stripe(sh); - raid5_set_bi_hw_segments(raid_bio, scnt); + raid5_set_bi_processed_stripes(raid_bio, scnt); conf->retry_read_aligned = raid_bio; return handled; } + set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); handle_stripe(sh); release_stripe(sh); handled++; } - spin_lock_irq(&conf->device_lock); - remaining = raid5_dec_bi_phys_segments(raid_bio); - spin_unlock_irq(&conf->device_lock); + remaining = raid5_dec_bi_active_stripes(raid_bio); if (remaining == 0) bio_endio(raid_bio, 0); if (atomic_dec_and_test(&conf->active_aligned_reads)) @@ -4518,6 +4593,30 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) return handled; } +#define MAX_STRIPE_BATCH 8 +static int handle_active_stripes(struct r5conf *conf) +{ + struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; + int i, batch_size = 0; + + while (batch_size < MAX_STRIPE_BATCH && + (sh = __get_priority_stripe(conf)) != NULL) + batch[batch_size++] = sh; + + if (batch_size == 0) + return batch_size; + spin_unlock_irq(&conf->device_lock); + + for (i = 0; i < batch_size; i++) + handle_stripe(batch[i]); + + cond_resched(); + + spin_lock_irq(&conf->device_lock); + for (i = 0; i < batch_size; i++) + __release_stripe(conf, batch[i]); + return batch_size; +} /* * This is our raid5 kernel thread. 
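The new handle_active_stripes() above collects up to MAX_STRIPE_BATCH stripes while device_lock is held, drops the lock for the expensive handle_stripe() work, then retakes it once to release the whole batch, so the lock is no longer bounced per stripe. A minimal pthread sketch of that batch-under-lock pattern, illustrative only and not kernel code, with made-up work items:

#include <stdio.h>
#include <pthread.h>

#define MAX_BATCH 8

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int queued = 20;			/* pretend work items, protected by 'lock' */

/* Grab up to MAX_BATCH items under the lock, process them unlocked,
 * then reacquire the lock for the batched bookkeeping. Returns batch size. */
static int handle_batch(void)
{
	int batch[MAX_BATCH];
	int i, n = 0;

	pthread_mutex_lock(&lock);
	while (n < MAX_BATCH && queued > 0)
		batch[n++] = queued--;
	pthread_mutex_unlock(&lock);

	for (i = 0; i < n; i++)		/* expensive work happens unlocked */
		printf("handling item %d\n", batch[i]);

	pthread_mutex_lock(&lock);
	/* release/accounting for the whole batch would go here */
	pthread_mutex_unlock(&lock);
	return n;
}

int main(void)
{
	while (handle_batch() > 0)
		;
	return 0;
}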
@@ -4528,7 +4627,6 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) */ static void raid5d(struct mddev *mddev) { - struct stripe_head *sh; struct r5conf *conf = mddev->private; int handled; struct blk_plug plug; @@ -4542,8 +4640,9 @@ static void raid5d(struct mddev *mddev) spin_lock_irq(&conf->device_lock); while (1) { struct bio *bio; + int batch_size; - if (atomic_read(&mddev->plug_cnt) == 0 && + if ( !list_empty(&conf->bitmap_list)) { /* Now is a good time to flush some bitmap updates */ conf->seq_flush++; @@ -4553,8 +4652,7 @@ static void raid5d(struct mddev *mddev) conf->seq_write = conf->seq_flush; activate_bit_delay(conf); } - if (atomic_read(&mddev->plug_cnt) == 0) - raid5_activate_delayed(conf); + raid5_activate_delayed(conf); while ((bio = remove_bio_from_retry(conf))) { int ok; @@ -4566,21 +4664,16 @@ static void raid5d(struct mddev *mddev) handled++; } - sh = __get_priority_stripe(conf); - - if (!sh) + batch_size = handle_active_stripes(conf); + if (!batch_size) break; - spin_unlock_irq(&conf->device_lock); - - handled++; - handle_stripe(sh); - release_stripe(sh); - cond_resched(); + handled += batch_size; - if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) + if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) { + spin_unlock_irq(&conf->device_lock); md_check_recovery(mddev); - - spin_lock_irq(&conf->device_lock); + spin_lock_irq(&conf->device_lock); + } } pr_debug("%d stripes handled\n", handled); diff --git a/trunk/drivers/md/raid5.h index 2164021f3b5f..a9fc24901eda 100644 --- a/trunk/drivers/md/raid5.h +++ b/trunk/drivers/md/raid5.h @@ -210,6 +210,7 @@ struct stripe_head { int disks; /* disks in stripe */ enum check_states check_state; enum reconstruct_states reconstruct_state; + spinlock_t stripe_lock; /** * struct stripe_operations * @target - STRIPE_OP_COMPUTE_BLK target @@ -273,6 +274,7 @@ enum r5dev_flags { R5_Wantwrite, R5_Overlap, /* There is a pending overlapping request * on this block */ + R5_ReadNoMerge, /* prevent bio from merging in block-layer */ R5_ReadError, /* seen a read error here recently */ R5_ReWrite, /* have tried to over-write the readerror */ @@ -319,6 +321,7 @@ enum { STRIPE_BIOFILL_RUN, STRIPE_COMPUTE_RUN, STRIPE_OPS_REQ_PENDING, + STRIPE_ON_UNPLUG_LIST, }; /* diff --git a/trunk/drivers/media/dvb/siano/smsusb.c b/trunk/drivers/media/dvb/siano/smsusb.c index 664e460f247b..aac622200e99 100644 --- a/trunk/drivers/media/dvb/siano/smsusb.c +++ b/trunk/drivers/media/dvb/siano/smsusb.c @@ -481,7 +481,7 @@ static int smsusb_resume(struct usb_interface *intf) return 0; } -static const struct usb_device_id smsusb_id_table[] __devinitconst = { +static const struct usb_device_id smsusb_id_table[] = { { USB_DEVICE(0x187f, 0x0010), .driver_info = SMS1XXX_BOARD_SIANO_STELLAR }, { USB_DEVICE(0x187f, 0x0100), diff --git a/trunk/drivers/media/radio/radio-shark.c b/trunk/drivers/media/radio/radio-shark.c index d0b6bb507634..72ded29728bb 100644 --- a/trunk/drivers/media/radio/radio-shark.c +++ b/trunk/drivers/media/radio/radio-shark.c @@ -35,6 +35,11 @@ #include #include +#if defined(CONFIG_LEDS_CLASS) || \ + (defined(CONFIG_LEDS_CLASS_MODULE) && defined(CONFIG_RADIO_SHARK_MODULE)) +#define SHARK_USE_LEDS 1 +#endif + /* * Version Information */ @@ -56,44 +61,18 @@ MODULE_LICENSE("GPL"); enum { BLUE_LED, BLUE_PULSE_LED, RED_LED, NO_LEDS }; -static void shark_led_set_blue(struct led_classdev *led_cdev, - enum led_brightness value); -static void shark_led_set_blue_pulse(struct led_classdev *led_cdev, - enum led_brightness value); -static void shark_led_set_red(struct led_classdev *led_cdev, - enum led_brightness
value); - -static const struct led_classdev shark_led_templates[NO_LEDS] = { - [BLUE_LED] = { - .name = "%s:blue:", - .brightness = LED_OFF, - .max_brightness = 127, - .brightness_set = shark_led_set_blue, - }, - [BLUE_PULSE_LED] = { - .name = "%s:blue-pulse:", - .brightness = LED_OFF, - .max_brightness = 255, - .brightness_set = shark_led_set_blue_pulse, - }, - [RED_LED] = { - .name = "%s:red:", - .brightness = LED_OFF, - .max_brightness = 1, - .brightness_set = shark_led_set_red, - }, -}; - struct shark_device { struct usb_device *usbdev; struct v4l2_device v4l2_dev; struct snd_tea575x tea; +#ifdef SHARK_USE_LEDS struct work_struct led_work; struct led_classdev leds[NO_LEDS]; char led_names[NO_LEDS][32]; atomic_t brightness[NO_LEDS]; unsigned long brightness_new; +#endif u8 *transfer_buffer; u32 last_val; @@ -175,20 +154,13 @@ static struct snd_tea575x_ops shark_tea_ops = { .read_val = shark_read_val, }; +#ifdef SHARK_USE_LEDS static void shark_led_work(struct work_struct *work) { struct shark_device *shark = container_of(work, struct shark_device, led_work); int i, res, brightness, actual_len; - /* - * We use the v4l2_dev lock and registered bit to ensure the device - * does not get unplugged and unreffed while we're running. - */ - mutex_lock(&shark->tea.mutex); - if (!video_is_registered(&shark->tea.vd)) - goto leave; - for (i = 0; i < 3; i++) { if (!test_and_clear_bit(i, &shark->brightness_new)) continue; @@ -208,8 +180,6 @@ static void shark_led_work(struct work_struct *work) v4l2_err(&shark->v4l2_dev, "set LED %s error: %d\n", shark->led_names[i], res); } -leave: - mutex_unlock(&shark->tea.mutex); } static void shark_led_set_blue(struct led_classdev *led_cdev, @@ -245,19 +215,78 @@ static void shark_led_set_red(struct led_classdev *led_cdev, schedule_work(&shark->led_work); } +static const struct led_classdev shark_led_templates[NO_LEDS] = { + [BLUE_LED] = { + .name = "%s:blue:", + .brightness = LED_OFF, + .max_brightness = 127, + .brightness_set = shark_led_set_blue, + }, + [BLUE_PULSE_LED] = { + .name = "%s:blue-pulse:", + .brightness = LED_OFF, + .max_brightness = 255, + .brightness_set = shark_led_set_blue_pulse, + }, + [RED_LED] = { + .name = "%s:red:", + .brightness = LED_OFF, + .max_brightness = 1, + .brightness_set = shark_led_set_red, + }, +}; + +static int shark_register_leds(struct shark_device *shark, struct device *dev) +{ + int i, retval; + + INIT_WORK(&shark->led_work, shark_led_work); + for (i = 0; i < NO_LEDS; i++) { + shark->leds[i] = shark_led_templates[i]; + snprintf(shark->led_names[i], sizeof(shark->led_names[0]), + shark->leds[i].name, shark->v4l2_dev.name); + shark->leds[i].name = shark->led_names[i]; + retval = led_classdev_register(dev, &shark->leds[i]); + if (retval) { + v4l2_err(&shark->v4l2_dev, + "couldn't register led: %s\n", + shark->led_names[i]); + return retval; + } + } + return 0; +} + +static void shark_unregister_leds(struct shark_device *shark) +{ + int i; + + for (i = 0; i < NO_LEDS; i++) + led_classdev_unregister(&shark->leds[i]); + + cancel_work_sync(&shark->led_work); +} +#else +static int shark_register_leds(struct shark_device *shark, struct device *dev) +{ + v4l2_warn(&shark->v4l2_dev, + "CONFIG_LED_CLASS not enabled, LED support disabled\n"); + return 0; +} +static inline void shark_unregister_leds(struct shark_device *shark) { } +#endif + static void usb_shark_disconnect(struct usb_interface *intf) { struct v4l2_device *v4l2_dev = usb_get_intfdata(intf); struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); - int i; 
mutex_lock(&shark->tea.mutex); v4l2_device_disconnect(&shark->v4l2_dev); snd_tea575x_exit(&shark->tea); mutex_unlock(&shark->tea.mutex); - for (i = 0; i < NO_LEDS; i++) - led_classdev_unregister(&shark->leds[i]); + shark_unregister_leds(shark); v4l2_device_put(&shark->v4l2_dev); } @@ -266,7 +295,6 @@ static void usb_shark_release(struct v4l2_device *v4l2_dev) { struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); - cancel_work_sync(&shark->led_work); v4l2_device_unregister(&shark->v4l2_dev); kfree(shark->transfer_buffer); kfree(shark); @@ -276,7 +304,7 @@ static int usb_shark_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct shark_device *shark; - int i, retval = -ENOMEM; + int retval = -ENOMEM; shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL); if (!shark) @@ -286,17 +314,13 @@ static int usb_shark_probe(struct usb_interface *intf, if (!shark->transfer_buffer) goto err_alloc_buffer; - /* - * Work around a bug in usbhid/hid-core.c, where it leaves a dangling - * pointer in intfdata causing v4l2-device.c to not set it. Which - * results in usb_shark_disconnect() referencing the dangling pointer - * - * REMOVE (as soon as the above bug is fixed, patch submitted) - */ - usb_set_intfdata(intf, NULL); + v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance); + + retval = shark_register_leds(shark, &intf->dev); + if (retval) + goto err_reg_leds; shark->v4l2_dev.release = usb_shark_release; - v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance); retval = v4l2_device_register(&intf->dev, &shark->v4l2_dev); if (retval) { v4l2_err(&shark->v4l2_dev, "couldn't register v4l2_device\n"); @@ -320,32 +344,13 @@ static int usb_shark_probe(struct usb_interface *intf, goto err_init_tea; } - INIT_WORK(&shark->led_work, shark_led_work); - for (i = 0; i < NO_LEDS; i++) { - shark->leds[i] = shark_led_templates[i]; - snprintf(shark->led_names[i], sizeof(shark->led_names[0]), - shark->leds[i].name, shark->v4l2_dev.name); - shark->leds[i].name = shark->led_names[i]; - /* - * We don't fail the probe if we fail to register the leds, - * because once we've called snd_tea575x_init, the /dev/radio0 - * node may be opened from userspace holding a reference to us! - * - * Note we cannot register the leds first instead as - * shark_led_work depends on the v4l2 mutex and registered bit. 
- */ - retval = led_classdev_register(&intf->dev, &shark->leds[i]); - if (retval) - v4l2_err(&shark->v4l2_dev, - "couldn't register led: %s\n", - shark->led_names[i]); - } - return 0; err_init_tea: v4l2_device_unregister(&shark->v4l2_dev); err_reg_dev: + shark_unregister_leds(shark); +err_reg_leds: kfree(shark->transfer_buffer); err_alloc_buffer: kfree(shark); diff --git a/trunk/drivers/media/radio/radio-shark2.c b/trunk/drivers/media/radio/radio-shark2.c index b9575de3e7e8..7b4efdfaae28 100644 --- a/trunk/drivers/media/radio/radio-shark2.c +++ b/trunk/drivers/media/radio/radio-shark2.c @@ -35,6 +35,11 @@ #include #include "radio-tea5777.h" +#if defined(CONFIG_LEDS_CLASS) || \ + (defined(CONFIG_LEDS_CLASS_MODULE) && defined(CONFIG_RADIO_SHARK2_MODULE)) +#define SHARK_USE_LEDS 1 +#endif + MODULE_AUTHOR("Hans de Goede "); MODULE_DESCRIPTION("Griffin radioSHARK2, USB radio receiver driver"); MODULE_LICENSE("GPL"); @@ -43,7 +48,6 @@ static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0-1)"); - #define SHARK_IN_EP 0x83 #define SHARK_OUT_EP 0x05 @@ -54,36 +58,18 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)"); enum { BLUE_LED, RED_LED, NO_LEDS }; -static void shark_led_set_blue(struct led_classdev *led_cdev, - enum led_brightness value); -static void shark_led_set_red(struct led_classdev *led_cdev, - enum led_brightness value); - -static const struct led_classdev shark_led_templates[NO_LEDS] = { - [BLUE_LED] = { - .name = "%s:blue:", - .brightness = LED_OFF, - .max_brightness = 127, - .brightness_set = shark_led_set_blue, - }, - [RED_LED] = { - .name = "%s:red:", - .brightness = LED_OFF, - .max_brightness = 1, - .brightness_set = shark_led_set_red, - }, -}; - struct shark_device { struct usb_device *usbdev; struct v4l2_device v4l2_dev; struct radio_tea5777 tea; +#ifdef SHARK_USE_LEDS struct work_struct led_work; struct led_classdev leds[NO_LEDS]; char led_names[NO_LEDS][32]; atomic_t brightness[NO_LEDS]; unsigned long brightness_new; +#endif u8 *transfer_buffer; }; @@ -161,18 +147,12 @@ static struct radio_tea5777_ops shark_tea_ops = { .read_reg = shark_read_reg, }; +#ifdef SHARK_USE_LEDS static void shark_led_work(struct work_struct *work) { struct shark_device *shark = container_of(work, struct shark_device, led_work); int i, res, brightness, actual_len; - /* - * We use the v4l2_dev lock and registered bit to ensure the device - * does not get unplugged and unreffed while we're running. 
- */ - mutex_lock(&shark->tea.mutex); - if (!video_is_registered(&shark->tea.vd)) - goto leave; for (i = 0; i < 2; i++) { if (!test_and_clear_bit(i, &shark->brightness_new)) @@ -191,8 +171,6 @@ static void shark_led_work(struct work_struct *work) v4l2_err(&shark->v4l2_dev, "set LED %s error: %d\n", shark->led_names[i], res); } -leave: - mutex_unlock(&shark->tea.mutex); } static void shark_led_set_blue(struct led_classdev *led_cdev, @@ -217,19 +195,72 @@ static void shark_led_set_red(struct led_classdev *led_cdev, schedule_work(&shark->led_work); } +static const struct led_classdev shark_led_templates[NO_LEDS] = { + [BLUE_LED] = { + .name = "%s:blue:", + .brightness = LED_OFF, + .max_brightness = 127, + .brightness_set = shark_led_set_blue, + }, + [RED_LED] = { + .name = "%s:red:", + .brightness = LED_OFF, + .max_brightness = 1, + .brightness_set = shark_led_set_red, + }, +}; + +static int shark_register_leds(struct shark_device *shark, struct device *dev) +{ + int i, retval; + + INIT_WORK(&shark->led_work, shark_led_work); + for (i = 0; i < NO_LEDS; i++) { + shark->leds[i] = shark_led_templates[i]; + snprintf(shark->led_names[i], sizeof(shark->led_names[0]), + shark->leds[i].name, shark->v4l2_dev.name); + shark->leds[i].name = shark->led_names[i]; + retval = led_classdev_register(dev, &shark->leds[i]); + if (retval) { + v4l2_err(&shark->v4l2_dev, + "couldn't register led: %s\n", + shark->led_names[i]); + return retval; + } + } + return 0; +} + +static void shark_unregister_leds(struct shark_device *shark) +{ + int i; + + for (i = 0; i < NO_LEDS; i++) + led_classdev_unregister(&shark->leds[i]); + + cancel_work_sync(&shark->led_work); +} +#else +static int shark_register_leds(struct shark_device *shark, struct device *dev) +{ + v4l2_warn(&shark->v4l2_dev, + "CONFIG_LED_CLASS not enabled, LED support disabled\n"); + return 0; +} +static inline void shark_unregister_leds(struct shark_device *shark) { } +#endif + static void usb_shark_disconnect(struct usb_interface *intf) { struct v4l2_device *v4l2_dev = usb_get_intfdata(intf); struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); - int i; mutex_lock(&shark->tea.mutex); v4l2_device_disconnect(&shark->v4l2_dev); radio_tea5777_exit(&shark->tea); mutex_unlock(&shark->tea.mutex); - for (i = 0; i < NO_LEDS; i++) - led_classdev_unregister(&shark->leds[i]); + shark_unregister_leds(shark); v4l2_device_put(&shark->v4l2_dev); } @@ -238,7 +269,6 @@ static void usb_shark_release(struct v4l2_device *v4l2_dev) { struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); - cancel_work_sync(&shark->led_work); v4l2_device_unregister(&shark->v4l2_dev); kfree(shark->transfer_buffer); kfree(shark); @@ -248,7 +278,7 @@ static int usb_shark_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct shark_device *shark; - int i, retval = -ENOMEM; + int retval = -ENOMEM; shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL); if (!shark) @@ -258,17 +288,13 @@ static int usb_shark_probe(struct usb_interface *intf, if (!shark->transfer_buffer) goto err_alloc_buffer; - /* - * Work around a bug in usbhid/hid-core.c, where it leaves a dangling - * pointer in intfdata causing v4l2-device.c to not set it. 
Which - * results in usb_shark_disconnect() referencing the dangling pointer - * - * REMOVE (as soon as the above bug is fixed, patch submitted) - */ - usb_set_intfdata(intf, NULL); + v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance); + + retval = shark_register_leds(shark, &intf->dev); + if (retval) + goto err_reg_leds; shark->v4l2_dev.release = usb_shark_release; - v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance); retval = v4l2_device_register(&intf->dev, &shark->v4l2_dev); if (retval) { v4l2_err(&shark->v4l2_dev, "couldn't register v4l2_device\n"); @@ -292,32 +318,13 @@ static int usb_shark_probe(struct usb_interface *intf, goto err_init_tea; } - INIT_WORK(&shark->led_work, shark_led_work); - for (i = 0; i < NO_LEDS; i++) { - shark->leds[i] = shark_led_templates[i]; - snprintf(shark->led_names[i], sizeof(shark->led_names[0]), - shark->leds[i].name, shark->v4l2_dev.name); - shark->leds[i].name = shark->led_names[i]; - /* - * We don't fail the probe if we fail to register the leds, - * because once we've called radio_tea5777_init, the /dev/radio0 - * node may be opened from userspace holding a reference to us! - * - * Note we cannot register the leds first instead as - * shark_led_work depends on the v4l2 mutex and registered bit. - */ - retval = led_classdev_register(&intf->dev, &shark->leds[i]); - if (retval) - v4l2_err(&shark->v4l2_dev, - "couldn't register led: %s\n", - shark->led_names[i]); - } - return 0; err_init_tea: v4l2_device_unregister(&shark->v4l2_dev); err_reg_dev: + shark_unregister_leds(shark); +err_reg_leds: kfree(shark->transfer_buffer); err_alloc_buffer: kfree(shark); diff --git a/trunk/drivers/media/radio/si470x/radio-si470x-common.c b/trunk/drivers/media/radio/si470x/radio-si470x-common.c index 9e38132afec6..9bb65e170d99 100644 --- a/trunk/drivers/media/radio/si470x/radio-si470x-common.c +++ b/trunk/drivers/media/radio/si470x/radio-si470x-common.c @@ -151,6 +151,7 @@ static const struct v4l2_frequency_band bands[] = { .index = 0, .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO | + V4L2_TUNER_CAP_FREQ_BANDS | V4L2_TUNER_CAP_HWSEEK_BOUNDED | V4L2_TUNER_CAP_HWSEEK_WRAP, .rangelow = 87500 * 16, @@ -162,6 +163,7 @@ static const struct v4l2_frequency_band bands[] = { .index = 1, .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO | + V4L2_TUNER_CAP_FREQ_BANDS | V4L2_TUNER_CAP_HWSEEK_BOUNDED | V4L2_TUNER_CAP_HWSEEK_WRAP, .rangelow = 76000 * 16, @@ -173,6 +175,7 @@ static const struct v4l2_frequency_band bands[] = { .index = 2, .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO | + V4L2_TUNER_CAP_FREQ_BANDS | V4L2_TUNER_CAP_HWSEEK_BOUNDED | V4L2_TUNER_CAP_HWSEEK_WRAP, .rangelow = 76000 * 16, diff --git a/trunk/drivers/media/radio/si470x/radio-si470x-i2c.c b/trunk/drivers/media/radio/si470x/radio-si470x-i2c.c index 643a6ff7c5d0..f867f04cccc9 100644 --- a/trunk/drivers/media/radio/si470x/radio-si470x-i2c.c +++ b/trunk/drivers/media/radio/si470x/radio-si470x-i2c.c @@ -225,8 +225,9 @@ int si470x_vidioc_querycap(struct file *file, void *priv, { strlcpy(capability->driver, DRIVER_NAME, sizeof(capability->driver)); strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card)); - capability->capabilities = V4L2_CAP_HW_FREQ_SEEK | - V4L2_CAP_TUNER | V4L2_CAP_RADIO; + capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_READWRITE | + V4L2_CAP_TUNER | V4L2_CAP_RADIO | 
V4L2_CAP_RDS_CAPTURE; + capability->capabilities = capability->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } diff --git a/trunk/drivers/media/radio/si470x/radio-si470x-usb.c b/trunk/drivers/media/radio/si470x/radio-si470x-usb.c index 146be4263ea1..be076f7181e7 100644 --- a/trunk/drivers/media/radio/si470x/radio-si470x-usb.c +++ b/trunk/drivers/media/radio/si470x/radio-si470x-usb.c @@ -531,7 +531,7 @@ int si470x_vidioc_querycap(struct file *file, void *priv, strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card)); usb_make_path(radio->usbdev, capability->bus_info, sizeof(capability->bus_info)); - capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | + capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_READWRITE | V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE; capability->capabilities = capability->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; diff --git a/trunk/drivers/media/rc/Kconfig b/trunk/drivers/media/rc/Kconfig index 5180390be7ab..8be57634ba60 100644 --- a/trunk/drivers/media/rc/Kconfig +++ b/trunk/drivers/media/rc/Kconfig @@ -261,6 +261,7 @@ config IR_WINBOND_CIR config IR_IGUANA tristate "IguanaWorks USB IR Transceiver" + depends on USB_ARCH_HAS_HCD depends on RC_CORE select USB ---help--- diff --git a/trunk/drivers/media/video/gspca/jl2005bcd.c b/trunk/drivers/media/video/gspca/jl2005bcd.c index cf9d9fca5b84..234777116e5f 100644 --- a/trunk/drivers/media/video/gspca/jl2005bcd.c +++ b/trunk/drivers/media/video/gspca/jl2005bcd.c @@ -512,7 +512,7 @@ static const struct sd_desc sd_desc = { }; /* -- module initialisation -- */ -static const __devinitdata struct usb_device_id device_table[] = { +static const struct usb_device_id device_table[] = { {USB_DEVICE(0x0979, 0x0227)}, {} }; diff --git a/trunk/drivers/media/video/gspca/spca506.c b/trunk/drivers/media/video/gspca/spca506.c index 969bb5a4cd93..bab01c86c315 100644 --- a/trunk/drivers/media/video/gspca/spca506.c +++ b/trunk/drivers/media/video/gspca/spca506.c @@ -579,7 +579,7 @@ static const struct sd_desc sd_desc = { }; /* -- module initialisation -- */ -static const struct usb_device_id device_table[] __devinitconst = { +static const struct usb_device_id device_table[] = { {USB_DEVICE(0x06e1, 0xa190)}, /*fixme: may be IntelPCCameraPro BRIDGE_SPCA505 {USB_DEVICE(0x0733, 0x0430)}, */ diff --git a/trunk/drivers/media/video/mem2mem_testdev.c b/trunk/drivers/media/video/mem2mem_testdev.c index 7efe9ad7acc7..0b91a5cd38eb 100644 --- a/trunk/drivers/media/video/mem2mem_testdev.c +++ b/trunk/drivers/media/video/mem2mem_testdev.c @@ -431,7 +431,7 @@ static int vidioc_querycap(struct file *file, void *priv, strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1); strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1); strlcpy(cap->bus_info, MEM2MEM_NAME, sizeof(cap->bus_info)); - cap->capabilities = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING; + cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING; cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } diff --git a/trunk/drivers/media/video/mx1_camera.c b/trunk/drivers/media/video/mx1_camera.c index d2e6f82ecfac..560a65aa7038 100644 --- a/trunk/drivers/media/video/mx1_camera.c +++ b/trunk/drivers/media/video/mx1_camera.c @@ -403,7 +403,7 @@ static void mx1_camera_activate(struct mx1_camera_dev *pcdev) dev_dbg(pcdev->icd->parent, "Activate device\n"); - clk_enable(pcdev->clk); + clk_prepare_enable(pcdev->clk); /* enable CSI before doing anything else */ __raw_writel(csicr1, pcdev->base + CSICR1); @@ -422,7 +422,7 @@ static void 
mx1_camera_deactivate(struct mx1_camera_dev *pcdev) /* Disable all CSI interface */ __raw_writel(0x00, pcdev->base + CSICR1); - clk_disable(pcdev->clk); + clk_disable_unprepare(pcdev->clk); } /* diff --git a/trunk/drivers/media/video/mx2_camera.c b/trunk/drivers/media/video/mx2_camera.c index 637bde8aca28..ac175406e582 100644 --- a/trunk/drivers/media/video/mx2_camera.c +++ b/trunk/drivers/media/video/mx2_camera.c @@ -272,7 +272,7 @@ struct mx2_camera_dev { struct device *dev; struct soc_camera_host soc_host; struct soc_camera_device *icd; - struct clk *clk_csi, *clk_emma; + struct clk *clk_csi, *clk_emma_ahb, *clk_emma_ipg; unsigned int irq_csi, irq_emma; void __iomem *base_csi, *base_emma; @@ -407,7 +407,7 @@ static void mx2_camera_deactivate(struct mx2_camera_dev *pcdev) { unsigned long flags; - clk_disable(pcdev->clk_csi); + clk_disable_unprepare(pcdev->clk_csi); writel(0, pcdev->base_csi + CSICR1); if (cpu_is_mx27()) { writel(0, pcdev->base_emma + PRP_CNTL); @@ -435,7 +435,7 @@ static int mx2_camera_add_device(struct soc_camera_device *icd) if (pcdev->icd) return -EBUSY; - ret = clk_enable(pcdev->clk_csi); + ret = clk_prepare_enable(pcdev->clk_csi); if (ret < 0) return ret; @@ -1633,23 +1633,34 @@ static int __devinit mx27_camera_emma_init(struct mx2_camera_dev *pcdev) goto exit_iounmap; } - pcdev->clk_emma = clk_get(NULL, "emma"); - if (IS_ERR(pcdev->clk_emma)) { - err = PTR_ERR(pcdev->clk_emma); + pcdev->clk_emma_ipg = clk_get(pcdev->dev, "emma-ipg"); + if (IS_ERR(pcdev->clk_emma_ipg)) { + err = PTR_ERR(pcdev->clk_emma_ipg); goto exit_free_irq; } - clk_enable(pcdev->clk_emma); + clk_prepare_enable(pcdev->clk_emma_ipg); + + pcdev->clk_emma_ahb = clk_get(pcdev->dev, "emma-ahb"); + if (IS_ERR(pcdev->clk_emma_ahb)) { + err = PTR_ERR(pcdev->clk_emma_ahb); + goto exit_clk_emma_ipg_put; + } + + clk_prepare_enable(pcdev->clk_emma_ahb); err = mx27_camera_emma_prp_reset(pcdev); if (err) - goto exit_clk_emma_put; + goto exit_clk_emma_ahb_put; return err; -exit_clk_emma_put: - clk_disable(pcdev->clk_emma); - clk_put(pcdev->clk_emma); +exit_clk_emma_ahb_put: + clk_disable_unprepare(pcdev->clk_emma_ahb); + clk_put(pcdev->clk_emma_ahb); +exit_clk_emma_ipg_put: + clk_disable_unprepare(pcdev->clk_emma_ipg); + clk_put(pcdev->clk_emma_ipg); exit_free_irq: free_irq(pcdev->irq_emma, pcdev); exit_iounmap: @@ -1685,7 +1696,7 @@ static int __devinit mx2_camera_probe(struct platform_device *pdev) goto exit; } - pcdev->clk_csi = clk_get(&pdev->dev, NULL); + pcdev->clk_csi = clk_get(&pdev->dev, "ahb"); if (IS_ERR(pcdev->clk_csi)) { dev_err(&pdev->dev, "Could not get csi clock\n"); err = PTR_ERR(pcdev->clk_csi); @@ -1785,8 +1796,10 @@ static int __devinit mx2_camera_probe(struct platform_device *pdev) eallocctx: if (cpu_is_mx27()) { free_irq(pcdev->irq_emma, pcdev); - clk_disable(pcdev->clk_emma); - clk_put(pcdev->clk_emma); + clk_disable_unprepare(pcdev->clk_emma_ipg); + clk_put(pcdev->clk_emma_ipg); + clk_disable_unprepare(pcdev->clk_emma_ahb); + clk_put(pcdev->clk_emma_ahb); iounmap(pcdev->base_emma); release_mem_region(pcdev->res_emma->start, resource_size(pcdev->res_emma)); } @@ -1825,8 +1838,10 @@ static int __devexit mx2_camera_remove(struct platform_device *pdev) iounmap(pcdev->base_csi); if (cpu_is_mx27()) { - clk_disable(pcdev->clk_emma); - clk_put(pcdev->clk_emma); + clk_disable_unprepare(pcdev->clk_emma_ipg); + clk_put(pcdev->clk_emma_ipg); + clk_disable_unprepare(pcdev->clk_emma_ahb); + clk_put(pcdev->clk_emma_ahb); iounmap(pcdev->base_emma); res = pcdev->res_emma; release_mem_region(res->start, 
resource_size(res)); diff --git a/trunk/drivers/media/video/mx3_camera.c b/trunk/drivers/media/video/mx3_camera.c index f13643d31353..af2297dd49c8 100644 --- a/trunk/drivers/media/video/mx3_camera.c +++ b/trunk/drivers/media/video/mx3_camera.c @@ -61,15 +61,9 @@ #define MAX_VIDEO_MEM 16 -enum csi_buffer_state { - CSI_BUF_NEEDS_INIT, - CSI_BUF_PREPARED, -}; - struct mx3_camera_buffer { /* common v4l buffer stuff -- must be first */ struct vb2_buffer vb; - enum csi_buffer_state state; struct list_head queue; /* One descriptot per scatterlist (per frame) */ @@ -285,7 +279,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb) goto error; } - if (buf->state == CSI_BUF_NEEDS_INIT) { + if (!buf->txd) { sg_dma_address(sg) = vb2_dma_contig_plane_dma_addr(vb, 0); sg_dma_len(sg) = new_size; @@ -298,7 +292,6 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb) txd->callback_param = txd; txd->callback = mx3_cam_dma_done; - buf->state = CSI_BUF_PREPARED; buf->txd = txd; } else { txd = buf->txd; @@ -385,7 +378,6 @@ static void mx3_videobuf_release(struct vb2_buffer *vb) /* Doesn't hurt also if the list is empty */ list_del_init(&buf->queue); - buf->state = CSI_BUF_NEEDS_INIT; if (txd) { buf->txd = NULL; @@ -405,13 +397,13 @@ static int mx3_videobuf_init(struct vb2_buffer *vb) struct mx3_camera_dev *mx3_cam = ici->priv; struct mx3_camera_buffer *buf = to_mx3_vb(vb); - /* This is for locking debugging only */ - INIT_LIST_HEAD(&buf->queue); - sg_init_table(&buf->sg, 1); + if (!buf->txd) { + /* This is for locking debugging only */ + INIT_LIST_HEAD(&buf->queue); + sg_init_table(&buf->sg, 1); - buf->state = CSI_BUF_NEEDS_INIT; - - mx3_cam->buf_total += vb2_plane_size(vb, 0); + mx3_cam->buf_total += vb2_plane_size(vb, 0); + } return 0; } diff --git a/trunk/drivers/media/video/soc_camera.c b/trunk/drivers/media/video/soc_camera.c index b03ffecb7438..1bde255e45df 100644 --- a/trunk/drivers/media/video/soc_camera.c +++ b/trunk/drivers/media/video/soc_camera.c @@ -171,7 +171,8 @@ static int soc_camera_try_fmt(struct soc_camera_device *icd, dev_dbg(icd->pdev, "TRY_FMT(%c%c%c%c, %ux%u)\n", pixfmtstr(pix->pixelformat), pix->width, pix->height); - if (!(ici->capabilities & SOCAM_HOST_CAP_STRIDE)) { + if (pix->pixelformat != V4L2_PIX_FMT_JPEG && + !(ici->capabilities & SOCAM_HOST_CAP_STRIDE)) { pix->bytesperline = 0; pix->sizeimage = 0; } diff --git a/trunk/drivers/media/video/soc_mediabus.c b/trunk/drivers/media/video/soc_mediabus.c index 89dce097a827..a397812635d6 100644 --- a/trunk/drivers/media/video/soc_mediabus.c +++ b/trunk/drivers/media/video/soc_mediabus.c @@ -378,6 +378,9 @@ EXPORT_SYMBOL(soc_mbus_samples_per_pixel); s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf) { + if (mf->fourcc == V4L2_PIX_FMT_JPEG) + return 0; + if (mf->layout != SOC_MBUS_LAYOUT_PACKED) return width * mf->bits_per_sample / 8; @@ -400,6 +403,9 @@ EXPORT_SYMBOL(soc_mbus_bytes_per_line); s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf, u32 bytes_per_line, u32 height) { + if (mf->fourcc == V4L2_PIX_FMT_JPEG) + return 0; + if (mf->layout == SOC_MBUS_LAYOUT_PACKED) return bytes_per_line * height; diff --git a/trunk/drivers/media/video/uvc/uvc_queue.c b/trunk/drivers/media/video/uvc/uvc_queue.c index 9288fbd5001b..5577381b5bf0 100644 --- a/trunk/drivers/media/video/uvc/uvc_queue.c +++ b/trunk/drivers/media/video/uvc/uvc_queue.c @@ -338,6 +338,7 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue, if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) { buf->error = 0; 
buf->state = UVC_BUF_STATE_QUEUED; + buf->bytesused = 0; vb2_set_plane_payload(&buf->buf, 0, 0); return buf; } diff --git a/trunk/drivers/media/video/v4l2-ioctl.c b/trunk/drivers/media/video/v4l2-ioctl.c index c3b7b5f59b32..6bc47fc82fe2 100644 --- a/trunk/drivers/media/video/v4l2-ioctl.c +++ b/trunk/drivers/media/video/v4l2-ioctl.c @@ -402,8 +402,10 @@ static void v4l_print_hw_freq_seek(const void *arg, bool write_only) { const struct v4l2_hw_freq_seek *p = arg; - pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u\n", - p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing); + pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u, " + "rangelow=%u, rangehigh=%u\n", + p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing, + p->rangelow, p->rangehigh); } static void v4l_print_requestbuffers(const void *arg, bool write_only) @@ -1853,6 +1855,8 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops, .type = type, }; + if (p->index) + return -EINVAL; err = ops->vidioc_g_tuner(file, fh, &t); if (err) return err; @@ -1870,6 +1874,8 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops, if (type != V4L2_TUNER_RADIO) return -EINVAL; + if (p->index) + return -EINVAL; err = ops->vidioc_g_modulator(file, fh, &m); if (err) return err; diff --git a/trunk/drivers/mfd/Kconfig b/trunk/drivers/mfd/Kconfig index d1facef28a60..b1a146205c08 100644 --- a/trunk/drivers/mfd/Kconfig +++ b/trunk/drivers/mfd/Kconfig @@ -395,7 +395,8 @@ config MFD_TC6387XB config MFD_TC6393XB bool "Support Toshiba TC6393XB" - depends on GPIOLIB && ARM && HAVE_CLK + depends on ARM && HAVE_CLK + select GPIOLIB select MFD_CORE select MFD_TMIO help diff --git a/trunk/drivers/mfd/asic3.c b/trunk/drivers/mfd/asic3.c index 383421bf5760..683e18a23329 100644 --- a/trunk/drivers/mfd/asic3.c +++ b/trunk/drivers/mfd/asic3.c @@ -925,6 +925,7 @@ static int __init asic3_mfd_probe(struct platform_device *pdev, goto out; } + ret = 0; if (pdata->leds) { int i; diff --git a/trunk/drivers/mfd/ezx-pcap.c b/trunk/drivers/mfd/ezx-pcap.c index 43a76c41cfcc..db662e2dcfa5 100644 --- a/trunk/drivers/mfd/ezx-pcap.c +++ b/trunk/drivers/mfd/ezx-pcap.c @@ -202,7 +202,7 @@ static void pcap_isr_work(struct work_struct *work) } local_irq_enable(); ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr); - } while (gpio_get_value(irq_to_gpio(pcap->spi->irq))); + } while (gpio_get_value(pdata->gpio)); } static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc) diff --git a/trunk/drivers/misc/mei/interrupt.c b/trunk/drivers/misc/mei/interrupt.c index c6ffbbe5a6c0..d78c05e693f7 100644 --- a/trunk/drivers/misc/mei/interrupt.c +++ b/trunk/drivers/misc/mei/interrupt.c @@ -1253,7 +1253,7 @@ static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list, if (dev->wd_timeout) *slots -= mei_data2slots(MEI_START_WD_DATA_SIZE); else - *slots -= mei_data2slots(MEI_START_WD_DATA_SIZE); + *slots -= mei_data2slots(MEI_WD_PARAMS_SIZE); } } if (dev->stop) diff --git a/trunk/drivers/misc/mei/main.c b/trunk/drivers/misc/mei/main.c index 092330208869..7422c7652845 100644 --- a/trunk/drivers/misc/mei/main.c +++ b/trunk/drivers/misc/mei/main.c @@ -924,6 +924,27 @@ static struct miscdevice mei_misc_device = { .minor = MISC_DYNAMIC_MINOR, }; +/** + * mei_quirk_probe - probe for devices that doesn't valid ME interface + * @pdev: PCI device structure + * @ent: entry into pci_device_table + * + * returns true if ME Interface is valid, false otherwise + */ +static bool __devinit mei_quirk_probe(struct pci_dev 
*pdev, + const struct pci_device_id *ent) +{ + u32 reg; + if (ent->device == MEI_DEV_ID_PBG_1) { + pci_read_config_dword(pdev, 0x48, ®); + /* make sure that bit 9 is up and bit 10 is down */ + if ((reg & 0x600) == 0x200) { + dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n"); + return false; + } + } + return true; +} /** * mei_probe - Device Initialization Routine * @@ -939,6 +960,12 @@ static int __devinit mei_probe(struct pci_dev *pdev, int err; mutex_lock(&mei_mutex); + + if (!mei_quirk_probe(pdev, ent)) { + err = -ENODEV; + goto end; + } + if (mei_device) { err = -EEXIST; goto end; diff --git a/trunk/drivers/misc/sgi-xp/xpc_uv.c b/trunk/drivers/misc/sgi-xp/xpc_uv.c index 87b251ab6ec5..b9e2000969f0 100644 --- a/trunk/drivers/misc/sgi-xp/xpc_uv.c +++ b/trunk/drivers/misc/sgi-xp/xpc_uv.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include #include #include @@ -59,6 +61,8 @@ static struct xpc_heartbeat_uv *xpc_heartbeat_uv; XPC_NOTIFY_MSG_SIZE_UV) #define XPC_NOTIFY_IRQ_NAME "xpc_notify" +static int xpc_mq_node = -1; + static struct xpc_gru_mq_uv *xpc_activate_mq_uv; static struct xpc_gru_mq_uv *xpc_notify_mq_uv; @@ -109,11 +113,8 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name) #if defined CONFIG_X86_64 mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset, UV_AFFINITY_CPU); - if (mq->irq < 0) { - dev_err(xpc_part, "uv_setup_irq() returned error=%d\n", - -mq->irq); + if (mq->irq < 0) return mq->irq; - } mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset); @@ -238,8 +239,9 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name, mq->mmr_blade = uv_cpu_to_blade_id(cpu); nid = cpu_to_node(cpu); - page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, - pg_order); + page = alloc_pages_exact_node(nid, + GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, + pg_order); if (page == NULL) { dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " "bytes of memory on nid=%d for GRU mq\n", mq_size, nid); @@ -1731,9 +1733,50 @@ static struct xpc_arch_operations xpc_arch_ops_uv = { .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv, }; +static int +xpc_init_mq_node(int nid) +{ + int cpu; + + get_online_cpus(); + + for_each_cpu(cpu, cpumask_of_node(nid)) { + xpc_activate_mq_uv = + xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid, + XPC_ACTIVATE_IRQ_NAME, + xpc_handle_activate_IRQ_uv); + if (!IS_ERR(xpc_activate_mq_uv)) + break; + } + if (IS_ERR(xpc_activate_mq_uv)) { + put_online_cpus(); + return PTR_ERR(xpc_activate_mq_uv); + } + + for_each_cpu(cpu, cpumask_of_node(nid)) { + xpc_notify_mq_uv = + xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid, + XPC_NOTIFY_IRQ_NAME, + xpc_handle_notify_IRQ_uv); + if (!IS_ERR(xpc_notify_mq_uv)) + break; + } + if (IS_ERR(xpc_notify_mq_uv)) { + xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); + put_online_cpus(); + return PTR_ERR(xpc_notify_mq_uv); + } + + put_online_cpus(); + return 0; +} + int xpc_init_uv(void) { + int nid; + int ret = 0; + xpc_arch_ops = xpc_arch_ops_uv; if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) { @@ -1742,21 +1785,21 @@ xpc_init_uv(void) return -E2BIG; } - xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, - XPC_ACTIVATE_IRQ_NAME, - xpc_handle_activate_IRQ_uv); - if (IS_ERR(xpc_activate_mq_uv)) - return PTR_ERR(xpc_activate_mq_uv); + if (xpc_mq_node < 0) + for_each_online_node(nid) { + ret = xpc_init_mq_node(nid); - xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, 
- XPC_NOTIFY_IRQ_NAME, - xpc_handle_notify_IRQ_uv); - if (IS_ERR(xpc_notify_mq_uv)) { - xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); - return PTR_ERR(xpc_notify_mq_uv); - } + if (!ret) + break; + } + else + ret = xpc_init_mq_node(xpc_mq_node); - return 0; + if (ret < 0) + dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n", + -ret); + + return ret; } void @@ -1765,3 +1808,6 @@ xpc_exit_uv(void) xpc_destroy_gru_mq_uv(xpc_notify_mq_uv); xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); } + +module_param(xpc_mq_node, int, 0); +MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues."); diff --git a/trunk/drivers/misc/ti-st/st_ll.c b/trunk/drivers/misc/ti-st/st_ll.c index 1ff460a8e9c7..93b4d67cc4a3 100644 --- a/trunk/drivers/misc/ti-st/st_ll.c +++ b/trunk/drivers/misc/ti-st/st_ll.c @@ -87,7 +87,7 @@ static void ll_device_want_to_wakeup(struct st_data_s *st_data) /* communicate to platform about chip wakeup */ kim_data = st_data->kim_data; pdata = kim_data->kim_pdev->dev.platform_data; - if (pdata->chip_asleep) + if (pdata->chip_awake) pdata->chip_awake(NULL); } diff --git a/trunk/drivers/mmc/card/block.c b/trunk/drivers/mmc/card/block.c index f1c84decb192..172a768036d8 100644 --- a/trunk/drivers/mmc/card/block.c +++ b/trunk/drivers/mmc/card/block.c @@ -1411,7 +1411,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) /* complete ongoing async transfer before issuing discard */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); - if (req->cmd_flags & REQ_SECURE) + if (req->cmd_flags & REQ_SECURE && + !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN)) ret = mmc_blk_issue_secdiscard_rq(mq, req); else ret = mmc_blk_issue_discard_rq(mq, req); @@ -1716,6 +1717,7 @@ static int mmc_add_disk(struct mmc_blk_data *md) #define CID_MANFID_SANDISK 0x2 #define CID_MANFID_TOSHIBA 0x11 #define CID_MANFID_MICRON 0x13 +#define CID_MANFID_SAMSUNG 0x15 static const struct mmc_fixup blk_fixups[] = { @@ -1752,6 +1754,28 @@ static const struct mmc_fixup blk_fixups[] = MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, MMC_QUIRK_LONG_READ_TIME), + /* + * On these Samsung MoviNAND parts, performing secure erase or + * secure trim can result in unrecoverable corruption due to a + * firmware bug. 
+ */ + MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + END_FIXUP }; diff --git a/trunk/drivers/mmc/host/atmel-mci.c b/trunk/drivers/mmc/host/atmel-mci.c index 322412cec4ee..a53c7c478e05 100644 --- a/trunk/drivers/mmc/host/atmel-mci.c +++ b/trunk/drivers/mmc/host/atmel-mci.c @@ -81,6 +81,7 @@ struct atmel_mci_caps { bool has_bad_data_ordering; bool need_reset_after_xfer; bool need_blksz_mul_4; + bool need_notbusy_for_read_ops; }; struct atmel_mci_dma { @@ -1625,7 +1626,8 @@ static void atmci_tasklet_func(unsigned long priv) __func__); atmci_set_completed(host, EVENT_XFER_COMPLETE); - if (host->data->flags & MMC_DATA_WRITE) { + if (host->caps.need_notbusy_for_read_ops || + (host->data->flags & MMC_DATA_WRITE)) { atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); state = STATE_WAITING_NOTBUSY; } else if (host->mrq->stop) { @@ -2218,6 +2220,7 @@ static void __init atmci_get_cap(struct atmel_mci *host) host->caps.has_bad_data_ordering = 1; host->caps.need_reset_after_xfer = 1; host->caps.need_blksz_mul_4 = 1; + host->caps.need_notbusy_for_read_ops = 0; /* keep only major version number */ switch (version & 0xf00) { @@ -2238,6 +2241,7 @@ static void __init atmci_get_cap(struct atmel_mci *host) case 0x200: host->caps.has_rwproof = 1; host->caps.need_blksz_mul_4 = 0; + host->caps.need_notbusy_for_read_ops = 1; case 0x100: host->caps.has_bad_data_ordering = 0; host->caps.need_reset_after_xfer = 0; diff --git a/trunk/drivers/mmc/host/bfin_sdh.c b/trunk/drivers/mmc/host/bfin_sdh.c index 03666174ca48..a17dd7363ceb 100644 --- a/trunk/drivers/mmc/host/bfin_sdh.c +++ b/trunk/drivers/mmc/host/bfin_sdh.c @@ -49,13 +49,6 @@ #define bfin_write_SDH_CFG bfin_write_RSI_CFG #endif -struct dma_desc_array { - unsigned long start_addr; - unsigned short cfg; - unsigned short x_count; - short x_modify; -} __packed; - struct sdh_host { struct mmc_host *mmc; spinlock_t lock; diff --git a/trunk/drivers/mmc/host/dw_mmc.c b/trunk/drivers/mmc/host/dw_mmc.c index 72dc3cde646d..af40d227bece 100644 --- a/trunk/drivers/mmc/host/dw_mmc.c +++ b/trunk/drivers/mmc/host/dw_mmc.c @@ -627,6 +627,7 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot) { struct dw_mci *host = slot->host; u32 div; + u32 clk_en_a; if (slot->clock != host->current_speed) { div = host->bus_hz / slot->clock; @@ -659,9 +660,11 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot) mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); - /* enable clock */ - mci_writel(host, CLKENA, ((SDMMC_CLKEN_ENABLE | - SDMMC_CLKEN_LOW_PWR) << slot->id)); + /* enable clock; only low power if no SDIO */ + clk_en_a = SDMMC_CLKEN_ENABLE << slot->id; + if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id))) + clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id; + 
mci_writel(host, CLKENA, clk_en_a); /* inform CIU */ mci_send_cmd(slot, @@ -862,6 +865,30 @@ static int dw_mci_get_cd(struct mmc_host *mmc) return present; } +/* + * Disable lower power mode. + * + * Low power mode will stop the card clock when idle. According to the + * description of the CLKENA register we should disable low power mode + * for SDIO cards if we need SDIO interrupts to work. + * + * This function is fast if low power mode is already disabled. + */ +static void dw_mci_disable_low_power(struct dw_mci_slot *slot) +{ + struct dw_mci *host = slot->host; + u32 clk_en_a; + const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id; + + clk_en_a = mci_readl(host, CLKENA); + + if (clk_en_a & clken_low_pwr) { + mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr); + mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | + SDMMC_CMD_PRV_DAT_WAIT, 0); + } +} + static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) { struct dw_mci_slot *slot = mmc_priv(mmc); @@ -871,6 +898,14 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) /* Enable/disable Slot Specific SDIO interrupt */ int_mask = mci_readl(host, INTMASK); if (enb) { + /* + * Turn off low power mode if it was enabled. This is a bit of + * a heavy operation and we disable / enable IRQs a lot, so + * we'll leave low power mode disabled and it will get + * re-enabled again in dw_mci_setup_bus(). + */ + dw_mci_disable_low_power(slot); + mci_writel(host, INTMASK, (int_mask | SDMMC_INT_SDIO(slot->id))); } else { @@ -1429,22 +1464,10 @@ static void dw_mci_read_data_pio(struct dw_mci *host) nbytes += len; remain -= len; } while (remain); - sg_miter->consumed = offset; + sg_miter->consumed = offset; status = mci_readl(host, MINTSTS); mci_writel(host, RINTSTS, SDMMC_INT_RXDR); - if (status & DW_MCI_DATA_ERROR_FLAGS) { - host->data_status = status; - data->bytes_xfered += nbytes; - sg_miter_stop(sg_miter); - host->sg = NULL; - smp_wmb(); - - set_bit(EVENT_DATA_ERROR, &host->pending_events); - - tasklet_schedule(&host->tasklet); - return; - } } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/ data->bytes_xfered += nbytes; @@ -1497,23 +1520,10 @@ static void dw_mci_write_data_pio(struct dw_mci *host) nbytes += len; remain -= len; } while (remain); - sg_miter->consumed = offset; + sg_miter->consumed = offset; status = mci_readl(host, MINTSTS); mci_writel(host, RINTSTS, SDMMC_INT_TXDR); - if (status & DW_MCI_DATA_ERROR_FLAGS) { - host->data_status = status; - data->bytes_xfered += nbytes; - sg_miter_stop(sg_miter); - host->sg = NULL; - - smp_wmb(); - - set_bit(EVENT_DATA_ERROR, &host->pending_events); - - tasklet_schedule(&host->tasklet); - return; - } } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ data->bytes_xfered += nbytes; @@ -1547,12 +1557,11 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) { struct dw_mci *host = dev_id; - u32 status, pending; + u32 pending; unsigned int pass_count = 0; int i; do { - status = mci_readl(host, RINTSTS); pending = mci_readl(host, MINTSTS); /* read-only mask reg */ /* @@ -1570,7 +1579,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) if (pending & DW_MCI_CMD_ERROR_FLAGS) { mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); - host->cmd_status = status; + host->cmd_status = pending; smp_wmb(); set_bit(EVENT_CMD_COMPLETE, &host->pending_events); } @@ -1578,18 +1587,16 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) if (pending & DW_MCI_DATA_ERROR_FLAGS) { /* if 
there is an error report DATA_ERROR */ mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); - host->data_status = status; + host->data_status = pending; smp_wmb(); set_bit(EVENT_DATA_ERROR, &host->pending_events); - if (!(pending & (SDMMC_INT_DTO | SDMMC_INT_DCRC | - SDMMC_INT_SBE | SDMMC_INT_EBE))) - tasklet_schedule(&host->tasklet); + tasklet_schedule(&host->tasklet); } if (pending & SDMMC_INT_DATA_OVER) { mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); if (!host->data_status) - host->data_status = status; + host->data_status = pending; smp_wmb(); if (host->dir_status == DW_MCI_RECV_STATUS) { if (host->sg != NULL) @@ -1613,7 +1620,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) if (pending & SDMMC_INT_CMD_DONE) { mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); - dw_mci_cmd_interrupt(host, status); + dw_mci_cmd_interrupt(host, pending); } if (pending & SDMMC_INT_CD) { diff --git a/trunk/drivers/mmc/host/mxs-mmc.c b/trunk/drivers/mmc/host/mxs-mmc.c index a51f9309ffbb..ad3fcea1269e 100644 --- a/trunk/drivers/mmc/host/mxs-mmc.c +++ b/trunk/drivers/mmc/host/mxs-mmc.c @@ -285,11 +285,11 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id) writel(stat & MXS_MMC_IRQ_BITS, host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR); + spin_unlock(&host->lock); + if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN)) mmc_signal_sdio_irq(host->mmc); - spin_unlock(&host->lock); - if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ) cmd->error = -ETIMEDOUT; else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ) @@ -644,11 +644,6 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); writel(BM_SSP_CTRL1_SDIO_IRQ_EN, host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET); - - if (readl(host->base + HW_SSP_STATUS(host)) & - BM_SSP_STATUS_SDIO_IRQ) - mmc_signal_sdio_irq(host->mmc); - } else { writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); @@ -657,6 +652,11 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) } spin_unlock_irqrestore(&host->lock, flags); + + if (enable && readl(host->base + HW_SSP_STATUS(host)) & + BM_SSP_STATUS_SDIO_IRQ) + mmc_signal_sdio_irq(host->mmc); + } static const struct mmc_host_ops mxs_mmc_ops = { diff --git a/trunk/drivers/mmc/host/omap.c b/trunk/drivers/mmc/host/omap.c index 3e8dcf8d2e05..a5999a74496a 100644 --- a/trunk/drivers/mmc/host/omap.c +++ b/trunk/drivers/mmc/host/omap.c @@ -17,10 +17,12 @@ #include #include #include +#include #include #include #include #include +#include #include #include #include @@ -128,6 +130,10 @@ struct mmc_omap_host { unsigned char id; /* 16xx chips have 2 MMC blocks */ struct clk * iclk; struct clk * fclk; + struct dma_chan *dma_rx; + u32 dma_rx_burst; + struct dma_chan *dma_tx; + u32 dma_tx_burst; struct resource *mem_res; void __iomem *virt_base; unsigned int phys_base; @@ -153,12 +159,8 @@ struct mmc_omap_host { unsigned use_dma:1; unsigned brs_received:1, dma_done:1; - unsigned dma_is_read:1; unsigned dma_in_use:1; - int dma_ch; spinlock_t dma_lock; - struct timer_list dma_timer; - unsigned dma_len; struct mmc_omap_slot *slots[OMAP_MMC_MAX_SLOTS]; struct mmc_omap_slot *current_slot; @@ -406,18 +408,25 @@ mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data, int abort) { enum dma_data_direction dma_data_dir; + struct device *dev = mmc_dev(host->mmc); + struct dma_chan *c; - BUG_ON(host->dma_ch < 0); - if (data->error) - omap_stop_dma(host->dma_ch); - /* Release DMA channel lazily */ 
- mod_timer(&host->dma_timer, jiffies + HZ); - if (data->flags & MMC_DATA_WRITE) + if (data->flags & MMC_DATA_WRITE) { dma_data_dir = DMA_TO_DEVICE; - else + c = host->dma_tx; + } else { dma_data_dir = DMA_FROM_DEVICE; - dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len, - dma_data_dir); + c = host->dma_rx; + } + if (c) { + if (data->error) { + dmaengine_terminate_all(c); + /* Claim nothing transferred on error... */ + data->bytes_xfered = 0; + } + dev = c->device->dev; + } + dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir); } static void mmc_omap_send_stop_work(struct work_struct *work) @@ -524,16 +533,6 @@ mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data) mmc_omap_xfer_done(host, data); } -static void -mmc_omap_dma_timer(unsigned long data) -{ - struct mmc_omap_host *host = (struct mmc_omap_host *) data; - - BUG_ON(host->dma_ch < 0); - omap_free_dma(host->dma_ch); - host->dma_ch = -1; -} - static void mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data) { @@ -669,7 +668,7 @@ mmc_omap_clk_timer(unsigned long data) static void mmc_omap_xfer_data(struct mmc_omap_host *host, int write) { - int n; + int n, nwords; if (host->buffer_bytes_left == 0) { host->sg_idx++; @@ -679,15 +678,23 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write) n = 64; if (n > host->buffer_bytes_left) n = host->buffer_bytes_left; + + nwords = n / 2; + nwords += n & 1; /* handle odd number of bytes to transfer */ + host->buffer_bytes_left -= n; host->total_bytes_left -= n; host->data->bytes_xfered += n; if (write) { - __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); + __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), + host->buffer, nwords); } else { - __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); + __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), + host->buffer, nwords); } + + host->buffer += nwords; } static inline void mmc_omap_report_irq(u16 status) @@ -891,159 +898,15 @@ static void mmc_omap_cover_handler(unsigned long param) jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY)); } -/* Prepare to transfer the next segment of a scatterlist */ -static void -mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data) -{ - int dma_ch = host->dma_ch; - unsigned long data_addr; - u16 buf, frame; - u32 count; - struct scatterlist *sg = &data->sg[host->sg_idx]; - int src_port = 0; - int dst_port = 0; - int sync_dev = 0; - - data_addr = host->phys_base + OMAP_MMC_REG(host, DATA); - frame = data->blksz; - count = sg_dma_len(sg); - - if ((data->blocks == 1) && (count > data->blksz)) - count = frame; - - host->dma_len = count; - - /* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx. - * Use 16 or 32 word frames when the blocksize is at least that large. - * Blocksize is usually 512 bytes; but not for some SD reads. 
- */ - if (cpu_is_omap15xx() && frame > 32) - frame = 32; - else if (frame > 64) - frame = 64; - count /= frame; - frame >>= 1; - - if (!(data->flags & MMC_DATA_WRITE)) { - buf = 0x800f | ((frame - 1) << 8); - - if (cpu_class_is_omap1()) { - src_port = OMAP_DMA_PORT_TIPB; - dst_port = OMAP_DMA_PORT_EMIFF; - } - if (cpu_is_omap24xx()) - sync_dev = OMAP24XX_DMA_MMC1_RX; - - omap_set_dma_src_params(dma_ch, src_port, - OMAP_DMA_AMODE_CONSTANT, - data_addr, 0, 0); - omap_set_dma_dest_params(dma_ch, dst_port, - OMAP_DMA_AMODE_POST_INC, - sg_dma_address(sg), 0, 0); - omap_set_dma_dest_data_pack(dma_ch, 1); - omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4); - } else { - buf = 0x0f80 | ((frame - 1) << 0); - - if (cpu_class_is_omap1()) { - src_port = OMAP_DMA_PORT_EMIFF; - dst_port = OMAP_DMA_PORT_TIPB; - } - if (cpu_is_omap24xx()) - sync_dev = OMAP24XX_DMA_MMC1_TX; - - omap_set_dma_dest_params(dma_ch, dst_port, - OMAP_DMA_AMODE_CONSTANT, - data_addr, 0, 0); - omap_set_dma_src_params(dma_ch, src_port, - OMAP_DMA_AMODE_POST_INC, - sg_dma_address(sg), 0, 0); - omap_set_dma_src_data_pack(dma_ch, 1); - omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4); - } - - /* Max limit for DMA frame count is 0xffff */ - BUG_ON(count > 0xffff); - - OMAP_MMC_WRITE(host, BUF, buf); - omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16, - frame, count, OMAP_DMA_SYNC_FRAME, - sync_dev, 0); -} - -/* A scatterlist segment completed */ -static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data) +static void mmc_omap_dma_callback(void *priv) { - struct mmc_omap_host *host = (struct mmc_omap_host *) data; - struct mmc_data *mmcdat = host->data; + struct mmc_omap_host *host = priv; + struct mmc_data *data = host->data; - if (unlikely(host->dma_ch < 0)) { - dev_err(mmc_dev(host->mmc), - "DMA callback while DMA not enabled\n"); - return; - } - /* FIXME: We really should do something to _handle_ the errors */ - if (ch_status & OMAP1_DMA_TOUT_IRQ) { - dev_err(mmc_dev(host->mmc),"DMA timeout\n"); - return; - } - if (ch_status & OMAP_DMA_DROP_IRQ) { - dev_err(mmc_dev(host->mmc), "DMA sync error\n"); - return; - } - if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) { - return; - } - mmcdat->bytes_xfered += host->dma_len; - host->sg_idx++; - if (host->sg_idx < host->sg_len) { - mmc_omap_prepare_dma(host, host->data); - omap_start_dma(host->dma_ch); - } else - mmc_omap_dma_done(host, host->data); -} - -static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data) -{ - const char *dma_dev_name; - int sync_dev, dma_ch, is_read, r; - - is_read = !(data->flags & MMC_DATA_WRITE); - del_timer_sync(&host->dma_timer); - if (host->dma_ch >= 0) { - if (is_read == host->dma_is_read) - return 0; - omap_free_dma(host->dma_ch); - host->dma_ch = -1; - } - - if (is_read) { - if (host->id == 0) { - sync_dev = OMAP_DMA_MMC_RX; - dma_dev_name = "MMC1 read"; - } else { - sync_dev = OMAP_DMA_MMC2_RX; - dma_dev_name = "MMC2 read"; - } - } else { - if (host->id == 0) { - sync_dev = OMAP_DMA_MMC_TX; - dma_dev_name = "MMC1 write"; - } else { - sync_dev = OMAP_DMA_MMC2_TX; - dma_dev_name = "MMC2 write"; - } - } - r = omap_request_dma(sync_dev, dma_dev_name, mmc_omap_dma_cb, - host, &dma_ch); - if (r != 0) { - dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r); - return r; - } - host->dma_ch = dma_ch; - host->dma_is_read = is_read; + /* If we got to the end of DMA, assume everything went well */ + data->bytes_xfered += data->blocks * data->blksz; - return 0; + mmc_omap_dma_done(host, data); } 
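
The omap.c hunks above and below move this driver from the old omap_request_dma()/omap_start_dma() interface to the generic dmaengine slave API. Stripped of the MMC specifics, the sequence the new code depends on is: request a DMA_SLAVE channel through a filter function, describe the peripheral FIFO with dmaengine_slave_config(), map and prepare the scatterlist with dmaengine_prep_slave_sg(), then submit the descriptor and kick the channel. A minimal sketch of that sequence follows; the function names my_mmc_start_dma() and my_dma_done(), the fixed burst of 32 and the header used for omap_dma_filter_fn() are illustrative assumptions, not part of the patch.

/*
 * Sketch of the dmaengine slave pattern adopted by the patch; device,
 * request-line number and FIFO address are placeholders for illustration.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/omap-dma.h>	/* omap_dma_filter_fn(); header may differ by kernel version */

static void my_dma_done(void *param)
{
	/* completion callback: the whole sg list has been transferred */
}

static int my_mmc_start_dma(dma_addr_t fifo_addr, struct scatterlist *sg,
			    int sg_len, bool write, unsigned int sig)
{
	dma_cap_mask_t mask;
	struct dma_slave_config cfg = { };
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *c;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* pick a channel that can service the given DMA request line */
	c = dma_request_channel(mask, omap_dma_filter_fn, &sig);
	if (!c)
		return -ENODEV;		/* caller falls back to PIO */

	/* describe the (constant-address) peripheral FIFO */
	cfg.src_addr = fifo_addr;
	cfg.dst_addr = fifo_addr;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	cfg.src_maxburst = 32;		/* assumed burst; the driver derives it from blksz */
	cfg.dst_maxburst = 32;
	if (dmaengine_slave_config(c, &cfg))
		goto release;

	sg_len = dma_map_sg(c->device->dev, sg, sg_len,
			    write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!sg_len)
		goto release;

	tx = dmaengine_prep_slave_sg(c, sg, sg_len,
				     write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		goto release;		/* unmap omitted for brevity */

	tx->callback = my_dma_done;
	tx->callback_param = NULL;
	dmaengine_submit(tx);		/* queue the descriptor ... */
	dma_async_issue_pending(c);	/* ... and start the channel */
	return 0;

release:
	dma_release_channel(c);
	return -EIO;
}

As in the patch itself, any failure along this path simply gives the channel back so the caller can revert to the PIO code; the channel request and slave configuration are done once, while prepare/submit/issue_pending run per request.
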
static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req) @@ -1118,33 +981,85 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req) host->sg_idx = 0; if (use_dma) { - if (mmc_omap_get_dma_channel(host, data) == 0) { - enum dma_data_direction dma_data_dir; - - if (data->flags & MMC_DATA_WRITE) - dma_data_dir = DMA_TO_DEVICE; - else - dma_data_dir = DMA_FROM_DEVICE; - - host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, - sg_len, dma_data_dir); - host->total_bytes_left = 0; - mmc_omap_prepare_dma(host, req->data); - host->brs_received = 0; - host->dma_done = 0; - host->dma_in_use = 1; - } else - use_dma = 0; + enum dma_data_direction dma_data_dir; + struct dma_async_tx_descriptor *tx; + struct dma_chan *c; + u32 burst, *bp; + u16 buf; + + /* + * FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx + * and 24xx. Use 16 or 32 word frames when the + * blocksize is at least that large. Blocksize is + * usually 512 bytes; but not for some SD reads. + */ + burst = cpu_is_omap15xx() ? 32 : 64; + if (burst > data->blksz) + burst = data->blksz; + + burst >>= 1; + + if (data->flags & MMC_DATA_WRITE) { + c = host->dma_tx; + bp = &host->dma_tx_burst; + buf = 0x0f80 | (burst - 1) << 0; + dma_data_dir = DMA_TO_DEVICE; + } else { + c = host->dma_rx; + bp = &host->dma_rx_burst; + buf = 0x800f | (burst - 1) << 8; + dma_data_dir = DMA_FROM_DEVICE; + } + + if (!c) + goto use_pio; + + /* Only reconfigure if we have a different burst size */ + if (*bp != burst) { + struct dma_slave_config cfg; + + cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA); + cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA); + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + cfg.src_maxburst = burst; + cfg.dst_maxburst = burst; + + if (dmaengine_slave_config(c, &cfg)) + goto use_pio; + + *bp = burst; + } + + host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len, + dma_data_dir); + if (host->sg_len == 0) + goto use_pio; + + tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len, + data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx) + goto use_pio; + + OMAP_MMC_WRITE(host, BUF, buf); + + tx->callback = mmc_omap_dma_callback; + tx->callback_param = host; + dmaengine_submit(tx); + host->brs_received = 0; + host->dma_done = 0; + host->dma_in_use = 1; + return; } + use_pio: /* Revert to PIO? */ - if (!use_dma) { - OMAP_MMC_WRITE(host, BUF, 0x1f1f); - host->total_bytes_left = data->blocks * block_size; - host->sg_len = sg_len; - mmc_omap_sg_to_buf(host); - host->dma_in_use = 0; - } + OMAP_MMC_WRITE(host, BUF, 0x1f1f); + host->total_bytes_left = data->blocks * block_size; + host->sg_len = sg_len; + mmc_omap_sg_to_buf(host); + host->dma_in_use = 0; } static void mmc_omap_start_request(struct mmc_omap_host *host, @@ -1157,8 +1072,12 @@ static void mmc_omap_start_request(struct mmc_omap_host *host, /* only touch fifo AFTER the controller readies it */ mmc_omap_prepare_data(host, req); mmc_omap_start_command(host, req->cmd); - if (host->dma_in_use) - omap_start_dma(host->dma_ch); + if (host->dma_in_use) { + struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ? 
+ host->dma_tx : host->dma_rx; + + dma_async_issue_pending(c); + } } static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req) @@ -1400,6 +1319,8 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev) struct omap_mmc_platform_data *pdata = pdev->dev.platform_data; struct mmc_omap_host *host = NULL; struct resource *res; + dma_cap_mask_t mask; + unsigned sig; int i, ret = 0; int irq; @@ -1439,7 +1360,6 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev) setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host); spin_lock_init(&host->dma_lock); - setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host); spin_lock_init(&host->slot_lock); init_waitqueue_head(&host->slot_wq); @@ -1450,11 +1370,7 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev) host->id = pdev->id; host->mem_res = res; host->irq = irq; - host->use_dma = 1; - host->dev->dma_mask = &pdata->dma_mask; - host->dma_ch = -1; - host->irq = irq; host->phys_base = host->mem_res->start; host->virt_base = ioremap(res->start, resource_size(res)); @@ -1474,9 +1390,48 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev) goto err_free_iclk; } + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + host->dma_tx_burst = -1; + host->dma_rx_burst = -1; + + if (cpu_is_omap24xx()) + sig = host->id == 0 ? OMAP24XX_DMA_MMC1_TX : OMAP24XX_DMA_MMC2_TX; + else + sig = host->id == 0 ? OMAP_DMA_MMC_TX : OMAP_DMA_MMC2_TX; + host->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig); +#if 0 + if (!host->dma_tx) { + dev_err(host->dev, "unable to obtain TX DMA engine channel %u\n", + sig); + goto err_dma; + } +#else + if (!host->dma_tx) + dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n", + sig); +#endif + if (cpu_is_omap24xx()) + sig = host->id == 0 ? OMAP24XX_DMA_MMC1_RX : OMAP24XX_DMA_MMC2_RX; + else + sig = host->id == 0 ? 
OMAP_DMA_MMC_RX : OMAP_DMA_MMC2_RX; + host->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig); +#if 0 + if (!host->dma_rx) { + dev_err(host->dev, "unable to obtain RX DMA engine channel %u\n", + sig); + goto err_dma; + } +#else + if (!host->dma_rx) + dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n", + sig); +#endif + ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host); if (ret) - goto err_free_fclk; + goto err_free_dma; if (pdata->init != NULL) { ret = pdata->init(&pdev->dev); @@ -1510,7 +1465,11 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev) pdata->cleanup(&pdev->dev); err_free_irq: free_irq(host->irq, host); -err_free_fclk: +err_free_dma: + if (host->dma_tx) + dma_release_channel(host->dma_tx); + if (host->dma_rx) + dma_release_channel(host->dma_rx); clk_put(host->fclk); err_free_iclk: clk_disable(host->iclk); @@ -1545,6 +1504,11 @@ static int __devexit mmc_omap_remove(struct platform_device *pdev) clk_disable(host->iclk); clk_put(host->iclk); + if (host->dma_tx) + dma_release_channel(host->dma_tx); + if (host->dma_rx) + dma_release_channel(host->dma_rx); + iounmap(host->virt_base); release_mem_region(pdev->resource[0].start, pdev->resource[0].end - pdev->resource[0].start + 1); diff --git a/trunk/drivers/mmc/host/omap_hsmmc.c b/trunk/drivers/mmc/host/omap_hsmmc.c index bc28627af66b..3a09f93cc3b6 100644 --- a/trunk/drivers/mmc/host/omap_hsmmc.c +++ b/trunk/drivers/mmc/host/omap_hsmmc.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -29,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -37,7 +39,6 @@ #include #include #include -#include #include #include #include @@ -166,7 +167,8 @@ struct omap_hsmmc_host { int suspended; int irq; int use_dma, dma_ch; - int dma_line_tx, dma_line_rx; + struct dma_chan *tx_chan; + struct dma_chan *rx_chan; int slot_id; int response_busy; int context_loss; @@ -797,6 +799,12 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data) return DMA_FROM_DEVICE; } +static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host, + struct mmc_data *data) +{ + return data->flags & MMC_DATA_WRITE ? 
host->tx_chan : host->rx_chan; +} + static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq) { int dma_ch; @@ -889,10 +897,13 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno) spin_unlock_irqrestore(&host->irq_lock, flags); if (host->use_dma && dma_ch != -1) { - dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, - host->data->sg_len, + struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data); + + dmaengine_terminate_all(chan); + dma_unmap_sg(chan->device->dev, + host->data->sg, host->data->sg_len, omap_hsmmc_get_dma_dir(host, host->data)); - omap_free_dma(dma_ch); + host->data->host_cookie = 0; } host->data = NULL; @@ -1190,90 +1201,29 @@ static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id) return IRQ_HANDLED; } -static int omap_hsmmc_get_dma_sync_dev(struct omap_hsmmc_host *host, - struct mmc_data *data) -{ - int sync_dev; - - if (data->flags & MMC_DATA_WRITE) - sync_dev = host->dma_line_tx; - else - sync_dev = host->dma_line_rx; - return sync_dev; -} - -static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host, - struct mmc_data *data, - struct scatterlist *sgl) -{ - int blksz, nblk, dma_ch; - - dma_ch = host->dma_ch; - if (data->flags & MMC_DATA_WRITE) { - omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, - (host->mapbase + OMAP_HSMMC_DATA), 0, 0); - omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC, - sg_dma_address(sgl), 0, 0); - } else { - omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, - (host->mapbase + OMAP_HSMMC_DATA), 0, 0); - omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC, - sg_dma_address(sgl), 0, 0); - } - - blksz = host->data->blksz; - nblk = sg_dma_len(sgl) / blksz; - - omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32, - blksz / 4, nblk, OMAP_DMA_SYNC_FRAME, - omap_hsmmc_get_dma_sync_dev(host, data), - !(data->flags & MMC_DATA_WRITE)); - - omap_start_dma(dma_ch); -} - -/* - * DMA call back function - */ -static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data) +static void omap_hsmmc_dma_callback(void *param) { - struct omap_hsmmc_host *host = cb_data; + struct omap_hsmmc_host *host = param; + struct dma_chan *chan; struct mmc_data *data; - int dma_ch, req_in_progress; - unsigned long flags; - - if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) { - dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n", - ch_status); - return; - } + int req_in_progress; - spin_lock_irqsave(&host->irq_lock, flags); + spin_lock_irq(&host->irq_lock); if (host->dma_ch < 0) { - spin_unlock_irqrestore(&host->irq_lock, flags); + spin_unlock_irq(&host->irq_lock); return; } data = host->mrq->data; - host->dma_sg_idx++; - if (host->dma_sg_idx < host->dma_len) { - /* Fire up the next transfer. 
*/ - omap_hsmmc_config_dma_params(host, data, - data->sg + host->dma_sg_idx); - spin_unlock_irqrestore(&host->irq_lock, flags); - return; - } - + chan = omap_hsmmc_get_dma_chan(host, data); if (!data->host_cookie) - dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + dma_unmap_sg(chan->device->dev, + data->sg, data->sg_len, omap_hsmmc_get_dma_dir(host, data)); req_in_progress = host->req_in_progress; - dma_ch = host->dma_ch; host->dma_ch = -1; - spin_unlock_irqrestore(&host->irq_lock, flags); - - omap_free_dma(dma_ch); + spin_unlock_irq(&host->irq_lock); /* If DMA has finished after TC, complete the request */ if (!req_in_progress) { @@ -1286,7 +1236,8 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data) static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host, struct mmc_data *data, - struct omap_hsmmc_next *next) + struct omap_hsmmc_next *next, + struct dma_chan *chan) { int dma_len; @@ -1301,8 +1252,7 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host, /* Check if next job is already prepared */ if (next || (!next && data->host_cookie != host->next_data.cookie)) { - dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, - data->sg_len, + dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len, omap_hsmmc_get_dma_dir(host, data)); } else { @@ -1329,8 +1279,11 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host, static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, struct mmc_request *req) { - int dma_ch = 0, ret = 0, i; + struct dma_slave_config cfg; + struct dma_async_tx_descriptor *tx; + int ret = 0, i; struct mmc_data *data = req->data; + struct dma_chan *chan; /* Sanity check: all the SG entries must be aligned by block size. */ for (i = 0; i < data->sg_len; i++) { @@ -1348,22 +1301,41 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, BUG_ON(host->dma_ch != -1); - ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data), - "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch); - if (ret != 0) { - dev_err(mmc_dev(host->mmc), - "%s: omap_request_dma() failed with %d\n", - mmc_hostname(host->mmc), ret); + chan = omap_hsmmc_get_dma_chan(host, data); + + cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA; + cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.src_maxburst = data->blksz / 4; + cfg.dst_maxburst = data->blksz / 4; + + ret = dmaengine_slave_config(chan, &cfg); + if (ret) return ret; - } - ret = omap_hsmmc_pre_dma_transfer(host, data, NULL); + + ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan); if (ret) return ret; - host->dma_ch = dma_ch; - host->dma_sg_idx = 0; + tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, + data->flags & MMC_DATA_WRITE ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx) { + dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n"); + /* FIXME: cleanup */ + return -1; + } + + tx->callback = omap_hsmmc_dma_callback; + tx->callback_param = host; - omap_hsmmc_config_dma_params(host, data, data->sg); + /* Does not fail */ + dmaengine_submit(tx); + + host->dma_ch = 1; + + dma_async_issue_pending(chan); return 0; } @@ -1445,11 +1417,11 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, struct omap_hsmmc_host *host = mmc_priv(mmc); struct mmc_data *data = mrq->data; - if (host->use_dma) { - if (data->host_cookie) - dma_unmap_sg(mmc_dev(host->mmc), data->sg, - data->sg_len, - omap_hsmmc_get_dma_dir(host, data)); + if (host->use_dma && data->host_cookie) { + struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data); + + dma_unmap_sg(c->device->dev, data->sg, data->sg_len, + omap_hsmmc_get_dma_dir(host, data)); data->host_cookie = 0; } } @@ -1464,10 +1436,13 @@ static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq, return ; } - if (host->use_dma) + if (host->use_dma) { + struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data); + if (omap_hsmmc_pre_dma_transfer(host, mrq->data, - &host->next_data)) + &host->next_data, c)) mrq->data->host_cookie = 0; + } } /* @@ -1800,6 +1775,8 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev) struct resource *res; int ret, irq; const struct of_device_id *match; + dma_cap_mask_t mask; + unsigned tx_req, rx_req; match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev); if (match) { @@ -1844,7 +1821,6 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev) host->pdata = pdata; host->dev = &pdev->dev; host->use_dma = 1; - host->dev->dma_mask = &pdata->dma_mask; host->dma_ch = -1; host->irq = irq; host->slot_id = 0; @@ -1934,7 +1910,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev) ret = -ENXIO; goto err_irq; } - host->dma_line_tx = res->start; + tx_req = res->start; res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx"); if (!res) { @@ -1942,7 +1918,24 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev) ret = -ENXIO; goto err_irq; } - host->dma_line_rx = res->start; + rx_req = res->start; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req); + if (!host->rx_chan) { + dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req); + ret = -ENXIO; + goto err_irq; + } + + host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req); + if (!host->tx_chan) { + dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req); + ret = -ENXIO; + goto err_irq; + } /* Request IRQ for MMC operations */ ret = request_irq(host->irq, omap_hsmmc_irq, 0, @@ -2021,6 +2014,10 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev) err_irq_cd_init: free_irq(host->irq, host); err_irq: + if (host->tx_chan) + dma_release_channel(host->tx_chan); + if (host->rx_chan) + dma_release_channel(host->rx_chan); pm_runtime_put_sync(host->dev); pm_runtime_disable(host->dev); clk_put(host->fclk); @@ -2056,6 +2053,11 @@ static int __devexit omap_hsmmc_remove(struct platform_device *pdev) if (mmc_slot(host).card_detect_irq) free_irq(mmc_slot(host).card_detect_irq, host); + if (host->tx_chan) + dma_release_channel(host->tx_chan); + if (host->rx_chan) + dma_release_channel(host->rx_chan); + 
pm_runtime_put_sync(host->dev); pm_runtime_disable(host->dev); clk_put(host->fclk); diff --git a/trunk/drivers/mmc/host/sdhci-esdhc.h b/trunk/drivers/mmc/host/sdhci-esdhc.h index b97b2f5dafdb..d25f9ab9a54d 100644 --- a/trunk/drivers/mmc/host/sdhci-esdhc.h +++ b/trunk/drivers/mmc/host/sdhci-esdhc.h @@ -48,14 +48,14 @@ static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock) int div = 1; u32 temp; + if (clock == 0) + goto out; + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK); sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); - if (clock == 0) - goto out; - while (host->max_clk / pre_div / 16 > clock && pre_div < 256) pre_div *= 2; diff --git a/trunk/drivers/mtd/maps/uclinux.c b/trunk/drivers/mtd/maps/uclinux.c index cfff454f628b..c3bb304eca07 100644 --- a/trunk/drivers/mtd/maps/uclinux.c +++ b/trunk/drivers/mtd/maps/uclinux.c @@ -19,14 +19,13 @@ #include #include #include +#include /****************************************************************************/ -extern char _ebss; - struct map_info uclinux_ram_map = { .name = "RAM", - .phys = (unsigned long)&_ebss, + .phys = (unsigned long)__bss_stop, .size = 0, }; diff --git a/trunk/drivers/mtd/nand/Kconfig b/trunk/drivers/mtd/nand/Kconfig index 31bb7e5b504a..8ca417614c57 100644 --- a/trunk/drivers/mtd/nand/Kconfig +++ b/trunk/drivers/mtd/nand/Kconfig @@ -480,7 +480,7 @@ config MTD_NAND_NANDSIM config MTD_NAND_GPMI_NAND bool "GPMI NAND Flash Controller driver" - depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q) + depends on MTD_NAND && MXS_DMA help Enables NAND Flash support for IMX23 or IMX28. The GPMI controller is very powerful, with the help of BCH diff --git a/trunk/drivers/mtd/nand/omap2.c b/trunk/drivers/mtd/nand/omap2.c index d7f681d0c9b9..ac4fd756eda3 100644 --- a/trunk/drivers/mtd/nand/omap2.c +++ b/trunk/drivers/mtd/nand/omap2.c @@ -9,6 +9,7 @@ */ #include +#include #include #include #include @@ -18,6 +19,7 @@ #include #include #include +#include #include #include @@ -123,7 +125,7 @@ struct omap_nand_info { int gpmc_cs; unsigned long phys_base; struct completion comp; - int dma_ch; + struct dma_chan *dma; int gpmc_irq; enum { OMAP_NAND_IO_READ = 0, /* read */ @@ -336,12 +338,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd, } /* - * omap_nand_dma_cb: callback on the completion of dma transfer - * @lch: logical channel - * @ch_satuts: channel status + * omap_nand_dma_callback: callback on the completion of dma transfer * @data: pointer to completion data structure */ -static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) +static void omap_nand_dma_callback(void *data) { complete((struct completion *) data); } @@ -358,17 +358,13 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, { struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, mtd); + struct dma_async_tx_descriptor *tx; enum dma_data_direction dir = is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE; - dma_addr_t dma_addr; - int ret; + struct scatterlist sg; unsigned long tim, limit; - - /* The fifo depth is 64 bytes max. - * But configure the FIFO-threahold to 32 to get a sync at each frame - * and frame length is 32 bytes. 
- */ - int buf_len = len >> 6; + unsigned n; + int ret; if (addr >= high_memory) { struct page *p1; @@ -382,40 +378,33 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK); } - dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir); - if (dma_mapping_error(&info->pdev->dev, dma_addr)) { + sg_init_one(&sg, addr, len); + n = dma_map_sg(info->dma->device->dev, &sg, 1, dir); + if (n == 0) { dev_err(&info->pdev->dev, "Couldn't DMA map a %d byte buffer\n", len); goto out_copy; } - if (is_write) { - omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, - info->phys_base, 0, 0); - omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC, - dma_addr, 0, 0); - omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32, - 0x10, buf_len, OMAP_DMA_SYNC_FRAME, - OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC); - } else { - omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, - info->phys_base, 0, 0); - omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC, - dma_addr, 0, 0); - omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32, - 0x10, buf_len, OMAP_DMA_SYNC_FRAME, - OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC); - } - /* configure and start prefetch transfer */ + tx = dmaengine_prep_slave_sg(info->dma, &sg, n, + is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx) + goto out_copy_unmap; + + tx->callback = omap_nand_dma_callback; + tx->callback_param = &info->comp; + dmaengine_submit(tx); + + /* configure and start prefetch transfer */ ret = gpmc_prefetch_enable(info->gpmc_cs, - PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write); + PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write); if (ret) /* PFPW engine is busy, use cpu copy method */ goto out_copy_unmap; init_completion(&info->comp); - - omap_start_dma(info->dma_ch); + dma_async_issue_pending(info->dma); /* setup and start DMA using dma_addr */ wait_for_completion(&info->comp); @@ -427,11 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, /* disable and stop the PFPW engine */ gpmc_prefetch_reset(info->gpmc_cs); - dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); + dma_unmap_sg(info->dma->device->dev, &sg, 1, dir); return 0; out_copy_unmap: - dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); + dma_unmap_sg(info->dma->device->dev, &sg, 1, dir); out_copy: if (info->nand.options & NAND_BUSWIDTH_16) is_write == 0 ? 
omap_read_buf16(mtd, (u_char *) addr, len) @@ -1164,6 +1153,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) struct omap_nand_platform_data *pdata; int err; int i, offset; + dma_cap_mask_t mask; + unsigned sig; pdata = pdev->dev.platform_data; if (pdata == NULL) { @@ -1244,18 +1235,30 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) break; case NAND_OMAP_PREFETCH_DMA: - err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND", - omap_nand_dma_cb, &info->comp, &info->dma_ch); - if (err < 0) { - info->dma_ch = -1; - dev_err(&pdev->dev, "DMA request failed!\n"); + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + sig = OMAP24XX_DMA_GPMC; + info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig); + if (!info->dma) { + dev_err(&pdev->dev, "DMA engine request failed\n"); + err = -ENXIO; goto out_release_mem_region; } else { - omap_set_dma_dest_burst_mode(info->dma_ch, - OMAP_DMA_DATA_BURST_16); - omap_set_dma_src_burst_mode(info->dma_ch, - OMAP_DMA_DATA_BURST_16); - + struct dma_slave_config cfg; + + memset(&cfg, 0, sizeof(cfg)); + cfg.src_addr = info->phys_base; + cfg.dst_addr = info->phys_base; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.src_maxburst = 16; + cfg.dst_maxburst = 16; + err = dmaengine_slave_config(info->dma, &cfg); + if (err) { + dev_err(&pdev->dev, "DMA engine slave config failed: %d\n", + err); + goto out_release_mem_region; + } info->nand.read_buf = omap_read_buf_dma_pref; info->nand.write_buf = omap_write_buf_dma_pref; } @@ -1358,6 +1361,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) return 0; out_release_mem_region: + if (info->dma) + dma_release_channel(info->dma); release_mem_region(info->phys_base, NAND_IO_SIZE); out_free_info: kfree(info); @@ -1373,8 +1378,8 @@ static int omap_nand_remove(struct platform_device *pdev) omap3_free_bch(&info->mtd); platform_set_drvdata(pdev, NULL); - if (info->dma_ch != -1) - omap_free_dma(info->dma_ch); + if (info->dma) + dma_release_channel(info->dma); if (info->gpmc_irq) free_irq(info->gpmc_irq, info); diff --git a/trunk/drivers/mtd/ubi/vtbl.c b/trunk/drivers/mtd/ubi/vtbl.c index 437bc193e170..568307cc7caf 100644 --- a/trunk/drivers/mtd/ubi/vtbl.c +++ b/trunk/drivers/mtd/ubi/vtbl.c @@ -340,7 +340,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai, * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'. 
*/ err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0); - kfree(new_aeb); + kmem_cache_free(ai->aeb_slab_cache, new_aeb); ubi_free_vid_hdr(ubi, vid_hdr); return err; @@ -353,7 +353,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai, list_add(&new_aeb->u.list, &ai->erase); goto retry; } - kfree(new_aeb); + kmem_cache_free(ai->aeb_slab_cache, new_aeb); out_free: ubi_free_vid_hdr(ubi, vid_hdr); return err; diff --git a/trunk/drivers/net/appletalk/cops.c b/trunk/drivers/net/appletalk/cops.c index 545c09ed9079..cff6f023c03a 100644 --- a/trunk/drivers/net/appletalk/cops.c +++ b/trunk/drivers/net/appletalk/cops.c @@ -996,9 +996,7 @@ static int __init cops_module_init(void) printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n", cardname); cops_dev = cops_probe(-1); - if (IS_ERR(cops_dev)) - return PTR_ERR(cops_dev); - return 0; + return PTR_RET(cops_dev); } static void __exit cops_module_exit(void) diff --git a/trunk/drivers/net/appletalk/ltpc.c b/trunk/drivers/net/appletalk/ltpc.c index 0910dce3996d..b5782cdf0bca 100644 --- a/trunk/drivers/net/appletalk/ltpc.c +++ b/trunk/drivers/net/appletalk/ltpc.c @@ -1243,9 +1243,7 @@ static int __init ltpc_module_init(void) "ltpc: Autoprobing is not recommended for modules\n"); dev_ltpc = ltpc_probe(); - if (IS_ERR(dev_ltpc)) - return PTR_ERR(dev_ltpc); - return 0; + return PTR_RET(dev_ltpc); } module_init(ltpc_module_init); #endif diff --git a/trunk/drivers/net/bonding/bond_main.c b/trunk/drivers/net/bonding/bond_main.c index 6fae5f3ec7f6..d688a8af432c 100644 --- a/trunk/drivers/net/bonding/bond_main.c +++ b/trunk/drivers/net/bonding/bond_main.c @@ -398,7 +398,7 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping)); skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping; - if (unlikely(netpoll_tx_running(slave_dev))) + if (unlikely(netpoll_tx_running(bond->dev))) bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); else dev_queue_xmit(skb); @@ -1235,12 +1235,12 @@ static inline int slave_enable_netpoll(struct slave *slave) struct netpoll *np; int err = 0; - np = kzalloc(sizeof(*np), GFP_KERNEL); + np = kzalloc(sizeof(*np), GFP_ATOMIC); err = -ENOMEM; if (!np) goto out; - err = __netpoll_setup(np, slave->dev); + err = __netpoll_setup(np, slave->dev, GFP_ATOMIC); if (err) { kfree(np); goto out; @@ -1257,9 +1257,7 @@ static inline void slave_disable_netpoll(struct slave *slave) return; slave->np = NULL; - synchronize_rcu_bh(); - __netpoll_cleanup(np); - kfree(np); + __netpoll_free_rcu(np); } static inline bool slave_dev_support_netpoll(struct net_device *slave_dev) { @@ -1292,7 +1290,7 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev) read_unlock(&bond->lock); } -static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) +static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp) { struct bonding *bond = netdev_priv(dev); struct slave *slave; diff --git a/trunk/drivers/net/can/sja1000/sja1000_platform.c b/trunk/drivers/net/can/sja1000/sja1000_platform.c index 4f50145f6483..662c5f7eb0c5 100644 --- a/trunk/drivers/net/can/sja1000/sja1000_platform.c +++ b/trunk/drivers/net/can/sja1000/sja1000_platform.c @@ -109,7 +109,9 @@ static int sp_probe(struct platform_device *pdev) priv = netdev_priv(dev); dev->irq = res_irq->start; - priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED); + priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK; + if 
(res_irq->flags & IORESOURCE_IRQ_SHAREABLE) + priv->irq_flags |= IRQF_SHARED; priv->reg_base = addr; /* The CAN clock frequency is half the oscillator clock frequency */ priv->can.clock.freq = pdata->osc_freq / 2; diff --git a/trunk/drivers/net/can/softing/softing_fw.c b/trunk/drivers/net/can/softing/softing_fw.c index 310596175676..b595d3422b9f 100644 --- a/trunk/drivers/net/can/softing/softing_fw.c +++ b/trunk/drivers/net/can/softing/softing_fw.c @@ -150,7 +150,7 @@ int softing_load_fw(const char *file, struct softing *card, const uint8_t *mem, *end, *dat; uint16_t type, len; uint32_t addr; - uint8_t *buf = NULL; + uint8_t *buf = NULL, *new_buf; int buflen = 0; int8_t type_end = 0; @@ -199,11 +199,12 @@ int softing_load_fw(const char *file, struct softing *card, if (len > buflen) { /* align buflen */ buflen = (len + (1024-1)) & ~(1024-1); - buf = krealloc(buf, buflen, GFP_KERNEL); - if (!buf) { + new_buf = krealloc(buf, buflen, GFP_KERNEL); + if (!new_buf) { ret = -ENOMEM; goto failed; } + buf = new_buf; } /* verify record data */ memcpy_fromio(buf, &dpram[addr + offset], len); diff --git a/trunk/drivers/net/cris/eth_v10.c b/trunk/drivers/net/cris/eth_v10.c index f0c8bd54ce29..021d69c5d9bc 100644 --- a/trunk/drivers/net/cris/eth_v10.c +++ b/trunk/drivers/net/cris/eth_v10.c @@ -1712,7 +1712,7 @@ e100_set_network_leds(int active) static void e100_netpoll(struct net_device* netdev) { - e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev, NULL); + e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev); } #endif diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 77bcd4cb4ffb..6d1a24acb77e 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1278,7 +1278,7 @@ struct bnx2x { #define BNX2X_FW_RX_ALIGN_START (1UL << BNX2X_RX_ALIGN_SHIFT) #define BNX2X_FW_RX_ALIGN_END \ - max(1UL << BNX2X_RX_ALIGN_SHIFT, \ + max_t(u64, 1UL << BNX2X_RX_ALIGN_SHIFT, \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) #define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5) @@ -1708,9 +1708,6 @@ struct bnx2x_func_init_params { continue; \ else -#define for_each_napi_rx_queue(bp, var) \ - for ((var) = 0; (var) < bp->num_napi_queues; (var)++) - /* Skip OOO FP */ #define for_each_tx_queue(bp, var) \ for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index e879e19eb0d6..af20c6ee2cd9 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2046,6 +2046,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) */ bnx2x_setup_tc(bp->dev, bp->max_cos); + /* Add all NAPI objects */ + bnx2x_add_all_napi(bp); bnx2x_napi_enable(bp); /* set pf load just before approaching the MCP */ @@ -2408,6 +2410,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) /* Disable HW interrupts, NAPI */ bnx2x_netif_stop(bp, 1); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); /* Release IRQs */ bnx2x_free_irq(bp); diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index dfa757e74296..21b553229ea4 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -792,7 +792,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp) bp->num_napi_queues = bp->num_queues; 
/* Add NAPI objects */ - for_each_napi_rx_queue(bp, i) + for_each_rx_queue(bp, i) netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll, BNX2X_NAPI_WEIGHT); } @@ -801,7 +801,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp) { int i; - for_each_napi_rx_queue(bp, i) + for_each_rx_queue(bp, i) netif_napi_del(&bnx2x_fp(bp, i, napi)); } diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index fc4e0e3885b0..c37a68d68090 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -2888,11 +2888,9 @@ static void bnx2x_get_channels(struct net_device *dev, */ static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss) { - bnx2x_del_all_napi(bp); bnx2x_disable_msi(bp); BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE; bnx2x_set_int_mode(bp); - bnx2x_add_all_napi(bp); } /** diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index dd451c3dd83d..21054987257a 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -4041,20 +4041,6 @@ static bool bnx2x_get_load_status(struct bnx2x *bp, int engine) return val != 0; } -/* - * Reset the load status for the current engine. - */ -static void bnx2x_clear_load_status(struct bnx2x *bp) -{ - u32 val; - u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : - BNX2X_PATH0_LOAD_CNT_MASK); - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); - val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); - REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask)); - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); -} - static void _print_next_block(int idx, const char *blk) { pr_cont("%s%s", idx ? 
", " : "", blk); @@ -8441,6 +8427,8 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) /* Disable HW interrupts, NAPI */ bnx2x_netif_stop(bp, 1); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); /* Release IRQs */ bnx2x_free_irq(bp); @@ -9384,32 +9372,24 @@ static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp) return rc; } -static bool __devinit bnx2x_can_flr(struct bnx2x *bp) -{ - int pos; - u32 cap; - struct pci_dev *dev = bp->pdev; - - pos = pci_pcie_cap(dev); - if (!pos) - return false; - - pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap); - if (!(cap & PCI_EXP_DEVCAP_FLR)) - return false; - - return true; -} - static int __devinit bnx2x_do_flr(struct bnx2x *bp) { int i, pos; u16 status; struct pci_dev *dev = bp->pdev; - /* probe the capability first */ - if (bnx2x_can_flr(bp)) - return -ENOTTY; + + if (CHIP_IS_E1x(bp)) { + BNX2X_DEV_INFO("FLR not supported in E1/E1H\n"); + return -EINVAL; + } + + /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ + if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { + BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n", + bp->common.bc_ver); + return -EINVAL; + } pos = pci_pcie_cap(dev); if (!pos) @@ -9429,12 +9409,8 @@ static int __devinit bnx2x_do_flr(struct bnx2x *bp) "transaction is not cleared; proceeding with reset anyway\n"); clear: - if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { - BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n", - bp->common.bc_ver); - return -EINVAL; - } + BNX2X_DEV_INFO("Initiating FLR\n"); bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0); return 0; @@ -9454,8 +9430,21 @@ static int __devinit bnx2x_prev_unload_uncommon(struct bnx2x *bp) * the one required, then FLR will be sufficient to clean any residue * left by previous driver */ - if (bnx2x_test_firmware_version(bp, false) && bnx2x_can_flr(bp)) - return bnx2x_do_flr(bp); + rc = bnx2x_test_firmware_version(bp, false); + + if (!rc) { + /* fw version is good */ + BNX2X_DEV_INFO("FW version matches our own. 
Attempting FLR\n"); + rc = bnx2x_do_flr(bp); + } + + if (!rc) { + /* FLR was performed */ + BNX2X_DEV_INFO("FLR successful\n"); + return 0; + } + + BNX2X_DEV_INFO("Could not FLR\n"); /* Close the MCP request, return failure*/ rc = bnx2x_prev_mcp_done(bp); @@ -11242,10 +11231,12 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static void poll_bnx2x(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); + int i; - disable_irq(bp->pdev->irq); - bnx2x_interrupt(bp->pdev->irq, dev); - enable_irq(bp->pdev->irq); + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + napi_schedule(&bnx2x_fp(bp, fp->index, napi)); + } } #endif @@ -11427,9 +11418,6 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, if (!chip_is_e1x) REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); - /* Reset the load counter */ - bnx2x_clear_load_status(bp); - dev->watchdog_timeo = TX_TIMEOUT; dev->netdev_ops = &bnx2x_netdev_ops; @@ -11915,9 +11903,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, */ bnx2x_set_int_mode(bp); - /* Add all NAPI objects */ - bnx2x_add_all_napi(bp); - rc = register_netdev(dev); if (rc) { dev_err(&pdev->dev, "Cannot register net device\n"); @@ -11992,9 +11977,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev) unregister_netdev(dev); - /* Delete all NAPI objects */ - bnx2x_del_all_napi(bp); - /* Power on: we can't let PCI layer write to us while we are in D3 */ bnx2x_set_power_state(bp, PCI_D0); @@ -12041,6 +12023,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) bnx2x_tx_disable(bp); bnx2x_netif_stop(bp, 0); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); del_timer_sync(&bp->timer); diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 734fd87cd990..62f754bd0dfe 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -2485,6 +2485,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, break; default: + kfree(new_cmd); BNX2X_ERR("Unknown command: %d\n", cmd); return -EINVAL; } diff --git a/trunk/drivers/net/ethernet/cirrus/cs89x0.c b/trunk/drivers/net/ethernet/cirrus/cs89x0.c index 845b2020f291..138446957786 100644 --- a/trunk/drivers/net/ethernet/cirrus/cs89x0.c +++ b/trunk/drivers/net/ethernet/cirrus/cs89x0.c @@ -1243,6 +1243,7 @@ static void set_multicast_list(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); unsigned long flags; + u16 cfg; spin_lock_irqsave(&lp->lock, flags); if (dev->flags & IFF_PROMISC) @@ -1260,11 +1261,10 @@ static void set_multicast_list(struct net_device *dev) /* in promiscuous mode, we accept errored packets, * so we have to enable interrupts on them also */ - writereg(dev, PP_RxCFG, - (lp->curr_rx_cfg | - (lp->rx_mode == RX_ALL_ACCEPT) - ? 
(RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL) - : 0)); + cfg = lp->curr_rx_cfg; + if (lp->rx_mode == RX_ALL_ACCEPT) + cfg |= RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL; + writereg(dev, PP_RxCFG, cfg); spin_unlock_irqrestore(&lp->lock, flags); } diff --git a/trunk/drivers/net/ethernet/emulex/benet/be_cmds.c b/trunk/drivers/net/ethernet/emulex/benet/be_cmds.c index 7fac97b4bb59..8c63d06ab12b 100644 --- a/trunk/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/trunk/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -259,7 +259,7 @@ int be_process_mcc(struct be_adapter *adapter) int num = 0, status = 0; struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; - spin_lock_bh(&adapter->mcc_cq_lock); + spin_lock(&adapter->mcc_cq_lock); while ((compl = be_mcc_compl_get(adapter))) { if (compl->flags & CQE_FLAGS_ASYNC_MASK) { /* Interpret flags as an async trailer */ @@ -280,7 +280,7 @@ int be_process_mcc(struct be_adapter *adapter) if (num) be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num); - spin_unlock_bh(&adapter->mcc_cq_lock); + spin_unlock(&adapter->mcc_cq_lock); return status; } @@ -295,7 +295,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter) if (be_error(adapter)) return -EIO; + local_bh_disable(); status = be_process_mcc(adapter); + local_bh_enable(); if (atomic_read(&mcc_obj->q.used) == 0) break; diff --git a/trunk/drivers/net/ethernet/emulex/benet/be_main.c b/trunk/drivers/net/ethernet/emulex/benet/be_main.c index c60de89b6669..78b8aa8069f0 100644 --- a/trunk/drivers/net/ethernet/emulex/benet/be_main.c +++ b/trunk/drivers/net/ethernet/emulex/benet/be_main.c @@ -1948,7 +1948,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter) if (adapter->num_rx_qs != MAX_RX_QS) dev_info(&adapter->pdev->dev, - "Created only %d receive queues", adapter->num_rx_qs); + "Created only %d receive queues\n", adapter->num_rx_qs); return 0; } @@ -3763,7 +3763,9 @@ static void be_worker(struct work_struct *work) /* when interrupts are not yet enabled, just reap any pending * mcc completions */ if (!netif_running(adapter->netdev)) { + local_bh_disable(); be_process_mcc(adapter); + local_bh_enable(); goto reschedule; } diff --git a/trunk/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/trunk/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index 0f2d1a710909..151453309401 100644 --- a/trunk/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/trunk/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -174,8 +174,10 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) new_bus->phy_mask = ~0; new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!new_bus->irq) + if (!new_bus->irq) { + ret = -ENOMEM; goto out_unmap_regs; + } new_bus->parent = &ofdev->dev; dev_set_drvdata(&ofdev->dev, new_bus); diff --git a/trunk/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/trunk/drivers/net/ethernet/freescale/fs_enet/mii-fec.c index 55bb867258e6..cdf702a59485 100644 --- a/trunk/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +++ b/trunk/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -137,8 +137,10 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start); fec->fecp = ioremap(res.start, resource_size(&res)); - if (!fec->fecp) + if (!fec->fecp) { + ret = -ENOMEM; goto out_fec; + } if (get_bus_freq) { clock = get_bus_freq(ofdev->dev.of_node); @@ -172,8 +174,10 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) new_bus->phy_mask = ~0; 
new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!new_bus->irq) + if (!new_bus->irq) { + ret = -ENOMEM; goto out_unmap_regs; + } new_bus->parent = &ofdev->dev; dev_set_drvdata(&ofdev->dev, new_bus); diff --git a/trunk/drivers/net/ethernet/freescale/gianfar.c b/trunk/drivers/net/ethernet/freescale/gianfar.c index 4605f7246687..d3233f59a82e 100644 --- a/trunk/drivers/net/ethernet/freescale/gianfar.c +++ b/trunk/drivers/net/ethernet/freescale/gianfar.c @@ -1041,7 +1041,7 @@ static int gfar_probe(struct platform_device *ofdev) if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + dev->features |= NETIF_F_HW_VLAN_RX; } if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { diff --git a/trunk/drivers/net/ethernet/intel/e1000e/82571.c b/trunk/drivers/net/ethernet/intel/e1000e/82571.c index 0b3bade957fd..080c89093feb 100644 --- a/trunk/drivers/net/ethernet/intel/e1000e/82571.c +++ b/trunk/drivers/net/ethernet/intel/e1000e/82571.c @@ -999,7 +999,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) **/ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) { - u32 ctrl, ctrl_ext, eecd; + u32 ctrl, ctrl_ext, eecd, tctl; s32 ret_val; /* @@ -1014,7 +1014,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) ew32(IMC, 0xffffffff); ew32(RCTL, 0); - ew32(TCTL, E1000_TCTL_PSP); + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); e1e_flush(); usleep_range(10000, 20000); @@ -1601,10 +1603,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) * auto-negotiation in the TXCW register and disable * forced link in the Device Control register in an * attempt to auto-negotiate with our link partner. - * If the partner code word is null, stop forcing - * and restart auto negotiation. 
*/ - if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) { + if (rxcw & E1000_RXCW_C) { /* Enable autoneg, and unforce link up */ ew32(TXCW, mac->txcw); ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); diff --git a/trunk/drivers/net/ethernet/intel/e1000e/e1000.h b/trunk/drivers/net/ethernet/intel/e1000e/e1000.h index cd153326c3cf..cb3356c9af80 100644 --- a/trunk/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/trunk/drivers/net/ethernet/intel/e1000e/e1000.h @@ -310,6 +310,7 @@ struct e1000_adapter { */ struct e1000_ring *tx_ring /* One per active queue */ ____cacheline_aligned_in_smp; + u32 tx_fifo_limit; struct napi_struct napi; diff --git a/trunk/drivers/net/ethernet/intel/e1000e/netdev.c b/trunk/drivers/net/ethernet/intel/e1000e/netdev.c index 95b245310f17..d01a099475a1 100644 --- a/trunk/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/trunk/drivers/net/ethernet/intel/e1000e/netdev.c @@ -178,6 +178,24 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]); } +static void e1000e_dump_ps_pages(struct e1000_adapter *adapter, + struct e1000_buffer *bi) +{ + int i; + struct e1000_ps_page *ps_page; + + for (i = 0; i < adapter->rx_ps_pages; i++) { + ps_page = &bi->ps_pages[i]; + + if (ps_page->page) { + pr_info("packet dump for ps_page %d:\n", i); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, + 16, 1, page_address(ps_page->page), + PAGE_SIZE, true); + } + } +} + /* * e1000e_dump - Print registers, Tx-ring and Rx-ring */ @@ -299,10 +317,10 @@ static void e1000e_dump(struct e1000_adapter *adapter) (unsigned long long)buffer_info->time_stamp, buffer_info->skb, next_desc); - if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) + if (netif_msg_pktdata(adapter) && buffer_info->skb) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, - 16, 1, phys_to_virt(buffer_info->dma), - buffer_info->length, true); + 16, 1, buffer_info->skb->data, + buffer_info->skb->len, true); } /* Print Rx Ring Summary */ @@ -381,10 +399,8 @@ static void e1000e_dump(struct e1000_adapter *adapter) buffer_info->skb, next_desc); if (netif_msg_pktdata(adapter)) - print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, 16, 1, - phys_to_virt(buffer_info->dma), - adapter->rx_ps_bsize0, true); + e1000e_dump_ps_pages(adapter, + buffer_info); } } break; @@ -444,12 +460,12 @@ static void e1000e_dump(struct e1000_adapter *adapter) (unsigned long long)buffer_info->dma, buffer_info->skb, next_desc); - if (netif_msg_pktdata(adapter)) + if (netif_msg_pktdata(adapter) && + buffer_info->skb) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, - phys_to_virt - (buffer_info->dma), + buffer_info->skb->data, adapter->rx_buffer_len, true); } @@ -3500,6 +3516,15 @@ void e1000e_reset(struct e1000_adapter *adapter) break; } + /* + * Alignment of Tx data is on an arbitrary byte boundary with the + * maximum size per Tx descriptor limited only to the transmit + * allocation of the packet buffer minus 96 bytes with an upper + * limit of 24KB due to receive synchronization limitations. + */ + adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96, + 24 << 10); + /* * Disable Adaptive Interrupt Moderation if 2 full packets cannot * fit in receive buffer. 
@@ -4769,12 +4794,9 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb) return 1; } -#define E1000_MAX_PER_TXD 8192 -#define E1000_MAX_TXD_PWR 12 - static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, unsigned int first, unsigned int max_per_txd, - unsigned int nr_frags, unsigned int mss) + unsigned int nr_frags) { struct e1000_adapter *adapter = tx_ring->adapter; struct pci_dev *pdev = adapter->pdev; @@ -5007,20 +5029,19 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) { + BUG_ON(size > tx_ring->count); + if (e1000_desc_unused(tx_ring) >= size) return 0; return __e1000_maybe_stop_tx(tx_ring, size); } -#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1) static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_ring *tx_ring = adapter->tx_ring; unsigned int first; - unsigned int max_per_txd = E1000_MAX_PER_TXD; - unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; unsigned int tx_flags = 0; unsigned int len = skb_headlen(skb); unsigned int nr_frags; @@ -5040,18 +5061,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, } mss = skb_shinfo(skb)->gso_size; - /* - * The controller does a simple calculation to - * make sure there is enough room in the FIFO before - * initiating the DMA for each buffer. The calc is: - * 4 = ceil(buffer len/mss). To make sure we don't - * overrun the FIFO, adjust the max buffer len if mss - * drops. - */ if (mss) { u8 hdr_len; - max_per_txd = min(mss << 2, max_per_txd); - max_txd_pwr = fls(max_per_txd) - 1; /* * TSO Workaround for 82571/2/3 Controllers -- if skb->data @@ -5081,12 +5092,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, count++; count++; - count += TXD_USE_COUNT(len, max_txd_pwr); + count += DIV_ROUND_UP(len, adapter->tx_fifo_limit); nr_frags = skb_shinfo(skb)->nr_frags; for (f = 0; f < nr_frags; f++) - count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), - max_txd_pwr); + count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]), + adapter->tx_fifo_limit); if (adapter->hw.mac.tx_pkt_filtering) e1000_transfer_dhcp_info(adapter, skb); @@ -5128,15 +5139,18 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, tx_flags |= E1000_TX_FLAGS_NO_FCS; /* if count is 0 then mapping error has occurred */ - count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss); + count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit, + nr_frags); if (count) { skb_tx_timestamp(skb); netdev_sent_queue(netdev, skb->len); e1000_tx_queue(tx_ring, tx_flags, count); /* Make sure there is space in the ring for the next send. 
*/ - e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2); - + e1000_maybe_stop_tx(tx_ring, + (MAX_SKB_FRAGS * + DIV_ROUND_UP(PAGE_SIZE, + adapter->tx_fifo_limit) + 2)); } else { dev_kfree_skb_any(skb); tx_ring->buffer_info[first].time_stamp = 0; @@ -6311,8 +6325,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, adapter->hw.phy.autoneg_advertised = 0x2f; /* ring size defaults */ - adapter->rx_ring->count = 256; - adapter->tx_ring->count = 256; + adapter->rx_ring->count = E1000_DEFAULT_RXD; + adapter->tx_ring->count = E1000_DEFAULT_TXD; /* * Initial Wake on LAN setting - If APM wake is enabled in diff --git a/trunk/drivers/net/ethernet/intel/igb/e1000_82575.c b/trunk/drivers/net/ethernet/intel/igb/e1000_82575.c index 5e84eaac48c1..ba994fb4cec6 100644 --- a/trunk/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/trunk/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -254,6 +254,14 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) */ size += NVM_WORD_SIZE_BASE_SHIFT; + /* + * Check for invalid size + */ + if ((hw->mac.type == e1000_82576) && (size > 15)) { + pr_notice("The NVM size is not valid, defaulting to 32K\n"); + size = 15; + } + nvm->word_size = 1 << size; if (hw->mac.type < e1000_i210) { nvm->opcode_bits = 8; @@ -281,14 +289,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) } else nvm->type = e1000_nvm_flash_hw; - /* - * Check for invalid size - */ - if ((hw->mac.type == e1000_82576) && (size > 15)) { - pr_notice("The NVM size is not valid, defaulting to 32K\n"); - size = 15; - } - /* NVM Function Pointers */ switch (hw->mac.type) { case e1000_82580: diff --git a/trunk/drivers/net/ethernet/intel/igb/e1000_regs.h b/trunk/drivers/net/ethernet/intel/igb/e1000_regs.h index 10efcd88dca0..28394bea5253 100644 --- a/trunk/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/trunk/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -156,8 +156,12 @@ : (0x0E018 + ((_n) * 0x40))) #define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \ : (0x0E028 + ((_n) * 0x40))) -#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) -#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) +#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \ + (0x0C014 + ((_n) * 0x40))) +#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n) +#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \ + (0x0E014 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n) #define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \ : (0x0E038 + ((_n) * 0x40))) #define E1000_TDWBAH(_n) ((_n) < 4 ? 
(0x0383C + ((_n) * 0x100)) \ diff --git a/trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c b/trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c index a19c84cad0e9..70591117051b 100644 --- a/trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -209,8 +209,8 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) /* When SoL/IDER sessions are active, autoneg/speed/duplex * cannot be changed */ if (igb_check_reset_block(hw)) { - dev_err(&adapter->pdev->dev, "Cannot change link " - "characteristics when SoL/IDER is active.\n"); + dev_err(&adapter->pdev->dev, + "Cannot change link characteristics when SoL/IDER is active.\n"); return -EINVAL; } @@ -1089,8 +1089,8 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, wr32(reg, (_test[pat] & write)); val = rd32(reg) & mask; if (val != (_test[pat] & write & mask)) { - dev_err(&adapter->pdev->dev, "pattern test reg %04X " - "failed: got 0x%08X expected 0x%08X\n", + dev_err(&adapter->pdev->dev, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", reg, val, (_test[pat] & write & mask)); *data = reg; return 1; @@ -1108,8 +1108,8 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data, wr32(reg, write & mask); val = rd32(reg); if ((write & mask) != (val & mask)) { - dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:" - " got 0x%08X expected 0x%08X\n", reg, + dev_err(&adapter->pdev->dev, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg, (val & mask), (write & mask)); *data = reg; return 1; @@ -1171,8 +1171,9 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data) wr32(E1000_STATUS, toggle); after = rd32(E1000_STATUS) & toggle; if (value != after) { - dev_err(&adapter->pdev->dev, "failed STATUS register test " - "got: 0x%08X expected: 0x%08X\n", after, value); + dev_err(&adapter->pdev->dev, + "failed STATUS register test got: 0x%08X expected: 0x%08X\n", + after, value); *data = 1; return 1; } @@ -1497,6 +1498,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) break; } + /* add small delay to avoid loopback test failure */ + msleep(50); + /* force 1000, set loopback */ igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); @@ -1777,16 +1781,14 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) * sessions are active */ if (igb_check_reset_block(&adapter->hw)) { dev_err(&adapter->pdev->dev, - "Cannot do PHY loopback test " - "when SoL/IDER is active.\n"); + "Cannot do PHY loopback test when SoL/IDER is active.\n"); *data = 0; goto out; } if ((adapter->hw.mac.type == e1000_i210) - || (adapter->hw.mac.type == e1000_i210)) { + || (adapter->hw.mac.type == e1000_i211)) { dev_err(&adapter->pdev->dev, - "Loopback test not supported " - "on this part at this time.\n"); + "Loopback test not supported on this part at this time.\n"); *data = 0; goto out; } diff --git a/trunk/drivers/net/ethernet/intel/igb/igb_main.c b/trunk/drivers/net/ethernet/intel/igb/igb_main.c index b7c2d5050572..48cc4fb1a307 100644 --- a/trunk/drivers/net/ethernet/intel/igb/igb_main.c +++ b/trunk/drivers/net/ethernet/intel/igb/igb_main.c @@ -462,10 +462,10 @@ static void igb_dump(struct igb_adapter *adapter) (u64)buffer_info->time_stamp, buffer_info->skb, next_desc); - if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) + if (netif_msg_pktdata(adapter) && buffer_info->skb) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, - 16, 1, phys_to_virt(buffer_info->dma), + 16, 1, 
buffer_info->skb->data, buffer_info->length, true); } } @@ -547,18 +547,17 @@ static void igb_dump(struct igb_adapter *adapter) (u64)buffer_info->dma, buffer_info->skb, next_desc); - if (netif_msg_pktdata(adapter)) { + if (netif_msg_pktdata(adapter) && + buffer_info->dma && buffer_info->skb) { print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, - 16, 1, - phys_to_virt(buffer_info->dma), - IGB_RX_HDR_LEN, true); + DUMP_PREFIX_ADDRESS, + 16, 1, buffer_info->skb->data, + IGB_RX_HDR_LEN, true); print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, - phys_to_virt( - buffer_info->page_dma + - buffer_info->page_offset), + page_address(buffer_info->page) + + buffer_info->page_offset, PAGE_SIZE/2, true); } } diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 50fc137501da..18bf08c9d7a4 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -804,12 +804,13 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { /* Set KX4/KX/KR support according to speed requested */ autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); - if (speed & IXGBE_LINK_SPEED_10GB_FULL) + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) autoc |= IXGBE_AUTOC_KX4_SUPP; if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) && (hw->phy.smart_speed_active == false)) autoc |= IXGBE_AUTOC_KR_SUPP; + } if (speed & IXGBE_LINK_SPEED_1GB_FULL) autoc |= IXGBE_AUTOC_KX_SUPP; } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_rx.c index f32e70300770..5aba5ecdf1e2 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -614,8 +614,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud /* If source MAC is equal to our own MAC and not performing * the selftest or flb disabled - drop the packet */ if (s_mac == priv->mac && - (!(dev->features & NETIF_F_LOOPBACK) || - !priv->validate_loopback)) + !((dev->features & NETIF_F_LOOPBACK) || + priv->validate_loopback)) goto next; /* diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 019d856b1334..10bba09c44ea 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -164,7 +164,6 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, ring->cons = 0xffffffff; ring->last_nr_txbb = 1; ring->poll_cnt = 0; - ring->blocked = 0; memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info)); memset(ring->buf, 0, ring->buf_size); @@ -365,14 +364,13 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) ring->cons += txbbs_skipped; netdev_tx_completed_queue(ring->tx_queue, packets, bytes); - /* Wakeup Tx queue if this ring stopped it */ - if (unlikely(ring->blocked)) { - if ((u32) (ring->prod - ring->cons) <= - ring->size - HEADROOM - MAX_DESC_TXBBS) { - ring->blocked = 0; - netif_tx_wake_queue(ring->tx_queue); - priv->port_stats.wake_queue++; - } + /* + * Wakeup Tx queue if this stopped, and at least 1 packet + * was completed + */ + if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) { + netif_tx_wake_queue(ring->tx_queue); + priv->port_stats.wake_queue++; } } @@ -592,7 +590,6 @@ netdev_tx_t 
mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) ring->size - HEADROOM - MAX_DESC_TXBBS)) { /* every full Tx ring stops queue */ netif_tx_stop_queue(ring->tx_queue); - ring->blocked = 1; priv->port_stats.queue_stopped++; return NETDEV_TX_BUSY; diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/icm.c b/trunk/drivers/net/ethernet/mellanox/mlx4/icm.c index 88b7b3e75ab1..daf417923661 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -358,13 +358,14 @@ void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, } int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, - u64 virt, int obj_size, int nobj, int reserved, + u64 virt, int obj_size, u32 nobj, int reserved, int use_lowmem, int use_coherent) { int obj_per_chunk; int num_icm; unsigned chunk_size; int i; + u64 size; obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size; num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk; @@ -380,10 +381,12 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, table->coherent = use_coherent; mutex_init(&table->mutex); + size = (u64) nobj * obj_size; for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) { chunk_size = MLX4_TABLE_CHUNK_SIZE; - if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size) - chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE); + if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > size) + chunk_size = PAGE_ALIGN(size - + i * MLX4_TABLE_CHUNK_SIZE); table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT, (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) | diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/icm.h b/trunk/drivers/net/ethernet/mellanox/mlx4/icm.h index 19e4efc0b342..a67744f53506 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/icm.h +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/icm.h @@ -78,7 +78,7 @@ int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, int start, int end); int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, - u64 virt, int obj_size, int nobj, int reserved, + u64 virt, int obj_size, u32 nobj, int reserved, int use_lowmem, int use_coherent); void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table); void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle); diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/main.c b/trunk/drivers/net/ethernet/mellanox/mlx4/main.c index 48d0e90194cb..827b72dfce99 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/main.c @@ -157,9 +157,6 @@ int mlx4_check_port_params(struct mlx4_dev *dev, "on this HCA, aborting.\n"); return -EINVAL; } - if (port_type[i] == MLX4_PORT_TYPE_ETH && - port_type[i + 1] == MLX4_PORT_TYPE_IB) - return -EINVAL; } } diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/mcg.c b/trunk/drivers/net/ethernet/mellanox/mlx4/mcg.c index 4ec3835e1bc2..a018ea2a43de 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -432,8 +432,10 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port, if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) { /* Entry already exists, add to duplicates */ dqp = kmalloc(sizeof *dqp, GFP_KERNEL); - if (!dqp) + if (!dqp) { + err = -ENOMEM; goto out_mailbox; + } dqp->qpn = qpn; list_add_tail(&dqp->list, &entry->duplicates); found = 
true; diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 59ebc0339638..4d9df8f2a126 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -249,7 +249,7 @@ struct mlx4_bitmap { struct mlx4_buddy { unsigned long **bits; unsigned int *num_free; - int max_order; + u32 max_order; spinlock_t lock; }; @@ -258,7 +258,7 @@ struct mlx4_icm; struct mlx4_icm_table { u64 virt; int num_icm; - int num_obj; + u32 num_obj; int obj_size; int lowmem; int coherent; diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 5f1ab105debc..9d27e42264e2 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -248,7 +248,6 @@ struct mlx4_en_tx_ring { u32 doorbell_qpn; void *buf; u16 poll_cnt; - int blocked; struct mlx4_en_tx_info *tx_info; u8 *bounce_buf; u32 last_nr_txbb; diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/mr.c b/trunk/drivers/net/ethernet/mellanox/mlx4/mr.c index af55b7ce5341..c202d3ad2a0e 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -37,6 +37,7 @@ #include #include #include +#include #include @@ -120,7 +121,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) buddy->max_order = max_order; spin_lock_init(&buddy->lock); - buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *), + buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *), GFP_KERNEL); buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free, GFP_KERNEL); @@ -129,10 +130,12 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) for (i = 0; i <= buddy->max_order; ++i) { s = BITS_TO_LONGS(1 << (buddy->max_order - i)); - buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL); - if (!buddy->bits[i]) - goto err_out_free; - bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i)); + buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN); + if (!buddy->bits[i]) { + buddy->bits[i] = vzalloc(s * sizeof(long)); + if (!buddy->bits[i]) + goto err_out_free; + } } set_bit(0, buddy->bits[buddy->max_order]); @@ -142,7 +145,10 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) err_out_free: for (i = 0; i <= buddy->max_order; ++i) - kfree(buddy->bits[i]); + if (buddy->bits[i] && is_vmalloc_addr(buddy->bits[i])) + vfree(buddy->bits[i]); + else + kfree(buddy->bits[i]); err_out: kfree(buddy->bits); @@ -156,7 +162,10 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy) int i; for (i = 0; i <= buddy->max_order; ++i) - kfree(buddy->bits[i]); + if (is_vmalloc_addr(buddy->bits[i])) + vfree(buddy->bits[i]); + else + kfree(buddy->bits[i]); kfree(buddy->bits); kfree(buddy->num_free); @@ -668,7 +677,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev) return err; err = mlx4_buddy_init(&mr_table->mtt_buddy, - ilog2(dev->caps.num_mtts / + ilog2((u32)dev->caps.num_mtts / (1 << log_mtts_per_seg))); if (err) goto err_buddy; @@ -678,7 +687,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev) mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)); if (priv->reserved_mtts < 0) { - mlx4_warn(dev, "MTT table of order %d is too small.\n", + mlx4_warn(dev, "MTT table of order %u is too small.\n", mr_table->mtt_buddy.max_order); err = -ENOMEM; goto err_reserve_mtts; diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/profile.c 
b/trunk/drivers/net/ethernet/mellanox/mlx4/profile.c index 9ee4725363d5..8e0c3cc2a1ec 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/profile.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/profile.c @@ -76,7 +76,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, u64 size; u64 start; int type; - int num; + u32 num; int log_num; }; @@ -105,7 +105,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, si_meminfo(&si); request->num_mtt = roundup_pow_of_two(max_t(unsigned, request->num_mtt, - min(1UL << 31, + min(1UL << (31 - log_mtts_per_seg), si.totalram >> (log_mtts_per_seg - 1)))); profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz; diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/sense.c b/trunk/drivers/net/ethernet/mellanox/mlx4/sense.c index 802498293528..34ee09bae36e 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/sense.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/sense.c @@ -80,20 +80,6 @@ void mlx4_do_sense_ports(struct mlx4_dev *dev, stype[i - 1] = defaults[i - 1]; } - /* - * Adjust port configuration: - * If port 1 sensed nothing and port 2 is IB, set both as IB - * If port 2 sensed nothing and port 1 is Eth, set both as Eth - */ - if (stype[0] == MLX4_PORT_TYPE_ETH) { - for (i = 1; i < dev->caps.num_ports; i++) - stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH; - } - if (stype[dev->caps.num_ports - 1] == MLX4_PORT_TYPE_IB) { - for (i = 0; i < dev->caps.num_ports - 1; i++) - stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB; - } - /* * If sensed nothing, remain in current configuration. */ diff --git a/trunk/drivers/net/ethernet/nxp/lpc_eth.c b/trunk/drivers/net/ethernet/nxp/lpc_eth.c index 4069edab229e..53743f7a2ca9 100644 --- a/trunk/drivers/net/ethernet/nxp/lpc_eth.c +++ b/trunk/drivers/net/ethernet/nxp/lpc_eth.c @@ -346,28 +346,15 @@ static phy_interface_t lpc_phy_interface_mode(struct device *dev) "phy-mode", NULL); if (mode && !strcmp(mode, "mii")) return PHY_INTERFACE_MODE_MII; - return PHY_INTERFACE_MODE_RMII; } - - /* non-DT */ -#ifdef CONFIG_ARCH_LPC32XX_MII_SUPPORT - return PHY_INTERFACE_MODE_MII; -#else return PHY_INTERFACE_MODE_RMII; -#endif } static bool use_iram_for_net(struct device *dev) { if (dev && dev->of_node) return of_property_read_bool(dev->of_node, "use-iram"); - - /* non-DT */ -#ifdef CONFIG_ARCH_LPC32XX_IRAM_FOR_NET - return true; -#else return false; -#endif } /* Receive Status information word */ diff --git a/trunk/drivers/net/ethernet/renesas/Kconfig b/trunk/drivers/net/ethernet/renesas/Kconfig index 46df3a04030c..24c2305d7948 100644 --- a/trunk/drivers/net/ethernet/renesas/Kconfig +++ b/trunk/drivers/net/ethernet/renesas/Kconfig @@ -8,7 +8,7 @@ config SH_ETH (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \ CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \ CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \ - CPU_SUBTYPE_SH7757 || ARCH_R8A7740) + CPU_SUBTYPE_SH7757 || ARCH_R8A7740 || ARCH_R8A7779) select CRC32 select NET_CORE select MII @@ -18,4 +18,4 @@ config SH_ETH Renesas SuperH Ethernet device driver. This driver supporting CPUs are: - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757, - and R8A7740. + R8A7740 and R8A7779. 
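The mlx4_buddy_init()/mlx4_buddy_cleanup() hunks above switch the per-order bitmaps to a try-kcalloc-then-vzalloc strategy, pairing each allocation with an is_vmalloc_addr() check on free. A minimal sketch of that pattern follows; the helper names alloc_order_bitmap()/free_order_bitmap() are illustrative only and not part of the driver.

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

/* Try physically contiguous memory first, quietly; fall back to vmalloc. */
static unsigned long *alloc_order_bitmap(size_t nlongs)
{
	unsigned long *bits;

	bits = kcalloc(nlongs, sizeof(*bits), GFP_KERNEL | __GFP_NOWARN);
	if (!bits)
		bits = vzalloc(nlongs * sizeof(*bits));
	return bits;
}

/* Free with the routine that matches whichever allocator succeeded. */
static void free_order_bitmap(unsigned long *bits)
{
	if (!bits)
		return;
	if (is_vmalloc_addr(bits))
		vfree(bits);
	else
		kfree(bits);
}

Note the __GFP_NOWARN: the kmalloc path is expected to fail for large orders, so its failure warning is suppressed before falling back to vmalloc'ed memory.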
diff --git a/trunk/drivers/net/ethernet/renesas/sh_eth.c b/trunk/drivers/net/ethernet/renesas/sh_eth.c index af0b867a6cf6..bad8f2eec9b4 100644 --- a/trunk/drivers/net/ethernet/renesas/sh_eth.c +++ b/trunk/drivers/net/ethernet/renesas/sh_eth.c @@ -78,7 +78,7 @@ static void sh_eth_select_mii(struct net_device *ndev) #endif /* There is CPU dependent code */ -#if defined(CONFIG_CPU_SUBTYPE_SH7724) +#if defined(CONFIG_CPU_SUBTYPE_SH7724) || defined(CONFIG_ARCH_R8A7779) #define SH_ETH_RESET_DEFAULT 1 static void sh_eth_set_duplex(struct net_device *ndev) { @@ -93,13 +93,18 @@ static void sh_eth_set_duplex(struct net_device *ndev) static void sh_eth_set_rate(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); + unsigned int bits = ECMR_RTM; + +#if defined(CONFIG_ARCH_R8A7779) + bits |= ECMR_ELB; +#endif switch (mdp->speed) { case 10: /* 10BASE */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR); + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~bits, ECMR); break; case 100:/* 100BASE */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR); + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | bits, ECMR); break; default: break; diff --git a/trunk/drivers/net/ethernet/sfc/efx.c b/trunk/drivers/net/ethernet/sfc/efx.c index 70554a1b2b02..65a8d49106a4 100644 --- a/trunk/drivers/net/ethernet/sfc/efx.c +++ b/trunk/drivers/net/ethernet/sfc/efx.c @@ -1503,6 +1503,11 @@ static int efx_probe_all(struct efx_nic *efx) goto fail2; } + BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT); + if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) { + rc = -EINVAL; + goto fail3; + } efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; rc = efx_probe_filters(efx); @@ -2070,6 +2075,7 @@ static int efx_register_netdev(struct efx_nic *efx) net_dev->irq = efx->pci_dev->irq; net_dev->netdev_ops = &efx_netdev_ops; SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); + net_dev->gso_max_segs = EFX_TSO_MAX_SEGS; rtnl_lock(); diff --git a/trunk/drivers/net/ethernet/sfc/efx.h b/trunk/drivers/net/ethernet/sfc/efx.h index be8f9158a714..70755c97251a 100644 --- a/trunk/drivers/net/ethernet/sfc/efx.h +++ b/trunk/drivers/net/ethernet/sfc/efx.h @@ -30,6 +30,7 @@ extern netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc); +extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); /* RX */ extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); @@ -52,10 +53,15 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); #define EFX_MAX_EVQ_SIZE 16384UL #define EFX_MIN_EVQ_SIZE 512UL -/* The smallest [rt]xq_entries that the driver supports. Callers of - * efx_wake_queue() assume that they can subsequently send at least one - * skb. Falcon/A1 may require up to three descriptors per skb_frag. */ -#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS)) +/* Maximum number of TCP segments we support for soft-TSO */ +#define EFX_TSO_MAX_SEGS 100 + +/* The smallest [rt]xq_entries that the driver supports. RX minimum + * is a bit arbitrary. For TX, we must have space for at least 2 + * TSO skbs. 
+ */ +#define EFX_RXQ_MIN_ENT 128U +#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx)) /* Filters */ extern int efx_probe_filters(struct efx_nic *efx); diff --git a/trunk/drivers/net/ethernet/sfc/ethtool.c b/trunk/drivers/net/ethernet/sfc/ethtool.c index 10536f93b561..5faedd855b77 100644 --- a/trunk/drivers/net/ethernet/sfc/ethtool.c +++ b/trunk/drivers/net/ethernet/sfc/ethtool.c @@ -680,21 +680,27 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev, struct ethtool_ringparam *ring) { struct efx_nic *efx = netdev_priv(net_dev); + u32 txq_entries; if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->rx_pending > EFX_MAX_DMAQ_SIZE || ring->tx_pending > EFX_MAX_DMAQ_SIZE) return -EINVAL; - if (ring->rx_pending < EFX_MIN_RING_SIZE || - ring->tx_pending < EFX_MIN_RING_SIZE) { + if (ring->rx_pending < EFX_RXQ_MIN_ENT) { netif_err(efx, drv, efx->net_dev, - "TX and RX queues cannot be smaller than %ld\n", - EFX_MIN_RING_SIZE); + "RX queues cannot be smaller than %u\n", + EFX_RXQ_MIN_ENT); return -EINVAL; } - return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending); + txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx)); + if (txq_entries != ring->tx_pending) + netif_warn(efx, drv, efx->net_dev, + "increasing TX queue size to minimum of %u\n", + txq_entries); + + return efx_realloc_channels(efx, ring->rx_pending, txq_entries); } static int efx_ethtool_set_pauseparam(struct net_device *net_dev, @@ -857,8 +863,8 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx, &ip_entry->ip4dst, &ip_entry->pdst); if (rc != 0) { rc = efx_filter_get_ipv4_full( - &spec, &proto, &ip_entry->ip4src, &ip_entry->psrc, - &ip_entry->ip4dst, &ip_entry->pdst); + &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst, + &ip_entry->ip4src, &ip_entry->psrc); EFX_WARN_ON_PARANOID(rc); ip_mask->ip4src = ~0; ip_mask->psrc = ~0; diff --git a/trunk/drivers/net/ethernet/sfc/tx.c b/trunk/drivers/net/ethernet/sfc/tx.c index 9b225a7769f7..18713436b443 100644 --- a/trunk/drivers/net/ethernet/sfc/tx.c +++ b/trunk/drivers/net/ethernet/sfc/tx.c @@ -119,6 +119,25 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr) return len; } +unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) +{ + /* Header and payload descriptor for each output segment, plus + * one for every input fragment boundary within a segment + */ + unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS; + + /* Possibly one more per segment for the alignment workaround */ + if (EFX_WORKAROUND_5391(efx)) + max_descs += EFX_TSO_MAX_SEGS; + + /* Possibly more for PCIe page boundaries within input fragments */ + if (PAGE_SIZE > EFX_PAGE_SIZE) + max_descs += max_t(unsigned int, MAX_SKB_FRAGS, + DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE)); + + return max_descs; +} + /* * Add a socket buffer to a TX queue * diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/common.h b/trunk/drivers/net/ethernet/stmicro/stmmac/common.h index e2d083228f3a..719be3912aa9 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/common.h @@ -22,6 +22,9 @@ Author: Giuseppe Cavallaro *******************************************************************************/ +#ifndef __COMMON_H__ +#define __COMMON_H__ + #include #include #include @@ -366,3 +369,5 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable); extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); extern const struct stmmac_ring_mode_ops ring_mode_ops; + +#endif /* __COMMON_H__ */ diff --git 
a/trunk/drivers/net/ethernet/stmicro/stmmac/descs.h b/trunk/drivers/net/ethernet/stmicro/stmmac/descs.h index 9820ec842cc0..223adf95fd03 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/descs.h +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/descs.h @@ -20,6 +20,10 @@ Author: Giuseppe Cavallaro *******************************************************************************/ + +#ifndef __DESCS_H__ +#define __DESCS_H__ + struct dma_desc { /* Receive descriptor */ union { @@ -166,3 +170,5 @@ enum tdes_csum_insertion { * is not calculated */ cic_full = 3, /* IP header and pseudoheader */ }; + +#endif /* __DESCS_H__ */ diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/trunk/drivers/net/ethernet/stmicro/stmmac/descs_com.h index dd8d6e19dff6..7ee9499a6e38 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/descs_com.h +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/descs_com.h @@ -27,6 +27,9 @@ Author: Giuseppe Cavallaro *******************************************************************************/ +#ifndef __DESC_COM_H__ +#define __DESC_COM_H__ + #if defined(CONFIG_STMMAC_RING) static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end) { @@ -124,3 +127,5 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len) p->des01.tx.buffer1_size = len; } #endif + +#endif /* __DESC_COM_H__ */ diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac100.h index 7c6d857a9cc7..2ec6aeae349e 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac100.h +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac100.h @@ -22,6 +22,9 @@ Author: Giuseppe Cavallaro *******************************************************************************/ +#ifndef __DWMAC100_H__ +#define __DWMAC100_H__ + #include #include "common.h" @@ -119,3 +122,5 @@ enum ttc_control { #define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Couinter */ extern const struct stmmac_dma_ops dwmac100_dma_ops; + +#endif /* __DWMAC100_H__ */ diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index f90fcb5f9573..0e4cacedc1f0 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -19,6 +19,8 @@ Author: Giuseppe Cavallaro *******************************************************************************/ +#ifndef __DWMAC1000_H__ +#define __DWMAC1000_H__ #include #include "common.h" @@ -229,6 +231,7 @@ enum rtc_control { #define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 /* Synopsys Core versions */ -#define DWMAC_CORE_3_40 34 +#define DWMAC_CORE_3_40 0x34 extern const struct stmmac_dma_ops dwmac1000_dma_ops; +#endif /* __DWMAC1000_H__ */ diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index e678ce39d014..e49c9a0fd6ff 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -22,6 +22,9 @@ Author: Giuseppe Cavallaro *******************************************************************************/ +#ifndef __DWMAC_DMA_H__ +#define __DWMAC_DMA_H__ + /* DMA CRS Control and Status Register Mapping */ #define DMA_BUS_MODE 0x00001000 /* Bus Mode */ #define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */ @@ -109,3 +112,5 @@ extern void dwmac_dma_start_rx(void __iomem *ioaddr); extern void dwmac_dma_stop_rx(void __iomem *ioaddr); extern int 
dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x); + +#endif /* __DWMAC_DMA_H__ */ diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/mmc.h b/trunk/drivers/net/ethernet/stmicro/stmmac/mmc.h index a38352024cb8..67995ef25251 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/mmc.h +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/mmc.h @@ -22,6 +22,9 @@ Author: Giuseppe Cavallaro *******************************************************************************/ +#ifndef __MMC_H__ +#define __MMC_H__ + /* MMC control register */ /* When set, all counter are reset */ #define MMC_CNTRL_COUNTER_RESET 0x1 @@ -129,3 +132,5 @@ struct stmmac_counters { extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode); extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr); extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc); + +#endif /* __MMC_H__ */ diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/trunk/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index c07cfe989f6e..0c74a702d461 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/mmc_core.c @@ -33,7 +33,7 @@ #define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */ #define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */ #define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */ -#define MMC_DEFAUL_MASK 0xffffffff +#define MMC_DEFAULT_MASK 0xffffffff /* MMC TX counter registers */ @@ -147,8 +147,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode) /* To mask all all interrupts.*/ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr) { - writel(MMC_DEFAUL_MASK, ioaddr + MMC_RX_INTR_MASK); - writel(MMC_DEFAUL_MASK, ioaddr + MMC_TX_INTR_MASK); + writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK); + writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK); } /* This reads the MAC core counters (if actaully supported). 
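The stmmac hunks above and below add conventional include guards to headers that previously had none, so double inclusion no longer causes redefinition errors. All of them follow the same skeleton, sketched here with a hypothetical header (example_hw.h and its contents are not from the driver):

/* example_hw.h - hypothetical header showing the guard pattern used above */
#ifndef __EXAMPLE_HW_H__
#define __EXAMPLE_HW_H__

#include <linux/types.h>

struct example_hw_stats {
	u32 tx_frames;
	u32 rx_frames;
};

#endif /* __EXAMPLE_HW_H__ */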
diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac.h index f2d3665430ad..e872e1da3137 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -20,6 +20,9 @@ Author: Giuseppe Cavallaro *******************************************************************************/ +#ifndef __STMMAC_H__ +#define __STMMAC_H__ + #define STMMAC_RESOURCE_NAME "stmmaceth" #define DRV_MODULE_VERSION "March_2012" @@ -166,3 +169,5 @@ static inline void stmmac_unregister_pci(void) { } #endif /* CONFIG_STMMAC_PCI */ + +#endif /* __STMMAC_H__ */ diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index fd8882f9602a..c136162e6473 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2077,7 +2077,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, goto error_netdev_register; } - priv->stmmac_clk = clk_get(priv->device, NULL); + priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME); if (IS_ERR(priv->stmmac_clk)) { pr_warning("%s: warning: cannot get CSR clock\n", __func__); goto error_clk_get; diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index cd01ee7ecef1..b93245c11995 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -74,7 +74,7 @@ static int __devinit stmmac_probe_config_dt(struct platform_device *pdev, * the necessary resources and invokes the main to init * the net device, register the mdio bus etc. 
*/ -static int stmmac_pltfr_probe(struct platform_device *pdev) +static int __devinit stmmac_pltfr_probe(struct platform_device *pdev) { int ret = 0; struct resource *res; diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h index 6863590d184b..aea9b14cdfbe 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h @@ -21,6 +21,8 @@ Author: Giuseppe Cavallaro *******************************************************************************/ +#ifndef __STMMAC_TIMER_H__ +#define __STMMAC_TIMER_H__ struct stmmac_timer { void (*timer_start) (unsigned int new_freq); @@ -40,3 +42,5 @@ void stmmac_schedule(struct net_device *dev); extern int tmu2_register_user(void *fnt, void *data); extern void tmu2_unregister_user(void); #endif + +#endif /* __STMMAC_TIMER_H__ */ diff --git a/trunk/drivers/net/ethernet/ti/davinci_cpdma.c b/trunk/drivers/net/ethernet/ti/davinci_cpdma.c index 3b5c4571b55e..d15c888e9df8 100644 --- a/trunk/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/trunk/drivers/net/ethernet/ti/davinci_cpdma.c @@ -538,11 +538,12 @@ EXPORT_SYMBOL_GPL(cpdma_chan_create); int cpdma_chan_destroy(struct cpdma_chan *chan) { - struct cpdma_ctlr *ctlr = chan->ctlr; + struct cpdma_ctlr *ctlr; unsigned long flags; if (!chan) return -EINVAL; + ctlr = chan->ctlr; spin_lock_irqsave(&ctlr->lock, flags); if (chan->state != CPDMA_STATE_IDLE) diff --git a/trunk/drivers/net/ethernet/ti/davinci_mdio.c b/trunk/drivers/net/ethernet/ti/davinci_mdio.c index cd7ee204e94a..a9ca4a03d31b 100644 --- a/trunk/drivers/net/ethernet/ti/davinci_mdio.c +++ b/trunk/drivers/net/ethernet/ti/davinci_mdio.c @@ -394,8 +394,10 @@ static int __devexit davinci_mdio_remove(struct platform_device *pdev) struct device *dev = &pdev->dev; struct davinci_mdio_data *data = dev_get_drvdata(dev); - if (data->bus) + if (data->bus) { + mdiobus_unregister(data->bus); mdiobus_free(data->bus); + } if (data->clk) clk_put(data->clk); diff --git a/trunk/drivers/net/ethernet/xscale/ixp4xx_eth.c b/trunk/drivers/net/ethernet/xscale/ixp4xx_eth.c index 482648fcf0b6..98934bdf6acf 100644 --- a/trunk/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/trunk/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -1003,6 +1003,7 @@ static int ixp4xx_nway_reset(struct net_device *dev) } int ixp46x_phc_index = -1; +EXPORT_SYMBOL_GPL(ixp46x_phc_index); static int ixp4xx_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) diff --git a/trunk/drivers/net/fddi/skfp/pmf.c b/trunk/drivers/net/fddi/skfp/pmf.c index 24d8566cfd8b..441b4dc79450 100644 --- a/trunk/drivers/net/fddi/skfp/pmf.c +++ b/trunk/drivers/net/fddi/skfp/pmf.c @@ -673,7 +673,7 @@ void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para, sm_pm_get_ls(smc,port_to_mib(smc,port))) ; break ; case SMT_P_REASON : - * (u_long *) to = 0 ; + *(u32 *)to = 0 ; sp_len = 4 ; goto sp_done ; case SMT_P1033 : /* time stamp */ diff --git a/trunk/drivers/net/hyperv/netvsc.c b/trunk/drivers/net/hyperv/netvsc.c index 6cee2917eb02..4a1a5f58fa73 100644 --- a/trunk/drivers/net/hyperv/netvsc.c +++ b/trunk/drivers/net/hyperv/netvsc.c @@ -383,13 +383,6 @@ int netvsc_device_remove(struct hv_device *device) unsigned long flags; net_device = hv_get_drvdata(device); - spin_lock_irqsave(&device->channel->inbound_lock, flags); - net_device->destroy = true; - spin_unlock_irqrestore(&device->channel->inbound_lock, flags); - - /* Wait for all send completions */ - 
wait_event(net_device->wait_drain, - atomic_read(&net_device->num_outstanding_sends) == 0); netvsc_disconnect_vsp(net_device); diff --git a/trunk/drivers/net/hyperv/rndis_filter.c b/trunk/drivers/net/hyperv/rndis_filter.c index e5d6146937fa..1e88a1095934 100644 --- a/trunk/drivers/net/hyperv/rndis_filter.c +++ b/trunk/drivers/net/hyperv/rndis_filter.c @@ -718,6 +718,9 @@ static void rndis_filter_halt_device(struct rndis_device *dev) { struct rndis_request *request; struct rndis_halt_request *halt; + struct netvsc_device *nvdev = dev->net_dev; + struct hv_device *hdev = nvdev->dev; + ulong flags; /* Attempt to do a rndis device halt */ request = get_rndis_request(dev, RNDIS_MSG_HALT, @@ -735,6 +738,14 @@ static void rndis_filter_halt_device(struct rndis_device *dev) dev->state = RNDIS_DEV_UNINITIALIZED; cleanup: + spin_lock_irqsave(&hdev->channel->inbound_lock, flags); + nvdev->destroy = true; + spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags); + + /* Wait for all send completions */ + wait_event(nvdev->wait_drain, + atomic_read(&nvdev->num_outstanding_sends) == 0); + if (request) put_rndis_request(dev, request); return; diff --git a/trunk/drivers/net/irda/bfin_sir.c b/trunk/drivers/net/irda/bfin_sir.c index a561ae44a9ac..c6a0299aa9f9 100644 --- a/trunk/drivers/net/irda/bfin_sir.c +++ b/trunk/drivers/net/irda/bfin_sir.c @@ -158,7 +158,7 @@ static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed) /* If not add the 'RPOLC', we can't catch the receive interrupt. * It's related with the HW layout and the IR transiver. */ - val |= IREN | RPOLC; + val |= UMOD_IRDA | RPOLC; UART_PUT_GCTL(port, val); return ret; } @@ -432,7 +432,7 @@ static void bfin_sir_shutdown(struct bfin_sir_port *port, struct net_device *dev bfin_sir_stop_rx(port); val = UART_GET_GCTL(port); - val &= ~(UCEN | IREN | RPOLC); + val &= ~(UCEN | UMOD_MASK | RPOLC); UART_PUT_GCTL(port, val); #ifdef CONFIG_SIR_BFIN_DMA @@ -518,10 +518,10 @@ static void bfin_sir_send_work(struct work_struct *work) * reset all the UART. 
*/ val = UART_GET_GCTL(port); - val &= ~(IREN | RPOLC); + val &= ~(UMOD_MASK | RPOLC); UART_PUT_GCTL(port, val); SSYNC(); - val |= IREN | RPOLC; + val |= UMOD_IRDA | RPOLC; UART_PUT_GCTL(port, val); SSYNC(); /* bfin_sir_set_speed(port, self->speed); */ diff --git a/trunk/drivers/net/irda/ks959-sir.c b/trunk/drivers/net/irda/ks959-sir.c index 824e2a93fe8a..5f3aeac3f86d 100644 --- a/trunk/drivers/net/irda/ks959-sir.c +++ b/trunk/drivers/net/irda/ks959-sir.c @@ -542,6 +542,7 @@ static int ks959_net_open(struct net_device *netdev) sprintf(hwname, "usb#%d", kingsun->usbdev->devnum); kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname); if (!kingsun->irlap) { + err = -ENOMEM; dev_err(&kingsun->usbdev->dev, "irlap_open failed\n"); goto free_mem; } diff --git a/trunk/drivers/net/irda/ksdazzle-sir.c b/trunk/drivers/net/irda/ksdazzle-sir.c index 5a278ab83c2f..2d4b6a1ab202 100644 --- a/trunk/drivers/net/irda/ksdazzle-sir.c +++ b/trunk/drivers/net/irda/ksdazzle-sir.c @@ -436,6 +436,7 @@ static int ksdazzle_net_open(struct net_device *netdev) sprintf(hwname, "usb#%d", kingsun->usbdev->devnum); kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname); if (!kingsun->irlap) { + err = -ENOMEM; dev_err(&kingsun->usbdev->dev, "irlap_open failed\n"); goto free_mem; } diff --git a/trunk/drivers/net/macvtap.c b/trunk/drivers/net/macvtap.c index 0737bd4d1669..0f0f9ce3a776 100644 --- a/trunk/drivers/net/macvtap.c +++ b/trunk/drivers/net/macvtap.c @@ -94,7 +94,8 @@ static int get_slot(struct macvlan_dev *vlan, struct macvtap_queue *q) int i; for (i = 0; i < MAX_MACVTAP_QUEUES; i++) { - if (rcu_dereference(vlan->taps[i]) == q) + if (rcu_dereference_protected(vlan->taps[i], + lockdep_is_held(&macvtap_lock)) == q) return i; } diff --git a/trunk/drivers/net/netconsole.c b/trunk/drivers/net/netconsole.c index f9347ea3d381..b3321129a83c 100644 --- a/trunk/drivers/net/netconsole.c +++ b/trunk/drivers/net/netconsole.c @@ -640,15 +640,9 @@ static int netconsole_netdev_event(struct notifier_block *this, * rtnl_lock already held */ if (nt->np.dev) { - spin_unlock_irqrestore( - &target_list_lock, - flags); __netpoll_cleanup(&nt->np); - spin_lock_irqsave(&target_list_lock, - flags); dev_put(nt->np.dev); nt->np.dev = NULL; - netconsole_target_put(nt); } nt->enabled = 0; stopped = true; diff --git a/trunk/drivers/net/phy/mdio-mux-gpio.c b/trunk/drivers/net/phy/mdio-mux-gpio.c index e0cc4ef33dee..eefe49e8713c 100644 --- a/trunk/drivers/net/phy/mdio-mux-gpio.c +++ b/trunk/drivers/net/phy/mdio-mux-gpio.c @@ -101,7 +101,6 @@ static int __devinit mdio_mux_gpio_probe(struct platform_device *pdev) n--; gpio_free(s->gpio[n]); } - devm_kfree(&pdev->dev, s); return r; } diff --git a/trunk/drivers/net/phy/mdio-mux.c b/trunk/drivers/net/phy/mdio-mux.c index 5c120189ec86..4d4d25efc1e1 100644 --- a/trunk/drivers/net/phy/mdio-mux.c +++ b/trunk/drivers/net/phy/mdio-mux.c @@ -132,7 +132,7 @@ int mdio_mux_init(struct device *dev, pb->mii_bus = parent_bus; ret_val = -ENODEV; - for_each_child_of_node(dev->of_node, child_bus_node) { + for_each_available_child_of_node(dev->of_node, child_bus_node) { u32 v; r = of_property_read_u32(child_bus_node, "reg", &v); diff --git a/trunk/drivers/net/ppp/pptp.c b/trunk/drivers/net/ppp/pptp.c index 1c98321b56cc..162464fe86bf 100644 --- a/trunk/drivers/net/ppp/pptp.c +++ b/trunk/drivers/net/ppp/pptp.c @@ -189,7 +189,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) if (sk_pppox(po)->sk_state & PPPOX_DEAD) goto tx_error; - rt = ip_route_output_ports(&init_net, &fl4, NULL, + rt = 
ip_route_output_ports(sock_net(sk), &fl4, NULL, opt->dst_addr.sin_addr.s_addr, opt->src_addr.sin_addr.s_addr, 0, 0, IPPROTO_GRE, @@ -468,7 +468,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr, po->chan.private = sk; po->chan.ops = &pptp_chan_ops; - rt = ip_route_output_ports(&init_net, &fl4, sk, + rt = ip_route_output_ports(sock_net(sk), &fl4, sk, opt->dst_addr.sin_addr.s_addr, opt->src_addr.sin_addr.s_addr, 0, 0, diff --git a/trunk/drivers/net/team/team.c b/trunk/drivers/net/team/team.c index 87707ab39430..341b65dbbcd3 100644 --- a/trunk/drivers/net/team/team.c +++ b/trunk/drivers/net/team/team.c @@ -795,16 +795,17 @@ static void team_port_leave(struct team *team, struct team_port *port) } #ifdef CONFIG_NET_POLL_CONTROLLER -static int team_port_enable_netpoll(struct team *team, struct team_port *port) +static int team_port_enable_netpoll(struct team *team, struct team_port *port, + gfp_t gfp) { struct netpoll *np; int err; - np = kzalloc(sizeof(*np), GFP_KERNEL); + np = kzalloc(sizeof(*np), gfp); if (!np) return -ENOMEM; - err = __netpoll_setup(np, port->dev); + err = __netpoll_setup(np, port->dev, gfp); if (err) { kfree(np); return err; @@ -833,7 +834,8 @@ static struct netpoll_info *team_netpoll_info(struct team *team) } #else -static int team_port_enable_netpoll(struct team *team, struct team_port *port) +static int team_port_enable_netpoll(struct team *team, struct team_port *port, + gfp_t gfp) { return 0; } @@ -913,7 +915,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev) } if (team_netpoll_info(team)) { - err = team_port_enable_netpoll(team, port); + err = team_port_enable_netpoll(team, port, GFP_KERNEL); if (err) { netdev_err(dev, "Failed to enable netpoll on device %s\n", portname); @@ -1443,7 +1445,7 @@ static void team_netpoll_cleanup(struct net_device *dev) } static int team_netpoll_setup(struct net_device *dev, - struct netpoll_info *npifo) + struct netpoll_info *npifo, gfp_t gfp) { struct team *team = netdev_priv(dev); struct team_port *port; @@ -1451,7 +1453,7 @@ static int team_netpoll_setup(struct net_device *dev, mutex_lock(&team->lock); list_for_each_entry(port, &team->port_list, list) { - err = team_port_enable_netpoll(team, port); + err = team_port_enable_netpoll(team, port, gfp); if (err) { __team_netpoll_cleanup(team); break; diff --git a/trunk/drivers/net/tun.c b/trunk/drivers/net/tun.c index 926d4db5cb38..3a16d4fdaa05 100644 --- a/trunk/drivers/net/tun.c +++ b/trunk/drivers/net/tun.c @@ -187,7 +187,6 @@ static void __tun_detach(struct tun_struct *tun) netif_tx_lock_bh(tun->dev); netif_carrier_off(tun->dev); tun->tfile = NULL; - tun->socket.file = NULL; netif_tx_unlock_bh(tun->dev); /* Drop read queue */ diff --git a/trunk/drivers/net/usb/cdc-phonet.c b/trunk/drivers/net/usb/cdc-phonet.c index 64610048ce87..7d78669000d7 100644 --- a/trunk/drivers/net/usb/cdc-phonet.c +++ b/trunk/drivers/net/usb/cdc-phonet.c @@ -232,6 +232,7 @@ static int usbpn_open(struct net_device *dev) struct urb *req = usb_alloc_urb(0, GFP_KERNEL); if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) { + usb_free_urb(req); usbpn_close(dev); return -ENOMEM; } diff --git a/trunk/drivers/net/usb/cdc_ncm.c b/trunk/drivers/net/usb/cdc_ncm.c index f4ce5957df32..4cd582a4f625 100644 --- a/trunk/drivers/net/usb/cdc_ncm.c +++ b/trunk/drivers/net/usb/cdc_ncm.c @@ -1225,6 +1225,26 @@ static const struct usb_device_id cdc_devs[] = { .driver_info = (unsigned long) &wwan_info, }, + /* Dell branded MBM devices like DW5550 */ + { .match_flags = 
USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_VENDOR, + .idVendor = 0x413c, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM, + .bInterfaceProtocol = USB_CDC_PROTO_NONE, + .driver_info = (unsigned long) &wwan_info, + }, + + /* Toshiba branded MBM devices */ + { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_VENDOR, + .idVendor = 0x0930, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM, + .bInterfaceProtocol = USB_CDC_PROTO_NONE, + .driver_info = (unsigned long) &wwan_info, + }, + /* Generic CDC-NCM devices */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), diff --git a/trunk/drivers/net/usb/qmi_wwan.c b/trunk/drivers/net/usb/qmi_wwan.c index 2ea126a16d79..adfab3fc5478 100644 --- a/trunk/drivers/net/usb/qmi_wwan.c +++ b/trunk/drivers/net/usb/qmi_wwan.c @@ -247,30 +247,12 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf) */ static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf) { - int rv; struct qmi_wwan_state *info = (void *)&dev->data; - /* ZTE makes devices where the interface descriptors and endpoint - * configurations of two or more interfaces are identical, even - * though the functions are completely different. If set, then - * driver_info->data is a bitmap of acceptable interface numbers - * allowing us to bind to one such interface without binding to - * all of them - */ - if (dev->driver_info->data && - !test_bit(intf->cur_altsetting->desc.bInterfaceNumber, &dev->driver_info->data)) { - dev_info(&intf->dev, "not on our whitelist - ignored"); - rv = -ENODEV; - goto err; - } - /* control and data is shared */ info->control = intf; info->data = intf; - rv = qmi_wwan_register_subdriver(dev); - -err: - return rv; + return qmi_wwan_register_subdriver(dev); } static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf) @@ -356,214 +338,64 @@ static const struct driver_info qmi_wwan_shared = { .manage_power = qmi_wwan_manage_power, }; -static const struct driver_info qmi_wwan_force_int0 = { - .description = "Qualcomm WWAN/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .data = BIT(0), /* interface whitelist bitmap */ -}; - -static const struct driver_info qmi_wwan_force_int1 = { - .description = "Qualcomm WWAN/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .data = BIT(1), /* interface whitelist bitmap */ -}; - -static const struct driver_info qmi_wwan_force_int2 = { - .description = "Qualcomm WWAN/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .data = BIT(2), /* interface whitelist bitmap */ -}; - -static const struct driver_info qmi_wwan_force_int3 = { - .description = "Qualcomm WWAN/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .data = BIT(3), /* interface whitelist bitmap */ -}; - -static const struct driver_info qmi_wwan_force_int4 = { - .description = "Qualcomm WWAN/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .data = BIT(4), /* interface whitelist bitmap */ -}; - -/* Sierra Wireless provide equally useless interface descriptors - * Devices 
in QMI mode can be switched between two different - * configurations: - * a) USB interface #8 is QMI/wwan - * b) USB interfaces #8, #19 and #20 are QMI/wwan - * - * Both configurations provide a number of other interfaces (serial++), - * some of which have the same endpoint configuration as we expect, so - * a whitelist or blacklist is necessary. - * - * FIXME: The below whitelist should include BIT(20). It does not - * because I cannot get it to work... - */ -static const struct driver_info qmi_wwan_sierra = { - .description = "Sierra Wireless wwan/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .data = BIT(8) | BIT(19), /* interface whitelist bitmap */ -}; - #define HUAWEI_VENDOR_ID 0x12D1 +/* map QMI/wwan function by a fixed interface number */ +#define QMI_FIXED_INTF(vend, prod, num) \ + USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \ + .driver_info = (unsigned long)&qmi_wwan_shared + /* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */ #define QMI_GOBI1K_DEVICE(vend, prod) \ - USB_DEVICE(vend, prod), \ - .driver_info = (unsigned long)&qmi_wwan_force_int3 + QMI_FIXED_INTF(vend, prod, 3) -/* Gobi 2000 and Gobi 3000 QMI/wwan interface number is 0 according to qcserial */ +/* Gobi 2000/3000 QMI/wwan interface number is 0 according to qcserial */ #define QMI_GOBI_DEVICE(vend, prod) \ - USB_DEVICE(vend, prod), \ - .driver_info = (unsigned long)&qmi_wwan_force_int0 + QMI_FIXED_INTF(vend, prod, 0) static const struct usb_device_id products[] = { + /* 1. CDC ECM like devices match on the control interface */ { /* Huawei E392, E398 and possibly others sharing both device id and more... */ - .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = HUAWEI_VENDOR_ID, - .bInterfaceClass = USB_CLASS_VENDOR_SPEC, - .bInterfaceSubClass = 1, - .bInterfaceProtocol = 9, /* CDC Ethernet *control* interface */ + USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 9), .driver_info = (unsigned long)&qmi_wwan_info, }, { /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */ - .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = HUAWEI_VENDOR_ID, - .bInterfaceClass = USB_CLASS_VENDOR_SPEC, - .bInterfaceSubClass = 1, - .bInterfaceProtocol = 57, /* CDC Ethernet *control* interface */ + USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57), .driver_info = (unsigned long)&qmi_wwan_info, }, - { /* Huawei E392, E398 and possibly others in "Windows mode" - * using a combined control and data interface without any CDC - * functional descriptors - */ - .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = HUAWEI_VENDOR_ID, - .bInterfaceClass = USB_CLASS_VENDOR_SPEC, - .bInterfaceSubClass = 1, - .bInterfaceProtocol = 17, + + /* 2. 
Combined interface devices matching on class+protocol */ + { /* Huawei E392, E398 and possibly others in "Windows mode" */ + USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17), .driver_info = (unsigned long)&qmi_wwan_shared, }, { /* Pantech UML290 */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x106c, - .idProduct = 0x3718, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xf0, - .bInterfaceProtocol = 0xff, + USB_DEVICE_AND_INTERFACE_INFO(0x106c, 0x3718, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff), .driver_info = (unsigned long)&qmi_wwan_shared, }, - { /* ZTE MF820D */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x0167, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE MF821D */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x0326, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE (Vodafone) K3520-Z */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x0055, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int1, - }, - { /* ZTE (Vodafone) K3565-Z */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x0063, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE (Vodafone) K3570-Z */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x1008, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE (Vodafone) K3571-Z */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x1010, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE (Vodafone) K3765-Z */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x2002, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE (Vodafone) K4505-Z */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x0104, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE MF60 */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x1402, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int2, - }, - { /* Sierra Wireless MC77xx in QMI mode */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x1199, - .idProduct = 0x68a2, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = 
(unsigned long)&qmi_wwan_sierra, + { /* Pantech UML290 - newer firmware */ + USB_DEVICE_AND_INTERFACE_INFO(0x106c, 0x3718, USB_CLASS_VENDOR_SPEC, 0xf1, 0xff), + .driver_info = (unsigned long)&qmi_wwan_shared, }, - /* Gobi 1000 devices */ + /* 3. Combined interface devices matching on interface number */ + {QMI_FIXED_INTF(0x19d2, 0x0055, 1)}, /* ZTE (Vodafone) K3520-Z */ + {QMI_FIXED_INTF(0x19d2, 0x0063, 4)}, /* ZTE (Vodafone) K3565-Z */ + {QMI_FIXED_INTF(0x19d2, 0x0104, 4)}, /* ZTE (Vodafone) K4505-Z */ + {QMI_FIXED_INTF(0x19d2, 0x0167, 4)}, /* ZTE MF820D */ + {QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */ + {QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */ + {QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */ + {QMI_FIXED_INTF(0x19d2, 0x1018, 3)}, /* ZTE (Vodafone) K5006-Z */ + {QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */ + {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ + {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ + {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ + {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ + {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ + {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ + + /* 4. Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ {QMI_GOBI1K_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */ @@ -579,9 +411,11 @@ static const struct usb_device_id products[] = { {QMI_GOBI1K_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */ - /* Gobi 2000 and 3000 devices */ + /* 5. Gobi 2000 and 3000 devices */ {QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */ + {QMI_GOBI_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */ {QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */ + {QMI_GOBI_DEVICE(0x05c6, 0x920d)}, /* Gobi 3000 Composite */ {QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */ {QMI_GOBI_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */ {QMI_GOBI_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */ @@ -589,6 +423,8 @@ static const struct usb_device_id products[] = { {QMI_GOBI_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */ {QMI_GOBI_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */ {QMI_GOBI_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */ + {QMI_GOBI_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */ + {QMI_GOBI_DEVICE(0x1199, 0x68a9)}, /* Sierra Wireless Modem */ {QMI_GOBI_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {QMI_GOBI_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {QMI_GOBI_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ @@ -600,11 +436,16 @@ static const struct usb_device_id products[] = { {QMI_GOBI_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {QMI_GOBI_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {QMI_GOBI_DEVICE(0x1199, 0x9011)}, /* Sierra Wireless Gobi 2000 Modem device (MC8305) */ + {QMI_FIXED_INTF(0x1199, 0x9011, 5)}, /* alternate interface number!? 
*/ {QMI_GOBI_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */ {QMI_GOBI_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */ {QMI_GOBI_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */ {QMI_GOBI_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */ {QMI_GOBI_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */ + {QMI_GOBI_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */ + {QMI_GOBI_DEVICE(0x12d1, 0x14f1)}, /* Sony Gobi 3000 Composite */ + {QMI_GOBI_DEVICE(0x1410, 0xa021)}, /* Foxconn Gobi 3000 Modem device (Novatel E396) */ + { } /* END */ }; MODULE_DEVICE_TABLE(usb, products); diff --git a/trunk/drivers/net/usb/sierra_net.c b/trunk/drivers/net/usb/sierra_net.c index d75d1f56becf..7be49ea60b6d 100644 --- a/trunk/drivers/net/usb/sierra_net.c +++ b/trunk/drivers/net/usb/sierra_net.c @@ -68,15 +68,8 @@ static atomic_t iface_counter = ATOMIC_INIT(0); */ #define SIERRA_NET_USBCTL_BUF_LEN 1024 -/* list of interface numbers - used for constructing interface lists */ -struct sierra_net_iface_info { - const u32 infolen; /* number of interface numbers on list */ - const u8 *ifaceinfo; /* pointer to the array holding the numbers */ -}; - struct sierra_net_info_data { u16 rx_urb_size; - struct sierra_net_iface_info whitelist; }; /* Private data structure */ @@ -637,21 +630,6 @@ static int sierra_net_change_mtu(struct net_device *net, int new_mtu) return usbnet_change_mtu(net, new_mtu); } -static int is_whitelisted(const u8 ifnum, - const struct sierra_net_iface_info *whitelist) -{ - if (whitelist) { - const u8 *list = whitelist->ifaceinfo; - int i; - - for (i = 0; i < whitelist->infolen; i++) { - if (list[i] == ifnum) - return 1; - } - } - return 0; -} - static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap) { int result = 0; @@ -706,11 +684,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) dev_dbg(&dev->udev->dev, "%s", __func__); ifacenum = intf->cur_altsetting->desc.bInterfaceNumber; - /* We only accept certain interfaces */ - if (!is_whitelisted(ifacenum, &data->whitelist)) { - dev_dbg(&dev->udev->dev, "Ignoring interface: %d", ifacenum); - return -ENODEV; - } numendpoints = intf->cur_altsetting->desc.bNumEndpoints; /* We have three endpoints, bulk in and out, and a status */ if (numendpoints != 3) { @@ -945,13 +918,8 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb, return NULL; } -static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 }; static const struct sierra_net_info_data sierra_net_info_data_direct_ip = { .rx_urb_size = 8 * 1024, - .whitelist = { - .infolen = ARRAY_SIZE(sierra_net_ifnum_list), - .ifaceinfo = sierra_net_ifnum_list - } }; static const struct driver_info sierra_net_info_direct_ip = { @@ -965,15 +933,19 @@ static const struct driver_info sierra_net_info_direct_ip = { .data = (unsigned long)&sierra_net_info_data_direct_ip, }; +#define DIRECT_IP_DEVICE(vend, prod) \ + {USB_DEVICE_INTERFACE_NUMBER(vend, prod, 7), \ + .driver_info = (unsigned long)&sierra_net_info_direct_ip}, \ + {USB_DEVICE_INTERFACE_NUMBER(vend, prod, 10), \ + .driver_info = (unsigned long)&sierra_net_info_direct_ip}, \ + {USB_DEVICE_INTERFACE_NUMBER(vend, prod, 11), \ + .driver_info = (unsigned long)&sierra_net_info_direct_ip} + static const struct usb_device_id products[] = { - {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ - .driver_info = (unsigned long) &sierra_net_info_direct_ip}, - {USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct 
IP modem */ - .driver_info = (unsigned long) &sierra_net_info_direct_ip}, - {USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */ - .driver_info = (unsigned long) &sierra_net_info_direct_ip}, - {USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */ - .driver_info = (unsigned long) &sierra_net_info_direct_ip}, + DIRECT_IP_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ + DIRECT_IP_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */ + DIRECT_IP_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */ + DIRECT_IP_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */ {}, /* last item */ }; diff --git a/trunk/drivers/net/usb/usbnet.c b/trunk/drivers/net/usb/usbnet.c index 8531c1caac28..fd4b26d46fd5 100644 --- a/trunk/drivers/net/usb/usbnet.c +++ b/trunk/drivers/net/usb/usbnet.c @@ -1573,7 +1573,7 @@ int usbnet_resume (struct usb_interface *intf) netif_device_present(dev->net) && !timer_pending(&dev->delay) && !test_bit(EVENT_RX_HALT, &dev->flags)) - rx_alloc_submit(dev, GFP_KERNEL); + rx_alloc_submit(dev, GFP_NOIO); if (!(dev->txq.qlen >= TX_QLEN(dev))) netif_tx_wake_all_queues(dev->net); diff --git a/trunk/drivers/net/vmxnet3/vmxnet3_drv.c b/trunk/drivers/net/vmxnet3/vmxnet3_drv.c index 93e0cfb739b8..ce9d4f2c9776 100644 --- a/trunk/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/trunk/drivers/net/vmxnet3/vmxnet3_drv.c @@ -3019,6 +3019,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, netdev->watchdog_timeo = 5 * HZ; INIT_WORK(&adapter->work, vmxnet3_reset_work); + set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); if (adapter->intr.type == VMXNET3_IT_MSIX) { int i; @@ -3043,7 +3044,6 @@ vmxnet3_probe_device(struct pci_dev *pdev, goto err_register; } - set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); vmxnet3_check_link(adapter, false); atomic_inc(&devices_found); return 0; diff --git a/trunk/drivers/net/wan/dscc4.c b/trunk/drivers/net/wan/dscc4.c index 9eb6479306d6..ef36cafd44b7 100644 --- a/trunk/drivers/net/wan/dscc4.c +++ b/trunk/drivers/net/wan/dscc4.c @@ -774,14 +774,15 @@ static int __devinit dscc4_init_one(struct pci_dev *pdev, } /* Global interrupt queue */ writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1); + + rc = -ENOMEM; + priv->iqcfg = (__le32 *) pci_alloc_consistent(pdev, IRQ_RING_SIZE*sizeof(__le32), &priv->iqcfg_dma); if (!priv->iqcfg) goto err_free_irq_5; writel(priv->iqcfg_dma, ioaddr + IQCFG); - rc = -ENOMEM; - /* * SCC 0-3 private rx/tx irq structures * IQRX/TXi needs to be set soon. Learned it the hard way... 
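Several hunks in this stretch (mlx4 mcg.c, ks959-sir.c, ksdazzle-sir.c, cdc-phonet.c, dscc4.c, and the i2400m change below) fix the same class of bug: an error path jumps to a shared cleanup label without first assigning the error code, so the function can return 0 or an uninitialized value on failure. A minimal sketch of the corrected shape, using hypothetical names (struct widget, setup_widget(), register_widget()):

#include <linux/slab.h>
#include <linux/errno.h>

struct widget {
	int id;
};

int register_widget(struct widget *w);	/* hypothetical helper */

static int setup_widget(struct widget **out)
{
	struct widget *w;
	int err;

	w = kzalloc(sizeof(*w), GFP_KERNEL);
	if (!w) {
		err = -ENOMEM;	/* assign before the goto, else 'err' is uninitialized */
		goto err_out;
	}

	err = register_widget(w);
	if (err)
		goto err_free;

	*out = w;
	return 0;

err_free:
	kfree(w);
err_out:
	return err;
}

The dscc4_init_one() hunk above applies the same idea the other way around: it moves "rc = -ENOMEM" ahead of the pci_alloc_consistent() call so the existing goto on allocation failure returns the right code.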
diff --git a/trunk/drivers/net/wimax/i2400m/fw.c b/trunk/drivers/net/wimax/i2400m/fw.c index 283237f6f074..def12b38cbf7 100644 --- a/trunk/drivers/net/wimax/i2400m/fw.c +++ b/trunk/drivers/net/wimax/i2400m/fw.c @@ -326,8 +326,10 @@ int i2400m_barker_db_init(const char *_options) unsigned barker; options_orig = kstrdup(_options, GFP_KERNEL); - if (options_orig == NULL) + if (options_orig == NULL) { + result = -ENOMEM; goto error_parse; + } options = options_orig; while ((token = strsep(&options, ",")) != NULL) { diff --git a/trunk/drivers/net/wireless/at76c50x-usb.c b/trunk/drivers/net/wireless/at76c50x-usb.c index efc162e0b511..88b8d64c90f1 100644 --- a/trunk/drivers/net/wireless/at76c50x-usb.c +++ b/trunk/drivers/net/wireless/at76c50x-usb.c @@ -342,7 +342,7 @@ static int at76_dfu_get_status(struct usb_device *udev, return ret; } -static u8 at76_dfu_get_state(struct usb_device *udev, u8 *state) +static int at76_dfu_get_state(struct usb_device *udev, u8 *state) { int ret; diff --git a/trunk/drivers/net/wireless/ath/ath5k/base.c b/trunk/drivers/net/wireless/ath/ath5k/base.c index 8c4c040a47b8..2aab20ee9f38 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/base.c +++ b/trunk/drivers/net/wireless/ath/ath5k/base.c @@ -2056,9 +2056,7 @@ ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf) void ath5k_beacon_config(struct ath5k_hw *ah) { - unsigned long flags; - - spin_lock_irqsave(&ah->block, flags); + spin_lock_bh(&ah->block); ah->bmisscount = 0; ah->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA); @@ -2085,7 +2083,7 @@ ath5k_beacon_config(struct ath5k_hw *ah) ath5k_hw_set_imr(ah, ah->imask); mmiowb(); - spin_unlock_irqrestore(&ah->block, flags); + spin_unlock_bh(&ah->block); } static void ath5k_tasklet_beacon(unsigned long data) diff --git a/trunk/drivers/net/wireless/ath/ath5k/eeprom.c b/trunk/drivers/net/wireless/ath/ath5k/eeprom.c index 4026c906cc7b..b7e0258887e7 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/eeprom.c +++ b/trunk/drivers/net/wireless/ath/ath5k/eeprom.c @@ -1482,7 +1482,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode) case AR5K_EEPROM_MODE_11A: offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version); rate_pcal_info = ee->ee_rate_tpwr_a; - ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_CHAN; + ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_RATE_CHAN; break; case AR5K_EEPROM_MODE_11B: offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version); diff --git a/trunk/drivers/net/wireless/ath/ath5k/eeprom.h b/trunk/drivers/net/wireless/ath/ath5k/eeprom.h index dc2bcfeadeb4..94a9bbea6874 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/eeprom.h +++ b/trunk/drivers/net/wireless/ath/ath5k/eeprom.h @@ -182,6 +182,7 @@ #define AR5K_EEPROM_EEP_DELTA 10 #define AR5K_EEPROM_N_MODES 3 #define AR5K_EEPROM_N_5GHZ_CHAN 10 +#define AR5K_EEPROM_N_5GHZ_RATE_CHAN 8 #define AR5K_EEPROM_N_2GHZ_CHAN 3 #define AR5K_EEPROM_N_2GHZ_CHAN_2413 4 #define AR5K_EEPROM_N_2GHZ_CHAN_MAX 4 diff --git a/trunk/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/trunk/drivers/net/wireless/ath/ath5k/mac80211-ops.c index 260e7dc7f751..d56453e43d7e 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/mac80211-ops.c +++ b/trunk/drivers/net/wireless/ath/ath5k/mac80211-ops.c @@ -254,7 +254,6 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ath5k_vif *avf = (void *)vif->drv_priv; struct ath5k_hw *ah = hw->priv; struct ath_common *common = ath5k_hw_common(ah); - unsigned long flags; mutex_lock(&ah->lock); @@ -300,9 +299,9 @@ 
ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, } if (changes & BSS_CHANGED_BEACON) { - spin_lock_irqsave(&ah->block, flags); + spin_lock_bh(&ah->block); ath5k_beacon_update(hw, vif); - spin_unlock_irqrestore(&ah->block, flags); + spin_unlock_bh(&ah->block); } if (changes & BSS_CHANGED_BEACON_ENABLED) diff --git a/trunk/drivers/net/wireless/ath/ath9k/hw.c b/trunk/drivers/net/wireless/ath/ath9k/hw.c index cfa91ab7acf8..60b6a9daff7e 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/hw.c +++ b/trunk/drivers/net/wireless/ath/ath9k/hw.c @@ -730,6 +730,7 @@ int ath9k_hw_init(struct ath_hw *ah) case AR9300_DEVID_QCA955X: case AR9300_DEVID_AR9580: case AR9300_DEVID_AR9462: + case AR9485_DEVID_AR1111: break; default: if (common->bus_ops->ath_bus_type == ATH_USB) diff --git a/trunk/drivers/net/wireless/ath/ath9k/hw.h b/trunk/drivers/net/wireless/ath/ath9k/hw.h index dd0c146d81dc..ce7332c64efb 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/hw.h +++ b/trunk/drivers/net/wireless/ath/ath9k/hw.h @@ -49,6 +49,7 @@ #define AR9300_DEVID_AR9462 0x0034 #define AR9300_DEVID_AR9330 0x0035 #define AR9300_DEVID_QCA955X 0x0038 +#define AR9485_DEVID_AR1111 0x0037 #define AR5416_AR9100_DEVID 0x000b diff --git a/trunk/drivers/net/wireless/ath/ath9k/mac.c b/trunk/drivers/net/wireless/ath/ath9k/mac.c index 7990cd55599c..b42be910a83d 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/mac.c +++ b/trunk/drivers/net/wireless/ath/ath9k/mac.c @@ -773,15 +773,10 @@ bool ath9k_hw_intrpend(struct ath_hw *ah) } EXPORT_SYMBOL(ath9k_hw_intrpend); -void ath9k_hw_disable_interrupts(struct ath_hw *ah) +void ath9k_hw_kill_interrupts(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); - if (!(ah->imask & ATH9K_INT_GLOBAL)) - atomic_set(&ah->intr_ref_cnt, -1); - else - atomic_dec(&ah->intr_ref_cnt); - ath_dbg(common, INTERRUPT, "disable IER\n"); REG_WRITE(ah, AR_IER, AR_IER_DISABLE); (void) REG_READ(ah, AR_IER); @@ -793,6 +788,17 @@ void ath9k_hw_disable_interrupts(struct ath_hw *ah) (void) REG_READ(ah, AR_INTR_SYNC_ENABLE); } } +EXPORT_SYMBOL(ath9k_hw_kill_interrupts); + +void ath9k_hw_disable_interrupts(struct ath_hw *ah) +{ + if (!(ah->imask & ATH9K_INT_GLOBAL)) + atomic_set(&ah->intr_ref_cnt, -1); + else + atomic_dec(&ah->intr_ref_cnt); + + ath9k_hw_kill_interrupts(ah); +} EXPORT_SYMBOL(ath9k_hw_disable_interrupts); void ath9k_hw_enable_interrupts(struct ath_hw *ah) diff --git a/trunk/drivers/net/wireless/ath/ath9k/mac.h b/trunk/drivers/net/wireless/ath/ath9k/mac.h index 0eba36dca6f8..4a745e68dd94 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/mac.h +++ b/trunk/drivers/net/wireless/ath/ath9k/mac.h @@ -738,6 +738,7 @@ bool ath9k_hw_intrpend(struct ath_hw *ah); void ath9k_hw_set_interrupts(struct ath_hw *ah); void ath9k_hw_enable_interrupts(struct ath_hw *ah); void ath9k_hw_disable_interrupts(struct ath_hw *ah); +void ath9k_hw_kill_interrupts(struct ath_hw *ah); void ar9002_hw_attach_mac_ops(struct ath_hw *ah); diff --git a/trunk/drivers/net/wireless/ath/ath9k/main.c b/trunk/drivers/net/wireless/ath/ath9k/main.c index 6049d8b82855..a22df749b8db 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/main.c +++ b/trunk/drivers/net/wireless/ath/ath9k/main.c @@ -462,8 +462,10 @@ irqreturn_t ath_isr(int irq, void *dev) if (!ath9k_hw_intrpend(ah)) return IRQ_NONE; - if(test_bit(SC_OP_HW_RESET, &sc->sc_flags)) + if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) { + ath9k_hw_kill_interrupts(ah); return IRQ_HANDLED; + } /* * Figure out the reason(s) for the interrupt. 
Note diff --git a/trunk/drivers/net/wireless/ath/ath9k/pci.c b/trunk/drivers/net/wireless/ath/ath9k/pci.c index 87b89d55e637..a978984d78a5 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/pci.c +++ b/trunk/drivers/net/wireless/ath/ath9k/pci.c @@ -37,6 +37,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = { { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */ { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */ { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */ + { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */ { 0 } }; @@ -320,6 +321,7 @@ static int ath_pci_suspend(struct device *device) * Otherwise the chip never moved to full sleep, * when no interface is up. */ + ath9k_stop_btcoex(sc); ath9k_hw_disable(sc->sc_ah); ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP); diff --git a/trunk/drivers/net/wireless/ath/ath9k/recv.c b/trunk/drivers/net/wireless/ath/ath9k/recv.c index 12aca02228c2..4480c0cc655f 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/recv.c +++ b/trunk/drivers/net/wireless/ath/ath9k/recv.c @@ -1044,7 +1044,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) struct ieee80211_hw *hw = sc->hw; struct ieee80211_hdr *hdr; int retval; - bool decrypt_error = false; struct ath_rx_status rs; enum ath9k_rx_qtype qtype; bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); @@ -1066,6 +1065,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) tsf_lower = tsf & 0xffffffff; do { + bool decrypt_error = false; /* If handling rx interrupt and flush is in progress => exit */ if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags) && (flush == 0)) break; diff --git a/trunk/drivers/net/wireless/b43/main.c b/trunk/drivers/net/wireless/b43/main.c index b80352b308d5..a140165dfee0 100644 --- a/trunk/drivers/net/wireless/b43/main.c +++ b/trunk/drivers/net/wireless/b43/main.c @@ -2719,32 +2719,37 @@ static int b43_gpio_init(struct b43_wldev *dev) if (dev->dev->chip_id == 0x4301) { mask |= 0x0060; set |= 0x0060; + } else if (dev->dev->chip_id == 0x5354) { + /* Don't allow overtaking buttons GPIOs */ + set &= 0x2; /* 0x2 is LED GPIO on BCM5354 */ } - if (dev->dev->chip_id == 0x5354) - set &= 0xff02; + if (0 /* FIXME: conditional unknown */ ) { b43_write16(dev, B43_MMIO_GPIO_MASK, b43_read16(dev, B43_MMIO_GPIO_MASK) | 0x0100); - mask |= 0x0180; - set |= 0x0180; + /* BT Coexistance Input */ + mask |= 0x0080; + set |= 0x0080; + /* BT Coexistance Out */ + mask |= 0x0100; + set |= 0x0100; } if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL) { + /* PA is controlled by gpio 9, let ucode handle it */ b43_write16(dev, B43_MMIO_GPIO_MASK, b43_read16(dev, B43_MMIO_GPIO_MASK) | 0x0200); mask |= 0x0200; set |= 0x0200; } - if (dev->dev->core_rev >= 2) - mask |= 0x0010; /* FIXME: This is redundant. 
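The b43_gpio_init() rework continues below with the actual GPIO control register writes, where the update changes from "& mask" to "& ~mask". The intent of that read-modify-write is easier to see in isolation; a minimal sketch, not b43 code:

	/* Read-modify-write: clear every bit the driver wants to own (mask),
	 * then OR in the requested values (set). Masking with "& mask"
	 * instead would zero all the unrelated bits in the register. */
	static u32 gpio_rmw(u32 old, u32 mask, u32 set)
	{
		return (old & ~mask) | set;
	}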
*/ switch (dev->dev->bus_type) { #ifdef CONFIG_B43_BCMA case B43_BUS_BCMA: bcma_cc_write32(&dev->dev->bdev->bus->drv_cc, BCMA_CC_GPIOCTL, (bcma_cc_read32(&dev->dev->bdev->bus->drv_cc, - BCMA_CC_GPIOCTL) & mask) | set); + BCMA_CC_GPIOCTL) & ~mask) | set); break; #endif #ifdef CONFIG_B43_SSB @@ -2753,7 +2758,7 @@ static int b43_gpio_init(struct b43_wldev *dev) if (gpiodev) ssb_write32(gpiodev, B43_GPIO_CONTROL, (ssb_read32(gpiodev, B43_GPIO_CONTROL) - & mask) | set); + & ~mask) | set); break; #endif } diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c index 57bf1d7ee80f..9ab24528f9b9 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c @@ -1188,7 +1188,7 @@ int brcmf_write_to_file(struct brcmf_pub *drvr, const u8 *buf, int size) kfree(buf); /* close file before return */ if (fp) - filp_close(fp, current->files); + filp_close(fp, NULL); /* restore previous address limit */ set_fs(old_fs); diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/channel.c index 9a4c63f927cb..7ed7d7577024 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/channel.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/channel.c @@ -382,9 +382,7 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec, { struct brcms_c_info *wlc = wlc_cm->wlc; struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel; - const struct ieee80211_reg_rule *reg_rule; struct txpwr_limits txpwr; - int ret; brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr); @@ -393,8 +391,7 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec, ); /* set or restore gmode as required by regulatory */ - ret = freq_reg_info(wlc->wiphy, ch->center_freq, 0, ®_rule); - if (!ret && (reg_rule->flags & NL80211_RRF_NO_OFDM)) + if (ch->flags & IEEE80211_CHAN_NO_OFDM) brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false); else brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false); diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index 9e79d47e077f..a5edebeb0b4f 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -121,7 +121,8 @@ static struct ieee80211_channel brcms_2ghz_chantable[] = { IEEE80211_CHAN_NO_HT40PLUS), CHAN2GHZ(14, 2484, IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS | - IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS) + IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS | + IEEE80211_CHAN_NO_OFDM) }; static struct ieee80211_channel brcms_5ghz_nphy_chantable[] = { @@ -1232,6 +1233,9 @@ uint brcms_reset(struct brcms_info *wl) /* dpc will not be rescheduled */ wl->resched = false; + /* inform publicly that interface is down */ + wl->pub->up = false; + return 0; } diff --git a/trunk/drivers/net/wireless/ipw2x00/ipw2100.c b/trunk/drivers/net/wireless/ipw2x00/ipw2100.c index 95aa8e1683ec..83324b321652 100644 --- a/trunk/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/trunk/drivers/net/wireless/ipw2x00/ipw2100.c @@ -2042,7 +2042,8 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status) return; } len = ETH_ALEN; - ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, &bssid, &len); + ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, bssid, + &len); if (ret) { 
IPW_DEBUG_INFO("failed querying ordinals at line %d\n", __LINE__); diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/trunk/drivers/net/wireless/iwlwifi/dvm/debugfs.c index 46782f1102ac..a47b306b522c 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/debugfs.c +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/debugfs.c @@ -124,6 +124,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, const struct fw_img *img; size_t bufsz; + if (!iwl_is_ready_rf(priv)) + return -EAGAIN; + /* default is to dump the entire data segment */ if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) { priv->dbgfs_sram_offset = 0x800000; diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/rs.c b/trunk/drivers/net/wireless/iwlwifi/dvm/rs.c index 6fddd2785e6e..a82f46c10f5e 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/rs.c +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/rs.c @@ -707,11 +707,14 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags, */ static bool rs_use_green(struct ieee80211_sta *sta) { - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - struct iwl_rxon_context *ctx = sta_priv->ctx; - - return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && - !(ctx->ht.non_gf_sta_present); + /* + * There's a bug somewhere in this code that causes the + * scaling to get stuck because GF+SGI can't be combined + * in SISO rates. Until we find that bug, disable GF, it + * has only limited benefit and we still interoperate with + * GF APs since we can always receive GF transmissions. + */ + return false; } /** diff --git a/trunk/drivers/net/wireless/iwlwifi/pcie/internal.h b/trunk/drivers/net/wireless/iwlwifi/pcie/internal.h index d9694c58208c..4ffc18dc3a57 100644 --- a/trunk/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/trunk/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -350,7 +350,7 @@ int iwl_queue_space(const struct iwl_queue *q); /***************************************************** * Error handling ******************************************************/ -int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display); +int iwl_dump_fh(struct iwl_trans *trans, char **buf); void iwl_dump_csr(struct iwl_trans *trans); /***************************************************** diff --git a/trunk/drivers/net/wireless/iwlwifi/pcie/rx.c b/trunk/drivers/net/wireless/iwlwifi/pcie/rx.c index 39a6ca1f009c..d1a61ba6247a 100644 --- a/trunk/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/trunk/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -555,7 +555,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans) } iwl_dump_csr(trans); - iwl_dump_fh(trans, NULL, false); + iwl_dump_fh(trans, NULL); iwl_op_mode_nic_error(trans->op_mode); } diff --git a/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c b/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c index 939c2f78df58..1e86ea2266d4 100644 --- a/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -1649,13 +1649,9 @@ static const char *get_fh_string(int cmd) #undef IWL_CMD } -int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display) +int iwl_dump_fh(struct iwl_trans *trans, char **buf) { int i; -#ifdef CONFIG_IWLWIFI_DEBUG - int pos = 0; - size_t bufsz = 0; -#endif static const u32 fh_tbl[] = { FH_RSCSR_CHNL0_STTS_WPTR_REG, FH_RSCSR_CHNL0_RBDCB_BASE_REG, @@ -1667,29 +1663,35 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display) FH_TSSR_TX_STATUS_REG, FH_TSSR_TX_ERROR_REG }; -#ifdef CONFIG_IWLWIFI_DEBUG - if (display) { - bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; + +#ifdef 
CONFIG_IWLWIFI_DEBUGFS + if (buf) { + int pos = 0; + size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; + *buf = kmalloc(bufsz, GFP_KERNEL); if (!*buf) return -ENOMEM; + pos += scnprintf(*buf + pos, bufsz - pos, "FH register values:\n"); - for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { + + for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) pos += scnprintf(*buf + pos, bufsz - pos, " %34s: 0X%08x\n", get_fh_string(fh_tbl[i]), iwl_read_direct32(trans, fh_tbl[i])); - } + return pos; } #endif + IWL_ERR(trans, "FH register values:\n"); - for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { + for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) IWL_ERR(trans, " %34s: 0X%08x\n", get_fh_string(fh_tbl[i]), iwl_read_direct32(trans, fh_tbl[i])); - } + return 0; } @@ -1982,11 +1984,11 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; - char *buf; + char *buf = NULL; int pos = 0; ssize_t ret = -EFAULT; - ret = pos = iwl_dump_fh(trans, &buf, true); + ret = pos = iwl_dump_fh(trans, &buf); if (buf) { ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); diff --git a/trunk/drivers/net/wireless/libertas/cfg.c b/trunk/drivers/net/wireless/libertas/cfg.c index eb5de800ed90..1c10b542ab23 100644 --- a/trunk/drivers/net/wireless/libertas/cfg.c +++ b/trunk/drivers/net/wireless/libertas/cfg.c @@ -1254,6 +1254,7 @@ static int lbs_associate(struct lbs_private *priv, netif_tx_wake_all_queues(priv->dev); } + kfree(cmd); done: lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); return ret; diff --git a/trunk/drivers/net/wireless/libertas/if_sdio.c b/trunk/drivers/net/wireless/libertas/if_sdio.c index 76caebaa4397..e970897f6ab5 100644 --- a/trunk/drivers/net/wireless/libertas/if_sdio.c +++ b/trunk/drivers/net/wireless/libertas/if_sdio.c @@ -1314,6 +1314,7 @@ static void if_sdio_remove(struct sdio_func *func) kfree(packet); } + kfree(card); lbs_deb_leave(LBS_DEB_SDIO); } diff --git a/trunk/drivers/net/wireless/libertas/if_usb.c b/trunk/drivers/net/wireless/libertas/if_usb.c index 55a77e41170a..27980778d992 100644 --- a/trunk/drivers/net/wireless/libertas/if_usb.c +++ b/trunk/drivers/net/wireless/libertas/if_usb.c @@ -10,6 +10,7 @@ #include #include #include +#include #ifdef CONFIG_OLPC #include diff --git a/trunk/drivers/net/wireless/libertas/main.c b/trunk/drivers/net/wireless/libertas/main.c index 58048189bd24..fe1ea43c5149 100644 --- a/trunk/drivers/net/wireless/libertas/main.c +++ b/trunk/drivers/net/wireless/libertas/main.c @@ -571,7 +571,10 @@ static int lbs_thread(void *data) netdev_info(dev, "Timeout submitting command 0x%04x\n", le16_to_cpu(cmdnode->cmdbuf->command)); lbs_complete_command(priv, cmdnode, -ETIMEDOUT); - if (priv->reset_card) + + /* Reset card, but only when it isn't in the process + * of being shutdown anyway. */ + if (!dev->dismantle && priv->reset_card) priv->reset_card(priv); } priv->cmd_timed_out = 0; diff --git a/trunk/drivers/net/wireless/p54/p54usb.c b/trunk/drivers/net/wireless/p54/p54usb.c index 7f207b6e9552..effb044a8a9d 100644 --- a/trunk/drivers/net/wireless/p54/p54usb.c +++ b/trunk/drivers/net/wireless/p54/p54usb.c @@ -42,7 +42,7 @@ MODULE_FIRMWARE("isl3887usb"); * whenever you add a new device. 
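The reworked iwl_dump_fh() above drops the separate "display" flag: whether the caller wants a formatted buffer is now inferred from the buf argument itself. The shape of that pattern, reduced to a sketch with made-up names (struct dev_ctx, NUM_REGS, reg_name() and reg_read() are placeholders, not iwlwifi symbols):

	/* One dump routine, two consumers: a debugfs reader passes a char**
	 * and gets an allocated, formatted buffer back; the error path
	 * passes NULL and the values go to the kernel log instead. */
	static int dump_regs(struct dev_ctx *ctx, char **buf)
	{
		int i, pos = 0;

		if (buf) {
			size_t bufsz = NUM_REGS * 48 + 40;

			*buf = kmalloc(bufsz, GFP_KERNEL);
			if (!*buf)
				return -ENOMEM;
			for (i = 0; i < NUM_REGS; i++)
				pos += scnprintf(*buf + pos, bufsz - pos,
						 " %34s: 0x%08x\n",
						 reg_name(i), reg_read(ctx, i));
			return pos;
		}

		for (i = 0; i < NUM_REGS; i++)
			pr_err(" %34s: 0x%08x\n", reg_name(i), reg_read(ctx, i));
		return 0;
	}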
*/ -static struct usb_device_id p54u_table[] __devinitdata = { +static struct usb_device_id p54u_table[] = { /* Version 1 devices (pci chip + net2280) */ {USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */ {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */ diff --git a/trunk/drivers/net/wireless/rndis_wlan.c b/trunk/drivers/net/wireless/rndis_wlan.c index 241162e8111d..7a4ae9ee1c63 100644 --- a/trunk/drivers/net/wireless/rndis_wlan.c +++ b/trunk/drivers/net/wireless/rndis_wlan.c @@ -1803,6 +1803,7 @@ static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev, struct cfg80211_pmksa *pmksa, int max_pmkids) { + struct ndis_80211_pmkid *new_pmkids; int i, err, newlen; unsigned int count; @@ -1833,11 +1834,12 @@ static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev, /* add new pmkid */ newlen = sizeof(*pmkids) + (count + 1) * sizeof(pmkids->bssid_info[0]); - pmkids = krealloc(pmkids, newlen, GFP_KERNEL); - if (!pmkids) { + new_pmkids = krealloc(pmkids, newlen, GFP_KERNEL); + if (!new_pmkids) { err = -ENOMEM; goto error; } + pmkids = new_pmkids; pmkids->length = cpu_to_le32(newlen); pmkids->bssid_info_count = cpu_to_le32(count + 1); diff --git a/trunk/drivers/net/wireless/rt2x00/rt2800lib.c b/trunk/drivers/net/wireless/rt2x00/rt2800lib.c index 88455b1b9fe0..cb8c2aca54e4 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/trunk/drivers/net/wireless/rt2x00/rt2800lib.c @@ -221,6 +221,67 @@ static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev, mutex_unlock(&rt2x00dev->csr_mutex); } +static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + int i, count; + + rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, ®); + if (rt2x00_get_field32(reg, WLAN_EN)) + return 0; + + rt2x00_set_field32(®, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff); + rt2x00_set_field32(®, FRC_WL_ANT_SET, 1); + rt2x00_set_field32(®, WLAN_CLK_EN, 0); + rt2x00_set_field32(®, WLAN_EN, 1); + rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); + + udelay(REGISTER_BUSY_DELAY); + + count = 0; + do { + /* + * Check PLL_LD & XTAL_RDY. 
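The rndis_wlan update_pmkid() change above is the classic krealloc() fix: assigning the result of krealloc() straight back to the only pointer loses the original allocation if the resize fails. The safe idiom, sketched with generic names:

	/* Keep the old pointer until the reallocation is known to succeed. */
	new_buf = krealloc(buf, newlen, GFP_KERNEL);
	if (!new_buf) {
		/* buf is still valid here and still has to be freed later */
		return -ENOMEM;
	}
	buf = new_buf;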
+ */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2800_register_read(rt2x00dev, CMB_CTRL, ®); + if (rt2x00_get_field32(reg, PLL_LD) && + rt2x00_get_field32(reg, XTAL_RDY)) + break; + udelay(REGISTER_BUSY_DELAY); + } + + if (i >= REGISTER_BUSY_COUNT) { + + if (count >= 10) + return -EIO; + + rt2800_register_write(rt2x00dev, 0x58, 0x018); + udelay(REGISTER_BUSY_DELAY); + rt2800_register_write(rt2x00dev, 0x58, 0x418); + udelay(REGISTER_BUSY_DELAY); + rt2800_register_write(rt2x00dev, 0x58, 0x618); + udelay(REGISTER_BUSY_DELAY); + count++; + } else { + count = 0; + } + + rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, ®); + rt2x00_set_field32(®, PCIE_APP0_CLK_REQ, 0); + rt2x00_set_field32(®, WLAN_CLK_EN, 1); + rt2x00_set_field32(®, WLAN_RESET, 1); + rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); + udelay(10); + rt2x00_set_field32(®, WLAN_RESET, 0); + rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); + udelay(10); + rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff); + } while (count != 0); + + return 0; +} + void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev, const u8 command, const u8 token, const u8 arg0, const u8 arg1) @@ -400,6 +461,13 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev, { unsigned int i; u32 reg; + int retval; + + if (rt2x00_rt(rt2x00dev, RT3290)) { + retval = rt2800_enable_wlan_rt3290(rt2x00dev); + if (retval) + return -EBUSY; + } /* * If driver doesn't wake up firmware here, diff --git a/trunk/drivers/net/wireless/rt2x00/rt2800pci.c b/trunk/drivers/net/wireless/rt2x00/rt2800pci.c index 235376e9cb04..98aa426a3564 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/trunk/drivers/net/wireless/rt2x00/rt2800pci.c @@ -980,66 +980,6 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) return rt2800_validate_eeprom(rt2x00dev); } -static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev) -{ - u32 reg; - int i, count; - - rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, ®); - if (rt2x00_get_field32(reg, WLAN_EN)) - return 0; - - rt2x00_set_field32(®, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff); - rt2x00_set_field32(®, FRC_WL_ANT_SET, 1); - rt2x00_set_field32(®, WLAN_CLK_EN, 0); - rt2x00_set_field32(®, WLAN_EN, 1); - rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); - - udelay(REGISTER_BUSY_DELAY); - - count = 0; - do { - /* - * Check PLL_LD & XTAL_RDY. 
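The same RT3290 bring-up loop is deleted from rt2800pci.c in the hunk that continues below, now that rt2800lib.c owns it and calls it from rt2800_load_firmware(). Reduced to its retry skeleton, with hypothetical helper names rather than driver code:

	/* Poll the PLL/XTAL ready bits a bounded number of times; if they
	 * never assert, pulse the clock control register (the three 0x58
	 * writes above) and try again, giving up after ten attempts. */
	int kicks = 0;

	while (!poll_pll_and_xtal_ready())	/* bounded poll with udelay() */
		if (++kicks > 10)
			return -EIO;
		else
			pulse_clock_control();

	/* ... then enable the WLAN clock and release WLAN_RESET ... */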
- */ - for (i = 0; i < REGISTER_BUSY_COUNT; i++) { - rt2800_register_read(rt2x00dev, CMB_CTRL, ®); - if (rt2x00_get_field32(reg, PLL_LD) && - rt2x00_get_field32(reg, XTAL_RDY)) - break; - udelay(REGISTER_BUSY_DELAY); - } - - if (i >= REGISTER_BUSY_COUNT) { - - if (count >= 10) - return -EIO; - - rt2800_register_write(rt2x00dev, 0x58, 0x018); - udelay(REGISTER_BUSY_DELAY); - rt2800_register_write(rt2x00dev, 0x58, 0x418); - udelay(REGISTER_BUSY_DELAY); - rt2800_register_write(rt2x00dev, 0x58, 0x618); - udelay(REGISTER_BUSY_DELAY); - count++; - } else { - count = 0; - } - - rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, ®); - rt2x00_set_field32(®, PCIE_APP0_CLK_REQ, 0); - rt2x00_set_field32(®, WLAN_CLK_EN, 1); - rt2x00_set_field32(®, WLAN_RESET, 1); - rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); - udelay(10); - rt2x00_set_field32(®, WLAN_RESET, 0); - rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); - udelay(10); - rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff); - } while (count != 0); - - return 0; -} static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; @@ -1062,17 +1002,6 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) if (retval) return retval; - /* - * In probe phase call rt2800_enable_wlan_rt3290 to enable wlan - * clk for rt3290. That avoid the MCU fail in start phase. - */ - if (rt2x00_rt(rt2x00dev, RT3290)) { - retval = rt2800_enable_wlan_rt3290(rt2x00dev); - - if (retval) - return retval; - } - /* * This device has multiple filters for control frames * and has a separate filter for PS Poll frames. diff --git a/trunk/drivers/net/wireless/rt2x00/rt61pci.c b/trunk/drivers/net/wireless/rt2x00/rt61pci.c index f32259686b45..3f7bc5cadf9a 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt61pci.c +++ b/trunk/drivers/net/wireless/rt2x00/rt61pci.c @@ -2243,8 +2243,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev) static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev) { - struct ieee80211_conf conf = { .flags = 0 }; - struct rt2x00lib_conf libconf = { .conf = &conf }; + struct rt2x00lib_conf libconf = { .conf = &rt2x00dev->hw->conf }; rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS); } diff --git a/trunk/drivers/net/wireless/rtl818x/rtl8187/dev.c b/trunk/drivers/net/wireless/rtl818x/rtl8187/dev.c index 71a30b026089..533024095c43 100644 --- a/trunk/drivers/net/wireless/rtl818x/rtl8187/dev.c +++ b/trunk/drivers/net/wireless/rtl818x/rtl8187/dev.c @@ -44,7 +44,7 @@ MODULE_AUTHOR("Larry Finger "); MODULE_DESCRIPTION("RTL8187/RTL8187B USB wireless driver"); MODULE_LICENSE("GPL"); -static struct usb_device_id rtl8187_table[] __devinitdata = { +static struct usb_device_id rtl8187_table[] = { /* Asus */ {USB_DEVICE(0x0b05, 0x171d), .driver_info = DEVICE_RTL8187}, /* Belkin */ diff --git a/trunk/drivers/net/xen-netfront.c b/trunk/drivers/net/xen-netfront.c index 30899901aef5..650f79a1f2bd 100644 --- a/trunk/drivers/net/xen-netfront.c +++ b/trunk/drivers/net/xen-netfront.c @@ -57,8 +57,7 @@ static const struct ethtool_ops xennet_ethtool_ops; struct netfront_cb { - struct page *page; - unsigned offset; + int pull_to; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) @@ -867,15 +866,9 @@ static int handle_incoming_queue(struct net_device *dev, struct sk_buff *skb; while ((skb = __skb_dequeue(rxq)) != NULL) { - struct page *page = NETFRONT_SKB_CB(skb)->page; - void *vaddr = page_address(page); - unsigned offset = NETFRONT_SKB_CB(skb)->offset; - - memcpy(skb->data, vaddr + offset, - skb_headlen(skb)); + 
int pull_to = NETFRONT_SKB_CB(skb)->pull_to; - if (page != skb_frag_page(&skb_shinfo(skb)->frags[0])) - __free_page(page); + __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); /* Ethernet work: Delayed to here as it peeks the header. */ skb->protocol = eth_type_trans(skb, dev); @@ -913,7 +906,6 @@ static int xennet_poll(struct napi_struct *napi, int budget) struct sk_buff_head errq; struct sk_buff_head tmpq; unsigned long flags; - unsigned int len; int err; spin_lock(&np->rx_lock); @@ -955,24 +947,13 @@ static int xennet_poll(struct napi_struct *napi, int budget) } } - NETFRONT_SKB_CB(skb)->page = - skb_frag_page(&skb_shinfo(skb)->frags[0]); - NETFRONT_SKB_CB(skb)->offset = rx->offset; - - len = rx->status; - if (len > RX_COPY_THRESHOLD) - len = RX_COPY_THRESHOLD; - skb_put(skb, len); + NETFRONT_SKB_CB(skb)->pull_to = rx->status; + if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD) + NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD; - if (rx->status > len) { - skb_shinfo(skb)->frags[0].page_offset = - rx->offset + len; - skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status - len); - skb->data_len = rx->status - len; - } else { - __skb_fill_page_desc(skb, 0, NULL, 0, 0); - skb_shinfo(skb)->nr_frags = 0; - } + skb_shinfo(skb)->frags[0].page_offset = rx->offset; + skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status); + skb->data_len = rx->status; i = xennet_fill_frags(np, skb, &tmpq); @@ -999,7 +980,7 @@ static int xennet_poll(struct napi_struct *napi, int budget) * receive throughout using the standard receive * buffer size was cut by 25%(!!!). */ - skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); + skb->truesize += skb->data_len - RX_COPY_THRESHOLD; skb->len += skb->data_len; if (rx->flags & XEN_NETRXF_csum_blank) diff --git a/trunk/drivers/of/base.c b/trunk/drivers/of/base.c index c181b94abc36..d4a1c9a043e1 100644 --- a/trunk/drivers/of/base.c +++ b/trunk/drivers/of/base.c @@ -363,6 +363,33 @@ struct device_node *of_get_next_child(const struct device_node *node, } EXPORT_SYMBOL(of_get_next_child); +/** + * of_get_next_available_child - Find the next available child node + * @node: parent node + * @prev: previous child of the parent node, or NULL to get first + * + * This function is like of_get_next_child(), except that it + * automatically skips any disabled nodes (i.e. status = "disabled"). + */ +struct device_node *of_get_next_available_child(const struct device_node *node, + struct device_node *prev) +{ + struct device_node *next; + + read_lock(&devtree_lock); + next = prev ? 
prev->sibling : node->child; + for (; next; next = next->sibling) { + if (!of_device_is_available(next)) + continue; + if (of_node_get(next)) + break; + } + of_node_put(prev); + read_unlock(&devtree_lock); + return next; +} +EXPORT_SYMBOL(of_get_next_available_child); + /** * of_find_node_by_path - Find a node matching a full OF path * @path: The full path to match diff --git a/trunk/drivers/pci/pci-acpi.c b/trunk/drivers/pci/pci-acpi.c index fbf7b26c7c8a..c5792d622dc4 100644 --- a/trunk/drivers/pci/pci-acpi.c +++ b/trunk/drivers/pci/pci-acpi.c @@ -266,8 +266,8 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) } if (!error) - dev_printk(KERN_INFO, &dev->dev, - "power state changed by ACPI to D%d\n", state); + dev_info(&dev->dev, "power state changed by ACPI to %s\n", + pci_power_name(state)); return error; } diff --git a/trunk/drivers/pci/pci-driver.c b/trunk/drivers/pci/pci-driver.c index 185be3703343..d6fd6b6d9d4b 100644 --- a/trunk/drivers/pci/pci-driver.c +++ b/trunk/drivers/pci/pci-driver.c @@ -280,8 +280,12 @@ static long local_pci_probe(void *_ddi) { struct drv_dev_and_id *ddi = _ddi; struct device *dev = &ddi->dev->dev; + struct device *parent = dev->parent; int rc; + /* The parent bridge must be in active state when probing */ + if (parent) + pm_runtime_get_sync(parent); /* Unbound PCI devices are always set to disabled and suspended. * During probe, the device is set to enabled and active and the * usage count is incremented. If the driver supports runtime PM, @@ -298,6 +302,8 @@ static long local_pci_probe(void *_ddi) pm_runtime_set_suspended(dev); pm_runtime_put_noidle(dev); } + if (parent) + pm_runtime_put(parent); return rc; } @@ -959,6 +965,13 @@ static int pci_pm_poweroff_noirq(struct device *dev) if (!pci_dev->state_saved && !pci_is_bridge(pci_dev)) pci_prepare_to_sleep(pci_dev); + /* + * The reason for doing this here is the same as for the analogous code + * in pci_pm_suspend_noirq(). + */ + if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI) + pci_write_config_word(pci_dev, PCI_COMMAND, 0); + return 0; } diff --git a/trunk/drivers/pci/pci-sysfs.c b/trunk/drivers/pci/pci-sysfs.c index 6869009c7393..02d107b15281 100644 --- a/trunk/drivers/pci/pci-sysfs.c +++ b/trunk/drivers/pci/pci-sysfs.c @@ -458,6 +458,40 @@ boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf) } struct device_attribute vga_attr = __ATTR_RO(boot_vga); +static void +pci_config_pm_runtime_get(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct device *parent = dev->parent; + + if (parent) + pm_runtime_get_sync(parent); + pm_runtime_get_noresume(dev); + /* + * pdev->current_state is set to PCI_D3cold during suspending, + * so wait until suspending completes + */ + pm_runtime_barrier(dev); + /* + * Only need to resume devices in D3cold, because config + * registers are still accessible for devices suspended but + * not in D3cold. 
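The of_get_next_available_child() helper completed above follows the usual OF iterator contract: it drops the reference on prev and returns the next enabled child with a reference held. A usage sketch under that assumption, with illustrative variable names:

	struct device_node *child = NULL;

	/* Walk only children whose status is not "disabled"; the helper
	 * handles the of_node_get()/of_node_put() pairing for us. */
	while ((child = of_get_next_available_child(parent, child)) != NULL) {
		pr_info("found enabled child %s\n", child->full_name);
		/* breaking out early would require of_node_put(child) */
	}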
+ */ + if (pdev->current_state == PCI_D3cold) + pm_runtime_resume(dev); +} + +static void +pci_config_pm_runtime_put(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct device *parent = dev->parent; + + pm_runtime_put(dev); + if (parent) + pm_runtime_put_sync(parent); +} + static ssize_t pci_read_config(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, @@ -484,6 +518,8 @@ pci_read_config(struct file *filp, struct kobject *kobj, size = count; } + pci_config_pm_runtime_get(dev); + if ((off & 1) && size) { u8 val; pci_user_read_config_byte(dev, off, &val); @@ -529,6 +565,8 @@ pci_read_config(struct file *filp, struct kobject *kobj, --size; } + pci_config_pm_runtime_put(dev); + return count; } @@ -549,6 +587,8 @@ pci_write_config(struct file* filp, struct kobject *kobj, count = size; } + pci_config_pm_runtime_get(dev); + if ((off & 1) && size) { pci_user_write_config_byte(dev, off, data[off - init_off]); off++; @@ -587,6 +627,8 @@ pci_write_config(struct file* filp, struct kobject *kobj, --size; } + pci_config_pm_runtime_put(dev); + return count; } diff --git a/trunk/drivers/pci/pci.c b/trunk/drivers/pci/pci.c index f3ea977a5b1b..ab4bf5a4c2f1 100644 --- a/trunk/drivers/pci/pci.c +++ b/trunk/drivers/pci/pci.c @@ -1941,6 +1941,7 @@ void pci_pm_init(struct pci_dev *dev) dev->pm_cap = pm; dev->d3_delay = PCI_PM_D3_WAIT; dev->d3cold_delay = PCI_PM_D3COLD_WAIT; + dev->d3cold_allowed = true; dev->d1_support = false; dev->d2_support = false; diff --git a/trunk/drivers/pci/pcie/portdrv_pci.c b/trunk/drivers/pci/pcie/portdrv_pci.c index 3a7eefcb270a..e76b44777dbf 100644 --- a/trunk/drivers/pci/pcie/portdrv_pci.c +++ b/trunk/drivers/pci/pcie/portdrv_pci.c @@ -140,9 +140,17 @@ static int pcie_port_runtime_resume(struct device *dev) { return 0; } + +static int pcie_port_runtime_idle(struct device *dev) +{ + /* Delay for a short while to prevent too frequent suspend/resume */ + pm_schedule_suspend(dev, 10); + return -EBUSY; +} #else #define pcie_port_runtime_suspend NULL #define pcie_port_runtime_resume NULL +#define pcie_port_runtime_idle NULL #endif static const struct dev_pm_ops pcie_portdrv_pm_ops = { @@ -155,6 +163,7 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = { .resume_noirq = pcie_port_resume_noirq, .runtime_suspend = pcie_port_runtime_suspend, .runtime_resume = pcie_port_runtime_resume, + .runtime_idle = pcie_port_runtime_idle, }; #define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops) @@ -200,6 +209,11 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev, return status; pci_save_state(dev); + /* + * D3cold may not work properly on some PCIe port, so disable + * it by default. 
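The pci_config_pm_runtime_get()/put() helpers above exist so that the sysfs config-space readers and writers, wrapped a little further on, always see a device that is at least in D0 even if runtime PM had put it into D3cold. Any further accessor added to pci-sysfs.c would presumably use the same bracket; an illustrative sketch only, the function name is made up:

	static int read_vendor_id_bracketed(struct pci_dev *pdev, u16 *vendor)
	{
		int ret;

		pci_config_pm_runtime_get(pdev);	/* resume from D3cold if needed */
		ret = pci_user_read_config_word(pdev, PCI_VENDOR_ID, vendor);
		pci_config_pm_runtime_put(pdev);	/* drop the PM references */

		return ret;
	}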
+ */ + dev->d3cold_allowed = false; if (!pci_match_id(port_runtime_pm_black_list, dev)) pm_runtime_put_noidle(&dev->dev); diff --git a/trunk/drivers/pci/probe.c b/trunk/drivers/pci/probe.c index 6c143b4497ca..9f8a6b79a8ec 100644 --- a/trunk/drivers/pci/probe.c +++ b/trunk/drivers/pci/probe.c @@ -144,15 +144,13 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar) case PCI_BASE_ADDRESS_MEM_TYPE_32: break; case PCI_BASE_ADDRESS_MEM_TYPE_1M: - dev_info(&dev->dev, "1M mem BAR treated as 32-bit BAR\n"); + /* 1M mem BAR treated as 32-bit BAR */ break; case PCI_BASE_ADDRESS_MEM_TYPE_64: flags |= IORESOURCE_MEM_64; break; default: - dev_warn(&dev->dev, - "mem unknown type %x treated as 32-bit BAR\n", - mem_type); + /* mem unknown type treated as 32-bit BAR */ break; } return flags; @@ -173,9 +171,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, u32 l, sz, mask; u16 orig_cmd; struct pci_bus_region region; + bool bar_too_big = false, bar_disabled = false; mask = type ? PCI_ROM_ADDRESS_MASK : ~0; + /* No printks while decoding is disabled! */ if (!dev->mmio_always_on) { pci_read_config_word(dev, PCI_COMMAND, &orig_cmd); pci_write_config_word(dev, PCI_COMMAND, @@ -240,8 +240,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, goto fail; if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) { - dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", - pos); + bar_too_big = true; goto fail; } @@ -252,12 +251,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, region.start = 0; region.end = sz64; pcibios_bus_to_resource(dev, res, ®ion); + bar_disabled = true; } else { region.start = l64; region.end = l64 + sz64; pcibios_bus_to_resource(dev, res, ®ion); - dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", - pos, res); } } else { sz = pci_size(l, sz, mask); @@ -268,18 +266,23 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, region.start = l; region.end = l + sz; pcibios_bus_to_resource(dev, res, ®ion); - - dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res); } - out: + goto out; + + +fail: + res->flags = 0; +out: if (!dev->mmio_always_on) pci_write_config_word(dev, PCI_COMMAND, orig_cmd); + if (bar_too_big) + dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", pos); + if (res->flags && !bar_disabled) + dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res); + return (res->flags & IORESOURCE_MEM_64) ? 
1 : 0; - fail: - res->flags = 0; - goto out; } static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) diff --git a/trunk/drivers/pinctrl/core.c b/trunk/drivers/pinctrl/core.c index fb7f3bebdc69..dc5c126e398a 100644 --- a/trunk/drivers/pinctrl/core.c +++ b/trunk/drivers/pinctrl/core.c @@ -657,11 +657,7 @@ static struct pinctrl *pinctrl_get_locked(struct device *dev) if (p != NULL) return ERR_PTR(-EBUSY); - p = create_pinctrl(dev); - if (IS_ERR(p)) - return p; - - return p; + return create_pinctrl(dev); } /** @@ -738,11 +734,8 @@ static struct pinctrl_state *pinctrl_lookup_state_locked(struct pinctrl *p, dev_dbg(p->dev, "using pinctrl dummy state (%s)\n", name); state = create_state(p, name); - if (IS_ERR(state)) - return state; - } else { - return ERR_PTR(-ENODEV); - } + } else + state = ERR_PTR(-ENODEV); } return state; diff --git a/trunk/drivers/pinctrl/pinctrl-imx23.c b/trunk/drivers/pinctrl/pinctrl-imx23.c index 75d3eff94296..3674d877ed7c 100644 --- a/trunk/drivers/pinctrl/pinctrl-imx23.c +++ b/trunk/drivers/pinctrl/pinctrl-imx23.c @@ -292,7 +292,7 @@ static int __init imx23_pinctrl_init(void) { return platform_driver_register(&imx23_pinctrl_driver); } -arch_initcall(imx23_pinctrl_init); +postcore_initcall(imx23_pinctrl_init); static void __exit imx23_pinctrl_exit(void) { diff --git a/trunk/drivers/pinctrl/pinctrl-imx28.c b/trunk/drivers/pinctrl/pinctrl-imx28.c index b973026811a2..0f5b2122b1ba 100644 --- a/trunk/drivers/pinctrl/pinctrl-imx28.c +++ b/trunk/drivers/pinctrl/pinctrl-imx28.c @@ -408,7 +408,7 @@ static int __init imx28_pinctrl_init(void) { return platform_driver_register(&imx28_pinctrl_driver); } -arch_initcall(imx28_pinctrl_init); +postcore_initcall(imx28_pinctrl_init); static void __exit imx28_pinctrl_exit(void) { diff --git a/trunk/drivers/pinctrl/pinctrl-imx51.c b/trunk/drivers/pinctrl/pinctrl-imx51.c index 689b3c88dd2e..9fd02162a3c2 100644 --- a/trunk/drivers/pinctrl/pinctrl-imx51.c +++ b/trunk/drivers/pinctrl/pinctrl-imx51.c @@ -974,7 +974,7 @@ static struct imx_pin_reg imx51_pin_regs[] = { IMX_PIN_REG(MX51_PAD_EIM_DA13, NO_PAD, 0x050, 0, 0x000, 0), /* MX51_PAD_EIM_DA13__EIM_DA13 */ IMX_PIN_REG(MX51_PAD_EIM_DA14, NO_PAD, 0x054, 0, 0x000, 0), /* MX51_PAD_EIM_DA14__EIM_DA14 */ IMX_PIN_REG(MX51_PAD_EIM_DA15, NO_PAD, 0x058, 0, 0x000, 0), /* MX51_PAD_EIM_DA15__EIM_DA15 */ - IMX_PIN_REG(MX51_PAD_SD2_CMD, NO_PAD, 0x3b4, 2, 0x91c, 3), /* MX51_PAD_SD2_CMD__CSPI_MOSI */ + IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 2, 0x91c, 3), /* MX51_PAD_SD2_CMD__CSPI_MOSI */ IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 1, 0x9b0, 2), /* MX51_PAD_SD2_CMD__I2C1_SCL */ IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 0, 0x000, 0), /* MX51_PAD_SD2_CMD__SD2_CMD */ IMX_PIN_REG(MX51_PAD_SD2_CLK, 0x7c0, 0x3b8, 2, 0x914, 3), /* MX51_PAD_SD2_CLK__CSPI_SCLK */ diff --git a/trunk/drivers/pinctrl/pinctrl-nomadik-db8500.c b/trunk/drivers/pinctrl/pinctrl-nomadik-db8500.c index 6f99769c6733..a39fb7a6fc51 100644 --- a/trunk/drivers/pinctrl/pinctrl-nomadik-db8500.c +++ b/trunk/drivers/pinctrl/pinctrl-nomadik-db8500.c @@ -505,6 +505,8 @@ static const unsigned kp_b_1_pins[] = { DB8500_PIN_F3, DB8500_PIN_F1, DB8500_PIN_J3, DB8500_PIN_H2, DB8500_PIN_J2, DB8500_PIN_H1, DB8500_PIN_F4, DB8500_PIN_E3, DB8500_PIN_E4, DB8500_PIN_D2, DB8500_PIN_C1, DB8500_PIN_D3, DB8500_PIN_C2, DB8500_PIN_D5 }; +static const unsigned kp_b_2_pins[] = { DB8500_PIN_F3, DB8500_PIN_F1, + DB8500_PIN_G3, DB8500_PIN_G2, DB8500_PIN_F4, DB8500_PIN_E3}; static const unsigned sm_b_1_pins[] = { DB8500_PIN_C6, DB8500_PIN_B3, 
DB8500_PIN_C4, DB8500_PIN_E6, DB8500_PIN_A3, DB8500_PIN_B6, DB8500_PIN_D6, DB8500_PIN_B7, DB8500_PIN_D7, DB8500_PIN_D8, @@ -662,6 +664,7 @@ static const struct nmk_pingroup nmk_db8500_groups[] = { DB8500_PIN_GROUP(spi3_b_1, NMK_GPIO_ALT_B), DB8500_PIN_GROUP(msp1txrx_b_1, NMK_GPIO_ALT_B), DB8500_PIN_GROUP(kp_b_1, NMK_GPIO_ALT_B), + DB8500_PIN_GROUP(kp_b_2, NMK_GPIO_ALT_B), DB8500_PIN_GROUP(sm_b_1, NMK_GPIO_ALT_B), DB8500_PIN_GROUP(smcs0_b_1, NMK_GPIO_ALT_B), DB8500_PIN_GROUP(smcs1_b_1, NMK_GPIO_ALT_B), @@ -751,7 +754,7 @@ DB8500_FUNC_GROUPS(msp1, "msp1txrx_a_1", "msp1_a_1", "msp1txrx_b_1"); DB8500_FUNC_GROUPS(lcdb, "lcdb_a_1"); DB8500_FUNC_GROUPS(lcd, "lcdvsi0_a_1", "lcdvsi1_a_1", "lcd_d0_d7_a_1", "lcd_d8_d11_a_1", "lcd_d12_d23_a_1", "lcd_b_1"); -DB8500_FUNC_GROUPS(kp, "kp_a_1", "kp_b_1", "kp_c_1", "kp_oc1_1"); +DB8500_FUNC_GROUPS(kp, "kp_a_1", "kp_b_1", "kp_b_2", "kp_c_1", "kp_oc1_1"); DB8500_FUNC_GROUPS(mc2, "mc2_a_1", "mc2rstn_c_1"); DB8500_FUNC_GROUPS(ssp1, "ssp1_a_1"); DB8500_FUNC_GROUPS(ssp0, "ssp0_a_1"); @@ -766,7 +769,7 @@ DB8500_FUNC_GROUPS(ipgpio, "ipgpio0_a_1", "ipgpio1_a_1", "ipgpio7_b_1", DB8500_FUNC_GROUPS(msp2, "msp2sck_a_1", "msp2_a_1"); DB8500_FUNC_GROUPS(mc4, "mc4_a_1", "mc4rstn_c_1"); DB8500_FUNC_GROUPS(mc1, "mc1_a_1", "mc1dir_a_1"); -DB8500_FUNC_GROUPS(hsi, "hsir1_a_1", "hsit1_a_1", "hsit_a_2"); +DB8500_FUNC_GROUPS(hsi, "hsir_a_1", "hsit_a_1", "hsit_a_2"); DB8500_FUNC_GROUPS(clkout, "clkout_a_1", "clkout_a_2", "clkout_c_1"); DB8500_FUNC_GROUPS(usb, "usb_a_1"); DB8500_FUNC_GROUPS(trig, "trig_b_1"); diff --git a/trunk/drivers/pinctrl/pinctrl-nomadik.c b/trunk/drivers/pinctrl/pinctrl-nomadik.c index 53b0d49a7a1c..3dde6537adb8 100644 --- a/trunk/drivers/pinctrl/pinctrl-nomadik.c +++ b/trunk/drivers/pinctrl/pinctrl-nomadik.c @@ -1292,7 +1292,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev) NOMADIK_GPIO_TO_IRQ(pdata->first_gpio), 0, &nmk_gpio_irq_simple_ops, nmk_chip); if (!nmk_chip->domain) { - pr_err("%s: Failed to create irqdomain\n", np->full_name); + dev_err(&dev->dev, "failed to create irqdomain\n"); ret = -ENOSYS; goto out; } @@ -1731,7 +1731,6 @@ static int __devinit nmk_pinctrl_probe(struct platform_device *pdev) for (i = 0; i < npct->soc->gpio_num_ranges; i++) { if (!nmk_gpio_chips[i]) { dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i); - devm_kfree(&pdev->dev, npct); return -EPROBE_DEFER; } npct->soc->gpio_ranges[i].gc = &nmk_gpio_chips[i]->chip; diff --git a/trunk/drivers/pinctrl/pinctrl-sirf.c b/trunk/drivers/pinctrl/pinctrl-sirf.c index 2aae8a8978e9..7fca6ce5952b 100644 --- a/trunk/drivers/pinctrl/pinctrl-sirf.c +++ b/trunk/drivers/pinctrl/pinctrl-sirf.c @@ -1217,7 +1217,6 @@ static int __devinit sirfsoc_pinmux_probe(struct platform_device *pdev) iounmap(spmx->gpio_virtbase); out_no_gpio_remap: platform_set_drvdata(pdev, NULL); - devm_kfree(&pdev->dev, spmx); return ret; } diff --git a/trunk/drivers/pinctrl/pinctrl-u300.c b/trunk/drivers/pinctrl/pinctrl-u300.c index a7ad8c112d91..309f5b9a70ec 100644 --- a/trunk/drivers/pinctrl/pinctrl-u300.c +++ b/trunk/drivers/pinctrl/pinctrl-u300.c @@ -1121,10 +1121,8 @@ static int __devinit u300_pmx_probe(struct platform_device *pdev) upmx->dev = &pdev->dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - ret = -ENOENT; - goto out_no_resource; - } + if (!res) + return -ENOENT; upmx->phybase = res->start; upmx->physize = resource_size(res); @@ -1165,8 +1163,6 @@ static int __devinit u300_pmx_probe(struct platform_device *pdev) platform_set_drvdata(pdev, NULL); 
out_no_memregion: release_mem_region(upmx->phybase, upmx->physize); -out_no_resource: - devm_kfree(&pdev->dev, upmx); return ret; } diff --git a/trunk/drivers/platform/Makefile b/trunk/drivers/platform/Makefile index 782953ae4c03..b17c16ce54ad 100644 --- a/trunk/drivers/platform/Makefile +++ b/trunk/drivers/platform/Makefile @@ -3,3 +3,4 @@ # obj-$(CONFIG_X86) += x86/ +obj-$(CONFIG_OLPC) += olpc/ diff --git a/trunk/drivers/platform/olpc/Makefile b/trunk/drivers/platform/olpc/Makefile new file mode 100644 index 000000000000..dc8b26bc7209 --- /dev/null +++ b/trunk/drivers/platform/olpc/Makefile @@ -0,0 +1,4 @@ +# +# OLPC XO platform-specific drivers +# +obj-$(CONFIG_OLPC) += olpc-ec.o diff --git a/trunk/drivers/platform/olpc/olpc-ec.c b/trunk/drivers/platform/olpc/olpc-ec.c new file mode 100644 index 000000000000..0f9f8596b300 --- /dev/null +++ b/trunk/drivers/platform/olpc/olpc-ec.c @@ -0,0 +1,336 @@ +/* + * Generic driver for the OLPC Embedded Controller. + * + * Copyright (C) 2011-2012 One Laptop per Child Foundation. + * + * Licensed under the GPL v2 or later. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct ec_cmd_desc { + u8 cmd; + u8 *inbuf, *outbuf; + size_t inlen, outlen; + + int err; + struct completion finished; + struct list_head node; + + void *priv; +}; + +struct olpc_ec_priv { + struct olpc_ec_driver *drv; + struct work_struct worker; + struct mutex cmd_lock; + + /* Pending EC commands */ + struct list_head cmd_q; + spinlock_t cmd_q_lock; + + struct dentry *dbgfs_dir; + + /* + * Running an EC command while suspending means we don't always finish + * the command before the machine suspends. This means that the EC + * is expecting the command protocol to finish, but we after a period + * of time (while the OS is asleep) the EC times out and restarts its + * idle loop. Meanwhile, the OS wakes up, thinks it's still in the + * middle of the command protocol, starts throwing random things at + * the EC... and everyone's uphappy. + */ + bool suspended; +}; + +static struct olpc_ec_driver *ec_driver; +static struct olpc_ec_priv *ec_priv; +static void *ec_cb_arg; + +void olpc_ec_driver_register(struct olpc_ec_driver *drv, void *arg) +{ + ec_driver = drv; + ec_cb_arg = arg; +} +EXPORT_SYMBOL_GPL(olpc_ec_driver_register); + +static void olpc_ec_worker(struct work_struct *w) +{ + struct olpc_ec_priv *ec = container_of(w, struct olpc_ec_priv, worker); + struct ec_cmd_desc *desc = NULL; + unsigned long flags; + + /* Grab the first pending command from the queue */ + spin_lock_irqsave(&ec->cmd_q_lock, flags); + if (!list_empty(&ec->cmd_q)) { + desc = list_first_entry(&ec->cmd_q, struct ec_cmd_desc, node); + list_del(&desc->node); + } + spin_unlock_irqrestore(&ec->cmd_q_lock, flags); + + /* Do we actually have anything to do? */ + if (!desc) + return; + + /* Protect the EC hw with a mutex; only run one cmd at a time */ + mutex_lock(&ec->cmd_lock); + desc->err = ec_driver->ec_cmd(desc->cmd, desc->inbuf, desc->inlen, + desc->outbuf, desc->outlen, ec_cb_arg); + mutex_unlock(&ec->cmd_lock); + + /* Finished, wake up olpc_ec_cmd() */ + complete(&desc->finished); + + /* Run the worker thread again in case there are more cmds pending */ + schedule_work(&ec->worker); +} + +/* + * Throw a cmd descripter onto the list. We now have SMP OLPC machines, so + * locking is pretty critical. 
+ */ +static void queue_ec_descriptor(struct ec_cmd_desc *desc, + struct olpc_ec_priv *ec) +{ + unsigned long flags; + + INIT_LIST_HEAD(&desc->node); + + spin_lock_irqsave(&ec->cmd_q_lock, flags); + list_add_tail(&desc->node, &ec->cmd_q); + spin_unlock_irqrestore(&ec->cmd_q_lock, flags); + + schedule_work(&ec->worker); +} + +int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen) +{ + struct olpc_ec_priv *ec = ec_priv; + struct ec_cmd_desc desc; + + /* Ensure a driver and ec hook have been registered */ + if (WARN_ON(!ec_driver || !ec_driver->ec_cmd)) + return -ENODEV; + + if (!ec) + return -ENOMEM; + + /* Suspending in the middle of a command hoses things really badly */ + if (WARN_ON(ec->suspended)) + return -EBUSY; + + might_sleep(); + + desc.cmd = cmd; + desc.inbuf = inbuf; + desc.outbuf = outbuf; + desc.inlen = inlen; + desc.outlen = outlen; + desc.err = 0; + init_completion(&desc.finished); + + queue_ec_descriptor(&desc, ec); + + /* Timeouts must be handled in the platform-specific EC hook */ + wait_for_completion(&desc.finished); + + /* The worker thread dequeues the cmd; no need to do anything here */ + return desc.err; +} +EXPORT_SYMBOL_GPL(olpc_ec_cmd); + +#ifdef CONFIG_DEBUG_FS + +/* + * debugfs support for "generic commands", to allow sending + * arbitrary EC commands from userspace. + */ + +#define EC_MAX_CMD_ARGS (5 + 1) /* cmd byte + 5 args */ +#define EC_MAX_CMD_REPLY (8) + +static DEFINE_MUTEX(ec_dbgfs_lock); +static unsigned char ec_dbgfs_resp[EC_MAX_CMD_REPLY]; +static unsigned int ec_dbgfs_resp_bytes; + +static ssize_t ec_dbgfs_cmd_write(struct file *file, const char __user *buf, + size_t size, loff_t *ppos) +{ + int i, m; + unsigned char ec_cmd[EC_MAX_CMD_ARGS]; + unsigned int ec_cmd_int[EC_MAX_CMD_ARGS]; + char cmdbuf[64]; + int ec_cmd_bytes; + + mutex_lock(&ec_dbgfs_lock); + + size = simple_write_to_buffer(cmdbuf, sizeof(cmdbuf), ppos, buf, size); + + m = sscanf(cmdbuf, "%x:%u %x %x %x %x %x", &ec_cmd_int[0], + &ec_dbgfs_resp_bytes, &ec_cmd_int[1], &ec_cmd_int[2], + &ec_cmd_int[3], &ec_cmd_int[4], &ec_cmd_int[5]); + if (m < 2 || ec_dbgfs_resp_bytes > EC_MAX_CMD_REPLY) { + /* reset to prevent overflow on read */ + ec_dbgfs_resp_bytes = 0; + + pr_debug("olpc-ec: bad ec cmd: cmd:response-count [arg1 [arg2 ...]]\n"); + size = -EINVAL; + goto out; + } + + /* convert scanf'd ints to char */ + ec_cmd_bytes = m - 2; + for (i = 0; i <= ec_cmd_bytes; i++) + ec_cmd[i] = ec_cmd_int[i]; + + pr_debug("olpc-ec: debugfs cmd 0x%02x with %d args %02x %02x %02x %02x %02x, want %d returns\n", + ec_cmd[0], ec_cmd_bytes, ec_cmd[1], ec_cmd[2], + ec_cmd[3], ec_cmd[4], ec_cmd[5], ec_dbgfs_resp_bytes); + + olpc_ec_cmd(ec_cmd[0], (ec_cmd_bytes == 0) ? 
NULL : &ec_cmd[1], + ec_cmd_bytes, ec_dbgfs_resp, ec_dbgfs_resp_bytes); + + pr_debug("olpc-ec: response %02x %02x %02x %02x %02x %02x %02x %02x (%d bytes expected)\n", + ec_dbgfs_resp[0], ec_dbgfs_resp[1], ec_dbgfs_resp[2], + ec_dbgfs_resp[3], ec_dbgfs_resp[4], ec_dbgfs_resp[5], + ec_dbgfs_resp[6], ec_dbgfs_resp[7], + ec_dbgfs_resp_bytes); + +out: + mutex_unlock(&ec_dbgfs_lock); + return size; +} + +static ssize_t ec_dbgfs_cmd_read(struct file *file, char __user *buf, + size_t size, loff_t *ppos) +{ + unsigned int i, r; + char *rp; + char respbuf[64]; + + mutex_lock(&ec_dbgfs_lock); + rp = respbuf; + rp += sprintf(rp, "%02x", ec_dbgfs_resp[0]); + for (i = 1; i < ec_dbgfs_resp_bytes; i++) + rp += sprintf(rp, ", %02x", ec_dbgfs_resp[i]); + mutex_unlock(&ec_dbgfs_lock); + rp += sprintf(rp, "\n"); + + r = rp - respbuf; + return simple_read_from_buffer(buf, size, ppos, respbuf, r); +} + +static const struct file_operations ec_dbgfs_ops = { + .write = ec_dbgfs_cmd_write, + .read = ec_dbgfs_cmd_read, +}; + +static struct dentry *olpc_ec_setup_debugfs(void) +{ + struct dentry *dbgfs_dir; + + dbgfs_dir = debugfs_create_dir("olpc-ec", NULL); + if (IS_ERR_OR_NULL(dbgfs_dir)) + return NULL; + + debugfs_create_file("cmd", 0600, dbgfs_dir, NULL, &ec_dbgfs_ops); + + return dbgfs_dir; +} + +#else + +static struct dentry *olpc_ec_setup_debugfs(void) +{ + return NULL; +} + +#endif /* CONFIG_DEBUG_FS */ + +static int olpc_ec_probe(struct platform_device *pdev) +{ + struct olpc_ec_priv *ec; + int err; + + if (!ec_driver) + return -ENODEV; + + ec = kzalloc(sizeof(*ec), GFP_KERNEL); + if (!ec) + return -ENOMEM; + + ec->drv = ec_driver; + INIT_WORK(&ec->worker, olpc_ec_worker); + mutex_init(&ec->cmd_lock); + + INIT_LIST_HEAD(&ec->cmd_q); + spin_lock_init(&ec->cmd_q_lock); + + ec_priv = ec; + platform_set_drvdata(pdev, ec); + + err = ec_driver->probe ? ec_driver->probe(pdev) : 0; + if (err) { + ec_priv = NULL; + kfree(ec); + } else { + ec->dbgfs_dir = olpc_ec_setup_debugfs(); + } + + return err; +} + +static int olpc_ec_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct olpc_ec_priv *ec = platform_get_drvdata(pdev); + int err = 0; + + if (ec_driver->suspend) + err = ec_driver->suspend(pdev); + if (!err) + ec->suspended = true; + + return err; +} + +static int olpc_ec_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct olpc_ec_priv *ec = platform_get_drvdata(pdev); + + ec->suspended = false; + return ec_driver->resume ? ec_driver->resume(pdev) : 0; +} + +static const struct dev_pm_ops olpc_ec_pm_ops = { + .suspend_late = olpc_ec_suspend, + .resume_early = olpc_ec_resume, +}; + +static struct platform_driver olpc_ec_plat_driver = { + .probe = olpc_ec_probe, + .driver = { + .name = "olpc-ec", + .pm = &olpc_ec_pm_ops, + }, +}; + +static int __init olpc_ec_init_module(void) +{ + return platform_driver_register(&olpc_ec_plat_driver); +} + +module_init(olpc_ec_init_module); + +MODULE_AUTHOR("Andres Salomon "); +MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/platform/x86/Kconfig b/trunk/drivers/platform/x86/Kconfig index 2a262f5c5c0c..c86bae828c28 100644 --- a/trunk/drivers/platform/x86/Kconfig +++ b/trunk/drivers/platform/x86/Kconfig @@ -289,6 +289,7 @@ config IDEAPAD_LAPTOP tristate "Lenovo IdeaPad Laptop Extras" depends on ACPI depends on RFKILL && INPUT + depends on SERIO_I8042 select INPUT_SPARSEKMAP help This is a driver for the rfkill switches on Lenovo IdeaPad netbooks. 
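Returning to the olpc-ec driver added above: the split is that arch/platform code supplies the low-level transport through olpc_ec_driver_register(), and everything else talks to the EC through olpc_ec_cmd(), which may sleep. A hedged usage sketch; the transport hook, setup function and command byte are invented for the example:

	/* Platform side (e.g. x86 OLPC setup code), done once at boot: */
	static struct olpc_ec_driver my_ec_driver = {
		.ec_cmd	= my_ec_cmd_xfer,	/* hypothetical transport hook */
	};

	static void __init my_platform_setup(void)
	{
		olpc_ec_driver_register(&my_ec_driver, NULL);
	}

	/* Consumer side, any time after the "olpc-ec" platform device probes: */
	static int query_ec_example(void)
	{
		u8 resp;

		/* 0x42 is a made-up command byte, purely for illustration */
		return olpc_ec_cmd(0x42, NULL, 0, &resp, 1);
	}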
@@ -758,8 +759,11 @@ config SAMSUNG_Q10 config APPLE_GMUX tristate "Apple Gmux Driver" + depends on ACPI depends on PNP - select BACKLIGHT_CLASS_DEVICE + depends on BACKLIGHT_CLASS_DEVICE + depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE + depends on ACPI_VIDEO=n || ACPI_VIDEO ---help--- This driver provides support for the gmux device found on many Apple laptops, which controls the display mux for the hybrid diff --git a/trunk/drivers/platform/x86/apple-gmux.c b/trunk/drivers/platform/x86/apple-gmux.c index 905fa01ac8df..dfb1a92ce949 100644 --- a/trunk/drivers/platform/x86/apple-gmux.c +++ b/trunk/drivers/platform/x86/apple-gmux.c @@ -2,6 +2,7 @@ * Gmux driver for Apple laptops * * Copyright (C) Canonical Ltd. + * Copyright (C) 2010-2012 Andreas Heider * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -18,16 +19,30 @@ #include #include #include +#include +#include +#include #include #include struct apple_gmux_data { unsigned long iostart; unsigned long iolen; + bool indexed; + struct mutex index_lock; struct backlight_device *bdev; + + /* switcheroo data */ + acpi_handle dhandle; + int gpe; + enum vga_switcheroo_client_id resume_client_id; + enum vga_switcheroo_state power_state; + struct completion powerchange_done; }; +static struct apple_gmux_data *apple_gmux_data; + /* * gmux port offsets. Many of these are not yet used, but may be in the * future, and it's useful to have them documented here anyhow. @@ -45,6 +60,9 @@ struct apple_gmux_data { #define GMUX_PORT_DISCRETE_POWER 0x50 #define GMUX_PORT_MAX_BRIGHTNESS 0x70 #define GMUX_PORT_BRIGHTNESS 0x74 +#define GMUX_PORT_VALUE 0xc2 +#define GMUX_PORT_READ 0xd0 +#define GMUX_PORT_WRITE 0xd4 #define GMUX_MIN_IO_LEN (GMUX_PORT_BRIGHTNESS + 4) @@ -59,22 +77,172 @@ struct apple_gmux_data { #define GMUX_BRIGHTNESS_MASK 0x00ffffff #define GMUX_MAX_BRIGHTNESS GMUX_BRIGHTNESS_MASK -static inline u8 gmux_read8(struct apple_gmux_data *gmux_data, int port) +static u8 gmux_pio_read8(struct apple_gmux_data *gmux_data, int port) { return inb(gmux_data->iostart + port); } -static inline void gmux_write8(struct apple_gmux_data *gmux_data, int port, +static void gmux_pio_write8(struct apple_gmux_data *gmux_data, int port, u8 val) { outb(val, gmux_data->iostart + port); } -static inline u32 gmux_read32(struct apple_gmux_data *gmux_data, int port) +static u32 gmux_pio_read32(struct apple_gmux_data *gmux_data, int port) { return inl(gmux_data->iostart + port); } +static void gmux_pio_write32(struct apple_gmux_data *gmux_data, int port, + u32 val) +{ + int i; + u8 tmpval; + + for (i = 0; i < 4; i++) { + tmpval = (val >> (i * 8)) & 0xff; + outb(tmpval, port + i); + } +} + +static int gmux_index_wait_ready(struct apple_gmux_data *gmux_data) +{ + int i = 200; + u8 gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE); + + while (i && (gwr & 0x01)) { + inb(gmux_data->iostart + GMUX_PORT_READ); + gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE); + udelay(100); + i--; + } + + return !!i; +} + +static int gmux_index_wait_complete(struct apple_gmux_data *gmux_data) +{ + int i = 200; + u8 gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE); + + while (i && !(gwr & 0x01)) { + gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE); + udelay(100); + i--; + } + + if (gwr & 0x01) + inb(gmux_data->iostart + GMUX_PORT_READ); + + return !!i; +} + +static u8 gmux_index_read8(struct apple_gmux_data *gmux_data, int port) +{ + u8 val; + + mutex_lock(&gmux_data->index_lock); + outb((port & 0xff), 
gmux_data->iostart + GMUX_PORT_READ); + gmux_index_wait_ready(gmux_data); + val = inb(gmux_data->iostart + GMUX_PORT_VALUE); + mutex_unlock(&gmux_data->index_lock); + + return val; +} + +static void gmux_index_write8(struct apple_gmux_data *gmux_data, int port, + u8 val) +{ + mutex_lock(&gmux_data->index_lock); + outb(val, gmux_data->iostart + GMUX_PORT_VALUE); + gmux_index_wait_ready(gmux_data); + outb(port & 0xff, gmux_data->iostart + GMUX_PORT_WRITE); + gmux_index_wait_complete(gmux_data); + mutex_unlock(&gmux_data->index_lock); +} + +static u32 gmux_index_read32(struct apple_gmux_data *gmux_data, int port) +{ + u32 val; + + mutex_lock(&gmux_data->index_lock); + outb((port & 0xff), gmux_data->iostart + GMUX_PORT_READ); + gmux_index_wait_ready(gmux_data); + val = inl(gmux_data->iostart + GMUX_PORT_VALUE); + mutex_unlock(&gmux_data->index_lock); + + return val; +} + +static void gmux_index_write32(struct apple_gmux_data *gmux_data, int port, + u32 val) +{ + int i; + u8 tmpval; + + mutex_lock(&gmux_data->index_lock); + + for (i = 0; i < 4; i++) { + tmpval = (val >> (i * 8)) & 0xff; + outb(tmpval, gmux_data->iostart + GMUX_PORT_VALUE + i); + } + + gmux_index_wait_ready(gmux_data); + outb(port & 0xff, gmux_data->iostart + GMUX_PORT_WRITE); + gmux_index_wait_complete(gmux_data); + mutex_unlock(&gmux_data->index_lock); +} + +static u8 gmux_read8(struct apple_gmux_data *gmux_data, int port) +{ + if (gmux_data->indexed) + return gmux_index_read8(gmux_data, port); + else + return gmux_pio_read8(gmux_data, port); +} + +static void gmux_write8(struct apple_gmux_data *gmux_data, int port, u8 val) +{ + if (gmux_data->indexed) + gmux_index_write8(gmux_data, port, val); + else + gmux_pio_write8(gmux_data, port, val); +} + +static u32 gmux_read32(struct apple_gmux_data *gmux_data, int port) +{ + if (gmux_data->indexed) + return gmux_index_read32(gmux_data, port); + else + return gmux_pio_read32(gmux_data, port); +} + +static void gmux_write32(struct apple_gmux_data *gmux_data, int port, + u32 val) +{ + if (gmux_data->indexed) + gmux_index_write32(gmux_data, port, val); + else + gmux_pio_write32(gmux_data, port, val); +} + +static bool gmux_is_indexed(struct apple_gmux_data *gmux_data) +{ + u16 val; + + outb(0xaa, gmux_data->iostart + 0xcc); + outb(0x55, gmux_data->iostart + 0xcd); + outb(0x00, gmux_data->iostart + 0xce); + + val = inb(gmux_data->iostart + 0xcc) | + (inb(gmux_data->iostart + 0xcd) << 8); + + if (val == 0x55aa) + return true; + + return false; +} + static int gmux_get_brightness(struct backlight_device *bd) { struct apple_gmux_data *gmux_data = bl_get_data(bd); @@ -90,16 +258,7 @@ static int gmux_update_status(struct backlight_device *bd) if (bd->props.state & BL_CORE_SUSPENDED) return 0; - /* - * Older gmux versions require writing out lower bytes first then - * setting the upper byte to 0 to flush the values. Newer versions - * accept a single u32 write, but the old method also works, so we - * just use the old method for all gmux versions. 
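The comment just above belongs to gmux_update_status(), whose hunk continues below: the byte-at-a-time brightness flush is dropped in favour of a single 32-bit write, which gmux_write32() now routes to either the PIO or the new indexed back end. Side by side, as taken from that hunk:

	/* Removed: flush the value byte by byte (the old-gmux workaround) */
	gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS, brightness);
	gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS + 1, brightness >> 8);
	gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS + 2, brightness >> 16);
	gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS + 3, 0);

	/* Added: one 32-bit write; gmux_write32() picks PIO or indexed I/O */
	gmux_write32(gmux_data, GMUX_PORT_BRIGHTNESS, brightness);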
- */ - gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS, brightness); - gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS + 1, brightness >> 8); - gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS + 2, brightness >> 16); - gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS + 3, 0); + gmux_write32(gmux_data, GMUX_PORT_BRIGHTNESS, brightness); return 0; } @@ -110,6 +269,146 @@ static const struct backlight_ops gmux_bl_ops = { .update_status = gmux_update_status, }; +static int gmux_switchto(enum vga_switcheroo_client_id id) +{ + if (id == VGA_SWITCHEROO_IGD) { + gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 1); + gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DISPLAY, 2); + gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 2); + } else { + gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 2); + gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DISPLAY, 3); + gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 3); + } + + return 0; +} + +static int gmux_set_discrete_state(struct apple_gmux_data *gmux_data, + enum vga_switcheroo_state state) +{ + INIT_COMPLETION(gmux_data->powerchange_done); + + if (state == VGA_SWITCHEROO_ON) { + gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 1); + gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 3); + pr_debug("Discrete card powered up\n"); + } else { + gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 1); + gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 0); + pr_debug("Discrete card powered down\n"); + } + + gmux_data->power_state = state; + + if (gmux_data->gpe >= 0 && + !wait_for_completion_interruptible_timeout(&gmux_data->powerchange_done, + msecs_to_jiffies(200))) + pr_warn("Timeout waiting for gmux switch to complete\n"); + + return 0; +} + +static int gmux_set_power_state(enum vga_switcheroo_client_id id, + enum vga_switcheroo_state state) +{ + if (id == VGA_SWITCHEROO_IGD) + return 0; + + return gmux_set_discrete_state(apple_gmux_data, state); +} + +static int gmux_get_client_id(struct pci_dev *pdev) +{ + /* + * Early Macbook Pros with switchable graphics use nvidia + * integrated graphics. Hardcode that the 9400M is integrated. 
+ */ + if (pdev->vendor == PCI_VENDOR_ID_INTEL) + return VGA_SWITCHEROO_IGD; + else if (pdev->vendor == PCI_VENDOR_ID_NVIDIA && + pdev->device == 0x0863) + return VGA_SWITCHEROO_IGD; + else + return VGA_SWITCHEROO_DIS; +} + +static enum vga_switcheroo_client_id +gmux_active_client(struct apple_gmux_data *gmux_data) +{ + if (gmux_read8(gmux_data, GMUX_PORT_SWITCH_DISPLAY) == 2) + return VGA_SWITCHEROO_IGD; + + return VGA_SWITCHEROO_DIS; +} + +static struct vga_switcheroo_handler gmux_handler = { + .switchto = gmux_switchto, + .power_state = gmux_set_power_state, + .get_client_id = gmux_get_client_id, +}; + +static inline void gmux_disable_interrupts(struct apple_gmux_data *gmux_data) +{ + gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_ENABLE, + GMUX_INTERRUPT_DISABLE); +} + +static inline void gmux_enable_interrupts(struct apple_gmux_data *gmux_data) +{ + gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_ENABLE, + GMUX_INTERRUPT_ENABLE); +} + +static inline u8 gmux_interrupt_get_status(struct apple_gmux_data *gmux_data) +{ + return gmux_read8(gmux_data, GMUX_PORT_INTERRUPT_STATUS); +} + +static void gmux_clear_interrupts(struct apple_gmux_data *gmux_data) +{ + u8 status; + + /* to clear interrupts write back current status */ + status = gmux_interrupt_get_status(gmux_data); + gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_STATUS, status); +} + +static void gmux_notify_handler(acpi_handle device, u32 value, void *context) +{ + u8 status; + struct pnp_dev *pnp = (struct pnp_dev *)context; + struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); + + status = gmux_interrupt_get_status(gmux_data); + gmux_disable_interrupts(gmux_data); + pr_debug("Notify handler called: status %d\n", status); + + gmux_clear_interrupts(gmux_data); + gmux_enable_interrupts(gmux_data); + + if (status & GMUX_INTERRUPT_STATUS_POWER) + complete(&gmux_data->powerchange_done); +} + +static int gmux_suspend(struct pnp_dev *pnp, pm_message_t state) +{ + struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); + gmux_data->resume_client_id = gmux_active_client(gmux_data); + gmux_disable_interrupts(gmux_data); + return 0; +} + +static int gmux_resume(struct pnp_dev *pnp) +{ + struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); + gmux_enable_interrupts(gmux_data); + gmux_switchto(gmux_data->resume_client_id); + if (gmux_data->power_state == VGA_SWITCHEROO_OFF) + gmux_set_discrete_state(gmux_data, gmux_data->power_state); + return 0; +} + static int __devinit gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) { @@ -119,6 +418,11 @@ static int __devinit gmux_probe(struct pnp_dev *pnp, struct backlight_device *bdev; u8 ver_major, ver_minor, ver_release; int ret = -ENXIO; + acpi_status status; + unsigned long long gpe; + + if (apple_gmux_data) + return -EBUSY; gmux_data = kzalloc(sizeof(*gmux_data), GFP_KERNEL); if (!gmux_data) @@ -147,22 +451,29 @@ static int __devinit gmux_probe(struct pnp_dev *pnp, } /* - * On some machines the gmux is in ACPI even thought the machine - * doesn't really have a gmux. Check for invalid version information - * to detect this. 
+ * Invalid version information may indicate either that the gmux + * device isn't present or that it's a new one that uses indexed + * io */ + ver_major = gmux_read8(gmux_data, GMUX_PORT_VERSION_MAJOR); ver_minor = gmux_read8(gmux_data, GMUX_PORT_VERSION_MINOR); ver_release = gmux_read8(gmux_data, GMUX_PORT_VERSION_RELEASE); if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) { - pr_info("gmux device not present\n"); - ret = -ENODEV; - goto err_release; + if (gmux_is_indexed(gmux_data)) { + mutex_init(&gmux_data->index_lock); + gmux_data->indexed = true; + } else { + pr_info("gmux device not present\n"); + ret = -ENODEV; + goto err_release; + } + pr_info("Found indexed gmux\n"); + } else { + pr_info("Found gmux version %d.%d.%d\n", ver_major, ver_minor, + ver_release); } - pr_info("Found gmux version %d.%d.%d\n", ver_major, ver_minor, - ver_release); - memset(&props, 0, sizeof(props)); props.type = BACKLIGHT_PLATFORM; props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS); @@ -194,13 +505,67 @@ static int __devinit gmux_probe(struct pnp_dev *pnp, * Disable the other backlight choices. */ acpi_video_dmi_promote_vendor(); -#ifdef CONFIG_ACPI_VIDEO +#if defined (CONFIG_ACPI_VIDEO) || defined (CONFIG_ACPI_VIDEO_MODULE) acpi_video_unregister(); #endif apple_bl_unregister(); + gmux_data->power_state = VGA_SWITCHEROO_ON; + + gmux_data->dhandle = DEVICE_ACPI_HANDLE(&pnp->dev); + if (!gmux_data->dhandle) { + pr_err("Cannot find acpi handle for pnp device %s\n", + dev_name(&pnp->dev)); + ret = -ENODEV; + goto err_notify; + } + + status = acpi_evaluate_integer(gmux_data->dhandle, "GMGP", NULL, &gpe); + if (ACPI_SUCCESS(status)) { + gmux_data->gpe = (int)gpe; + + status = acpi_install_notify_handler(gmux_data->dhandle, + ACPI_DEVICE_NOTIFY, + &gmux_notify_handler, pnp); + if (ACPI_FAILURE(status)) { + pr_err("Install notify handler failed: %s\n", + acpi_format_exception(status)); + ret = -ENODEV; + goto err_notify; + } + + status = acpi_enable_gpe(NULL, gmux_data->gpe); + if (ACPI_FAILURE(status)) { + pr_err("Cannot enable gpe: %s\n", + acpi_format_exception(status)); + goto err_enable_gpe; + } + } else { + pr_warn("No GPE found for gmux\n"); + gmux_data->gpe = -1; + } + + if (vga_switcheroo_register_handler(&gmux_handler)) { + ret = -ENODEV; + goto err_register_handler; + } + + init_completion(&gmux_data->powerchange_done); + apple_gmux_data = gmux_data; + gmux_enable_interrupts(gmux_data); + return 0; +err_register_handler: + if (gmux_data->gpe >= 0) + acpi_disable_gpe(NULL, gmux_data->gpe); +err_enable_gpe: + if (gmux_data->gpe >= 0) + acpi_remove_notify_handler(gmux_data->dhandle, + ACPI_DEVICE_NOTIFY, + &gmux_notify_handler); +err_notify: + backlight_device_unregister(bdev); err_release: release_region(gmux_data->iostart, gmux_data->iolen); err_free: @@ -212,12 +577,23 @@ static void __devexit gmux_remove(struct pnp_dev *pnp) { struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); + vga_switcheroo_unregister_handler(); + gmux_disable_interrupts(gmux_data); + if (gmux_data->gpe >= 0) { + acpi_disable_gpe(NULL, gmux_data->gpe); + acpi_remove_notify_handler(gmux_data->dhandle, + ACPI_DEVICE_NOTIFY, + &gmux_notify_handler); + } + backlight_device_unregister(gmux_data->bdev); + release_region(gmux_data->iostart, gmux_data->iolen); + apple_gmux_data = NULL; kfree(gmux_data); acpi_video_dmi_demote_vendor(); -#ifdef CONFIG_ACPI_VIDEO +#if defined (CONFIG_ACPI_VIDEO) || defined (CONFIG_ACPI_VIDEO_MODULE) acpi_video_register(); #endif apple_bl_register(); @@ -233,6 
+609,8 @@ static struct pnp_driver gmux_pnp_driver = { .probe = gmux_probe, .remove = __devexit_p(gmux_remove), .id_table = gmux_device_ids, + .suspend = gmux_suspend, + .resume = gmux_resume }; static int __init apple_gmux_init(void) diff --git a/trunk/drivers/platform/x86/asus-wmi.c b/trunk/drivers/platform/x86/asus-wmi.c index c7a36f6b0580..2eb9fe8e8efd 100644 --- a/trunk/drivers/platform/x86/asus-wmi.c +++ b/trunk/drivers/platform/x86/asus-wmi.c @@ -101,6 +101,7 @@ MODULE_LICENSE("GPL"); #define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002 #define ASUS_WMI_DEVID_CWAP 0x00010003 #define ASUS_WMI_DEVID_WLAN 0x00010011 +#define ASUS_WMI_DEVID_WLAN_LED 0x00010012 #define ASUS_WMI_DEVID_BLUETOOTH 0x00010013 #define ASUS_WMI_DEVID_GPS 0x00010015 #define ASUS_WMI_DEVID_WIMAX 0x00010017 @@ -731,8 +732,21 @@ static int asus_rfkill_set(void *data, bool blocked) { struct asus_rfkill *priv = data; u32 ctrl_param = !blocked; + u32 dev_id = priv->dev_id; - return asus_wmi_set_devstate(priv->dev_id, ctrl_param, NULL); + /* + * If the user bit is set, BIOS can't set and record the wlan status, + * it will report the value read from id ASUS_WMI_DEVID_WLAN_LED + * while we query the wlan status through WMI(ASUS_WMI_DEVID_WLAN). + * So, we have to record wlan status in id ASUS_WMI_DEVID_WLAN_LED + * while setting the wlan status through WMI. + * This is also the behavior that windows app will do. + */ + if ((dev_id == ASUS_WMI_DEVID_WLAN) && + priv->asus->driver->wlan_ctrl_by_user) + dev_id = ASUS_WMI_DEVID_WLAN_LED; + + return asus_wmi_set_devstate(dev_id, ctrl_param, NULL); } static void asus_rfkill_query(struct rfkill *rfkill, void *data) @@ -1653,6 +1667,7 @@ static int asus_wmi_add(struct platform_device *pdev) struct asus_wmi *asus; acpi_status status; int err; + u32 result; asus = kzalloc(sizeof(struct asus_wmi), GFP_KERNEL); if (!asus) @@ -1711,6 +1726,10 @@ static int asus_wmi_add(struct platform_device *pdev) if (err) goto fail_debugfs; + asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result); + if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT)) + asus->driver->wlan_ctrl_by_user = 1; + return 0; fail_debugfs: diff --git a/trunk/drivers/platform/x86/asus-wmi.h b/trunk/drivers/platform/x86/asus-wmi.h index 9c1da8b81bea..4c9bd38bb0a2 100644 --- a/trunk/drivers/platform/x86/asus-wmi.h +++ b/trunk/drivers/platform/x86/asus-wmi.h @@ -46,6 +46,7 @@ struct quirk_entry { struct asus_wmi_driver { int brightness; int panel_power; + int wlan_ctrl_by_user; const char *name; struct module *owner; diff --git a/trunk/drivers/platform/x86/classmate-laptop.c b/trunk/drivers/platform/x86/classmate-laptop.c index 2ca7dd1ab3e4..c87ff16873f9 100644 --- a/trunk/drivers/platform/x86/classmate-laptop.c +++ b/trunk/drivers/platform/x86/classmate-laptop.c @@ -350,6 +350,7 @@ static void cmpc_accel_idev_init_v4(struct input_dev *inputdev) inputdev->close = cmpc_accel_close_v4; } +#ifdef CONFIG_PM_SLEEP static int cmpc_accel_suspend_v4(struct device *dev) { struct input_dev *inputdev; @@ -384,6 +385,7 @@ static int cmpc_accel_resume_v4(struct device *dev) return 0; } +#endif static int cmpc_accel_add_v4(struct acpi_device *acpi) { @@ -723,8 +725,10 @@ static void cmpc_tablet_handler(struct acpi_device *dev, u32 event) struct input_dev *inputdev = dev_get_drvdata(&dev->dev); if (event == 0x81) { - if (ACPI_SUCCESS(cmpc_get_tablet(dev->handle, &val))) + if (ACPI_SUCCESS(cmpc_get_tablet(dev->handle, &val))) { input_report_switch(inputdev, SW_TABLET_MODE, !val); + input_sync(inputdev); + } } } @@ -737,8 +741,10 
@@ static void cmpc_tablet_idev_init(struct input_dev *inputdev) set_bit(SW_TABLET_MODE, inputdev->swbit); acpi = to_acpi_device(inputdev->dev.parent); - if (ACPI_SUCCESS(cmpc_get_tablet(acpi->handle, &val))) + if (ACPI_SUCCESS(cmpc_get_tablet(acpi->handle, &val))) { input_report_switch(inputdev, SW_TABLET_MODE, !val); + input_sync(inputdev); + } } static int cmpc_tablet_add(struct acpi_device *acpi) @@ -752,15 +758,19 @@ static int cmpc_tablet_remove(struct acpi_device *acpi, int type) return cmpc_remove_acpi_notify_device(acpi); } +#ifdef CONFIG_PM_SLEEP static int cmpc_tablet_resume(struct device *dev) { struct input_dev *inputdev = dev_get_drvdata(dev); unsigned long long val = 0; - if (ACPI_SUCCESS(cmpc_get_tablet(to_acpi_device(dev)->handle, &val))) + if (ACPI_SUCCESS(cmpc_get_tablet(to_acpi_device(dev)->handle, &val))) { input_report_switch(inputdev, SW_TABLET_MODE, !val); + input_sync(inputdev); + } return 0; } +#endif static SIMPLE_DEV_PM_OPS(cmpc_tablet_pm, NULL, cmpc_tablet_resume); diff --git a/trunk/drivers/platform/x86/dell-laptop.c b/trunk/drivers/platform/x86/dell-laptop.c index 4e96e8c0b60f..927c33af67ec 100644 --- a/trunk/drivers/platform/x86/dell-laptop.c +++ b/trunk/drivers/platform/x86/dell-laptop.c @@ -211,7 +211,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = { .ident = "Dell Inspiron 5420", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 5420"), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5420"), }, .driver_data = &quirk_dell_vostro_v130, }, @@ -220,7 +220,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = { .ident = "Dell Inspiron 5520", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 5520"), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5520"), }, .driver_data = &quirk_dell_vostro_v130, }, @@ -229,7 +229,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = { .ident = "Dell Inspiron 5720", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 5720"), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5720"), }, .driver_data = &quirk_dell_vostro_v130, }, @@ -238,7 +238,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = { .ident = "Dell Inspiron 7420", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 7420"), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7420"), }, .driver_data = &quirk_dell_vostro_v130, }, @@ -247,7 +247,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = { .ident = "Dell Inspiron 7520", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 7520"), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"), }, .driver_data = &quirk_dell_vostro_v130, }, @@ -256,7 +256,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = { .ident = "Dell Inspiron 7720", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 7720"), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7720"), }, .driver_data = &quirk_dell_vostro_v130, }, diff --git a/trunk/drivers/platform/x86/fujitsu-tablet.c b/trunk/drivers/platform/x86/fujitsu-tablet.c index d2e41735a47b..7acae3f85f3b 100644 --- a/trunk/drivers/platform/x86/fujitsu-tablet.c +++ b/trunk/drivers/platform/x86/fujitsu-tablet.c @@ -440,11 +440,13 @@ static int __devexit acpi_fujitsu_remove(struct acpi_device *adev, int type) return 0; } +#ifdef CONFIG_PM_SLEEP static int acpi_fujitsu_resume(struct device *dev) { fujitsu_reset(); return 0; } +#endif static 
SIMPLE_DEV_PM_OPS(acpi_fujitsu_pm, NULL, acpi_fujitsu_resume); diff --git a/trunk/drivers/platform/x86/hdaps.c b/trunk/drivers/platform/x86/hdaps.c index d9ab6f64dcec..777c7e3dda51 100644 --- a/trunk/drivers/platform/x86/hdaps.c +++ b/trunk/drivers/platform/x86/hdaps.c @@ -305,10 +305,12 @@ static int hdaps_probe(struct platform_device *dev) return 0; } +#ifdef CONFIG_PM_SLEEP static int hdaps_resume(struct device *dev) { return hdaps_device_init(); } +#endif static SIMPLE_DEV_PM_OPS(hdaps_pm, NULL, hdaps_resume); diff --git a/trunk/drivers/platform/x86/hp_accel.c b/trunk/drivers/platform/x86/hp_accel.c index f4d91154ad67..6b9af989632b 100644 --- a/trunk/drivers/platform/x86/hp_accel.c +++ b/trunk/drivers/platform/x86/hp_accel.c @@ -352,7 +352,7 @@ static int lis3lv02d_remove(struct acpi_device *device, int type) } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int lis3lv02d_suspend(struct device *dev) { /* make sure the device is off when we suspend */ diff --git a/trunk/drivers/platform/x86/ideapad-laptop.c b/trunk/drivers/platform/x86/ideapad-laptop.c index 17f6dfd8dbfb..dae7abe1d711 100644 --- a/trunk/drivers/platform/x86/ideapad-laptop.c +++ b/trunk/drivers/platform/x86/ideapad-laptop.c @@ -36,6 +36,7 @@ #include #include #include +#include #define IDEAPAD_RFKILL_DEV_NUM (3) @@ -63,8 +64,11 @@ enum { VPCCMD_R_3G, VPCCMD_W_3G, VPCCMD_R_ODD, /* 0x21 */ - VPCCMD_R_RF = 0x23, + VPCCMD_W_FAN, + VPCCMD_R_RF, VPCCMD_W_RF, + VPCCMD_R_FAN = 0x2B, + VPCCMD_R_SPECIAL_BUTTONS = 0x31, VPCCMD_W_BL_POWER = 0x33, }; @@ -356,14 +360,46 @@ static ssize_t store_ideapad_cam(struct device *dev, return -EINVAL; ret = write_ec_cmd(ideapad_handle, VPCCMD_W_CAMERA, state); if (ret < 0) - return ret; + return -EIO; return count; } static DEVICE_ATTR(camera_power, 0644, show_ideapad_cam, store_ideapad_cam); +static ssize_t show_ideapad_fan(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long result; + + if (read_ec_data(ideapad_handle, VPCCMD_R_FAN, &result)) + return sprintf(buf, "-1\n"); + return sprintf(buf, "%lu\n", result); +} + +static ssize_t store_ideapad_fan(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int ret, state; + + if (!count) + return 0; + if (sscanf(buf, "%i", &state) != 1) + return -EINVAL; + if (state < 0 || state > 4 || state == 3) + return -EINVAL; + ret = write_ec_cmd(ideapad_handle, VPCCMD_W_FAN, state); + if (ret < 0) + return -EIO; + return count; +} + +static DEVICE_ATTR(fan_mode, 0644, show_ideapad_fan, store_ideapad_fan); + static struct attribute *ideapad_attributes[] = { &dev_attr_camera_power.attr, + &dev_attr_fan_mode.attr, NULL }; @@ -377,7 +413,10 @@ static umode_t ideapad_is_visible(struct kobject *kobj, if (attr == &dev_attr_camera_power.attr) supported = test_bit(CFG_CAMERA_BIT, &(priv->cfg)); - else + else if (attr == &dev_attr_fan_mode.attr) { + unsigned long value; + supported = !read_ec_data(ideapad_handle, VPCCMD_R_FAN, &value); + } else supported = true; return supported ? 
attr->mode : 0; @@ -518,9 +557,15 @@ static void ideapad_platform_exit(struct ideapad_private *priv) */ static const struct key_entry ideapad_keymap[] = { { KE_KEY, 6, { KEY_SWITCHVIDEOMODE } }, + { KE_KEY, 7, { KEY_CAMERA } }, + { KE_KEY, 11, { KEY_F16 } }, { KE_KEY, 13, { KEY_WLAN } }, { KE_KEY, 16, { KEY_PROG1 } }, { KE_KEY, 17, { KEY_PROG2 } }, + { KE_KEY, 64, { KEY_PROG3 } }, + { KE_KEY, 65, { KEY_PROG4 } }, + { KE_KEY, 66, { KEY_TOUCHPAD_OFF } }, + { KE_KEY, 67, { KEY_TOUCHPAD_ON } }, { KE_END, 0 }, }; @@ -587,6 +632,28 @@ static void ideapad_input_novokey(struct ideapad_private *priv) ideapad_input_report(priv, 16); } +static void ideapad_check_special_buttons(struct ideapad_private *priv) +{ + unsigned long bit, value; + + read_ec_data(ideapad_handle, VPCCMD_R_SPECIAL_BUTTONS, &value); + + for (bit = 0; bit < 16; bit++) { + if (test_bit(bit, &value)) { + switch (bit) { + case 6: + /* Thermal Management button */ + ideapad_input_report(priv, 65); + break; + case 1: + /* OneKey Theater button */ + ideapad_input_report(priv, 64); + break; + } + } + } +} + /* * backlight */ @@ -691,6 +758,24 @@ static const struct acpi_device_id ideapad_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, ideapad_device_ids); +static void ideapad_sync_touchpad_state(struct acpi_device *adevice) +{ + struct ideapad_private *priv = dev_get_drvdata(&adevice->dev); + unsigned long value; + + /* Without reading from EC touchpad LED doesn't switch state */ + if (!read_ec_data(adevice->handle, VPCCMD_R_TOUCHPAD, &value)) { + /* Some IdeaPads don't really turn off touchpad - they only + * switch the LED state. We (de)activate KBC AUX port to turn + * touchpad off and on. We send KEY_TOUCHPAD_OFF and + * KEY_TOUCHPAD_ON to not to get out of sync with LED */ + unsigned char param; + i8042_command(¶m, value ? I8042_CMD_AUX_ENABLE : + I8042_CMD_AUX_DISABLE); + ideapad_input_report(priv, value ? 
67 : 66); + } +} + static int __devinit ideapad_acpi_add(struct acpi_device *adevice) { int ret, i; @@ -727,6 +812,7 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice) priv->rfk[i] = NULL; } ideapad_sync_rfk_state(priv); + ideapad_sync_touchpad_state(adevice); if (!acpi_video_backlight_support()) { ret = ideapad_backlight_init(priv); @@ -785,9 +871,14 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event) ideapad_sync_rfk_state(priv); break; case 13: + case 11: + case 7: case 6: ideapad_input_report(priv, vpc_bit); break; + case 5: + ideapad_sync_touchpad_state(adevice); + break; case 4: ideapad_backlight_notify_brightness(priv); break; @@ -797,6 +888,9 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event) case 2: ideapad_backlight_notify_power(priv); break; + case 0: + ideapad_check_special_buttons(priv); + break; default: pr_info("Unknown event: %lu\n", vpc_bit); } @@ -804,6 +898,15 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event) } } +static int ideapad_acpi_resume(struct device *device) +{ + ideapad_sync_rfk_state(ideapad_priv); + ideapad_sync_touchpad_state(to_acpi_device(device)); + return 0; +} + +static SIMPLE_DEV_PM_OPS(ideapad_pm, NULL, ideapad_acpi_resume); + static struct acpi_driver ideapad_acpi_driver = { .name = "ideapad_acpi", .class = "IdeaPad", @@ -811,6 +914,7 @@ static struct acpi_driver ideapad_acpi_driver = { .ops.add = ideapad_acpi_add, .ops.remove = ideapad_acpi_remove, .ops.notify = ideapad_acpi_notify, + .drv.pm = &ideapad_pm, .owner = THIS_MODULE, }; diff --git a/trunk/drivers/platform/x86/msi-laptop.c b/trunk/drivers/platform/x86/msi-laptop.c index f64441844317..2111dbb7e1e3 100644 --- a/trunk/drivers/platform/x86/msi-laptop.c +++ b/trunk/drivers/platform/x86/msi-laptop.c @@ -85,7 +85,9 @@ #define MSI_STANDARD_EC_TOUCHPAD_ADDRESS 0xe4 #define MSI_STANDARD_EC_TOUCHPAD_MASK (1 << 4) +#ifdef CONFIG_PM_SLEEP static int msi_laptop_resume(struct device *device); +#endif static SIMPLE_DEV_PM_OPS(msi_laptop_pm, NULL, msi_laptop_resume); #define MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS 0x2f @@ -753,6 +755,7 @@ static int rfkill_init(struct platform_device *sdev) return retval; } +#ifdef CONFIG_PM_SLEEP static int msi_laptop_resume(struct device *device) { u8 data; @@ -773,6 +776,7 @@ static int msi_laptop_resume(struct device *device) return 0; } +#endif static int __init msi_laptop_input_setup(void) { diff --git a/trunk/drivers/platform/x86/panasonic-laptop.c b/trunk/drivers/platform/x86/panasonic-laptop.c index 24480074bcf0..8e8caa767d6a 100644 --- a/trunk/drivers/platform/x86/panasonic-laptop.c +++ b/trunk/drivers/platform/x86/panasonic-laptop.c @@ -188,7 +188,9 @@ static const struct acpi_device_id pcc_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, pcc_device_ids); +#ifdef CONFIG_PM_SLEEP static int acpi_pcc_hotkey_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(acpi_pcc_hotkey_pm, NULL, acpi_pcc_hotkey_resume); static struct acpi_driver acpi_pcc_driver = { @@ -540,6 +542,7 @@ static void acpi_pcc_destroy_input(struct pcc_acpi *pcc) /* kernel module interface */ +#ifdef CONFIG_PM_SLEEP static int acpi_pcc_hotkey_resume(struct device *dev) { struct pcc_acpi *pcc; @@ -556,6 +559,7 @@ static int acpi_pcc_hotkey_resume(struct device *dev) return acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_mode); } +#endif static int acpi_pcc_hotkey_add(struct acpi_device *device) { diff --git a/trunk/drivers/platform/x86/sony-laptop.c b/trunk/drivers/platform/x86/sony-laptop.c index 
9363969ad07a..daaddec68def 100644 --- a/trunk/drivers/platform/x86/sony-laptop.c +++ b/trunk/drivers/platform/x86/sony-laptop.c @@ -140,7 +140,10 @@ MODULE_PARM_DESC(kbd_backlight_timeout, "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout " "(default: 0)"); +#ifdef CONFIG_PM_SLEEP static void sony_nc_kbd_backlight_resume(void); +static void sony_nc_thermal_resume(void); +#endif static int sony_nc_kbd_backlight_setup(struct platform_device *pd, unsigned int handle); static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd); @@ -151,7 +154,6 @@ static void sony_nc_battery_care_cleanup(struct platform_device *pd); static int sony_nc_thermal_setup(struct platform_device *pd); static void sony_nc_thermal_cleanup(struct platform_device *pd); -static void sony_nc_thermal_resume(void); static int sony_nc_lid_resume_setup(struct platform_device *pd); static void sony_nc_lid_resume_cleanup(struct platform_device *pd); @@ -1431,6 +1433,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd) sony_nc_handles_cleanup(pd); } +#ifdef CONFIG_PM_SLEEP static void sony_nc_function_resume(void) { unsigned int i, result, bitmask, arg; @@ -1508,6 +1511,7 @@ static int sony_nc_resume(struct device *dev) return 0; } +#endif static SIMPLE_DEV_PM_OPS(sony_nc_pm, NULL, sony_nc_resume); @@ -1872,6 +1876,7 @@ static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd) } } +#ifdef CONFIG_PM_SLEEP static void sony_nc_kbd_backlight_resume(void) { int ignore = 0; @@ -1888,6 +1893,7 @@ static void sony_nc_kbd_backlight_resume(void) (kbdbl_ctl->base + 0x200) | (kbdbl_ctl->timeout << 0x10), &ignore); } +#endif struct battery_care_control { struct device_attribute attrs[2]; @@ -2210,6 +2216,7 @@ static void sony_nc_thermal_cleanup(struct platform_device *pd) } } +#ifdef CONFIG_PM_SLEEP static void sony_nc_thermal_resume(void) { unsigned int status = sony_nc_thermal_mode_get(); @@ -2217,6 +2224,7 @@ static void sony_nc_thermal_resume(void) if (status != th_handle->mode) sony_nc_thermal_mode_set(th_handle->mode); } +#endif /* resume on LID open */ struct snc_lid_resume_control { @@ -4287,6 +4295,7 @@ static int sony_pic_add(struct acpi_device *device) return result; } +#ifdef CONFIG_PM_SLEEP static int sony_pic_suspend(struct device *dev) { if (sony_pic_disable(to_acpi_device(dev))) @@ -4300,6 +4309,7 @@ static int sony_pic_resume(struct device *dev) spic_dev.cur_ioport, spic_dev.cur_irq); return 0; } +#endif static SIMPLE_DEV_PM_OPS(sony_pic_pm, sony_pic_suspend, sony_pic_resume); diff --git a/trunk/drivers/platform/x86/thinkpad_acpi.c b/trunk/drivers/platform/x86/thinkpad_acpi.c index e7f73287636c..80e377949314 100644 --- a/trunk/drivers/platform/x86/thinkpad_acpi.c +++ b/trunk/drivers/platform/x86/thinkpad_acpi.c @@ -922,6 +922,7 @@ static struct input_dev *tpacpi_inputdev; static struct mutex tpacpi_inputdev_send_mutex; static LIST_HEAD(tpacpi_all_drivers); +#ifdef CONFIG_PM_SLEEP static int tpacpi_suspend_handler(struct device *dev) { struct ibm_struct *ibm, *itmp; @@ -949,6 +950,7 @@ static int tpacpi_resume_handler(struct device *dev) return 0; } +#endif static SIMPLE_DEV_PM_OPS(tpacpi_pm, tpacpi_suspend_handler, tpacpi_resume_handler); @@ -8662,6 +8664,13 @@ static int __must_check __init get_thinkpad_model_data( tp->model_str = kstrdup(s, GFP_KERNEL); if (!tp->model_str) return -ENOMEM; + } else { + s = dmi_get_system_info(DMI_BIOS_VENDOR); + if (s && !(strnicmp(s, "Lenovo", 6))) { + tp->model_str = kstrdup(s, GFP_KERNEL); + if (!tp->model_str) + return -ENOMEM; + } } s = 
dmi_get_system_info(DMI_PRODUCT_NAME); diff --git a/trunk/drivers/platform/x86/toshiba_acpi.c b/trunk/drivers/platform/x86/toshiba_acpi.c index c13ba5bac93f..5f1256d5e933 100644 --- a/trunk/drivers/platform/x86/toshiba_acpi.c +++ b/trunk/drivers/platform/x86/toshiba_acpi.c @@ -1296,6 +1296,7 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event) } } +#ifdef CONFIG_PM_SLEEP static int toshiba_acpi_suspend(struct device *device) { struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device)); @@ -1317,6 +1318,7 @@ static int toshiba_acpi_resume(struct device *device) return 0; } +#endif static SIMPLE_DEV_PM_OPS(toshiba_acpi_pm, toshiba_acpi_suspend, toshiba_acpi_resume); diff --git a/trunk/drivers/platform/x86/toshiba_bluetooth.c b/trunk/drivers/platform/x86/toshiba_bluetooth.c index 715a43cb5e3c..5e5d6317d690 100644 --- a/trunk/drivers/platform/x86/toshiba_bluetooth.c +++ b/trunk/drivers/platform/x86/toshiba_bluetooth.c @@ -41,7 +41,9 @@ static const struct acpi_device_id bt_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, bt_device_ids); +#ifdef CONFIG_PM_SLEEP static int toshiba_bt_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(toshiba_bt_pm, NULL, toshiba_bt_resume); static struct acpi_driver toshiba_bt_rfkill_driver = { @@ -90,10 +92,12 @@ static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event) toshiba_bluetooth_enable(device->handle); } +#ifdef CONFIG_PM_SLEEP static int toshiba_bt_resume(struct device *dev) { return toshiba_bluetooth_enable(to_acpi_device(dev)->handle); } +#endif static int toshiba_bt_rfkill_add(struct acpi_device *device) { diff --git a/trunk/drivers/platform/x86/xo1-rfkill.c b/trunk/drivers/platform/x86/xo1-rfkill.c index b57ad8641480..1da13ed34b04 100644 --- a/trunk/drivers/platform/x86/xo1-rfkill.c +++ b/trunk/drivers/platform/x86/xo1-rfkill.c @@ -12,8 +12,7 @@ #include #include #include - -#include +#include static bool card_blocked; diff --git a/trunk/drivers/platform/x86/xo15-ebook.c b/trunk/drivers/platform/x86/xo15-ebook.c index 849c07c13bf6..38ba39d7ca7d 100644 --- a/trunk/drivers/platform/x86/xo15-ebook.c +++ b/trunk/drivers/platform/x86/xo15-ebook.c @@ -77,10 +77,12 @@ static void ebook_switch_notify(struct acpi_device *device, u32 event) } } +#ifdef CONFIG_PM_SLEEP static int ebook_switch_resume(struct device *dev) { return ebook_send_state(to_acpi_device(dev)); } +#endif static SIMPLE_DEV_PM_OPS(ebook_switch_pm, NULL, ebook_switch_resume); diff --git a/trunk/drivers/power/olpc_battery.c b/trunk/drivers/power/olpc_battery.c index 55b10b813353..a89a41acf9c5 100644 --- a/trunk/drivers/power/olpc_battery.c +++ b/trunk/drivers/power/olpc_battery.c @@ -17,6 +17,7 @@ #include #include #include +#include #include diff --git a/trunk/drivers/pwm/Kconfig b/trunk/drivers/pwm/Kconfig index 8fc3808d7a3e..90c5c7357a50 100644 --- a/trunk/drivers/pwm/Kconfig +++ b/trunk/drivers/pwm/Kconfig @@ -1,12 +1,31 @@ menuconfig PWM - bool "PWM Support" + bool "Pulse-Width Modulation (PWM) Support" depends on !MACH_JZ4740 && !PUV3_PWM help - This enables PWM support through the generic PWM framework. - You only need to enable this, if you also want to enable - one or more of the PWM drivers below. - - If unsure, say N. + Generic Pulse-Width Modulation (PWM) support. + + In Pulse-Width Modulation, a variation of the width of pulses + in a rectangular pulse signal is used as a means to alter the + average power of the signal. Applications include efficient + power delivery and voltage regulation. 
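For a concrete sense of the client-side API this help text goes on to describe, a minimal consumer sketch follows. It only uses calls exported by the framework (pwm_get(), pwm_config(), pwm_enable(), pwm_put()); the device, period and duty-cycle values are invented for the example, and a PWM lookup table or device-tree mapping is assumed to exist for the device:

#include <linux/err.h>
#include <linux/pwm.h>

/* Minimal sketch of a PWM client; values are illustrative only. */
static int example_pwm_client_enable(struct device *dev)
{
	struct pwm_device *pwm;
	int ret;

	pwm = pwm_get(dev, NULL);		/* needs a lookup table or DT mapping for dev */
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	ret = pwm_config(pwm, 500000, 1000000);	/* 50% duty cycle, 1 ms period, in ns */
	if (!ret)
		ret = pwm_enable(pwm);
	if (ret)
		pwm_put(pwm);			/* caller keeps the reference on success
						 * and releases it with pwm_put() on teardown */
	return ret;
}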
In computer systems, + PWMs are commonly used to control fans or the brightness of + display backlights. + + This framework provides a generic interface to PWM devices + within the Linux kernel. On the driver side it provides an API + to register and unregister a PWM chip, an abstraction of a PWM + controller, that supports one or more PWM devices. Client + drivers can request PWM devices and use the generic framework + to configure as well as enable and disable them. + + This generic framework replaces the legacy PWM framework which + allows only a single driver implementing the required API. Not + all legacy implementations have been ported to the framework + yet. The framework provides an API that is backward compatible + with the legacy framework so that existing client drivers + continue to work as expected. + + If unsure, say no. if PWM diff --git a/trunk/drivers/pwm/core.c b/trunk/drivers/pwm/core.c index ecb76909e946..c6e05078d3ad 100644 --- a/trunk/drivers/pwm/core.c +++ b/trunk/drivers/pwm/core.c @@ -129,8 +129,8 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label) return 0; } -static struct pwm_device *of_pwm_simple_xlate(struct pwm_chip *pc, - const struct of_phandle_args *args) +static struct pwm_device * +of_pwm_simple_xlate(struct pwm_chip *pc, const struct of_phandle_args *args) { struct pwm_device *pwm; @@ -149,7 +149,7 @@ static struct pwm_device *of_pwm_simple_xlate(struct pwm_chip *pc, return pwm; } -void of_pwmchip_add(struct pwm_chip *chip) +static void of_pwmchip_add(struct pwm_chip *chip) { if (!chip->dev || !chip->dev->of_node) return; @@ -162,7 +162,7 @@ void of_pwmchip_add(struct pwm_chip *chip) of_node_get(chip->dev->of_node); } -void of_pwmchip_remove(struct pwm_chip *chip) +static void of_pwmchip_remove(struct pwm_chip *chip) { if (chip->dev && chip->dev->of_node) of_node_put(chip->dev->of_node); @@ -527,7 +527,7 @@ void __init pwm_add_table(struct pwm_lookup *table, size_t num) struct pwm_device *pwm_get(struct device *dev, const char *con_id) { struct pwm_device *pwm = ERR_PTR(-EPROBE_DEFER); - const char *dev_id = dev ? dev_name(dev): NULL; + const char *dev_id = dev ? dev_name(dev) : NULL; struct pwm_chip *chip = NULL; unsigned int index = 0; unsigned int best = 0; @@ -609,7 +609,7 @@ void pwm_put(struct pwm_device *pwm) mutex_lock(&pwm_lock); if (!test_and_clear_bit(PWMF_REQUESTED, &pwm->flags)) { - pr_warning("PWM device already freed\n"); + pr_warn("PWM device already freed\n"); goto out; } diff --git a/trunk/drivers/pwm/pwm-samsung.c b/trunk/drivers/pwm/pwm-samsung.c index d10386528c9c..e5187c0ade9f 100644 --- a/trunk/drivers/pwm/pwm-samsung.c +++ b/trunk/drivers/pwm/pwm-samsung.c @@ -225,6 +225,7 @@ static int s3c_pwm_probe(struct platform_device *pdev) /* calculate base of control bits in TCON */ s3c->tcon_base = id == 0 ? 
0 : (id * 4) + 4; + s3c->chip.dev = &pdev->dev; s3c->chip.ops = &s3c_pwm_ops; s3c->chip.base = -1; s3c->chip.npwm = 1; diff --git a/trunk/drivers/pwm/pwm-tegra.c b/trunk/drivers/pwm/pwm-tegra.c index 02ce18d5e49a..057465e0553c 100644 --- a/trunk/drivers/pwm/pwm-tegra.c +++ b/trunk/drivers/pwm/pwm-tegra.c @@ -187,10 +187,8 @@ static int tegra_pwm_probe(struct platform_device *pdev) } pwm->mmio_base = devm_request_and_ioremap(&pdev->dev, r); - if (!pwm->mmio_base) { - dev_err(&pdev->dev, "failed to ioremap() region\n"); + if (!pwm->mmio_base) return -EADDRNOTAVAIL; - } platform_set_drvdata(pdev, pwm); diff --git a/trunk/drivers/pwm/pwm-tiecap.c b/trunk/drivers/pwm/pwm-tiecap.c index 3c2ad284ee3e..0b66d0f25922 100644 --- a/trunk/drivers/pwm/pwm-tiecap.c +++ b/trunk/drivers/pwm/pwm-tiecap.c @@ -192,10 +192,8 @@ static int __devinit ecap_pwm_probe(struct platform_device *pdev) } pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r); - if (!pc->mmio_base) { - dev_err(&pdev->dev, "failed to ioremap() registers\n"); + if (!pc->mmio_base) return -EADDRNOTAVAIL; - } ret = pwmchip_add(&pc->chip); if (ret < 0) { diff --git a/trunk/drivers/pwm/pwm-tiehrpwm.c b/trunk/drivers/pwm/pwm-tiehrpwm.c index 010d232cb0c8..c3756d1be194 100644 --- a/trunk/drivers/pwm/pwm-tiehrpwm.c +++ b/trunk/drivers/pwm/pwm-tiehrpwm.c @@ -371,10 +371,8 @@ static int __devinit ehrpwm_pwm_probe(struct platform_device *pdev) } pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r); - if (!pc->mmio_base) { - dev_err(&pdev->dev, "failed to ioremap() registers\n"); + if (!pc->mmio_base) return -EADDRNOTAVAIL; - } ret = pwmchip_add(&pc->chip); if (ret < 0) { diff --git a/trunk/drivers/pwm/pwm-vt8500.c b/trunk/drivers/pwm/pwm-vt8500.c index 548021439f0c..ad14389b7144 100644 --- a/trunk/drivers/pwm/pwm-vt8500.c +++ b/trunk/drivers/pwm/pwm-vt8500.c @@ -41,7 +41,7 @@ static inline void pwm_busy_wait(void __iomem *reg, u8 bitmask) cpu_relax(); if (unlikely(!loops)) - pr_warning("Waiting for status bits 0x%x to clear timed out\n", + pr_warn("Waiting for status bits 0x%x to clear timed out\n", bitmask); } diff --git a/trunk/drivers/rapidio/devices/tsi721.c b/trunk/drivers/rapidio/devices/tsi721.c index 722246cf20ab..5d44252b7342 100644 --- a/trunk/drivers/rapidio/devices/tsi721.c +++ b/trunk/drivers/rapidio/devices/tsi721.c @@ -435,6 +435,9 @@ static void tsi721_db_dpc(struct work_struct *work) " info %4.4x\n", DBELL_SID(idb.bytes), DBELL_TID(idb.bytes), DBELL_INF(idb.bytes)); } + + wr_ptr = ioread32(priv->regs + + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; } iowrite32(rd_ptr & (IDB_QSIZE - 1), @@ -445,6 +448,10 @@ static void tsi721_db_dpc(struct work_struct *work) regval |= TSI721_SR_CHINT_IDBQRCV; iowrite32(regval, priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); + + wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; + if (wr_ptr != rd_ptr) + schedule_work(&priv->idb_work); } /** @@ -2212,7 +2219,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct tsi721_device *priv; - int i, cap; + int cap; int err; u32 regval; @@ -2232,12 +2239,15 @@ static int __devinit tsi721_probe(struct pci_dev *pdev, priv->pdev = pdev; #ifdef DEBUG + { + int i; for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n", i, (unsigned long long)pci_resource_start(pdev, i), (unsigned long)pci_resource_len(pdev, i), pci_resource_flags(pdev, i)); } + } #endif /* * Verify BAR configuration diff --git a/trunk/drivers/regulator/ab3100.c 
b/trunk/drivers/regulator/ab3100.c index 182b553059c9..c151fd5d8c97 100644 --- a/trunk/drivers/regulator/ab3100.c +++ b/trunk/drivers/regulator/ab3100.c @@ -486,6 +486,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = { .id = AB3100_BUCK, .ops = ®ulator_ops_variable_sleepable, .n_voltages = ARRAY_SIZE(ldo_e_buck_typ_voltages), + .volt_table = ldo_e_buck_typ_voltages, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, .enable_time = 1000, diff --git a/trunk/drivers/regulator/anatop-regulator.c b/trunk/drivers/regulator/anatop-regulator.c index e9c2085f9dfb..ce0fe72a428e 100644 --- a/trunk/drivers/regulator/anatop-regulator.c +++ b/trunk/drivers/regulator/anatop-regulator.c @@ -64,14 +64,15 @@ static int anatop_set_voltage_sel(struct regulator_dev *reg, unsigned selector) static int anatop_get_voltage_sel(struct regulator_dev *reg) { struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg); - u32 val; + u32 val, mask; if (!anatop_reg->control_reg) return -ENOTSUPP; val = anatop_read_reg(anatop_reg->mfd, anatop_reg->control_reg); - val = (val & ((1 << anatop_reg->vol_bit_width) - 1)) >> + mask = ((1 << anatop_reg->vol_bit_width) - 1) << anatop_reg->vol_bit_shift; + val = (val & mask) >> anatop_reg->vol_bit_shift; return val - anatop_reg->min_bit_val; } diff --git a/trunk/drivers/regulator/core.c b/trunk/drivers/regulator/core.c index f092588a078c..48385318175a 100644 --- a/trunk/drivers/regulator/core.c +++ b/trunk/drivers/regulator/core.c @@ -3217,7 +3217,7 @@ regulator_register(const struct regulator_desc *regulator_desc, dev_set_drvdata(&rdev->dev, rdev); - if (config->ena_gpio) { + if (config->ena_gpio && gpio_is_valid(config->ena_gpio)) { ret = gpio_request_one(config->ena_gpio, GPIOF_DIR_OUT | config->ena_gpio_flags, rdev_get_name(rdev)); diff --git a/trunk/drivers/regulator/gpio-regulator.c b/trunk/drivers/regulator/gpio-regulator.c index 34b67bee9323..8b5944f2d7d1 100644 --- a/trunk/drivers/regulator/gpio-regulator.c +++ b/trunk/drivers/regulator/gpio-regulator.c @@ -57,16 +57,17 @@ static int gpio_regulator_get_value(struct regulator_dev *dev) return -EINVAL; } -static int gpio_regulator_set_value(struct regulator_dev *dev, - int min, int max, unsigned *selector) +static int gpio_regulator_set_voltage(struct regulator_dev *dev, + int min_uV, int max_uV, + unsigned *selector) { struct gpio_regulator_data *data = rdev_get_drvdata(dev); int ptr, target = 0, state, best_val = INT_MAX; for (ptr = 0; ptr < data->nr_states; ptr++) if (data->states[ptr].value < best_val && - data->states[ptr].value >= min && - data->states[ptr].value <= max) { + data->states[ptr].value >= min_uV && + data->states[ptr].value <= max_uV) { target = data->states[ptr].gpios; best_val = data->states[ptr].value; if (selector) @@ -85,13 +86,6 @@ static int gpio_regulator_set_value(struct regulator_dev *dev, return 0; } -static int gpio_regulator_set_voltage(struct regulator_dev *dev, - int min_uV, int max_uV, - unsigned *selector) -{ - return gpio_regulator_set_value(dev, min_uV, max_uV, selector); -} - static int gpio_regulator_list_voltage(struct regulator_dev *dev, unsigned selector) { @@ -106,7 +100,27 @@ static int gpio_regulator_list_voltage(struct regulator_dev *dev, static int gpio_regulator_set_current_limit(struct regulator_dev *dev, int min_uA, int max_uA) { - return gpio_regulator_set_value(dev, min_uA, max_uA, NULL); + struct gpio_regulator_data *data = rdev_get_drvdata(dev); + int ptr, target = 0, state, best_val = 0; + + for (ptr = 0; ptr < data->nr_states; ptr++) + if (data->states[ptr].value > 
best_val && + data->states[ptr].value >= min_uA && + data->states[ptr].value <= max_uA) { + target = data->states[ptr].gpios; + best_val = data->states[ptr].value; + } + + if (best_val == 0) + return -EINVAL; + + for (ptr = 0; ptr < data->nr_gpios; ptr++) { + state = (target & (1 << ptr)) >> ptr; + gpio_set_value(data->gpios[ptr].gpio, state); + } + data->state = target; + + return 0; } static struct regulator_ops gpio_regulator_voltage_ops = { diff --git a/trunk/drivers/regulator/palmas-regulator.c b/trunk/drivers/regulator/palmas-regulator.c index 17d19fbbc490..46c7e88f8381 100644 --- a/trunk/drivers/regulator/palmas-regulator.c +++ b/trunk/drivers/regulator/palmas-regulator.c @@ -486,9 +486,12 @@ static int palmas_map_voltage_ldo(struct regulator_dev *rdev, { int ret, voltage; - ret = ((min_uV - 900000) / 50000) + 1; - if (ret < 0) - return ret; + if (min_uV == 0) + return 0; + + if (min_uV < 900000) + min_uV = 900000; + ret = DIV_ROUND_UP(min_uV - 900000, 50000) + 1; /* Map back into a voltage to verify we're still in bounds */ voltage = palmas_list_voltage_ldo(rdev, ret); @@ -586,7 +589,7 @@ static int palmas_ldo_init(struct palmas *palmas, int id, addr = palmas_regs_info[id].ctrl_addr; - ret = palmas_smps_read(palmas, addr, ®); + ret = palmas_ldo_read(palmas, addr, ®); if (ret) return ret; @@ -596,7 +599,7 @@ static int palmas_ldo_init(struct palmas *palmas, int id, if (reg_init->mode_sleep) reg |= PALMAS_LDO1_CTRL_MODE_SLEEP; - ret = palmas_smps_write(palmas, addr, reg); + ret = palmas_ldo_write(palmas, addr, reg); if (ret) return ret; @@ -630,7 +633,7 @@ static __devinit int palmas_probe(struct platform_device *pdev) ret = palmas_smps_read(palmas, PALMAS_SMPS_CTRL, ®); if (ret) - goto err_unregister_regulator; + return ret; if (reg & PALMAS_SMPS_CTRL_SMPS12_SMPS123_EN) pmic->smps123 = 1; @@ -676,7 +679,9 @@ static __devinit int palmas_probe(struct platform_device *pdev) case PALMAS_REG_SMPS10: pmic->desc[id].n_voltages = PALMAS_SMPS10_NUM_VOLTAGES; pmic->desc[id].ops = &palmas_ops_smps10; - pmic->desc[id].vsel_reg = PALMAS_SMPS10_CTRL; + pmic->desc[id].vsel_reg = + PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, + PALMAS_SMPS10_CTRL); pmic->desc[id].vsel_mask = SMPS10_VSEL; pmic->desc[id].enable_reg = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, @@ -778,8 +783,10 @@ static __devinit int palmas_probe(struct platform_device *pdev) reg_init = pdata->reg_init[id]; if (reg_init) { ret = palmas_ldo_init(palmas, id, reg_init); - if (ret) + if (ret) { + regulator_unregister(pmic->rdev[id]); goto err_unregister_regulator; + } } } } diff --git a/trunk/drivers/regulator/tps6586x-regulator.c b/trunk/drivers/regulator/tps6586x-regulator.c index e6da90ab5153..19241fc30050 100644 --- a/trunk/drivers/regulator/tps6586x-regulator.c +++ b/trunk/drivers/regulator/tps6586x-regulator.c @@ -240,14 +240,16 @@ static struct tps6586x_regulator tps6586x_regulator[] = { TPS6586X_LDO(LDO_9, "vinldo9", ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7), TPS6586X_LDO(LDO_RTC, NULL, ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7), TPS6586X_LDO(LDO_1, "vinldo01", dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1), - TPS6586X_LDO(SM_2, "sm2", sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7), + TPS6586X_LDO(SM_2, "vin-sm2", sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7), TPS6586X_DVM(LDO_2, "vinldo23", dvm, LDO2BV1, 0, 5, ENA, 3, ENB, 3, VCC2, 6), TPS6586X_DVM(LDO_4, "vinldo4", ldo4, LDO4V1, 0, 5, ENC, 3, END, 3, VCC1, 6), - TPS6586X_DVM(SM_0, "sm0", dvm, SM0V1, 0, 5, ENA, 1, ENB, 1, VCC1, 2), - TPS6586X_DVM(SM_1, "sm1", dvm, SM1V1, 0, 5, ENA, 0, ENB, 0, VCC1, 0), + TPS6586X_DVM(SM_0, "vin-sm0", 
dvm, SM0V1, 0, 5, ENA, 1, + ENB, 1, VCC1, 2), + TPS6586X_DVM(SM_1, "vin-sm1", dvm, SM1V1, 0, 5, ENA, 0, + ENB, 0, VCC1, 0), }; /* diff --git a/trunk/drivers/regulator/twl-regulator.c b/trunk/drivers/regulator/twl-regulator.c index 242fe90dc565..77a71a5c17c3 100644 --- a/trunk/drivers/regulator/twl-regulator.c +++ b/trunk/drivers/regulator/twl-regulator.c @@ -1037,7 +1037,7 @@ TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300); TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300); TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300); TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300); -TWL4030_FIXED_LDO(VINTANA2, 0x3f, 1500, 11, 100, 0x08); +TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08); TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08); TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08); TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08); @@ -1048,7 +1048,6 @@ TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 0); TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 0); TWL6030_FIXED_LDO(V1V8, 0x16, 1800, 0); TWL6030_FIXED_LDO(V2V1, 0x1c, 2100, 0); -TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 0); TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34); TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10); TWL6025_ADJUSTABLE_SMPS(VIO, 0x16); @@ -1117,7 +1116,7 @@ static const struct of_device_id twl_of_match[] __devinitconst = { TWL6025_OF_MATCH("ti,twl6025-ldo6", LDO6), TWL6025_OF_MATCH("ti,twl6025-ldoln", LDOLN), TWL6025_OF_MATCH("ti,twl6025-ldousb", LDOUSB), - TWLFIXED_OF_MATCH("ti,twl4030-vintana2", VINTANA2), + TWLFIXED_OF_MATCH("ti,twl4030-vintana1", VINTANA1), TWLFIXED_OF_MATCH("ti,twl4030-vintdig", VINTDIG), TWLFIXED_OF_MATCH("ti,twl4030-vusb1v5", VUSB1V5), TWLFIXED_OF_MATCH("ti,twl4030-vusb1v8", VUSB1V8), diff --git a/trunk/drivers/rtc/interface.c b/trunk/drivers/rtc/interface.c index eb415bd76494..9592b936b71b 100644 --- a/trunk/drivers/rtc/interface.c +++ b/trunk/drivers/rtc/interface.c @@ -582,6 +582,7 @@ enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer) void rtc_update_irq(struct rtc_device *rtc, unsigned long num, unsigned long events) { + pm_stay_awake(rtc->dev.parent); schedule_work(&rtc->irqwork); } EXPORT_SYMBOL_GPL(rtc_update_irq); @@ -844,6 +845,7 @@ void rtc_timer_do_work(struct work_struct *work) mutex_lock(&rtc->ops_lock); again: + pm_relax(rtc->dev.parent); __rtc_read_time(rtc, &tm); now = rtc_tm_to_ktime(tm); while ((next = timerqueue_getnext(&rtc->timerqueue))) { diff --git a/trunk/drivers/rtc/rtc-at91sam9.c b/trunk/drivers/rtc/rtc-at91sam9.c index 831868904e02..1dd61f402b04 100644 --- a/trunk/drivers/rtc/rtc-at91sam9.c +++ b/trunk/drivers/rtc/rtc-at91sam9.c @@ -58,6 +58,7 @@ struct sam9_rtc { struct rtc_device *rtcdev; u32 imr; void __iomem *gpbr; + int irq; }; #define rtt_readl(rtc, field) \ @@ -292,7 +293,7 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev) { struct resource *r, *r_gpbr; struct sam9_rtc *rtc; - int ret; + int ret, irq; u32 mr; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -302,10 +303,18 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev) return -ENODEV; } + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "failed to get interrupt resource\n"); + return irq; + } + rtc = kzalloc(sizeof *rtc, GFP_KERNEL); if (!rtc) return -ENOMEM; + rtc->irq = irq; + /* platform setup code should have handled this; sigh */ if (!device_can_wakeup(&pdev->dev)) device_init_wakeup(&pdev->dev, 1); @@ -345,11 +354,10 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev) } /* register irq handler after we know what name we'll use */ - ret = 
request_irq(AT91_ID_SYS, at91_rtc_interrupt, - IRQF_SHARED, + ret = request_irq(rtc->irq, at91_rtc_interrupt, IRQF_SHARED, dev_name(&rtc->rtcdev->dev), rtc); if (ret) { - dev_dbg(&pdev->dev, "can't share IRQ %d?\n", AT91_ID_SYS); + dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq); rtc_device_unregister(rtc->rtcdev); goto fail_register; } @@ -386,7 +394,7 @@ static int __devexit at91_rtc_remove(struct platform_device *pdev) /* disable all interrupts */ rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN)); - free_irq(AT91_ID_SYS, rtc); + free_irq(rtc->irq, rtc); rtc_device_unregister(rtc->rtcdev); @@ -423,7 +431,7 @@ static int at91_rtc_suspend(struct platform_device *pdev, rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN); if (rtc->imr) { if (device_may_wakeup(&pdev->dev) && (mr & AT91_RTT_ALMIEN)) { - enable_irq_wake(AT91_ID_SYS); + enable_irq_wake(rtc->irq); /* don't let RTTINC cause wakeups */ if (mr & AT91_RTT_RTTINCIEN) rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN); @@ -441,7 +449,7 @@ static int at91_rtc_resume(struct platform_device *pdev) if (rtc->imr) { if (device_may_wakeup(&pdev->dev)) - disable_irq_wake(AT91_ID_SYS); + disable_irq_wake(rtc->irq); mr = rtt_readl(rtc, MR); rtt_writel(rtc, MR, mr | rtc->imr); } diff --git a/trunk/drivers/rtc/rtc-cmos.c b/trunk/drivers/rtc/rtc-cmos.c index 132333d75408..4267789ca995 100644 --- a/trunk/drivers/rtc/rtc-cmos.c +++ b/trunk/drivers/rtc/rtc-cmos.c @@ -568,7 +568,6 @@ static irqreturn_t cmos_interrupt(int irq, void *p) hpet_mask_rtc_irq_bit(RTC_AIE); CMOS_READ(RTC_INTR_FLAGS); - pm_wakeup_event(cmos_rtc.dev, 0); } spin_unlock(&rtc_lock); diff --git a/trunk/drivers/rtc/rtc-pcf2123.c b/trunk/drivers/rtc/rtc-pcf2123.c index 836118795c0b..13e4df63974f 100644 --- a/trunk/drivers/rtc/rtc-pcf2123.c +++ b/trunk/drivers/rtc/rtc-pcf2123.c @@ -43,6 +43,7 @@ #include #include #include +#include #define DRV_VERSION "0.6" @@ -292,6 +293,7 @@ static int __devinit pcf2123_probe(struct spi_device *spi) pdata->rtc = rtc; for (i = 0; i < 16; i++) { + sysfs_attr_init(&pdata->regs[i].attr.attr); sprintf(pdata->regs[i].name, "%1x", i); pdata->regs[i].attr.attr.mode = S_IRUGO | S_IWUSR; pdata->regs[i].attr.attr.name = pdata->regs[i].name; diff --git a/trunk/drivers/rtc/rtc-rs5c348.c b/trunk/drivers/rtc/rtc-rs5c348.c index 77074ccd2850..fd5c7af04ae5 100644 --- a/trunk/drivers/rtc/rtc-rs5c348.c +++ b/trunk/drivers/rtc/rtc-rs5c348.c @@ -122,9 +122,12 @@ rs5c348_rtc_read_time(struct device *dev, struct rtc_time *tm) tm->tm_min = bcd2bin(rxbuf[RS5C348_REG_MINS] & RS5C348_MINS_MASK); tm->tm_hour = bcd2bin(rxbuf[RS5C348_REG_HOURS] & RS5C348_HOURS_MASK); if (!pdata->rtc_24h) { - tm->tm_hour %= 12; - if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM) + if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM) { + tm->tm_hour -= 20; + tm->tm_hour %= 12; tm->tm_hour += 12; + } else + tm->tm_hour %= 12; } tm->tm_wday = bcd2bin(rxbuf[RS5C348_REG_WDAY] & RS5C348_WDAY_MASK); tm->tm_mday = bcd2bin(rxbuf[RS5C348_REG_DAY] & RS5C348_DAY_MASK); diff --git a/trunk/drivers/s390/block/dasd_eckd.c b/trunk/drivers/s390/block/dasd_eckd.c index 40a826a7295f..2fb2b9ea97ec 100644 --- a/trunk/drivers/s390/block/dasd_eckd.c +++ b/trunk/drivers/s390/block/dasd_eckd.c @@ -3804,7 +3804,7 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp) case BIODASDSYMMIO: return dasd_symm_io(device, argp); default: - return -ENOIOCTLCMD; + return -ENOTTY; } } diff --git a/trunk/drivers/s390/block/dasd_ioctl.c b/trunk/drivers/s390/block/dasd_ioctl.c index 
cceae70279f6..654c6921a6d4 100644 --- a/trunk/drivers/s390/block/dasd_ioctl.c +++ b/trunk/drivers/s390/block/dasd_ioctl.c @@ -498,12 +498,9 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode, break; default: /* if the discipline has an ioctl method try it. */ - if (base->discipline->ioctl) { + rc = -ENOTTY; + if (base->discipline->ioctl) rc = base->discipline->ioctl(block, cmd, argp); - if (rc == -ENOIOCTLCMD) - rc = -EINVAL; - } else - rc = -EINVAL; } dasd_put_device(base); return rc; diff --git a/trunk/drivers/s390/char/sclp_sdias.c b/trunk/drivers/s390/char/sclp_sdias.c index 6a6f76bf6e3d..b1032931a1c4 100644 --- a/trunk/drivers/s390/char/sclp_sdias.c +++ b/trunk/drivers/s390/char/sclp_sdias.c @@ -242,11 +242,13 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) switch (sdias_evbuf.event_status) { case EVSTATE_ALL_STORED: TRACE("all stored\n"); + break; case EVSTATE_PART_STORED: TRACE("part stored: %i\n", sdias_evbuf.blk_cnt); break; case EVSTATE_NO_DATA: TRACE("no data\n"); + /* fall through */ default: pr_err("Error from SCLP while copying hsa. " "Event status = %x\n", diff --git a/trunk/drivers/scsi/megaraid/megaraid_sas_base.c b/trunk/drivers/scsi/megaraid/megaraid_sas_base.c index dc27598785e5..ed38454228c6 100644 --- a/trunk/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/trunk/drivers/scsi/megaraid/megaraid_sas_base.c @@ -4066,7 +4066,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) spin_lock_init(&instance->cmd_pool_lock); spin_lock_init(&instance->hba_lock); spin_lock_init(&instance->completion_lock); - spin_lock_init(&poll_aen_lock); mutex_init(&instance->aen_mutex); mutex_init(&instance->reset_mutex); @@ -5392,6 +5391,8 @@ static int __init megasas_init(void) printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION, MEGASAS_EXT_VERSION); + spin_lock_init(&poll_aen_lock); + support_poll_for_event = 2; support_device_change = 1; diff --git a/trunk/drivers/scsi/mpt2sas/mpt2sas_base.c b/trunk/drivers/scsi/mpt2sas/mpt2sas_base.c index 9d46fcbe7755..b25757d1e91b 100644 --- a/trunk/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/trunk/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -2424,10 +2424,13 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) } /* command line tunables for max controller queue depth */ - if (max_queue_depth != -1) - max_request_credit = (max_queue_depth < facts->RequestCredit) - ? 
max_queue_depth : facts->RequestCredit; - else + if (max_queue_depth != -1 && max_queue_depth != 0) { + max_request_credit = min_t(u16, max_queue_depth + + ioc->hi_priority_depth + ioc->internal_depth, + facts->RequestCredit); + if (max_request_credit > MAX_HBA_QUEUE_DEPTH) + max_request_credit = MAX_HBA_QUEUE_DEPTH; + } else max_request_credit = min_t(u16, facts->RequestCredit, MAX_HBA_QUEUE_DEPTH); @@ -2502,7 +2505,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) /* set the scsi host can_queue depth * with some internal commands that could be outstanding */ - ioc->shost->can_queue = ioc->scsiio_depth - (2); + ioc->shost->can_queue = ioc->scsiio_depth; dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsi host: " "can_queue depth (%d)\n", ioc->name, ioc->shost->can_queue)); diff --git a/trunk/drivers/scsi/scsi_error.c b/trunk/drivers/scsi/scsi_error.c index 4a6381c87253..de2337f255a7 100644 --- a/trunk/drivers/scsi/scsi_error.c +++ b/trunk/drivers/scsi/scsi_error.c @@ -42,6 +42,8 @@ #include +static void scsi_eh_done(struct scsi_cmnd *scmd); + #define SENSE_TIMEOUT (10*HZ) /* @@ -241,6 +243,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) if (! scsi_command_normalize_sense(scmd, &sshdr)) return FAILED; /* no valid sense data */ + if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done) + /* + * nasty: for mid-layer issued TURs, we need to return the + * actual sense data without any recovery attempt. For eh + * issued ones, we need to try to recover and interpret + */ + return SUCCESS; + if (scsi_sense_is_deferred(&sshdr)) return NEEDS_RETRY; diff --git a/trunk/drivers/scsi/scsi_lib.c b/trunk/drivers/scsi/scsi_lib.c index ffd77739ae3e..faa790fba134 100644 --- a/trunk/drivers/scsi/scsi_lib.c +++ b/trunk/drivers/scsi/scsi_lib.c @@ -776,7 +776,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) } if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */ - req->errors = result; if (result) { if (sense_valid && req->sense) { /* @@ -792,6 +791,10 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) if (!sense_deferred) error = __scsi_error_from_host_byte(cmd, result); } + /* + * __scsi_error_from_host_byte may have reset the host_byte + */ + req->errors = cmd->result; req->resid_len = scsi_get_resid(cmd); diff --git a/trunk/drivers/scsi/scsi_scan.c b/trunk/drivers/scsi/scsi_scan.c index 56a93794c470..d947ffc20ceb 100644 --- a/trunk/drivers/scsi/scsi_scan.c +++ b/trunk/drivers/scsi/scsi_scan.c @@ -764,6 +764,16 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, sdev->model = (char *) (sdev->inquiry + 16); sdev->rev = (char *) (sdev->inquiry + 32); + if (strncmp(sdev->vendor, "ATA ", 8) == 0) { + /* + * sata emulation layer device. This is a hack to work around + * the SATL power management specifications which state that + * when the SATL detects the device has gone into standby + * mode, it shall respond with NOT READY. 
+ */ + sdev->allow_restart = 1; + } + if (*bflags & BLIST_ISROM) { sdev->type = TYPE_ROM; sdev->removable = 1; diff --git a/trunk/drivers/scsi/scsi_transport_fc.c b/trunk/drivers/scsi/scsi_transport_fc.c index 2d1e68db9b3f..e894ca7b54c0 100644 --- a/trunk/drivers/scsi/scsi_transport_fc.c +++ b/trunk/drivers/scsi/scsi_transport_fc.c @@ -4146,45 +4146,7 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport) static void fc_bsg_remove(struct request_queue *q) { - struct request *req; /* block request */ - int counts; /* totals for request_list count and starved */ - if (q) { - /* Stop taking in new requests */ - spin_lock_irq(q->queue_lock); - blk_stop_queue(q); - - /* drain all requests in the queue */ - while (1) { - /* need the lock to fetch a request - * this may fetch the same reqeust as the previous pass - */ - req = blk_fetch_request(q); - /* save requests in use and starved */ - counts = q->rq.count[0] + q->rq.count[1] + - q->rq.starved[0] + q->rq.starved[1]; - spin_unlock_irq(q->queue_lock); - /* any requests still outstanding? */ - if (counts == 0) - break; - - /* This may be the same req as the previous iteration, - * always send the blk_end_request_all after a prefetch. - * It is not okay to not end the request because the - * prefetch started the request. - */ - if (req) { - /* return -ENXIO to indicate that this queue is - * going away - */ - req->errors = -ENXIO; - blk_end_request_all(req, -ENXIO); - } - - msleep(200); /* allow bsg to possibly finish */ - spin_lock_irq(q->queue_lock); - } - bsg_unregister_queue(q); blk_cleanup_queue(q); } diff --git a/trunk/drivers/scsi/scsi_transport_iscsi.c b/trunk/drivers/scsi/scsi_transport_iscsi.c index 09809d06eccb..fa1dfaa83e32 100644 --- a/trunk/drivers/scsi/scsi_transport_iscsi.c +++ b/trunk/drivers/scsi/scsi_transport_iscsi.c @@ -575,7 +575,7 @@ static int iscsi_remove_host(struct transport_container *tc, struct iscsi_cls_host *ihost = shost->shost_data; if (ihost->bsg_q) { - bsg_remove_queue(ihost->bsg_q); + bsg_unregister_queue(ihost->bsg_q); blk_cleanup_queue(ihost->bsg_q); } return 0; diff --git a/trunk/drivers/sh/intc/Kconfig b/trunk/drivers/sh/intc/Kconfig index c88cbccc62b0..a305731742a9 100644 --- a/trunk/drivers/sh/intc/Kconfig +++ b/trunk/drivers/sh/intc/Kconfig @@ -1,3 +1,7 @@ +config SH_INTC + def_bool y + select IRQ_DOMAIN + comment "Interrupt controller options" config INTC_USERIMASK diff --git a/trunk/drivers/sh/intc/Makefile b/trunk/drivers/sh/intc/Makefile index 44f006d09471..54ec2a0643df 100644 --- a/trunk/drivers/sh/intc/Makefile +++ b/trunk/drivers/sh/intc/Makefile @@ -1,4 +1,4 @@ -obj-y := access.o chip.o core.o handle.o virq.o +obj-y := access.o chip.o core.o handle.o irqdomain.o virq.o obj-$(CONFIG_INTC_BALANCING) += balancing.o obj-$(CONFIG_INTC_USERIMASK) += userimask.o diff --git a/trunk/drivers/sh/intc/core.c b/trunk/drivers/sh/intc/core.c index 7e562ccb6997..32c26d795ed0 100644 --- a/trunk/drivers/sh/intc/core.c +++ b/trunk/drivers/sh/intc/core.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -310,6 +311,8 @@ int __init register_intc_controller(struct intc_desc *desc) BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */ + intc_irq_domain_init(d, hw); + /* register the vectors one by one */ for (i = 0; i < hw->nr_vectors; i++) { struct intc_vect *vect = hw->vectors + i; @@ -319,10 +322,18 @@ int __init register_intc_controller(struct intc_desc *desc) if (!vect->enum_id) continue; - res = irq_alloc_desc_at(irq, numa_node_id()); - if (res != irq 
&& res != -EEXIST) { - pr_err("can't get irq_desc for %d\n", irq); - continue; + res = irq_create_identity_mapping(d->domain, irq); + if (unlikely(res)) { + if (res == -EEXIST) { + res = irq_domain_associate(d->domain, irq, irq); + if (unlikely(res)) { + pr_err("domain association failure\n"); + continue; + } + } else { + pr_err("can't identity map IRQ %d\n", irq); + continue; + } } intc_irq_xlate_set(irq, vect->enum_id, d); @@ -340,10 +351,21 @@ int __init register_intc_controller(struct intc_desc *desc) * IRQ support, each vector still needs to have * its own backing irq_desc. */ - res = irq_alloc_desc_at(irq2, numa_node_id()); - if (res != irq2 && res != -EEXIST) { - pr_err("can't get irq_desc for %d\n", irq2); - continue; + res = irq_create_identity_mapping(d->domain, irq2); + if (unlikely(res)) { + if (res == -EEXIST) { + res = irq_domain_associate(d->domain, + irq, irq); + if (unlikely(res)) { + pr_err("domain association " + "failure\n"); + continue; + } + } else { + pr_err("can't identity map IRQ %d\n", + irq); + continue; + } } vect2->enum_id = 0; diff --git a/trunk/drivers/sh/intc/internals.h b/trunk/drivers/sh/intc/internals.h index f034a979a16f..7dff08e2a071 100644 --- a/trunk/drivers/sh/intc/internals.h +++ b/trunk/drivers/sh/intc/internals.h @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -66,6 +67,7 @@ struct intc_desc_int { unsigned int nr_sense; struct intc_window *window; unsigned int nr_windows; + struct irq_domain *domain; struct irq_chip chip; bool skip_suspend; }; @@ -187,6 +189,9 @@ unsigned long intc_get_ack_handle(unsigned int irq); void intc_enable_disable_enum(struct intc_desc *desc, struct intc_desc_int *d, intc_enum enum_id, int enable); +/* irqdomain.c */ +void intc_irq_domain_init(struct intc_desc_int *d, struct intc_hw_desc *hw); + /* virq.c */ void intc_subgroup_init(struct intc_desc *desc, struct intc_desc_int *d); void intc_irq_xlate_set(unsigned int irq, intc_enum id, struct intc_desc_int *d); diff --git a/trunk/drivers/sh/intc/irqdomain.c b/trunk/drivers/sh/intc/irqdomain.c new file mode 100644 index 000000000000..3968f1c3c5c3 --- /dev/null +++ b/trunk/drivers/sh/intc/irqdomain.c @@ -0,0 +1,68 @@ +/* + * IRQ domain support for SH INTC subsystem + * + * Copyright (C) 2012 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#define pr_fmt(fmt) "intc: " fmt + +#include +#include +#include +#include "internals.h" + +/** + * intc_irq_domain_evt_xlate() - Generic xlate for vectored IRQs. + * + * This takes care of exception vector to hwirq translation through + * by way of evt2irq() translation. + * + * Note: For platforms that use a flat vector space without INTEVT this + * basically just mimics irq_domain_xlate_onecell() by way of a nopped + * out evt2irq() implementation. 
+ */ +static int intc_evt_xlate(struct irq_domain *d, struct device_node *ctrlr, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, unsigned int *out_type) +{ + if (WARN_ON(intsize < 1)) + return -EINVAL; + + *out_hwirq = evt2irq(intspec[0]); + *out_type = IRQ_TYPE_NONE; + + return 0; +} + +static const struct irq_domain_ops intc_evt_ops = { + .xlate = intc_evt_xlate, +}; + +void __init intc_irq_domain_init(struct intc_desc_int *d, + struct intc_hw_desc *hw) +{ + unsigned int irq_base, irq_end; + + /* + * Quick linear revmap check + */ + irq_base = evt2irq(hw->vectors[0].vect); + irq_end = evt2irq(hw->vectors[hw->nr_vectors - 1].vect); + + /* + * Linear domains have a hard-wired assertion that IRQs start at + * 0 in order to make some performance optimizations. Lamely + * restrict the linear case to these conditions here, taking the + * tree penalty for linear cases with non-zero hwirq bases. + */ + if (irq_base == 0 && irq_end == (irq_base + hw->nr_vectors - 1)) + d->domain = irq_domain_add_linear(NULL, hw->nr_vectors, + &intc_evt_ops, NULL); + else + d->domain = irq_domain_add_tree(NULL, &intc_evt_ops, NULL); + + BUG_ON(!d->domain); +} diff --git a/trunk/drivers/sh/pfc/pinctrl.c b/trunk/drivers/sh/pfc/pinctrl.c index 0802b6c0d653..2804eaae804e 100644 --- a/trunk/drivers/sh/pfc/pinctrl.c +++ b/trunk/drivers/sh/pfc/pinctrl.c @@ -276,7 +276,6 @@ static int sh_pfc_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin, unsigned long config) { struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev); - struct sh_pfc *pfc = pmx->pfc; /* Validate the new type */ if (config >= PINMUX_FLAG_TYPE) @@ -326,20 +325,6 @@ static struct pinctrl_desc sh_pfc_pinctrl_desc = { .confops = &sh_pfc_pinconf_ops, }; -int sh_pfc_register_pinctrl(struct sh_pfc *pfc) -{ - sh_pfc_pmx = kzalloc(sizeof(struct sh_pfc_pinctrl), GFP_KERNEL); - if (unlikely(!sh_pfc_pmx)) - return -ENOMEM; - - spin_lock_init(&sh_pfc_pmx->lock); - - sh_pfc_pmx->pfc = pfc; - - return 0; -} -EXPORT_SYMBOL_GPL(sh_pfc_register_pinctrl); - static inline void __devinit sh_pfc_map_one_gpio(struct sh_pfc *pfc, struct sh_pfc_pinctrl *pmx, struct pinmux_gpio *gpio, @@ -481,7 +466,6 @@ static int __devexit sh_pfc_pinctrl_remove(struct platform_device *pdev) { struct sh_pfc_pinctrl *pmx = platform_get_drvdata(pdev); - pinctrl_remove_gpio_range(pmx->pctl, &sh_pfc_gpio_range); pinctrl_unregister(pmx->pctl); platform_set_drvdata(pdev, NULL); @@ -507,7 +491,7 @@ static struct platform_device sh_pfc_pinctrl_device = { .id = -1, }; -static int __init sh_pfc_pinctrl_init(void) +static int sh_pfc_pinctrl_init(void) { int rc; @@ -521,10 +505,22 @@ static int __init sh_pfc_pinctrl_init(void) return rc; } +int sh_pfc_register_pinctrl(struct sh_pfc *pfc) +{ + sh_pfc_pmx = kzalloc(sizeof(struct sh_pfc_pinctrl), GFP_KERNEL); + if (unlikely(!sh_pfc_pmx)) + return -ENOMEM; + + spin_lock_init(&sh_pfc_pmx->lock); + + sh_pfc_pmx->pfc = pfc; + + return sh_pfc_pinctrl_init(); +} +EXPORT_SYMBOL_GPL(sh_pfc_register_pinctrl); + static void __exit sh_pfc_pinctrl_exit(void) { platform_driver_unregister(&sh_pfc_pinctrl_driver); } - -subsys_initcall(sh_pfc_pinctrl_init); module_exit(sh_pfc_pinctrl_exit); diff --git a/trunk/drivers/spi/spi-bcm63xx.c b/trunk/drivers/spi/spi-bcm63xx.c index 6e25ef1bce91..a9f4049c6769 100644 --- a/trunk/drivers/spi/spi-bcm63xx.c +++ b/trunk/drivers/spi/spi-bcm63xx.c @@ -47,6 +47,8 @@ struct bcm63xx_spi { /* Platform data */ u32 speed_hz; unsigned fifo_size; + unsigned int msg_type_shift; + unsigned int msg_ctl_width; /* Data 
buffers */ const unsigned char *tx_ptr; @@ -221,13 +223,20 @@ static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi, msg_ctl = (t->len << SPI_BYTE_CNT_SHIFT); if (t->rx_buf && t->tx_buf) - msg_ctl |= (SPI_FD_RW << SPI_MSG_TYPE_SHIFT); + msg_ctl |= (SPI_FD_RW << bs->msg_type_shift); else if (t->rx_buf) - msg_ctl |= (SPI_HD_R << SPI_MSG_TYPE_SHIFT); + msg_ctl |= (SPI_HD_R << bs->msg_type_shift); else if (t->tx_buf) - msg_ctl |= (SPI_HD_W << SPI_MSG_TYPE_SHIFT); - - bcm_spi_writew(bs, msg_ctl, SPI_MSG_CTL); + msg_ctl |= (SPI_HD_W << bs->msg_type_shift); + + switch (bs->msg_ctl_width) { + case 8: + bcm_spi_writeb(bs, msg_ctl, SPI_MSG_CTL); + break; + case 16: + bcm_spi_writew(bs, msg_ctl, SPI_MSG_CTL); + break; + } /* Issue the transfer */ cmd = SPI_CMD_START_IMMEDIATE; @@ -406,9 +415,21 @@ static int __devinit bcm63xx_spi_probe(struct platform_device *pdev) master->transfer_one_message = bcm63xx_spi_transfer_one; master->mode_bits = MODEBITS; bs->speed_hz = pdata->speed_hz; + bs->msg_type_shift = pdata->msg_type_shift; + bs->msg_ctl_width = pdata->msg_ctl_width; bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA)); bs->rx_io = (const u8 *)(bs->regs + bcm63xx_spireg(SPI_RX_DATA)); + switch (bs->msg_ctl_width) { + case 8: + case 16: + break; + default: + dev_err(dev, "unsupported MSG_CTL width: %d\n", + bs->msg_ctl_width); + goto out_clk_disable; + } + /* Initialize hardware */ clk_enable(bs->clk); bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS); @@ -438,7 +459,7 @@ static int __devinit bcm63xx_spi_probe(struct platform_device *pdev) static int __devexit bcm63xx_spi_remove(struct platform_device *pdev) { - struct spi_master *master = platform_get_drvdata(pdev); + struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); struct bcm63xx_spi *bs = spi_master_get_devdata(master); spi_unregister_master(master); @@ -452,6 +473,8 @@ static int __devexit bcm63xx_spi_remove(struct platform_device *pdev) platform_set_drvdata(pdev, 0); + spi_master_put(master); + return 0; } diff --git a/trunk/drivers/spi/spi-coldfire-qspi.c b/trunk/drivers/spi/spi-coldfire-qspi.c index b2d4b9e4e010..764bfee75920 100644 --- a/trunk/drivers/spi/spi-coldfire-qspi.c +++ b/trunk/drivers/spi/spi-coldfire-qspi.c @@ -533,7 +533,6 @@ static int __devexit mcfqspi_remove(struct platform_device *pdev) iounmap(mcfqspi->iobase); release_mem_region(res->start, resource_size(res)); spi_unregister_master(master); - spi_master_put(master); return 0; } @@ -541,7 +540,7 @@ static int __devexit mcfqspi_remove(struct platform_device *pdev) #ifdef CONFIG_PM_SLEEP static int mcfqspi_suspend(struct device *dev) { - struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); + struct spi_master *master = dev_get_drvdata(dev); struct mcfqspi *mcfqspi = spi_master_get_devdata(master); spi_master_suspend(master); @@ -553,7 +552,7 @@ static int mcfqspi_suspend(struct device *dev) static int mcfqspi_resume(struct device *dev) { - struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); + struct spi_master *master = dev_get_drvdata(dev); struct mcfqspi *mcfqspi = spi_master_get_devdata(master); spi_master_resume(master); diff --git a/trunk/drivers/spi/spi-omap2-mcspi.c b/trunk/drivers/spi/spi-omap2-mcspi.c index 7d46b15e1520..b2fb141da375 100644 --- a/trunk/drivers/spi/spi-omap2-mcspi.c +++ b/trunk/drivers/spi/spi-omap2-mcspi.c @@ -28,6 +28,8 @@ #include #include #include +#include +#include #include #include #include @@ -39,7 +41,6 @@ #include -#include #include #include @@ -93,8 +94,8 @@ /* We have 2 
DMA channels per CS, one for RX and one for TX */ struct omap2_mcspi_dma { - int dma_tx_channel; - int dma_rx_channel; + struct dma_chan *dma_tx; + struct dma_chan *dma_rx; int dma_tx_sync_dev; int dma_rx_sync_dev; @@ -300,20 +301,46 @@ static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) return 0; } +static void omap2_mcspi_rx_callback(void *data) +{ + struct spi_device *spi = data; + struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master); + struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select]; + + complete(&mcspi_dma->dma_rx_completion); + + /* We must disable the DMA RX request */ + omap2_mcspi_set_dma_req(spi, 1, 0); +} + +static void omap2_mcspi_tx_callback(void *data) +{ + struct spi_device *spi = data; + struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master); + struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select]; + + complete(&mcspi_dma->dma_tx_completion); + + /* We must disable the DMA TX request */ + omap2_mcspi_set_dma_req(spi, 0, 0); +} + static unsigned omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) { struct omap2_mcspi *mcspi; struct omap2_mcspi_cs *cs = spi->controller_state; struct omap2_mcspi_dma *mcspi_dma; - unsigned int count, c; - unsigned long base, tx_reg, rx_reg; - int word_len, data_type, element_count; + unsigned int count; + int word_len, element_count; int elements = 0; u32 l; u8 * rx; const u8 * tx; void __iomem *chstat_reg; + struct dma_slave_config cfg; + enum dma_slave_buswidth width; + unsigned es; mcspi = spi_master_get_devdata(spi->master); mcspi_dma = &mcspi->dma_channels[spi->chip_select]; @@ -321,68 +348,92 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0; + if (cs->word_len <= 8) { + width = DMA_SLAVE_BUSWIDTH_1_BYTE; + es = 1; + } else if (cs->word_len <= 16) { + width = DMA_SLAVE_BUSWIDTH_2_BYTES; + es = 2; + } else { + width = DMA_SLAVE_BUSWIDTH_4_BYTES; + es = 4; + } + + memset(&cfg, 0, sizeof(cfg)); + cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0; + cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0; + cfg.src_addr_width = width; + cfg.dst_addr_width = width; + cfg.src_maxburst = 1; + cfg.dst_maxburst = 1; + + if (xfer->tx_buf && mcspi_dma->dma_tx) { + struct dma_async_tx_descriptor *tx; + struct scatterlist sg; + + dmaengine_slave_config(mcspi_dma->dma_tx, &cfg); + + sg_init_table(&sg, 1); + sg_dma_address(&sg) = xfer->tx_dma; + sg_dma_len(&sg) = xfer->len; + + tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (tx) { + tx->callback = omap2_mcspi_tx_callback; + tx->callback_param = spi; + dmaengine_submit(tx); + } else { + /* FIXME: fall back to PIO? */ + } + } + + if (xfer->rx_buf && mcspi_dma->dma_rx) { + struct dma_async_tx_descriptor *tx; + struct scatterlist sg; + size_t len = xfer->len - es; + + dmaengine_slave_config(mcspi_dma->dma_rx, &cfg); + + if (l & OMAP2_MCSPI_CHCONF_TURBO) + len -= es; + + sg_init_table(&sg, 1); + sg_dma_address(&sg) = xfer->rx_dma; + sg_dma_len(&sg) = len; + + tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1, + DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (tx) { + tx->callback = omap2_mcspi_rx_callback; + tx->callback_param = spi; + dmaengine_submit(tx); + } else { + /* FIXME: fall back to PIO? 
*/ + } + } + count = xfer->len; - c = count; word_len = cs->word_len; - base = cs->phys; - tx_reg = base + OMAP2_MCSPI_TX0; - rx_reg = base + OMAP2_MCSPI_RX0; rx = xfer->rx_buf; tx = xfer->tx_buf; if (word_len <= 8) { - data_type = OMAP_DMA_DATA_TYPE_S8; element_count = count; } else if (word_len <= 16) { - data_type = OMAP_DMA_DATA_TYPE_S16; element_count = count >> 1; } else /* word_len <= 32 */ { - data_type = OMAP_DMA_DATA_TYPE_S32; element_count = count >> 2; } if (tx != NULL) { - omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel, - data_type, element_count, 1, - OMAP_DMA_SYNC_ELEMENT, - mcspi_dma->dma_tx_sync_dev, 0); - - omap_set_dma_dest_params(mcspi_dma->dma_tx_channel, 0, - OMAP_DMA_AMODE_CONSTANT, - tx_reg, 0, 0); - - omap_set_dma_src_params(mcspi_dma->dma_tx_channel, 0, - OMAP_DMA_AMODE_POST_INC, - xfer->tx_dma, 0, 0); - } - - if (rx != NULL) { - elements = element_count - 1; - if (l & OMAP2_MCSPI_CHCONF_TURBO) - elements--; - - omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel, - data_type, elements, 1, - OMAP_DMA_SYNC_ELEMENT, - mcspi_dma->dma_rx_sync_dev, 1); - - omap_set_dma_src_params(mcspi_dma->dma_rx_channel, 0, - OMAP_DMA_AMODE_CONSTANT, - rx_reg, 0, 0); - - omap_set_dma_dest_params(mcspi_dma->dma_rx_channel, 0, - OMAP_DMA_AMODE_POST_INC, - xfer->rx_dma, 0, 0); - } - - if (tx != NULL) { - omap_start_dma(mcspi_dma->dma_tx_channel); + dma_async_issue_pending(mcspi_dma->dma_tx); omap2_mcspi_set_dma_req(spi, 0, 1); } if (rx != NULL) { - omap_start_dma(mcspi_dma->dma_rx_channel); + dma_async_issue_pending(mcspi_dma->dma_rx); omap2_mcspi_set_dma_req(spi, 1, 1); } @@ -408,7 +459,10 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) DMA_FROM_DEVICE); omap2_mcspi_set_enable(spi, 0); + elements = element_count - 1; + if (l & OMAP2_MCSPI_CHCONF_TURBO) { + elements--; if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) & OMAP2_MCSPI_CHSTAT_RXS)) { @@ -725,64 +779,38 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi, return 0; } -static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data) -{ - struct spi_device *spi = data; - struct omap2_mcspi *mcspi; - struct omap2_mcspi_dma *mcspi_dma; - - mcspi = spi_master_get_devdata(spi->master); - mcspi_dma = &(mcspi->dma_channels[spi->chip_select]); - - complete(&mcspi_dma->dma_rx_completion); - - /* We must disable the DMA RX request */ - omap2_mcspi_set_dma_req(spi, 1, 0); -} - -static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data) -{ - struct spi_device *spi = data; - struct omap2_mcspi *mcspi; - struct omap2_mcspi_dma *mcspi_dma; - - mcspi = spi_master_get_devdata(spi->master); - mcspi_dma = &(mcspi->dma_channels[spi->chip_select]); - - complete(&mcspi_dma->dma_tx_completion); - - /* We must disable the DMA TX request */ - omap2_mcspi_set_dma_req(spi, 0, 0); -} - static int omap2_mcspi_request_dma(struct spi_device *spi) { struct spi_master *master = spi->master; struct omap2_mcspi *mcspi; struct omap2_mcspi_dma *mcspi_dma; + dma_cap_mask_t mask; + unsigned sig; mcspi = spi_master_get_devdata(master); mcspi_dma = mcspi->dma_channels + spi->chip_select; - if (omap_request_dma(mcspi_dma->dma_rx_sync_dev, "McSPI RX", - omap2_mcspi_dma_rx_callback, spi, - &mcspi_dma->dma_rx_channel)) { - dev_err(&spi->dev, "no RX DMA channel for McSPI\n"); + init_completion(&mcspi_dma->dma_rx_completion); + init_completion(&mcspi_dma->dma_tx_completion); + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + sig = mcspi_dma->dma_rx_sync_dev; + mcspi_dma->dma_rx = 
dma_request_channel(mask, omap_dma_filter_fn, &sig); + if (!mcspi_dma->dma_rx) { + dev_err(&spi->dev, "no RX DMA engine channel for McSPI\n"); return -EAGAIN; } - if (omap_request_dma(mcspi_dma->dma_tx_sync_dev, "McSPI TX", - omap2_mcspi_dma_tx_callback, spi, - &mcspi_dma->dma_tx_channel)) { - omap_free_dma(mcspi_dma->dma_rx_channel); - mcspi_dma->dma_rx_channel = -1; - dev_err(&spi->dev, "no TX DMA channel for McSPI\n"); + sig = mcspi_dma->dma_tx_sync_dev; + mcspi_dma->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig); + if (!mcspi_dma->dma_tx) { + dev_err(&spi->dev, "no TX DMA engine channel for McSPI\n"); + dma_release_channel(mcspi_dma->dma_rx); + mcspi_dma->dma_rx = NULL; return -EAGAIN; } - init_completion(&mcspi_dma->dma_rx_completion); - init_completion(&mcspi_dma->dma_tx_completion); - return 0; } @@ -814,8 +842,7 @@ static int omap2_mcspi_setup(struct spi_device *spi) list_add_tail(&cs->node, &ctx->cs); } - if (mcspi_dma->dma_rx_channel == -1 - || mcspi_dma->dma_tx_channel == -1) { + if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) { ret = omap2_mcspi_request_dma(spi); if (ret < 0) return ret; @@ -850,13 +877,13 @@ static void omap2_mcspi_cleanup(struct spi_device *spi) if (spi->chip_select < spi->master->num_chipselect) { mcspi_dma = &mcspi->dma_channels[spi->chip_select]; - if (mcspi_dma->dma_rx_channel != -1) { - omap_free_dma(mcspi_dma->dma_rx_channel); - mcspi_dma->dma_rx_channel = -1; + if (mcspi_dma->dma_rx) { + dma_release_channel(mcspi_dma->dma_rx); + mcspi_dma->dma_rx = NULL; } - if (mcspi_dma->dma_tx_channel != -1) { - omap_free_dma(mcspi_dma->dma_tx_channel); - mcspi_dma->dma_tx_channel = -1; + if (mcspi_dma->dma_tx) { + dma_release_channel(mcspi_dma->dma_tx); + mcspi_dma->dma_tx = NULL; } } } @@ -1176,7 +1203,6 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev) break; } - mcspi->dma_channels[i].dma_rx_channel = -1; mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start; sprintf(dma_ch_name, "tx%d", i); dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA, @@ -1187,7 +1213,6 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev) break; } - mcspi->dma_channels[i].dma_tx_channel = -1; mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start; } @@ -1203,18 +1228,16 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev) status = spi_register_master(master); if (status < 0) - goto err_spi_register; + goto disable_pm; return status; -err_spi_register: - spi_master_put(master); disable_pm: pm_runtime_disable(&pdev->dev); dma_chnl_free: kfree(mcspi->dma_channels); free_master: - kfree(master); + spi_master_put(master); platform_set_drvdata(pdev, NULL); return status; } diff --git a/trunk/drivers/spi/spi-pl022.c b/trunk/drivers/spi/spi-pl022.c index aab518ec2bbc..6abbe23c39b4 100644 --- a/trunk/drivers/spi/spi-pl022.c +++ b/trunk/drivers/spi/spi-pl022.c @@ -2053,7 +2053,6 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id) printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n", adev->res.start, pl022->virtbase); - pm_runtime_enable(dev); pm_runtime_resume(dev); pl022->clk = clk_get(&adev->dev, NULL); diff --git a/trunk/drivers/spi/spi-s3c64xx.c b/trunk/drivers/spi/spi-s3c64xx.c index 646a7657fe62..d1c8441f638c 100644 --- a/trunk/drivers/spi/spi-s3c64xx.c +++ b/trunk/drivers/spi/spi-s3c64xx.c @@ -826,7 +826,7 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata( struct spi_device *spi) { struct s3c64xx_spi_csinfo *cs; - struct device_node *slave_np, *data_np; + struct 
device_node *slave_np, *data_np = NULL; u32 fb_delay = 0; slave_np = spi->dev.of_node; @@ -1479,40 +1479,40 @@ static const struct dev_pm_ops s3c64xx_spi_pm = { s3c64xx_spi_runtime_resume, NULL) }; -struct s3c64xx_spi_port_config s3c2443_spi_port_config = { +static struct s3c64xx_spi_port_config s3c2443_spi_port_config = { .fifo_lvl_mask = { 0x7f }, .rx_lvl_offset = 13, .tx_st_done = 21, .high_speed = true, }; -struct s3c64xx_spi_port_config s3c6410_spi_port_config = { +static struct s3c64xx_spi_port_config s3c6410_spi_port_config = { .fifo_lvl_mask = { 0x7f, 0x7F }, .rx_lvl_offset = 13, .tx_st_done = 21, }; -struct s3c64xx_spi_port_config s5p64x0_spi_port_config = { +static struct s3c64xx_spi_port_config s5p64x0_spi_port_config = { .fifo_lvl_mask = { 0x1ff, 0x7F }, .rx_lvl_offset = 15, .tx_st_done = 25, }; -struct s3c64xx_spi_port_config s5pc100_spi_port_config = { +static struct s3c64xx_spi_port_config s5pc100_spi_port_config = { .fifo_lvl_mask = { 0x7f, 0x7F }, .rx_lvl_offset = 13, .tx_st_done = 21, .high_speed = true, }; -struct s3c64xx_spi_port_config s5pv210_spi_port_config = { +static struct s3c64xx_spi_port_config s5pv210_spi_port_config = { .fifo_lvl_mask = { 0x1ff, 0x7F }, .rx_lvl_offset = 15, .tx_st_done = 25, .high_speed = true, }; -struct s3c64xx_spi_port_config exynos4_spi_port_config = { +static struct s3c64xx_spi_port_config exynos4_spi_port_config = { .fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F }, .rx_lvl_offset = 15, .tx_st_done = 25, diff --git a/trunk/drivers/staging/bcm/Misc.c b/trunk/drivers/staging/bcm/Misc.c index 9a60d4cd2184..f545716c666d 100644 --- a/trunk/drivers/staging/bcm/Misc.c +++ b/trunk/drivers/staging/bcm/Misc.c @@ -157,12 +157,7 @@ static int create_worker_threads(struct bcm_mini_adapter *psAdapter) static struct file *open_firmware_file(struct bcm_mini_adapter *Adapter, const char *path) { - struct file *flp = NULL; - mm_segment_t oldfs; - oldfs = get_fs(); - set_fs(get_ds()); - flp = filp_open(path, O_RDONLY, S_IRWXU); - set_fs(oldfs); + struct file *flp = filp_open(path, O_RDONLY, S_IRWXU); if (IS_ERR(flp)) { pr_err(DRV_NAME "Unable To Open File %s, err %ld", path, PTR_ERR(flp)); flp = NULL; @@ -183,14 +178,12 @@ static int BcmFileDownload(struct bcm_mini_adapter *Adapter, const char *path, u { int errorno = 0; struct file *flp = NULL; - mm_segment_t oldfs; struct timeval tv = {0}; flp = open_firmware_file(Adapter, path); if (!flp) { - errorno = -ENOENT; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Unable to Open %s\n", path); - goto exit_download; + return -ENOENT; } BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Opened file is = %s and length =0x%lx to be downloaded at =0x%x", path, (unsigned long)flp->f_dentry->d_inode->i_size, loc); do_gettimeofday(&tv); @@ -201,10 +194,7 @@ static int BcmFileDownload(struct bcm_mini_adapter *Adapter, const char *path, u errorno = -EIO; goto exit_download; } - oldfs = get_fs(); - set_fs(get_ds()); vfs_llseek(flp, 0, 0); - set_fs(oldfs); if (Adapter->bcm_file_readback_from_chip(Adapter->pvInterfaceAdapter, flp, loc)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed to read back firmware!"); errorno = -EIO; @@ -212,12 +202,7 @@ static int BcmFileDownload(struct bcm_mini_adapter *Adapter, const char *path, u } exit_download: - oldfs = get_fs(); - set_fs(get_ds()); - if (flp && !(IS_ERR(flp))) - filp_close(flp, current->files); - set_fs(oldfs); - + filp_close(flp, NULL); return errorno; } @@ -1056,10 +1041,8 @@ int InitCardAndDownloadFirmware(struct 
bcm_mini_adapter *ps_adapter) static int bcm_parse_target_params(struct bcm_mini_adapter *Adapter) { struct file *flp = NULL; - mm_segment_t oldfs = {0}; char *buff; int len = 0; - loff_t pos = 0; buff = kmalloc(BUFFER_1K, GFP_KERNEL); if (!buff) @@ -1079,20 +1062,16 @@ static int bcm_parse_target_params(struct bcm_mini_adapter *Adapter) Adapter->pstargetparams = NULL; return -ENOENT; } - oldfs = get_fs(); - set_fs(get_ds()); - len = vfs_read(flp, (void __user __force *)buff, BUFFER_1K, &pos); - set_fs(oldfs); + len = kernel_read(flp, 0, buff, BUFFER_1K); + filp_close(flp, NULL); if (len != sizeof(STARGETPARAMS)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Mismatch in Target Param Structure!\n"); kfree(buff); kfree(Adapter->pstargetparams); Adapter->pstargetparams = NULL; - filp_close(flp, current->files); return -ENOENT; } - filp_close(flp, current->files); /* Check for autolink in config params */ /* diff --git a/trunk/drivers/staging/comedi/drivers.c b/trunk/drivers/staging/comedi/drivers.c index c0fdb00783ed..2359151af7e1 100644 --- a/trunk/drivers/staging/comedi/drivers.c +++ b/trunk/drivers/staging/comedi/drivers.c @@ -168,7 +168,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it) dev->board_ptr = comedi_recognize(driv, it->board_name); if (dev->board_ptr) break; - } else if (strcmp(driv->driver_name, it->board_name)) + } else if (strcmp(driv->driver_name, it->board_name) == 0) break; module_put(driv->module); } diff --git a/trunk/drivers/staging/comedi/drivers/adv_pci1710.c b/trunk/drivers/staging/comedi/drivers/adv_pci1710.c index 31986608eaf1..6b4d0d68e637 100644 --- a/trunk/drivers/staging/comedi/drivers/adv_pci1710.c +++ b/trunk/drivers/staging/comedi/drivers/adv_pci1710.c @@ -1349,9 +1349,6 @@ static struct pci_dev *pci1710_find_pci_dev(struct comedi_device *dev, } if (pcidev->vendor != PCI_VENDOR_ID_ADVANTECH) continue; - if (pci_is_enabled(pcidev)) - continue; - if (strcmp(this_board->name, DRV_NAME) == 0) { for (i = 0; i < ARRAY_SIZE(boardtypes); ++i) { if (pcidev->device == boardtypes[i].device_id) { diff --git a/trunk/drivers/staging/comedi/drivers/adv_pci1723.c b/trunk/drivers/staging/comedi/drivers/adv_pci1723.c index da5ee69d2c9d..dfde0f6328dd 100644 --- a/trunk/drivers/staging/comedi/drivers/adv_pci1723.c +++ b/trunk/drivers/staging/comedi/drivers/adv_pci1723.c @@ -301,8 +301,6 @@ static struct pci_dev *pci1723_find_pci_dev(struct comedi_device *dev, } if (pcidev->vendor != PCI_VENDOR_ID_ADVANTECH) continue; - if (pci_is_enabled(pcidev)) - continue; return pcidev; } dev_err(dev->class_dev, diff --git a/trunk/drivers/staging/comedi/drivers/adv_pci_dio.c b/trunk/drivers/staging/comedi/drivers/adv_pci_dio.c index 97f06dc8e48d..2d4cb7f638b2 100644 --- a/trunk/drivers/staging/comedi/drivers/adv_pci_dio.c +++ b/trunk/drivers/staging/comedi/drivers/adv_pci_dio.c @@ -1064,8 +1064,6 @@ static struct pci_dev *pci_dio_find_pci_dev(struct comedi_device *dev, slot != PCI_SLOT(pcidev->devfn)) continue; } - if (pci_is_enabled(pcidev)) - continue; for (i = 0; i < ARRAY_SIZE(boardtypes); ++i) { if (boardtypes[i].vendor_id != pcidev->vendor) continue; diff --git a/trunk/drivers/staging/comedi/drivers/daqboard2000.c b/trunk/drivers/staging/comedi/drivers/daqboard2000.c index ef28385c1482..cad559a1a730 100644 --- a/trunk/drivers/staging/comedi/drivers/daqboard2000.c +++ b/trunk/drivers/staging/comedi/drivers/daqboard2000.c @@ -718,7 +718,8 @@ static struct pci_dev *daqboard2000_find_pci_dev(struct comedi_device *dev, continue; } 
if (pcidev->vendor != PCI_VENDOR_ID_IOTECH || - pcidev->device != 0x0409) + pcidev->device != 0x0409 || + pcidev->subsystem_device != PCI_VENDOR_ID_IOTECH) continue; for (i = 0; i < ARRAY_SIZE(boardtypes); i++) { @@ -739,6 +740,7 @@ static int daqboard2000_attach(struct comedi_device *dev, { struct pci_dev *pcidev; struct comedi_subdevice *s; + resource_size_t pci_base; void *aux_data; unsigned int aux_len; int result; @@ -758,11 +760,12 @@ static int daqboard2000_attach(struct comedi_device *dev, "failed to enable PCI device and request regions\n"); return -EIO; } - dev->iobase = pci_resource_start(pcidev, 2); + dev->iobase = 1; /* the "detach" needs this */ - devpriv->plx = - ioremap(pci_resource_start(pcidev, 0), DAQBOARD2000_PLX_SIZE); - devpriv->daq = ioremap(dev->iobase, DAQBOARD2000_DAQ_SIZE); + pci_base = pci_resource_start(pcidev, 0); + devpriv->plx = ioremap(pci_base, DAQBOARD2000_PLX_SIZE); + pci_base = pci_resource_start(pcidev, 2); + devpriv->daq = ioremap(pci_base, DAQBOARD2000_DAQ_SIZE); if (!devpriv->plx || !devpriv->daq) return -ENOMEM; @@ -799,8 +802,6 @@ static int daqboard2000_attach(struct comedi_device *dev, printk("Interrupt after is: %x\n", interrupt); */ - dev->iobase = (unsigned long)devpriv->daq; - dev->board_name = this_board->name; s = dev->subdevices + 0; @@ -824,7 +825,7 @@ static int daqboard2000_attach(struct comedi_device *dev, s = dev->subdevices + 2; result = subdev_8255_init(dev, s, daqboard2000_8255_cb, - (unsigned long)(dev->iobase + 0x40)); + (unsigned long)(devpriv->daq + 0x40)); out: return result; diff --git a/trunk/drivers/staging/comedi/drivers/dt3000.c b/trunk/drivers/staging/comedi/drivers/dt3000.c index a6fe6c9be87e..3476cda0fff0 100644 --- a/trunk/drivers/staging/comedi/drivers/dt3000.c +++ b/trunk/drivers/staging/comedi/drivers/dt3000.c @@ -804,6 +804,7 @@ static int dt3000_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct pci_dev *pcidev; struct comedi_subdevice *s; + resource_size_t pci_base; int ret = 0; dev_dbg(dev->class_dev, "dt3000:\n"); @@ -820,9 +821,10 @@ static int dt3000_attach(struct comedi_device *dev, struct comedi_devconfig *it) ret = comedi_pci_enable(pcidev, "dt3000"); if (ret < 0) return ret; + dev->iobase = 1; /* the "detach" needs this */ - dev->iobase = pci_resource_start(pcidev, 0); - devpriv->io_addr = ioremap(dev->iobase, DT3000_SIZE); + pci_base = pci_resource_start(pcidev, 0); + devpriv->io_addr = ioremap(pci_base, DT3000_SIZE); if (!devpriv->io_addr) return -ENOMEM; diff --git a/trunk/drivers/staging/comedi/drivers/rtd520.c b/trunk/drivers/staging/comedi/drivers/rtd520.c index 112fdc3e9c69..5aa8be1e7b92 100644 --- a/trunk/drivers/staging/comedi/drivers/rtd520.c +++ b/trunk/drivers/staging/comedi/drivers/rtd520.c @@ -1619,9 +1619,8 @@ static int rtd_attach(struct comedi_device *dev, struct comedi_devconfig *it) struct rtdPrivate *devpriv; struct pci_dev *pcidev; struct comedi_subdevice *s; + resource_size_t pci_base; int ret; - resource_size_t physLas1; /* data area */ - resource_size_t physLcfg; /* PLX9080 */ #ifdef USE_DMA int index; #endif @@ -1655,20 +1654,15 @@ static int rtd_attach(struct comedi_device *dev, struct comedi_devconfig *it) printk(KERN_INFO "Failed to enable PCI device and request regions.\n"); return ret; } - - /* - * Initialize base addresses - */ - /* Get the physical address from PCI config */ - dev->iobase = pci_resource_start(pcidev, LAS0_PCIINDEX); - physLas1 = pci_resource_start(pcidev, LAS1_PCIINDEX); - physLcfg = pci_resource_start(pcidev, LCFG_PCIINDEX); - /* Now 
have the kernel map this into memory */ - /* ASSUME page aligned */ - devpriv->las0 = ioremap_nocache(dev->iobase, LAS0_PCISIZE); - devpriv->las1 = ioremap_nocache(physLas1, LAS1_PCISIZE); - devpriv->lcfg = ioremap_nocache(physLcfg, LCFG_PCISIZE); - + dev->iobase = 1; /* the "detach" needs this */ + + /* Initialize the base addresses */ + pci_base = pci_resource_start(pcidev, LAS0_PCIINDEX); + devpriv->las0 = ioremap_nocache(pci_base, LAS0_PCISIZE); + pci_base = pci_resource_start(pcidev, LAS1_PCIINDEX); + devpriv->las1 = ioremap_nocache(pci_base, LAS1_PCISIZE); + pci_base = pci_resource_start(pcidev, LCFG_PCIINDEX); + devpriv->lcfg = ioremap_nocache(pci_base, LCFG_PCISIZE); if (!devpriv->las0 || !devpriv->las1 || !devpriv->lcfg) return -ENOMEM; diff --git a/trunk/drivers/staging/comedi/drivers/usbdux.c b/trunk/drivers/staging/comedi/drivers/usbdux.c index 848c7ec06976..11ee83681da7 100644 --- a/trunk/drivers/staging/comedi/drivers/usbdux.c +++ b/trunk/drivers/staging/comedi/drivers/usbdux.c @@ -102,6 +102,7 @@ sampling rate. If you sample two channels you get 4kHz and so on. #define BULK_TIMEOUT 1000 /* constants for "firmware" upload and download */ +#define FIRMWARE "usbdux_firmware.bin" #define USBDUXSUB_FIRMWARE 0xA0 #define VENDOR_DIR_IN 0xC0 #define VENDOR_DIR_OUT 0x40 @@ -2791,7 +2792,7 @@ static int usbdux_usb_probe(struct usb_interface *uinterf, ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, - "usbdux_firmware.bin", + FIRMWARE, &udev->dev, GFP_KERNEL, usbduxsub + index, @@ -2850,3 +2851,4 @@ module_comedi_usb_driver(usbdux_driver, usbdux_usb_driver); MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com"); MODULE_DESCRIPTION("Stirling/ITL USB-DUX -- Bernd.Porr@f2s.com"); MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE); diff --git a/trunk/drivers/staging/comedi/drivers/usbduxfast.c b/trunk/drivers/staging/comedi/drivers/usbduxfast.c index d9911588c10a..8eb41257c6ce 100644 --- a/trunk/drivers/staging/comedi/drivers/usbduxfast.c +++ b/trunk/drivers/staging/comedi/drivers/usbduxfast.c @@ -57,6 +57,7 @@ /* * constants for "firmware" upload and download */ +#define FIRMWARE "usbduxfast_firmware.bin" #define USBDUXFASTSUB_FIRMWARE 0xA0 #define VENDOR_DIR_IN 0xC0 #define VENDOR_DIR_OUT 0x40 @@ -1706,7 +1707,7 @@ static int usbduxfast_usb_probe(struct usb_interface *uinterf, ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, - "usbduxfast_firmware.bin", + FIRMWARE, &udev->dev, GFP_KERNEL, usbduxfastsub + index, @@ -1774,3 +1775,4 @@ module_comedi_usb_driver(usbduxfast_driver, usbduxfast_usb_driver); MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com"); MODULE_DESCRIPTION("USB-DUXfast, BerndPorr@f2s.com"); MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE); diff --git a/trunk/drivers/staging/comedi/drivers/usbduxsigma.c b/trunk/drivers/staging/comedi/drivers/usbduxsigma.c index 543e604791e2..f54ab8c2fcfd 100644 --- a/trunk/drivers/staging/comedi/drivers/usbduxsigma.c +++ b/trunk/drivers/staging/comedi/drivers/usbduxsigma.c @@ -63,6 +63,7 @@ Status: testing #define BULK_TIMEOUT 1000 /* constants for "firmware" upload and download */ +#define FIRMWARE "usbduxsigma_firmware.bin" #define USBDUXSUB_FIRMWARE 0xA0 #define VENDOR_DIR_IN 0xC0 #define VENDOR_DIR_OUT 0x40 @@ -2780,7 +2781,7 @@ static int usbduxsigma_usb_probe(struct usb_interface *uinterf, ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, - "usbduxsigma_firmware.bin", + FIRMWARE, &udev->dev, GFP_KERNEL, usbduxsub + index, @@ -2845,3 +2846,4 @@ module_comedi_usb_driver(usbduxsigma_driver, 
usbduxsigma_usb_driver); MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com"); MODULE_DESCRIPTION("Stirling/ITL USB-DUX SIGMA -- Bernd.Porr@f2s.com"); MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE); diff --git a/trunk/drivers/staging/csr/Kconfig b/trunk/drivers/staging/csr/Kconfig index cee8d48d2af9..ad2a1096e920 100644 --- a/trunk/drivers/staging/csr/Kconfig +++ b/trunk/drivers/staging/csr/Kconfig @@ -1,6 +1,6 @@ config CSR_WIFI tristate "CSR wireless driver" - depends on MMC && CFG80211_WEXT + depends on MMC && CFG80211_WEXT && INET select WIRELESS_EXT select WEXT_PRIV help diff --git a/trunk/drivers/staging/gdm72xx/sdio_boot.c b/trunk/drivers/staging/gdm72xx/sdio_boot.c index 760efee23d4a..65624bca8b3a 100644 --- a/trunk/drivers/staging/gdm72xx/sdio_boot.c +++ b/trunk/drivers/staging/gdm72xx/sdio_boot.c @@ -66,9 +66,8 @@ static int download_image(struct sdio_func *func, char *img_name) return -ENOENT; } - if (filp->f_dentry) - inode = filp->f_dentry->d_inode; - if (!inode || !S_ISREG(inode->i_mode)) { + inode = filp->f_dentry->d_inode; + if (!S_ISREG(inode->i_mode)) { printk(KERN_ERR "Invalid file type: %s\n", img_name); ret = -EINVAL; goto out; @@ -123,7 +122,7 @@ static int download_image(struct sdio_func *func, char *img_name) pno++; } out: - filp_close(filp, current->files); + filp_close(filp, NULL); return ret; } diff --git a/trunk/drivers/staging/gdm72xx/usb_boot.c b/trunk/drivers/staging/gdm72xx/usb_boot.c index fef290c38db6..e3dbd5a552ca 100644 --- a/trunk/drivers/staging/gdm72xx/usb_boot.c +++ b/trunk/drivers/staging/gdm72xx/usb_boot.c @@ -173,14 +173,12 @@ int usb_boot(struct usb_device *usbdev, u16 pid) filp = filp_open(img_name, O_RDONLY | O_LARGEFILE, 0); if (IS_ERR(filp)) { printk(KERN_ERR "Can't find %s.\n", img_name); - set_fs(fs); ret = PTR_ERR(filp); goto restore_fs; } - if (filp->f_dentry) - inode = filp->f_dentry->d_inode; - if (!inode || !S_ISREG(inode->i_mode)) { + inode = filp->f_dentry->d_inode; + if (!S_ISREG(inode->i_mode)) { printk(KERN_ERR "Invalid file type: %s\n", img_name); ret = -EINVAL; goto out; @@ -262,7 +260,7 @@ int usb_boot(struct usb_device *usbdev, u16 pid) ret = -EINVAL; } out: - filp_close(filp, current->files); + filp_close(filp, NULL); restore_fs: set_fs(fs); @@ -322,13 +320,11 @@ static int em_download_image(struct usb_device *usbdev, char *path, goto restore_fs; } - if (filp->f_dentry) { - inode = filp->f_dentry->d_inode; - if (!inode || !S_ISREG(inode->i_mode)) { - printk(KERN_ERR "Invalid file type: %s\n", path); - ret = -EINVAL; - goto out; - } + inode = filp->f_dentry->d_inode; + if (!S_ISREG(inode->i_mode)) { + printk(KERN_ERR "Invalid file type: %s\n", path); + ret = -EINVAL; + goto out; } buf = kmalloc(DOWNLOAD_CHUCK + pad_size, GFP_KERNEL); @@ -364,7 +360,7 @@ static int em_download_image(struct usb_device *usbdev, char *path, goto out; out: - filp_close(filp, current->files); + filp_close(filp, NULL); restore_fs: set_fs(fs); diff --git a/trunk/drivers/staging/iio/adc/ad7192.c b/trunk/drivers/staging/iio/adc/ad7192.c index 22c3923d55eb..095837285f4f 100644 --- a/trunk/drivers/staging/iio/adc/ad7192.c +++ b/trunk/drivers/staging/iio/adc/ad7192.c @@ -754,7 +754,7 @@ static ssize_t ad7192_set(struct device *dev, else st->mode &= ~AD7192_MODE_ACX; - ad7192_write_reg(st, AD7192_REG_GPOCON, 3, st->mode); + ad7192_write_reg(st, AD7192_REG_MODE, 3, st->mode); break; default: ret = -EINVAL; @@ -798,6 +798,11 @@ static const struct attribute_group ad7195_attribute_group = { .attrs = ad7195_attributes, }; +static unsigned int 
ad7192_get_temp_scale(bool unipolar) +{ + return unipolar ? 2815 * 2 : 2815; +} + static int ad7192_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, @@ -824,19 +829,6 @@ static int ad7192_read_raw(struct iio_dev *indio_dev, *val = (smpl >> chan->scan_type.shift) & ((1 << (chan->scan_type.realbits)) - 1); - switch (chan->type) { - case IIO_VOLTAGE: - if (!unipolar) - *val -= (1 << (chan->scan_type.realbits - 1)); - break; - case IIO_TEMP: - *val -= 0x800000; - *val /= 2815; /* temp Kelvin */ - *val -= 273; /* temp Celsius */ - break; - default: - return -EINVAL; - } return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: @@ -848,11 +840,21 @@ static int ad7192_read_raw(struct iio_dev *indio_dev, mutex_unlock(&indio_dev->mlock); return IIO_VAL_INT_PLUS_NANO; case IIO_TEMP: - *val = 1000; - return IIO_VAL_INT; + *val = 0; + *val2 = 1000000000 / ad7192_get_temp_scale(unipolar); + return IIO_VAL_INT_PLUS_NANO; default: return -EINVAL; } + case IIO_CHAN_INFO_OFFSET: + if (!unipolar) + *val = -(1 << (chan->scan_type.realbits - 1)); + else + *val = 0; + /* Kelvin to Celsius */ + if (chan->type == IIO_TEMP) + *val -= 273 * ad7192_get_temp_scale(unipolar); + return IIO_VAL_INT; } return -EINVAL; @@ -890,7 +892,7 @@ static int ad7192_write_raw(struct iio_dev *indio_dev, } ret = 0; } - + break; default: ret = -EINVAL; } @@ -942,20 +944,22 @@ static const struct iio_info ad7195_info = { .channel = _chan, \ .channel2 = _chan2, \ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \ - IIO_CHAN_INFO_SCALE_SHARED_BIT, \ + IIO_CHAN_INFO_SCALE_SHARED_BIT | \ + IIO_CHAN_INFO_OFFSET_SHARED_BIT, \ .address = _address, \ .scan_index = _si, \ - .scan_type = IIO_ST('s', 24, 32, 0)} + .scan_type = IIO_ST('u', 24, 32, 0)} #define AD7192_CHAN(_chan, _address, _si) \ { .type = IIO_VOLTAGE, \ .indexed = 1, \ .channel = _chan, \ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \ - IIO_CHAN_INFO_SCALE_SHARED_BIT, \ + IIO_CHAN_INFO_SCALE_SHARED_BIT | \ + IIO_CHAN_INFO_OFFSET_SHARED_BIT, \ .address = _address, \ .scan_index = _si, \ - .scan_type = IIO_ST('s', 24, 32, 0)} + .scan_type = IIO_ST('u', 24, 32, 0)} #define AD7192_CHAN_TEMP(_chan, _address, _si) \ { .type = IIO_TEMP, \ @@ -965,7 +969,7 @@ static const struct iio_info ad7195_info = { IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \ .address = _address, \ .scan_index = _si, \ - .scan_type = IIO_ST('s', 24, 32, 0)} + .scan_type = IIO_ST('u', 24, 32, 0)} static struct iio_chan_spec ad7192_channels[] = { AD7192_CHAN_DIFF(1, 2, NULL, AD7192_CH_AIN1P_AIN2M, 0), diff --git a/trunk/drivers/staging/iio/adc/ad7298_ring.c b/trunk/drivers/staging/iio/adc/ad7298_ring.c index fd1d855ff57a..506016f01593 100644 --- a/trunk/drivers/staging/iio/adc/ad7298_ring.c +++ b/trunk/drivers/staging/iio/adc/ad7298_ring.c @@ -76,7 +76,7 @@ static irqreturn_t ad7298_trigger_handler(int irq, void *p) struct iio_dev *indio_dev = pf->indio_dev; struct ad7298_state *st = iio_priv(indio_dev); struct iio_buffer *ring = indio_dev->buffer; - s64 time_ns; + s64 time_ns = 0; __u16 buf[16]; int b_sent, i; diff --git a/trunk/drivers/staging/iio/adc/ad7780.c b/trunk/drivers/staging/iio/adc/ad7780.c index 1ece2ac8de56..19ee49c95de4 100644 --- a/trunk/drivers/staging/iio/adc/ad7780.c +++ b/trunk/drivers/staging/iio/adc/ad7780.c @@ -131,9 +131,10 @@ static const struct ad7780_chip_info ad7780_chip_info_tbl[] = { .indexed = 1, .channel = 0, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_type = { - .sign 
= 's', + .sign = 'u', .realbits = 24, .storagebits = 32, .shift = 8, @@ -146,9 +147,10 @@ static const struct ad7780_chip_info ad7780_chip_info_tbl[] = { .indexed = 1, .channel = 0, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_type = { - .sign = 's', + .sign = 'u', .realbits = 20, .storagebits = 32, .shift = 12, diff --git a/trunk/drivers/staging/iio/adc/ad7793.c b/trunk/drivers/staging/iio/adc/ad7793.c index 76fdd7145fc5..112e2b7b5bc4 100644 --- a/trunk/drivers/staging/iio/adc/ad7793.c +++ b/trunk/drivers/staging/iio/adc/ad7793.c @@ -563,8 +563,9 @@ static ssize_t ad7793_show_scale_available(struct device *dev, return len; } -static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available, in-in_scale_available, - S_IRUGO, ad7793_show_scale_available, NULL, 0); +static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available, + in_voltage-voltage_scale_available, S_IRUGO, + ad7793_show_scale_available, NULL, 0); static struct attribute *ad7793_attributes[] = { &iio_dev_attr_sampling_frequency.dev_attr.attr, @@ -604,9 +605,6 @@ static int ad7793_read_raw(struct iio_dev *indio_dev, *val = (smpl >> chan->scan_type.shift) & ((1 << (chan->scan_type.realbits)) - 1); - if (!unipolar) - *val -= (1 << (chan->scan_type.realbits - 1)); - return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: @@ -620,25 +618,38 @@ static int ad7793_read_raw(struct iio_dev *indio_dev, return IIO_VAL_INT_PLUS_NANO; } else { /* 1170mV / 2^23 * 6 */ - scale_uv = (1170ULL * 100000000ULL * 6ULL) - >> (chan->scan_type.realbits - - (unipolar ? 0 : 1)); + scale_uv = (1170ULL * 100000000ULL * 6ULL); } break; case IIO_TEMP: - /* Always uses unity gain and internal ref */ - scale_uv = (2500ULL * 100000000ULL) - >> (chan->scan_type.realbits - - (unipolar ? 0 : 1)); + /* 1170mV / 0.81 mV/C / 2^23 */ + scale_uv = 1444444444444ULL; break; default: return -EINVAL; } - *val2 = do_div(scale_uv, 100000000) * 10; - *val = scale_uv; - + scale_uv >>= (chan->scan_type.realbits - (unipolar ? 0 : 1)); + *val = 0; + *val2 = scale_uv; return IIO_VAL_INT_PLUS_NANO; + case IIO_CHAN_INFO_OFFSET: + if (!unipolar) + *val = -(1 << (chan->scan_type.realbits - 1)); + else + *val = 0; + + /* Kelvin to Celsius */ + if (chan->type == IIO_TEMP) { + unsigned long long offset; + unsigned int shift; + + shift = chan->scan_type.realbits - (unipolar ? 
0 : 1); + offset = 273ULL << shift; + do_div(offset, 1444); + *val -= offset; + } + return IIO_VAL_INT; } return -EINVAL; } @@ -676,7 +687,7 @@ static int ad7793_write_raw(struct iio_dev *indio_dev, } ret = 0; } - + break; default: ret = -EINVAL; } @@ -720,9 +731,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel2 = 0, .address = AD7793_CH_AIN1P_AIN1M, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 0, - .scan_type = IIO_ST('s', 24, 32, 0) + .scan_type = IIO_ST('u', 24, 32, 0) }, .channel[1] = { .type = IIO_VOLTAGE, @@ -732,9 +744,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel2 = 1, .address = AD7793_CH_AIN2P_AIN2M, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 1, - .scan_type = IIO_ST('s', 24, 32, 0) + .scan_type = IIO_ST('u', 24, 32, 0) }, .channel[2] = { .type = IIO_VOLTAGE, @@ -744,9 +757,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel2 = 2, .address = AD7793_CH_AIN3P_AIN3M, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 2, - .scan_type = IIO_ST('s', 24, 32, 0) + .scan_type = IIO_ST('u', 24, 32, 0) }, .channel[3] = { .type = IIO_VOLTAGE, @@ -757,9 +771,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel2 = 2, .address = AD7793_CH_AIN1M_AIN1M, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 3, - .scan_type = IIO_ST('s', 24, 32, 0) + .scan_type = IIO_ST('u', 24, 32, 0) }, .channel[4] = { .type = IIO_TEMP, @@ -769,7 +784,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | IIO_CHAN_INFO_SCALE_SEPARATE_BIT, .scan_index = 4, - .scan_type = IIO_ST('s', 24, 32, 0), + .scan_type = IIO_ST('u', 24, 32, 0), }, .channel[5] = { .type = IIO_VOLTAGE, @@ -778,9 +793,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel = 4, .address = AD7793_CH_AVDD_MONITOR, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SEPARATE_BIT, + IIO_CHAN_INFO_SCALE_SEPARATE_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 5, - .scan_type = IIO_ST('s', 24, 32, 0), + .scan_type = IIO_ST('u', 24, 32, 0), }, .channel[6] = IIO_CHAN_SOFT_TIMESTAMP(6), }, @@ -793,9 +809,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel2 = 0, .address = AD7793_CH_AIN1P_AIN1M, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 0, - .scan_type = IIO_ST('s', 16, 32, 0) + .scan_type = IIO_ST('u', 16, 32, 0) }, .channel[1] = { .type = IIO_VOLTAGE, @@ -805,9 +822,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel2 = 1, .address = AD7793_CH_AIN2P_AIN2M, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 1, - .scan_type = IIO_ST('s', 16, 32, 0) + .scan_type = IIO_ST('u', 16, 32, 0) }, .channel[2] = { .type = IIO_VOLTAGE, @@ -817,9 +835,10 @@ static const struct ad7793_chip_info 
ad7793_chip_info_tbl[] = { .channel2 = 2, .address = AD7793_CH_AIN3P_AIN3M, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 2, - .scan_type = IIO_ST('s', 16, 32, 0) + .scan_type = IIO_ST('u', 16, 32, 0) }, .channel[3] = { .type = IIO_VOLTAGE, @@ -830,9 +849,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel2 = 2, .address = AD7793_CH_AIN1M_AIN1M, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 3, - .scan_type = IIO_ST('s', 16, 32, 0) + .scan_type = IIO_ST('u', 16, 32, 0) }, .channel[4] = { .type = IIO_TEMP, @@ -842,7 +862,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | IIO_CHAN_INFO_SCALE_SEPARATE_BIT, .scan_index = 4, - .scan_type = IIO_ST('s', 16, 32, 0), + .scan_type = IIO_ST('u', 16, 32, 0), }, .channel[5] = { .type = IIO_VOLTAGE, @@ -851,9 +871,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel = 4, .address = AD7793_CH_AVDD_MONITOR, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SEPARATE_BIT, + IIO_CHAN_INFO_SCALE_SEPARATE_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 5, - .scan_type = IIO_ST('s', 16, 32, 0), + .scan_type = IIO_ST('u', 16, 32, 0), }, .channel[6] = IIO_CHAN_SOFT_TIMESTAMP(6), }, @@ -901,7 +922,7 @@ static int __devinit ad7793_probe(struct spi_device *spi) else if (voltage_uv) st->int_vref_mv = voltage_uv / 1000; else - st->int_vref_mv = 2500; /* Build-in ref */ + st->int_vref_mv = 1170; /* Build-in ref */ spi_set_drvdata(spi, indio_dev); st->spi = spi; diff --git a/trunk/drivers/staging/olpc_dcon/olpc_dcon.c b/trunk/drivers/staging/olpc_dcon/olpc_dcon.c index 992275c0d87c..2c4bd746715a 100644 --- a/trunk/drivers/staging/olpc_dcon/olpc_dcon.c +++ b/trunk/drivers/staging/olpc_dcon/olpc_dcon.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include diff --git a/trunk/drivers/staging/vt6656/main_usb.c b/trunk/drivers/staging/vt6656/main_usb.c index b06fd5b723fa..d536756549e6 100644 --- a/trunk/drivers/staging/vt6656/main_usb.c +++ b/trunk/drivers/staging/vt6656/main_usb.c @@ -189,7 +189,7 @@ DEVICE_PARAM(b80211hEnable, "802.11h mode"); // Static vars definitions // -static struct usb_device_id vt6656_table[] __devinitdata = { +static struct usb_device_id vt6656_table[] = { {USB_DEVICE(VNT_USB_VENDOR_ID, VNT_USB_PRODUCT_ID)}, {} }; diff --git a/trunk/drivers/staging/winbond/wbusb.c b/trunk/drivers/staging/winbond/wbusb.c index ef360547ecec..0ca857ac473e 100644 --- a/trunk/drivers/staging/winbond/wbusb.c +++ b/trunk/drivers/staging/winbond/wbusb.c @@ -25,7 +25,7 @@ MODULE_DESCRIPTION("IS89C35 802.11bg WLAN USB Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.1"); -static const struct usb_device_id wb35_table[] __devinitconst = { +static const struct usb_device_id wb35_table[] = { { USB_DEVICE(0x0416, 0x0035) }, { USB_DEVICE(0x18E8, 0x6201) }, { USB_DEVICE(0x18E8, 0x6206) }, diff --git a/trunk/drivers/target/target_core_file.c b/trunk/drivers/target/target_core_file.c index 9e2100551c78..cbb5aaf3e567 100644 --- a/trunk/drivers/target/target_core_file.c +++ b/trunk/drivers/target/target_core_file.c @@ -109,46 +109,29 @@ static struct se_device *fd_create_virtdevice( struct se_subsystem_dev *se_dev, void *p) { - char *dev_p = NULL; struct se_device *dev; struct se_dev_limits 
dev_limits; struct queue_limits *limits; struct fd_dev *fd_dev = p; struct fd_host *fd_host = hba->hba_ptr; - mm_segment_t old_fs; struct file *file; struct inode *inode = NULL; int dev_flags = 0, flags, ret = -EINVAL; memset(&dev_limits, 0, sizeof(struct se_dev_limits)); - old_fs = get_fs(); - set_fs(get_ds()); - dev_p = getname(fd_dev->fd_dev_name); - set_fs(old_fs); - - if (IS_ERR(dev_p)) { - pr_err("getname(%s) failed: %lu\n", - fd_dev->fd_dev_name, IS_ERR(dev_p)); - ret = PTR_ERR(dev_p); - goto fail; - } /* * Use O_DSYNC by default instead of O_SYNC to forgo syncing * of pure timestamp updates. */ flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC; - file = filp_open(dev_p, flags, 0600); + file = filp_open(fd_dev->fd_dev_name, flags, 0600); if (IS_ERR(file)) { - pr_err("filp_open(%s) failed\n", dev_p); + pr_err("filp_open(%s) failed\n", fd_dev->fd_dev_name); ret = PTR_ERR(file); goto fail; } - if (!file || !file->f_dentry) { - pr_err("filp_open(%s) failed\n", dev_p); - goto fail; - } fd_dev->fd_file = file; /* * If using a block backend with this struct file, we extract @@ -212,14 +195,12 @@ static struct se_device *fd_create_virtdevice( " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id, fd_dev->fd_dev_name, fd_dev->fd_dev_size); - putname(dev_p); return dev; fail: if (fd_dev->fd_file) { filp_close(fd_dev->fd_file, NULL); fd_dev->fd_file = NULL; } - putname(dev_p); return ERR_PTR(ret); } @@ -452,14 +433,11 @@ static ssize_t fd_set_configfs_dev_params( token = match_token(ptr, tokens, args); switch (token) { case Opt_fd_dev_name: - arg_p = match_strdup(&args[0]); - if (!arg_p) { - ret = -ENOMEM; + if (match_strlcpy(fd_dev->fd_dev_name, &args[0], + FD_MAX_DEV_NAME) == 0) { + ret = -EINVAL; break; } - snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME, - "%s", arg_p); - kfree(arg_p); pr_debug("FILEIO: Referencing Path: %s\n", fd_dev->fd_dev_name); fd_dev->fbd_flags |= FBDF_HAS_PATH; diff --git a/trunk/drivers/target/target_core_pscsi.c b/trunk/drivers/target/target_core_pscsi.c index 6e32ff6f2fa0..5552fa7426bc 100644 --- a/trunk/drivers/target/target_core_pscsi.c +++ b/trunk/drivers/target/target_core_pscsi.c @@ -673,8 +673,15 @@ static int pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg) struct scsi_device *sd = pdv->pdv_sd; int result; struct pscsi_plugin_task *pt = cmd->priv; - unsigned char *cdb = &pt->pscsi_cdb[0]; + unsigned char *cdb; + /* + * Special case for REPORT_LUNs handling where pscsi_plugin_task has + * not been allocated because TCM is handling the emulation directly. 
+ */ + if (!pt) + return 0; + cdb = &pt->pscsi_cdb[0]; result = pt->pscsi_result; /* * Hack to make sure that Write-Protect modepage is set if R/O mode is diff --git a/trunk/drivers/target/target_core_transport.c b/trunk/drivers/target/target_core_transport.c index 0eaae23d12b5..4de3186dc44e 100644 --- a/trunk/drivers/target/target_core_transport.c +++ b/trunk/drivers/target/target_core_transport.c @@ -1165,8 +1165,6 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size) " 0x%02x\n", cmd->se_tfo->get_fabric_name(), cmd->data_length, size, cmd->t_task_cdb[0]); - cmd->cmd_spdtl = size; - if (cmd->data_direction == DMA_TO_DEVICE) { pr_err("Rejecting underflow/overflow" " WRITE data\n"); @@ -2294,9 +2292,9 @@ transport_generic_get_mem(struct se_cmd *cmd) return 0; out: - while (i >= 0) { - __free_page(sg_page(&cmd->t_data_sg[i])); + while (i > 0) { i--; + __free_page(sg_page(&cmd->t_data_sg[i])); } kfree(cmd->t_data_sg); cmd->t_data_sg = NULL; @@ -2323,9 +2321,12 @@ int transport_generic_new_cmd(struct se_cmd *cmd) if (ret < 0) goto out_fail; } - - /* Workaround for handling zero-length control CDBs */ - if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->data_length) { + /* + * If this command doesn't have any payload and we don't have to call + * into the fabric for data transfers, go ahead and complete it right + * away. + */ + if (!cmd->data_length) { spin_lock_irq(&cmd->t_state_lock); cmd->t_state = TRANSPORT_COMPLETE; cmd->transport_state |= CMD_T_ACTIVE; diff --git a/trunk/drivers/target/tcm_fc/tcm_fc.h b/trunk/drivers/target/tcm_fc/tcm_fc.h index c5eb3c33c3db..eea69358ced3 100644 --- a/trunk/drivers/target/tcm_fc/tcm_fc.h +++ b/trunk/drivers/target/tcm_fc/tcm_fc.h @@ -131,6 +131,7 @@ extern struct list_head ft_lport_list; extern struct mutex ft_lport_lock; extern struct fc4_prov ft_prov; extern struct target_fabric_configfs *ft_configfs; +extern unsigned int ft_debug_logging; /* * Fabric methods. diff --git a/trunk/drivers/target/tcm_fc/tfc_cmd.c b/trunk/drivers/target/tcm_fc/tfc_cmd.c index b9cb5006177e..823e6922249d 100644 --- a/trunk/drivers/target/tcm_fc/tfc_cmd.c +++ b/trunk/drivers/target/tcm_fc/tfc_cmd.c @@ -48,7 +48,7 @@ /* * Dump cmd state for debugging. 
*/ -void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) +static void _ft_dump_cmd(struct ft_cmd *cmd, const char *caller) { struct fc_exch *ep; struct fc_seq *sp; @@ -80,6 +80,12 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) } } +void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) +{ + if (unlikely(ft_debug_logging)) + _ft_dump_cmd(cmd, caller); +} + static void ft_free_cmd(struct ft_cmd *cmd) { struct fc_frame *fp; diff --git a/trunk/drivers/target/tcm_fc/tfc_sess.c b/trunk/drivers/target/tcm_fc/tfc_sess.c index 87901fa74dd7..3c9e5b57caab 100644 --- a/trunk/drivers/target/tcm_fc/tfc_sess.c +++ b/trunk/drivers/target/tcm_fc/tfc_sess.c @@ -456,7 +456,9 @@ static void ft_prlo(struct fc_rport_priv *rdata) struct ft_tport *tport; mutex_lock(&ft_lport_lock); - tport = rcu_dereference(rdata->local_port->prov[FC_TYPE_FCP]); + tport = rcu_dereference_protected(rdata->local_port->prov[FC_TYPE_FCP], + lockdep_is_held(&ft_lport_lock)); + if (!tport) { mutex_unlock(&ft_lport_lock); return; diff --git a/trunk/drivers/tty/serial/Kconfig b/trunk/drivers/tty/serial/Kconfig index 070b442c1f81..4720b4ba096a 100644 --- a/trunk/drivers/tty/serial/Kconfig +++ b/trunk/drivers/tty/serial/Kconfig @@ -160,10 +160,12 @@ config SERIAL_KS8695_CONSOLE config SERIAL_CLPS711X tristate "CLPS711X serial port support" - depends on ARM && ARCH_CLPS711X + depends on ARCH_CLPS711X select SERIAL_CORE + default y help - ::: To be written ::: + This enables the driver for the on-chip UARTs of the Cirrus + Logic EP711x/EP721x/EP731x processors. config SERIAL_CLPS711X_CONSOLE bool "Support for console on CLPS711X serial port" @@ -173,9 +175,7 @@ config SERIAL_CLPS711X_CONSOLE Even if you say Y here, the currently visible virtual console (/dev/tty0) will still be used as the system console by default, but you can alter that using a kernel command line option such as - "console=ttyCL1". (Try "man bootparam" or see the documentation of - your boot loader (lilo or loadlin) about how to pass options to the - kernel at boot time.) + "console=ttyCL1". 
config SERIAL_SAMSUNG tristate "Samsung SoC serial support" diff --git a/trunk/drivers/tty/serial/ifx6x60.c b/trunk/drivers/tty/serial/ifx6x60.c index 144cd3987d4c..3ad079ffd049 100644 --- a/trunk/drivers/tty/serial/ifx6x60.c +++ b/trunk/drivers/tty/serial/ifx6x60.c @@ -1331,7 +1331,7 @@ static const struct spi_device_id ifx_id_table[] = { MODULE_DEVICE_TABLE(spi, ifx_id_table); /* spi operations */ -static const struct spi_driver ifx_spi_driver = { +static struct spi_driver ifx_spi_driver = { .driver = { .name = DRVNAME, .pm = &ifx_spi_pm, diff --git a/trunk/drivers/tty/serial/mxs-auart.c b/trunk/drivers/tty/serial/mxs-auart.c index 2e341b81ff89..3a667eed63d6 100644 --- a/trunk/drivers/tty/serial/mxs-auart.c +++ b/trunk/drivers/tty/serial/mxs-auart.c @@ -73,6 +73,7 @@ #define AUART_CTRL0_CLKGATE (1 << 30) #define AUART_CTRL2_CTSEN (1 << 15) +#define AUART_CTRL2_RTSEN (1 << 14) #define AUART_CTRL2_RTS (1 << 11) #define AUART_CTRL2_RXE (1 << 9) #define AUART_CTRL2_TXE (1 << 8) @@ -259,9 +260,12 @@ static void mxs_auart_set_mctrl(struct uart_port *u, unsigned mctrl) u32 ctrl = readl(u->membase + AUART_CTRL2); - ctrl &= ~AUART_CTRL2_RTS; - if (mctrl & TIOCM_RTS) - ctrl |= AUART_CTRL2_RTS; + ctrl &= ~AUART_CTRL2_RTSEN; + if (mctrl & TIOCM_RTS) { + if (u->state->port.flags & ASYNC_CTS_FLOW) + ctrl |= AUART_CTRL2_RTSEN; + } + s->ctrl = mctrl; writel(ctrl, u->membase + AUART_CTRL2); } @@ -359,9 +363,9 @@ static void mxs_auart_settermios(struct uart_port *u, /* figure out the hardware flow control settings */ if (cflag & CRTSCTS) - ctrl2 |= AUART_CTRL2_CTSEN; + ctrl2 |= AUART_CTRL2_CTSEN | AUART_CTRL2_RTSEN; else - ctrl2 &= ~AUART_CTRL2_CTSEN; + ctrl2 &= ~(AUART_CTRL2_CTSEN | AUART_CTRL2_RTSEN); /* set baud rate */ baud = uart_get_baud_rate(u, termios, old, 0, u->uartclk); diff --git a/trunk/drivers/tty/serial/pmac_zilog.c b/trunk/drivers/tty/serial/pmac_zilog.c index 654755a990df..333c8d012b0e 100644 --- a/trunk/drivers/tty/serial/pmac_zilog.c +++ b/trunk/drivers/tty/serial/pmac_zilog.c @@ -1348,10 +1348,16 @@ static int pmz_verify_port(struct uart_port *port, struct serial_struct *ser) static int pmz_poll_get_char(struct uart_port *port) { struct uart_pmac_port *uap = (struct uart_pmac_port *)port; + int tries = 2; - while ((read_zsreg(uap, R0) & Rx_CH_AV) == 0) - udelay(5); - return read_zsdata(uap); + while (tries) { + if ((read_zsreg(uap, R0) & Rx_CH_AV) != 0) + return read_zsdata(uap); + if (tries--) + udelay(5); + } + + return NO_POLL_CHAR; } static void pmz_poll_put_char(struct uart_port *port, unsigned char c) diff --git a/trunk/drivers/tty/serial/sh-sci.c b/trunk/drivers/tty/serial/sh-sci.c index d4d8c9453cd8..9be296cf7295 100644 --- a/trunk/drivers/tty/serial/sh-sci.c +++ b/trunk/drivers/tty/serial/sh-sci.c @@ -25,6 +25,7 @@ #include #include +#include #include #include #include @@ -1410,8 +1411,8 @@ static void work_fn_rx(struct work_struct *work) /* Handle incomplete DMA receive */ struct tty_struct *tty = port->state->port.tty; struct dma_chan *chan = s->chan_rx; - struct sh_desc *sh_desc = container_of(desc, struct sh_desc, - async_tx); + struct shdma_desc *sh_desc = container_of(desc, + struct shdma_desc, async_tx); unsigned long flags; int count; diff --git a/trunk/drivers/usb/Kconfig b/trunk/drivers/usb/Kconfig index a7773a3e02b1..7065df6036ca 100644 --- a/trunk/drivers/usb/Kconfig +++ b/trunk/drivers/usb/Kconfig @@ -13,7 +13,7 @@ config USB_ARCH_HAS_OHCI default y if PXA3xx default y if ARCH_EP93XX default y if ARCH_AT91 - default y if ARCH_PNX4008 && I2C + default y if 
ARCH_PNX4008 default y if MFD_TC6393XB default y if ARCH_W90X900 default y if ARCH_DAVINCI_DA8XX diff --git a/trunk/drivers/usb/chipidea/Kconfig b/trunk/drivers/usb/chipidea/Kconfig index 8337fb5d988d..47e499c9c0b6 100644 --- a/trunk/drivers/usb/chipidea/Kconfig +++ b/trunk/drivers/usb/chipidea/Kconfig @@ -1,9 +1,9 @@ config USB_CHIPIDEA tristate "ChipIdea Highspeed Dual Role Controller" - depends on USB + depends on USB || USB_GADGET help - Say Y here if your system has a dual role high speed USB - controller based on ChipIdea silicon IP. Currently, only the + Say Y here if your system has a dual role high speed USB + controller based on ChipIdea silicon IP. Currently, only the peripheral mode is supported. When compiled dynamically, the module will be called ci-hdrc.ko. @@ -12,7 +12,7 @@ if USB_CHIPIDEA config USB_CHIPIDEA_UDC bool "ChipIdea device controller" - depends on USB_GADGET + depends on USB_GADGET=y || USB_GADGET=USB_CHIPIDEA select USB_GADGET_DUALSPEED help Say Y here to enable device controller functionality of the @@ -20,6 +20,7 @@ config USB_CHIPIDEA_UDC config USB_CHIPIDEA_HOST bool "ChipIdea host controller" + depends on USB=y || USB=USB_CHIPIDEA select USB_EHCI_ROOT_HUB_TT help Say Y here to enable host controller functionality of the diff --git a/trunk/drivers/usb/class/cdc-acm.c b/trunk/drivers/usb/class/cdc-acm.c index 56d6bf668488..f763ed7ba91e 100644 --- a/trunk/drivers/usb/class/cdc-acm.c +++ b/trunk/drivers/usb/class/cdc-acm.c @@ -1104,7 +1104,8 @@ static int acm_probe(struct usb_interface *intf, } - if (data_interface->cur_altsetting->desc.bNumEndpoints < 2) + if (data_interface->cur_altsetting->desc.bNumEndpoints < 2 || + control_interface->cur_altsetting->desc.bNumEndpoints == 0) return -EINVAL; epctrl = &control_interface->cur_altsetting->endpoint[0].desc; diff --git a/trunk/drivers/usb/early/ehci-dbgp.c b/trunk/drivers/usb/early/ehci-dbgp.c index ee0ebacf8227..89dcf155d57e 100644 --- a/trunk/drivers/usb/early/ehci-dbgp.c +++ b/trunk/drivers/usb/early/ehci-dbgp.c @@ -450,7 +450,7 @@ static int dbgp_ehci_startup(void) writel(FLAG_CF, &ehci_regs->configured_flag); /* Wait until the controller is no longer halted */ - loop = 10; + loop = 1000; do { status = readl(&ehci_regs->status); if (!(status & STS_HALT)) diff --git a/trunk/drivers/usb/gadget/storage_common.c b/trunk/drivers/usb/gadget/storage_common.c index ae8b18869b8c..8d9bcd8207c8 100644 --- a/trunk/drivers/usb/gadget/storage_common.c +++ b/trunk/drivers/usb/gadget/storage_common.c @@ -656,9 +656,8 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename) if (!(filp->f_mode & FMODE_WRITE)) ro = 1; - if (filp->f_path.dentry) - inode = filp->f_path.dentry->d_inode; - if (!inode || (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) { + inode = filp->f_path.dentry->d_inode; + if ((!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) { LINFO(curlun, "invalid file type: %s\n", filename); goto out; } @@ -667,7 +666,7 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename) * If we can't read the file, it's no good. * If we can't write the file, use it read-only. 
*/ - if (!filp->f_op || !(filp->f_op->read || filp->f_op->aio_read)) { + if (!(filp->f_op->read || filp->f_op->aio_read)) { LINFO(curlun, "file not readable: %s\n", filename); goto out; } @@ -712,7 +711,6 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename) if (fsg_lun_is_open(curlun)) fsg_lun_close(curlun); - get_file(filp); curlun->blksize = blksize; curlun->blkbits = blkbits; curlun->ro = ro; @@ -720,10 +718,10 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename) curlun->file_length = size; curlun->num_sectors = num_sectors; LDBG(curlun, "open backing file: %s\n", filename); - rc = 0; + return 0; out: - filp_close(filp, current->files); + fput(filp); return rc; } diff --git a/trunk/drivers/usb/gadget/u_ether.c b/trunk/drivers/usb/gadget/u_ether.c index 90e82e288eb9..0e5230926154 100644 --- a/trunk/drivers/usb/gadget/u_ether.c +++ b/trunk/drivers/usb/gadget/u_ether.c @@ -669,6 +669,8 @@ static int eth_stop(struct net_device *net) spin_lock_irqsave(&dev->lock, flags); if (dev->port_usb) { struct gether *link = dev->port_usb; + const struct usb_endpoint_descriptor *in; + const struct usb_endpoint_descriptor *out; if (link->close) link->close(link); @@ -682,10 +684,14 @@ static int eth_stop(struct net_device *net) * their own pace; the network stack can handle old packets. * For the moment we leave this here, since it works. */ + in = link->in_ep->desc; + out = link->out_ep->desc; usb_ep_disable(link->in_ep); usb_ep_disable(link->out_ep); if (netif_carrier_ok(net)) { DBG(dev, "host still using in/out endpoints\n"); + link->in_ep->desc = in; + link->out_ep->desc = out; usb_ep_enable(link->in_ep); usb_ep_enable(link->out_ep); } diff --git a/trunk/drivers/usb/gadget/u_uac1.c b/trunk/drivers/usb/gadget/u_uac1.c index af9898982059..e0c5e88e03ed 100644 --- a/trunk/drivers/usb/gadget/u_uac1.c +++ b/trunk/drivers/usb/gadget/u_uac1.c @@ -275,17 +275,17 @@ static int gaudio_close_snd_dev(struct gaudio *gau) /* Close control device */ snd = &gau->control; if (snd->filp) - filp_close(snd->filp, current->files); + filp_close(snd->filp, NULL); /* Close PCM playback device and setup substream */ snd = &gau->playback; if (snd->filp) - filp_close(snd->filp, current->files); + filp_close(snd->filp, NULL); /* Close PCM capture device and setup substream */ snd = &gau->capture; if (snd->filp) - filp_close(snd->filp, current->files); + filp_close(snd->filp, NULL); return 0; } diff --git a/trunk/drivers/usb/host/ehci-omap.c b/trunk/drivers/usb/host/ehci-omap.c index bb55eb4a7d48..d7fe287d0678 100644 --- a/trunk/drivers/usb/host/ehci-omap.c +++ b/trunk/drivers/usb/host/ehci-omap.c @@ -56,15 +56,6 @@ #define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8 #define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0 -/* Errata i693 */ -static struct clk *utmi_p1_fck; -static struct clk *utmi_p2_fck; -static struct clk *xclk60mhsp1_ck; -static struct clk *xclk60mhsp2_ck; -static struct clk *usbhost_p1_fck; -static struct clk *usbhost_p2_fck; -static struct clk *init_60m_fclk; - /*-------------------------------------------------------------------------*/ static const struct hc_driver ehci_omap_hc_driver; @@ -80,40 +71,6 @@ static inline u32 ehci_read(void __iomem *base, u32 reg) return __raw_readl(base + reg); } -/* Erratum i693 workaround sequence */ -static void omap_ehci_erratum_i693(struct ehci_hcd *ehci) -{ - int ret = 0; - - /* Switch to the internal 60 MHz clock */ - ret = clk_set_parent(utmi_p1_fck, init_60m_fclk); - if (ret != 0) - ehci_err(ehci, "init_60m_fclk set parent" - "failed error:%d\n", 
ret); - - ret = clk_set_parent(utmi_p2_fck, init_60m_fclk); - if (ret != 0) - ehci_err(ehci, "init_60m_fclk set parent" - "failed error:%d\n", ret); - - clk_enable(usbhost_p1_fck); - clk_enable(usbhost_p2_fck); - - /* Wait 1ms and switch back to the external clock */ - mdelay(1); - ret = clk_set_parent(utmi_p1_fck, xclk60mhsp1_ck); - if (ret != 0) - ehci_err(ehci, "xclk60mhsp1_ck set parent" - "failed error:%d\n", ret); - - ret = clk_set_parent(utmi_p2_fck, xclk60mhsp2_ck); - if (ret != 0) - ehci_err(ehci, "xclk60mhsp2_ck set parent" - "failed error:%d\n", ret); - - clk_disable(usbhost_p1_fck); - clk_disable(usbhost_p2_fck); -} static void omap_ehci_soft_phy_reset(struct usb_hcd *hcd, u8 port) { @@ -195,50 +152,6 @@ static int omap_ehci_init(struct usb_hcd *hcd) return rc; } -static int omap_ehci_hub_control( - struct usb_hcd *hcd, - u16 typeReq, - u16 wValue, - u16 wIndex, - char *buf, - u16 wLength -) -{ - struct ehci_hcd *ehci = hcd_to_ehci(hcd); - u32 __iomem *status_reg = &ehci->regs->port_status[ - (wIndex & 0xff) - 1]; - u32 temp; - unsigned long flags; - int retval = 0; - - spin_lock_irqsave(&ehci->lock, flags); - - if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) { - temp = ehci_readl(ehci, status_reg); - if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) { - retval = -EPIPE; - goto done; - } - - temp &= ~PORT_WKCONN_E; - temp |= PORT_WKDISC_E | PORT_WKOC_E; - ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); - - omap_ehci_erratum_i693(ehci); - - set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports); - goto done; - } - - spin_unlock_irqrestore(&ehci->lock, flags); - - /* Handle the hub control events here */ - return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength); -done: - spin_unlock_irqrestore(&ehci->lock, flags); - return retval; -} - static void disable_put_regulator( struct ehci_hcd_omap_platform_data *pdata) { @@ -351,79 +264,9 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) goto err_pm_runtime; } - /* get clocks */ - utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk"); - if (IS_ERR(utmi_p1_fck)) { - ret = PTR_ERR(utmi_p1_fck); - dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret); - goto err_add_hcd; - } - - xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck"); - if (IS_ERR(xclk60mhsp1_ck)) { - ret = PTR_ERR(xclk60mhsp1_ck); - dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret); - goto err_utmi_p1_fck; - } - - utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk"); - if (IS_ERR(utmi_p2_fck)) { - ret = PTR_ERR(utmi_p2_fck); - dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret); - goto err_xclk60mhsp1_ck; - } - - xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck"); - if (IS_ERR(xclk60mhsp2_ck)) { - ret = PTR_ERR(xclk60mhsp2_ck); - dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret); - goto err_utmi_p2_fck; - } - - usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk"); - if (IS_ERR(usbhost_p1_fck)) { - ret = PTR_ERR(usbhost_p1_fck); - dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret); - goto err_xclk60mhsp2_ck; - } - - usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk"); - if (IS_ERR(usbhost_p2_fck)) { - ret = PTR_ERR(usbhost_p2_fck); - dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret); - goto err_usbhost_p1_fck; - } - - init_60m_fclk = clk_get(dev, "init_60m_fclk"); - if (IS_ERR(init_60m_fclk)) { - ret = PTR_ERR(init_60m_fclk); - dev_err(dev, "init_60m_fclk failed error:%d\n", ret); - goto err_usbhost_p2_fck; - } return 0; -err_usbhost_p2_fck: - clk_put(usbhost_p2_fck); - -err_usbhost_p1_fck: - clk_put(usbhost_p1_fck); - 
-err_xclk60mhsp2_ck: - clk_put(xclk60mhsp2_ck); - -err_utmi_p2_fck: - clk_put(utmi_p2_fck); - -err_xclk60mhsp1_ck: - clk_put(xclk60mhsp1_ck); - -err_utmi_p1_fck: - clk_put(utmi_p1_fck); - -err_add_hcd: - usb_remove_hcd(hcd); - err_pm_runtime: disable_put_regulator(pdata); pm_runtime_put_sync(dev); @@ -454,14 +297,6 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev) iounmap(hcd->regs); usb_put_hcd(hcd); - clk_put(utmi_p1_fck); - clk_put(utmi_p2_fck); - clk_put(xclk60mhsp1_ck); - clk_put(xclk60mhsp2_ck); - clk_put(usbhost_p1_fck); - clk_put(usbhost_p2_fck); - clk_put(init_60m_fclk); - pm_runtime_put_sync(dev); pm_runtime_disable(dev); @@ -532,7 +367,7 @@ static const struct hc_driver ehci_omap_hc_driver = { * root hub support */ .hub_status_data = ehci_hub_status_data, - .hub_control = omap_ehci_hub_control, + .hub_control = ehci_hub_control, .bus_suspend = ehci_bus_suspend, .bus_resume = ehci_bus_resume, diff --git a/trunk/drivers/usb/host/ehci-sead3.c b/trunk/drivers/usb/host/ehci-sead3.c index 58c96bd50d22..0c9e43cfaff5 100644 --- a/trunk/drivers/usb/host/ehci-sead3.c +++ b/trunk/drivers/usb/host/ehci-sead3.c @@ -40,7 +40,7 @@ static int ehci_sead3_setup(struct usb_hcd *hcd) ehci->need_io_watchdog = 0; /* Set burst length to 16 words. */ - ehci_writel(ehci, 0x1010, &ehci->regs->reserved[1]); + ehci_writel(ehci, 0x1010, &ehci->regs->reserved1[1]); return ret; } diff --git a/trunk/drivers/usb/host/ehci-tegra.c b/trunk/drivers/usb/host/ehci-tegra.c index 950e95efa381..26dedb30ad0b 100644 --- a/trunk/drivers/usb/host/ehci-tegra.c +++ b/trunk/drivers/usb/host/ehci-tegra.c @@ -799,11 +799,12 @@ static int tegra_ehci_remove(struct platform_device *pdev) #endif usb_remove_hcd(hcd); - usb_put_hcd(hcd); tegra_usb_phy_close(tegra->phy); iounmap(hcd->regs); + usb_put_hcd(hcd); + clk_disable_unprepare(tegra->clk); clk_put(tegra->clk); diff --git a/trunk/drivers/usb/host/isp1362-hcd.c b/trunk/drivers/usb/host/isp1362-hcd.c index 2ed112d3e159..256326322cfd 100644 --- a/trunk/drivers/usb/host/isp1362-hcd.c +++ b/trunk/drivers/usb/host/isp1362-hcd.c @@ -543,12 +543,12 @@ static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep) usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid, short_ok ? 
"" : "not_", PTD_GET_COUNT(ptd), ep->maxpacket, len); + /* save the data underrun error code for later and + * proceed with the status stage + */ + urb->actual_length += PTD_GET_COUNT(ptd); if (usb_pipecontrol(urb->pipe)) { ep->nextpid = USB_PID_ACK; - /* save the data underrun error code for later and - * proceed with the status stage - */ - urb->actual_length += PTD_GET_COUNT(ptd); BUG_ON(urb->actual_length > urb->transfer_buffer_length); if (urb->status == -EINPROGRESS) diff --git a/trunk/drivers/usb/host/ohci-omap.c b/trunk/drivers/usb/host/ohci-omap.c index e7d75d295988..f8b2d91851f7 100644 --- a/trunk/drivers/usb/host/ohci-omap.c +++ b/trunk/drivers/usb/host/ohci-omap.c @@ -403,8 +403,6 @@ static int usb_hcd_omap_probe (const struct hc_driver *driver, static inline void usb_hcd_omap_remove (struct usb_hcd *hcd, struct platform_device *pdev) { - struct ohci_hcd *ohci = hcd_to_ohci (hcd); - usb_remove_hcd(hcd); if (!IS_ERR_OR_NULL(hcd->phy)) { (void) otg_set_host(hcd->phy->otg, 0); diff --git a/trunk/drivers/usb/host/pci-quirks.c b/trunk/drivers/usb/host/pci-quirks.c index df0828cb2aa3..c5e9e4a76f14 100644 --- a/trunk/drivers/usb/host/pci-quirks.c +++ b/trunk/drivers/usb/host/pci-quirks.c @@ -800,6 +800,13 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev) } EXPORT_SYMBOL_GPL(usb_enable_xhci_ports); +void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) +{ + pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0); + pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0); +} +EXPORT_SYMBOL_GPL(usb_disable_xhci_ports); + /** * PCI Quirks for xHCI. * diff --git a/trunk/drivers/usb/host/pci-quirks.h b/trunk/drivers/usb/host/pci-quirks.h index b1002a8ef96f..ef004a5de20f 100644 --- a/trunk/drivers/usb/host/pci-quirks.h +++ b/trunk/drivers/usb/host/pci-quirks.h @@ -10,6 +10,7 @@ void usb_amd_quirk_pll_disable(void); void usb_amd_quirk_pll_enable(void); bool usb_is_intel_switchable_xhci(struct pci_dev *pdev); void usb_enable_xhci_ports(struct pci_dev *xhci_pdev); +void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); #else static inline void usb_amd_quirk_pll_disable(void) {} static inline void usb_amd_quirk_pll_enable(void) {} diff --git a/trunk/drivers/usb/host/xhci-pci.c b/trunk/drivers/usb/host/xhci-pci.c index 18b231b0c5d3..9bfd4ca1153c 100644 --- a/trunk/drivers/usb/host/xhci-pci.c +++ b/trunk/drivers/usb/host/xhci-pci.c @@ -94,11 +94,21 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_EP_LIMIT_QUIRK; xhci->limit_active_eps = 64; xhci->quirks |= XHCI_SW_BW_CHECKING; + /* + * PPT desktop boards DH77EB and DH77DF will power back on after + * a few seconds of being shutdown. The fix for this is to + * switch the ports from xHCI to EHCI on shutdown. We can't use + * DMI information to find those particular boards (since each + * vendor will change the board name), so we have to key off all + * PPT chipsets. 
+ */ + xhci->quirks |= XHCI_SPURIOUS_REBOOT; } if (pdev->vendor == PCI_VENDOR_ID_ETRON && pdev->device == PCI_DEVICE_ID_ASROCK_P67) { xhci->quirks |= XHCI_RESET_ON_RESUME; xhci_dbg(xhci, "QUIRK: Resetting on resume\n"); + xhci->quirks |= XHCI_TRUST_TX_LENGTH; } if (pdev->vendor == PCI_VENDOR_ID_VIA) xhci->quirks |= XHCI_RESET_ON_RESUME; diff --git a/trunk/drivers/usb/host/xhci-ring.c b/trunk/drivers/usb/host/xhci-ring.c index 8275645889da..643c2f3f3e73 100644 --- a/trunk/drivers/usb/host/xhci-ring.c +++ b/trunk/drivers/usb/host/xhci-ring.c @@ -145,29 +145,37 @@ static void next_trb(struct xhci_hcd *xhci, */ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring) { - union xhci_trb *next; unsigned long long addr; ring->deq_updates++; - /* If this is not event ring, there is one more usable TRB */ + /* + * If this is not event ring, and the dequeue pointer + * is not on a link TRB, there is one more usable TRB + */ if (ring->type != TYPE_EVENT && !last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) ring->num_trbs_free++; - next = ++(ring->dequeue); - /* Update the dequeue pointer further if that was a link TRB or we're at - * the end of an event ring segment (which doesn't have link TRBS) - */ - while (last_trb(xhci, ring, ring->deq_seg, next)) { - if (ring->type == TYPE_EVENT && last_trb_on_last_seg(xhci, - ring, ring->deq_seg, next)) { - ring->cycle_state = (ring->cycle_state ? 0 : 1); + do { + /* + * Update the dequeue pointer further if that was a link TRB or + * we're at the end of an event ring segment (which doesn't have + * link TRBS) + */ + if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) { + if (ring->type == TYPE_EVENT && + last_trb_on_last_seg(xhci, ring, + ring->deq_seg, ring->dequeue)) { + ring->cycle_state = (ring->cycle_state ? 0 : 1); + } + ring->deq_seg = ring->deq_seg->next; + ring->dequeue = ring->deq_seg->trbs; + } else { + ring->dequeue++; } - ring->deq_seg = ring->deq_seg->next; - ring->dequeue = ring->deq_seg->trbs; - next = ring->dequeue; - } + } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)); + addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue); } @@ -2073,8 +2081,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, if (xhci->quirks & XHCI_TRUST_TX_LENGTH) trb_comp_code = COMP_SHORT_TX; else - xhci_warn(xhci, "WARN Successful completion on short TX: " - "needs XHCI_TRUST_TX_LENGTH quirk?\n"); + xhci_warn_ratelimited(xhci, + "WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n"); case COMP_SHORT_TX: break; case COMP_STOP: diff --git a/trunk/drivers/usb/host/xhci.c b/trunk/drivers/usb/host/xhci.c index 7648b2d4b268..c59d5b5b6c7d 100644 --- a/trunk/drivers/usb/host/xhci.c +++ b/trunk/drivers/usb/host/xhci.c @@ -166,7 +166,7 @@ int xhci_reset(struct xhci_hcd *xhci) xhci_writel(xhci, command, &xhci->op_regs->command); ret = handshake(xhci, &xhci->op_regs->command, - CMD_RESET, 0, 250 * 1000); + CMD_RESET, 0, 10 * 1000 * 1000); if (ret) return ret; @@ -175,7 +175,8 @@ int xhci_reset(struct xhci_hcd *xhci) * xHCI cannot write to any doorbells or operational registers other * than status until the "Controller Not Ready" flag is cleared. 
*/ - ret = handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000); + ret = handshake(xhci, &xhci->op_regs->status, + STS_CNR, 0, 10 * 1000 * 1000); for (i = 0; i < 2; ++i) { xhci->bus_state[i].port_c_suspend = 0; @@ -658,6 +659,9 @@ void xhci_shutdown(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); + if (xhci->quirks && XHCI_SPURIOUS_REBOOT) + usb_disable_xhci_ports(to_pci_dev(hcd->self.controller)); + spin_lock_irq(&xhci->lock); xhci_halt(xhci); spin_unlock_irq(&xhci->lock); diff --git a/trunk/drivers/usb/host/xhci.h b/trunk/drivers/usb/host/xhci.h index 55c0785810c9..c713256297ac 100644 --- a/trunk/drivers/usb/host/xhci.h +++ b/trunk/drivers/usb/host/xhci.h @@ -1494,6 +1494,7 @@ struct xhci_hcd { #define XHCI_TRUST_TX_LENGTH (1 << 10) #define XHCI_LPM_SUPPORT (1 << 11) #define XHCI_INTEL_HOST (1 << 12) +#define XHCI_SPURIOUS_REBOOT (1 << 13) unsigned int num_active_eps; unsigned int limit_active_eps; /* There are two roothubs to keep track of bus suspend info for */ @@ -1537,6 +1538,8 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci) dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args) #define xhci_warn(xhci, fmt, args...) \ dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args) +#define xhci_warn_ratelimited(xhci, fmt, args...) \ + dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args) /* TODO: copied from ehci.h - can be refactored? */ /* xHCI spec says all registers are little endian */ diff --git a/trunk/drivers/usb/misc/emi62.c b/trunk/drivers/usb/misc/emi62.c index ff08015b230c..ae794b90766b 100644 --- a/trunk/drivers/usb/misc/emi62.c +++ b/trunk/drivers/usb/misc/emi62.c @@ -232,7 +232,7 @@ static int emi62_load_firmware (struct usb_device *dev) return err; } -static const struct usb_device_id id_table[] __devinitconst = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) }, { } /* Terminating entry */ }; diff --git a/trunk/drivers/usb/musb/Kconfig b/trunk/drivers/usb/musb/Kconfig index ef0c3f9f0947..6259f0d99324 100644 --- a/trunk/drivers/usb/musb/Kconfig +++ b/trunk/drivers/usb/musb/Kconfig @@ -8,7 +8,7 @@ config USB_MUSB_HDRC tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' depends on USB && USB_GADGET select NOP_USB_XCEIV if (ARCH_DAVINCI || MACH_OMAP3EVM || BLACKFIN) - select NOP_USB_XCEIV if (SOC_OMAPTI81XX || SOC_OMAPAM33XX) + select NOP_USB_XCEIV if (SOC_TI81XX || SOC_AM33XX) select TWL4030_USB if MACH_OMAP_3430SDP select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA select USB_OTG_UTILS @@ -57,7 +57,7 @@ config USB_MUSB_AM35X config USB_MUSB_DSPS tristate "TI DSPS platforms" - depends on SOC_OMAPTI81XX || SOC_OMAPAM33XX + depends on SOC_TI81XX || SOC_AM33XX config USB_MUSB_BLACKFIN tristate "Blackfin" diff --git a/trunk/drivers/usb/musb/musb_dsps.c b/trunk/drivers/usb/musb/musb_dsps.c index 217808d9fbe1..494772fc9e23 100644 --- a/trunk/drivers/usb/musb/musb_dsps.c +++ b/trunk/drivers/usb/musb/musb_dsps.c @@ -479,9 +479,9 @@ static int __devinit dsps_create_musb_pdev(struct dsps_glue *glue, u8 id) ret = -ENODEV; goto err0; } - strcpy((u8 *)res->name, "mc"); res->parent = NULL; resources[1] = *res; + resources[1].name = "mc"; /* allocate the child platform device */ musb = platform_device_alloc("musb-hdrc", -1); @@ -566,27 +566,28 @@ static int __devinit dsps_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, glue); - /* create the child platform device for first instances of musb */ - ret = 
dsps_create_musb_pdev(glue, 0); - if (ret != 0) { - dev_err(&pdev->dev, "failed to create child pdev\n"); - goto err2; - } - /* enable the usbss clocks */ pm_runtime_enable(&pdev->dev); ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) { dev_err(&pdev->dev, "pm_runtime_get_sync FAILED"); + goto err2; + } + + /* create the child platform device for first instances of musb */ + ret = dsps_create_musb_pdev(glue, 0); + if (ret != 0) { + dev_err(&pdev->dev, "failed to create child pdev\n"); goto err3; } return 0; err3: - pm_runtime_disable(&pdev->dev); + pm_runtime_put(&pdev->dev); err2: + pm_runtime_disable(&pdev->dev); kfree(glue->wrp); err1: kfree(glue); diff --git a/trunk/drivers/usb/renesas_usbhs/common.c b/trunk/drivers/usb/renesas_usbhs/common.c index 8c9bb1ad3069..681da06170c2 100644 --- a/trunk/drivers/usb/renesas_usbhs/common.c +++ b/trunk/drivers/usb/renesas_usbhs/common.c @@ -603,12 +603,12 @@ static int usbhsc_resume(struct device *dev) struct usbhs_priv *priv = dev_get_drvdata(dev); struct platform_device *pdev = usbhs_priv_to_pdev(priv); - usbhs_platform_call(priv, phy_reset, pdev); - if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL)) usbhsc_power_ctrl(priv, 1); - usbhsc_hotplug(priv); + usbhs_platform_call(priv, phy_reset, pdev); + + usbhsc_drvcllbck_notify_hotplug(pdev); return 0; } diff --git a/trunk/drivers/usb/renesas_usbhs/mod_host.c b/trunk/drivers/usb/renesas_usbhs/mod_host.c index 1834cf50888c..9b69a1323294 100644 --- a/trunk/drivers/usb/renesas_usbhs/mod_host.c +++ b/trunk/drivers/usb/renesas_usbhs/mod_host.c @@ -1266,6 +1266,12 @@ static int usbhsh_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, return ret; } +static int usbhsh_bus_nop(struct usb_hcd *hcd) +{ + /* nothing to do */ + return 0; +} + static struct hc_driver usbhsh_driver = { .description = usbhsh_hcd_name, .hcd_priv_size = sizeof(struct usbhsh_hpriv), @@ -1290,6 +1296,8 @@ static struct hc_driver usbhsh_driver = { */ .hub_status_data = usbhsh_hub_status_data, .hub_control = usbhsh_hub_control, + .bus_suspend = usbhsh_bus_nop, + .bus_resume = usbhsh_bus_nop, }; /* diff --git a/trunk/drivers/usb/serial/bus.c b/trunk/drivers/usb/serial/bus.c index f398d1e34474..c15f2e7cefc7 100644 --- a/trunk/drivers/usb/serial/bus.c +++ b/trunk/drivers/usb/serial/bus.c @@ -61,18 +61,23 @@ static int usb_serial_device_probe(struct device *dev) goto exit; } + /* make sure suspend/resume doesn't race against port_probe */ + retval = usb_autopm_get_interface(port->serial->interface); + if (retval) + goto exit; + driver = port->serial->type; if (driver->port_probe) { retval = driver->port_probe(port); if (retval) - goto exit; + goto exit_with_autopm; } retval = device_create_file(dev, &dev_attr_port_number); if (retval) { if (driver->port_remove) retval = driver->port_remove(port); - goto exit; + goto exit_with_autopm; } minor = port->number; @@ -81,6 +86,8 @@ static int usb_serial_device_probe(struct device *dev) "%s converter now attached to ttyUSB%d\n", driver->description, minor); +exit_with_autopm: + usb_autopm_put_interface(port->serial->interface); exit: return retval; } @@ -96,6 +103,9 @@ static int usb_serial_device_remove(struct device *dev) if (!port) return -ENODEV; + /* make sure suspend/resume doesn't race against port_remove */ + usb_autopm_get_interface(port->serial->interface); + device_remove_file(&port->dev, &dev_attr_port_number); driver = port->serial->type; @@ -107,6 +117,7 @@ static int usb_serial_device_remove(struct device *dev) dev_info(dev, "%s converter now disconnected from ttyUSB%d\n", 
driver->description, minor); + usb_autopm_put_interface(port->serial->interface); return retval; } diff --git a/trunk/drivers/usb/serial/ftdi_sio.c b/trunk/drivers/usb/serial/ftdi_sio.c index bc912e5a3beb..5620db6469e5 100644 --- a/trunk/drivers/usb/serial/ftdi_sio.c +++ b/trunk/drivers/usb/serial/ftdi_sio.c @@ -811,6 +811,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) }, { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) }, { USB_DEVICE(PI_VID, PI_E861_PID) }, + { USB_DEVICE(KONDO_VID, KONDO_USB_SERIAL_PID) }, { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) }, { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, diff --git a/trunk/drivers/usb/serial/ftdi_sio_ids.h b/trunk/drivers/usb/serial/ftdi_sio_ids.h index 5661c7e2d415..5dd96ca6c380 100644 --- a/trunk/drivers/usb/serial/ftdi_sio_ids.h +++ b/trunk/drivers/usb/serial/ftdi_sio_ids.h @@ -794,6 +794,13 @@ #define PI_VID 0x1a72 /* Vendor ID */ #define PI_E861_PID 0x1008 /* E-861 piezo controller USB connection */ +/* + * Kondo Kagaku Co.Ltd. + * http://www.kondo-robot.com/EN + */ +#define KONDO_VID 0x165c +#define KONDO_USB_SERIAL_PID 0x0002 + /* * Bayer Ascensia Contour blood glucose meter USB-converter cable. * http://winglucofacts.com/cables/ diff --git a/trunk/drivers/usb/serial/ipw.c b/trunk/drivers/usb/serial/ipw.c index 5811d34b6c6b..2cb30c535839 100644 --- a/trunk/drivers/usb/serial/ipw.c +++ b/trunk/drivers/usb/serial/ipw.c @@ -227,7 +227,6 @@ static void ipw_release(struct usb_serial *serial) { struct usb_wwan_intf_private *data = usb_get_serial_data(serial); - usb_wwan_release(serial); usb_set_serial_data(serial, NULL); kfree(data); } @@ -309,12 +308,12 @@ static struct usb_serial_driver ipw_device = { .description = "IPWireless converter", .id_table = id_table, .num_ports = 1, - .disconnect = usb_wwan_disconnect, .open = ipw_open, .close = ipw_close, .probe = ipw_probe, .attach = usb_wwan_startup, .release = ipw_release, + .port_remove = usb_wwan_port_remove, .dtr_rts = ipw_dtr_rts, .write = usb_wwan_write, }; diff --git a/trunk/drivers/usb/serial/mos7840.c b/trunk/drivers/usb/serial/mos7840.c index 57eca2448424..2f6da1e89bfa 100644 --- a/trunk/drivers/usb/serial/mos7840.c +++ b/trunk/drivers/usb/serial/mos7840.c @@ -82,8 +82,7 @@ * Defines used for sending commands to port */ -#define WAIT_FOR_EVER (HZ * 0) /* timeout urb is wait for ever */ -#define MOS_WDR_TIMEOUT (HZ * 5) /* default urb timeout */ +#define MOS_WDR_TIMEOUT 5000 /* default urb timeout */ #define MOS_PORT1 0x0200 #define MOS_PORT2 0x0300 @@ -1232,9 +1231,12 @@ static int mos7840_chars_in_buffer(struct tty_struct *tty) return 0; spin_lock_irqsave(&mos7840_port->pool_lock, flags); - for (i = 0; i < NUM_URBS; ++i) - if (mos7840_port->busy[i]) - chars += URB_TRANSFER_BUFFER_SIZE; + for (i = 0; i < NUM_URBS; ++i) { + if (mos7840_port->busy[i]) { + struct urb *urb = mos7840_port->write_urb_pool[i]; + chars += urb->transfer_buffer_length; + } + } spin_unlock_irqrestore(&mos7840_port->pool_lock, flags); dbg("%s - returns %d", __func__, chars); return chars; @@ -1344,7 +1346,7 @@ static void mos7840_close(struct usb_serial_port *port) static void mos7840_block_until_chase_response(struct tty_struct *tty, struct moschip_port *mos7840_port) { - int timeout = 1 * HZ; + int timeout = msecs_to_jiffies(1000); int wait = 10; int count; @@ -2672,7 +2674,7 @@ static int mos7840_startup(struct usb_serial *serial) /* setting configuration feature to one */ 
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), - (__u8) 0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5 * HZ); + (__u8) 0x03, 0x00, 0x01, 0x00, NULL, 0x00, MOS_WDR_TIMEOUT); return 0; error: for (/* nothing */; i >= 0; i--) { diff --git a/trunk/drivers/usb/serial/option.c b/trunk/drivers/usb/serial/option.c index 08ff9b862049..cc40f47ecea1 100644 --- a/trunk/drivers/usb/serial/option.c +++ b/trunk/drivers/usb/serial/option.c @@ -80,85 +80,9 @@ static void option_instat_callback(struct urb *urb); #define OPTION_PRODUCT_GTM380_MODEM 0x7201 #define HUAWEI_VENDOR_ID 0x12D1 -#define HUAWEI_PRODUCT_E600 0x1001 -#define HUAWEI_PRODUCT_E220 0x1003 -#define HUAWEI_PRODUCT_E220BIS 0x1004 -#define HUAWEI_PRODUCT_E1401 0x1401 -#define HUAWEI_PRODUCT_E1402 0x1402 -#define HUAWEI_PRODUCT_E1403 0x1403 -#define HUAWEI_PRODUCT_E1404 0x1404 -#define HUAWEI_PRODUCT_E1405 0x1405 -#define HUAWEI_PRODUCT_E1406 0x1406 -#define HUAWEI_PRODUCT_E1407 0x1407 -#define HUAWEI_PRODUCT_E1408 0x1408 -#define HUAWEI_PRODUCT_E1409 0x1409 -#define HUAWEI_PRODUCT_E140A 0x140A -#define HUAWEI_PRODUCT_E140B 0x140B -#define HUAWEI_PRODUCT_E140C 0x140C -#define HUAWEI_PRODUCT_E140D 0x140D -#define HUAWEI_PRODUCT_E140E 0x140E -#define HUAWEI_PRODUCT_E140F 0x140F -#define HUAWEI_PRODUCT_E1410 0x1410 -#define HUAWEI_PRODUCT_E1411 0x1411 -#define HUAWEI_PRODUCT_E1412 0x1412 -#define HUAWEI_PRODUCT_E1413 0x1413 -#define HUAWEI_PRODUCT_E1414 0x1414 -#define HUAWEI_PRODUCT_E1415 0x1415 -#define HUAWEI_PRODUCT_E1416 0x1416 -#define HUAWEI_PRODUCT_E1417 0x1417 -#define HUAWEI_PRODUCT_E1418 0x1418 -#define HUAWEI_PRODUCT_E1419 0x1419 -#define HUAWEI_PRODUCT_E141A 0x141A -#define HUAWEI_PRODUCT_E141B 0x141B -#define HUAWEI_PRODUCT_E141C 0x141C -#define HUAWEI_PRODUCT_E141D 0x141D -#define HUAWEI_PRODUCT_E141E 0x141E -#define HUAWEI_PRODUCT_E141F 0x141F -#define HUAWEI_PRODUCT_E1420 0x1420 -#define HUAWEI_PRODUCT_E1421 0x1421 -#define HUAWEI_PRODUCT_E1422 0x1422 -#define HUAWEI_PRODUCT_E1423 0x1423 -#define HUAWEI_PRODUCT_E1424 0x1424 -#define HUAWEI_PRODUCT_E1425 0x1425 -#define HUAWEI_PRODUCT_E1426 0x1426 -#define HUAWEI_PRODUCT_E1427 0x1427 -#define HUAWEI_PRODUCT_E1428 0x1428 -#define HUAWEI_PRODUCT_E1429 0x1429 -#define HUAWEI_PRODUCT_E142A 0x142A -#define HUAWEI_PRODUCT_E142B 0x142B -#define HUAWEI_PRODUCT_E142C 0x142C -#define HUAWEI_PRODUCT_E142D 0x142D -#define HUAWEI_PRODUCT_E142E 0x142E -#define HUAWEI_PRODUCT_E142F 0x142F -#define HUAWEI_PRODUCT_E1430 0x1430 -#define HUAWEI_PRODUCT_E1431 0x1431 -#define HUAWEI_PRODUCT_E1432 0x1432 -#define HUAWEI_PRODUCT_E1433 0x1433 -#define HUAWEI_PRODUCT_E1434 0x1434 -#define HUAWEI_PRODUCT_E1435 0x1435 -#define HUAWEI_PRODUCT_E1436 0x1436 -#define HUAWEI_PRODUCT_E1437 0x1437 -#define HUAWEI_PRODUCT_E1438 0x1438 -#define HUAWEI_PRODUCT_E1439 0x1439 -#define HUAWEI_PRODUCT_E143A 0x143A -#define HUAWEI_PRODUCT_E143B 0x143B -#define HUAWEI_PRODUCT_E143C 0x143C -#define HUAWEI_PRODUCT_E143D 0x143D -#define HUAWEI_PRODUCT_E143E 0x143E -#define HUAWEI_PRODUCT_E143F 0x143F #define HUAWEI_PRODUCT_K4505 0x1464 #define HUAWEI_PRODUCT_K3765 0x1465 -#define HUAWEI_PRODUCT_E14AC 0x14AC -#define HUAWEI_PRODUCT_K3806 0x14AE #define HUAWEI_PRODUCT_K4605 0x14C6 -#define HUAWEI_PRODUCT_K5005 0x14C8 -#define HUAWEI_PRODUCT_K3770 0x14C9 -#define HUAWEI_PRODUCT_K3771 0x14CA -#define HUAWEI_PRODUCT_K4510 0x14CB -#define HUAWEI_PRODUCT_K4511 0x14CC -#define HUAWEI_PRODUCT_ETS1220 0x1803 -#define HUAWEI_PRODUCT_E353 0x1506 -#define HUAWEI_PRODUCT_E173S 0x1C05 #define QUANTA_VENDOR_ID 0x0408 #define 
QUANTA_PRODUCT_Q101 0xEA02 @@ -615,104 +539,123 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1401, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1402, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1403, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1404, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1405, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1406, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1407, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1408, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1409, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140A, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140B, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140C, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140D, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140E, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140F, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1410, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1411, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1412, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1413, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1414, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1415, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1416, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1417, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1418, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1419, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141A, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141B, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141C, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141D, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141E, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141F, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 
HUAWEI_PRODUCT_E1420, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1421, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1422, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1423, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1424, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1425, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1426, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1427, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1428, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1429, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142A, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142B, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142C, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142D, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142E, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142F, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1430, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1431, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1432, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1433, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1434, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1435, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1436, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1437, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1438, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1439, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143A, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143B, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143C, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 
0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x31) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x32) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x31) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x32) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x33) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x32) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x31) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x32) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) }, /* E398 3G Modem */ - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) }, /* E398 3G PC UI Interface */ - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) }, /* E398 3G Application Interface */ + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x01) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x03) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x04) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x05) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x06) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0D) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0E) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x10) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x12) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x13) }, + { 
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x14) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x15) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x17) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x18) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x19) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1C) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x31) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x32) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x33) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x34) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x35) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x36) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3D) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3E) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x48) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x49) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4C) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x61) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x62) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x63) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x64) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x65) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x66) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7C) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x01) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x02) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x03) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x04) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x05) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x06) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 
0xff, 0x02, 0x0D) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0E) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x10) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x12) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x13) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x14) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x15) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x17) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x18) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x19) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1C) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x31) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x32) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x33) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x34) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x35) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x36) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3D) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3E) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x48) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x49) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4C) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x61) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x62) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x63) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x64) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x65) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x66) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) }, + + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, @@ -943,6 +886,8 @@ 
static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) }, @@ -1297,8 +1242,8 @@ static struct usb_serial_driver option_1port_device = { .tiocmset = usb_wwan_tiocmset, .ioctl = usb_wwan_ioctl, .attach = usb_wwan_startup, - .disconnect = usb_wwan_disconnect, .release = option_release, + .port_remove = usb_wwan_port_remove, .read_int_callback = option_instat_callback, #ifdef CONFIG_PM .suspend = usb_wwan_suspend, @@ -1414,8 +1359,6 @@ static void option_release(struct usb_serial *serial) struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial); struct option_private *priv = intfdata->private; - usb_wwan_release(serial); - kfree(priv); kfree(intfdata); } diff --git a/trunk/drivers/usb/serial/qcserial.c b/trunk/drivers/usb/serial/qcserial.c index 8d103019d6aa..bfd50779f0c9 100644 --- a/trunk/drivers/usb/serial/qcserial.c +++ b/trunk/drivers/usb/serial/qcserial.c @@ -199,43 +199,49 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) /* default to enabling interface */ altsetting = 0; - switch (ifnum) { - /* Composite mode; don't bind to the QMI/net interface as that - * gets handled by other drivers. - */ + /* Composite mode; don't bind to the QMI/net interface as that + * gets handled by other drivers. 
+ */ + + if (is_gobi1k) { /* Gobi 1K USB layout: * 0: serial port (doesn't respond) * 1: serial port (doesn't respond) * 2: AT-capable modem port * 3: QMI/net - * - * Gobi 2K+ USB layout: + */ + if (ifnum == 2) + dev_dbg(dev, "Modem port found\n"); + else + altsetting = -1; + } else { + /* Gobi 2K+ USB layout: * 0: QMI/net * 1: DM/DIAG (use libqcdm from ModemManager for communication) * 2: AT-capable modem port * 3: NMEA */ - - case 1: - if (is_gobi1k) + switch (ifnum) { + case 0: + /* Don't claim the QMI/net interface */ altsetting = -1; - else + break; + case 1: dev_dbg(dev, "Gobi 2K+ DM/DIAG interface found\n"); - break; - case 2: - dev_dbg(dev, "Modem port found\n"); - break; - case 3: - if (is_gobi1k) - altsetting = -1; - else + break; + case 2: + dev_dbg(dev, "Modem port found\n"); + break; + case 3: /* * NMEA (serial line 9600 8N1) * # echo "\$GPS_START" > /dev/ttyUSBx * # echo "\$GPS_STOP" > /dev/ttyUSBx */ dev_dbg(dev, "Gobi 2K+ NMEA GPS interface found\n"); + break; + } } done: @@ -262,8 +268,7 @@ static void qc_release(struct usb_serial *serial) { struct usb_wwan_intf_private *priv = usb_get_serial_data(serial); - /* Call usb_wwan release & free the private data allocated in qcprobe */ - usb_wwan_release(serial); + /* Free the private data allocated in qcprobe */ usb_set_serial_data(serial, NULL); kfree(priv); } @@ -283,8 +288,8 @@ static struct usb_serial_driver qcdevice = { .write_room = usb_wwan_write_room, .chars_in_buffer = usb_wwan_chars_in_buffer, .attach = usb_wwan_startup, - .disconnect = usb_wwan_disconnect, .release = qc_release, + .port_remove = usb_wwan_port_remove, #ifdef CONFIG_PM .suspend = usb_wwan_suspend, .resume = usb_wwan_resume, diff --git a/trunk/drivers/usb/serial/usb-wwan.h b/trunk/drivers/usb/serial/usb-wwan.h index c47b6ec03063..1f034d2397c6 100644 --- a/trunk/drivers/usb/serial/usb-wwan.h +++ b/trunk/drivers/usb/serial/usb-wwan.h @@ -9,8 +9,7 @@ extern void usb_wwan_dtr_rts(struct usb_serial_port *port, int on); extern int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port); extern void usb_wwan_close(struct usb_serial_port *port); extern int usb_wwan_startup(struct usb_serial *serial); -extern void usb_wwan_disconnect(struct usb_serial *serial); -extern void usb_wwan_release(struct usb_serial *serial); +extern int usb_wwan_port_remove(struct usb_serial_port *port); extern int usb_wwan_write_room(struct tty_struct *tty); extern void usb_wwan_set_termios(struct tty_struct *tty, struct usb_serial_port *port, diff --git a/trunk/drivers/usb/serial/usb_wwan.c b/trunk/drivers/usb/serial/usb_wwan.c index f35971dff4a5..6855d5ed0331 100644 --- a/trunk/drivers/usb/serial/usb_wwan.c +++ b/trunk/drivers/usb/serial/usb_wwan.c @@ -565,62 +565,52 @@ int usb_wwan_startup(struct usb_serial *serial) } EXPORT_SYMBOL(usb_wwan_startup); -static void stop_read_write_urbs(struct usb_serial *serial) +int usb_wwan_port_remove(struct usb_serial_port *port) { - int i, j; - struct usb_serial_port *port; + int i; struct usb_wwan_port_private *portdata; - /* Stop reading/writing urbs */ - for (i = 0; i < serial->num_ports; ++i) { - port = serial->port[i]; - portdata = usb_get_serial_port_data(port); - for (j = 0; j < N_IN_URB; j++) - usb_kill_urb(portdata->in_urbs[j]); - for (j = 0; j < N_OUT_URB; j++) - usb_kill_urb(portdata->out_urbs[j]); + portdata = usb_get_serial_port_data(port); + usb_set_serial_port_data(port, NULL); + + /* Stop reading/writing urbs and free them */ + for (i = 0; i < N_IN_URB; i++) { + usb_kill_urb(portdata->in_urbs[i]); + 
usb_free_urb(portdata->in_urbs[i]); + free_page((unsigned long)portdata->in_buffer[i]); + } + for (i = 0; i < N_OUT_URB; i++) { + usb_kill_urb(portdata->out_urbs[i]); + usb_free_urb(portdata->out_urbs[i]); + kfree(portdata->out_buffer[i]); } -} -void usb_wwan_disconnect(struct usb_serial *serial) -{ - stop_read_write_urbs(serial); + /* Now free port private data */ + kfree(portdata); + return 0; } -EXPORT_SYMBOL(usb_wwan_disconnect); +EXPORT_SYMBOL(usb_wwan_port_remove); -void usb_wwan_release(struct usb_serial *serial) +#ifdef CONFIG_PM +static void stop_read_write_urbs(struct usb_serial *serial) { int i, j; struct usb_serial_port *port; struct usb_wwan_port_private *portdata; - /* Now free them */ + /* Stop reading/writing urbs */ for (i = 0; i < serial->num_ports; ++i) { port = serial->port[i]; portdata = usb_get_serial_port_data(port); - - for (j = 0; j < N_IN_URB; j++) { - usb_free_urb(portdata->in_urbs[j]); - free_page((unsigned long) - portdata->in_buffer[j]); - portdata->in_urbs[j] = NULL; - } - for (j = 0; j < N_OUT_URB; j++) { - usb_free_urb(portdata->out_urbs[j]); - kfree(portdata->out_buffer[j]); - portdata->out_urbs[j] = NULL; - } - } - - /* Now free per port private data */ - for (i = 0; i < serial->num_ports; i++) { - port = serial->port[i]; - kfree(usb_get_serial_port_data(port)); + if (!portdata) + continue; + for (j = 0; j < N_IN_URB; j++) + usb_kill_urb(portdata->in_urbs[j]); + for (j = 0; j < N_OUT_URB; j++) + usb_kill_urb(portdata->out_urbs[j]); } } -EXPORT_SYMBOL(usb_wwan_release); -#ifdef CONFIG_PM int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message) { struct usb_wwan_intf_private *intfdata = serial->private; @@ -712,7 +702,7 @@ int usb_wwan_resume(struct usb_serial *serial) /* skip closed ports */ spin_lock_irq(&intfdata->susp_lock); - if (!portdata->opened) { + if (!portdata || !portdata->opened) { spin_unlock_irq(&intfdata->susp_lock); continue; } diff --git a/trunk/drivers/vfio/vfio.c b/trunk/drivers/vfio/vfio.c index 9591e2b509d7..17830c9c7cc6 100644 --- a/trunk/drivers/vfio/vfio.c +++ b/trunk/drivers/vfio/vfio.c @@ -264,6 +264,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group) return group; } +/* called with vfio.group_lock held */ static void vfio_group_release(struct kref *kref) { struct vfio_group *group = container_of(kref, struct vfio_group, kref); @@ -287,13 +288,7 @@ static void vfio_group_release(struct kref *kref) static void vfio_group_put(struct vfio_group *group) { - mutex_lock(&vfio.group_lock); - /* - * Release needs to unlock to unregister the notifier, so only - * unlock if not released. 
- */ - if (!kref_put(&group->kref, vfio_group_release)) - mutex_unlock(&vfio.group_lock); + kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock); } /* Assume group_lock or group reference is held */ @@ -401,7 +396,6 @@ static void vfio_device_release(struct kref *kref) struct vfio_device, kref); struct vfio_group *group = device->group; - mutex_lock(&group->device_lock); list_del(&device->group_next); mutex_unlock(&group->device_lock); @@ -416,8 +410,9 @@ static void vfio_device_release(struct kref *kref) /* Device reference always implies a group reference */ static void vfio_device_put(struct vfio_device *device) { - kref_put(&device->kref, vfio_device_release); - vfio_group_put(device->group); + struct vfio_group *group = device->group; + kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock); + vfio_group_put(group); } static void vfio_device_get(struct vfio_device *device) @@ -1116,10 +1111,10 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf) */ filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); - fd_install(ret, filep); - vfio_device_get(device); atomic_inc(&group->container_users); + + fd_install(ret, filep); break; } mutex_unlock(&group->device_lock); diff --git a/trunk/drivers/vhost/Kconfig b/trunk/drivers/vhost/Kconfig index e4e2fd1b5107..202bba6c997c 100644 --- a/trunk/drivers/vhost/Kconfig +++ b/trunk/drivers/vhost/Kconfig @@ -9,3 +9,6 @@ config VHOST_NET To compile this driver as a module, choose M here: the module will be called vhost_net. +if STAGING +source "drivers/vhost/Kconfig.tcm" +endif diff --git a/trunk/drivers/vhost/Kconfig.tcm b/trunk/drivers/vhost/Kconfig.tcm new file mode 100644 index 000000000000..a9c6f76e3208 --- /dev/null +++ b/trunk/drivers/vhost/Kconfig.tcm @@ -0,0 +1,6 @@ +config TCM_VHOST + tristate "TCM_VHOST fabric module (EXPERIMENTAL)" + depends on TARGET_CORE && EVENTFD && EXPERIMENTAL && m + default n + ---help--- + Say M here to enable the TCM_VHOST fabric module for use with virtio-scsi guests diff --git a/trunk/drivers/vhost/Makefile b/trunk/drivers/vhost/Makefile index 72dd02050bb9..a27b053bc9ab 100644 --- a/trunk/drivers/vhost/Makefile +++ b/trunk/drivers/vhost/Makefile @@ -1,2 +1,4 @@ obj-$(CONFIG_VHOST_NET) += vhost_net.o vhost_net-y := vhost.o net.o + +obj-$(CONFIG_TCM_VHOST) += tcm_vhost.o diff --git a/trunk/drivers/vhost/tcm_vhost.c b/trunk/drivers/vhost/tcm_vhost.c new file mode 100644 index 000000000000..ed8e2e6c8df2 --- /dev/null +++ b/trunk/drivers/vhost/tcm_vhost.c @@ -0,0 +1,1649 @@ +/******************************************************************************* + * Vhost kernel TCM fabric driver for virtio SCSI initiators + * + * (C) Copyright 2010-2012 RisingTide Systems LLC. + * (C) Copyright 2010-2012 IBM Corp. + * + * Licensed to the Linux Foundation under the General Public License (GPL) version 2. + * + * Authors: Nicholas A. Bellinger + * Stefan Hajnoczi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + ****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* TODO vhost.h currently depends on this */ +#include + +#include "vhost.c" +#include "vhost.h" +#include "tcm_vhost.h" + +enum { + VHOST_SCSI_VQ_CTL = 0, + VHOST_SCSI_VQ_EVT = 1, + VHOST_SCSI_VQ_IO = 2, +}; + +struct vhost_scsi { + struct tcm_vhost_tpg *vs_tpg; /* Protected by vhost_scsi->dev.mutex */ + struct vhost_dev dev; + struct vhost_virtqueue vqs[3]; + + struct vhost_work vs_completion_work; /* cmd completion work item */ + struct list_head vs_completion_list; /* cmd completion queue */ + spinlock_t vs_completion_lock; /* protects s_completion_list */ +}; + +/* Local pointer to allocated TCM configfs fabric module */ +static struct target_fabric_configfs *tcm_vhost_fabric_configfs; + +static struct workqueue_struct *tcm_vhost_workqueue; + +/* Global spinlock to protect tcm_vhost TPG list for vhost IOCTL access */ +static DEFINE_MUTEX(tcm_vhost_mutex); +static LIST_HEAD(tcm_vhost_list); + +static int tcm_vhost_check_true(struct se_portal_group *se_tpg) +{ + return 1; +} + +static int tcm_vhost_check_false(struct se_portal_group *se_tpg) +{ + return 0; +} + +static char *tcm_vhost_get_fabric_name(void) +{ + return "vhost"; +} + +static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_tport *tport = tpg->tport; + + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return sas_get_fabric_proto_ident(se_tpg); + case SCSI_PROTOCOL_FCP: + return fc_get_fabric_proto_ident(se_tpg); + case SCSI_PROTOCOL_ISCSI: + return iscsi_get_fabric_proto_ident(se_tpg); + default: + pr_err("Unknown tport_proto_id: 0x%02x, using" + " SAS emulation\n", tport->tport_proto_id); + break; + } + + return sas_get_fabric_proto_ident(se_tpg); +} + +static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_tport *tport = tpg->tport; + + return &tport->tport_name[0]; +} + +static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + return tpg->tport_tpgt; +} + +static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg) +{ + return 1; +} + +static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int *format_code, + unsigned char *buf) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_tport *tport = tpg->tport; + + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, + format_code, buf); + case SCSI_PROTOCOL_FCP: + return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg, + format_code, buf); + case SCSI_PROTOCOL_ISCSI: + return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg, + format_code, buf); + default: + pr_err("Unknown tport_proto_id: 0x%02x, using" + " SAS emulation\n", tport->tport_proto_id); + break; + } + + return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, + format_code, buf); +} + +static u32 
tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int *format_code) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_tport *tport = tpg->tport; + + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, + format_code); + case SCSI_PROTOCOL_FCP: + return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, + format_code); + case SCSI_PROTOCOL_ISCSI: + return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, + format_code); + default: + pr_err("Unknown tport_proto_id: 0x%02x, using" + " SAS emulation\n", tport->tport_proto_id); + break; + } + + return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, + format_code); +} + +static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg, + const char *buf, + u32 *out_tid_len, + char **port_nexus_ptr) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_tport *tport = tpg->tport; + + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, + port_nexus_ptr); + case SCSI_PROTOCOL_FCP: + return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, + port_nexus_ptr); + case SCSI_PROTOCOL_ISCSI: + return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, + port_nexus_ptr); + default: + pr_err("Unknown tport_proto_id: 0x%02x, using" + " SAS emulation\n", tport->tport_proto_id); + break; + } + + return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, + port_nexus_ptr); +} + +static struct se_node_acl *tcm_vhost_alloc_fabric_acl( + struct se_portal_group *se_tpg) +{ + struct tcm_vhost_nacl *nacl; + + nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL); + if (!nacl) { + pr_err("Unable to alocate struct tcm_vhost_nacl\n"); + return NULL; + } + + return &nacl->se_node_acl; +} + +static void tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl) +{ + struct tcm_vhost_nacl *nacl = container_of(se_nacl, + struct tcm_vhost_nacl, se_node_acl); + kfree(nacl); +} + +static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg) +{ + return 1; +} + +static void tcm_vhost_release_cmd(struct se_cmd *se_cmd) +{ + return; +} + +static int tcm_vhost_shutdown_session(struct se_session *se_sess) +{ + return 0; +} + +static void tcm_vhost_close_session(struct se_session *se_sess) +{ + return; +} + +static u32 tcm_vhost_sess_get_index(struct se_session *se_sess) +{ + return 0; +} + +static int tcm_vhost_write_pending(struct se_cmd *se_cmd) +{ + /* Go ahead and process the write immediately */ + target_execute_cmd(se_cmd); + return 0; +} + +static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd) +{ + return 0; +} + +static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl) +{ + return; +} + +static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd) +{ + return 0; +} + +static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd) +{ + return 0; +} + +static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd) +{ + struct vhost_scsi *vs = tv_cmd->tvc_vhost; + + spin_lock_bh(&vs->vs_completion_lock); + list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list); + spin_unlock_bh(&vs->vs_completion_lock); + + vhost_work_queue(&vs->dev, &vs->vs_completion_work); +} + +static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd) +{ + 
struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd, + struct tcm_vhost_cmd, tvc_se_cmd); + vhost_scsi_complete_cmd(tv_cmd); + return 0; +} + +static int tcm_vhost_queue_status(struct se_cmd *se_cmd) +{ + struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd, + struct tcm_vhost_cmd, tvc_se_cmd); + vhost_scsi_complete_cmd(tv_cmd); + return 0; +} + +static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd) +{ + return 0; +} + +static u16 tcm_vhost_set_fabric_sense_len(struct se_cmd *se_cmd, + u32 sense_length) +{ + return 0; +} + +static u16 tcm_vhost_get_fabric_sense_len(void) +{ + return 0; +} + +static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd) +{ + struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd; + + /* TODO locking against target/backend threads? */ + transport_generic_free_cmd(se_cmd, 1); + + if (tv_cmd->tvc_sgl_count) { + u32 i; + for (i = 0; i < tv_cmd->tvc_sgl_count; i++) + put_page(sg_page(&tv_cmd->tvc_sgl[i])); + + kfree(tv_cmd->tvc_sgl); + } + + kfree(tv_cmd); +} + +/* Dequeue a command from the completion list */ +static struct tcm_vhost_cmd *vhost_scsi_get_cmd_from_completion( + struct vhost_scsi *vs) +{ + struct tcm_vhost_cmd *tv_cmd = NULL; + + spin_lock_bh(&vs->vs_completion_lock); + if (list_empty(&vs->vs_completion_list)) { + spin_unlock_bh(&vs->vs_completion_lock); + return NULL; + } + + list_for_each_entry(tv_cmd, &vs->vs_completion_list, + tvc_completion_list) { + list_del(&tv_cmd->tvc_completion_list); + break; + } + spin_unlock_bh(&vs->vs_completion_lock); + return tv_cmd; +} + +/* Fill in status and signal that we are done processing this command + * + * This is scheduled in the vhost work queue so we are called with the owner + * process mm and can access the vring. + */ +static void vhost_scsi_complete_cmd_work(struct vhost_work *work) +{ + struct vhost_scsi *vs = container_of(work, struct vhost_scsi, + vs_completion_work); + struct tcm_vhost_cmd *tv_cmd; + + while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs))) { + struct virtio_scsi_cmd_resp v_rsp; + struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd; + int ret; + + pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__, + tv_cmd, se_cmd->residual_count, se_cmd->scsi_status); + + memset(&v_rsp, 0, sizeof(v_rsp)); + v_rsp.resid = se_cmd->residual_count; + /* TODO is status_qualifier field needed? 
*/ + v_rsp.status = se_cmd->scsi_status; + v_rsp.sense_len = se_cmd->scsi_sense_length; + memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf, + v_rsp.sense_len); + ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp)); + if (likely(ret == 0)) + vhost_add_used(&vs->vqs[2], tv_cmd->tvc_vq_desc, 0); + else + pr_err("Faulted on virtio_scsi_cmd_resp\n"); + + vhost_scsi_free_cmd(tv_cmd); + } + + vhost_signal(&vs->dev, &vs->vqs[2]); +} + +static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd( + struct tcm_vhost_tpg *tv_tpg, + struct virtio_scsi_cmd_req *v_req, + u32 exp_data_len, + int data_direction) +{ + struct tcm_vhost_cmd *tv_cmd; + struct tcm_vhost_nexus *tv_nexus; + struct se_portal_group *se_tpg = &tv_tpg->se_tpg; + struct se_session *se_sess; + struct se_cmd *se_cmd; + int sam_task_attr; + + tv_nexus = tv_tpg->tpg_nexus; + if (!tv_nexus) { + pr_err("Unable to locate active struct tcm_vhost_nexus\n"); + return ERR_PTR(-EIO); + } + se_sess = tv_nexus->tvn_se_sess; + + tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC); + if (!tv_cmd) { + pr_err("Unable to allocate struct tcm_vhost_cmd\n"); + return ERR_PTR(-ENOMEM); + } + INIT_LIST_HEAD(&tv_cmd->tvc_completion_list); + tv_cmd->tvc_tag = v_req->tag; + + se_cmd = &tv_cmd->tvc_se_cmd; + /* + * Locate the SAM Task Attr from virtio_scsi_cmd_req + */ + sam_task_attr = v_req->task_attr; + /* + * Initialize struct se_cmd descriptor from TCM infrastructure + */ + transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, exp_data_len, + data_direction, sam_task_attr, + &tv_cmd->tvc_sense_buf[0]); + +#if 0 /* FIXME: vhost_scsi_allocate_cmd() BIDI operation */ + if (bidi) + se_cmd->se_cmd_flags |= SCF_BIDI; +#endif + return tv_cmd; +} + +/* + * Map a user memory range into a scatterlist + * + * Returns the number of scatterlist entries used or -errno on error. 
+ */ +static int vhost_scsi_map_to_sgl(struct scatterlist *sgl, + unsigned int sgl_count, void __user *ptr, size_t len, int write) +{ + struct scatterlist *sg = sgl; + unsigned int npages = 0; + int ret; + + while (len > 0) { + struct page *page; + unsigned int offset = (uintptr_t)ptr & ~PAGE_MASK; + unsigned int nbytes = min_t(unsigned int, + PAGE_SIZE - offset, len); + + if (npages == sgl_count) { + ret = -ENOBUFS; + goto err; + } + + ret = get_user_pages_fast((unsigned long)ptr, 1, write, &page); + BUG_ON(ret == 0); /* we should either get our page or fail */ + if (ret < 0) + goto err; + + sg_set_page(sg, page, nbytes, offset); + ptr += nbytes; + len -= nbytes; + sg++; + npages++; + } + return npages; + +err: + /* Put pages that we hold */ + for (sg = sgl; sg != &sgl[npages]; sg++) + put_page(sg_page(sg)); + return ret; +} + +static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd, + struct iovec *iov, unsigned int niov, int write) +{ + int ret; + unsigned int i; + u32 sgl_count; + struct scatterlist *sg; + + /* + * Find out how long sglist needs to be + */ + sgl_count = 0; + for (i = 0; i < niov; i++) { + sgl_count += (((uintptr_t)iov[i].iov_base + iov[i].iov_len + + PAGE_SIZE - 1) >> PAGE_SHIFT) - + ((uintptr_t)iov[i].iov_base >> PAGE_SHIFT); + } + /* TODO overflow checking */ + + sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC); + if (!sg) + return -ENOMEM; + pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__, + sg, sgl_count, !sg); + sg_init_table(sg, sgl_count); + + tv_cmd->tvc_sgl = sg; + tv_cmd->tvc_sgl_count = sgl_count; + + pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count); + for (i = 0; i < niov; i++) { + ret = vhost_scsi_map_to_sgl(sg, sgl_count, iov[i].iov_base, + iov[i].iov_len, write); + if (ret < 0) { + for (i = 0; i < tv_cmd->tvc_sgl_count; i++) + put_page(sg_page(&tv_cmd->tvc_sgl[i])); + kfree(tv_cmd->tvc_sgl); + tv_cmd->tvc_sgl = NULL; + tv_cmd->tvc_sgl_count = 0; + return ret; + } + + sg += ret; + sgl_count -= ret; + } + return 0; +} + +static void tcm_vhost_submission_work(struct work_struct *work) +{ + struct tcm_vhost_cmd *tv_cmd = + container_of(work, struct tcm_vhost_cmd, work); + struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd; + struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL; + int rc, sg_no_bidi = 0; + /* + * Locate the struct se_lun pointer based on v_req->lun, and + * attach it to struct se_cmd + */ + rc = transport_lookup_cmd_lun(&tv_cmd->tvc_se_cmd, tv_cmd->tvc_lun); + if (rc < 0) { + pr_err("Failed to look up lun: %d\n", tv_cmd->tvc_lun); + transport_send_check_condition_and_sense(&tv_cmd->tvc_se_cmd, + tv_cmd->tvc_se_cmd.scsi_sense_reason, 0); + transport_generic_free_cmd(se_cmd, 0); + return; + } + + rc = target_setup_cmd_from_cdb(se_cmd, tv_cmd->tvc_cdb); + if (rc == -ENOMEM) { + transport_send_check_condition_and_sense(se_cmd, + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); + transport_generic_free_cmd(se_cmd, 0); + return; + } else if (rc < 0) { + if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) + tcm_vhost_queue_status(se_cmd); + else + transport_send_check_condition_and_sense(se_cmd, + se_cmd->scsi_sense_reason, 0); + transport_generic_free_cmd(se_cmd, 0); + return; + } + + if (tv_cmd->tvc_sgl_count) { + sg_ptr = tv_cmd->tvc_sgl; + /* + * For BIDI commands, pass in the extra READ buffer + * to transport_generic_map_mem_to_cmd() below.. 
+ */ +/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */ +#if 0 + if (se_cmd->se_cmd_flags & SCF_BIDI) { + sg_bidi_ptr = NULL; + sg_no_bidi = 0; + } +#endif + } else { + sg_ptr = NULL; + } + + rc = transport_generic_map_mem_to_cmd(se_cmd, sg_ptr, + tv_cmd->tvc_sgl_count, sg_bidi_ptr, + sg_no_bidi); + if (rc < 0) { + transport_send_check_condition_and_sense(se_cmd, + se_cmd->scsi_sense_reason, 0); + transport_generic_free_cmd(se_cmd, 0); + return; + } + transport_handle_cdb_direct(se_cmd); +} + +static void vhost_scsi_handle_vq(struct vhost_scsi *vs) +{ + struct vhost_virtqueue *vq = &vs->vqs[2]; + struct virtio_scsi_cmd_req v_req; + struct tcm_vhost_tpg *tv_tpg; + struct tcm_vhost_cmd *tv_cmd; + u32 exp_data_len, data_first, data_num, data_direction; + unsigned out, in, i; + int head, ret; + + /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */ + tv_tpg = vs->vs_tpg; + if (unlikely(!tv_tpg)) { + pr_err("%s endpoint not set\n", __func__); + return; + } + + mutex_lock(&vq->mutex); + vhost_disable_notify(&vs->dev, vq); + + for (;;) { + head = vhost_get_vq_desc(&vs->dev, vq, vq->iov, + ARRAY_SIZE(vq->iov), &out, &in, + NULL, NULL); + pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n", + head, out, in); + /* On error, stop handling until the next kick. */ + if (unlikely(head < 0)) + break; + /* Nothing new? Wait for eventfd to tell us they refilled. */ + if (head == vq->num) { + if (unlikely(vhost_enable_notify(&vs->dev, vq))) { + vhost_disable_notify(&vs->dev, vq); + continue; + } + break; + } + +/* FIXME: BIDI operation */ + if (out == 1 && in == 1) { + data_direction = DMA_NONE; + data_first = 0; + data_num = 0; + } else if (out == 1 && in > 1) { + data_direction = DMA_FROM_DEVICE; + data_first = out + 1; + data_num = in - 1; + } else if (out > 1 && in == 1) { + data_direction = DMA_TO_DEVICE; + data_first = 1; + data_num = out - 1; + } else { + vq_err(vq, "Invalid buffer layout out: %u in: %u\n", + out, in); + break; + } + + /* + * Check for a sane resp buffer so we can report errors to + * the guest. 
+ */ + if (unlikely(vq->iov[out].iov_len != + sizeof(struct virtio_scsi_cmd_resp))) { + vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu" + " bytes\n", vq->iov[out].iov_len); + break; + } + + if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) { + vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu" + " bytes\n", vq->iov[0].iov_len); + break; + } + pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p," + " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req)); + ret = __copy_from_user(&v_req, vq->iov[0].iov_base, + sizeof(v_req)); + if (unlikely(ret)) { + vq_err(vq, "Faulted on virtio_scsi_cmd_req\n"); + break; + } + + exp_data_len = 0; + for (i = 0; i < data_num; i++) + exp_data_len += vq->iov[data_first + i].iov_len; + + tv_cmd = vhost_scsi_allocate_cmd(tv_tpg, &v_req, + exp_data_len, data_direction); + if (IS_ERR(tv_cmd)) { + vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n", + PTR_ERR(tv_cmd)); + break; + } + pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction" + ": %d\n", tv_cmd, exp_data_len, data_direction); + + tv_cmd->tvc_vhost = vs; + + if (unlikely(vq->iov[out].iov_len != + sizeof(struct virtio_scsi_cmd_resp))) { + vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu" + " bytes, out: %d, in: %d\n", + vq->iov[out].iov_len, out, in); + break; + } + + tv_cmd->tvc_resp = vq->iov[out].iov_base; + + /* + * Copy in the recieved CDB descriptor into tv_cmd->tvc_cdb + * that will be used by tcm_vhost_new_cmd_map() and down into + * target_setup_cmd_from_cdb() + */ + memcpy(tv_cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE); + /* + * Check that the recieved CDB size does not exceeded our + * hardcoded max for tcm_vhost + */ + /* TODO what if cdb was too small for varlen cdb header? */ + if (unlikely(scsi_command_size(tv_cmd->tvc_cdb) > + TCM_VHOST_MAX_CDB_SIZE)) { + vq_err(vq, "Received SCSI CDB with command_size: %d that" + " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", + scsi_command_size(tv_cmd->tvc_cdb), + TCM_VHOST_MAX_CDB_SIZE); + break; /* TODO */ + } + tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; + + pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", + tv_cmd->tvc_cdb[0], tv_cmd->tvc_lun); + + if (data_direction != DMA_NONE) { + ret = vhost_scsi_map_iov_to_sgl(tv_cmd, + &vq->iov[data_first], data_num, + data_direction == DMA_TO_DEVICE); + if (unlikely(ret)) { + vq_err(vq, "Failed to map iov to sgl\n"); + break; /* TODO */ + } + } + + /* + * Save the descriptor from vhost_get_vq_desc() to be used to + * complete the virtio-scsi request in TCM callback context via + * tcm_vhost_queue_data_in() and tcm_vhost_queue_status() + */ + tv_cmd->tvc_vq_desc = head; + /* + * Dispatch tv_cmd descriptor for cmwq execution in process + * context provided by tcm_vhost_workqueue. This also ensures + * tv_cmd is executed on the same kworker CPU as this vhost + * thread to gain positive L2 cache locality effects.. 
+ */ + INIT_WORK(&tv_cmd->work, tcm_vhost_submission_work); + queue_work(tcm_vhost_workqueue, &tv_cmd->work); + } + + mutex_unlock(&vq->mutex); +} + +static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) +{ + pr_debug("%s: The handling func for control queue.\n", __func__); +} + +static void vhost_scsi_evt_handle_kick(struct vhost_work *work) +{ + pr_debug("%s: The handling func for event queue.\n", __func__); +} + +static void vhost_scsi_handle_kick(struct vhost_work *work) +{ + struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, + poll.work); + struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); + + vhost_scsi_handle_vq(vs); +} + +/* + * Called from vhost_scsi_ioctl() context to walk the list of available + * tcm_vhost_tpg with an active struct tcm_vhost_nexus + */ +static int vhost_scsi_set_endpoint( + struct vhost_scsi *vs, + struct vhost_scsi_target *t) +{ + struct tcm_vhost_tport *tv_tport; + struct tcm_vhost_tpg *tv_tpg; + int index; + + mutex_lock(&vs->dev.mutex); + /* Verify that ring has been setup correctly. */ + for (index = 0; index < vs->dev.nvqs; ++index) { + /* Verify that ring has been setup correctly. */ + if (!vhost_vq_access_ok(&vs->vqs[index])) { + mutex_unlock(&vs->dev.mutex); + return -EFAULT; + } + } + mutex_unlock(&vs->dev.mutex); + + mutex_lock(&tcm_vhost_mutex); + list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) { + mutex_lock(&tv_tpg->tv_tpg_mutex); + if (!tv_tpg->tpg_nexus) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + continue; + } + if (tv_tpg->tv_tpg_vhost_count != 0) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + continue; + } + tv_tport = tv_tpg->tport; + + if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) && + (tv_tpg->tport_tpgt == t->vhost_tpgt)) { + tv_tpg->tv_tpg_vhost_count++; + mutex_unlock(&tv_tpg->tv_tpg_mutex); + mutex_unlock(&tcm_vhost_mutex); + + mutex_lock(&vs->dev.mutex); + if (vs->vs_tpg) { + mutex_unlock(&vs->dev.mutex); + mutex_lock(&tv_tpg->tv_tpg_mutex); + tv_tpg->tv_tpg_vhost_count--; + mutex_unlock(&tv_tpg->tv_tpg_mutex); + return -EEXIST; + } + + vs->vs_tpg = tv_tpg; + smp_mb__after_atomic_inc(); + mutex_unlock(&vs->dev.mutex); + return 0; + } + mutex_unlock(&tv_tpg->tv_tpg_mutex); + } + mutex_unlock(&tcm_vhost_mutex); + return -EINVAL; +} + +static int vhost_scsi_clear_endpoint( + struct vhost_scsi *vs, + struct vhost_scsi_target *t) +{ + struct tcm_vhost_tport *tv_tport; + struct tcm_vhost_tpg *tv_tpg; + int index, ret; + + mutex_lock(&vs->dev.mutex); + /* Verify that ring has been setup correctly. 
*/ + for (index = 0; index < vs->dev.nvqs; ++index) { + if (!vhost_vq_access_ok(&vs->vqs[index])) { + ret = -EFAULT; + goto err; + } + } + + if (!vs->vs_tpg) { + ret = -ENODEV; + goto err; + } + tv_tpg = vs->vs_tpg; + tv_tport = tv_tpg->tport; + + if (strcmp(tv_tport->tport_name, t->vhost_wwpn) || + (tv_tpg->tport_tpgt != t->vhost_tpgt)) { + pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu" + " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n", + tv_tport->tport_name, tv_tpg->tport_tpgt, + t->vhost_wwpn, t->vhost_tpgt); + ret = -EINVAL; + goto err; + } + tv_tpg->tv_tpg_vhost_count--; + vs->vs_tpg = NULL; + mutex_unlock(&vs->dev.mutex); + + return 0; + +err: + mutex_unlock(&vs->dev.mutex); + return ret; +} + +static int vhost_scsi_open(struct inode *inode, struct file *f) +{ + struct vhost_scsi *s; + int r; + + s = kzalloc(sizeof(*s), GFP_KERNEL); + if (!s) + return -ENOMEM; + + vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work); + INIT_LIST_HEAD(&s->vs_completion_list); + spin_lock_init(&s->vs_completion_lock); + + s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick; + s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick; + s->vqs[VHOST_SCSI_VQ_IO].handle_kick = vhost_scsi_handle_kick; + r = vhost_dev_init(&s->dev, s->vqs, 3); + if (r < 0) { + kfree(s); + return r; + } + + f->private_data = s; + return 0; +} + +static int vhost_scsi_release(struct inode *inode, struct file *f) +{ + struct vhost_scsi *s = f->private_data; + + if (s->vs_tpg && s->vs_tpg->tport) { + struct vhost_scsi_target backend; + + memcpy(backend.vhost_wwpn, s->vs_tpg->tport->tport_name, + sizeof(backend.vhost_wwpn)); + backend.vhost_tpgt = s->vs_tpg->tport_tpgt; + vhost_scsi_clear_endpoint(s, &backend); + } + + vhost_dev_cleanup(&s->dev, false); + kfree(s); + return 0; +} + +static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index) +{ + vhost_poll_flush(&vs->dev.vqs[index].poll); +} + +static void vhost_scsi_flush(struct vhost_scsi *vs) +{ + vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_CTL); + vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_EVT); + vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_IO); +} + +static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) +{ + if (features & ~VHOST_FEATURES) + return -EOPNOTSUPP; + + mutex_lock(&vs->dev.mutex); + if ((features & (1 << VHOST_F_LOG_ALL)) && + !vhost_log_access_ok(&vs->dev)) { + mutex_unlock(&vs->dev.mutex); + return -EFAULT; + } + vs->dev.acked_features = features; + smp_wmb(); + vhost_scsi_flush(vs); + mutex_unlock(&vs->dev.mutex); + return 0; +} + +static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl, + unsigned long arg) +{ + struct vhost_scsi *vs = f->private_data; + struct vhost_scsi_target backend; + void __user *argp = (void __user *)arg; + u64 __user *featurep = argp; + u64 features; + int r, abi_version = VHOST_SCSI_ABI_VERSION; + + switch (ioctl) { + case VHOST_SCSI_SET_ENDPOINT: + if (copy_from_user(&backend, argp, sizeof backend)) + return -EFAULT; + if (backend.reserved != 0) + return -EOPNOTSUPP; + + return vhost_scsi_set_endpoint(vs, &backend); + case VHOST_SCSI_CLEAR_ENDPOINT: + if (copy_from_user(&backend, argp, sizeof backend)) + return -EFAULT; + if (backend.reserved != 0) + return -EOPNOTSUPP; + + return vhost_scsi_clear_endpoint(vs, &backend); + case VHOST_SCSI_GET_ABI_VERSION: + if (copy_to_user(argp, &abi_version, sizeof abi_version)) + return -EFAULT; + return 0; + case VHOST_GET_FEATURES: + features = VHOST_FEATURES; + if (copy_to_user(featurep, &features, sizeof 
features)) + return -EFAULT; + return 0; + case VHOST_SET_FEATURES: + if (copy_from_user(&features, featurep, sizeof features)) + return -EFAULT; + return vhost_scsi_set_features(vs, features); + default: + mutex_lock(&vs->dev.mutex); + r = vhost_dev_ioctl(&vs->dev, ioctl, arg); + mutex_unlock(&vs->dev.mutex); + return r; + } +} + +#ifdef CONFIG_COMPAT +static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl, + unsigned long arg) +{ + return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg)); +} +#endif + +static const struct file_operations vhost_scsi_fops = { + .owner = THIS_MODULE, + .release = vhost_scsi_release, + .unlocked_ioctl = vhost_scsi_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = vhost_scsi_compat_ioctl, +#endif + .open = vhost_scsi_open, + .llseek = noop_llseek, +}; + +static struct miscdevice vhost_scsi_misc = { + MISC_DYNAMIC_MINOR, + "vhost-scsi", + &vhost_scsi_fops, +}; + +static int __init vhost_scsi_register(void) +{ + return misc_register(&vhost_scsi_misc); +} + +static int vhost_scsi_deregister(void) +{ + return misc_deregister(&vhost_scsi_misc); +} + +static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport) +{ + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return "SAS"; + case SCSI_PROTOCOL_FCP: + return "FCP"; + case SCSI_PROTOCOL_ISCSI: + return "iSCSI"; + default: + break; + } + + return "Unknown"; +} + +static int tcm_vhost_port_link(struct se_portal_group *se_tpg, + struct se_lun *lun) +{ + struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + + mutex_lock(&tv_tpg->tv_tpg_mutex); + tv_tpg->tv_tpg_port_count++; + mutex_unlock(&tv_tpg->tv_tpg_mutex); + + return 0; +} + +static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg, + struct se_lun *se_lun) +{ + struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + + mutex_lock(&tv_tpg->tv_tpg_mutex); + tv_tpg->tv_tpg_port_count--; + mutex_unlock(&tv_tpg->tv_tpg_mutex); +} + +static struct se_node_acl *tcm_vhost_make_nodeacl( + struct se_portal_group *se_tpg, + struct config_group *group, + const char *name) +{ + struct se_node_acl *se_nacl, *se_nacl_new; + struct tcm_vhost_nacl *nacl; + u64 wwpn = 0; + u32 nexus_depth; + + /* tcm_vhost_parse_wwn(name, &wwpn, 1) < 0) + return ERR_PTR(-EINVAL); */ + se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg); + if (!se_nacl_new) + return ERR_PTR(-ENOMEM); + + nexus_depth = 1; + /* + * se_nacl_new may be released by core_tpg_add_initiator_node_acl() + * when converting a NodeACL from demo mode -> explict + */ + se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, + name, nexus_depth); + if (IS_ERR(se_nacl)) { + tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new); + return se_nacl; + } + /* + * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN + */ + nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl); + nacl->iport_wwpn = wwpn; + + return se_nacl; +} + +static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl) +{ + struct tcm_vhost_nacl *nacl = container_of(se_acl, + struct tcm_vhost_nacl, se_node_acl); + core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1); + kfree(nacl); +} + +static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg, + const char *name) +{ + struct se_portal_group *se_tpg; + struct tcm_vhost_nexus *tv_nexus; + + mutex_lock(&tv_tpg->tv_tpg_mutex); + if (tv_tpg->tpg_nexus) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + pr_debug("tv_tpg->tpg_nexus already exists\n"); + return -EEXIST; + } + se_tpg = 
&tv_tpg->se_tpg; + + tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL); + if (!tv_nexus) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + pr_err("Unable to allocate struct tcm_vhost_nexus\n"); + return -ENOMEM; + } + /* + * Initialize the struct se_session pointer + */ + tv_nexus->tvn_se_sess = transport_init_session(); + if (IS_ERR(tv_nexus->tvn_se_sess)) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + kfree(tv_nexus); + return -ENOMEM; + } + /* + * Since we are running in 'demo mode' this call with generate a + * struct se_node_acl for the tcm_vhost struct se_portal_group with + * the SCSI Initiator port name of the passed configfs group 'name'. + */ + tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl( + se_tpg, (unsigned char *)name); + if (!tv_nexus->tvn_se_sess->se_node_acl) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + pr_debug("core_tpg_check_initiator_node_acl() failed" + " for %s\n", name); + transport_free_session(tv_nexus->tvn_se_sess); + kfree(tv_nexus); + return -ENOMEM; + } + /* + * Now register the TCM vhost virtual I_T Nexus as active with the + * call to __transport_register_session() + */ + __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, + tv_nexus->tvn_se_sess, tv_nexus); + tv_tpg->tpg_nexus = tv_nexus; + + mutex_unlock(&tv_tpg->tv_tpg_mutex); + return 0; +} + +static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg) +{ + struct se_session *se_sess; + struct tcm_vhost_nexus *tv_nexus; + + mutex_lock(&tpg->tv_tpg_mutex); + tv_nexus = tpg->tpg_nexus; + if (!tv_nexus) { + mutex_unlock(&tpg->tv_tpg_mutex); + return -ENODEV; + } + + se_sess = tv_nexus->tvn_se_sess; + if (!se_sess) { + mutex_unlock(&tpg->tv_tpg_mutex); + return -ENODEV; + } + + if (tpg->tv_tpg_port_count != 0) { + mutex_unlock(&tpg->tv_tpg_mutex); + pr_err("Unable to remove TCM_vhost I_T Nexus with" + " active TPG port count: %d\n", + tpg->tv_tpg_port_count); + return -EBUSY; + } + + if (tpg->tv_tpg_vhost_count != 0) { + mutex_unlock(&tpg->tv_tpg_mutex); + pr_err("Unable to remove TCM_vhost I_T Nexus with" + " active TPG vhost count: %d\n", + tpg->tv_tpg_vhost_count); + return -EBUSY; + } + + pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated" + " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport), + tv_nexus->tvn_se_sess->se_node_acl->initiatorname); + /* + * Release the SCSI I_T Nexus to the emulated vhost Target Port + */ + transport_deregister_session(tv_nexus->tvn_se_sess); + tpg->tpg_nexus = NULL; + mutex_unlock(&tpg->tv_tpg_mutex); + + kfree(tv_nexus); + return 0; +} + +static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg, + char *page) +{ + struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_nexus *tv_nexus; + ssize_t ret; + + mutex_lock(&tv_tpg->tv_tpg_mutex); + tv_nexus = tv_tpg->tpg_nexus; + if (!tv_nexus) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + return -ENODEV; + } + ret = snprintf(page, PAGE_SIZE, "%s\n", + tv_nexus->tvn_se_sess->se_node_acl->initiatorname); + mutex_unlock(&tv_tpg->tv_tpg_mutex); + + return ret; +} + +static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg, + const char *page, + size_t count) +{ + struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_tport *tport_wwn = tv_tpg->tport; + unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr; + int ret; + /* + * Shutdown the active I_T nexus if 'NULL' is passed.. 
+ */ + if (!strncmp(page, "NULL", 4)) { + ret = tcm_vhost_drop_nexus(tv_tpg); + return (!ret) ? count : ret; + } + /* + * Otherwise make sure the passed virtual Initiator port WWN matches + * the fabric protocol_id set in tcm_vhost_make_tport(), and call + * tcm_vhost_make_nexus(). + */ + if (strlen(page) >= TCM_VHOST_NAMELEN) { + pr_err("Emulated NAA Sas Address: %s, exceeds" + " max: %d\n", page, TCM_VHOST_NAMELEN); + return -EINVAL; + } + snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page); + + ptr = strstr(i_port, "naa."); + if (ptr) { + if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) { + pr_err("Passed SAS Initiator Port %s does not" + " match target port protoid: %s\n", i_port, + tcm_vhost_dump_proto_id(tport_wwn)); + return -EINVAL; + } + port_ptr = &i_port[0]; + goto check_newline; + } + ptr = strstr(i_port, "fc."); + if (ptr) { + if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) { + pr_err("Passed FCP Initiator Port %s does not" + " match target port protoid: %s\n", i_port, + tcm_vhost_dump_proto_id(tport_wwn)); + return -EINVAL; + } + port_ptr = &i_port[3]; /* Skip over "fc." */ + goto check_newline; + } + ptr = strstr(i_port, "iqn."); + if (ptr) { + if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) { + pr_err("Passed iSCSI Initiator Port %s does not" + " match target port protoid: %s\n", i_port, + tcm_vhost_dump_proto_id(tport_wwn)); + return -EINVAL; + } + port_ptr = &i_port[0]; + goto check_newline; + } + pr_err("Unable to locate prefix for emulated Initiator Port:" + " %s\n", i_port); + return -EINVAL; + /* + * Clear any trailing newline for the NAA WWN + */ +check_newline: + if (i_port[strlen(i_port)-1] == '\n') + i_port[strlen(i_port)-1] = '\0'; + + ret = tcm_vhost_make_nexus(tv_tpg, port_ptr); + if (ret < 0) + return ret; + + return count; +} + +TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR); + +static struct configfs_attribute *tcm_vhost_tpg_attrs[] = { + &tcm_vhost_tpg_nexus.attr, + NULL, +}; + +static struct se_portal_group *tcm_vhost_make_tpg(struct se_wwn *wwn, + struct config_group *group, + const char *name) +{ + struct tcm_vhost_tport *tport = container_of(wwn, + struct tcm_vhost_tport, tport_wwn); + + struct tcm_vhost_tpg *tpg; + unsigned long tpgt; + int ret; + + if (strstr(name, "tpgt_") != name) + return ERR_PTR(-EINVAL); + if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX) + return ERR_PTR(-EINVAL); + + tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL); + if (!tpg) { + pr_err("Unable to allocate struct tcm_vhost_tpg"); + return ERR_PTR(-ENOMEM); + } + mutex_init(&tpg->tv_tpg_mutex); + INIT_LIST_HEAD(&tpg->tv_tpg_list); + tpg->tport = tport; + tpg->tport_tpgt = tpgt; + + ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn, + &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); + if (ret < 0) { + kfree(tpg); + return NULL; + } + mutex_lock(&tcm_vhost_mutex); + list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list); + mutex_unlock(&tcm_vhost_mutex); + + return &tpg->se_tpg; +} + +static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + + mutex_lock(&tcm_vhost_mutex); + list_del(&tpg->tv_tpg_list); + mutex_unlock(&tcm_vhost_mutex); + /* + * Release the virtual I_T Nexus for this vhost TPG + */ + tcm_vhost_drop_nexus(tpg); + /* + * Deregister the se_tpg from TCM.. 
+ */ + core_tpg_deregister(se_tpg); + kfree(tpg); +} + +static struct se_wwn *tcm_vhost_make_tport(struct target_fabric_configfs *tf, + struct config_group *group, + const char *name) +{ + struct tcm_vhost_tport *tport; + char *ptr; + u64 wwpn = 0; + int off = 0; + + /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0) + return ERR_PTR(-EINVAL); */ + + tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL); + if (!tport) { + pr_err("Unable to allocate struct tcm_vhost_tport"); + return ERR_PTR(-ENOMEM); + } + tport->tport_wwpn = wwpn; + /* + * Determine the emulated Protocol Identifier and Target Port Name + * based on the incoming configfs directory name. + */ + ptr = strstr(name, "naa."); + if (ptr) { + tport->tport_proto_id = SCSI_PROTOCOL_SAS; + goto check_len; + } + ptr = strstr(name, "fc."); + if (ptr) { + tport->tport_proto_id = SCSI_PROTOCOL_FCP; + off = 3; /* Skip over "fc." */ + goto check_len; + } + ptr = strstr(name, "iqn."); + if (ptr) { + tport->tport_proto_id = SCSI_PROTOCOL_ISCSI; + goto check_len; + } + + pr_err("Unable to locate prefix for emulated Target Port:" + " %s\n", name); + kfree(tport); + return ERR_PTR(-EINVAL); + +check_len: + if (strlen(name) >= TCM_VHOST_NAMELEN) { + pr_err("Emulated %s Address: %s, exceeds" + " max: %d\n", name, tcm_vhost_dump_proto_id(tport), + TCM_VHOST_NAMELEN); + kfree(tport); + return ERR_PTR(-EINVAL); + } + snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]); + + pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target" + " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name); + + return &tport->tport_wwn; +} + +static void tcm_vhost_drop_tport(struct se_wwn *wwn) +{ + struct tcm_vhost_tport *tport = container_of(wwn, + struct tcm_vhost_tport, tport_wwn); + + pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target" + " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), + tport->tport_name); + + kfree(tport); +} + +static ssize_t tcm_vhost_wwn_show_attr_version( + struct target_fabric_configfs *tf, + char *page) +{ + return sprintf(page, "TCM_VHOST fabric module %s on %s/%s" + "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname, + utsname()->machine); +} + +TF_WWN_ATTR_RO(tcm_vhost, version); + +static struct configfs_attribute *tcm_vhost_wwn_attrs[] = { + &tcm_vhost_wwn_version.attr, + NULL, +}; + +static struct target_core_fabric_ops tcm_vhost_ops = { + .get_fabric_name = tcm_vhost_get_fabric_name, + .get_fabric_proto_ident = tcm_vhost_get_fabric_proto_ident, + .tpg_get_wwn = tcm_vhost_get_fabric_wwn, + .tpg_get_tag = tcm_vhost_get_tag, + .tpg_get_default_depth = tcm_vhost_get_default_depth, + .tpg_get_pr_transport_id = tcm_vhost_get_pr_transport_id, + .tpg_get_pr_transport_id_len = tcm_vhost_get_pr_transport_id_len, + .tpg_parse_pr_out_transport_id = tcm_vhost_parse_pr_out_transport_id, + .tpg_check_demo_mode = tcm_vhost_check_true, + .tpg_check_demo_mode_cache = tcm_vhost_check_true, + .tpg_check_demo_mode_write_protect = tcm_vhost_check_false, + .tpg_check_prod_mode_write_protect = tcm_vhost_check_false, + .tpg_alloc_fabric_acl = tcm_vhost_alloc_fabric_acl, + .tpg_release_fabric_acl = tcm_vhost_release_fabric_acl, + .tpg_get_inst_index = tcm_vhost_tpg_get_inst_index, + .release_cmd = tcm_vhost_release_cmd, + .shutdown_session = tcm_vhost_shutdown_session, + .close_session = tcm_vhost_close_session, + .sess_get_index = tcm_vhost_sess_get_index, + .sess_get_initiator_sid = NULL, + .write_pending = tcm_vhost_write_pending, + .write_pending_status = tcm_vhost_write_pending_status, + 
.set_default_node_attributes = tcm_vhost_set_default_node_attrs, + .get_task_tag = tcm_vhost_get_task_tag, + .get_cmd_state = tcm_vhost_get_cmd_state, + .queue_data_in = tcm_vhost_queue_data_in, + .queue_status = tcm_vhost_queue_status, + .queue_tm_rsp = tcm_vhost_queue_tm_rsp, + .get_fabric_sense_len = tcm_vhost_get_fabric_sense_len, + .set_fabric_sense_len = tcm_vhost_set_fabric_sense_len, + /* + * Setup callers for generic logic in target_core_fabric_configfs.c + */ + .fabric_make_wwn = tcm_vhost_make_tport, + .fabric_drop_wwn = tcm_vhost_drop_tport, + .fabric_make_tpg = tcm_vhost_make_tpg, + .fabric_drop_tpg = tcm_vhost_drop_tpg, + .fabric_post_link = tcm_vhost_port_link, + .fabric_pre_unlink = tcm_vhost_port_unlink, + .fabric_make_np = NULL, + .fabric_drop_np = NULL, + .fabric_make_nodeacl = tcm_vhost_make_nodeacl, + .fabric_drop_nodeacl = tcm_vhost_drop_nodeacl, +}; + +static int tcm_vhost_register_configfs(void) +{ + struct target_fabric_configfs *fabric; + int ret; + + pr_debug("TCM_VHOST fabric module %s on %s/%s" + " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname, + utsname()->machine); + /* + * Register the top level struct config_item_type with TCM core + */ + fabric = target_fabric_configfs_init(THIS_MODULE, "vhost"); + if (IS_ERR(fabric)) { + pr_err("target_fabric_configfs_init() failed\n"); + return PTR_ERR(fabric); + } + /* + * Setup fabric->tf_ops from our local tcm_vhost_ops + */ + fabric->tf_ops = tcm_vhost_ops; + /* + * Setup default attribute lists for various fabric->tf_cit_tmpl + */ + TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs; + TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs; + TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + /* + * Register the fabric for use within TCM + */ + ret = target_fabric_configfs_register(fabric); + if (ret < 0) { + pr_err("target_fabric_configfs_register() failed" + " for TCM_VHOST\n"); + return ret; + } + /* + * Setup our local pointer to *fabric + */ + tcm_vhost_fabric_configfs = fabric; + pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n"); + return 0; +}; + +static void tcm_vhost_deregister_configfs(void) +{ + if (!tcm_vhost_fabric_configfs) + return; + + target_fabric_configfs_deregister(tcm_vhost_fabric_configfs); + tcm_vhost_fabric_configfs = NULL; + pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n"); +}; + +static int __init tcm_vhost_init(void) +{ + int ret = -ENOMEM; + /* + * Use our own dedicated workqueue for submitting I/O into + * target core to avoid contention within system_wq. 
+ */ + tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0); + if (!tcm_vhost_workqueue) + goto out; + + ret = vhost_scsi_register(); + if (ret < 0) + goto out_destroy_workqueue; + + ret = tcm_vhost_register_configfs(); + if (ret < 0) + goto out_vhost_scsi_deregister; + + return 0; + +out_vhost_scsi_deregister: + vhost_scsi_deregister(); +out_destroy_workqueue: + destroy_workqueue(tcm_vhost_workqueue); +out: + return ret; +}; + +static void tcm_vhost_exit(void) +{ + tcm_vhost_deregister_configfs(); + vhost_scsi_deregister(); + destroy_workqueue(tcm_vhost_workqueue); +}; + +MODULE_DESCRIPTION("TCM_VHOST series fabric driver"); +MODULE_LICENSE("GPL"); +module_init(tcm_vhost_init); +module_exit(tcm_vhost_exit); diff --git a/trunk/drivers/vhost/tcm_vhost.h b/trunk/drivers/vhost/tcm_vhost.h new file mode 100644 index 000000000000..d9e93557d669 --- /dev/null +++ b/trunk/drivers/vhost/tcm_vhost.h @@ -0,0 +1,103 @@ +#define TCM_VHOST_VERSION "v0.1" +#define TCM_VHOST_NAMELEN 256 +#define TCM_VHOST_MAX_CDB_SIZE 32 + +struct tcm_vhost_cmd { + /* Descriptor from vhost_get_vq_desc() for virt_queue segment */ + int tvc_vq_desc; + /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */ + u64 tvc_tag; + /* The number of scatterlists associated with this cmd */ + u32 tvc_sgl_count; + /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */ + u32 tvc_lun; + /* Pointer to the SGL formatted memory from virtio-scsi */ + struct scatterlist *tvc_sgl; + /* Pointer to response */ + struct virtio_scsi_cmd_resp __user *tvc_resp; + /* Pointer to vhost_scsi for our device */ + struct vhost_scsi *tvc_vhost; + /* The TCM I/O descriptor that is accessed via container_of() */ + struct se_cmd tvc_se_cmd; + /* work item used for cmwq dispatch to tcm_vhost_submission_work() */ + struct work_struct work; + /* Copy of the incoming SCSI command descriptor block (CDB) */ + unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE]; + /* Sense buffer that will be mapped into outgoing status */ + unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER]; + /* Completed commands list, serviced from vhost worker thread */ + struct list_head tvc_completion_list; +}; + +struct tcm_vhost_nexus { + /* Pointer to TCM session for I_T Nexus */ + struct se_session *tvn_se_sess; +}; + +struct tcm_vhost_nacl { + /* Binary World Wide unique Port Name for Vhost Initiator port */ + u64 iport_wwpn; + /* ASCII formatted WWPN for Sas Initiator port */ + char iport_name[TCM_VHOST_NAMELEN]; + /* Returned by tcm_vhost_make_nodeacl() */ + struct se_node_acl se_node_acl; +}; + +struct tcm_vhost_tpg { + /* Vhost port target portal group tag for TCM */ + u16 tport_tpgt; + /* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */ + int tv_tpg_port_count; + /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */ + int tv_tpg_vhost_count; + /* list for tcm_vhost_list */ + struct list_head tv_tpg_list; + /* Used to protect access for tpg_nexus */ + struct mutex tv_tpg_mutex; + /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */ + struct tcm_vhost_nexus *tpg_nexus; + /* Pointer back to tcm_vhost_tport */ + struct tcm_vhost_tport *tport; + /* Returned by tcm_vhost_make_tpg() */ + struct se_portal_group se_tpg; +}; + +struct tcm_vhost_tport { + /* SCSI protocol the tport is providing */ + u8 tport_proto_id; + /* Binary World Wide unique Port Name for Vhost Target port */ + u64 tport_wwpn; + /* ASCII formatted WWPN for Vhost Target port */ + char tport_name[TCM_VHOST_NAMELEN]; + /* Returned 
by tcm_vhost_make_tport() */ + struct se_wwn tport_wwn; +}; + +/* + * As per request from MST, keep TCM_VHOST related ioctl defines out of + * linux/vhost.h (user-space) for now.. + */ + +#include + +/* + * Used by QEMU userspace to ensure a consistent vhost-scsi ABI. + * + * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate + + * RFC-v2 vhost-scsi userspace. Add GET_ABI_VERSION ioctl usage + */ + +#define VHOST_SCSI_ABI_VERSION 0 + +struct vhost_scsi_target { + int abi_version; + char vhost_wwpn[TRANSPORT_IQN_LEN]; + unsigned short vhost_tpgt; + unsigned short reserved; +}; + +/* VHOST_SCSI specific defines */ +#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target) +#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target) +/* Changing this breaks userspace. */ +#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, int) diff --git a/trunk/drivers/video/aty/aty128fb.c b/trunk/drivers/video/aty/aty128fb.c index b0b2ac335347..747442d2c0f6 100644 --- a/trunk/drivers/video/aty/aty128fb.c +++ b/trunk/drivers/video/aty/aty128fb.c @@ -90,7 +90,8 @@ #undef DEBUG #ifdef DEBUG -#define DBG(fmt, args...) printk(KERN_DEBUG "aty128fb: %s " fmt, __func__, ##args); +#define DBG(fmt, args...) \ + printk(KERN_DEBUG "aty128fb: %s " fmt, __func__, ##args); #else #define DBG(fmt, args...) #endif @@ -449,8 +450,9 @@ static int aty128_decode_var(struct fb_var_screeninfo *var, struct aty128fb_par *par); #if 0 static void __devinit aty128_get_pllinfo(struct aty128fb_par *par, - void __iomem *bios); -static void __devinit __iomem *aty128_map_ROM(struct pci_dev *pdev, const struct aty128fb_par *par); + void __iomem *bios); +static void __devinit __iomem *aty128_map_ROM(struct pci_dev *pdev, + const struct aty128fb_par *par); #endif static void aty128_timings(struct aty128fb_par *par); static void aty128_init_engine(struct aty128fb_par *par); @@ -779,7 +781,8 @@ static u32 depth_to_dst(u32 depth) #ifndef __sparc__ -static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, struct pci_dev *dev) +static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, + struct pci_dev *dev) { u16 dptr; u8 rom_type; @@ -811,13 +814,14 @@ static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, s /* Look for the PCI data to check the ROM type */ dptr = BIOS_IN16(0x18); - /* Check the PCI data signature. If it's wrong, we still assume a normal x86 ROM - * for now, until I've verified this works everywhere. The goal here is more - * to phase out Open Firmware images. + /* Check the PCI data signature. If it's wrong, we still assume a normal + * x86 ROM for now, until I've verified this works everywhere. + * The goal here is more to phase out Open Firmware images. * - * Currently, we only look at the first PCI data, we could iteratre and deal with - * them all, and we should use fb_bios_start relative to start of image and not - * relative start of ROM, but so far, I never found a dual-image ATI card + * Currently, we only look at the first PCI data, we could iteratre and + * deal with them all, and we should use fb_bios_start relative to start + * of image and not relative start of ROM, but so far, I never found a + * dual-image ATI card. 
 *
 * typedef struct {
 *	u32	signature;	+ 0x00
@@ -852,7 +856,8 @@ static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, s
 		printk(KERN_INFO "aty128fb: Found HP PA-RISC ROM Image\n");
 		goto failed;
 	default:
-		printk(KERN_INFO "aty128fb: Found unknown type %d ROM Image\n", rom_type);
+		printk(KERN_INFO "aty128fb: Found unknown type %d ROM Image\n",
+		       rom_type);
 		goto failed;
 	}
  anyway:
@@ -863,7 +868,8 @@ static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, s
 	return NULL;
 }
 
-static void __devinit aty128_get_pllinfo(struct aty128fb_par *par, unsigned char __iomem *bios)
+static void __devinit aty128_get_pllinfo(struct aty128fb_par *par,
+					 unsigned char __iomem *bios)
 {
 	unsigned int bios_hdr;
 	unsigned int bios_pll;
@@ -1247,10 +1253,13 @@ static int aty128_crtc_to_var(const struct aty128_crtc *crtc,
 static void aty128_set_crt_enable(struct aty128fb_par *par, int on)
 {
 	if (on) {
-		aty_st_le32(CRTC_EXT_CNTL, aty_ld_le32(CRTC_EXT_CNTL) | CRT_CRTC_ON);
-		aty_st_le32(DAC_CNTL, (aty_ld_le32(DAC_CNTL) | DAC_PALETTE2_SNOOP_EN));
+		aty_st_le32(CRTC_EXT_CNTL, aty_ld_le32(CRTC_EXT_CNTL) |
+			    CRT_CRTC_ON);
+		aty_st_le32(DAC_CNTL, (aty_ld_le32(DAC_CNTL) |
+			    DAC_PALETTE2_SNOOP_EN));
 	} else
-		aty_st_le32(CRTC_EXT_CNTL, aty_ld_le32(CRTC_EXT_CNTL) & ~CRT_CRTC_ON);
+		aty_st_le32(CRTC_EXT_CNTL, aty_ld_le32(CRTC_EXT_CNTL) &
+			    ~CRT_CRTC_ON);
 }
 
 static void aty128_set_lcd_enable(struct aty128fb_par *par, int on)
@@ -1281,7 +1290,8 @@ static void aty128_set_lcd_enable(struct aty128fb_par *par, int on)
 	}
 }
 
-static void aty128_set_pll(struct aty128_pll *pll, const struct aty128fb_par *par)
+static void aty128_set_pll(struct aty128_pll *pll,
+			   const struct aty128fb_par *par)
 {
 	u32 div3;
 
@@ -1366,7 +1376,8 @@ static int aty128_var_to_pll(u32 period_in_ps, struct aty128_pll *pll,
 }
 
 
-static int aty128_pll_to_var(const struct aty128_pll *pll, struct fb_var_screeninfo *var)
+static int aty128_pll_to_var(const struct aty128_pll *pll,
+			     struct fb_var_screeninfo *var)
 {
 	var->pixclock = 100000000 / pll->vclk;
 
@@ -1512,7 +1523,8 @@ static int aty128fb_set_par(struct fb_info *info)
  *  encode/decode the User Defined Part of the Display
  */
 
-static int aty128_decode_var(struct fb_var_screeninfo *var, struct aty128fb_par *par)
+static int aty128_decode_var(struct fb_var_screeninfo *var,
+			     struct aty128fb_par *par)
 {
 	int err;
 	struct aty128_crtc crtc;
@@ -1559,7 +1571,8 @@ static int aty128_encode_var(struct fb_var_screeninfo *var,
 }
 
 
-static int aty128fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+static int aty128fb_check_var(struct fb_var_screeninfo *var,
+			      struct fb_info *info)
 {
 	struct aty128fb_par par;
 	int err;
@@ -1575,7 +1588,8 @@ static int aty128fb_check_var(struct fb_var_screeninfo *var, struct fb_info *inf
 /*
  *  Pan or Wrap the Display
  */
-static int aty128fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *fb)
+static int aty128fb_pan_display(struct fb_var_screeninfo *var,
+				struct fb_info *fb)
 {
 	struct aty128fb_par *par = fb->par;
 	u32 xoffset, yoffset;
@@ -1594,7 +1608,8 @@ static int aty128fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *f
 	par->crtc.xoffset = xoffset;
 	par->crtc.yoffset = yoffset;
 
-	offset = ((yoffset * par->crtc.vxres + xoffset)*(par->crtc.bpp >> 3)) & ~7;
+	offset = ((yoffset * par->crtc.vxres + xoffset) * (par->crtc.bpp >> 3))
+								& ~7;
 
 	if (par->crtc.bpp == 24)
 		offset += 8 * (offset % 3);  /* Must be multiple of 8 and 3 */
@@ -1620,11 +1635,13 @@ static void aty128_st_pal(u_int regno, u_int red, u_int green, u_int blue,
 		 * do mirroring
 		 */
 
-		aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) | DAC_PALETTE_ACCESS_CNTL);
+		aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) |
+			    DAC_PALETTE_ACCESS_CNTL);
 		aty_st_8(PALETTE_INDEX, regno);
 		aty_st_le32(PALETTE_DATA, (red<<16)|(green<<8)|blue);
 #endif
-		aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) & ~DAC_PALETTE_ACCESS_CNTL);
+		aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) &
+			    ~DAC_PALETTE_ACCESS_CNTL);
 	}
 
 	aty_st_8(PALETTE_INDEX, regno);
@@ -1753,7 +1770,8 @@ static int aty128_bl_update_status(struct backlight_device *bd)
 			aty_st_le32(LVDS_GEN_CNTL, reg);
 		}
 		reg &= ~LVDS_BL_MOD_LEVEL_MASK;
-		reg |= (aty128_bl_get_level_brightness(par, level) << LVDS_BL_MOD_LEVEL_SHIFT);
+		reg |= (aty128_bl_get_level_brightness(par, level) <<
+			LVDS_BL_MOD_LEVEL_SHIFT);
 #ifdef BACKLIGHT_LVDS_OFF
 		reg |= LVDS_ON | LVDS_EN;
 		reg &= ~LVDS_DISPLAY_DIS;
@@ -1764,7 +1782,8 @@ static int aty128_bl_update_status(struct backlight_device *bd)
 #endif
 	} else {
 		reg &= ~LVDS_BL_MOD_LEVEL_MASK;
-		reg |= (aty128_bl_get_level_brightness(par, 0) << LVDS_BL_MOD_LEVEL_SHIFT);
+		reg |= (aty128_bl_get_level_brightness(par, 0) <<
+			LVDS_BL_MOD_LEVEL_SHIFT);
#ifdef BACKLIGHT_LVDS_OFF
 		reg |= LVDS_DISPLAY_DIS;
 		aty_st_le32(LVDS_GEN_CNTL, reg);
@@ -1869,7 +1888,8 @@ static void aty128_early_resume(void *data)
 }
 #endif /* CONFIG_PPC_PMAC */
 
-static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int __devinit aty128_init(struct pci_dev *pdev,
+				 const struct pci_device_id *ent)
 {
 	struct fb_info *info = pci_get_drvdata(pdev);
 	struct aty128fb_par *par = info->par;
@@ -1887,7 +1907,8 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i
 
 	/* range check to make sure */
 	if (ent->driver_data < ARRAY_SIZE(r128_family))
-		strlcat(video_card, r128_family[ent->driver_data], sizeof(video_card));
+		strlcat(video_card, r128_family[ent->driver_data],
+			sizeof(video_card));
 
 	printk(KERN_INFO "aty128fb: %s [chip rev 0x%x] ", video_card, chip_rev);
 
@@ -1911,11 +1932,11 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i
 	/* Indicate sleep capability */
 	if (par->chip_gen == rage_M3) {
 		pmac_call_feature(PMAC_FTR_DEVICE_CAN_WAKE, NULL, 0, 1);
-#if 0 /* Disable the early video resume hack for now as it's causing problems, among
-       * others we now rely on the PCI core restoring the config space for us, which
-       * isn't the case with that hack, and that code path causes various things to
-       * be called with interrupts off while they shouldn't. I'm leaving the code in
-       * as it can be useful for debugging purposes
+#if 0 /* Disable the early video resume hack for now as it's causing problems,
+       * among others we now rely on the PCI core restoring the config space
+       * for us, which isn't the case with that hack, and that code path causes
+       * various things to be called with interrupts off while they shouldn't.
+       * I'm leaving the code in as it can be useful for debugging purposes
        */
 		pmac_set_early_video_resume(aty128_early_resume, par);
 #endif
@@ -1953,11 +1974,11 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i
 			default_vmode = VMODE_1152_768_60;
 
 		if (default_cmode > 16)
-		    default_cmode = CMODE_32;
+			default_cmode = CMODE_32;
 		else if (default_cmode > 8)
-		    default_cmode = CMODE_16;
+			default_cmode = CMODE_16;
 		else
-		    default_cmode = CMODE_8;
+			default_cmode = CMODE_8;
 
 		if (mac_vmode_to_var(default_vmode, default_cmode, &var))
 			var = default_var;
@@ -2018,7 +2039,8 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i
 
 #ifdef CONFIG_PCI
 /* register a card    ++ajoshi */
-static int __devinit aty128_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int __devinit aty128_probe(struct pci_dev *pdev,
+				  const struct pci_device_id *ent)
 {
 	unsigned long fb_addr, reg_addr;
 	struct aty128fb_par *par;
@@ -2318,39 +2340,39 @@ static inline void aty128_rectcopy(int srcx, int srcy, int dstx, int dsty,
				   u_int width, u_int height,
				   struct fb_info_aty128 *par)
 {
-    u32 save_dp_datatype, save_dp_cntl, dstval;
-
-    if (!width || !height)
-        return;
-
-    dstval = depth_to_dst(par->current_par.crtc.depth);
-    if (dstval == DST_24BPP) {
-        srcx *= 3;
-        dstx *= 3;
-        width *= 3;
-    } else if (dstval == -EINVAL) {
-        printk("aty128fb: invalid depth or RGBA\n");
-        return;
-    }
-
-    wait_for_fifo(2, par);
-    save_dp_datatype = aty_ld_le32(DP_DATATYPE);
-    save_dp_cntl = aty_ld_le32(DP_CNTL);
-
-    wait_for_fifo(6, par);
-    aty_st_le32(SRC_Y_X, (srcy << 16) | srcx);
-    aty_st_le32(DP_MIX, ROP3_SRCCOPY | DP_SRC_RECT);
-    aty_st_le32(DP_CNTL, DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM);
-    aty_st_le32(DP_DATATYPE, save_dp_datatype | dstval | SRC_DSTCOLOR);
-
-    aty_st_le32(DST_Y_X, (dsty << 16) | dstx);
-    aty_st_le32(DST_HEIGHT_WIDTH, (height << 16) | width);
-
-    par->blitter_may_be_busy = 1;
-
-    wait_for_fifo(2, par);
-    aty_st_le32(DP_DATATYPE, save_dp_datatype);
-    aty_st_le32(DP_CNTL, save_dp_cntl);
+	u32 save_dp_datatype, save_dp_cntl, dstval;
+
+	if (!width || !height)
+		return;
+
+	dstval = depth_to_dst(par->current_par.crtc.depth);
+	if (dstval == DST_24BPP) {
+		srcx *= 3;
+		dstx *= 3;
+		width *= 3;
+	} else if (dstval == -EINVAL) {
+		printk("aty128fb: invalid depth or RGBA\n");
+		return;
+	}
+
+	wait_for_fifo(2, par);
+	save_dp_datatype = aty_ld_le32(DP_DATATYPE);
+	save_dp_cntl = aty_ld_le32(DP_CNTL);
+
+	wait_for_fifo(6, par);
+	aty_st_le32(SRC_Y_X, (srcy << 16) | srcx);
+	aty_st_le32(DP_MIX, ROP3_SRCCOPY | DP_SRC_RECT);
+	aty_st_le32(DP_CNTL, DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM);
+	aty_st_le32(DP_DATATYPE, save_dp_datatype | dstval | SRC_DSTCOLOR);
+
+	aty_st_le32(DST_Y_X, (dsty << 16) | dstx);
+	aty_st_le32(DST_HEIGHT_WIDTH, (height << 16) | width);
+
+	par->blitter_may_be_busy = 1;
+
+	wait_for_fifo(2, par);
+	aty_st_le32(DP_DATATYPE, save_dp_datatype);
+	aty_st_le32(DP_CNTL, save_dp_cntl);
 }
 
 
@@ -2358,17 +2380,17 @@ static inline void aty128_rectcopy(int srcx, int srcy, int dstx, int dsty,
  *  Text mode accelerated functions
  */
 
-static void fbcon_aty128_bmove(struct display *p, int sy, int sx, int dy, int dx,
-                               int height, int width)
+static void fbcon_aty128_bmove(struct display *p, int sy, int sx, int dy,
+			       int dx, int height, int width)
 {
-    sx *= fontwidth(p);
-    sy *= fontheight(p);
-    dx *= fontwidth(p);
-    dy *= fontheight(p);
-    width *= fontwidth(p);
-    height *= fontheight(p);
-
-    aty128_rectcopy(sx, sy, dx, dy, width, height,
+	sx *= fontwidth(p);
+	sy *= fontheight(p);
+	dx *= fontwidth(p);
+	dy *= fontheight(p);
+	width *= fontwidth(p);
+	height *= fontheight(p);
+
+	aty128_rectcopy(sx, sy, dx, dy, width, height,
			(struct fb_info_aty128 *)p->fb_info);
 }
 #endif /* 0 */
diff --git a/trunk/drivers/video/auo_k190x.c b/trunk/drivers/video/auo_k190x.c
index 77da6a2f43dc..c03ecdd31e4c 100644
--- a/trunk/drivers/video/auo_k190x.c
+++ b/trunk/drivers/video/auo_k190x.c
@@ -987,7 +987,6 @@ int __devinit auok190x_common_probe(struct platform_device *pdev,
 	fb_dealloc_cmap(&info->cmap);
 err_cmap:
 	fb_deferred_io_cleanup(info);
-	kfree(info->fbdefio);
 err_defio:
 	vfree((void *)info->screen_base);
 err_irq:
@@ -1022,7 +1021,6 @@ int __devexit auok190x_common_remove(struct platform_device *pdev)
 
 	fb_dealloc_cmap(&info->cmap);
 	fb_deferred_io_cleanup(info);
-	kfree(info->fbdefio);
 
 	vfree((void *)info->screen_base);
 
diff --git a/trunk/drivers/video/console/bitblit.c b/trunk/drivers/video/console/bitblit.c
index 28b1a834906b..61b182bf32a2 100644
--- a/trunk/drivers/video/console/bitblit.c
+++ b/trunk/drivers/video/console/bitblit.c
@@ -162,7 +162,7 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info,
 	image.depth = 1;
 
 	if (attribute) {
-		buf = kmalloc(cellsize, GFP_KERNEL);
+		buf = kmalloc(cellsize, GFP_ATOMIC);
 		if (!buf)
 			return;
 	}
diff --git a/trunk/drivers/video/console/fbcon.c b/trunk/drivers/video/console/fbcon.c
index 2e471c22abf5..fdefa8fd72c4 100644
--- a/trunk/drivers/video/console/fbcon.c
+++ b/trunk/drivers/video/console/fbcon.c
@@ -372,8 +372,15 @@ static void fb_flashcursor(struct work_struct *work)
 	struct vc_data *vc = NULL;
 	int c;
 	int mode;
+	int ret;
+
+	/* FIXME: we should sort out the unbind locking instead */
+	/* instead we just fail to flash the cursor if we can't get
+	 * the lock instead of blocking fbcon deinit */
+	ret = console_trylock();
+	if (ret == 0)
+		return;
 
-	console_lock();
 	if (ops && ops->currcon != -1)
 		vc = vc_cons[ops->currcon].d;
 
@@ -442,7 +449,7 @@ static int __init fb_console_setup(char *this_opt)
 
 	while ((options = strsep(&this_opt, ",")) != NULL) {
 		if (!strncmp(options, "font:", 5))
-			strcpy(fontname, options + 5);
+			strlcpy(fontname, options + 5, sizeof(fontname));
 
 		if (!strncmp(options, "scrollback:", 11)) {
 			options += 11;
diff --git a/trunk/drivers/video/da8xx-fb.c b/trunk/drivers/video/da8xx-fb.c
index 47118c75a4c0..7ae9d53f2bf1 100644
--- a/trunk/drivers/video/da8xx-fb.c
+++ b/trunk/drivers/video/da8xx-fb.c
@@ -30,7 +30,10 @@
 #include 
 #include 
 #include 
+#include 
 #include 
+#include 
+#include 
 #include 