diff --git a/[refs] b/[refs] index ea31aee26c64..dd110d4de53e 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: d93515611bbc70c2fe4db232e5feb448ed8e4cc9 +refs/heads/master: 06f4e926d256d902dd9a53dcb400fd74974ce087 diff --git a/trunk/Documentation/00-INDEX b/trunk/Documentation/00-INDEX index c17cd4bb2290..1b777b960492 100644 --- a/trunk/Documentation/00-INDEX +++ b/trunk/Documentation/00-INDEX @@ -328,8 +328,6 @@ sysrq.txt - info on the magic SysRq key. telephony/ - directory with info on telephony (e.g. voice over IP) support. -uml/ - - directory with information about User Mode Linux. unicode.txt - info on the Unicode character/font mapping used in Linux. unshare.txt diff --git a/trunk/Documentation/ABI/testing/sysfs-devices-system-cpu b/trunk/Documentation/ABI/testing/sysfs-devices-system-cpu index 7564e88bfa43..e7be75b96e4b 100644 --- a/trunk/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/trunk/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -183,21 +183,21 @@ Description: Discover and change clock speed of CPUs to learn how to control the knobs. -What: /sys/devices/system/cpu/cpu*/cache/index*/cache_disable_X -Date: August 2008 +What: /sys/devices/system/cpu/cpu*/cache/index3/cache_disable_{0,1} +Date: August 2008 KernelVersion: 2.6.27 -Contact: mark.langsdorf@amd.com -Description: These files exist in every cpu's cache index directories. - There are currently 2 cache_disable_# files in each - directory. Reading from these files on a supported - processor will return that cache disable index value - for that processor and node. Writing to one of these - files will cause the specificed cache index to be disabled. - - Currently, only AMD Family 10h Processors support cache index - disable, and only for their L3 caches. See the BIOS and - Kernel Developer's Guide at - http://support.amd.com/us/Embedded_TechDocs/31116-Public-GH-BKDG_3-28_5-28-09.pdf - for formatting information and other details on the - cache index disable. -Users: joachim.deguara@amd.com +Contact: discuss@x86-64.org +Description: Disable L3 cache indices + + These files exist in every CPU's cache/index3 directory. Each + cache_disable_{0,1} file corresponds to one disable slot which + can be used to disable a cache index. Reading from these files + on a processor with this functionality will return the currently + disabled index for that node. There is one L3 structure per + node, or per internal node on MCM machines. Writing a valid + index to one of these files will cause the specificed cache + index to be disabled. + + All AMD processors with L3 caches provide this functionality. + For details, see BKDGs at + http://developer.amd.com/documentation/guides/Pages/default.aspx diff --git a/trunk/Documentation/ABI/testing/sysfs-firmware-dmi b/trunk/Documentation/ABI/testing/sysfs-firmware-dmi index ba9da9503c23..c78f9ab01e56 100644 --- a/trunk/Documentation/ABI/testing/sysfs-firmware-dmi +++ b/trunk/Documentation/ABI/testing/sysfs-firmware-dmi @@ -14,14 +14,15 @@ Description: DMI is structured as a large table of entries, where each entry has a common header indicating the type and - length of the entry, as well as 'handle' that is - supposed to be unique amongst all entries. + length of the entry, as well as a firmware-provided + 'handle' that is supposed to be unique amongst all + entries. Some entries are required by the specification, but many others are optional. 
In general though, users should never expect to find a specific entry type on their system unless they know for certain what their firmware - is doing. Machine to machine will vary. + is doing. Machine to machine experiences will vary. Multiple entries of the same type are allowed. In order to handle these duplicate entry types, each entry is @@ -67,25 +68,24 @@ Description: and the two terminating nul characters. type : The type of the entry. This value is the same as found in the directory name. It indicates - how the rest of the entry should be - interpreted. + how the rest of the entry should be interpreted. instance: The instance ordinal of the entry for the given type. This value is the same as found in the parent directory name. - position: The position of the entry within the entirety - of the entirety. + position: The ordinal position (zero-based) of the entry + within the entirety of the DMI entry table. === Entry Specialization === Some entry types may have other information available in - sysfs. + sysfs. Not all types are specialized. --- Type 15 - System Event Log --- This entry allows the firmware to export a log of events the system has taken. This information is typically backed by nvram, but the implementation - details are abstracted by this table. This entries data + details are abstracted by this table. This entry's data is exported in the directory: /sys/firmware/dmi/entries/15-0/system_event_log diff --git a/trunk/Documentation/ABI/testing/sysfs-firmware-gsmi b/trunk/Documentation/ABI/testing/sysfs-firmware-gsmi new file mode 100644 index 000000000000..0faa0aaf4b6a --- /dev/null +++ b/trunk/Documentation/ABI/testing/sysfs-firmware-gsmi @@ -0,0 +1,58 @@ +What: /sys/firmware/gsmi +Date: March 2011 +Contact: Mike Waychison +Description: + Some servers used internally at Google have firmware + that provides callback functionality via explicit SMI + triggers. Some of the callbacks are similar to those + provided by the EFI runtime services page, but due to + historical reasons this different entry-point has been + used. + + The gsmi driver implements the kernel's abstraction for + these firmware callbacks. Currently, this functionality + is limited to handling the system event log and getting + access to EFI-style variables stored in nvram. + + Layout: + + /sys/firmware/gsmi/vars: + + This directory has the same layout (and + underlying implementation as /sys/firmware/efi/vars. + See Documentation/ABI/*/sysfs-firmware-efi-vars + for more information on how to interact with + this structure. + + /sys/firmware/gsmi/append_to_eventlog - write-only: + + This file takes a binary blob and passes it onto + the firmware to be timestamped and appended to + the system eventlog. The binary format is + interpreted by the firmware and may change from + platform to platform. The only kernel-enforced + requirement is that the blob be prefixed with a + 32bit host-endian type used as part of the + firmware call. + + /sys/firmware/gsmi/clear_config - write-only: + + Writing any value to this file will cause the + entire firmware configuration to be reset to + "factory defaults". Callers should assume that + a reboot is required for the configuration to be + cleared. + + /sys/firmware/gsmi/clear_eventlog - write-only: + + This file is used to clear out a portion/the + whole of the system event log. Values written + should be values between 1 and 100 inclusive (in + ASCII) representing the fraction of the log to + clear. 
Not all platforms support fractional + clearing though, and this writes to this file + will error out if the firmware doesn't like your + submitted fraction. + + Callers should assume that a reboot is needed + for this operation to complete. diff --git a/trunk/Documentation/ABI/testing/sysfs-firmware-log b/trunk/Documentation/ABI/testing/sysfs-firmware-log new file mode 100644 index 000000000000..9b58e7c5365f --- /dev/null +++ b/trunk/Documentation/ABI/testing/sysfs-firmware-log @@ -0,0 +1,7 @@ +What: /sys/firmware/log +Date: February 2011 +Contact: Mike Waychison +Description: + The /sys/firmware/log is a binary file that represents a + read-only copy of the firmware's log if one is + available. diff --git a/trunk/Documentation/ABI/testing/sysfs-kernel-fscaps b/trunk/Documentation/ABI/testing/sysfs-kernel-fscaps new file mode 100644 index 000000000000..50a3033b5e15 --- /dev/null +++ b/trunk/Documentation/ABI/testing/sysfs-kernel-fscaps @@ -0,0 +1,8 @@ +What: /sys/kernel/fscaps +Date: February 2011 +KernelVersion: 2.6.38 +Contact: Ludwig Nussel +Description + Shows whether file system capabilities are honored + when executing a binary + diff --git a/trunk/Documentation/ABI/testing/sysfs-power b/trunk/Documentation/ABI/testing/sysfs-power index 194ca446ac28..b464d12761ba 100644 --- a/trunk/Documentation/ABI/testing/sysfs-power +++ b/trunk/Documentation/ABI/testing/sysfs-power @@ -158,3 +158,17 @@ Description: successful, will make the kernel abort a subsequent transition to a sleep state if any wakeup events are reported after the write has returned. + +What: /sys/power/reserved_size +Date: May 2011 +Contact: Rafael J. Wysocki +Description: + The /sys/power/reserved_size file allows user space to control + the amount of memory reserved for allocations made by device + drivers during the "device freeze" stage of hibernation. It can + be written a string representing a non-negative integer that + will be used as the amount of memory to reserve for allocations + made by device drivers' "freeze" callbacks, in bytes. + + Reading from this file will display the current value, which is + set to 1 MB by default. diff --git a/trunk/Documentation/DocBook/device-drivers.tmpl b/trunk/Documentation/DocBook/device-drivers.tmpl index 36f63d4a0a06..b638e50cf8f6 100644 --- a/trunk/Documentation/DocBook/device-drivers.tmpl +++ b/trunk/Documentation/DocBook/device-drivers.tmpl @@ -96,10 +96,10 @@ X!Iinclude/linux/kobject.h Device drivers infrastructure + The Basic Device Driver-Model Structures +!Iinclude/linux/device.h + Device Drivers Base - !Edrivers/base/driver.c !Edrivers/base/core.c !Edrivers/base/class.c diff --git a/trunk/Documentation/DocBook/genericirq.tmpl b/trunk/Documentation/DocBook/genericirq.tmpl index fb10fd08c05c..b3422341d65c 100644 --- a/trunk/Documentation/DocBook/genericirq.tmpl +++ b/trunk/Documentation/DocBook/genericirq.tmpl @@ -191,8 +191,8 @@ Whenever an interrupt triggers, the lowlevel arch code calls into the generic interrupt code by calling desc->handle_irq(). - This highlevel IRQ handling function only uses desc->chip primitives - referenced by the assigned chip descriptor structure. + This highlevel IRQ handling function only uses desc->irq_data.chip + primitives referenced by the assigned chip descriptor structure. 
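A hedged sketch follows (an editor's illustration, not part of the patch itself): it shows how an interrupt-controller driver might bind an irq_chip and a generic flow handler using the renamed irq_set_*() helpers from the next hunk, so that desc->handle_irq() ends up in handle_level_irq() and only uses the desc->irq_data.chip primitives described above. The my_* names are illustrative assumptions; irq_set_chip_and_handler(), irq_set_chip_data() and handle_level_irq() are existing generic-IRQ interfaces.

	#include <linux/irq.h>

	/*
	 * Hypothetical chip callbacks; a real driver would poke the
	 * controller's mask/unmask/ack registers here.
	 */
	static void my_irq_mask(struct irq_data *d)   { }
	static void my_irq_unmask(struct irq_data *d) { }
	static void my_irq_ack(struct irq_data *d)    { }

	static struct irq_chip my_chip = {
		.name       = "my-intc",
		.irq_mask   = my_irq_mask,
		.irq_unmask = my_irq_unmask,
		.irq_ack    = my_irq_ack,
	};

	/*
	 * Bind the chip and a generic flow handler to one Linux irq number.
	 * After this, desc->handle_irq() is handle_level_irq(), which only
	 * uses the desc->irq_data.chip primitives set up above.
	 */
	static void my_intc_setup_irq(unsigned int irq, void *chip_priv)
	{
		irq_set_chip_and_handler(irq, &my_chip, handle_level_irq);
		irq_set_chip_data(irq, chip_priv);
	}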
@@ -206,11 +206,11 @@ enable_irq() disable_irq_nosync() (SMP only) synchronize_irq() (SMP only) - set_irq_type() - set_irq_wake() - set_irq_data() - set_irq_chip() - set_irq_chip_data() + irq_set_irq_type() + irq_set_irq_wake() + irq_set_handler_data() + irq_set_chip() + irq_set_chip_data() See the autogenerated function documentation for details. @@ -225,6 +225,8 @@ handle_fasteoi_irq handle_simple_irq handle_percpu_irq + handle_edge_eoi_irq + handle_bad_irq The interrupt flow handlers (either predefined or architecture specific) are assigned to specific interrupts by the architecture @@ -241,13 +243,13 @@ default_enable(struct irq_data *data) { - desc->chip->irq_unmask(data); + desc->irq_data.chip->irq_unmask(data); } default_disable(struct irq_data *data) { if (!delay_disable(data)) - desc->chip->irq_mask(data); + desc->irq_data.chip->irq_mask(data); } default_ack(struct irq_data *data) @@ -284,9 +286,9 @@ noop(struct irq_data *data)) The following control flow is implemented (simplified excerpt): -desc->chip->irq_mask(); -handle_IRQ_event(desc->action); -desc->chip->irq_unmask(); +desc->irq_data.chip->irq_mask_ack(); +handle_irq_event(desc->action); +desc->irq_data.chip->irq_unmask(); @@ -300,8 +302,8 @@ desc->chip->irq_unmask(); The following control flow is implemented (simplified excerpt): -handle_IRQ_event(desc->action); -desc->chip->irq_eoi(); +handle_irq_event(desc->action); +desc->irq_data.chip->irq_eoi(); @@ -315,17 +317,17 @@ desc->chip->irq_eoi(); The following control flow is implemented (simplified excerpt): if (desc->status & running) { - desc->chip->irq_mask(); + desc->irq_data.chip->irq_mask_ack(); desc->status |= pending | masked; return; } -desc->chip->irq_ack(); +desc->irq_data.chip->irq_ack(); desc->status |= running; do { if (desc->status & masked) - desc->chip->irq_unmask(); + desc->irq_data.chip->irq_unmask(); desc->status &= ~pending; - handle_IRQ_event(desc->action); + handle_irq_event(desc->action); } while (status & pending); desc->status &= ~running; @@ -344,7 +346,7 @@ desc->status &= ~running; The following control flow is implemented (simplified excerpt): -handle_IRQ_event(desc->action); +handle_irq_event(desc->action); @@ -362,12 +364,29 @@ handle_IRQ_event(desc->action); The following control flow is implemented (simplified excerpt): -handle_IRQ_event(desc->action); -if (desc->chip->irq_eoi) - desc->chip->irq_eoi(); +if (desc->irq_data.chip->irq_ack) + desc->irq_data.chip->irq_ack(); +handle_irq_event(desc->action); +if (desc->irq_data.chip->irq_eoi) + desc->irq_data.chip->irq_eoi(); + + EOI Edge IRQ flow handler + + handle_edge_eoi_irq provides an abomination of the edge + handler which is solely used to tame a badly wrecked + irq controller on powerpc/cell. + + + + Bad IRQ flow handler + + handle_bad_irq is used for spurious interrupts which + have no real handler assigned. + + Quirks and optimizations @@ -410,6 +429,7 @@ if (desc->chip->irq_eoi) irq_mask_ack() - Optional, recommended for performance irq_mask() irq_unmask() + irq_eoi() - Optional, required for eoi flow handlers irq_retrigger() - Optional irq_set_type() - Optional irq_set_wake() - Optional @@ -424,32 +444,24 @@ if (desc->chip->irq_eoi) __do_IRQ entry point - The original implementation __do_IRQ() is an alternative entry - point for all types of interrupts. + The original implementation __do_IRQ() was an alternative entry + point for all types of interrupts. It no longer exists.
This handler turned out to be not suitable for all interrupt hardware and was therefore reimplemented with split - functionality for egde/level/simple/percpu interrupts. This is not + functionality for edge/level/simple/percpu interrupts. This is not only a functional optimization. It also shortens code paths for interrupts. - - To make use of the split implementation, replace the call to - __do_IRQ by a call to desc->handle_irq() and associate - the appropriate handler function to desc->handle_irq(). - In most cases the generic handler implementations should - be sufficient. - Locking on SMP The locking of chip registers is up to the architecture that - defines the chip primitives. There is a chip->lock field that can be used - for serialization, but the generic layer does not touch it. The per-irq - structure is protected via desc->lock, by the generic layer. + defines the chip primitives. The per-irq structure is + protected via desc->lock, by the generic layer. diff --git a/trunk/Documentation/DocBook/media-entities.tmpl b/trunk/Documentation/DocBook/media-entities.tmpl index 5d259c632cdf..fea63b45471a 100644 --- a/trunk/Documentation/DocBook/media-entities.tmpl +++ b/trunk/Documentation/DocBook/media-entities.tmpl @@ -294,6 +294,7 @@ + diff --git a/trunk/Documentation/DocBook/v4l/media-ioc-setup-link.xml b/trunk/Documentation/DocBook/v4l/media-ioc-setup-link.xml index 2331e76ded17..cec97af4dab4 100644 --- a/trunk/Documentation/DocBook/v4l/media-ioc-setup-link.xml +++ b/trunk/Documentation/DocBook/v4l/media-ioc-setup-link.xml @@ -34,7 +34,7 @@ request - MEDIA_IOC_ENUM_LINKS + MEDIA_IOC_SETUP_LINK diff --git a/trunk/Documentation/DocBook/v4l/pixfmt-y12.xml b/trunk/Documentation/DocBook/v4l/pixfmt-y12.xml new file mode 100644 index 000000000000..ff417b858cc9 --- /dev/null +++ b/trunk/Documentation/DocBook/v4l/pixfmt-y12.xml @@ -0,0 +1,79 @@ + + + V4L2_PIX_FMT_Y12 ('Y12 ') + &manvol; + + + V4L2_PIX_FMT_Y12 + Grey-scale image + + + Description + + This is a grey-scale image with a depth of 12 bits per pixel. Pixels +are stored in 16-bit words with unused high bits padded with 0. The least +significant byte is stored at lower memory addresses (little-endian). + + + <constant>V4L2_PIX_FMT_Y12</constant> 4 × 4 +pixel image + + + Byte Order. + Each cell is one byte. + + + + + + start + 0: + Y'00low + Y'00high + Y'01low + Y'01high + Y'02low + Y'02high + Y'03low + Y'03high + + + start + 8: + Y'10low + Y'10high + Y'11low + Y'11high + Y'12low + Y'12high + Y'13low + Y'13high + + + start + 16: + Y'20low + Y'20high + Y'21low + Y'21high + Y'22low + Y'22high + Y'23low + Y'23high + + + start + 24: + Y'30low + Y'30high + Y'31low + Y'31high + Y'32low + Y'32high + Y'33low + Y'33high + + + + + + + + + diff --git a/trunk/Documentation/DocBook/v4l/pixfmt.xml b/trunk/Documentation/DocBook/v4l/pixfmt.xml index c6fdcbbd1b41..40af4beb48b9 100644 --- a/trunk/Documentation/DocBook/v4l/pixfmt.xml +++ b/trunk/Documentation/DocBook/v4l/pixfmt.xml @@ -696,6 +696,7 @@ information. 
&sub-packed-yuv; &sub-grey; &sub-y10; + &sub-y12; &sub-y16; &sub-yuyv; &sub-uyvy; diff --git a/trunk/Documentation/DocBook/v4l/subdev-formats.xml b/trunk/Documentation/DocBook/v4l/subdev-formats.xml index 7041127d6dfc..d7ccd25edcc1 100644 --- a/trunk/Documentation/DocBook/v4l/subdev-formats.xml +++ b/trunk/Documentation/DocBook/v4l/subdev-formats.xml @@ -456,6 +456,23 @@ b1 b0 + + V4L2_MBUS_FMT_SGBRG8_1X8 + 0x3013 + + - + - + - + - + g7 + g6 + g5 + g4 + g3 + g2 + g1 + g0 + V4L2_MBUS_FMT_SGRBG8_1X8 0x3002 @@ -473,6 +490,23 @@ g1 g0 + + V4L2_MBUS_FMT_SRGGB8_1X8 + 0x3014 + + - + - + - + - + r7 + r6 + r5 + r4 + r3 + r2 + r1 + r0 + V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8 0x300b @@ -2159,6 +2193,31 @@ u1 u0 + + V4L2_MBUS_FMT_Y12_1X12 + 0x2013 + + - + - + - + - + - + - + - + - + y11 + y10 + y9 + y8 + y7 + y6 + y5 + y4 + y3 + y2 + y1 + y0 + V4L2_MBUS_FMT_UYVY8_1X16 0x200f diff --git a/trunk/Documentation/RCU/00-INDEX b/trunk/Documentation/RCU/00-INDEX index 71b6f500ddb9..1d7a885761f5 100644 --- a/trunk/Documentation/RCU/00-INDEX +++ b/trunk/Documentation/RCU/00-INDEX @@ -21,7 +21,7 @@ rcu.txt RTFP.txt - List of RCU papers (bibliography) going back to 1980. stallwarn.txt - - RCU CPU stall warnings (CONFIG_RCU_CPU_STALL_DETECTOR) + - RCU CPU stall warnings (module parameter rcu_cpu_stall_suppress) torture.txt - RCU Torture Test Operation (CONFIG_RCU_TORTURE_TEST) trace.txt diff --git a/trunk/Documentation/RCU/stallwarn.txt b/trunk/Documentation/RCU/stallwarn.txt index 862c08ef1fde..4e959208f736 100644 --- a/trunk/Documentation/RCU/stallwarn.txt +++ b/trunk/Documentation/RCU/stallwarn.txt @@ -1,22 +1,25 @@ Using RCU's CPU Stall Detector -The CONFIG_RCU_CPU_STALL_DETECTOR kernel config parameter enables -RCU's CPU stall detector, which detects conditions that unduly delay -RCU grace periods. The stall detector's idea of what constitutes -"unduly delayed" is controlled by a set of C preprocessor macros: +The rcu_cpu_stall_suppress module parameter enables RCU's CPU stall +detector, which detects conditions that unduly delay RCU grace periods. +This module parameter enables CPU stall detection by default, but +may be overridden via boot-time parameter or at runtime via sysfs. +The stall detector's idea of what constitutes "unduly delayed" is +controlled by a set of kernel configuration variables and cpp macros: -RCU_SECONDS_TILL_STALL_CHECK +CONFIG_RCU_CPU_STALL_TIMEOUT - This macro defines the period of time that RCU will wait from - the beginning of a grace period until it issues an RCU CPU - stall warning. This time period is normally ten seconds. + This kernel configuration parameter defines the period of time + that RCU will wait from the beginning of a grace period until it + issues an RCU CPU stall warning. This time period is normally + ten seconds. RCU_SECONDS_TILL_STALL_RECHECK This macro defines the period of time that RCU will wait after issuing a stall warning until it issues another stall warning - for the same stall. This time period is normally set to thirty - seconds. + for the same stall. This time period is normally set to three + times the check interval plus thirty seconds. RCU_STALL_RAT_DELAY diff --git a/trunk/Documentation/RCU/trace.txt b/trunk/Documentation/RCU/trace.txt index 6a8c73f55b80..c078ad48f7a1 100644 --- a/trunk/Documentation/RCU/trace.txt +++ b/trunk/Documentation/RCU/trace.txt @@ -10,34 +10,46 @@ for rcutree and next for rcutiny. 
CONFIG_TREE_RCU and CONFIG_TREE_PREEMPT_RCU debugfs Files and Formats -These implementations of RCU provides five debugfs files under the -top-level directory RCU: rcu/rcudata (which displays fields in struct -rcu_data), rcu/rcudata.csv (which is a .csv spreadsheet version of -rcu/rcudata), rcu/rcugp (which displays grace-period counters), -rcu/rcuhier (which displays the struct rcu_node hierarchy), and -rcu/rcu_pending (which displays counts of the reasons that the -rcu_pending() function decided that there was core RCU work to do). +These implementations of RCU provides several debugfs files under the +top-level directory "rcu": + +rcu/rcudata: + Displays fields in struct rcu_data. +rcu/rcudata.csv: + Comma-separated values spreadsheet version of rcudata. +rcu/rcugp: + Displays grace-period counters. +rcu/rcuhier: + Displays the struct rcu_node hierarchy. +rcu/rcu_pending: + Displays counts of the reasons rcu_pending() decided that RCU had + work to do. +rcu/rcutorture: + Displays rcutorture test progress. +rcu/rcuboost: + Displays RCU boosting statistics. Only present if + CONFIG_RCU_BOOST=y. The output of "cat rcu/rcudata" looks as follows: rcu_sched: - 0 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=10951/1 dn=0 df=1101 of=0 ri=36 ql=0 b=10 - 1 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=16117/1 dn=0 df=1015 of=0 ri=0 ql=0 b=10 - 2 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=1445/1 dn=0 df=1839 of=0 ri=0 ql=0 b=10 - 3 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=6681/1 dn=0 df=1545 of=0 ri=0 ql=0 b=10 - 4 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=1003/1 dn=0 df=1992 of=0 ri=0 ql=0 b=10 - 5 c=17829 g=17830 pq=1 pqc=17829 qp=1 dt=3887/1 dn=0 df=3331 of=0 ri=4 ql=2 b=10 - 6 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=859/1 dn=0 df=3224 of=0 ri=0 ql=0 b=10 - 7 c=17829 g=17830 pq=0 pqc=17829 qp=1 dt=3761/1 dn=0 df=1818 of=0 ri=0 ql=2 b=10 + 0 c=20972 g=20973 pq=1 pqc=20972 qp=0 dt=545/1/0 df=50 of=0 ri=0 ql=163 qs=NRW. kt=0/W/0 ktl=ebc3 b=10 ci=153737 co=0 ca=0 + 1 c=20972 g=20973 pq=1 pqc=20972 qp=0 dt=967/1/0 df=58 of=0 ri=0 ql=634 qs=NRW. kt=0/W/1 ktl=58c b=10 ci=191037 co=0 ca=0 + 2 c=20972 g=20973 pq=1 pqc=20972 qp=0 dt=1081/1/0 df=175 of=0 ri=0 ql=74 qs=N.W. kt=0/W/2 ktl=da94 b=10 ci=75991 co=0 ca=0 + 3 c=20942 g=20943 pq=1 pqc=20942 qp=1 dt=1846/0/0 df=404 of=0 ri=0 ql=0 qs=.... kt=0/W/3 ktl=d1cd b=10 ci=72261 co=0 ca=0 + 4 c=20972 g=20973 pq=1 pqc=20972 qp=0 dt=369/1/0 df=83 of=0 ri=0 ql=48 qs=N.W. kt=0/W/4 ktl=e0e7 b=10 ci=128365 co=0 ca=0 + 5 c=20972 g=20973 pq=1 pqc=20972 qp=0 dt=381/1/0 df=64 of=0 ri=0 ql=169 qs=NRW. kt=0/W/5 ktl=fb2f b=10 ci=164360 co=0 ca=0 + 6 c=20972 g=20973 pq=1 pqc=20972 qp=0 dt=1037/1/0 df=183 of=0 ri=0 ql=62 qs=N.W. kt=0/W/6 ktl=d2ad b=10 ci=65663 co=0 ca=0 + 7 c=20897 g=20897 pq=1 pqc=20896 qp=0 dt=1572/0/0 df=382 of=0 ri=0 ql=0 qs=.... kt=0/W/7 ktl=cf15 b=10 ci=75006 co=0 ca=0 rcu_bh: - 0 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=10951/1 dn=0 df=0 of=0 ri=0 ql=0 b=10 - 1 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=16117/1 dn=0 df=13 of=0 ri=0 ql=0 b=10 - 2 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=1445/1 dn=0 df=15 of=0 ri=0 ql=0 b=10 - 3 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=6681/1 dn=0 df=9 of=0 ri=0 ql=0 b=10 - 4 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=1003/1 dn=0 df=15 of=0 ri=0 ql=0 b=10 - 5 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=3887/1 dn=0 df=15 of=0 ri=0 ql=0 b=10 - 6 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=859/1 dn=0 df=15 of=0 ri=0 ql=0 b=10 - 7 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=3761/1 dn=0 df=15 of=0 ri=0 ql=0 b=10 + 0 c=1480 g=1480 pq=1 pqc=1479 qp=0 dt=545/1/0 df=6 of=0 ri=1 ql=0 qs=.... 
kt=0/W/0 ktl=ebc3 b=10 ci=0 co=0 ca=0 + 1 c=1480 g=1480 pq=1 pqc=1479 qp=0 dt=967/1/0 df=3 of=0 ri=1 ql=0 qs=.... kt=0/W/1 ktl=58c b=10 ci=151 co=0 ca=0 + 2 c=1480 g=1480 pq=1 pqc=1479 qp=0 dt=1081/1/0 df=6 of=0 ri=1 ql=0 qs=.... kt=0/W/2 ktl=da94 b=10 ci=0 co=0 ca=0 + 3 c=1480 g=1480 pq=1 pqc=1479 qp=0 dt=1846/0/0 df=8 of=0 ri=1 ql=0 qs=.... kt=0/W/3 ktl=d1cd b=10 ci=0 co=0 ca=0 + 4 c=1480 g=1480 pq=1 pqc=1479 qp=0 dt=369/1/0 df=6 of=0 ri=1 ql=0 qs=.... kt=0/W/4 ktl=e0e7 b=10 ci=0 co=0 ca=0 + 5 c=1480 g=1480 pq=1 pqc=1479 qp=0 dt=381/1/0 df=4 of=0 ri=1 ql=0 qs=.... kt=0/W/5 ktl=fb2f b=10 ci=0 co=0 ca=0 + 6 c=1480 g=1480 pq=1 pqc=1479 qp=0 dt=1037/1/0 df=6 of=0 ri=1 ql=0 qs=.... kt=0/W/6 ktl=d2ad b=10 ci=0 co=0 ca=0 + 7 c=1474 g=1474 pq=1 pqc=1473 qp=0 dt=1572/0/0 df=8 of=0 ri=1 ql=0 qs=.... kt=0/W/7 ktl=cf15 b=10 ci=0 co=0 ca=0 The first section lists the rcu_data structures for rcu_sched, the second for rcu_bh. Note that CONFIG_TREE_PREEMPT_RCU kernels will have an @@ -52,17 +64,18 @@ o The number at the beginning of each line is the CPU number. substantially larger than the number of actual CPUs. o "c" is the count of grace periods that this CPU believes have - completed. CPUs in dynticks idle mode may lag quite a ways - behind, for example, CPU 4 under "rcu_sched" above, which has - slept through the past 25 RCU grace periods. It is not unusual - to see CPUs lagging by thousands of grace periods. + completed. Offlined CPUs and CPUs in dynticks idle mode may + lag quite a ways behind, for example, CPU 6 under "rcu_sched" + above, which has been offline through not quite 40,000 RCU grace + periods. It is not unusual to see CPUs lagging by thousands of + grace periods. o "g" is the count of grace periods that this CPU believes have - started. Again, CPUs in dynticks idle mode may lag behind. - If the "c" and "g" values are equal, this CPU has already - reported a quiescent state for the last RCU grace period that - it is aware of, otherwise, the CPU believes that it owes RCU a - quiescent state. + started. Again, offlined CPUs and CPUs in dynticks idle mode + may lag behind. If the "c" and "g" values are equal, this CPU + has already reported a quiescent state for the last RCU grace + period that it is aware of, otherwise, the CPU believes that it + owes RCU a quiescent state. o "pq" indicates that this CPU has passed through a quiescent state for the current grace period. It is possible for "pq" to be @@ -81,7 +94,8 @@ o "pqc" indicates which grace period the last-observed quiescent the next grace period! o "qp" indicates that RCU still expects a quiescent state from - this CPU. + this CPU. Offlined CPUs and CPUs in dyntick idle mode might + well have qp=1, which is OK: RCU is still ignoring them. o "dt" is the current value of the dyntick counter that is incremented when entering or leaving dynticks idle state, either by the @@ -108,7 +122,7 @@ o "df" is the number of times that some other CPU has forced a o "of" is the number of times that some other CPU has forced a quiescent state on behalf of this CPU due to this CPU being - offline. In a perfect world, this might neve happen, but it + offline. In a perfect world, this might never happen, but it turns out that offlining and onlining a CPU can take several grace periods, and so there is likely to be an extended period of time when RCU believes that the CPU is online when it really is not. 
@@ -125,6 +139,62 @@ o "ql" is the number of RCU callbacks currently residing on of what state they are in (new, waiting for grace period to start, waiting for grace period to end, ready to invoke). +o "qs" gives an indication of the state of the callback queue + with four characters: + + "N" Indicates that there are callbacks queued that are not + ready to be handled by the next grace period, and thus + will be handled by the grace period following the next + one. + + "R" Indicates that there are callbacks queued that are + ready to be handled by the next grace period. + + "W" Indicates that there are callbacks queued that are + waiting on the current grace period. + + "D" Indicates that there are callbacks queued that have + already been handled by a prior grace period, and are + thus waiting to be invoked. Note that callbacks in + the process of being invoked are not counted here. + Callbacks in the process of being invoked are those + that have been removed from the rcu_data structures + queues by rcu_do_batch(), but which have not yet been + invoked. + + If there are no callbacks in a given one of the above states, + the corresponding character is replaced by ".". + +o "kt" is the per-CPU kernel-thread state. The digit preceding + the first slash is zero if there is no work pending and 1 + otherwise. The character between the first pair of slashes is + as follows: + + "S" The kernel thread is stopped, in other words, all + CPUs corresponding to this rcu_node structure are + offline. + + "R" The kernel thread is running. + + "W" The kernel thread is waiting because there is no work + for it to do. + + "O" The kernel thread is waiting because it has been + forced off of its designated CPU or because its + ->cpus_allowed mask permits it to run on other than + its designated CPU. + + "Y" The kernel thread is yielding to avoid hogging CPU. + + "?" Unknown value, indicates a bug. + + The number after the final slash is the CPU that the kthread + is actually running on. + +o "ktl" is the low-order 16 bits (in hexadecimal) of the count of + the number of times that this CPU's per-CPU kthread has gone + through its loop servicing invoke_rcu_cpu_kthread() requests. + o "b" is the batch limit for this CPU. If more than this number of RCU callbacks is ready to invoke, then the remainder will be deferred. @@ -174,14 +244,14 @@ o "gpnum" is the number of grace periods that have started. It is The output of "cat rcu/rcuhier" looks as follows, with very long lines: c=6902 g=6903 s=2 jfq=3 j=72c7 nfqs=13142/nfqsng=0(13142) fqlh=6 -1/1 .>. 0:127 ^0 -3/3 .>. 0:35 ^0 0/0 .>. 36:71 ^1 0/0 .>. 72:107 ^2 0/0 .>. 108:127 ^3 -3/3f .>. 0:5 ^0 2/3 .>. 6:11 ^1 0/0 .>. 12:17 ^2 0/0 .>. 18:23 ^3 0/0 .>. 24:29 ^4 0/0 .>. 30:35 ^5 0/0 .>. 36:41 ^0 0/0 .>. 42:47 ^1 0/0 .>. 48:53 ^2 0/0 .>. 54:59 ^3 0/0 .>. 60:65 ^4 0/0 .>. 66:71 ^5 0/0 .>. 72:77 ^0 0/0 .>. 78:83 ^1 0/0 .>. 84:89 ^2 0/0 .>. 90:95 ^3 0/0 .>. 96:101 ^4 0/0 .>. 102:107 ^5 0/0 .>. 108:113 ^0 0/0 .>. 114:119 ^1 0/0 .>. 120:125 ^2 0/0 .>. 126:127 ^3 +1/1 ..>. 0:127 ^0 +3/3 ..>. 0:35 ^0 0/0 ..>. 36:71 ^1 0/0 ..>. 72:107 ^2 0/0 ..>. 108:127 ^3 +3/3f ..>. 0:5 ^0 2/3 ..>. 6:11 ^1 0/0 ..>. 12:17 ^2 0/0 ..>. 18:23 ^3 0/0 ..>. 24:29 ^4 0/0 ..>. 30:35 ^5 0/0 ..>. 36:41 ^0 0/0 ..>. 42:47 ^1 0/0 ..>. 48:53 ^2 0/0 ..>. 54:59 ^3 0/0 ..>. 60:65 ^4 0/0 ..>. 66:71 ^5 0/0 ..>. 72:77 ^0 0/0 ..>. 78:83 ^1 0/0 ..>. 84:89 ^2 0/0 ..>. 90:95 ^3 0/0 ..>. 96:101 ^4 0/0 ..>. 102:107 ^5 0/0 ..>. 108:113 ^0 0/0 ..>. 114:119 ^1 0/0 ..>. 120:125 ^2 0/0 ..>. 
126:127 ^3 rcu_bh: c=-226 g=-226 s=1 jfq=-5701 j=72c7 nfqs=88/nfqsng=0(88) fqlh=0 -0/1 .>. 0:127 ^0 -0/3 .>. 0:35 ^0 0/0 .>. 36:71 ^1 0/0 .>. 72:107 ^2 0/0 .>. 108:127 ^3 -0/3f .>. 0:5 ^0 0/3 .>. 6:11 ^1 0/0 .>. 12:17 ^2 0/0 .>. 18:23 ^3 0/0 .>. 24:29 ^4 0/0 .>. 30:35 ^5 0/0 .>. 36:41 ^0 0/0 .>. 42:47 ^1 0/0 .>. 48:53 ^2 0/0 .>. 54:59 ^3 0/0 .>. 60:65 ^4 0/0 .>. 66:71 ^5 0/0 .>. 72:77 ^0 0/0 .>. 78:83 ^1 0/0 .>. 84:89 ^2 0/0 .>. 90:95 ^3 0/0 .>. 96:101 ^4 0/0 .>. 102:107 ^5 0/0 .>. 108:113 ^0 0/0 .>. 114:119 ^1 0/0 .>. 120:125 ^2 0/0 .>. 126:127 ^3 +0/1 ..>. 0:127 ^0 +0/3 ..>. 0:35 ^0 0/0 ..>. 36:71 ^1 0/0 ..>. 72:107 ^2 0/0 ..>. 108:127 ^3 +0/3f ..>. 0:5 ^0 0/3 ..>. 6:11 ^1 0/0 ..>. 12:17 ^2 0/0 ..>. 18:23 ^3 0/0 ..>. 24:29 ^4 0/0 ..>. 30:35 ^5 0/0 ..>. 36:41 ^0 0/0 ..>. 42:47 ^1 0/0 ..>. 48:53 ^2 0/0 ..>. 54:59 ^3 0/0 ..>. 60:65 ^4 0/0 ..>. 66:71 ^5 0/0 ..>. 72:77 ^0 0/0 ..>. 78:83 ^1 0/0 ..>. 84:89 ^2 0/0 ..>. 90:95 ^3 0/0 ..>. 96:101 ^4 0/0 ..>. 102:107 ^5 0/0 ..>. 108:113 ^0 0/0 ..>. 114:119 ^1 0/0 ..>. 120:125 ^2 0/0 ..>. 126:127 ^3 This is once again split into "rcu_sched" and "rcu_bh" portions, and CONFIG_TREE_PREEMPT_RCU kernels will again have an additional @@ -240,13 +310,20 @@ o Each element of the form "1/1 0:127 ^0" represents one struct current grace period. o The characters separated by the ">" indicate the state - of the blocked-tasks lists. A "T" preceding the ">" + of the blocked-tasks lists. A "G" preceding the ">" indicates that at least one task blocked in an RCU read-side critical section blocks the current grace - period, while a "." preceding the ">" indicates otherwise. - The character following the ">" indicates similarly for - the next grace period. A "T" should appear in this - field only for rcu-preempt. + period, while a "E" preceding the ">" indicates that + at least one task blocked in an RCU read-side critical + section blocks the current expedited grace period. + A "T" character following the ">" indicates that at + least one task is blocked within an RCU read-side + critical section, regardless of whether any current + grace period (expedited or normal) is inconvenienced. + A "." character appears if the corresponding condition + does not hold, so that "..>." indicates that no tasks + are blocked. In contrast, "GE>T" indicates maximal + inconvenience from blocked tasks. o The numbers separated by the ":" are the range of CPUs served by this struct rcu_node. This can be helpful @@ -328,6 +405,113 @@ o "nn" is the number of times that this CPU needed nothing. Alert is due to short-circuit evaluation in rcu_pending(). +The output of "cat rcu/rcutorture" looks as follows: + +rcutorture test sequence: 0 (test in progress) +rcutorture update version number: 615 + +The first line shows the number of rcutorture tests that have completed +since boot. If a test is currently running, the "(test in progress)" +string will appear as shown above. The second line shows the number of +update cycles that the current test has started, or zero if there is +no test in progress. + + +The output of "cat rcu/rcuboost" looks as follows: + +0:5 tasks=.... kt=W ntb=0 neb=0 nnb=0 j=2f95 bt=300f + balk: nt=0 egt=989 bt=0 nb=0 ny=0 nos=16 +6:7 tasks=.... kt=W ntb=0 neb=0 nnb=0 j=2f95 bt=300f + balk: nt=0 egt=225 bt=0 nb=0 ny=0 nos=6 + +This information is output only for rcu_preempt. Each two-line entry +corresponds to a leaf rcu_node strcuture. The fields are as follows: + +o "n:m" is the CPU-number range for the corresponding two-line + entry. 
In the sample output above, the first entry covers + CPUs zero through five and the second entry covers CPUs 6 + and 7. + +o "tasks=TNEB" gives the state of the various segments of the + rnp->blocked_tasks list: + + "T" This indicates that there are some tasks that blocked + while running on one of the corresponding CPUs while + in an RCU read-side critical section. + + "N" This indicates that some of the blocked tasks are preventing + the current normal (non-expedited) grace period from + completing. + + "E" This indicates that some of the blocked tasks are preventing + the current expedited grace period from completing. + + "B" This indicates that some of the blocked tasks are in + need of RCU priority boosting. + + Each character is replaced with "." if the corresponding + condition does not hold. + +o "kt" is the state of the RCU priority-boosting kernel + thread associated with the corresponding rcu_node structure. + The state can be one of the following: + + "S" The kernel thread is stopped, in other words, all + CPUs corresponding to this rcu_node structure are + offline. + + "R" The kernel thread is running. + + "W" The kernel thread is waiting because there is no work + for it to do. + + "Y" The kernel thread is yielding to avoid hogging CPU. + + "?" Unknown value, indicates a bug. + +o "ntb" is the number of tasks boosted. + +o "neb" is the number of tasks boosted in order to complete an + expedited grace period. + +o "nnb" is the number of tasks boosted in order to complete a + normal (non-expedited) grace period. When boosting a task + that was blocking both an expedited and a normal grace period, + it is counted against the expedited total above. + +o "j" is the low-order 16 bits of the jiffies counter in + hexadecimal. + +o "bt" is the low-order 16 bits of the value that the jiffies + counter will have when we next start boosting, assuming that + the current grace period does not end beforehand. This is + also in hexadecimal. + +o "balk: nt" counts the number of times we didn't boost (in + other words, we balked) even though it was time to boost because + there were no blocked tasks to boost. This situation occurs + when there is one blocked task on one rcu_node structure and + none on some other rcu_node structure. + +o "egt" counts the number of times we balked because although + there were blocked tasks, none of them were blocking the + current grace period, whether expedited or otherwise. + +o "bt" counts the number of times we balked because boosting + had already been initiated for the current grace period. + +o "nb" counts the number of times we balked because there + was at least one task blocking the current non-expedited grace + period that never had blocked. If it is already running, it + just won't help to boost its priority! + +o "ny" counts the number of times we balked because it was + not yet time to start boosting. + +o "nos" counts the number of times we balked for other + reasons, e.g., the grace period ended first. + + CONFIG_TINY_RCU and CONFIG_TINY_PREEMPT_RCU debugfs Files and Formats These implementations of RCU provides a single debugfs file under the @@ -394,9 +578,9 @@ o "neb" is the number of expedited grace periods that have had o "nnb" is the number of normal grace periods that have had to resort to RCU priority boosting since boot. -o "j" is the low-order 12 bits of the jiffies counter in hexadecimal. +o "j" is the low-order 16 bits of the jiffies counter in hexadecimal. 
-o "bt" is the low-order 12 bits of the value that the jiffies counter +o "bt" is the low-order 16 bits of the value that the jiffies counter will have at the next time that boosting is scheduled to begin. o In the line beginning with "normal balk", the fields are as follows: diff --git a/trunk/Documentation/cgroups/memory.txt b/trunk/Documentation/cgroups/memory.txt index b6ed61c95856..7c163477fcd8 100644 --- a/trunk/Documentation/cgroups/memory.txt +++ b/trunk/Documentation/cgroups/memory.txt @@ -52,8 +52,10 @@ Brief summary of control files. tasks # attach a task(thread) and show list of threads cgroup.procs # show list of processes cgroup.event_control # an interface for event_fd() - memory.usage_in_bytes # show current memory(RSS+Cache) usage. - memory.memsw.usage_in_bytes # show current memory+Swap usage + memory.usage_in_bytes # show current res_counter usage for memory + (See 5.5 for details) + memory.memsw.usage_in_bytes # show current res_counter usage for memory+Swap + (See 5.5 for details) memory.limit_in_bytes # set/show limit of memory usage memory.memsw.limit_in_bytes # set/show limit of memory+Swap usage memory.failcnt # show the number of memory usage hits limits @@ -453,6 +455,15 @@ memory under it will be reclaimed. You can reset failcnt by writing 0 to failcnt file. # echo 0 > .../memory.failcnt +5.5 usage_in_bytes + +For efficiency, as other kernel components, memory cgroup uses some optimization +to avoid unnecessary cacheline false sharing. usage_in_bytes is affected by the +method and doesn't show 'exact' value of memory(and swap) usage, it's an fuzz +value for efficient access. (Of course, when necessary, it's synchronized.) +If you want to know more exact memory usage, you should use RSS+CACHE(+SWAP) +value in memory.stat(see 5.2). + 6. Hierarchy support The memory controller supports a deep hierarchy and hierarchical accounting. diff --git a/trunk/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt b/trunk/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt new file mode 100755 index 000000000000..1a729f089866 --- /dev/null +++ b/trunk/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt @@ -0,0 +1,61 @@ +CAN Device Tree Bindings +------------------------ +2011 Freescale Semiconductor, Inc. + +fsl,flexcan-v1.0 nodes +----------------------- +In addition to the required compatible-, reg- and interrupt-properties, you can +also specify which clock source shall be used for the controller. + +CPI Clock- Can Protocol Interface Clock + This CLK_SRC bit of CTRL(control register) selects the clock source to + the CAN Protocol Interface(CPI) to be either the peripheral clock + (driven by the PLL) or the crystal oscillator clock. The selected clock + is the one fed to the prescaler to generate the Serial Clock (Sclock). + The PRESDIV field of CTRL(control register) controls a prescaler that + generates the Serial Clock (Sclock), whose period defines the + time quantum used to compose the CAN waveform. + +Can Engine Clock Source + There are two sources for CAN clock + - Platform Clock It represents the bus clock + - Oscillator Clock + + Peripheral Clock (PLL) + -------------- + | + --------- ------------- + | |CPI Clock | Prescaler | Sclock + | |---------------->| (1.. 256) |------------> + --------- ------------- + | | + -------------- ---------------------CLK_SRC + Oscillator Clock + +- fsl,flexcan-clock-source : CAN Engine Clock Source.This property selects + the peripheral clock. PLL clock is fed to the + prescaler to generate the Serial Clock (Sclock). 
+ Valid values are "oscillator" and "platform" + "oscillator": CAN engine clock source is oscillator clock. + "platform" The CAN engine clock source is the bus clock + (platform clock). + +- fsl,flexcan-clock-divider : for the reference and system clock, an additional + clock divider can be specified. +- clock-frequency: frequency required to calculate the bitrate for FlexCAN. + +Note: + - v1.0 of flexcan-v1.0 represent the IP block version for P1010 SOC. + - P1010 does not have oscillator as the Clock Source.So the default + Clock Source is platform clock. +Examples: + + can0@1c000 { + compatible = "fsl,flexcan-v1.0"; + reg = <0x1c000 0x1000>; + interrupts = <48 0x2>; + interrupt-parent = <&mpic>; + fsl,flexcan-clock-source = "platform"; + fsl,flexcan-clock-divider = <2>; + clock-frequency = ; + }; diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/ifc.txt b/trunk/Documentation/devicetree/bindings/powerpc/fsl/ifc.txt new file mode 100644 index 000000000000..939a26d541f6 --- /dev/null +++ b/trunk/Documentation/devicetree/bindings/powerpc/fsl/ifc.txt @@ -0,0 +1,76 @@ +Integrated Flash Controller + +Properties: +- name : Should be ifc +- compatible : should contain "fsl,ifc". The version of the integrated + flash controller can be found in the IFC_REV register at + offset zero. + +- #address-cells : Should be either two or three. The first cell is the + chipselect number, and the remaining cells are the + offset into the chipselect. +- #size-cells : Either one or two, depending on how large each chipselect + can be. +- reg : Offset and length of the register set for the device +- interrupts : IFC has two interrupts. The first one is the "common" + interrupt(CM_EVTER_STAT), and second is the NAND interrupt + (NAND_EVTER_STAT). + +- ranges : Each range corresponds to a single chipselect, and covers + the entire access window as configured. + +Child device nodes describe the devices connected to IFC such as NOR (e.g. +cfi-flash) and NAND (fsl,ifc-nand). There might be board specific devices +like FPGAs, CPLDs, etc. + +Example: + + ifc@ffe1e000 { + compatible = "fsl,ifc", "simple-bus"; + #address-cells = <2>; + #size-cells = <1>; + reg = <0x0 0xffe1e000 0 0x2000>; + interrupts = <16 2 19 2>; + + /* NOR, NAND Flashes and CPLD on board */ + ranges = <0x0 0x0 0x0 0xee000000 0x02000000 + 0x1 0x0 0x0 0xffa00000 0x00010000 + 0x3 0x0 0x0 0xffb00000 0x00020000>; + + flash@0,0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "cfi-flash"; + reg = <0x0 0x0 0x2000000>; + bank-width = <2>; + device-width = <1>; + + partition@0 { + /* 32MB for user data */ + reg = <0x0 0x02000000>; + label = "NOR Data"; + }; + }; + + flash@1,0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,ifc-nand"; + reg = <0x1 0x0 0x10000>; + + partition@0 { + /* This location must not be altered */ + /* 1MB for u-boot Bootloader Image */ + reg = <0x0 0x00100000>; + label = "NAND U-Boot Image"; + read-only; + }; + }; + + cpld@3,0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,p1010rdb-cpld"; + reg = <0x3 0x0 0x000001f>; + }; + }; diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/mpic-timer.txt b/trunk/Documentation/devicetree/bindings/powerpc/fsl/mpic-timer.txt new file mode 100644 index 000000000000..df41958140e8 --- /dev/null +++ b/trunk/Documentation/devicetree/bindings/powerpc/fsl/mpic-timer.txt @@ -0,0 +1,38 @@ +* Freescale MPIC timers + +Required properties: +- compatible: "fsl,mpic-global-timer" + +- reg : Contains two regions. 
The first is the main timer register bank + (GTCCRxx, GTBCRxx, GTVPRxx, GTDRxx). The second is the timer control + register (TCRx) for the group. + +- fsl,available-ranges: use style section to define which + timer interrupts can be used. This property is optional; without this, + all timers within the group can be used. + +- interrupts: one interrupt per timer in the group, in order, starting + with timer zero. If timer-available-ranges is present, only the + interrupts that correspond to available timers shall be present. + +Example: + /* Note that this requires #interrupt-cells to be 4 */ + timer0: timer@41100 { + compatible = "fsl,mpic-global-timer"; + reg = <0x41100 0x100 0x41300 4>; + + /* Another AMP partition is using timers 0 and 1 */ + fsl,available-ranges = <2 2>; + + interrupts = <2 0 3 0 + 3 0 3 0>; + }; + + timer1: timer@42100 { + compatible = "fsl,mpic-global-timer"; + reg = <0x42100 0x100 0x42300 4>; + interrupts = <4 0 3 0 + 5 0 3 0 + 6 0 3 0 + 7 0 3 0>; + }; diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/mpic.txt b/trunk/Documentation/devicetree/bindings/powerpc/fsl/mpic.txt index 4f6145859aab..2cf38bd841fd 100644 --- a/trunk/Documentation/devicetree/bindings/powerpc/fsl/mpic.txt +++ b/trunk/Documentation/devicetree/bindings/powerpc/fsl/mpic.txt @@ -190,7 +190,7 @@ EXAMPLE 4 */ timer0: timer@41100 { compatible = "fsl,mpic-global-timer"; - reg = <0x41100 0x100>; + reg = <0x41100 0x100 0x41300 4>; interrupts = <0 0 3 0 1 0 3 0 2 0 3 0 diff --git a/trunk/Documentation/driver-model/bus.txt b/trunk/Documentation/driver-model/bus.txt index 5001b7511626..6754b2df8aa1 100644 --- a/trunk/Documentation/driver-model/bus.txt +++ b/trunk/Documentation/driver-model/bus.txt @@ -3,24 +3,7 @@ Bus Types Definition ~~~~~~~~~~ - -struct bus_type { - char * name; - - struct subsystem subsys; - struct kset drivers; - struct kset devices; - - struct bus_attribute * bus_attrs; - struct device_attribute * dev_attrs; - struct driver_attribute * drv_attrs; - - int (*match)(struct device * dev, struct device_driver * drv); - int (*hotplug) (struct device *dev, char **envp, - int num_envp, char *buffer, int buffer_size); - int (*suspend)(struct device * dev, pm_message_t state); - int (*resume)(struct device * dev); -}; +See the kerneldoc for the struct bus_type. int bus_register(struct bus_type * bus); diff --git a/trunk/Documentation/driver-model/class.txt b/trunk/Documentation/driver-model/class.txt index 548505f14aa4..1fefc480a80b 100644 --- a/trunk/Documentation/driver-model/class.txt +++ b/trunk/Documentation/driver-model/class.txt @@ -27,22 +27,7 @@ The device class structure looks like: typedef int (*devclass_add)(struct device *); typedef void (*devclass_remove)(struct device *); -struct device_class { - char * name; - rwlock_t lock; - u32 devnum; - struct list_head node; - - struct list_head drivers; - struct list_head intf_list; - - struct driver_dir_entry dir; - struct driver_dir_entry device_dir; - struct driver_dir_entry driver_dir; - - devclass_add add_device; - devclass_remove remove_device; -}; +See the kerneldoc for the struct class. 
A typical device class definition would look like: diff --git a/trunk/Documentation/driver-model/device.txt b/trunk/Documentation/driver-model/device.txt index a124f3126b0d..b2ff42685bcb 100644 --- a/trunk/Documentation/driver-model/device.txt +++ b/trunk/Documentation/driver-model/device.txt @@ -2,96 +2,7 @@ The Basic Device Structure ~~~~~~~~~~~~~~~~~~~~~~~~~~ -struct device { - struct list_head g_list; - struct list_head node; - struct list_head bus_list; - struct list_head driver_list; - struct list_head intf_list; - struct list_head children; - struct device * parent; - - char name[DEVICE_NAME_SIZE]; - char bus_id[BUS_ID_SIZE]; - - spinlock_t lock; - atomic_t refcount; - - struct bus_type * bus; - struct driver_dir_entry dir; - - u32 class_num; - - struct device_driver *driver; - void *driver_data; - void *platform_data; - - u32 current_state; - unsigned char *saved_state; - - void (*release)(struct device * dev); -}; - -Fields -~~~~~~ -g_list: Node in the global device list. - -node: Node in device's parent's children list. - -bus_list: Node in device's bus's devices list. - -driver_list: Node in device's driver's devices list. - -intf_list: List of intf_data. There is one structure allocated for - each interface that the device supports. - -children: List of child devices. - -parent: *** FIXME *** - -name: ASCII description of device. - Example: " 3Com Corporation 3c905 100BaseTX [Boomerang]" - -bus_id: ASCII representation of device's bus position. This - field should be a name unique across all devices on the - bus type the device belongs to. - - Example: PCI bus_ids are in the form of - :. - This name is unique across all PCI devices in the system. - -lock: Spinlock for the device. - -refcount: Reference count on the device. - -bus: Pointer to struct bus_type that device belongs to. - -dir: Device's sysfs directory. - -class_num: Class-enumerated value of the device. - -driver: Pointer to struct device_driver that controls the device. - -driver_data: Driver-specific data. - -platform_data: Platform data specific to the device. - - Example: for devices on custom boards, as typical of embedded - and SOC based hardware, Linux often uses platform_data to point - to board-specific structures describing devices and how they - are wired. That can include what ports are available, chip - variants, which GPIO pins act in what additional roles, and so - on. This shrinks the "Board Support Packages" (BSPs) and - minimizes board-specific #ifdefs in drivers. - -current_state: Current power state of the device. - -saved_state: Pointer to saved state of the device. This is usable by - the device driver controlling the device. - -release: Callback to free the device after all references have - gone away. This should be set by the allocator of the - device (i.e. the bus driver that discovered the device). +See the kerneldoc for the struct device. 
Programming Interface diff --git a/trunk/Documentation/driver-model/driver.txt b/trunk/Documentation/driver-model/driver.txt index d2cd6fb8ba9e..4421135826a2 100644 --- a/trunk/Documentation/driver-model/driver.txt +++ b/trunk/Documentation/driver-model/driver.txt @@ -1,23 +1,7 @@ Device Drivers -struct device_driver { - char * name; - struct bus_type * bus; - - struct completion unloaded; - struct kobject kobj; - list_t devices; - - struct module *owner; - - int (*probe) (struct device * dev); - int (*remove) (struct device * dev); - - int (*suspend) (struct device * dev, pm_message_t state); - int (*resume) (struct device * dev); -}; - +See the kerneldoc for the struct device_driver. Allocation diff --git a/trunk/Documentation/feature-removal-schedule.txt b/trunk/Documentation/feature-removal-schedule.txt index 46679e48967a..4cba260e3059 100644 --- a/trunk/Documentation/feature-removal-schedule.txt +++ b/trunk/Documentation/feature-removal-schedule.txt @@ -376,26 +376,6 @@ Who: Tejun Heo ---------------------------- -What: Support for lcd_switch and display_get in asus-laptop driver -When: March 2010 -Why: These two features use non-standard interfaces. There are the - only features that really need multiple path to guess what's - the right method name on a specific laptop. - - Removing them will allow to remove a lot of code an significantly - clean the drivers. - - This will affect the backlight code which won't be able to know - if the backlight is on or off. The platform display file will also be - write only (like the one in eeepc-laptop). - - This should'nt affect a lot of user because they usually know - when their display is on or off. - -Who: Corentin Chary - ----------------------------- - What: sysfs-class-rfkill state file When: Feb 2014 Files: net/rfkill/core.c @@ -459,14 +439,6 @@ Who: Thomas Gleixner ---------------------------- -What: The acpi_sleep=s4_nonvs command line option -When: 2.6.37 -Files: arch/x86/kernel/acpi/sleep.c -Why: superseded by acpi_sleep=nonvs -Who: Rafael J. Wysocki - ----------------------------- - What: PCI DMA unmap state API When: August 2012 Why: PCI DMA unmap state API (include/linux/pci-dma.h) was replaced diff --git a/trunk/Documentation/filesystems/proc.txt b/trunk/Documentation/filesystems/proc.txt index b0b814d75ca1..60740e8ecb37 100644 --- a/trunk/Documentation/filesystems/proc.txt +++ b/trunk/Documentation/filesystems/proc.txt @@ -836,7 +836,6 @@ Provides counts of softirq handlers serviced since boot time, for each cpu. TASKLET: 0 0 0 290 SCHED: 27035 26983 26971 26746 HRTIMER: 0 0 0 0 - RCU: 1678 1769 2178 2250 1.3 IDE devices in /proc/ide diff --git a/trunk/Documentation/flexible-arrays.txt b/trunk/Documentation/flexible-arrays.txt index cb8a3a00cc92..df904aec9904 100644 --- a/trunk/Documentation/flexible-arrays.txt +++ b/trunk/Documentation/flexible-arrays.txt @@ -66,10 +66,10 @@ trick is to ensure that any needed memory allocations are done before entering atomic context, using: int flex_array_prealloc(struct flex_array *array, unsigned int start, - unsigned int end, gfp_t flags); + unsigned int nr_elements, gfp_t flags); This function will ensure that memory for the elements indexed in the range -defined by start and end has been allocated. Thereafter, a +defined by start and nr_elements has been allocated. Thereafter, a flex_array_put() call on an element in that range is guaranteed not to block. 
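A hedged usage sketch (an editor's illustration, not part of the patch): it preallocates part of a flex_array from process context using the new (start, nr_elements) arguments, so that later flex_array_put() calls made in atomic context are guaranteed not to block for those elements. The names entries, NR_ENTRIES, NR_PREALLOC, example_init() and example_store() are illustrative assumptions; the flex_array_*() calls follow the API described above.

	#include <linux/flex_array.h>
	#include <linux/gfp.h>
	#include <linux/errno.h>

	#define NR_ENTRIES	128	/* total elements in the array */
	#define NR_PREALLOC	16	/* elements needed in atomic context */

	static struct flex_array *entries;

	/* Process context: allocate and preallocate before going atomic. */
	static int example_init(void)
	{
		int err;

		entries = flex_array_alloc(sizeof(int), NR_ENTRIES, GFP_KERNEL);
		if (!entries)
			return -ENOMEM;

		/* Back elements 0..NR_PREALLOC-1 with memory now. */
		err = flex_array_prealloc(entries, 0, NR_PREALLOC, GFP_KERNEL);
		if (err) {
			flex_array_free(entries);
			return err;
		}
		return 0;
	}

	/* Atomic context: will not block for the preallocated elements. */
	static void example_store(unsigned int idx, int value)
	{
		flex_array_put(entries, idx, &value, GFP_ATOMIC);
	}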
diff --git a/trunk/Documentation/hwmon/adm1021 b/trunk/Documentation/hwmon/adm1021 index 03d02bfb3df1..02ad96cf9b2b 100644 --- a/trunk/Documentation/hwmon/adm1021 +++ b/trunk/Documentation/hwmon/adm1021 @@ -14,10 +14,6 @@ Supported chips: Prefix: 'gl523sm' Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e Datasheet: - * Intel Xeon Processor - Prefix: - any other - may require 'force_adm1021' parameter - Addresses scanned: none - Datasheet: Publicly available at Intel website * Maxim MAX1617 Prefix: 'max1617' Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e @@ -91,21 +87,27 @@ will do no harm, but will return 'old' values. It is possible to make ADM1021-clones do faster measurements, but there is really no good reason for that. -Xeon support ------------- -Some Xeon processors have real max1617, adm1021, or compatible chips -within them, with two temperature sensors. +Netburst-based Xeon support +--------------------------- -Other Xeons have chips with only one sensor. +Some Xeon processors based on the Netburst (early Pentium 4, from 2001 to +2003) microarchitecture had real MAX1617, ADM1021, or compatible chips +within them, with two temperature sensors. Other Xeon processors of this +era (with 400 MHz FSB) had chips with only one temperature sensor. -If you have a Xeon, and the adm1021 module loads, and both temperatures -appear valid, then things are good. +If you have such an old Xeon, and you get two valid temperatures when +loading the adm1021 module, then things are good. -If the adm1021 module doesn't load, you should try this: - modprobe adm1021 force_adm1021=BUS,ADDRESS - ADDRESS can only be 0x18, 0x1a, 0x29, 0x2b, 0x4c, or 0x4e. +If nothing happens when loading the adm1021 module, and you are certain +that your specific Xeon processor model includes compatible sensors, you +will have to explicitly instantiate the sensor chips from user-space. See +method 4 in Documentation/i2c/instantiating-devices. Possible slave +addresses are 0x18, 0x1a, 0x29, 0x2b, 0x4c, or 0x4e. It is likely that +only temp2 will be correct and temp1 will have to be ignored. -If you have dual Xeons you may have appear to have two separate -adm1021-compatible chips, or two single-temperature sensors, at distinct -addresses. +Previous generations of the Xeon processor (based on Pentium II/III) +didn't have these sensors. Next generations of Xeon processors (533 MHz +FSB and faster) lost them, until the Core-based generation which +introduced integrated digital thermal sensors. These are supported by +the coretemp driver. diff --git a/trunk/Documentation/hwmon/lm90 b/trunk/Documentation/hwmon/lm90 index fa475c0a48a3..f3efd18e87f4 100644 --- a/trunk/Documentation/hwmon/lm90 +++ b/trunk/Documentation/hwmon/lm90 @@ -32,6 +32,16 @@ Supported chips: Addresses scanned: I2C 0x4c and 0x4d Datasheet: Publicly available at the ON Semiconductor website http://www.onsemi.com/PowerSolutions/product.do?id=ADT7461 + * Analog Devices ADT7461A + Prefix: 'adt7461a' + Addresses scanned: I2C 0x4c and 0x4d + Datasheet: Publicly available at the ON Semiconductor website + http://www.onsemi.com/PowerSolutions/product.do?id=ADT7461A + * ON Semiconductor NCT1008 + Prefix: 'nct1008' + Addresses scanned: I2C 0x4c and 0x4d + Datasheet: Publicly available at the ON Semiconductor website + http://www.onsemi.com/PowerSolutions/product.do?id=NCT1008 * Maxim MAX6646 Prefix: 'max6646' Addresses scanned: I2C 0x4d @@ -149,7 +159,7 @@ ADM1032: * ALERT is triggered by open remote sensor. 
* SMBus PEC support for Write Byte and Receive Byte transactions. -ADT7461: +ADT7461, ADT7461A, NCT1008: * Extended temperature range (breaks compatibility) * Lower resolution for remote temperature @@ -195,9 +205,9 @@ are exported, one for each channel, but these values are of course linked. Only the local hysteresis can be set from user-space, and the same delta applies to the remote hysteresis. -The lm90 driver will not update its values more frequently than every -other second; reading them more often will do no harm, but will return -'old' values. +The lm90 driver will not update its values more frequently than configured with +the update_interval attribute; reading them more often will do no harm, but will +return 'old' values. SMBus Alert Support ------------------- @@ -205,11 +215,12 @@ SMBus Alert Support This driver has basic support for SMBus alert. When an alert is received, the status register is read and the faulty temperature channel is logged. -The Analog Devices chips (ADM1032 and ADT7461) do not implement the SMBus -alert protocol properly so additional care is needed: the ALERT output is -disabled when an alert is received, and is re-enabled only when the alarm -is gone. Otherwise the chip would block alerts from other chips in the bus -as long as the alarm is active. +The Analog Devices chips (ADM1032, ADT7461 and ADT7461A) and ON +Semiconductor chips (NCT1008) do not implement the SMBus alert protocol +properly so additional care is needed: the ALERT output is disabled when +an alert is received, and is re-enabled only when the alarm is gone. +Otherwise the chip would block alerts from other chips in the bus as long +as the alarm is active. PEC Support ----------- diff --git a/trunk/Documentation/hwmon/max16064 b/trunk/Documentation/hwmon/max16064 new file mode 100644 index 000000000000..41728999e142 --- /dev/null +++ b/trunk/Documentation/hwmon/max16064 @@ -0,0 +1,62 @@ +Kernel driver max16064 +====================== + +Supported chips: + * Maxim MAX16064 + Prefix: 'max16064' + Addresses scanned: - + Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf + +Author: Guenter Roeck + + +Description +----------- + +This driver supports hardware montoring for Maxim MAX16064 Quad Power-Supply +Controller with Active-Voltage Output Control and PMBus Interface. + +The driver is a client driver to the core PMBus driver. +Please see Documentation/hwmon/pmbus for details on PMBus client drivers. + + +Usage Notes +----------- + +This driver does not auto-detect devices. You will have to instantiate the +devices explicitly. Please see Documentation/i2c/instantiating-devices for +details. + + +Platform data support +--------------------- + +The driver supports standard PMBus driver platform data. + + +Sysfs entries +------------- + +The following attributes are supported. Limits are read-write; all other +attributes are read-only. + +in[1-4]_label "vout[1-4]" +in[1-4]_input Measured voltage. From READ_VOUT register. +in[1-4]_min Minumum Voltage. From VOUT_UV_WARN_LIMIT register. +in[1-4]_max Maximum voltage. From VOUT_OV_WARN_LIMIT register. +in[1-4]_lcrit Critical minumum Voltage. VOUT_UV_FAULT_LIMIT register. +in[1-4]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register. +in[1-4]_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status. +in[1-4]_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status. +in[1-4]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status. +in[1-4]_crit_alarm Voltage critical high alarm. 
From VOLTAGE_OV_FAULT status. + +temp1_input Measured temperature. From READ_TEMPERATURE_1 register. +temp1_max Maximum temperature. From OT_WARN_LIMIT register. +temp1_crit Critical high temperature. From OT_FAULT_LIMIT register. +temp1_max_alarm Chip temperature high alarm. Set by comparing + READ_TEMPERATURE_1 with OT_WARN_LIMIT if TEMP_OT_WARNING + status is set. +temp1_crit_alarm Chip temperature critical high alarm. Set by comparing + READ_TEMPERATURE_1 with OT_FAULT_LIMIT if TEMP_OT_FAULT + status is set. diff --git a/trunk/Documentation/hwmon/max34440 b/trunk/Documentation/hwmon/max34440 new file mode 100644 index 000000000000..6c525dd07d59 --- /dev/null +++ b/trunk/Documentation/hwmon/max34440 @@ -0,0 +1,79 @@ +Kernel driver max34440 +====================== + +Supported chips: + * Maxim MAX34440 + Prefixes: 'max34440' + Addresses scanned: - + Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34440.pdf + * Maxim MAX34441 + PMBus 5-Channel Power-Supply Manager and Intelligent Fan Controller + Prefixes: 'max34441' + Addresses scanned: - + Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34441.pdf + +Author: Guenter Roeck + + +Description +----------- + +This driver supports hardware montoring for Maxim MAX34440 PMBus 6-Channel +Power-Supply Manager and MAX34441 PMBus 5-Channel Power-Supply Manager +and Intelligent Fan Controller. + +The driver is a client driver to the core PMBus driver. Please see +Documentation/hwmon/pmbus for details on PMBus client drivers. + + +Usage Notes +----------- + +This driver does not auto-detect devices. You will have to instantiate the +devices explicitly. Please see Documentation/i2c/instantiating-devices for +details. + + +Platform data support +--------------------- + +The driver supports standard PMBus driver platform data. + + +Sysfs entries +------------- + +The following attributes are supported. Limits are read-write; all other +attributes are read-only. + +in[1-6]_label "vout[1-6]". +in[1-6]_input Measured voltage. From READ_VOUT register. +in[1-6]_min Minumum Voltage. From VOUT_UV_WARN_LIMIT register. +in[1-6]_max Maximum voltage. From VOUT_OV_WARN_LIMIT register. +in[1-6]_lcrit Critical minumum Voltage. VOUT_UV_FAULT_LIMIT register. +in[1-6]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register. +in[1-6]_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status. +in[1-6]_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status. +in[1-6]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status. +in[1-6]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status. + +curr[1-6]_label "iout[1-6]". +curr[1-6]_input Measured current. From READ_IOUT register. +curr[1-6]_max Maximum current. From IOUT_OC_WARN_LIMIT register. +curr[1-6]_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register. +curr[1-6]_max_alarm Current high alarm. From IOUT_OC_WARNING status. +curr[1-6]_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status. + + in6 and curr6 attributes only exist for MAX34440. + +temp[1-8]_input Measured temperatures. From READ_TEMPERATURE_1 register. + temp1 is the chip's internal temperature. temp2..temp5 + are remote I2C temperature sensors. For MAX34441, temp6 + is a remote thermal-diode sensor. For MAX34440, temp6..8 + are remote I2C temperature sensors. +temp[1-8]_max Maximum temperature. From OT_WARN_LIMIT register. +temp[1-8]_crit Critical high temperature. From OT_FAULT_LIMIT register. +temp[1-8]_max_alarm Temperature high alarm. 
+temp[1-8]_crit_alarm Temperature critical high alarm. + + temp7 and temp8 attributes only exist for MAX34440. diff --git a/trunk/Documentation/hwmon/max8688 b/trunk/Documentation/hwmon/max8688 new file mode 100644 index 000000000000..0ddd3a412030 --- /dev/null +++ b/trunk/Documentation/hwmon/max8688 @@ -0,0 +1,69 @@ +Kernel driver max8688 +===================== + +Supported chips: + * Maxim MAX8688 + Prefix: 'max8688' + Addresses scanned: - + Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf + +Author: Guenter Roeck + + +Description +----------- + +This driver supports hardware montoring for Maxim MAX8688 Digital Power-Supply +Controller/Monitor with PMBus Interface. + +The driver is a client driver to the core PMBus driver. Please see +Documentation/hwmon/pmbus for details on PMBus client drivers. + + +Usage Notes +----------- + +This driver does not auto-detect devices. You will have to instantiate the +devices explicitly. Please see Documentation/i2c/instantiating-devices for +details. + + +Platform data support +--------------------- + +The driver supports standard PMBus driver platform data. + + +Sysfs entries +------------- + +The following attributes are supported. Limits are read-write; all other +attributes are read-only. + +in1_label "vout1" +in1_input Measured voltage. From READ_VOUT register. +in1_min Minumum Voltage. From VOUT_UV_WARN_LIMIT register. +in1_max Maximum voltage. From VOUT_OV_WARN_LIMIT register. +in1_lcrit Critical minumum Voltage. VOUT_UV_FAULT_LIMIT register. +in1_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register. +in1_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status. +in1_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status. +in1_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status. +in1_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status. + +curr1_label "iout1" +curr1_input Measured current. From READ_IOUT register. +curr1_max Maximum current. From IOUT_OC_WARN_LIMIT register. +curr1_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register. +curr1_max_alarm Current high alarm. From IOUT_OC_WARN_LIMIT register. +curr1_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status. + +temp1_input Measured temperature. From READ_TEMPERATURE_1 register. +temp1_max Maximum temperature. From OT_WARN_LIMIT register. +temp1_crit Critical high temperature. From OT_FAULT_LIMIT register. +temp1_max_alarm Chip temperature high alarm. Set by comparing + READ_TEMPERATURE_1 with OT_WARN_LIMIT if TEMP_OT_WARNING + status is set. +temp1_crit_alarm Chip temperature critical high alarm. Set by comparing + READ_TEMPERATURE_1 with OT_FAULT_LIMIT if TEMP_OT_FAULT + status is set. 
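The Maxim documents above all describe thin clients of the core PMBus driver. Assuming the core's pmbus_do_probe()/pmbus_do_remove() helpers and the PMBUS_HAVE_* functionality flags are used the way the existing clients use them, a bare-bones client might look roughly like the sketch below. The "examplepsu" name and flag selection are made up, and the data-format and coefficient fields that real drivers also have to fill in are omitted for brevity:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include "pmbus.h"		/* local header in drivers/hwmon/pmbus/ */

static struct pmbus_driver_info examplepsu_info = {
	.pages = 1,
	.func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
		   PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
};

static int examplepsu_probe(struct i2c_client *client,
			    const struct i2c_device_id *id)
{
	/* The core PMBus driver creates all sysfs attributes for us. */
	return pmbus_do_probe(client, id, &examplepsu_info);
}

static int examplepsu_remove(struct i2c_client *client)
{
	return pmbus_do_remove(client);
}

static const struct i2c_device_id examplepsu_id[] = {
	{ "examplepsu", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, examplepsu_id);

static struct i2c_driver examplepsu_driver = {
	.driver = { .name = "examplepsu" },
	.probe = examplepsu_probe,
	.remove = examplepsu_remove,
	.id_table = examplepsu_id,
};

static int __init examplepsu_init(void)
{
	return i2c_add_driver(&examplepsu_driver);
}

static void __exit examplepsu_exit(void)
{
	i2c_del_driver(&examplepsu_driver);
}

module_init(examplepsu_init);
module_exit(examplepsu_exit);
MODULE_LICENSE("GPL");

As the usage notes above state, such a driver does not auto-detect anything, so the device still has to be instantiated explicitly, for example through the mechanisms described in Documentation/i2c/instantiating-devices.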
diff --git a/trunk/Documentation/hwmon/pmbus b/trunk/Documentation/hwmon/pmbus index dc4933e96344..5e462fc7f99b 100644 --- a/trunk/Documentation/hwmon/pmbus +++ b/trunk/Documentation/hwmon/pmbus @@ -13,26 +13,6 @@ Supported chips: Prefix: 'ltc2978' Addresses scanned: - Datasheet: http://cds.linear.com/docs/Datasheet/2978fa.pdf - * Maxim MAX16064 - Quad Power-Supply Controller - Prefix: 'max16064' - Addresses scanned: - - Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf - * Maxim MAX34440 - PMBus 6-Channel Power-Supply Manager - Prefixes: 'max34440' - Addresses scanned: - - Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34440.pdf - * Maxim MAX34441 - PMBus 5-Channel Power-Supply Manager and Intelligent Fan Controller - Prefixes: 'max34441' - Addresses scanned: - - Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34441.pdf - * Maxim MAX8688 - Digital Power-Supply Controller/Monitor - Prefix: 'max8688' - Addresses scanned: - - Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf * Generic PMBus devices Prefix: 'pmbus' Addresses scanned: - @@ -175,11 +155,13 @@ currX_crit Critical maximum current. From IIN_OC_FAULT_LIMIT or IOUT_OC_FAULT_LIMIT register. currX_alarm Current high alarm. From IIN_OC_WARNING or IOUT_OC_WARNING status. +currX_max_alarm Current high alarm. + From IIN_OC_WARN_LIMIT or IOUT_OC_WARN_LIMIT status. currX_lcrit_alarm Output current critical low alarm. From IOUT_UC_FAULT status. currX_crit_alarm Current critical high alarm. From IIN_OC_FAULT or IOUT_OC_FAULT status. -currX_label "iin" or "vinY" +currX_label "iin" or "ioutY" powerX_input Measured power. From READ_PIN or READ_POUT register. powerX_cap Output power cap. From POUT_MAX register. @@ -193,13 +175,13 @@ powerX_crit_alarm Output power critical high alarm. From POUT_OP_FAULT status. powerX_label "pin" or "poutY" -tempX_input Measured tempererature. +tempX_input Measured temperature. From READ_TEMPERATURE_X register. -tempX_min Mimimum tempererature. From UT_WARN_LIMIT register. -tempX_max Maximum tempererature. From OT_WARN_LIMIT register. -tempX_lcrit Critical low tempererature. +tempX_min Mimimum temperature. From UT_WARN_LIMIT register. +tempX_max Maximum temperature. From OT_WARN_LIMIT register. +tempX_lcrit Critical low temperature. From UT_FAULT_LIMIT register. -tempX_crit Critical high tempererature. +tempX_crit Critical high temperature. From OT_FAULT_LIMIT register. tempX_min_alarm Chip temperature low alarm. 
Set by comparing READ_TEMPERATURE_X with UT_WARN_LIMIT if diff --git a/trunk/Documentation/hwmon/smm665 b/trunk/Documentation/hwmon/smm665 index 3820fc9ca52d..59e316140542 100644 --- a/trunk/Documentation/hwmon/smm665 +++ b/trunk/Documentation/hwmon/smm665 @@ -150,8 +150,8 @@ in8_crit_alarm Channel F critical alarm in9_crit_alarm AIN1 critical alarm in10_crit_alarm AIN2 critical alarm -temp1_input Chip tempererature -temp1_min Mimimum chip tempererature -temp1_max Maximum chip tempererature -temp1_crit Critical chip tempererature +temp1_input Chip temperature +temp1_min Mimimum chip temperature +temp1_max Maximum chip temperature +temp1_crit Critical chip temperature temp1_crit_alarm Temperature critical alarm diff --git a/trunk/Documentation/hwmon/submitting-patches b/trunk/Documentation/hwmon/submitting-patches new file mode 100644 index 000000000000..86f42e8e9e49 --- /dev/null +++ b/trunk/Documentation/hwmon/submitting-patches @@ -0,0 +1,109 @@ + How to Get Your Patch Accepted Into the Hwmon Subsystem + ------------------------------------------------------- + +This text is is a collection of suggestions for people writing patches or +drivers for the hwmon subsystem. Following these suggestions will greatly +increase the chances of your change being accepted. + + +1. General +---------- + +* It should be unnecessary to mention, but please read and follow + Documentation/SubmitChecklist + Documentation/SubmittingDrivers + Documentation/SubmittingPatches + Documentation/CodingStyle + +* If your patch generates checkpatch warnings, please refrain from explanations + such as "I don't like that coding style". Keep in mind that each unnecessary + warning helps hiding a real problem. If you don't like the kernel coding + style, don't write kernel drivers. + +* Please test your patch thoroughly. We are not your test group. + Sometimes a patch can not or not completely be tested because of missing + hardware. In such cases, you should test-build the code on at least one + architecture. If run-time testing was not achieved, it should be written + explicitly below the patch header. + +* If your patch (or the driver) is affected by configuration options such as + CONFIG_SMP or CONFIG_HOTPLUG, make sure it compiles for all configuration + variants. + + +2. Adding functionality to existing drivers +------------------------------------------- + +* Make sure the documentation in Documentation/hwmon/ is up to + date. + +* Make sure the information in Kconfig is up to date. + +* If the added functionality requires some cleanup or structural changes, split + your patch into a cleanup part and the actual addition. This makes it easier + to review your changes, and to bisect any resulting problems. + +* Never mix bug fixes, cleanup, and functional enhancements in a single patch. + + +3. New drivers +-------------- + +* Running your patch or driver file(s) through checkpatch does not mean its + formatting is clean. If unsure about formatting in your new driver, run it + through Lindent. Lindent is not perfect, and you may have to do some minor + cleanup, but it is a good start. + +* Consider adding yourself to MAINTAINERS. + +* Document the driver in Documentation/hwmon/. + +* Add the driver to Kconfig and Makefile in alphabetical order. + +* Make sure that all dependencies are listed in Kconfig. For new drivers, it + is most likely prudent to add a dependency on EXPERIMENTAL. + +* Avoid forward declarations if you can. Rearrange the code if necessary. 
+ +* Avoid calculations in macros and macro-generated functions. While such macros + may save a line or so in the source, it obfuscates the code and makes code + review more difficult. It may also result in code which is more complicated + than necessary. Use inline functions or just regular functions instead. + +* If the driver has a detect function, make sure it is silent. Debug messages + and messages printed after a successful detection are acceptable, but it + must not print messages such as "Chip XXX not found/supported". + + Keep in mind that the detect function will run for all drivers supporting an + address if a chip is detected on that address. Unnecessary messages will just + pollute the kernel log and not provide any value. + +* Provide a detect function if and only if a chip can be detected reliably. + +* Avoid writing to chip registers in the detect function. If you have to write, + only do it after you have already gathered enough data to be certain that the + detection is going to be successful. + + Keep in mind that the chip might not be what your driver believes it is, and + writing to it might cause a bad misconfiguration. + +* Make sure there are no race conditions in the probe function. Specifically, + completely initialize your chip first, then create sysfs entries and register + with the hwmon subsystem. + +* Do not provide support for deprecated sysfs attributes. + +* Do not create non-standard attributes unless really needed. If you have to use + non-standard attributes, or you believe you do, discuss it on the mailing list + first. Either case, provide a detailed explanation why you need the + non-standard attribute(s). + Standard attributes are specified in Documentation/hwmon/sysfs-interface. + +* When deciding which sysfs attributes to support, look at the chip's + capabilities. While we do not expect your driver to support everything the + chip may offer, it should at least support all limits and alarms. + +* Last but not least, please check if a driver for your chip already exists + before starting to write a new driver. Especially for temperature sensors, + new chips are often variants of previously released chips. In some cases, + a presumably new chip may simply have been relabeled. diff --git a/trunk/Documentation/input/event-codes.txt b/trunk/Documentation/input/event-codes.txt new file mode 100644 index 000000000000..23fcb05175be --- /dev/null +++ b/trunk/Documentation/input/event-codes.txt @@ -0,0 +1,262 @@ +The input protocol uses a map of types and codes to express input device values +to userspace. This document describes the types and codes and how and when they +may be used. + +A single hardware event generates multiple input events. Each input event +contains the new value of a single data item. A special event type, EV_SYN, is +used to separate input events into packets of input data changes occurring at +the same moment in time. In the following, the term "event" refers to a single +input event encompassing a type, code, and value. + +The input protocol is a stateful protocol. Events are emitted only when values +of event codes have changed. However, the state is maintained within the Linux +input subsystem; drivers do not need to maintain the state and may attempt to +emit unchanged values without harm. Userspace may obtain the current state of +event code values using the EVIOCG* ioctls defined in linux/input.h. 
The event +reports supported by a device are also provided by sysfs in +class/input/event*/device/capabilities/, and the properties of a device are +provided in class/input/event*/device/properties. + +Types: +========== +Types are groupings of codes under a logical input construct. Each type has a +set of applicable codes to be used in generating events. See the Codes section +for details on valid codes for each type. + +* EV_SYN: + - Used as markers to separate events. Events may be separated in time or in + space, such as with the multitouch protocol. + +* EV_KEY: + - Used to describe state changes of keyboards, buttons, or other key-like + devices. + +* EV_REL: + - Used to describe relative axis value changes, e.g. moving the mouse 5 units + to the left. + +* EV_ABS: + - Used to describe absolute axis value changes, e.g. describing the + coordinates of a touch on a touchscreen. + +* EV_MSC: + - Used to describe miscellaneous input data that do not fit into other types. + +* EV_SW: + - Used to describe binary state input switches. + +* EV_LED: + - Used to turn LEDs on devices on and off. + +* EV_SND: + - Used to output sound to devices. + +* EV_REP: + - Used for autorepeating devices. + +* EV_FF: + - Used to send force feedback commands to an input device. + +* EV_PWR: + - A special type for power button and switch input. + +* EV_FF_STATUS: + - Used to receive force feedback device status. + +Codes: +========== +Codes define the precise type of event. + +EV_SYN: +---------- +EV_SYN event values are undefined. Their usage is defined only by when they are +sent in the evdev event stream. + +* SYN_REPORT: + - Used to synchronize and separate events into packets of input data changes + occurring at the same moment in time. For example, motion of a mouse may set + the REL_X and REL_Y values for one motion, then emit a SYN_REPORT. The next + motion will emit more REL_X and REL_Y values and send another SYN_REPORT. + +* SYN_CONFIG: + - TBD + +* SYN_MT_REPORT: + - Used to synchronize and separate touch events. See the + multi-touch-protocol.txt document for more information. + +* SYN_DROPPED: + - Used to indicate buffer overrun in the evdev client's event queue. + Client should ignore all events up to and including next SYN_REPORT + event and query the device (using EVIOCG* ioctls) to obtain its + current state. + +EV_KEY: +---------- +EV_KEY events take the form KEY_ or BTN_. For example, KEY_A is used +to represent the 'A' key on a keyboard. When a key is depressed, an event with +the key's code is emitted with value 1. When the key is released, an event is +emitted with value 0. Some hardware send events when a key is repeated. These +events have a value of 2. In general, KEY_ is used for keyboard keys, and +BTN_ is used for other types of momentary switch events. + +A few EV_KEY codes have special meanings: + +* BTN_TOOL_: + - These codes are used in conjunction with input trackpads, tablets, and + touchscreens. These devices may be used with fingers, pens, or other tools. + When an event occurs and a tool is used, the corresponding BTN_TOOL_ + code should be set to a value of 1. When the tool is no longer interacting + with the input device, the BTN_TOOL_ code should be reset to 0. All + trackpads, tablets, and touchscreens should use at least one BTN_TOOL_ + code when events are generated. + +* BTN_TOUCH: + BTN_TOUCH is used for touch contact. While an input tool is determined to be + within meaningful physical contact, the value of this property must be set + to 1. 
Meaningful physical contact may mean any contact, or it may mean + contact conditioned by an implementation defined property. For example, a + touchpad may set the value to 1 only when the touch pressure rises above a + certain value. BTN_TOUCH may be combined with BTN_TOOL_ codes. For + example, a pen tablet may set BTN_TOOL_PEN to 1 and BTN_TOUCH to 0 while the + pen is hovering over but not touching the tablet surface. + +Note: For appropriate function of the legacy mousedev emulation driver, +BTN_TOUCH must be the first evdev code emitted in a synchronization frame. + +Note: Historically a touch device with BTN_TOOL_FINGER and BTN_TOUCH was +interpreted as a touchpad by userspace, while a similar device without +BTN_TOOL_FINGER was interpreted as a touchscreen. For backwards compatibility +with current userspace it is recommended to follow this distinction. In the +future, this distinction will be deprecated and the device properties ioctl +EVIOCGPROP, defined in linux/input.h, will be used to convey the device type. + +* BTN_TOOL_FINGER, BTN_TOOL_DOUBLETAP, BTN_TOOL_TRIPLETAP, BTN_TOOL_QUADTAP: + - These codes denote one, two, three, and four finger interaction on a + trackpad or touchscreen. For example, if the user uses two fingers and moves + them on the touchpad in an effort to scroll content on screen, + BTN_TOOL_DOUBLETAP should be set to value 1 for the duration of the motion. + Note that all BTN_TOOL_ codes and the BTN_TOUCH code are orthogonal in + purpose. A trackpad event generated by finger touches should generate events + for one code from each group. At most only one of these BTN_TOOL_ + codes should have a value of 1 during any synchronization frame. + +Note: Historically some drivers emitted multiple of the finger count codes with +a value of 1 in the same synchronization frame. This usage is deprecated. + +Note: In multitouch drivers, the input_mt_report_finger_count() function should +be used to emit these codes. Please see multi-touch-protocol.txt for details. + +EV_REL: +---------- +EV_REL events describe relative changes in a property. For example, a mouse may +move to the left by a certain number of units, but its absolute position in +space is unknown. If the absolute position is known, EV_ABS codes should be used +instead of EV_REL codes. + +A few EV_REL codes have special meanings: + +* REL_WHEEL, REL_HWHEEL: + - These codes are used for vertical and horizontal scroll wheels, + respectively. + +EV_ABS: +---------- +EV_ABS events describe absolute changes in a property. For example, a touchpad +may emit coordinates for a touch location. + +A few EV_ABS codes have special meanings: + +* ABS_DISTANCE: + - Used to describe the distance of a tool from an interaction surface. This + event should only be emitted while the tool is hovering, meaning in close + proximity of the device and while the value of the BTN_TOUCH code is 0. If + the input device may be used freely in three dimensions, consider ABS_Z + instead. + +* ABS_MT_: + - Used to describe multitouch input events. Please see + multi-touch-protocol.txt for details. + +EV_SW: +---------- +EV_SW events describe stateful binary switches. For example, the SW_LID code is +used to denote when a laptop lid is closed. + +Upon binding to a device or resuming from suspend, a driver must report +the current switch state. This ensures that the device, kernel, and userspace +state is in sync. 
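A driver-side sketch of what that initial report might look like, assuming the usual input_report_switch()/input_sync() helpers; example_read_lid() stands in for whatever hardware query the driver really performs, and the same call would typically be made from both the probe/connect path and the resume handler:

#include <linux/input.h>

/* Placeholder for however the hardware is actually queried. */
static int example_read_lid(void)
{
	return 0;
}

static void example_report_lid(struct input_dev *input)
{
	/* Report the current lid state so kernel and userspace stay in sync. */
	input_report_switch(input, SW_LID, example_read_lid());
	input_sync(input);
}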
+ +Upon resume, if the switch state is the same as before suspend, then the input +subsystem will filter out the duplicate switch state reports. The driver does +not need to keep the state of the switch at any time. + +EV_MSC: +---------- +EV_MSC events are used for input and output events that do not fall under other +categories. + +EV_LED: +---------- +EV_LED events are used for input and output to set and query the state of +various LEDs on devices. + +EV_REP: +---------- +EV_REP events are used for specifying autorepeating events. + +EV_SND: +---------- +EV_SND events are used for sending sound commands to simple sound output +devices. + +EV_FF: +---------- +EV_FF events are used to initialize a force feedback capable device and to cause +such device to feedback. + +EV_PWR: +---------- +EV_PWR events are a special type of event used specifically for power +mangement. Its usage is not well defined. To be addressed later. + +Guidelines: +========== +The guidelines below ensure proper single-touch and multi-finger functionality. +For multi-touch functionality, see the multi-touch-protocol.txt document for +more information. + +Mice: +---------- +REL_{X,Y} must be reported when the mouse moves. BTN_LEFT must be used to report +the primary button press. BTN_{MIDDLE,RIGHT,4,5,etc.} should be used to report +further buttons of the device. REL_WHEEL and REL_HWHEEL should be used to report +scroll wheel events where available. + +Touchscreens: +---------- +ABS_{X,Y} must be reported with the location of the touch. BTN_TOUCH must be +used to report when a touch is active on the screen. +BTN_{MOUSE,LEFT,MIDDLE,RIGHT} must not be reported as the result of touch +contact. BTN_TOOL_ events should be reported where possible. + +Trackpads: +---------- +Legacy trackpads that only provide relative position information must report +events like mice described above. + +Trackpads that provide absolute touch position must report ABS_{X,Y} for the +location of the touch. BTN_TOUCH should be used to report when a touch is active +on the trackpad. Where multi-finger support is available, BTN_TOOL_ should +be used to report the number of touches active on the trackpad. + +Tablets: +---------- +BTN_TOOL_ events must be reported when a stylus or other tool is active on +the tablet. ABS_{X,Y} must be reported with the location of the tool. BTN_TOUCH +should be used to report when the tool is in contact with the tablet. +BTN_{STYLUS,STYLUS2} should be used to report buttons on the tool itself. Any +button may be used for buttons on the tablet except BTN_{MOUSE,LEFT}. +BTN_{0,1,2,etc} are good generic codes for unlabeled buttons. Do not use +meaningful buttons, like BTN_FORWARD, unless the button is labeled for that +purpose on the device. diff --git a/trunk/Documentation/ja_JP/HOWTO b/trunk/Documentation/ja_JP/HOWTO index b63301a03811..050d37fe6d40 100644 --- a/trunk/Documentation/ja_JP/HOWTO +++ b/trunk/Documentation/ja_JP/HOWTO @@ -11,14 +11,14 @@ for non English (read: Japanese) speakers and is not intended as a fork. So if you have any comments or updates for this file, please try to update the original English file first. 
-Last Updated: 2008/10/24 +Last Updated: 2011/03/31 ================================== ã“れã¯ã€ -linux-2.6.28/Documentation/HOWTO +linux-2.6.38/Documentation/HOWTO ã®å’Œè¨³ã§ã™ã€‚ 翻訳団体: JF プロジェクト < http://www.linux.or.jp/JF/ > -翻訳日: 2008/10/24 +翻訳日: 2011/3/28 翻訳者: Tsugikazu Shibata 校正者: æ¾å€‰ã•ã‚“ å°æž— é›…å…¸ã•ã‚“ (Masanori Kobayasi) @@ -256,8 +256,8 @@ Linux カーãƒãƒ«ã®é–‹ç™ºãƒ—ロセスã¯ç¾åœ¨å¹¾ã¤ã‹ã®ç•°ãªã‚‹ãƒ¡ã‚¤ãƒ³ - メイン㮠2.6.x カーãƒãƒ«ãƒ„リー - 2.6.x.y -stable カーãƒãƒ«ãƒ„リー - 2.6.x -git カーãƒãƒ«ãƒ‘ッム- - 2.6.x -mm カーãƒãƒ«ãƒ‘ッム- サブシステム毎ã®ã‚«ãƒ¼ãƒãƒ«ãƒ„リーã¨ãƒ‘ッム+ - çµ±åˆãƒ†ã‚¹ãƒˆã®ãŸã‚ã® 2.6.x -next カーãƒãƒ«ãƒ„リー 2.6.x カーãƒãƒ«ãƒ„リー ----------------- @@ -268,9 +268,9 @@ Linux カーãƒãƒ«ã®é–‹ç™ºãƒ—ロセスã¯ç¾åœ¨å¹¾ã¤ã‹ã®ç•°ãªã‚‹ãƒ¡ã‚¤ãƒ³ - æ–°ã—ã„カーãƒãƒ«ãŒãƒªãƒªãƒ¼ã‚¹ã•れãŸç›´å¾Œã«ã€2週間ã®ç‰¹åˆ¥æœŸé–“ãŒè¨­ã‘られ〠ã“ã®æœŸé–“中ã«ã€ãƒ¡ãƒ³ãƒ†ãƒŠé”㯠Linus ã«å¤§ããªå·®åˆ†ã‚’é€ã‚‹ã“ã¨ãŒã§ãã¾ã™ã€‚ - ã“ã®ã‚ˆã†ãªå·®åˆ†ã¯é€šå¸¸ -mm カーãƒãƒ«ã«æ•°é€±é–“å«ã¾ã‚Œã¦ããŸãƒ‘ッãƒã§ã™ã€‚ + ã“ã®ã‚ˆã†ãªå·®åˆ†ã¯é€šå¸¸ -next カーãƒãƒ«ã«æ•°é€±é–“å«ã¾ã‚Œã¦ããŸãƒ‘ッãƒã§ã™ã€‚ 大ããªå¤‰æ›´ã¯ git(カーãƒãƒ«ã®ã‚½ãƒ¼ã‚¹ç®¡ç†ãƒ„ールã€è©³ç´°ã¯ - http://git.or.cz/ å‚ç…§) を使ã£ã¦é€ã‚‹ã®ãŒå¥½ã¾ã—ã„やり方ã§ã™ãŒã€ãƒ‘ッ + http://git-scm.com/ å‚ç…§) を使ã£ã¦é€ã‚‹ã®ãŒå¥½ã¾ã—ã„やり方ã§ã™ãŒã€ãƒ‘ッ ãƒãƒ•ァイルã®å½¢å¼ã®ã¾ã¾é€ã‚‹ã®ã§ã‚‚å分ã§ã™ã€‚ - 2週間後ã€-rc1 カーãƒãƒ«ãŒãƒªãƒªãƒ¼ã‚¹ã•れã€ã“ã®å¾Œã«ã¯ã‚«ãƒ¼ãƒãƒ«å…¨ä½“ã®å®‰å®š @@ -333,86 +333,44 @@ git リãƒã‚¸ãƒˆãƒªã§ç®¡ç†ã•れã¦ã„ã‚‹Linus ã®ã‚«ãƒ¼ãƒãƒ«ãƒ„ãƒªãƒ¼ã®æ¯Ž れ㯠-rc カーãƒãƒ«ã¨æ¯”ã¹ã¦ã€ãƒ‘ッãƒãŒå¤§ä¸ˆå¤«ã‹ã©ã†ã‹ã‚‚確èªã—ãªã„ã§è‡ªå‹•çš„ ã«ç”Ÿæˆã•れるã®ã§ã€ã‚ˆã‚Šå®Ÿé¨“çš„ã§ã™ã€‚ -2.6.x -mm カーãƒãƒ«ãƒ‘ッム------------------------- - -Andrew Morton ã«ã‚ˆã£ã¦ãƒªãƒªãƒ¼ã‚¹ã•れる実験的ãªã‚«ãƒ¼ãƒãƒ«ãƒ‘ッãƒç¾¤ã§ã™ã€‚ -Andrew ã¯å€‹åˆ¥ã®ã‚µãƒ–システムカーãƒãƒ«ãƒ„リーã¨ãƒ‘ッãƒã‚’å…¨ã¦é›†ã‚ã¦ã㦠-linux-kernel メーリングリストã§åŽé›†ã•れãŸå¤šæ•°ã®ãƒ‘ッãƒã¨åŒæ™‚ã«ä¸€ã¤ã«ã¾ -ã¨ã‚ã¾ã™ã€‚ -ã“ã®ãƒ„ãƒªãƒ¼ã¯æ–°æ©Ÿèƒ½ã¨ãƒ‘ッãƒãŒæ¤œè¨¼ã•れる場ã¨ãªã‚Šã¾ã™ã€‚ã‚る期間ã®é–“パッム-㌠-mm ã«å…¥ã£ã¦ä¾¡å€¤ã‚’証明ã•れãŸã‚‰ã€Andrew やサブシステムメンテナãŒã€ -メインラインã¸å…¥ã‚Œã‚‹ã‚ˆã†ã« Linus ã«ãƒ—ッシュã—ã¾ã™ã€‚ - -メインカーãƒãƒ«ãƒ„リーã«å«ã‚ã‚‹ãŸã‚ã« Linus ã«é€ã‚‹å‰ã«ã€ã™ã¹ã¦ã®æ–°ã—ã„パッ -ãƒãŒ -mm ツリーã§ãƒ†ã‚¹ãƒˆã•れるã“ã¨ãŒå¼·ã推奨ã•れã¦ã„ã¾ã™ã€‚マージウィン -ドウãŒé–‹ãå‰ã« -mm ツリーã«ç¾ã‚Œãªã‹ã£ãŸãƒ‘ッãƒã¯ãƒ¡ã‚¤ãƒ³ãƒ©ã‚¤ãƒ³ã«ãƒžãƒ¼ã‚¸ã• -れるã“ã¨ã¯å›°é›£ã«ãªã‚Šã¾ã™ã€‚ - -ã“れらã®ã‚«ãƒ¼ãƒãƒ«ã¯å®‰å®šã—ã¦å‹•作ã™ã¹ãシステムã¨ã—ã¦ä½¿ã†ã®ã«ã¯é©åˆ‡ã§ã¯ã‚ -りã¾ã›ã‚“ã—ã€ã‚«ãƒ¼ãƒãƒ«ãƒ–ランãƒã®ä¸­ã§ã‚‚ã‚‚ã£ã¨ã‚‚動作ã«ãƒªã‚¹ã‚¯ãŒé«˜ã„ã‚‚ã®ã§ã™ã€‚ - -ã‚‚ã—ã‚ãªãŸãŒã€ã‚«ãƒ¼ãƒãƒ«é–‹ç™ºãƒ—ãƒ­ã‚»ã‚¹ã®æ”¯æ´ã‚’ã—ãŸã„ã¨æ€ã£ã¦ã„ã‚‹ã®ã§ã‚れã°ã€ -ã©ã†ãžã“れらã®ã‚«ãƒ¼ãƒãƒ«ãƒªãƒªãƒ¼ã‚¹ã‚’テストã«ä½¿ã£ã¦ã¿ã¦ã€ãã—ã¦ã‚‚ã—å•題ãŒã‚ -れã°ã€ã¾ãŸã‚‚ã—å…¨ã¦ãŒæ­£ã—ã動作ã—ãŸã¨ã—ã¦ã‚‚ã€linux-kernel メーリングリ -ストã«ãƒ•ィードãƒãƒƒã‚¯ã‚’æä¾›ã—ã¦ãã ã•ã„。 - -ã™ã¹ã¦ã®ä»–ã®å®Ÿé¨“的パッãƒã«åŠ ãˆã¦ã€ã“れらã®ã‚«ãƒ¼ãƒãƒ«ã¯é€šå¸¸ãƒªãƒªãƒ¼ã‚¹æ™‚点㧠-メインライン㮠-git カーãƒãƒ«ã«å«ã¾ã‚Œã‚‹å…¨ã¦ã®å¤‰æ›´ã‚‚å«ã‚“ã§ã„ã¾ã™ã€‚ - --mm カーãƒãƒ«ã¯æ±ºã¾ã£ãŸã‚¹ã‚±ã‚¸ãƒ¥ãƒ¼ãƒ«ã§ã¯ãƒªãƒªãƒ¼ã‚¹ã•れã¾ã›ã‚“ã€ã—ã‹ã—通常幾 -ã¤ã‹ã® -mm カーãƒãƒ« (1 ã‹ã‚‰ 3 ãŒæ™®é€šï¼‰ãŒå„-rc カーãƒãƒ«ã®é–“ã«ãƒªãƒªãƒ¼ã‚¹ã• -れã¾ã™ã€‚ - サブシステム毎ã®ã‚«ãƒ¼ãƒãƒ«ãƒ„リーã¨ãƒ‘ッム------------------------------------------- -カーãƒãƒ«ã®æ§˜ã€…ãªé ˜åŸŸã§ä½•ãŒèµ·ãã¦ã„ã‚‹ã‹ã‚’見られるよã†ã«ã™ã‚‹ãŸã‚ã€å¤šãã® -カーãƒãƒ«ã‚µãƒ–システム開発者ã¯å½¼ã‚‰ã®é–‹ç™ºãƒ„リーを公開ã—ã¦ã„ã¾ã™ã€‚ã“れら㮠-ツリーã¯èª¬æ˜Žã—ãŸã‚ˆã†ã« -mm カーãƒãƒ«ãƒªãƒªãƒ¼ã‚¹ã«å…¥ã‚Œè¾¼ã¾ã‚Œã¾ã™ã€‚ - -以下ã¯ã•ã¾ã–ã¾ãªã‚«ãƒ¼ãƒãƒ«ãƒ„リーã®ä¸­ã®ã„ãã¤ã‹ã®ãƒªã‚¹ãƒˆ- - - git ツリー- - - Kbuild ã®é–‹ç™ºãƒ„リーã€Sam Ravnborg - git.kernel.org:/pub/scm/linux/kernel/git/sam/kbuild.git - - - ACPI ã®é–‹ç™ºãƒ„リー〠Len Brown - git.kernel.org:/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git - - - Block 
ã®é–‹ç™ºãƒ„リーã€Jens Axboe - git.kernel.org:/pub/scm/linux/kernel/git/axboe/linux-2.6-block.git - - - DRM ã®é–‹ç™ºãƒ„リーã€Dave Airlie - git.kernel.org:/pub/scm/linux/kernel/git/airlied/drm-2.6.git - - - ia64 ã®é–‹ç™ºãƒ„リーã€Tony Luck - git.kernel.org:/pub/scm/linux/kernel/git/aegl/linux-2.6.git - - - infiniband, Roland Dreier - git.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband.git - - - libata, Jeff Garzik - git.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev.git - - - ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ãƒ‰ãƒ©ã‚¤ãƒ, Jeff Garzik - git.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6.git - - - pcmcia, Dominik Brodowski - git.kernel.org:/pub/scm/linux/kernel/git/brodo/pcmcia-2.6.git - - - SCSI, James Bottomley - git.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git - - - x86, Ingo Molnar - git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git - - quilt ツリー- - - USB, ドライãƒã‚³ã‚¢ã¨ I2C, Greg Kroah-Hartman - kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/ +ãれãžã‚Œã®ã‚«ãƒ¼ãƒãƒ«ã‚µãƒ–システムã®ãƒ¡ãƒ³ãƒ†ãƒŠé”㯠--- ãã—ã¦å¤šãã®ã‚«ãƒ¼ãƒãƒ« +サブシステムã®é–‹ç™ºè€…é”ã‚‚ --- å„è‡ªã®æœ€æ–°ã®é–‹ç™ºçжæ³ã‚’ソースリãƒã‚¸ãƒˆãƒªã« +公開ã—ã¦ã„ã¾ã™ã€‚ãã®ãŸã‚ã€è‡ªåˆ†ã¨ã¯ç•°ãªã‚‹é ˜åŸŸã®ã‚«ãƒ¼ãƒãƒ«ã§ä½•ãŒèµ·ãã¦ã„ã‚‹ +ã‹ã‚’ä»–ã®äººãŒè¦‹ã‚‰ã‚Œã‚‹ã‚ˆã†ã«ãªã£ã¦ã„ã¾ã™ã€‚é–‹ç™ºãŒæ—©ã進んã§ã„る領域ã§ã¯ã€ +開発者ã¯è‡ªèº«ã®æŠ•稿ãŒã©ã®ã‚µãƒ–システムカーãƒãƒ«ãƒ„リーを元ã«ã—ã¦ã„ã‚‹ã‹è³ªå• +ã•れるã®ã§ã€ãã®æŠ•ç¨¿ã¨ã™ã§ã«é€²è¡Œä¸­ã®ä»–ã®ä½œæ¥­ã¨ã®è¡çªãŒé¿ã‘られã¾ã™ã€‚ + +大部分ã®ã“れらã®ãƒªãƒã‚¸ãƒˆãƒªã¯ git ツリーã§ã™ã€‚ã—ã‹ã—ãã®ä»–ã® SCM ã‚„ +quilt シリーズã¨ã—ã¦å…¬é–‹ã•れã¦ã„るパッãƒã‚­ãƒ¥ãƒ¼ã‚‚使ã‚れã¦ã„ã¾ã™ã€‚ã“れら +ã®ã‚µãƒ–システムリãƒã‚¸ãƒˆãƒªã®ã‚¢ãƒ‰ãƒ¬ã‚¹ã¯ MAINTAINERS ファイルã«ãƒªã‚¹ãƒˆã•れ +ã¦ã„ã¾ã™ã€‚ã“れらã®å¤šã㯠http://git.kernel.org/ ã§å‚ç…§ã™ã‚‹ã“ã¨ãŒã§ãã¾ +ã™ã€‚ - ãã®ä»–ã®ã‚«ãƒ¼ãƒãƒ«ãƒ„リー㯠http://git.kernel.org/ 㨠MAINTAINERS ファ - イルã«ä¸€è¦§è¡¨ãŒã‚りã¾ã™ã€‚ +ææ¡ˆã•れãŸãƒ‘ッãƒãŒã“ã®ã‚ˆã†ãªã‚µãƒ–システムツリーã«ã‚³ãƒŸãƒƒãƒˆã•れるå‰ã«ã€ãƒ¡ãƒ¼ +リングリストã§äº‹å‰ã«ãƒ¬ãƒ“ューã«ã‹ã‘られã¾ã™ï¼ˆä»¥ä¸‹ã®å¯¾å¿œã™ã‚‹ã‚»ã‚¯ã‚·ãƒ§ãƒ³ã‚’ +å‚照)。ã„ãã¤ã‹ã®ã‚«ãƒ¼ãƒãƒ«ã‚µãƒ–システムã§ã¯ã€ã“ã®ãƒ¬ãƒ“ュー㯠patchwork +ã¨ã„ã†ãƒ„ールã«ã‚ˆã£ã¦è¿½è·¡ã•れã¾ã™ã€‚Patchwork 㯠web インターフェイス㫠+よã£ã¦ãƒ‘ãƒƒãƒæŠ•ç¨¿ã®è¡¨ç¤ºã€ãƒ‘ッãƒã¸ã®ã‚³ãƒ¡ãƒ³ãƒˆä»˜ã‘や改訂ãªã©ãŒã§ãã€ãã—㦠+メンテナã¯ãƒ‘ッãƒã«å¯¾ã—ã¦ã€ãƒ¬ãƒ“ュー中ã€å—付済ã¿ã€æ‹’å¦ã¨ã„ã†ã‚ˆã†ãªãƒžãƒ¼ã‚¯ +ã‚’ã¤ã‘ã‚‹ã“ã¨ãŒã§ãã¾ã™ã€‚大部分ã®ã“れら㮠patchwork ã®ã‚µã‚¤ãƒˆã¯ +http://patchwork.kernel.org/ ã§ãƒªã‚¹ãƒˆã•れã¦ã„ã¾ã™ã€‚ + +çµ±åˆãƒ†ã‚¹ãƒˆã®ãŸã‚ã® 2.6.x -next カーãƒãƒ«ãƒ„リー +--------------------------------------------- + +ã‚µãƒ–ã‚·ã‚¹ãƒ†ãƒ ãƒ„ãƒªãƒ¼ã®æ›´æ–°å†…容ãŒãƒ¡ã‚¤ãƒ³ãƒ©ã‚¤ãƒ³ã® 2.6.x ツリーã«ãƒžãƒ¼ã‚¸ã•れ +ã‚‹å‰ã«ã€ãれらã¯çµ±åˆãƒ†ã‚¹ãƒˆã•れる必è¦ãŒã‚りã¾ã™ã€‚ã“ã®ç›®çš„ã®ãŸã‚ã€å®Ÿè³ªçš„ +ã«å…¨ã‚µãƒ–システムツリーã‹ã‚‰ã»ã¼æ¯Žæ—¥ãƒ—ルã•れã¦ã§ãる特別ãªãƒ†ã‚¹ãƒˆç”¨ã®ãƒª +ãƒã‚¸ãƒˆãƒªãŒå­˜åœ¨ã—ã¾ã™- + http://git.kernel.org/?p=linux/kernel/git/sfr/linux-next.git + http://linux.f-seidel.de/linux-next/pmwiki/ + +ã“ã®ã‚„り方ã«ã‚ˆã£ã¦ã€-next カーãƒãƒ«ã¯æ¬¡ã®ãƒžãƒ¼ã‚¸æ©Ÿä¼šã§ã©ã‚“ãªã‚‚ã®ãŒãƒ¡ã‚¤ãƒ³ +ラインカーãƒãƒ«ã«ãƒžãƒ¼ã‚¸ã•れるã‹ã€ãŠãŠã¾ã‹ãªã®å±•望をæä¾›ã—ã¾ã™ã€‚-next +カーãƒãƒ«ã®å®Ÿè¡Œãƒ†ã‚¹ãƒˆã‚’行ã†å†’険好ããªãƒ†ã‚¹ã‚¿ãƒ¼ã¯å¤§ã„ã«æ­“迎ã•れã¾ã™ ãƒã‚°ãƒ¬ãƒãƒ¼ãƒˆ ------------- @@ -673,10 +631,9 @@ Linux カーãƒãƒ«ã‚³ãƒŸãƒ¥ãƒ‹ãƒ†ã‚£ã¯ã€ä¸€åº¦ã«å¤§é‡ã®ã‚³ãƒ¼ãƒ‰ã®å¡Šã‚’ ã˜ã¨ã“ã‚ã‹ã‚‰ã‚¹ã‚¿ãƒ¼ãƒˆã—ãŸã®ã§ã™ã‹ã‚‰ã€‚ Paolo Ciarrocchi ã«æ„Ÿè¬ã€å½¼ã¯å½¼ã®æ›¸ã„㟠"Development Process" -(http://linux.tar.bz/articles/2.6-development_process)セクショ -ンをã“ã®ãƒ†ã‚­ã‚¹ãƒˆã®åŽŸåž‹ã«ã™ã‚‹ã“ã¨ã‚’許å¯ã—ã¦ãれã¾ã—ãŸã€‚ -Rundy Dunlap 㨠Gerrit Huizenga ã¯ãƒ¡ãƒ¼ãƒªãƒ³ã‚°ãƒªã‚¹ãƒˆã§ã‚„ã‚‹ã¹ãã“ã¨ã¨ã‚„㣠-ã¦ã¯ã„ã‘ãªã„ã“ã¨ã®ãƒªã‚¹ãƒˆã‚’æä¾›ã—ã¦ãれã¾ã—ãŸã€‚ 
+(http://lwn.net/Articles/94386/) セクションをã“ã®ãƒ†ã‚­ã‚¹ãƒˆã®åŽŸåž‹ã«ã™ã‚‹ +ã“ã¨ã‚’許å¯ã—ã¦ãれã¾ã—ãŸã€‚Rundy Dunlap 㨠Gerrit Huizenga ã¯ãƒ¡ãƒ¼ãƒªãƒ³ã‚° +リストã§ã‚„ã‚‹ã¹ãã“ã¨ã¨ã‚„ã£ã¦ã¯ã„ã‘ãªã„ã“ã¨ã®ãƒªã‚¹ãƒˆã‚’æä¾›ã—ã¦ãれã¾ã—ãŸã€‚ 以下ã®äººã€…ã®ãƒ¬ãƒ“ューã€ã‚³ãƒ¡ãƒ³ãƒˆã€è²¢çŒ®ã«æ„Ÿè¬ã€‚ Pat Mochel, Hanna Linder, Randy Dunlap, Kay Sievers, Vojtech Pavlik, Jan Kara, Josh Boyer, Kees Cook, Andrew Morton, Andi diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt index cc85a9278190..c603ef7b0568 100644 --- a/trunk/Documentation/kernel-parameters.txt +++ b/trunk/Documentation/kernel-parameters.txt @@ -245,7 +245,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. acpi_sleep= [HW,ACPI] Sleep options Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig, - old_ordering, s4_nonvs, sci_force_enable } + old_ordering, nonvs, sci_force_enable } See Documentation/power/video.txt for information on s3_bios and s3_mode. s3_beep is for debugging; it makes the PC's speaker beep @@ -1664,6 +1664,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. noexec=on: enable non-executable mappings (default) noexec=off: disable non-executable mappings + nosmep [X86] + Disable SMEP (Supervisor Mode Execution Protection) + even if it is supported by processor. + noexec32 [X86-64] This affects only 32-bit executables. noexec32=on: enable non-executable mappings (default) diff --git a/trunk/Documentation/md.txt b/trunk/Documentation/md.txt index a81c7b4790f2..2366b1c8cf19 100644 --- a/trunk/Documentation/md.txt +++ b/trunk/Documentation/md.txt @@ -552,6 +552,16 @@ also have within the array where IO will be blocked. This is currently only supported for raid4/5/6. + sync_min + sync_max + The two values, given as numbers of sectors, indicate a range + withing the array where 'check'/'repair' will operate. Must be + a multiple of chunk_size. When it reaches "sync_max" it will + pause, rather than complete. + You can use 'select' or 'poll' on "sync_completed" to wait for + that number to reach sync_max. Then you can either increase + "sync_max", or can write 'idle' to "sync_action". + Each active md device may also have attributes specific to the personality module that manages it. diff --git a/trunk/Documentation/power/devices.txt b/trunk/Documentation/power/devices.txt index 1971bcf48a60..88880839ece4 100644 --- a/trunk/Documentation/power/devices.txt +++ b/trunk/Documentation/power/devices.txt @@ -279,11 +279,15 @@ When the system goes into the standby or memory sleep state, the phases are: time.) Unlike the other suspend-related phases, during the prepare phase the device tree is traversed top-down. - The prepare phase uses only a bus callback. After the callback method - returns, no new children may be registered below the device. The method - may also prepare the device or driver in some way for the upcoming - system power transition, but it should not put the device into a - low-power state. + In addition to that, if device drivers need to allocate additional + memory to be able to hadle device suspend correctly, that should be + done in the prepare phase. + + After the prepare callback method returns, no new children may be + registered below the device. The method may also prepare the device or + driver in some way for the upcoming system power transition (for + example, by allocating additional memory required for this purpose), but + it should not put the device into a low-power state. 2. 
The suspend methods should quiesce the device to stop it from performing I/O. They also may save the device registers and put it into the diff --git a/trunk/Documentation/power/notifiers.txt b/trunk/Documentation/power/notifiers.txt index cf980709122a..c2a4a346c0d9 100644 --- a/trunk/Documentation/power/notifiers.txt +++ b/trunk/Documentation/power/notifiers.txt @@ -1,46 +1,41 @@ Suspend notifiers - (C) 2007 Rafael J. Wysocki , GPL - -There are some operations that device drivers may want to carry out in their -.suspend() routines, but shouldn't, because they can cause the hibernation or -suspend to fail. For example, a driver may want to allocate a substantial amount -of memory (like 50 MB) in .suspend(), but that shouldn't be done after the -swsusp's memory shrinker has run. - -Also, there may be some operations, that subsystems want to carry out before a -hibernation/suspend or after a restore/resume, requiring the system to be fully -functional, so the drivers' .suspend() and .resume() routines are not suitable -for this purpose. For example, device drivers may want to upload firmware to -their devices after a restore from a hibernation image, but they cannot do it by -calling request_firmware() from their .resume() routines (user land processes -are frozen at this point). The solution may be to load the firmware into -memory before processes are frozen and upload it from there in the .resume() -routine. Of course, a hibernation notifier may be used for this purpose. - -The subsystems that have such needs can register suspend notifiers that will be -called upon the following events by the suspend core: + (C) 2007-2011 Rafael J. Wysocki , GPL + +There are some operations that subsystems or drivers may want to carry out +before hibernation/suspend or after restore/resume, but they require the system +to be fully functional, so the drivers' and subsystems' .suspend() and .resume() +or even .prepare() and .complete() callbacks are not suitable for this purpose. +For example, device drivers may want to upload firmware to their devices after +resume/restore, but they cannot do it by calling request_firmware() from their +.resume() or .complete() routines (user land processes are frozen at these +points). The solution may be to load the firmware into memory before processes +are frozen and upload it from there in the .resume() routine. +A suspend/hibernation notifier may be used for this purpose. + +The subsystems or drivers having such needs can register suspend notifiers that +will be called upon the following events by the PM core: PM_HIBERNATION_PREPARE The system is going to hibernate or suspend, tasks will be frozen immediately. PM_POST_HIBERNATION The system memory state has been restored from a - hibernation image or an error occurred during the - hibernation. Device drivers' .resume() callbacks have + hibernation image or an error occurred during + hibernation. Device drivers' restore callbacks have been executed and tasks have been thawed. PM_RESTORE_PREPARE The system is going to restore a hibernation image. - If all goes well the restored kernel will issue a + If all goes well, the restored kernel will issue a PM_POST_HIBERNATION notification. -PM_POST_RESTORE An error occurred during the hibernation restore. - Device drivers' .resume() callbacks have been executed +PM_POST_RESTORE An error occurred during restore from hibernation. + Device drivers' restore callbacks have been executed and tasks have been thawed. -PM_SUSPEND_PREPARE The system is preparing for a suspend. 
+PM_SUSPEND_PREPARE The system is preparing for suspend. PM_POST_SUSPEND The system has just resumed or an error occurred during - the suspend. Device drivers' .resume() callbacks have - been executed and tasks have been thawed. + suspend. Device drivers' resume callbacks have been + executed and tasks have been thawed. It is generally assumed that whatever the notifiers do for PM_HIBERNATION_PREPARE, should be undone for PM_POST_HIBERNATION. Analogously, diff --git a/trunk/Documentation/scsi/LICENSE.qla2xxx b/trunk/Documentation/scsi/LICENSE.qla2xxx index 9e15b4f9cd28..19e7cd4bba66 100644 --- a/trunk/Documentation/scsi/LICENSE.qla2xxx +++ b/trunk/Documentation/scsi/LICENSE.qla2xxx @@ -1,11 +1,11 @@ -Copyright (c) 2003-2005 QLogic Corporation -QLogic Linux Fibre Channel HBA Driver +Copyright (c) 2003-2011 QLogic Corporation +QLogic Linux/ESX Fibre Channel HBA Driver -This program includes a device driver for Linux 2.6 that may be +This program includes a device driver for Linux 2.6/ESX that may be distributed with QLogic hardware specific firmware binary file. You may modify and redistribute the device driver code under the -GNU General Public License as published by the Free Software -Foundation (version 2 or a later version). +GNU General Public License (a copy of which is attached hereto as +Exhibit A) published by the Free Software Foundation (version 2). You may redistribute the hardware specific firmware binary file under the following terms: @@ -43,3 +43,285 @@ OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT, TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN COMBINATION WITH THIS PROGRAM. + + +EXHIBIT A + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. 
And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. 
+ + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. 
+ +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. diff --git a/trunk/Documentation/sound/alsa/SB-Live-mixer.txt b/trunk/Documentation/sound/alsa/SB-Live-mixer.txt index f5639d40521d..f4b5988f450c 100644 --- a/trunk/Documentation/sound/alsa/SB-Live-mixer.txt +++ b/trunk/Documentation/sound/alsa/SB-Live-mixer.txt @@ -87,14 +87,14 @@ accumulator. ALSA uses accumulators 0 and 1 for left and right PCM. The result is forwarded to the ADC capture FIFO (thus to the standard capture PCM device). -name='Music Playback Volume',index=0 +name='Synth Playback Volume',index=0 This control is used to attenuate samples for left and right MIDI FX-bus accumulators. 
ALSA uses accumulators 4 and 5 for left and right MIDI samples. The result samples are forwarded to the front DAC PCM slots of the AC97 codec. -name='Music Capture Volume',index=0 -name='Music Capture Switch',index=0 +name='Synth Capture Volume',index=0 +name='Synth Capture Switch',index=0 These controls are used to attenuate samples for left and right MIDI FX-bus accumulator. ALSA uses accumulators 4 and 5 for left and right PCM. diff --git a/trunk/Documentation/trace/kprobetrace.txt b/trunk/Documentation/trace/kprobetrace.txt index 6d27ab8d6e9f..c83bd6b4e6e8 100644 --- a/trunk/Documentation/trace/kprobetrace.txt +++ b/trunk/Documentation/trace/kprobetrace.txt @@ -120,7 +120,6 @@ format: field:unsigned char common_flags; offset:2; size:1; signed:0; field:unsigned char common_preempt_count; offset:3; size:1;signed:0; field:int common_pid; offset:4; size:4; signed:1; - field:int common_lock_depth; offset:8; size:4; signed:1; field:unsigned long __probe_ip; offset:12; size:4; signed:0; field:int __probe_nargs; offset:16; size:4; signed:1; diff --git a/trunk/Documentation/video4linux/sh_mobile_ceu_camera.txt b/trunk/Documentation/video4linux/sh_mobile_ceu_camera.txt index cb47e723af74..1e96ce6e2d2f 100644 --- a/trunk/Documentation/video4linux/sh_mobile_ceu_camera.txt +++ b/trunk/Documentation/video4linux/sh_mobile_ceu_camera.txt @@ -37,7 +37,7 @@ Generic scaling / cropping scheme -1'- In the above chart minuses and slashes represent "real" data amounts, points and -accents represent "useful" data, basically, CEU scaled amd cropped output, +accents represent "useful" data, basically, CEU scaled and cropped output, mapped back onto the client's source plane. Such a configuration can be produced by user requests: @@ -65,7 +65,7 @@ Do not touch input rectangle - it is already optimal. 1. Calculate current sensor scales: - scale_s = ((3') - (3)) / ((2') - (2)) + scale_s = ((2') - (2)) / ((3') - (3)) 2. Calculate "effective" input crop (sensor subwindow) - CEU crop scaled back at current sensor scales onto input window - this is user S_CROP: @@ -80,7 +80,7 @@ window: 4. Calculate sensor output window by applying combined scales to real input window: - width_s_out = ((2') - (2)) / scale_comb + width_s_out = ((7') - (7)) = ((2') - (2)) / scale_comb 5. Apply iterative sensor S_FMT for sensor output window. diff --git a/trunk/Documentation/virtual/00-INDEX b/trunk/Documentation/virtual/00-INDEX new file mode 100644 index 000000000000..fe0251c4cfb7 --- /dev/null +++ b/trunk/Documentation/virtual/00-INDEX @@ -0,0 +1,10 @@ +Virtualization support in the Linux kernel. + +00-INDEX + - this file. +kvm/ + - Kernel Virtual Machine. See also http://linux-kvm.org +lguest/ + - Extremely simple hypervisor for experimental/educational use. +uml/ + - User Mode Linux, builds/runs Linux kernel as a userspace program. 
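As a quick worked illustration of the corrected scaling steps in the sh_mobile_ceu_camera hunk above (the numbers below are assumed for illustration, they do not come from the patch): suppose the sensor crop (2') - (2) is 1280 pixels wide and the current sensor output (3') - (3) is 640 pixels. Step 1 then gives

    scale_s = ((2') - (2)) / ((3') - (3)) = 1280 / 640 = 2

so a scale greater than one now means the sensor is downscaling, which is the ordering the fix establishes. If the combined scale computed in the intermediate steps works out to scale_comb = 4, step 4 asks the sensor for an output window of

    width_s_out = ((7') - (7)) = ((2') - (2)) / scale_comb = 1280 / 4 = 320

pixels, which the iterative S_FMT in step 5 is then asked to approximate.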
diff --git a/trunk/Documentation/kvm/api.txt b/trunk/Documentation/virtual/kvm/api.txt similarity index 100% rename from trunk/Documentation/kvm/api.txt rename to trunk/Documentation/virtual/kvm/api.txt diff --git a/trunk/Documentation/kvm/cpuid.txt b/trunk/Documentation/virtual/kvm/cpuid.txt similarity index 100% rename from trunk/Documentation/kvm/cpuid.txt rename to trunk/Documentation/virtual/kvm/cpuid.txt diff --git a/trunk/Documentation/kvm/locking.txt b/trunk/Documentation/virtual/kvm/locking.txt similarity index 100% rename from trunk/Documentation/kvm/locking.txt rename to trunk/Documentation/virtual/kvm/locking.txt diff --git a/trunk/Documentation/kvm/mmu.txt b/trunk/Documentation/virtual/kvm/mmu.txt similarity index 100% rename from trunk/Documentation/kvm/mmu.txt rename to trunk/Documentation/virtual/kvm/mmu.txt diff --git a/trunk/Documentation/kvm/msr.txt b/trunk/Documentation/virtual/kvm/msr.txt similarity index 100% rename from trunk/Documentation/kvm/msr.txt rename to trunk/Documentation/virtual/kvm/msr.txt diff --git a/trunk/Documentation/kvm/ppc-pv.txt b/trunk/Documentation/virtual/kvm/ppc-pv.txt similarity index 100% rename from trunk/Documentation/kvm/ppc-pv.txt rename to trunk/Documentation/virtual/kvm/ppc-pv.txt diff --git a/trunk/Documentation/kvm/review-checklist.txt b/trunk/Documentation/virtual/kvm/review-checklist.txt similarity index 95% rename from trunk/Documentation/kvm/review-checklist.txt rename to trunk/Documentation/virtual/kvm/review-checklist.txt index 730475ae1b8d..a850986ed684 100644 --- a/trunk/Documentation/kvm/review-checklist.txt +++ b/trunk/Documentation/virtual/kvm/review-checklist.txt @@ -7,7 +7,7 @@ Review checklist for kvm patches 2. Patches should be against kvm.git master branch. 3. If the patch introduces or modifies a new userspace API: - - the API must be documented in Documentation/kvm/api.txt + - the API must be documented in Documentation/virtual/kvm/api.txt - the API must be discoverable using KVM_CHECK_EXTENSION 4. New state must include support for save/restore. 
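Item 3 of the review checklist above can be made concrete with a minimal userspace sketch (illustrative only and not part of the patch; KVM_CAP_NR_VCPUS is just an arbitrary existing capability used as an example): new KVM APIs are advertised as capabilities that userspace probes with the KVM_CHECK_EXTENSION ioctl on /dev/kvm.

/* kvm_check_cap.c - hypothetical example, not part of the patch */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm, ret;

	kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/*
	 * KVM_CHECK_EXTENSION returns 0 when the capability (and the API
	 * behind it) is absent, and a positive value when it is present.
	 */
	ret = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
	printf("KVM_CAP_NR_VCPUS: %d\n", ret);

	close(kvm);
	return 0;
}

This is why the checklist pairs documentation in api.txt with discoverability: a capability check like the one above lets userspace detect a new API without issuing the new ioctls blindly.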
diff --git a/trunk/Documentation/kvm/timekeeping.txt b/trunk/Documentation/virtual/kvm/timekeeping.txt similarity index 100% rename from trunk/Documentation/kvm/timekeeping.txt rename to trunk/Documentation/virtual/kvm/timekeeping.txt diff --git a/trunk/Documentation/lguest/.gitignore b/trunk/Documentation/virtual/lguest/.gitignore similarity index 100% rename from trunk/Documentation/lguest/.gitignore rename to trunk/Documentation/virtual/lguest/.gitignore diff --git a/trunk/Documentation/lguest/Makefile b/trunk/Documentation/virtual/lguest/Makefile similarity index 100% rename from trunk/Documentation/lguest/Makefile rename to trunk/Documentation/virtual/lguest/Makefile diff --git a/trunk/Documentation/lguest/extract b/trunk/Documentation/virtual/lguest/extract similarity index 100% rename from trunk/Documentation/lguest/extract rename to trunk/Documentation/virtual/lguest/extract diff --git a/trunk/Documentation/lguest/lguest.c b/trunk/Documentation/virtual/lguest/lguest.c similarity index 100% rename from trunk/Documentation/lguest/lguest.c rename to trunk/Documentation/virtual/lguest/lguest.c diff --git a/trunk/Documentation/lguest/lguest.txt b/trunk/Documentation/virtual/lguest/lguest.txt similarity index 97% rename from trunk/Documentation/lguest/lguest.txt rename to trunk/Documentation/virtual/lguest/lguest.txt index dad99978a6a8..bff0c554485d 100644 --- a/trunk/Documentation/lguest/lguest.txt +++ b/trunk/Documentation/virtual/lguest/lguest.txt @@ -74,7 +74,8 @@ Running Lguest: - Run an lguest as root: - Documentation/lguest/lguest 64 vmlinux --tunnet=192.168.19.1 --block=rootfile root=/dev/vda + Documentation/virtual/lguest/lguest 64 vmlinux --tunnet=192.168.19.1 \ + --block=rootfile root=/dev/vda Explanation: 64: the amount of memory to use, in MB. diff --git a/trunk/Documentation/uml/UserModeLinux-HOWTO.txt b/trunk/Documentation/virtual/uml/UserModeLinux-HOWTO.txt similarity index 100% rename from trunk/Documentation/uml/UserModeLinux-HOWTO.txt rename to trunk/Documentation/virtual/uml/UserModeLinux-HOWTO.txt diff --git a/trunk/Documentation/workqueue.txt b/trunk/Documentation/workqueue.txt index 01c513fac40e..a0b577de918f 100644 --- a/trunk/Documentation/workqueue.txt +++ b/trunk/Documentation/workqueue.txt @@ -12,6 +12,7 @@ CONTENTS 4. Application Programming Interface (API) 5. Example Execution Scenarios 6. Guidelines +7. Debugging 1. Introduction @@ -379,3 +380,42 @@ If q1 has WQ_CPU_INTENSIVE set, * Unless work items are expected to consume a huge amount of CPU cycles, using a bound wq is usually beneficial due to the increased level of locality in wq operations and work item execution. + + +7. Debugging + +Because the work functions are executed by generic worker threads +there are a few tricks needed to shed some light on misbehaving +workqueue users. + +Worker threads show up in the process list as: + +root 5671 0.0 0.0 0 0 ? S 12:07 0:00 [kworker/0:1] +root 5672 0.0 0.0 0 0 ? S 12:07 0:00 [kworker/1:2] +root 5673 0.0 0.0 0 0 ? S 12:12 0:00 [kworker/0:0] +root 5674 0.0 0.0 0 0 ? S 12:13 0:00 [kworker/1:0] + +If kworkers are going crazy (using too much cpu), there are two types +of possible problems: + + 1. Something beeing scheduled in rapid succession + 2. 
A single work item that consumes lots of cpu cycles + +The first one can be tracked using tracing: + + $ echo workqueue:workqueue_queue_work > /sys/kernel/debug/tracing/set_event + $ cat /sys/kernel/debug/tracing/trace_pipe > out.txt + (wait a few secs) + ^C + +If something is busy looping on work queueing, it would be dominating +the output and the offender can be determined with the work item +function. + +For the second type of problems it should be possible to just check +the stack trace of the offending worker thread. + + $ cat /proc/THE_OFFENDING_KWORKER/stack + +The work item's function should be trivially visible in the stack +trace. diff --git a/trunk/Documentation/x86/x86_64/boot-options.txt b/trunk/Documentation/x86/x86_64/boot-options.txt index 092e596a1301..c54b4f503e2a 100644 --- a/trunk/Documentation/x86/x86_64/boot-options.txt +++ b/trunk/Documentation/x86/x86_64/boot-options.txt @@ -206,7 +206,7 @@ IOMMU (input/output memory management unit) (e.g. because you have < 3 GB memory). Kernel boot message: "PCI-DMA: Disabling IOMMU" - 2. : AMD GART based hardware IOMMU. + 2. : AMD GART based hardware IOMMU. Kernel boot message: "PCI-DMA: using GART IOMMU" 3. : Software IOMMU implementation. Used diff --git a/trunk/Documentation/zh_CN/email-clients.txt b/trunk/Documentation/zh_CN/email-clients.txt new file mode 100644 index 000000000000..5d65e323d060 --- /dev/null +++ b/trunk/Documentation/zh_CN/email-clients.txt @@ -0,0 +1,210 @@ +锘?Chinese translated version of Documentation/email-clients.txt + +If you have any comment or update to the content, please contact the +original document maintainer directly. However, if you have a problem +communicating in English you can also ask the Chinese maintainer for +help. Contact the Chinese maintainer if this translation is outdated +or if there is a problem with the translation. + +Chinese maintainer: Harry Wei +--------------------------------------------------------------------- +Documentation/email-clients.txt ???æ¶“????缈æ˜?? + +æ¿¡??????å® ??ç’烘????å­˜?版???????????瀹癸??璇风?å­˜?ヨ??绯诲?????妗g??ç¼å­˜?よ?????æ¿¡????æµ£?浣跨?ㄨ?辨?? +浜ゆ???????ä¼´?剧??ç’‡?é”›?æ¶”????浠ュ??æ¶“???????ç¼å­˜?よ??å§¹???┿??æ¿¡???????缈æ˜????å­˜?é¢???????舵?????缈? +ç’‡?瀛???ã„©??棰?é”›?璇疯??绯讳腑??????ç¼å­˜?よ????? + +æ¶“???????ç¼å­˜?よ??é”›? ç’æƒ§??濞? Harry Wei +æ¶“???????缈æ˜?????é”›? ç’æƒ§??濞? Harry Wei +æ¶“?????????¤?????é”›? Yinglin Luan + Xiaochen Wang + yaxinsn + +浠ヤ??涓烘?f?? +--------------------------------------------------------------------- + +Linux???æµ è·º?㈡?风?????缃?淇℃?? +====================================================================== + +?????????缃? +---------------------------------------------------------------------- +Linux?????歌ˉ涓???????æ©????浠惰?????浜ょ??é”›????濂芥??ç›ãƒ¤??æµ£?æ¶“æ´ª??浠朵????????宓?????????????浜?ç¼å­˜?よ?? +??ユ?å •??浠讹??æµ£???????æµ å‰?????瀹规?ç…Ž??æ´?璇ユ??"text/plain"?????惰??é”›????浠朵????????æ¶“?ç’§???????é”›? +???涓鸿??æµ¼?浣胯ˉ涓????寮???ã„©?ã„¥????ㄨ??ç’鸿??绋?æ¶“???????寰???ä¼´?俱?? + +??ㄦ?ュ?????Linux?????歌ˉ涓???????æµ è·º?㈡?风????ã„¥?????ç›ãƒ¤????è·º??璇ュ??浜?????????????æ¿®???舵?????渚?æ¿¡?é”›? +æµ ?æµ ?æ¶“???芥?ç‘°?????????????ã‚…?惰〃绗???????绌烘?ç¡·???????虫????ㄦ??æ¶“?ç›????寮?æ¾¶å­˜?????ç¼?ç俱?? + +æ¶“?ç‘•????æ©?"format=flowed"妯″????????ç›ãƒ¤?????æ©???蜂??寮?璧蜂?????棰????浠ュ?????瀹崇?????ç›???? + +æ¶“?ç‘•?ç’â•€????????æµ è·º?㈡?风??æ©?ç›??????ㄦ?㈣?????æ©???蜂??æµ¼???æ‘??æµ£????ç›ãƒ¤????? + +???æµ è·º?㈡?风??æ¶“???芥?ç‘°???????????瀛?ç»—????缂??????ç‘°?????ç‘•??????????ç›ãƒ¤???????芥??ASCII??????UTF-8缂??????ç‘°??é”›? 
+[The body of this Chinese translation is encoding-damaged in this copy of the patch.
+It mirrors Documentation/email-clients.txt: the remainder of the general advice on
+sending patches as inline, unwrapped plain text (ASCII or UTF-8 encoding, preserved
+References:/In-Reply-To: headers, care with clipboards and with PGP/GPG-signed mail),
+followed by per-client configuration notes for Alpine (TUI), Evolution (GUI),
+Kmail (GUI), Lotus Notes (GUI), Mutt (TUI), Pine (TUI), Sylpheed (GUI),
+Thunderbird (GUI), TkRat (GUI) and Gmail (Web GUI), including settings such as
+"Do Not Send Flowed Text", "Strip Whitespace Before Sending", set editor="vi",
+send_charset, "no-strip-whitespace-before-send" and
+user_pref("mailnews.wraplength", 0).]
+ +Gmail缃?椤é›?㈡?风???????ã„¥?版????惰〃绗?æž????涓虹┖??笺?? + +??界?è·º?惰〃绗?æž????涓虹┖??奸??棰????浠ヨ??æ¾¶???ã„§??æˆ???ㄨВ??ç­¹???????è·º??æ©?æµ¼?浣跨?ã„¥??æž???㈣?????å§£?ç›???????æ¶“?78æ¶“?瀛?ç»—???? + +???æ¶“?æ¶“????棰????Gmailæ©?æµ¼????浠讳??æ¶“????ASCII???瀛?ç»—????淇℃????逛负base64缂???????瀹????æ¶“?ç‘—åž®????????娆ф床浜虹?????瀛???? + + ### diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index e23cbd16f1bd..49a0bf3a5b97 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -185,10 +185,9 @@ F: Documentation/filesystems/9p.txt F: fs/9p/ A2232 SERIAL BOARD DRIVER -M: Enver Haase L: linux-m68k@lists.linux-m68k.org -S: Maintained -F: drivers/char/ser_a2232* +S: Orphan +F: drivers/staging/generic_serial/ser_a2232* AACRAID SCSI RAID DRIVER M: Adaptec OEM Raid Solutions @@ -406,8 +405,8 @@ S: Maintained F: sound/oss/aedsp16.c AFFS FILE SYSTEM -M: Roman Zippel -S: Maintained +L: linux-fsdevel@vger.kernel.org +S: Orphan F: Documentation/filesystems/affs.txt F: fs/affs/ @@ -878,6 +877,13 @@ F: arch/arm/mach-mv78xx0/ F: arch/arm/mach-orion5x/ F: arch/arm/plat-orion/ +ARM/Orion SoC/Technologic Systems TS-78xx platform support +M: Alexander Clouter +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +W: http://www.digriz.org.uk/ts78xx/kernel +S: Maintained +F: arch/arm/mach-orion5x/ts78xx-* + ARM/MIOA701 MACHINE SUPPORT M: Robert Jarzmik L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -1026,12 +1032,13 @@ W: http://www.fluff.org/ben/linux/ S: Maintained F: arch/arm/mach-s3c64xx/ -ARM/S5P ARM ARCHITECTURES +ARM/S5P EXYNOS ARM ARCHITECTURES M: Kukjin Kim L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-s5p*/ +F: arch/arm/mach-exynos*/ ARM/SAMSUNG MOBILE MACHINE SUPPORT M: Kyungmin Park @@ -1064,7 +1071,7 @@ F: arch/arm/mach-shmobile/ F: drivers/sh/ ARM/TELECHIPS ARM ARCHITECTURE -M: "Hans J. Koch" +M: "Hans J. Koch" L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/plat-tcc/ @@ -1817,11 +1824,10 @@ S: Maintained F: drivers/platform/x86/compal-laptop.c COMPUTONE INTELLIPORT MULTIPORT CARD -M: "Michael H. 
Warfield" W: http://www.wittsend.com/computone.html -S: Maintained +S: Orphan F: Documentation/serial/computone.txt -F: drivers/char/ip2/ +F: drivers/staging/tty/ip2/ CONEXANT ACCESSRUNNER USB DRIVER M: Simon Arlott @@ -2004,7 +2010,7 @@ F: drivers/net/wan/cycx* CYCLADES ASYNC MUX DRIVER W: http://www.cyclades.com/ S: Orphan -F: drivers/char/cyclades.c +F: drivers/tty/cyclades.c F: include/linux/cyclades.h CYCLADES PC300 DRIVER @@ -2118,8 +2124,8 @@ L: Eng.Linux@digi.com W: http://www.digi.com S: Orphan F: Documentation/serial/digiepca.txt -F: drivers/char/epca* -F: drivers/char/digi* +F: drivers/staging/tty/epca* +F: drivers/staging/tty/digi* DIOLAN U2C-12 I2C DRIVER M: Guenter Roeck @@ -2796,42 +2802,23 @@ GPIO SUBSYSTEM M: Grant Likely S: Maintained T: git git://git.secretlab.ca/git/linux-2.6.git -F: Documentation/gpio/gpio.txt +F: Documentation/gpio.txt F: drivers/gpio/ F: include/linux/gpio* +GRE DEMULTIPLEXER DRIVER +M: Dmitry Kozlov +L: netdev@vger.kernel.org +S: Maintained +F: net/ipv4/gre.c +F: include/net/gre.h + GRETH 10/100/1G Ethernet MAC device driver M: Kristoffer Glembo L: netdev@vger.kernel.org S: Maintained F: drivers/net/greth* -HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER -M: Frank Seidel -L: platform-driver-x86@vger.kernel.org -W: http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/ -S: Maintained -F: drivers/platform/x86/hdaps.c - -HWPOISON MEMORY FAILURE HANDLING -M: Andi Kleen -L: linux-mm@kvack.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6.git hwpoison -S: Maintained -F: mm/memory-failure.c -F: mm/hwpoison-inject.c - -HYPERVISOR VIRTUAL CONSOLE DRIVER -L: linuxppc-dev@lists.ozlabs.org -S: Odd Fixes -F: drivers/tty/hvc/ - -iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER -M: Peter Jones -M: Konrad Rzeszutek Wilk -S: Maintained -F: drivers/firmware/iscsi_ibft* - GSPCA FINEPIX SUBDRIVER M: Frank Zago L: linux-media@vger.kernel.org @@ -2882,6 +2869,26 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git S: Maintained F: drivers/media/video/gspca/ +HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER +M: Frank Seidel +L: platform-driver-x86@vger.kernel.org +W: http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/ +S: Maintained +F: drivers/platform/x86/hdaps.c + +HWPOISON MEMORY FAILURE HANDLING +M: Andi Kleen +L: linux-mm@kvack.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6.git hwpoison +S: Maintained +F: mm/memory-failure.c +F: mm/hwpoison-inject.c + +HYPERVISOR VIRTUAL CONSOLE DRIVER +L: linuxppc-dev@lists.ozlabs.org +S: Odd Fixes +F: drivers/tty/hvc/ + HARDWARE MONITORING M: Jean Delvare M: Guenter Roeck @@ -2932,8 +2939,8 @@ F: drivers/block/cciss* F: include/linux/cciss_ioctl.h HFS FILESYSTEM -M: Roman Zippel -S: Maintained +L: linux-fsdevel@vger.kernel.org +S: Orphan F: Documentation/filesystems/hfs.txt F: fs/hfs/ @@ -3471,6 +3478,12 @@ F: Documentation/isapnp.txt F: drivers/pnp/isapnp/ F: include/linux/isapnp.h +iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER +M: Peter Jones +M: Konrad Rzeszutek Wilk +S: Maintained +F: drivers/firmware/iscsi_ibft* + ISCSI M: Mike Christie L: open-iscsi@googlegroups.com @@ -3800,7 +3813,7 @@ M: Rusty Russell L: lguest@lists.ozlabs.org W: http://lguest.ozlabs.org/ S: Odd Fixes -F: Documentation/lguest/ +F: Documentation/virtual/lguest/ F: arch/x86/lguest/ F: drivers/lguest/ F: include/linux/lguest*.h @@ -3987,7 +4000,6 @@ F: arch/m32r/ M68K ARCHITECTURE M: Geert Uytterhoeven -M: Roman Zippel L: linux-m68k@lists.linux-m68k.org W: 
http://www.linux-m68k.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/linux-m68k.git @@ -4077,7 +4089,7 @@ F: drivers/video/matrox/matroxfb_* F: include/linux/matroxfb.h MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER -M: "Hans J. Koch" +M: "Hans J. Koch" L: lm-sensors@lm-sensors.org S: Maintained F: Documentation/hwmon/max6650 @@ -4192,7 +4204,7 @@ MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD M: Jiri Slaby S: Maintained F: Documentation/serial/moxa-smartio -F: drivers/char/mxser.* +F: drivers/tty/mxser.* MSI LAPTOP SUPPORT M: "Lee, Chun-Yi" @@ -4234,7 +4246,7 @@ F: sound/oss/msnd* MULTITECH MULTIPORT CARD (ISICOM) S: Orphan -F: drivers/char/isicom.c +F: drivers/tty/isicom.c F: include/linux/isicom.h MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER @@ -4983,6 +4995,13 @@ F: Documentation/pps/ F: drivers/pps/ F: include/linux/pps*.h +PPTP DRIVER +M: Dmitry Kozlov +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/pptp.c +W: http://sourceforge.net/projects/accel-pptp + PREEMPTIBLE KERNEL M: Robert Love L: kpreempt-tech@lists.sourceforge.net @@ -5274,14 +5293,14 @@ F: drivers/memstick/host/r592.* RISCOM8 DRIVER S: Orphan F: Documentation/serial/riscom8.txt -F: drivers/char/riscom8* +F: drivers/staging/tty/riscom8* ROCKETPORT DRIVER P: Comtrol Corp. W: http://www.comtrol.com S: Maintained F: Documentation/serial/rocket.txt -F: drivers/char/rocket* +F: drivers/tty/rocket* ROSE NETWORK LAYER M: Ralf Baechle @@ -5391,7 +5410,7 @@ F: drivers/media/video/*7146* F: include/media/*7146* SAMSUNG AUDIO (ASoC) DRIVERS -M: Jassi Brar +M: Jassi Brar L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Supported F: sound/soc/samsung @@ -5412,6 +5431,7 @@ F: include/linux/timex.h F: kernel/time/clocksource.c F: kernel/time/time*.c F: kernel/time/ntp.c +F: drivers/clocksource TLG2300 VIDEO4LINUX-2 DRIVER M: Huang Shijie @@ -5592,9 +5612,9 @@ F: include/linux/ata.h F: include/linux/libata.h SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER -M: Jayamohan Kallickal +M: Jayamohan Kallickal L: linux-scsi@vger.kernel.org -W: http://www.serverengines.com +W: http://www.emulex.com S: Supported F: drivers/scsi/be2iscsi/ @@ -5924,10 +5944,9 @@ F: arch/arm/mach-spear6xx/spear600.c F: arch/arm/mach-spear6xx/spear600_evb.c SPECIALIX IO8+ MULTIPORT SERIAL CARD DRIVER -M: Roger Wolff -S: Supported +S: Orphan F: Documentation/serial/specialix.txt -F: drivers/char/specialix* +F: drivers/staging/tty/specialix* SPI SUBSYSTEM M: David Brownell @@ -5972,7 +5991,6 @@ F: arch/alpha/kernel/srm_env.c STABLE BRANCH M: Greg Kroah-Hartman -M: Chris Wright L: stable@kernel.org S: Maintained @@ -6256,7 +6274,8 @@ M: Greg Ungerer W: http://www.uclinux.org/ L: uclinux-dev@uclinux.org (subscribers-only) S: Maintained -F: arch/m68knommu/ +F: arch/m68k/*/*_no.* +F: arch/m68k/include/asm/*_no.* UCLINUX FOR RENESAS H8/300 (H8300) M: Yoshinori Sato @@ -6620,13 +6639,13 @@ L: user-mode-linux-devel@lists.sourceforge.net L: user-mode-linux-user@lists.sourceforge.net W: http://user-mode-linux.sourceforge.net S: Maintained -F: Documentation/uml/ +F: Documentation/virtual/uml/ F: arch/um/ F: fs/hostfs/ F: fs/hppfs/ USERSPACE I/O (UIO) -M: "Hans J. Koch" +M: "Hans J. Koch" M: Greg Kroah-Hartman S: Maintained F: Documentation/DocBook/uio-howto.tmpl @@ -6924,6 +6943,18 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86. 
S: Maintained F: drivers/platform/x86 +XEN HYPERVISOR INTERFACE +M: Jeremy Fitzhardinge +M: Konrad Rzeszutek Wilk +L: xen-devel@lists.xensource.com (moderated for non-subscribers) +L: virtualization@lists.linux-foundation.org +S: Supported +F: arch/x86/xen/ +F: drivers/*/xen-*front.c +F: drivers/xen/ +F: arch/x86/include/asm/xen/ +F: include/xen/ + XEN NETWORK BACKEND DRIVER M: Ian Campbell L: xen-devel@lists.xensource.com (moderated for non-subscribers) @@ -6945,18 +6976,6 @@ S: Supported F: arch/x86/xen/*swiotlb* F: drivers/xen/*swiotlb* -XEN HYPERVISOR INTERFACE -M: Jeremy Fitzhardinge -M: Konrad Rzeszutek Wilk -L: xen-devel@lists.xensource.com (moderated for non-subscribers) -L: virtualization@lists.linux-foundation.org -S: Supported -F: arch/x86/xen/ -F: drivers/*/xen-*front.c -F: drivers/xen/ -F: arch/x86/include/asm/xen/ -F: include/xen/ - XFS FILESYSTEM P: Silicon Graphics Inc M: Alex Elder @@ -7026,20 +7045,6 @@ M: "Maciej W. Rozycki" S: Maintained F: drivers/tty/serial/zs.* -GRE DEMULTIPLEXER DRIVER -M: Dmitry Kozlov -L: netdev@vger.kernel.org -S: Maintained -F: net/ipv4/gre.c -F: include/net/gre.h - -PPTP DRIVER -M: Dmitry Kozlov -L: netdev@vger.kernel.org -S: Maintained -F: drivers/net/pptp.c -W: http://sourceforge.net/projects/accel-pptp - THE REST M: Linus Torvalds L: linux-kernel@vger.kernel.org diff --git a/trunk/Makefile b/trunk/Makefile index 8392b64079df..a0344a81a893 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 39 -EXTRAVERSION = -rc2 +EXTRAVERSION = NAME = Flesh-Eating Bats with Fangs # *DOCUMENTATION* @@ -1268,6 +1268,7 @@ help: @echo ' make C=1 [targets] Check all c source with $$CHECK (sparse by default)' @echo ' make C=2 [targets] Force check of all c source with $$CHECK' @echo ' make W=1 [targets] Enable extra gcc checks' + @echo ' make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections' @echo '' @echo 'Execute "make" or "make all" to build all targets marked with [*] ' @echo 'For further info see the ./README file' diff --git a/trunk/arch/alpha/include/asm/unistd.h b/trunk/arch/alpha/include/asm/unistd.h index 058937bf5a77..b1834166922d 100644 --- a/trunk/arch/alpha/include/asm/unistd.h +++ b/trunk/arch/alpha/include/asm/unistd.h @@ -452,10 +452,14 @@ #define __NR_fanotify_init 494 #define __NR_fanotify_mark 495 #define __NR_prlimit64 496 +#define __NR_name_to_handle_at 497 +#define __NR_open_by_handle_at 498 +#define __NR_clock_adjtime 499 +#define __NR_syncfs 500 #ifdef __KERNEL__ -#define NR_SYSCALLS 497 +#define NR_SYSCALLS 501 #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR diff --git a/trunk/arch/alpha/kernel/Makefile b/trunk/arch/alpha/kernel/Makefile index 9bb7b858ed23..7a6d908bb865 100644 --- a/trunk/arch/alpha/kernel/Makefile +++ b/trunk/arch/alpha/kernel/Makefile @@ -4,7 +4,7 @@ extra-y := head.o vmlinux.lds asflags-y := $(KBUILD_CFLAGS) -ccflags-y := -Werror -Wno-sign-compare +ccflags-y := -Wno-sign-compare obj-y := entry.o traps.o process.o init_task.o osf_sys.o irq.o \ irq_alpha.o signal.o setup.o ptrace.o time.o \ diff --git a/trunk/arch/alpha/kernel/core_mcpcia.c b/trunk/arch/alpha/kernel/core_mcpcia.c index 381fec0af52e..da7bcc372f16 100644 --- a/trunk/arch/alpha/kernel/core_mcpcia.c +++ b/trunk/arch/alpha/kernel/core_mcpcia.c @@ -88,7 +88,7 @@ conf_read(unsigned long addr, unsigned char type1, { unsigned long flags; unsigned long mid = MCPCIA_HOSE2MID(hose->index); - unsigned int stat0, value, temp, cpu; + unsigned int stat0, value, cpu; cpu = 
smp_processor_id(); @@ -101,7 +101,7 @@ conf_read(unsigned long addr, unsigned char type1, stat0 = *(vuip)MCPCIA_CAP_ERR(mid); *(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb(); - temp = *(vuip)MCPCIA_CAP_ERR(mid); + *(vuip)MCPCIA_CAP_ERR(mid); DBG_CFG(("conf_read: MCPCIA_CAP_ERR(%d) was 0x%x\n", mid, stat0)); mb(); @@ -136,7 +136,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1, { unsigned long flags; unsigned long mid = MCPCIA_HOSE2MID(hose->index); - unsigned int stat0, temp, cpu; + unsigned int stat0, cpu; cpu = smp_processor_id(); @@ -145,7 +145,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1, /* Reset status register to avoid losing errors. */ stat0 = *(vuip)MCPCIA_CAP_ERR(mid); *(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb(); - temp = *(vuip)MCPCIA_CAP_ERR(mid); + *(vuip)MCPCIA_CAP_ERR(mid); DBG_CFG(("conf_write: MCPCIA CAP_ERR(%d) was 0x%x\n", mid, stat0)); draina(); @@ -157,7 +157,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1, *((vuip)addr) = value; mb(); mb(); /* magic */ - temp = *(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */ + *(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */ mcheck_expected(cpu) = 0; mb(); @@ -572,12 +572,10 @@ mcpcia_print_system_area(unsigned long la_ptr) void mcpcia_machine_check(unsigned long vector, unsigned long la_ptr) { - struct el_common *mchk_header; struct el_MCPCIA_uncorrected_frame_mcheck *mchk_logout; unsigned int cpu = smp_processor_id(); int expected; - mchk_header = (struct el_common *)la_ptr; mchk_logout = (struct el_MCPCIA_uncorrected_frame_mcheck *)la_ptr; expected = mcheck_expected(cpu); diff --git a/trunk/arch/alpha/kernel/err_titan.c b/trunk/arch/alpha/kernel/err_titan.c index c3b3781a03de..14b26c466c89 100644 --- a/trunk/arch/alpha/kernel/err_titan.c +++ b/trunk/arch/alpha/kernel/err_titan.c @@ -533,8 +533,6 @@ static struct el_subpacket_annotation el_titan_annotations[] = { static struct el_subpacket * el_process_regatta_subpacket(struct el_subpacket *header) { - int status; - if (header->class != EL_CLASS__REGATTA_FAMILY) { printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n", err_print_prefix, @@ -551,7 +549,7 @@ el_process_regatta_subpacket(struct el_subpacket *header) printk("%s ** Occurred on CPU %d:\n", err_print_prefix, (int)header->by_type.regatta_frame.cpuid); - status = privateer_process_logout_frame((struct el_common *) + privateer_process_logout_frame((struct el_common *) header->by_type.regatta_frame.data_start, 1); break; default: diff --git a/trunk/arch/alpha/kernel/irq_alpha.c b/trunk/arch/alpha/kernel/irq_alpha.c index 1479dc6ebd97..51b7fbd9e4c1 100644 --- a/trunk/arch/alpha/kernel/irq_alpha.c +++ b/trunk/arch/alpha/kernel/irq_alpha.c @@ -228,7 +228,7 @@ struct irqaction timer_irqaction = { void __init init_rtc_irq(void) { - irq_set_chip_and_handler_name(RTC_IRQ, &no_irq_chip, + irq_set_chip_and_handler_name(RTC_IRQ, &dummy_irq_chip, handle_simple_irq, "RTC"); setup_irq(RTC_IRQ, &timer_irqaction); } diff --git a/trunk/arch/alpha/kernel/setup.c b/trunk/arch/alpha/kernel/setup.c index d2634e4476b4..edbddcbd5bc6 100644 --- a/trunk/arch/alpha/kernel/setup.c +++ b/trunk/arch/alpha/kernel/setup.c @@ -1404,8 +1404,6 @@ determine_cpu_caches (unsigned int cpu_type) case PCA56_CPU: case PCA57_CPU: { - unsigned long cbox_config, size; - if (cpu_type == PCA56_CPU) { L1I = CSHAPE(16*1024, 6, 1); L1D = CSHAPE(8*1024, 5, 1); @@ -1415,10 +1413,12 @@ determine_cpu_caches (unsigned int cpu_type) } L3 = -1; +#if 0 + unsigned long cbox_config, 
size; + cbox_config = *(vulp) phys_to_virt (0xfffff00008UL); size = 512*1024 * (1 << ((cbox_config >> 12) & 3)); -#if 0 L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1); #else L2 = external_cache_probe(512*1024, 6); diff --git a/trunk/arch/alpha/kernel/smc37c93x.c b/trunk/arch/alpha/kernel/smc37c93x.c index 3e6a2893af9f..6886b834f487 100644 --- a/trunk/arch/alpha/kernel/smc37c93x.c +++ b/trunk/arch/alpha/kernel/smc37c93x.c @@ -79,7 +79,6 @@ static unsigned long __init SMCConfigState(unsigned long baseAddr) { unsigned char devId; - unsigned char devRev; unsigned long configPort; unsigned long indexPort; @@ -100,7 +99,7 @@ static unsigned long __init SMCConfigState(unsigned long baseAddr) devId = inb(dataPort); if (devId == VALID_DEVICE_ID) { outb(DEVICE_REV, indexPort); - devRev = inb(dataPort); + /* unsigned char devRev = */ inb(dataPort); break; } else diff --git a/trunk/arch/alpha/kernel/smp.c b/trunk/arch/alpha/kernel/smp.c index 42aa078a5e4d..5a621c6d22ab 100644 --- a/trunk/arch/alpha/kernel/smp.c +++ b/trunk/arch/alpha/kernel/smp.c @@ -585,8 +585,7 @@ handle_ipi(struct pt_regs *regs) switch (which) { case IPI_RESCHEDULE: - /* Reschedule callback. Everything to be done - is done by the interrupt return path. */ + scheduler_ipi(); break; case IPI_CALL_FUNC: diff --git a/trunk/arch/alpha/kernel/sys_wildfire.c b/trunk/arch/alpha/kernel/sys_wildfire.c index d3cb28bb8eb0..d92cdc715c65 100644 --- a/trunk/arch/alpha/kernel/sys_wildfire.c +++ b/trunk/arch/alpha/kernel/sys_wildfire.c @@ -156,7 +156,6 @@ static void __init wildfire_init_irq_per_pca(int qbbno, int pcano) { int i, irq_bias; - unsigned long io_bias; static struct irqaction isa_enable = { .handler = no_action, .name = "isa_enable", @@ -165,10 +164,12 @@ wildfire_init_irq_per_pca(int qbbno, int pcano) irq_bias = qbbno * (WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA) + pcano * WILDFIRE_IRQ_PER_PCA; +#if 0 + unsigned long io_bias; + /* Only need the following for first PCI bus per PCA. */ io_bias = WILDFIRE_IO(qbbno, pcano<<1) - WILDFIRE_IO_BIAS; -#if 0 outb(0, DMA1_RESET_REG + io_bias); outb(0, DMA2_RESET_REG + io_bias); outb(DMA_MODE_CASCADE, DMA2_MODE_REG + io_bias); diff --git a/trunk/arch/alpha/kernel/systbls.S b/trunk/arch/alpha/kernel/systbls.S index a6a1de9db16f..15f999d41c75 100644 --- a/trunk/arch/alpha/kernel/systbls.S +++ b/trunk/arch/alpha/kernel/systbls.S @@ -498,23 +498,27 @@ sys_call_table: .quad sys_ni_syscall /* sys_timerfd */ .quad sys_eventfd .quad sys_recvmmsg - .quad sys_fallocate /* 480 */ + .quad sys_fallocate /* 480 */ .quad sys_timerfd_create .quad sys_timerfd_settime .quad sys_timerfd_gettime .quad sys_signalfd4 - .quad sys_eventfd2 /* 485 */ + .quad sys_eventfd2 /* 485 */ .quad sys_epoll_create1 .quad sys_dup3 .quad sys_pipe2 .quad sys_inotify_init1 - .quad sys_preadv /* 490 */ + .quad sys_preadv /* 490 */ .quad sys_pwritev .quad sys_rt_tgsigqueueinfo .quad sys_perf_event_open .quad sys_fanotify_init - .quad sys_fanotify_mark /* 495 */ + .quad sys_fanotify_mark /* 495 */ .quad sys_prlimit64 + .quad sys_name_to_handle_at + .quad sys_open_by_handle_at + .quad sys_clock_adjtime + .quad sys_syncfs /* 500 */ .size sys_call_table, . 
- sys_call_table .type sys_call_table, @object diff --git a/trunk/arch/alpha/kernel/time.c b/trunk/arch/alpha/kernel/time.c index a58e84f1a63b..818e74ed45dc 100644 --- a/trunk/arch/alpha/kernel/time.c +++ b/trunk/arch/alpha/kernel/time.c @@ -153,6 +153,7 @@ void read_persistent_clock(struct timespec *ts) year += 100; ts->tv_sec = mktime(year, mon, day, hour, min, sec); + ts->tv_nsec = 0; } @@ -374,8 +375,7 @@ static struct clocksource clocksource_rpcc = { static inline void register_rpcc_clocksource(long cycle_freq) { - clocksource_calc_mult_shift(&clocksource_rpcc, cycle_freq, 4); - clocksource_register(&clocksource_rpcc); + clocksource_register_hz(&clocksource_rpcc, cycle_freq); } #else /* !CONFIG_SMP */ static inline void register_rpcc_clocksource(long cycle_freq) diff --git a/trunk/arch/alpha/kernel/vmlinux.lds.S b/trunk/arch/alpha/kernel/vmlinux.lds.S index 433be2a24f31..3d890a98a08b 100644 --- a/trunk/arch/alpha/kernel/vmlinux.lds.S +++ b/trunk/arch/alpha/kernel/vmlinux.lds.S @@ -46,6 +46,7 @@ SECTIONS __init_end = .; /* Freed after init ends here */ + _sdata = .; /* Start of rw data section */ _data = .; RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) diff --git a/trunk/arch/arm/Kconfig b/trunk/arch/arm/Kconfig index fdc9d4dbf85b..377a7a595b08 100644 --- a/trunk/arch/arm/Kconfig +++ b/trunk/arch/arm/Kconfig @@ -1540,7 +1540,6 @@ config HIGHMEM config HIGHPTE bool "Allocate 2nd-level pagetables from highmem" depends on HIGHMEM - depends on !OUTER_CACHE config HW_PERF_EVENTS bool "Enable hardware performance counter support for perf events" @@ -2012,6 +2011,8 @@ source "kernel/power/Kconfig" config ARCH_SUSPEND_POSSIBLE depends on !ARCH_S5P64X0 && !ARCH_S5P6442 + depends on CPU_ARM920T || CPU_ARM926T || CPU_SA1100 || \ + CPU_V6 || CPU_V6K || CPU_V7 || CPU_XSC3 || CPU_XSCALE def_bool y endmenu diff --git a/trunk/arch/arm/Kconfig.debug b/trunk/arch/arm/Kconfig.debug index 494224a9b459..03d01d783e3b 100644 --- a/trunk/arch/arm/Kconfig.debug +++ b/trunk/arch/arm/Kconfig.debug @@ -63,17 +63,6 @@ config DEBUG_USER 8 - SIGSEGV faults 16 - SIGBUS faults -config DEBUG_ERRORS - bool "Verbose kernel error messages" - depends on DEBUG_KERNEL - help - This option controls verbose debugging information which can be - printed when the kernel detects an internal error. This debugging - information is useful to kernel hackers when tracking down problems, - but mostly meaningless to other people. It's safe to say Y unless - you are concerned with the code size or don't want to see these - messages. - config DEBUG_STACK_USAGE bool "Enable stack utilization instrumentation" depends on DEBUG_KERNEL diff --git a/trunk/arch/arm/boot/compressed/Makefile b/trunk/arch/arm/boot/compressed/Makefile index 8ebbb511c783..0c6852d93506 100644 --- a/trunk/arch/arm/boot/compressed/Makefile +++ b/trunk/arch/arm/boot/compressed/Makefile @@ -74,7 +74,7 @@ ZTEXTADDR := $(CONFIG_ZBOOT_ROM_TEXT) ZBSSADDR := $(CONFIG_ZBOOT_ROM_BSS) else ZTEXTADDR := 0 -ZBSSADDR := ALIGN(4) +ZBSSADDR := ALIGN(8) endif SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/ diff --git a/trunk/arch/arm/boot/compressed/head.S b/trunk/arch/arm/boot/compressed/head.S index adf583cd0c35..49f5b2eaaa87 100644 --- a/trunk/arch/arm/boot/compressed/head.S +++ b/trunk/arch/arm/boot/compressed/head.S @@ -179,15 +179,14 @@ not_angel: bl cache_on restart: adr r0, LC0 - ldmia r0, {r1, r2, r3, r5, r6, r9, r11, r12} - ldr sp, [r0, #32] + ldmia r0, {r1, r2, r3, r6, r9, r11, r12} + ldr sp, [r0, #28] /* * We might be running at a different address. 
We need * to fix up various pointers. */ sub r0, r0, r1 @ calculate the delta offset - add r5, r5, r0 @ _start add r6, r6, r0 @ _edata #ifndef CONFIG_ZBOOT_ROM @@ -206,31 +205,40 @@ restart: adr r0, LC0 /* * Check to see if we will overwrite ourselves. * r4 = final kernel address - * r5 = start of this image * r9 = size of decompressed image * r10 = end of this image, including bss/stack/malloc space if non XIP * We basically want: - * r4 >= r10 -> OK - * r4 + image length <= r5 -> OK + * r4 - 16k page directory >= r10 -> OK + * r4 + image length <= current position (pc) -> OK */ + add r10, r10, #16384 cmp r4, r10 bhs wont_overwrite add r10, r4, r9 - cmp r10, r5 + ARM( cmp r10, pc ) + THUMB( mov lr, pc ) + THUMB( cmp r10, lr ) bls wont_overwrite /* * Relocate ourselves past the end of the decompressed kernel. - * r5 = start of this image * r6 = _edata * r10 = end of the decompressed kernel * Because we always copy ahead, we need to do it from the end and go * backward in case the source and destination overlap. */ - /* Round up to next 256-byte boundary. */ - add r10, r10, #256 + /* + * Bump to the next 256-byte boundary with the size of + * the relocation code added. This avoids overwriting + * ourself when the offset is small. + */ + add r10, r10, #((reloc_code_end - restart + 256) & ~255) bic r10, r10, #255 + /* Get start of code we want to copy and align it down. */ + adr r5, restart + bic r5, r5, #31 + sub r9, r6, r5 @ size to copy add r9, r9, #31 @ rounded up to a multiple bic r9, r9, #31 @ ... of 32 bytes @@ -245,6 +253,11 @@ restart: adr r0, LC0 /* Preserve offset to relocated code. */ sub r6, r9, r6 +#ifndef CONFIG_ZBOOT_ROM + /* cache_clean_flush may use the stack, so relocate it */ + add sp, sp, r6 +#endif + bl cache_clean_flush adr r0, BSYM(restart) @@ -333,7 +346,6 @@ not_relocated: mov r0, #0 LC0: .word LC0 @ r1 .word __bss_start @ r2 .word _end @ r3 - .word _start @ r5 .word _edata @ r6 .word _image_size @ r9 .word _got_start @ r11 @@ -1062,6 +1074,7 @@ memdump: mov r12, r0 #endif .ltorg +reloc_code_end: .align .section ".stack", "aw", %nobits diff --git a/trunk/arch/arm/boot/compressed/vmlinux.lds.in b/trunk/arch/arm/boot/compressed/vmlinux.lds.in index 5309909d7282..ea80abe78844 100644 --- a/trunk/arch/arm/boot/compressed/vmlinux.lds.in +++ b/trunk/arch/arm/boot/compressed/vmlinux.lds.in @@ -54,6 +54,7 @@ SECTIONS .bss : { *(.bss) } _end = .; + . = ALIGN(8); /* the stack must be 64-bit aligned */ .stack : { *(.stack) } .stab 0 : { *(.stab) } diff --git a/trunk/arch/arm/common/Makefile b/trunk/arch/arm/common/Makefile index e7521bca2c35..6ea9b6f3607a 100644 --- a/trunk/arch/arm/common/Makefile +++ b/trunk/arch/arm/common/Makefile @@ -16,5 +16,4 @@ obj-$(CONFIG_SHARP_SCOOP) += scoop.o obj-$(CONFIG_ARCH_IXP2000) += uengine.o obj-$(CONFIG_ARCH_IXP23XX) += uengine.o obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o -obj-$(CONFIG_COMMON_CLKDEV) += clkdev.o obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o diff --git a/trunk/arch/arm/common/vic.c b/trunk/arch/arm/common/vic.c index 113085a77123..7aa4262ada7a 100644 --- a/trunk/arch/arm/common/vic.c +++ b/trunk/arch/arm/common/vic.c @@ -22,17 +22,16 @@ #include #include #include -#include +#include #include #include #include #include -#if defined(CONFIG_PM) +#ifdef CONFIG_PM /** * struct vic_device - VIC PM device - * @sysdev: The system device which is registered. * @irq: The IRQ number for the base of the VIC. * @base: The register base for the VIC. * @resume_sources: A bitmask of interrupts for resume. 
@@ -43,8 +42,6 @@ * @protect: Save for VIC_PROTECT. */ struct vic_device { - struct sys_device sysdev; - void __iomem *base; int irq; u32 resume_sources; @@ -59,11 +56,6 @@ struct vic_device { static struct vic_device vic_devices[CONFIG_ARM_VIC_NR]; static int vic_id; - -static inline struct vic_device *to_vic(struct sys_device *sys) -{ - return container_of(sys, struct vic_device, sysdev); -} #endif /* CONFIG_PM */ /** @@ -85,10 +77,9 @@ static void vic_init2(void __iomem *base) writel(32, base + VIC_PL190_DEF_VECT_ADDR); } -#if defined(CONFIG_PM) -static int vic_class_resume(struct sys_device *dev) +#ifdef CONFIG_PM +static void resume_one_vic(struct vic_device *vic) { - struct vic_device *vic = to_vic(dev); void __iomem *base = vic->base; printk(KERN_DEBUG "%s: resuming vic at %p\n", __func__, base); @@ -107,13 +98,18 @@ static int vic_class_resume(struct sys_device *dev) writel(vic->soft_int, base + VIC_INT_SOFT); writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR); +} - return 0; +static void vic_resume(void) +{ + int id; + + for (id = vic_id - 1; id >= 0; id--) + resume_one_vic(vic_devices + id); } -static int vic_class_suspend(struct sys_device *dev, pm_message_t state) +static void suspend_one_vic(struct vic_device *vic) { - struct vic_device *vic = to_vic(dev); void __iomem *base = vic->base; printk(KERN_DEBUG "%s: suspending vic at %p\n", __func__, base); @@ -128,14 +124,21 @@ static int vic_class_suspend(struct sys_device *dev, pm_message_t state) writel(vic->resume_irqs, base + VIC_INT_ENABLE); writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR); +} + +static int vic_suspend(void) +{ + int id; + + for (id = 0; id < vic_id; id++) + suspend_one_vic(vic_devices + id); return 0; } -struct sysdev_class vic_class = { - .name = "vic", - .suspend = vic_class_suspend, - .resume = vic_class_resume, +struct syscore_ops vic_syscore_ops = { + .suspend = vic_suspend, + .resume = vic_resume, }; /** @@ -147,30 +150,8 @@ struct sysdev_class vic_class = { */ static int __init vic_pm_init(void) { - struct vic_device *dev = vic_devices; - int err; - int id; - - if (vic_id == 0) - return 0; - - err = sysdev_class_register(&vic_class); - if (err) { - printk(KERN_ERR "%s: cannot register class\n", __func__); - return err; - } - - for (id = 0; id < vic_id; id++, dev++) { - dev->sysdev.id = id; - dev->sysdev.cls = &vic_class; - - err = sysdev_register(&dev->sysdev); - if (err) { - printk(KERN_ERR "%s: failed to register device\n", - __func__); - return err; - } - } + if (vic_id > 0) + register_syscore_ops(&vic_syscore_ops); return 0; } diff --git a/trunk/arch/arm/configs/at91x40_defconfig b/trunk/arch/arm/configs/at91x40_defconfig new file mode 100644 index 000000000000..c55e9212fcbb --- /dev/null +++ b/trunk/arch/arm/configs/at91x40_defconfig @@ -0,0 +1,48 @@ +CONFIG_EXPERIMENTAL=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_EMBEDDED=y +# CONFIG_HOTPLUG is not set +# CONFIG_ELF_CORE is not set +# CONFIG_FUTEX is not set +# CONFIG_TIMERFD is not set +# CONFIG_VM_EVENT_COUNTERS is not set +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB=y +# CONFIG_LBDAF is not set +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +# CONFIG_MMU is not set +CONFIG_ARCH_AT91=y +CONFIG_ARCH_AT91X40=y +CONFIG_MACH_AT91EB01=y +CONFIG_AT91_EARLY_USART0=y +CONFIG_CPU_ARM7TDMI=y +CONFIG_SET_MEM_PARAM=y +CONFIG_DRAM_BASE=0x01000000 +CONFIG_DRAM_SIZE=0x00400000 +CONFIG_FLASH_MEM_BASE=0x01400000 +CONFIG_PROCESSOR_ID=0x14000040 +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 
+CONFIG_BINFMT_FLAT=y +# CONFIG_SUSPEND is not set +# CONFIG_FW_LOADER is not set +CONFIG_MTD=y +CONFIG_MTD_PARTITIONS=y +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_RAM=y +CONFIG_MTD_ROM=y +CONFIG_BLK_DEV_RAM=y +# CONFIG_INPUT is not set +# CONFIG_SERIO is not set +# CONFIG_VT is not set +# CONFIG_DEVKMEM is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_HWMON is not set +# CONFIG_USB_SUPPORT is not set +CONFIG_EXT2_FS=y +# CONFIG_DNOTIFY is not set +CONFIG_ROMFS_FS=y +# CONFIG_ENABLE_MUST_CHECK is not set diff --git a/trunk/arch/arm/include/asm/cputype.h b/trunk/arch/arm/include/asm/cputype.h index ed5bc9e05a4e..cd4458f64171 100644 --- a/trunk/arch/arm/include/asm/cputype.h +++ b/trunk/arch/arm/include/asm/cputype.h @@ -2,6 +2,7 @@ #define __ASM_ARM_CPUTYPE_H #include +#include #define CPUID_ID 0 #define CPUID_CACHETYPE 1 diff --git a/trunk/arch/arm/include/asm/i8253.h b/trunk/arch/arm/include/asm/i8253.h new file mode 100644 index 000000000000..70656b69d5ce --- /dev/null +++ b/trunk/arch/arm/include/asm/i8253.h @@ -0,0 +1,15 @@ +#ifndef __ASMARM_I8253_H +#define __ASMARM_I8253_H + +/* i8253A PIT registers */ +#define PIT_MODE 0x43 +#define PIT_CH0 0x40 + +#define PIT_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ) + +extern raw_spinlock_t i8253_lock; + +#define outb_pit outb_p +#define inb_pit inb_p + +#endif diff --git a/trunk/arch/arm/include/asm/kprobes.h b/trunk/arch/arm/include/asm/kprobes.h index bb8a19bd5822..e46bdd0097eb 100644 --- a/trunk/arch/arm/include/asm/kprobes.h +++ b/trunk/arch/arm/include/asm/kprobes.h @@ -39,10 +39,13 @@ typedef u32 kprobe_opcode_t; struct kprobe; typedef void (kprobe_insn_handler_t)(struct kprobe *, struct pt_regs *); +typedef unsigned long (kprobe_check_cc)(unsigned long); + /* Architecture specific copy of original instruction. */ struct arch_specific_insn { kprobe_opcode_t *insn; kprobe_insn_handler_t *insn_handler; + kprobe_check_cc *insn_check_cc; }; struct prev_kprobe { diff --git a/trunk/arch/arm/include/asm/mach/time.h b/trunk/arch/arm/include/asm/mach/time.h index 883f6be5117a..d5adaae5ee2c 100644 --- a/trunk/arch/arm/include/asm/mach/time.h +++ b/trunk/arch/arm/include/asm/mach/time.h @@ -34,7 +34,6 @@ * timer interrupt which may be pending. 
*/ struct sys_timer { - struct sys_device dev; void (*init)(void); void (*suspend)(void); void (*resume)(void); diff --git a/trunk/arch/arm/include/asm/system.h b/trunk/arch/arm/include/asm/system.h index 885be097769d..832888d0c20c 100644 --- a/trunk/arch/arm/include/asm/system.h +++ b/trunk/arch/arm/include/asm/system.h @@ -159,7 +159,7 @@ extern unsigned int user_debug; #include #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) #define mb() do { dsb(); outer_sync(); } while (0) -#define rmb() dmb() +#define rmb() dsb() #define wmb() mb() #else #include diff --git a/trunk/arch/arm/include/asm/thread_notify.h b/trunk/arch/arm/include/asm/thread_notify.h index c4391ba20350..1dc980675894 100644 --- a/trunk/arch/arm/include/asm/thread_notify.h +++ b/trunk/arch/arm/include/asm/thread_notify.h @@ -43,6 +43,7 @@ static inline void thread_notify(unsigned long rc, struct thread_info *thread) #define THREAD_NOTIFY_FLUSH 0 #define THREAD_NOTIFY_EXIT 1 #define THREAD_NOTIFY_SWITCH 2 +#define THREAD_NOTIFY_COPY 3 #endif #endif diff --git a/trunk/arch/arm/include/asm/unistd.h b/trunk/arch/arm/include/asm/unistd.h index c891eb76c0e3..87dbe3e21970 100644 --- a/trunk/arch/arm/include/asm/unistd.h +++ b/trunk/arch/arm/include/asm/unistd.h @@ -396,6 +396,10 @@ #define __NR_fanotify_init (__NR_SYSCALL_BASE+367) #define __NR_fanotify_mark (__NR_SYSCALL_BASE+368) #define __NR_prlimit64 (__NR_SYSCALL_BASE+369) +#define __NR_name_to_handle_at (__NR_SYSCALL_BASE+370) +#define __NR_open_by_handle_at (__NR_SYSCALL_BASE+371) +#define __NR_clock_adjtime (__NR_SYSCALL_BASE+372) +#define __NR_syncfs (__NR_SYSCALL_BASE+373) /* * The following SWIs are ARM private. diff --git a/trunk/arch/arm/kernel/Makefile b/trunk/arch/arm/kernel/Makefile index 74554f1742d7..8d95446150a3 100644 --- a/trunk/arch/arm/kernel/Makefile +++ b/trunk/arch/arm/kernel/Makefile @@ -29,7 +29,7 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o obj-$(CONFIG_ARTHUR) += arthur.o obj-$(CONFIG_ISA_DMA) += dma-isa.o obj-$(CONFIG_PCI) += bios32.o isa.o -obj-$(CONFIG_PM) += sleep.o +obj-$(CONFIG_PM_SLEEP) += sleep.o obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o obj-$(CONFIG_SMP) += smp.o smp_tlb.o obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o diff --git a/trunk/arch/arm/kernel/calls.S b/trunk/arch/arm/kernel/calls.S index 5c26eccef998..7fbf28c35bb2 100644 --- a/trunk/arch/arm/kernel/calls.S +++ b/trunk/arch/arm/kernel/calls.S @@ -379,6 +379,10 @@ CALL(sys_fanotify_init) CALL(sys_fanotify_mark) CALL(sys_prlimit64) +/* 370 */ CALL(sys_name_to_handle_at) + CALL(sys_open_by_handle_at) + CALL(sys_clock_adjtime) + CALL(sys_syncfs) #ifndef syscalls_counted .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls #define syscalls_counted diff --git a/trunk/arch/arm/kernel/elf.c b/trunk/arch/arm/kernel/elf.c index d4a0da1e48f4..9b05c6a0dcea 100644 --- a/trunk/arch/arm/kernel/elf.c +++ b/trunk/arch/arm/kernel/elf.c @@ -40,15 +40,22 @@ EXPORT_SYMBOL(elf_check_arch); void elf_set_personality(const struct elf32_hdr *x) { unsigned int eflags = x->e_flags; - unsigned int personality = PER_LINUX_32BIT; + unsigned int personality = current->personality & ~PER_MASK; + + /* + * We only support Linux ELF executables, so always set the + * personality to LINUX. 
+ */ + personality |= PER_LINUX; /* * APCS-26 is only valid for OABI executables */ - if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) { - if (eflags & EF_ARM_APCS_26) - personality = PER_LINUX; - } + if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN && + (eflags & EF_ARM_APCS_26)) + personality &= ~ADDR_LIMIT_32BIT; + else + personality |= ADDR_LIMIT_32BIT; set_personality(personality); diff --git a/trunk/arch/arm/kernel/hw_breakpoint.c b/trunk/arch/arm/kernel/hw_breakpoint.c index 8dbc126f7152..87acc25d7a3e 100644 --- a/trunk/arch/arm/kernel/hw_breakpoint.c +++ b/trunk/arch/arm/kernel/hw_breakpoint.c @@ -868,6 +868,13 @@ static void reset_ctrl_regs(void *info) */ asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0)); isb(); + + /* + * Clear any configured vector-catch events before + * enabling monitor mode. + */ + asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0)); + isb(); } if (enable_monitor_mode()) diff --git a/trunk/arch/arm/kernel/kprobes-decode.c b/trunk/arch/arm/kernel/kprobes-decode.c index 23891317dc4b..15eeff6aea0e 100644 --- a/trunk/arch/arm/kernel/kprobes-decode.c +++ b/trunk/arch/arm/kernel/kprobes-decode.c @@ -34,9 +34,6 @@ * * *) If the PC is written to by the instruction, the * instruction must be fully simulated in software. - * If it is a conditional instruction, the handler - * will use insn[0] to copy its condition code to - * set r0 to 1 and insn[1] to "mov pc, lr" to return. * * *) Otherwise, a modified form of the instruction is * directly executed. Its handler calls the @@ -68,13 +65,17 @@ #define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25) +#define is_r15(insn, bitpos) (((insn) & (0xf << bitpos)) == (0xf << bitpos)) + +/* + * Test if load/store instructions writeback the address register. 
+ * if P (bit 24) == 0 or W (bit 21) == 1 + */ +#define is_writeback(insn) ((insn ^ 0x01000000) & 0x01200000) + #define PSR_fs (PSR_f|PSR_s) #define KPROBE_RETURN_INSTRUCTION 0xe1a0f00e /* mov pc, lr */ -#define SET_R0_TRUE_INSTRUCTION 0xe3a00001 /* mov r0, #1 */ - -#define truecc_insn(insn) (((insn) & 0xf0000000) | \ - (SET_R0_TRUE_INSTRUCTION & 0x0fffffff)) typedef long (insn_0arg_fn_t)(void); typedef long (insn_1arg_fn_t)(long); @@ -419,14 +420,10 @@ insnslot_llret_4arg_rwflags(long r0, long r1, long r2, long r3, long *cpsr, static void __kprobes simulate_bbl(struct kprobe *p, struct pt_regs *regs) { - insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; long iaddr = (long)p->addr; int disp = branch_displacement(insn); - if (!insnslot_1arg_rflags(0, regs->ARM_cpsr, i_fn)) - return; - if (insn & (1 << 24)) regs->ARM_lr = iaddr + 4; @@ -446,14 +443,10 @@ static void __kprobes simulate_blx1(struct kprobe *p, struct pt_regs *regs) static void __kprobes simulate_blx2bx(struct kprobe *p, struct pt_regs *regs) { - insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; int rm = insn & 0xf; long rmv = regs->uregs[rm]; - if (!insnslot_1arg_rflags(0, regs->ARM_cpsr, i_fn)) - return; - if (insn & (1 << 5)) regs->ARM_lr = (long)p->addr + 4; @@ -463,9 +456,16 @@ static void __kprobes simulate_blx2bx(struct kprobe *p, struct pt_regs *regs) regs->ARM_cpsr |= PSR_T_BIT; } +static void __kprobes simulate_mrs(struct kprobe *p, struct pt_regs *regs) +{ + kprobe_opcode_t insn = p->opcode; + int rd = (insn >> 12) & 0xf; + unsigned long mask = 0xf8ff03df; /* Mask out execution state */ + regs->uregs[rd] = regs->ARM_cpsr & mask; +} + static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs) { - insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; int rn = (insn >> 16) & 0xf; int lbit = insn & (1 << 20); @@ -476,9 +476,6 @@ static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs) int reg_bit_vector; int reg_count; - if (!insnslot_1arg_rflags(0, regs->ARM_cpsr, i_fn)) - return; - reg_count = 0; reg_bit_vector = insn & 0xffff; while (reg_bit_vector) { @@ -510,11 +507,6 @@ static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs) static void __kprobes simulate_stm1_pc(struct kprobe *p, struct pt_regs *regs) { - insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; - - if (!insnslot_1arg_rflags(0, regs->ARM_cpsr, i_fn)) - return; - regs->ARM_pc = (long)p->addr + str_pc_offset; simulate_ldm1stm1(p, regs); regs->ARM_pc = (long)p->addr + 4; @@ -525,24 +517,16 @@ static void __kprobes simulate_mov_ipsp(struct kprobe *p, struct pt_regs *regs) regs->uregs[12] = regs->uregs[13]; } -static void __kprobes emulate_ldcstc(struct kprobe *p, struct pt_regs *regs) -{ - insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - int rn = (insn >> 16) & 0xf; - long rnv = regs->uregs[rn]; - - /* Save Rn in case of writeback. */ - regs->uregs[rn] = insnslot_1arg_rflags(rnv, regs->ARM_cpsr, i_fn); -} - static void __kprobes emulate_ldrd(struct kprobe *p, struct pt_regs *regs) { insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; + long ppc = (long)p->addr + 8; int rd = (insn >> 12) & 0xf; int rn = (insn >> 16) & 0xf; int rm = insn & 0xf; /* rm may be invalid, don't care. */ + long rmv = (rm == 15) ? ppc : regs->uregs[rm]; + long rnv = (rn == 15) ? 
ppc : regs->uregs[rn]; /* Not following the C calling convention here, so need asm(). */ __asm__ __volatile__ ( @@ -554,29 +538,36 @@ static void __kprobes emulate_ldrd(struct kprobe *p, struct pt_regs *regs) "str r0, %[rn] \n\t" /* in case of writeback */ "str r2, %[rd0] \n\t" "str r3, %[rd1] \n\t" - : [rn] "+m" (regs->uregs[rn]), + : [rn] "+m" (rnv), [rd0] "=m" (regs->uregs[rd]), [rd1] "=m" (regs->uregs[rd+1]) - : [rm] "m" (regs->uregs[rm]), + : [rm] "m" (rmv), [cpsr] "r" (regs->ARM_cpsr), [i_fn] "r" (i_fn) : "r0", "r1", "r2", "r3", "lr", "cc" ); + if (is_writeback(insn)) + regs->uregs[rn] = rnv; } static void __kprobes emulate_strd(struct kprobe *p, struct pt_regs *regs) { insn_4arg_fn_t *i_fn = (insn_4arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; + long ppc = (long)p->addr + 8; int rd = (insn >> 12) & 0xf; int rn = (insn >> 16) & 0xf; int rm = insn & 0xf; - long rnv = regs->uregs[rn]; - long rmv = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. */ + long rnv = (rn == 15) ? ppc : regs->uregs[rn]; + /* rm/rmv may be invalid, don't care. */ + long rmv = (rm == 15) ? ppc : regs->uregs[rm]; + long rnv_wb; - regs->uregs[rn] = insnslot_4arg_rflags(rnv, rmv, regs->uregs[rd], + rnv_wb = insnslot_4arg_rflags(rnv, rmv, regs->uregs[rd], regs->uregs[rd+1], regs->ARM_cpsr, i_fn); + if (is_writeback(insn)) + regs->uregs[rn] = rnv_wb; } static void __kprobes emulate_ldr(struct kprobe *p, struct pt_regs *regs) @@ -630,31 +621,6 @@ static void __kprobes emulate_str(struct kprobe *p, struct pt_regs *regs) regs->uregs[rn] = rnv_wb; /* Save Rn in case of writeback. */ } -static void __kprobes emulate_mrrc(struct kprobe *p, struct pt_regs *regs) -{ - insn_llret_0arg_fn_t *i_fn = (insn_llret_0arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - union reg_pair fnr; - int rd = (insn >> 12) & 0xf; - int rn = (insn >> 16) & 0xf; - - fnr.dr = insnslot_llret_0arg_rflags(regs->ARM_cpsr, i_fn); - regs->uregs[rn] = fnr.r0; - regs->uregs[rd] = fnr.r1; -} - -static void __kprobes emulate_mcrr(struct kprobe *p, struct pt_regs *regs) -{ - insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - int rd = (insn >> 12) & 0xf; - int rn = (insn >> 16) & 0xf; - long rnv = regs->uregs[rn]; - long rdv = regs->uregs[rd]; - - insnslot_2arg_rflags(rnv, rdv, regs->ARM_cpsr, i_fn); -} - static void __kprobes emulate_sat(struct kprobe *p, struct pt_regs *regs) { insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; @@ -688,32 +654,32 @@ static void __kprobes emulate_none(struct kprobe *p, struct pt_regs *regs) insnslot_0arg_rflags(regs->ARM_cpsr, i_fn); } -static void __kprobes emulate_rd12(struct kprobe *p, struct pt_regs *regs) +static void __kprobes emulate_nop(struct kprobe *p, struct pt_regs *regs) { - insn_0arg_fn_t *i_fn = (insn_0arg_fn_t *)&p->ainsn.insn[0]; - kprobe_opcode_t insn = p->opcode; - int rd = (insn >> 12) & 0xf; - - regs->uregs[rd] = insnslot_0arg_rflags(regs->ARM_cpsr, i_fn); } -static void __kprobes emulate_ird12(struct kprobe *p, struct pt_regs *regs) +static void __kprobes +emulate_rd12_modify(struct kprobe *p, struct pt_regs *regs) { insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; - int ird = (insn >> 12) & 0xf; + int rd = (insn >> 12) & 0xf; + long rdv = regs->uregs[rd]; - insnslot_1arg_rflags(regs->uregs[ird], regs->ARM_cpsr, i_fn); + regs->uregs[rd] = insnslot_1arg_rflags(rdv, regs->ARM_cpsr, i_fn); } -static void __kprobes emulate_rn16(struct kprobe *p, struct pt_regs *regs) 
+static void __kprobes +emulate_rd12rn0_modify(struct kprobe *p, struct pt_regs *regs) { - insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; + insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; - int rn = (insn >> 16) & 0xf; + int rd = (insn >> 12) & 0xf; + int rn = insn & 0xf; + long rdv = regs->uregs[rd]; long rnv = regs->uregs[rn]; - insnslot_1arg_rflags(rnv, regs->ARM_cpsr, i_fn); + regs->uregs[rd] = insnslot_2arg_rflags(rdv, rnv, regs->ARM_cpsr, i_fn); } static void __kprobes emulate_rd12rm0(struct kprobe *p, struct pt_regs *regs) @@ -818,6 +784,17 @@ emulate_alu_imm_rwflags(struct kprobe *p, struct pt_regs *regs) regs->uregs[rd] = insnslot_1arg_rwflags(rnv, ®s->ARM_cpsr, i_fn); } +static void __kprobes +emulate_alu_tests_imm(struct kprobe *p, struct pt_regs *regs) +{ + insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; + kprobe_opcode_t insn = p->opcode; + int rn = (insn >> 16) & 0xf; + long rnv = (rn == 15) ? (long)p->addr + 8 : regs->uregs[rn]; + + insnslot_1arg_rwflags(rnv, ®s->ARM_cpsr, i_fn); +} + static void __kprobes emulate_alu_rflags(struct kprobe *p, struct pt_regs *regs) { @@ -854,14 +831,34 @@ emulate_alu_rwflags(struct kprobe *p, struct pt_regs *regs) insnslot_3arg_rwflags(rnv, rmv, rsv, ®s->ARM_cpsr, i_fn); } +static void __kprobes +emulate_alu_tests(struct kprobe *p, struct pt_regs *regs) +{ + insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0]; + kprobe_opcode_t insn = p->opcode; + long ppc = (long)p->addr + 8; + int rn = (insn >> 16) & 0xf; + int rs = (insn >> 8) & 0xf; /* rs/rsv may be invalid, don't care. */ + int rm = insn & 0xf; + long rnv = (rn == 15) ? ppc : regs->uregs[rn]; + long rmv = (rm == 15) ? ppc : regs->uregs[rm]; + long rsv = regs->uregs[rs]; + + insnslot_3arg_rwflags(rnv, rmv, rsv, ®s->ARM_cpsr, i_fn); +} + static enum kprobe_insn __kprobes prep_emulate_ldr_str(kprobe_opcode_t insn, struct arch_specific_insn *asi) { - int ibit = (insn & (1 << 26)) ? 25 : 22; + int not_imm = (insn & (1 << 26)) ? 
(insn & (1 << 25)) + : (~insn & (1 << 22)); + + if (is_writeback(insn) && is_r15(insn, 16)) + return INSN_REJECTED; /* Writeback to PC */ insn &= 0xfff00fff; insn |= 0x00001000; /* Rn = r0, Rd = r1 */ - if (insn & (1 << ibit)) { + if (not_imm) { insn &= ~0xf; insn |= 2; /* Rm = r2 */ } @@ -871,20 +868,40 @@ prep_emulate_ldr_str(kprobe_opcode_t insn, struct arch_specific_insn *asi) } static enum kprobe_insn __kprobes -prep_emulate_rd12rm0(kprobe_opcode_t insn, struct arch_specific_insn *asi) +prep_emulate_rd12_modify(kprobe_opcode_t insn, struct arch_specific_insn *asi) { - insn &= 0xffff0ff0; /* Rd = r0, Rm = r0 */ + if (is_r15(insn, 12)) + return INSN_REJECTED; /* Rd is PC */ + + insn &= 0xffff0fff; /* Rd = r0 */ asi->insn[0] = insn; - asi->insn_handler = emulate_rd12rm0; + asi->insn_handler = emulate_rd12_modify; return INSN_GOOD; } static enum kprobe_insn __kprobes -prep_emulate_rd12(kprobe_opcode_t insn, struct arch_specific_insn *asi) +prep_emulate_rd12rn0_modify(kprobe_opcode_t insn, + struct arch_specific_insn *asi) { - insn &= 0xffff0fff; /* Rd = r0 */ + if (is_r15(insn, 12)) + return INSN_REJECTED; /* Rd is PC */ + + insn &= 0xffff0ff0; /* Rd = r0 */ + insn |= 0x00000001; /* Rn = r1 */ + asi->insn[0] = insn; + asi->insn_handler = emulate_rd12rn0_modify; + return INSN_GOOD; +} + +static enum kprobe_insn __kprobes +prep_emulate_rd12rm0(kprobe_opcode_t insn, struct arch_specific_insn *asi) +{ + if (is_r15(insn, 12)) + return INSN_REJECTED; /* Rd is PC */ + + insn &= 0xffff0ff0; /* Rd = r0, Rm = r0 */ asi->insn[0] = insn; - asi->insn_handler = emulate_rd12; + asi->insn_handler = emulate_rd12rm0; return INSN_GOOD; } @@ -892,6 +909,9 @@ static enum kprobe_insn __kprobes prep_emulate_rd12rn16rm0_wflags(kprobe_opcode_t insn, struct arch_specific_insn *asi) { + if (is_r15(insn, 12)) + return INSN_REJECTED; /* Rd is PC */ + insn &= 0xfff00ff0; /* Rd = r0, Rn = r0 */ insn |= 0x00000001; /* Rm = r1 */ asi->insn[0] = insn; @@ -903,6 +923,9 @@ static enum kprobe_insn __kprobes prep_emulate_rd16rs8rm0_wflags(kprobe_opcode_t insn, struct arch_specific_insn *asi) { + if (is_r15(insn, 16)) + return INSN_REJECTED; /* Rd is PC */ + insn &= 0xfff0f0f0; /* Rd = r0, Rs = r0 */ insn |= 0x00000001; /* Rm = r1 */ asi->insn[0] = insn; @@ -914,6 +937,9 @@ static enum kprobe_insn __kprobes prep_emulate_rd16rn12rs8rm0_wflags(kprobe_opcode_t insn, struct arch_specific_insn *asi) { + if (is_r15(insn, 16)) + return INSN_REJECTED; /* Rd is PC */ + insn &= 0xfff000f0; /* Rd = r0, Rn = r0 */ insn |= 0x00000102; /* Rs = r1, Rm = r2 */ asi->insn[0] = insn; @@ -925,6 +951,9 @@ static enum kprobe_insn __kprobes prep_emulate_rdhi16rdlo12rs8rm0_wflags(kprobe_opcode_t insn, struct arch_specific_insn *asi) { + if (is_r15(insn, 16) || is_r15(insn, 12)) + return INSN_REJECTED; /* RdHi or RdLo is PC */ + insn &= 0xfff000f0; /* RdHi = r0, RdLo = r1 */ insn |= 0x00001203; /* Rs = r2, Rm = r3 */ asi->insn[0] = insn; @@ -945,20 +974,13 @@ prep_emulate_rdhi16rdlo12rs8rm0_wflags(kprobe_opcode_t insn, static enum kprobe_insn __kprobes space_1111(kprobe_opcode_t insn, struct arch_specific_insn *asi) { - /* CPS mmod == 1 : 1111 0001 0000 xx10 xxxx xxxx xx0x xxxx */ - /* RFE : 1111 100x x0x1 xxxx xxxx 1010 xxxx xxxx */ - /* SRS : 1111 100x x1x0 1101 xxxx 0101 xxxx xxxx */ - if ((insn & 0xfff30020) == 0xf1020000 || - (insn & 0xfe500f00) == 0xf8100a00 || - (insn & 0xfe5f0f00) == 0xf84d0500) - return INSN_REJECTED; - - /* PLD : 1111 01x1 x101 xxxx xxxx xxxx xxxx xxxx : */ - if ((insn & 0xfd700000) == 0xf4500000) { - insn &= 0xfff0ffff; /* 
Rn = r0 */ - asi->insn[0] = insn; - asi->insn_handler = emulate_rn16; - return INSN_GOOD; + /* memory hint : 1111 0100 x001 xxxx xxxx xxxx xxxx xxxx : */ + /* PLDI : 1111 0100 x101 xxxx xxxx xxxx xxxx xxxx : */ + /* PLDW : 1111 0101 x001 xxxx xxxx xxxx xxxx xxxx : */ + /* PLD : 1111 0101 x101 xxxx xxxx xxxx xxxx xxxx : */ + if ((insn & 0xfe300000) == 0xf4100000) { + asi->insn_handler = emulate_nop; + return INSN_GOOD_NO_SLOT; } /* BLX(1) : 1111 101x xxxx xxxx xxxx xxxx xxxx xxxx : */ @@ -967,41 +989,22 @@ space_1111(kprobe_opcode_t insn, struct arch_specific_insn *asi) return INSN_GOOD_NO_SLOT; } - /* SETEND : 1111 0001 0000 0001 xxxx xxxx 0000 xxxx */ - /* CDP2 : 1111 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ - if ((insn & 0xffff00f0) == 0xf1010000 || - (insn & 0xff000010) == 0xfe000000) { - asi->insn[0] = insn; - asi->insn_handler = emulate_none; - return INSN_GOOD; - } + /* CPS : 1111 0001 0000 xxx0 xxxx xxxx xx0x xxxx */ + /* SETEND: 1111 0001 0000 0001 xxxx xxxx 0000 xxxx */ + /* SRS : 1111 100x x1x0 xxxx xxxx xxxx xxxx xxxx */ + /* RFE : 1111 100x x0x1 xxxx xxxx xxxx xxxx xxxx */ + + /* Coprocessor instructions... */ /* MCRR2 : 1111 1100 0100 xxxx xxxx xxxx xxxx xxxx : (Rd != Rn) */ /* MRRC2 : 1111 1100 0101 xxxx xxxx xxxx xxxx xxxx : (Rd != Rn) */ - if ((insn & 0xffe00000) == 0xfc400000) { - insn &= 0xfff00fff; /* Rn = r0 */ - insn |= 0x00001000; /* Rd = r1 */ - asi->insn[0] = insn; - asi->insn_handler = - (insn & (1 << 20)) ? emulate_mrrc : emulate_mcrr; - return INSN_GOOD; - } + /* LDC2 : 1111 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ + /* STC2 : 1111 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ + /* CDP2 : 1111 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ + /* MCR2 : 1111 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ + /* MRC2 : 1111 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */ - /* LDC2 : 1111 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ - /* STC2 : 1111 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ - if ((insn & 0xfe000000) == 0xfc000000) { - insn &= 0xfff0ffff; /* Rn = r0 */ - asi->insn[0] = insn; - asi->insn_handler = emulate_ldcstc; - return INSN_GOOD; - } - - /* MCR2 : 1111 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ - /* MRC2 : 1111 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */ - insn &= 0xffff0fff; /* Rd = r0 */ - asi->insn[0] = insn; - asi->insn_handler = (insn & (1 << 20)) ? 
emulate_rd12 : emulate_ird12; - return INSN_GOOD; + return INSN_REJECTED; } static enum kprobe_insn __kprobes @@ -1010,19 +1013,18 @@ space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi) /* cccc 0001 0xx0 xxxx xxxx xxxx xxxx xxx0 xxxx */ if ((insn & 0x0f900010) == 0x01000000) { - /* BXJ : cccc 0001 0010 xxxx xxxx xxxx 0010 xxxx */ - /* MSR : cccc 0001 0x10 xxxx xxxx xxxx 0000 xxxx */ - if ((insn & 0x0ff000f0) == 0x01200020 || - (insn & 0x0fb000f0) == 0x01200000) - return INSN_REJECTED; - - /* MRS : cccc 0001 0x00 xxxx xxxx xxxx 0000 xxxx */ - if ((insn & 0x0fb00010) == 0x01000000) - return prep_emulate_rd12(insn, asi); + /* MRS cpsr : cccc 0001 0000 xxxx xxxx xxxx 0000 xxxx */ + if ((insn & 0x0ff000f0) == 0x01000000) { + if (is_r15(insn, 12)) + return INSN_REJECTED; /* Rd is PC */ + asi->insn_handler = simulate_mrs; + return INSN_GOOD_NO_SLOT; + } /* SMLALxy : cccc 0001 0100 xxxx xxxx xxxx 1xx0 xxxx */ if ((insn & 0x0ff00090) == 0x01400080) - return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, asi); + return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, + asi); /* SMULWy : cccc 0001 0010 xxxx xxxx xxxx 1x10 xxxx */ /* SMULxy : cccc 0001 0110 xxxx xxxx xxxx 1xx0 xxxx */ @@ -1031,24 +1033,29 @@ space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi) return prep_emulate_rd16rs8rm0_wflags(insn, asi); /* SMLAxy : cccc 0001 0000 xxxx xxxx xxxx 1xx0 xxxx : Q */ - /* SMLAWy : cccc 0001 0010 xxxx xxxx xxxx 0x00 xxxx : Q */ - return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); + /* SMLAWy : cccc 0001 0010 xxxx xxxx xxxx 1x00 xxxx : Q */ + if ((insn & 0x0ff00090) == 0x01000080 || + (insn & 0x0ff000b0) == 0x01200080) + return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); + + /* BXJ : cccc 0001 0010 xxxx xxxx xxxx 0010 xxxx */ + /* MSR : cccc 0001 0x10 xxxx xxxx xxxx 0000 xxxx */ + /* MRS spsr : cccc 0001 0100 xxxx xxxx xxxx 0000 xxxx */ + /* Other instruction encodings aren't yet defined */ + return INSN_REJECTED; } /* cccc 0001 0xx0 xxxx xxxx xxxx xxxx 0xx1 xxxx */ else if ((insn & 0x0f900090) == 0x01000010) { - /* BKPT : 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */ - if ((insn & 0xfff000f0) == 0xe1200070) - return INSN_REJECTED; - /* BLX(2) : cccc 0001 0010 xxxx xxxx xxxx 0011 xxxx */ /* BX : cccc 0001 0010 xxxx xxxx xxxx 0001 xxxx */ if ((insn & 0x0ff000d0) == 0x01200010) { - asi->insn[0] = truecc_insn(insn); + if ((insn & 0x0ff000ff) == 0x0120003f) + return INSN_REJECTED; /* BLX pc */ asi->insn_handler = simulate_blx2bx; - return INSN_GOOD; + return INSN_GOOD_NO_SLOT; } /* CLZ : cccc 0001 0110 xxxx xxxx xxxx 0001 xxxx */ @@ -1059,17 +1066,27 @@ space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi) /* QSUB : cccc 0001 0010 xxxx xxxx xxxx 0101 xxxx :Q */ /* QDADD : cccc 0001 0100 xxxx xxxx xxxx 0101 xxxx :Q */ /* QDSUB : cccc 0001 0110 xxxx xxxx xxxx 0101 xxxx :Q */ - return prep_emulate_rd12rn16rm0_wflags(insn, asi); + if ((insn & 0x0f9000f0) == 0x01000050) + return prep_emulate_rd12rn16rm0_wflags(insn, asi); + + /* BKPT : 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */ + /* SMC : cccc 0001 0110 xxxx xxxx xxxx 0111 xxxx */ + + /* Other instruction encodings aren't yet defined */ + return INSN_REJECTED; } /* cccc 0000 xxxx xxxx xxxx xxxx xxxx 1001 xxxx */ - else if ((insn & 0x0f000090) == 0x00000090) { + else if ((insn & 0x0f0000f0) == 0x00000090) { /* MUL : cccc 0000 0000 xxxx xxxx xxxx 1001 xxxx : */ /* MULS : cccc 0000 0001 xxxx xxxx xxxx 1001 xxxx :cc */ /* MLA : cccc 0000 0010 xxxx xxxx xxxx 1001 xxxx : */ /* MLAS : cccc 0000 0011 xxxx xxxx xxxx 1001 
xxxx :cc */ /* UMAAL : cccc 0000 0100 xxxx xxxx xxxx 1001 xxxx : */ + /* undef : cccc 0000 0101 xxxx xxxx xxxx 1001 xxxx : */ + /* MLS : cccc 0000 0110 xxxx xxxx xxxx 1001 xxxx : */ + /* undef : cccc 0000 0111 xxxx xxxx xxxx 1001 xxxx : */ /* UMULL : cccc 0000 1000 xxxx xxxx xxxx 1001 xxxx : */ /* UMULLS : cccc 0000 1001 xxxx xxxx xxxx 1001 xxxx :cc */ /* UMLAL : cccc 0000 1010 xxxx xxxx xxxx 1001 xxxx : */ @@ -1078,13 +1095,15 @@ space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi) /* SMULLS : cccc 0000 1101 xxxx xxxx xxxx 1001 xxxx :cc */ /* SMLAL : cccc 0000 1110 xxxx xxxx xxxx 1001 xxxx : */ /* SMLALS : cccc 0000 1111 xxxx xxxx xxxx 1001 xxxx :cc */ - if ((insn & 0x0fe000f0) == 0x00000090) { - return prep_emulate_rd16rs8rm0_wflags(insn, asi); - } else if ((insn & 0x0fe000f0) == 0x00200090) { - return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); - } else { - return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, asi); - } + if ((insn & 0x00d00000) == 0x00500000) + return INSN_REJECTED; + else if ((insn & 0x00e00000) == 0x00000000) + return prep_emulate_rd16rs8rm0_wflags(insn, asi); + else if ((insn & 0x00a00000) == 0x00200000) + return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); + else + return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, + asi); } /* cccc 000x xxxx xxxx xxxx xxxx xxxx 1xx1 xxxx */ @@ -1092,23 +1111,45 @@ space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi) /* SWP : cccc 0001 0000 xxxx xxxx xxxx 1001 xxxx */ /* SWPB : cccc 0001 0100 xxxx xxxx xxxx 1001 xxxx */ - /* LDRD : cccc 000x xxx0 xxxx xxxx xxxx 1101 xxxx */ - /* STRD : cccc 000x xxx0 xxxx xxxx xxxx 1111 xxxx */ + /* ??? : cccc 0001 0x01 xxxx xxxx xxxx 1001 xxxx */ + /* ??? : cccc 0001 0x10 xxxx xxxx xxxx 1001 xxxx */ + /* ??? : cccc 0001 0x11 xxxx xxxx xxxx 1001 xxxx */ /* STREX : cccc 0001 1000 xxxx xxxx xxxx 1001 xxxx */ /* LDREX : cccc 0001 1001 xxxx xxxx xxxx 1001 xxxx */ + /* STREXD: cccc 0001 1010 xxxx xxxx xxxx 1001 xxxx */ + /* LDREXD: cccc 0001 1011 xxxx xxxx xxxx 1001 xxxx */ + /* STREXB: cccc 0001 1100 xxxx xxxx xxxx 1001 xxxx */ + /* LDREXB: cccc 0001 1101 xxxx xxxx xxxx 1001 xxxx */ + /* STREXH: cccc 0001 1110 xxxx xxxx xxxx 1001 xxxx */ + /* LDREXH: cccc 0001 1111 xxxx xxxx xxxx 1001 xxxx */ + + /* LDRD : cccc 000x xxx0 xxxx xxxx xxxx 1101 xxxx */ + /* STRD : cccc 000x xxx0 xxxx xxxx xxxx 1111 xxxx */ /* LDRH : cccc 000x xxx1 xxxx xxxx xxxx 1011 xxxx */ /* STRH : cccc 000x xxx0 xxxx xxxx xxxx 1011 xxxx */ /* LDRSB : cccc 000x xxx1 xxxx xxxx xxxx 1101 xxxx */ /* LDRSH : cccc 000x xxx1 xxxx xxxx xxxx 1111 xxxx */ - if ((insn & 0x0fb000f0) == 0x01000090) { - /* SWP/SWPB */ - return prep_emulate_rd12rn16rm0_wflags(insn, asi); + if ((insn & 0x0f0000f0) == 0x01000090) { + if ((insn & 0x0fb000f0) == 0x01000090) { + /* SWP/SWPB */ + return prep_emulate_rd12rn16rm0_wflags(insn, + asi); + } else { + /* STREX/LDREX variants and unallocaed space */ + return INSN_REJECTED; + } + } else if ((insn & 0x0e1000d0) == 0x00000d0) { /* STRD/LDRD */ + if ((insn & 0x0000e000) == 0x0000e000) + return INSN_REJECTED; /* Rd is LR or PC */ + if (is_writeback(insn) && is_r15(insn, 16)) + return INSN_REJECTED; /* Writeback to PC */ + insn &= 0xfff00fff; insn |= 0x00002000; /* Rn = r0, Rd = r2 */ - if (insn & (1 << 22)) { - /* I bit */ + if (!(insn & (1 << 22))) { + /* Register index */ insn &= ~0xf; insn |= 1; /* Rm = r1 */ } @@ -1118,6 +1159,9 @@ space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi) return INSN_GOOD; } + /* LDRH/STRH/LDRSB/LDRSH */ + if (is_r15(insn, 12)) + 
return INSN_REJECTED; /* Rd is PC */ return prep_emulate_ldr_str(insn, asi); } @@ -1125,7 +1169,7 @@ space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi) /* * ALU op with S bit and Rd == 15 : - * cccc 000x xxx1 xxxx 1111 xxxx xxxx xxxx + * cccc 000x xxx1 xxxx 1111 xxxx xxxx xxxx */ if ((insn & 0x0e10f000) == 0x0010f000) return INSN_REJECTED; @@ -1154,22 +1198,61 @@ space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi) insn |= 0x00000200; /* Rs = r2 */ } asi->insn[0] = insn; - asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */ + + if ((insn & 0x0f900000) == 0x01100000) { + /* + * TST : cccc 0001 0001 xxxx xxxx xxxx xxxx xxxx + * TEQ : cccc 0001 0011 xxxx xxxx xxxx xxxx xxxx + * CMP : cccc 0001 0101 xxxx xxxx xxxx xxxx xxxx + * CMN : cccc 0001 0111 xxxx xxxx xxxx xxxx xxxx + */ + asi->insn_handler = emulate_alu_tests; + } else { + /* ALU ops which write to Rd */ + asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */ emulate_alu_rwflags : emulate_alu_rflags; + } return INSN_GOOD; } static enum kprobe_insn __kprobes space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi) { + /* MOVW : cccc 0011 0000 xxxx xxxx xxxx xxxx xxxx */ + /* MOVT : cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx */ + if ((insn & 0x0fb00000) == 0x03000000) + return prep_emulate_rd12_modify(insn, asi); + + /* hints : cccc 0011 0010 0000 xxxx xxxx xxxx xxxx */ + if ((insn & 0x0fff0000) == 0x03200000) { + unsigned op2 = insn & 0x000000ff; + if (op2 == 0x01 || op2 == 0x04) { + /* YIELD : cccc 0011 0010 0000 xxxx xxxx 0000 0001 */ + /* SEV : cccc 0011 0010 0000 xxxx xxxx 0000 0100 */ + asi->insn[0] = insn; + asi->insn_handler = emulate_none; + return INSN_GOOD; + } else if (op2 <= 0x03) { + /* NOP : cccc 0011 0010 0000 xxxx xxxx 0000 0000 */ + /* WFE : cccc 0011 0010 0000 xxxx xxxx 0000 0010 */ + /* WFI : cccc 0011 0010 0000 xxxx xxxx 0000 0011 */ + /* + * We make WFE and WFI true NOPs to avoid stalls due + * to missing events whilst processing the probe. + */ + asi->insn_handler = emulate_nop; + return INSN_GOOD_NO_SLOT; + } + /* For DBG and unallocated hints it's safest to reject them */ + return INSN_REJECTED; + } + /* * MSR : cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx - * Undef : cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx * ALU op with S bit and Rd == 15 : * cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx */ if ((insn & 0x0fb00000) == 0x03200000 || /* MSR */ - (insn & 0x0ff00000) == 0x03400000 || /* Undef */ (insn & 0x0e10f000) == 0x0210f000) /* ALU s-bit, R15 */ return INSN_REJECTED; @@ -1180,10 +1263,22 @@ space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi) * *S (bit 20) updates condition codes * ADC/SBC/RSC reads the C flag */ - insn &= 0xffff0fff; /* Rd = r0 */ + insn &= 0xfff00fff; /* Rn = r0 and Rd = r0 */ asi->insn[0] = insn; - asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */ + + if ((insn & 0x0f900000) == 0x03100000) { + /* + * TST : cccc 0011 0001 xxxx xxxx xxxx xxxx xxxx + * TEQ : cccc 0011 0011 xxxx xxxx xxxx xxxx xxxx + * CMP : cccc 0011 0101 xxxx xxxx xxxx xxxx xxxx + * CMN : cccc 0011 0111 xxxx xxxx xxxx xxxx xxxx + */ + asi->insn_handler = emulate_alu_tests_imm; + } else { + /* ALU ops which write to Rd */ + asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */ emulate_alu_imm_rwflags : emulate_alu_imm_rflags; + } return INSN_GOOD; } @@ -1192,6 +1287,8 @@ space_cccc_0110__1(kprobe_opcode_t insn, struct arch_specific_insn *asi) { /* SEL : cccc 0110 1000 xxxx xxxx xxxx 1011 xxxx GE: !!! 
*/ if ((insn & 0x0ff000f0) == 0x068000b0) { + if (is_r15(insn, 12)) + return INSN_REJECTED; /* Rd is PC */ insn &= 0xfff00ff0; /* Rd = r0, Rn = r0 */ insn |= 0x00000001; /* Rm = r1 */ asi->insn[0] = insn; @@ -1205,6 +1302,8 @@ space_cccc_0110__1(kprobe_opcode_t insn, struct arch_specific_insn *asi) /* USAT16 : cccc 0110 1110 xxxx xxxx xxxx 0011 xxxx :Q */ if ((insn & 0x0fa00030) == 0x06a00010 || (insn & 0x0fb000f0) == 0x06a00030) { + if (is_r15(insn, 12)) + return INSN_REJECTED; /* Rd is PC */ insn &= 0xffff0ff0; /* Rd = r0, Rm = r0 */ asi->insn[0] = insn; asi->insn_handler = emulate_sat; @@ -1213,57 +1312,101 @@ space_cccc_0110__1(kprobe_opcode_t insn, struct arch_specific_insn *asi) /* REV : cccc 0110 1011 xxxx xxxx xxxx 0011 xxxx */ /* REV16 : cccc 0110 1011 xxxx xxxx xxxx 1011 xxxx */ + /* RBIT : cccc 0110 1111 xxxx xxxx xxxx 0011 xxxx */ /* REVSH : cccc 0110 1111 xxxx xxxx xxxx 1011 xxxx */ if ((insn & 0x0ff00070) == 0x06b00030 || - (insn & 0x0ff000f0) == 0x06f000b0) + (insn & 0x0ff00070) == 0x06f00030) return prep_emulate_rd12rm0(insn, asi); + /* ??? : cccc 0110 0000 xxxx xxxx xxxx xxx1 xxxx : */ /* SADD16 : cccc 0110 0001 xxxx xxxx xxxx 0001 xxxx :GE */ /* SADDSUBX : cccc 0110 0001 xxxx xxxx xxxx 0011 xxxx :GE */ /* SSUBADDX : cccc 0110 0001 xxxx xxxx xxxx 0101 xxxx :GE */ /* SSUB16 : cccc 0110 0001 xxxx xxxx xxxx 0111 xxxx :GE */ /* SADD8 : cccc 0110 0001 xxxx xxxx xxxx 1001 xxxx :GE */ + /* ??? : cccc 0110 0001 xxxx xxxx xxxx 1011 xxxx : */ + /* ??? : cccc 0110 0001 xxxx xxxx xxxx 1101 xxxx : */ /* SSUB8 : cccc 0110 0001 xxxx xxxx xxxx 1111 xxxx :GE */ /* QADD16 : cccc 0110 0010 xxxx xxxx xxxx 0001 xxxx : */ /* QADDSUBX : cccc 0110 0010 xxxx xxxx xxxx 0011 xxxx : */ /* QSUBADDX : cccc 0110 0010 xxxx xxxx xxxx 0101 xxxx : */ /* QSUB16 : cccc 0110 0010 xxxx xxxx xxxx 0111 xxxx : */ /* QADD8 : cccc 0110 0010 xxxx xxxx xxxx 1001 xxxx : */ + /* ??? : cccc 0110 0010 xxxx xxxx xxxx 1011 xxxx : */ + /* ??? : cccc 0110 0010 xxxx xxxx xxxx 1101 xxxx : */ /* QSUB8 : cccc 0110 0010 xxxx xxxx xxxx 1111 xxxx : */ /* SHADD16 : cccc 0110 0011 xxxx xxxx xxxx 0001 xxxx : */ /* SHADDSUBX : cccc 0110 0011 xxxx xxxx xxxx 0011 xxxx : */ /* SHSUBADDX : cccc 0110 0011 xxxx xxxx xxxx 0101 xxxx : */ /* SHSUB16 : cccc 0110 0011 xxxx xxxx xxxx 0111 xxxx : */ /* SHADD8 : cccc 0110 0011 xxxx xxxx xxxx 1001 xxxx : */ + /* ??? : cccc 0110 0011 xxxx xxxx xxxx 1011 xxxx : */ + /* ??? : cccc 0110 0011 xxxx xxxx xxxx 1101 xxxx : */ /* SHSUB8 : cccc 0110 0011 xxxx xxxx xxxx 1111 xxxx : */ + /* ??? : cccc 0110 0100 xxxx xxxx xxxx xxx1 xxxx : */ /* UADD16 : cccc 0110 0101 xxxx xxxx xxxx 0001 xxxx :GE */ /* UADDSUBX : cccc 0110 0101 xxxx xxxx xxxx 0011 xxxx :GE */ /* USUBADDX : cccc 0110 0101 xxxx xxxx xxxx 0101 xxxx :GE */ /* USUB16 : cccc 0110 0101 xxxx xxxx xxxx 0111 xxxx :GE */ /* UADD8 : cccc 0110 0101 xxxx xxxx xxxx 1001 xxxx :GE */ + /* ??? : cccc 0110 0101 xxxx xxxx xxxx 1011 xxxx : */ + /* ??? : cccc 0110 0101 xxxx xxxx xxxx 1101 xxxx : */ /* USUB8 : cccc 0110 0101 xxxx xxxx xxxx 1111 xxxx :GE */ /* UQADD16 : cccc 0110 0110 xxxx xxxx xxxx 0001 xxxx : */ /* UQADDSUBX : cccc 0110 0110 xxxx xxxx xxxx 0011 xxxx : */ /* UQSUBADDX : cccc 0110 0110 xxxx xxxx xxxx 0101 xxxx : */ /* UQSUB16 : cccc 0110 0110 xxxx xxxx xxxx 0111 xxxx : */ /* UQADD8 : cccc 0110 0110 xxxx xxxx xxxx 1001 xxxx : */ + /* ??? : cccc 0110 0110 xxxx xxxx xxxx 1011 xxxx : */ + /* ??? 
: cccc 0110 0110 xxxx xxxx xxxx 1101 xxxx : */ /* UQSUB8 : cccc 0110 0110 xxxx xxxx xxxx 1111 xxxx : */ /* UHADD16 : cccc 0110 0111 xxxx xxxx xxxx 0001 xxxx : */ /* UHADDSUBX : cccc 0110 0111 xxxx xxxx xxxx 0011 xxxx : */ /* UHSUBADDX : cccc 0110 0111 xxxx xxxx xxxx 0101 xxxx : */ /* UHSUB16 : cccc 0110 0111 xxxx xxxx xxxx 0111 xxxx : */ /* UHADD8 : cccc 0110 0111 xxxx xxxx xxxx 1001 xxxx : */ + /* ??? : cccc 0110 0111 xxxx xxxx xxxx 1011 xxxx : */ + /* ??? : cccc 0110 0111 xxxx xxxx xxxx 1101 xxxx : */ /* UHSUB8 : cccc 0110 0111 xxxx xxxx xxxx 1111 xxxx : */ + if ((insn & 0x0f800010) == 0x06000010) { + if ((insn & 0x00300000) == 0x00000000 || + (insn & 0x000000e0) == 0x000000a0 || + (insn & 0x000000e0) == 0x000000c0) + return INSN_REJECTED; /* Unallocated space */ + return prep_emulate_rd12rn16rm0_wflags(insn, asi); + } + /* PKHBT : cccc 0110 1000 xxxx xxxx xxxx x001 xxxx : */ /* PKHTB : cccc 0110 1000 xxxx xxxx xxxx x101 xxxx : */ + if ((insn & 0x0ff00030) == 0x06800010) + return prep_emulate_rd12rn16rm0_wflags(insn, asi); + /* SXTAB16 : cccc 0110 1000 xxxx xxxx xxxx 0111 xxxx : */ - /* SXTB : cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx : */ + /* SXTB16 : cccc 0110 1000 1111 xxxx xxxx 0111 xxxx : */ + /* ??? : cccc 0110 1001 xxxx xxxx xxxx 0111 xxxx : */ /* SXTAB : cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx : */ + /* SXTB : cccc 0110 1010 1111 xxxx xxxx 0111 xxxx : */ /* SXTAH : cccc 0110 1011 xxxx xxxx xxxx 0111 xxxx : */ + /* SXTH : cccc 0110 1011 1111 xxxx xxxx 0111 xxxx : */ /* UXTAB16 : cccc 0110 1100 xxxx xxxx xxxx 0111 xxxx : */ + /* UXTB16 : cccc 0110 1100 1111 xxxx xxxx 0111 xxxx : */ + /* ??? : cccc 0110 1101 xxxx xxxx xxxx 0111 xxxx : */ /* UXTAB : cccc 0110 1110 xxxx xxxx xxxx 0111 xxxx : */ + /* UXTB : cccc 0110 1110 1111 xxxx xxxx 0111 xxxx : */ /* UXTAH : cccc 0110 1111 xxxx xxxx xxxx 0111 xxxx : */ - return prep_emulate_rd12rn16rm0_wflags(insn, asi); + /* UXTH : cccc 0110 1111 1111 xxxx xxxx 0111 xxxx : */ + if ((insn & 0x0f8000f0) == 0x06800070) { + if ((insn & 0x00300000) == 0x00100000) + return INSN_REJECTED; /* Unallocated space */ + + if ((insn & 0x000f0000) == 0x000f0000) + return prep_emulate_rd12rm0(insn, asi); + else + return prep_emulate_rd12rn16rm0_wflags(insn, asi); + } + + /* Other instruction encodings aren't yet defined */ + return INSN_REJECTED; } static enum kprobe_insn __kprobes @@ -1273,29 +1416,49 @@ space_cccc_0111__1(kprobe_opcode_t insn, struct arch_specific_insn *asi) if ((insn & 0x0ff000f0) == 0x03f000f0) return INSN_REJECTED; - /* USADA8 : cccc 0111 1000 xxxx xxxx xxxx 0001 xxxx */ - /* USAD8 : cccc 0111 1000 xxxx 1111 xxxx 0001 xxxx */ - if ((insn & 0x0ff000f0) == 0x07800010) - return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); - /* SMLALD : cccc 0111 0100 xxxx xxxx xxxx 00x1 xxxx */ /* SMLSLD : cccc 0111 0100 xxxx xxxx xxxx 01x1 xxxx */ if ((insn & 0x0ff00090) == 0x07400010) return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, asi); /* SMLAD : cccc 0111 0000 xxxx xxxx xxxx 00x1 xxxx :Q */ + /* SMUAD : cccc 0111 0000 xxxx 1111 xxxx 00x1 xxxx :Q */ /* SMLSD : cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx :Q */ + /* SMUSD : cccc 0111 0000 xxxx 1111 xxxx 01x1 xxxx : */ /* SMMLA : cccc 0111 0101 xxxx xxxx xxxx 00x1 xxxx : */ - /* SMMLS : cccc 0111 0101 xxxx xxxx xxxx 11x1 xxxx : */ + /* SMMUL : cccc 0111 0101 xxxx 1111 xxxx 00x1 xxxx : */ + /* USADA8 : cccc 0111 1000 xxxx xxxx xxxx 0001 xxxx : */ + /* USAD8 : cccc 0111 1000 xxxx 1111 xxxx 0001 xxxx : */ if ((insn & 0x0ff00090) == 0x07000010 || (insn & 0x0ff000d0) == 0x07500010 || - (insn & 0x0ff000d0) == 
0x075000d0) + (insn & 0x0ff000f0) == 0x07800010) { + + if ((insn & 0x0000f000) == 0x0000f000) + return prep_emulate_rd16rs8rm0_wflags(insn, asi); + else + return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); + } + + /* SMMLS : cccc 0111 0101 xxxx xxxx xxxx 11x1 xxxx : */ + if ((insn & 0x0ff000d0) == 0x075000d0) return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); - /* SMUSD : cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx : */ - /* SMUAD : cccc 0111 0000 xxxx 1111 xxxx 00x1 xxxx :Q */ - /* SMMUL : cccc 0111 0101 xxxx 1111 xxxx 00x1 xxxx : */ - return prep_emulate_rd16rs8rm0_wflags(insn, asi); + /* SBFX : cccc 0111 101x xxxx xxxx xxxx x101 xxxx : */ + /* UBFX : cccc 0111 111x xxxx xxxx xxxx x101 xxxx : */ + if ((insn & 0x0fa00070) == 0x07a00050) + return prep_emulate_rd12rm0(insn, asi); + + /* BFI : cccc 0111 110x xxxx xxxx xxxx x001 xxxx : */ + /* BFC : cccc 0111 110x xxxx xxxx xxxx x001 1111 : */ + if ((insn & 0x0fe00070) == 0x07c00010) { + + if ((insn & 0x0000000f) == 0x0000000f) + return prep_emulate_rd12_modify(insn, asi); + else + return prep_emulate_rd12rn0_modify(insn, asi); + } + + return INSN_REJECTED; } static enum kprobe_insn __kprobes @@ -1309,6 +1472,10 @@ space_cccc_01xx(kprobe_opcode_t insn, struct arch_specific_insn *asi) /* STRB : cccc 01xx x1x0 xxxx xxxx xxxx xxxx xxxx */ /* STRBT : cccc 01x0 x110 xxxx xxxx xxxx xxxx xxxx */ /* STRT : cccc 01x0 x010 xxxx xxxx xxxx xxxx xxxx */ + + if ((insn & 0x00500000) == 0x00500000 && is_r15(insn, 12)) + return INSN_REJECTED; /* LDRB into PC */ + return prep_emulate_ldr_str(insn, asi); } @@ -1323,10 +1490,9 @@ space_cccc_100x(kprobe_opcode_t insn, struct arch_specific_insn *asi) /* LDM(1) : cccc 100x x0x1 xxxx xxxx xxxx xxxx xxxx */ /* STM(1) : cccc 100x x0x0 xxxx xxxx xxxx xxxx xxxx */ - asi->insn[0] = truecc_insn(insn); asi->insn_handler = ((insn & 0x108000) == 0x008000) ? /* STM & R15 */ simulate_stm1_pc : simulate_ldm1stm1; - return INSN_GOOD; + return INSN_GOOD_NO_SLOT; } static enum kprobe_insn __kprobes @@ -1334,58 +1500,117 @@ space_cccc_101x(kprobe_opcode_t insn, struct arch_specific_insn *asi) { /* B : cccc 1010 xxxx xxxx xxxx xxxx xxxx xxxx */ /* BL : cccc 1011 xxxx xxxx xxxx xxxx xxxx xxxx */ - asi->insn[0] = truecc_insn(insn); asi->insn_handler = simulate_bbl; - return INSN_GOOD; + return INSN_GOOD_NO_SLOT; } static enum kprobe_insn __kprobes -space_cccc_1100_010x(kprobe_opcode_t insn, struct arch_specific_insn *asi) +space_cccc_11xx(kprobe_opcode_t insn, struct arch_specific_insn *asi) { + /* Coprocessor instructions... */ /* MCRR : cccc 1100 0100 xxxx xxxx xxxx xxxx xxxx : (Rd!=Rn) */ /* MRRC : cccc 1100 0101 xxxx xxxx xxxx xxxx xxxx : (Rd!=Rn) */ - insn &= 0xfff00fff; - insn |= 0x00001000; /* Rn = r0, Rd = r1 */ - asi->insn[0] = insn; - asi->insn_handler = (insn & (1 << 20)) ? 
emulate_mrrc : emulate_mcrr; - return INSN_GOOD; + /* LDC : cccc 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ + /* STC : cccc 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ + /* CDP : cccc 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ + /* MCR : cccc 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ + /* MRC : cccc 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */ + + /* SVC : cccc 1111 xxxx xxxx xxxx xxxx xxxx xxxx */ + + return INSN_REJECTED; } -static enum kprobe_insn __kprobes -space_cccc_110x(kprobe_opcode_t insn, struct arch_specific_insn *asi) +static unsigned long __kprobes __check_eq(unsigned long cpsr) { - /* LDC : cccc 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ - /* STC : cccc 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ - insn &= 0xfff0ffff; /* Rn = r0 */ - asi->insn[0] = insn; - asi->insn_handler = emulate_ldcstc; - return INSN_GOOD; + return cpsr & PSR_Z_BIT; } -static enum kprobe_insn __kprobes -space_cccc_111x(kprobe_opcode_t insn, struct arch_specific_insn *asi) +static unsigned long __kprobes __check_ne(unsigned long cpsr) { - /* BKPT : 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */ - /* SWI : cccc 1111 xxxx xxxx xxxx xxxx xxxx xxxx */ - if ((insn & 0xfff000f0) == 0xe1200070 || - (insn & 0x0f000000) == 0x0f000000) - return INSN_REJECTED; + return (~cpsr) & PSR_Z_BIT; +} - /* CDP : cccc 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ - if ((insn & 0x0f000010) == 0x0e000000) { - asi->insn[0] = insn; - asi->insn_handler = emulate_none; - return INSN_GOOD; - } +static unsigned long __kprobes __check_cs(unsigned long cpsr) +{ + return cpsr & PSR_C_BIT; +} - /* MCR : cccc 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ - /* MRC : cccc 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */ - insn &= 0xffff0fff; /* Rd = r0 */ - asi->insn[0] = insn; - asi->insn_handler = (insn & (1 << 20)) ? emulate_rd12 : emulate_ird12; - return INSN_GOOD; +static unsigned long __kprobes __check_cc(unsigned long cpsr) +{ + return (~cpsr) & PSR_C_BIT; +} + +static unsigned long __kprobes __check_mi(unsigned long cpsr) +{ + return cpsr & PSR_N_BIT; +} + +static unsigned long __kprobes __check_pl(unsigned long cpsr) +{ + return (~cpsr) & PSR_N_BIT; +} + +static unsigned long __kprobes __check_vs(unsigned long cpsr) +{ + return cpsr & PSR_V_BIT; +} + +static unsigned long __kprobes __check_vc(unsigned long cpsr) +{ + return (~cpsr) & PSR_V_BIT; +} + +static unsigned long __kprobes __check_hi(unsigned long cpsr) +{ + cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ + return cpsr & PSR_C_BIT; } +static unsigned long __kprobes __check_ls(unsigned long cpsr) +{ + cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ + return (~cpsr) & PSR_C_BIT; +} + +static unsigned long __kprobes __check_ge(unsigned long cpsr) +{ + cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ + return (~cpsr) & PSR_N_BIT; +} + +static unsigned long __kprobes __check_lt(unsigned long cpsr) +{ + cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ + return cpsr & PSR_N_BIT; +} + +static unsigned long __kprobes __check_gt(unsigned long cpsr) +{ + unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ + temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */ + return (~temp) & PSR_N_BIT; +} + +static unsigned long __kprobes __check_le(unsigned long cpsr) +{ + unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ + temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */ + return temp & PSR_N_BIT; +} + +static unsigned long __kprobes __check_al(unsigned long cpsr) +{ + return true; +} + +static kprobe_check_cc * const condition_checks[16] = { + &__check_eq, &__check_ne, &__check_cs, &__check_cc, + &__check_mi, 
&__check_pl, &__check_vs, &__check_vc, + &__check_hi, &__check_ls, &__check_ge, &__check_lt, + &__check_gt, &__check_le, &__check_al, &__check_al +}; + /* Return: * INSN_REJECTED If instruction is one not allowed to kprobe, * INSN_GOOD If instruction is supported and uses instruction slot, @@ -1401,133 +1626,45 @@ space_cccc_111x(kprobe_opcode_t insn, struct arch_specific_insn *asi) enum kprobe_insn __kprobes arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) { + asi->insn_check_cc = condition_checks[insn>>28]; asi->insn[1] = KPROBE_RETURN_INSTRUCTION; - if ((insn & 0xf0000000) == 0xf0000000) { + if ((insn & 0xf0000000) == 0xf0000000) return space_1111(insn, asi); - } else if ((insn & 0x0e000000) == 0x00000000) { + else if ((insn & 0x0e000000) == 0x00000000) return space_cccc_000x(insn, asi); - } else if ((insn & 0x0e000000) == 0x02000000) { + else if ((insn & 0x0e000000) == 0x02000000) return space_cccc_001x(insn, asi); - } else if ((insn & 0x0f000010) == 0x06000010) { + else if ((insn & 0x0f000010) == 0x06000010) return space_cccc_0110__1(insn, asi); - } else if ((insn & 0x0f000010) == 0x07000010) { + else if ((insn & 0x0f000010) == 0x07000010) return space_cccc_0111__1(insn, asi); - } else if ((insn & 0x0c000000) == 0x04000000) { + else if ((insn & 0x0c000000) == 0x04000000) return space_cccc_01xx(insn, asi); - } else if ((insn & 0x0e000000) == 0x08000000) { + else if ((insn & 0x0e000000) == 0x08000000) return space_cccc_100x(insn, asi); - } else if ((insn & 0x0e000000) == 0x0a000000) { + else if ((insn & 0x0e000000) == 0x0a000000) return space_cccc_101x(insn, asi); - } else if ((insn & 0x0fe00000) == 0x0c400000) { - - return space_cccc_1100_010x(insn, asi); - - } else if ((insn & 0x0e000000) == 0x0c000000) { - - return space_cccc_110x(insn, asi); - - } - - return space_cccc_111x(insn, asi); + return space_cccc_11xx(insn, asi); } void __init arm_kprobe_decode_init(void) { find_str_pc_offset(); } - - -/* - * All ARM instructions listed below. - * - * Instructions and their general purpose registers are given. - * If a particular register may not use R15, it is prefixed with a "!". - * If marked with a "*" means the value returned by reading R15 - * is implementation defined. 
- * - * ADC/ADD/AND/BIC/CMN/CMP/EOR/MOV/MVN/ORR/RSB/RSC/SBC/SUB/TEQ - * TST: Rd, Rn, Rm, !Rs - * BX: Rm - * BLX(2): !Rm - * BX: Rm (R15 legal, but discouraged) - * BXJ: !Rm, - * CLZ: !Rd, !Rm - * CPY: Rd, Rm - * LDC/2,STC/2 immediate offset & unindex: Rn - * LDC/2,STC/2 immediate pre/post-indexed: !Rn - * LDM(1/3): !Rn, register_list - * LDM(2): !Rn, !register_list - * LDR,STR,PLD immediate offset: Rd, Rn - * LDR,STR,PLD register offset: Rd, Rn, !Rm - * LDR,STR,PLD scaled register offset: Rd, !Rn, !Rm - * LDR,STR immediate pre/post-indexed: Rd, !Rn - * LDR,STR register pre/post-indexed: Rd, !Rn, !Rm - * LDR,STR scaled register pre/post-indexed: Rd, !Rn, !Rm - * LDRB,STRB immediate offset: !Rd, Rn - * LDRB,STRB register offset: !Rd, Rn, !Rm - * LDRB,STRB scaled register offset: !Rd, !Rn, !Rm - * LDRB,STRB immediate pre/post-indexed: !Rd, !Rn - * LDRB,STRB register pre/post-indexed: !Rd, !Rn, !Rm - * LDRB,STRB scaled register pre/post-indexed: !Rd, !Rn, !Rm - * LDRT,LDRBT,STRBT immediate pre/post-indexed: !Rd, !Rn - * LDRT,LDRBT,STRBT register pre/post-indexed: !Rd, !Rn, !Rm - * LDRT,LDRBT,STRBT scaled register pre/post-indexed: !Rd, !Rn, !Rm - * LDRH/SH/SB/D,STRH/SH/SB/D immediate offset: !Rd, Rn - * LDRH/SH/SB/D,STRH/SH/SB/D register offset: !Rd, Rn, !Rm - * LDRH/SH/SB/D,STRH/SH/SB/D immediate pre/post-indexed: !Rd, !Rn - * LDRH/SH/SB/D,STRH/SH/SB/D register pre/post-indexed: !Rd, !Rn, !Rm - * LDREX: !Rd, !Rn - * MCR/2: !Rd - * MCRR/2,MRRC/2: !Rd, !Rn - * MLA: !Rd, !Rn, !Rm, !Rs - * MOV: Rd - * MRC/2: !Rd (if Rd==15, only changes cond codes, not the register) - * MRS,MSR: !Rd - * MUL: !Rd, !Rm, !Rs - * PKH{BT,TB}: !Rd, !Rn, !Rm - * QDADD,[U]QADD/16/8/SUBX: !Rd, !Rm, !Rn - * QDSUB,[U]QSUB/16/8/ADDX: !Rd, !Rm, !Rn - * REV/16/SH: !Rd, !Rm - * RFE: !Rn - * {S,U}[H]ADD{16,8,SUBX},{S,U}[H]SUB{16,8,ADDX}: !Rd, !Rn, !Rm - * SEL: !Rd, !Rn, !Rm - * SMLA,SMLA{D,W},SMLSD,SMML{A,S}: !Rd, !Rn, !Rm, !Rs - * SMLAL,SMLA{D,LD},SMLSLD,SMMULL,SMULW: !RdHi, !RdLo, !Rm, !Rs - * SMMUL,SMUAD,SMUL,SMUSD: !Rd, !Rm, !Rs - * SSAT/16: !Rd, !Rm - * STM(1/2): !Rn, register_list* (R15 in reg list not recommended) - * STRT immediate pre/post-indexed: Rd*, !Rn - * STRT register pre/post-indexed: Rd*, !Rn, !Rm - * STRT scaled register pre/post-indexed: Rd*, !Rn, !Rm - * STREX: !Rd, !Rn, !Rm - * SWP/B: !Rd, !Rn, !Rm - * {S,U}XTA{B,B16,H}: !Rd, !Rn, !Rm - * {S,U}XT{B,B16,H}: !Rd, !Rm - * UM{AA,LA,UL}L: !RdHi, !RdLo, !Rm, !Rs - * USA{D8,A8,T,T16}: !Rd, !Rm, !Rs - * - * May transfer control by writing R15 (possible mode changes or alternate - * mode accesses marked by "*"): - * ALU op (* with s-bit), B, BL, BKPT, BLX(1/2), BX, BXJ, CPS*, CPY, - * LDM(1), LDM(2/3)*, LDR, MOV, RFE*, SWI* - * - * Instructions that do not take general registers, nor transfer control: - * CDP/2, SETEND, SRS* - */ diff --git a/trunk/arch/arm/kernel/kprobes.c b/trunk/arch/arm/kernel/kprobes.c index 2ba7deb3072e..1656c87501c0 100644 --- a/trunk/arch/arm/kernel/kprobes.c +++ b/trunk/arch/arm/kernel/kprobes.c @@ -134,7 +134,8 @@ static void __kprobes singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { regs->ARM_pc += 4; - p->ainsn.insn_handler(p, regs); + if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) + p->ainsn.insn_handler(p, regs); } /* diff --git a/trunk/arch/arm/kernel/leds.c b/trunk/arch/arm/kernel/leds.c index 31a316c1777b..0f107dcb0347 100644 --- a/trunk/arch/arm/kernel/leds.c +++ b/trunk/arch/arm/kernel/leds.c @@ -10,6 +10,7 @@ #include #include #include +#include #include @@ -69,36 +70,37 @@ static ssize_t 
leds_store(struct sys_device *dev, static SYSDEV_ATTR(event, 0200, NULL, leds_store); -static int leds_suspend(struct sys_device *dev, pm_message_t state) +static struct sysdev_class leds_sysclass = { + .name = "leds", +}; + +static struct sys_device leds_device = { + .id = 0, + .cls = &leds_sysclass, +}; + +static int leds_suspend(void) { leds_event(led_stop); return 0; } -static int leds_resume(struct sys_device *dev) +static void leds_resume(void) { leds_event(led_start); - return 0; } -static int leds_shutdown(struct sys_device *dev) +static void leds_shutdown(void) { leds_event(led_halted); - return 0; } -static struct sysdev_class leds_sysclass = { - .name = "leds", +static struct syscore_ops leds_syscore_ops = { .shutdown = leds_shutdown, .suspend = leds_suspend, .resume = leds_resume, }; -static struct sys_device leds_device = { - .id = 0, - .cls = &leds_sysclass, -}; - static int __init leds_init(void) { int ret; @@ -107,6 +109,8 @@ static int __init leds_init(void) ret = sysdev_register(&leds_device); if (ret == 0) ret = sysdev_create_file(&leds_device, &attr_event); + if (ret == 0) + register_syscore_ops(&leds_syscore_ops); return ret; } diff --git a/trunk/arch/arm/kernel/perf_event.c b/trunk/arch/arm/kernel/perf_event.c index 69cfee0fe00f..139e3c827369 100644 --- a/trunk/arch/arm/kernel/perf_event.c +++ b/trunk/arch/arm/kernel/perf_event.c @@ -221,7 +221,7 @@ armpmu_event_update(struct perf_event *event, prev_raw_count &= armpmu->max_period; if (overflow) - delta = armpmu->max_period - prev_raw_count + new_raw_count; + delta = armpmu->max_period - prev_raw_count + new_raw_count + 1; else delta = new_raw_count - prev_raw_count; @@ -746,7 +746,8 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) tail = (struct frame_tail __user *)regs->ARM_fp - 1; - while (tail && !((unsigned long)tail & 0x3)) + while ((entry->nr < PERF_MAX_STACK_DEPTH) && + tail && !((unsigned long)tail & 0x3)) tail = user_backtrace(tail, entry); } diff --git a/trunk/arch/arm/kernel/process.c b/trunk/arch/arm/kernel/process.c index 94bbedbed639..5e1e54197227 100644 --- a/trunk/arch/arm/kernel/process.c +++ b/trunk/arch/arm/kernel/process.c @@ -372,6 +372,8 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, if (clone_flags & CLONE_SETTLS) thread->tp_value = regs->ARM_r3; + thread_notify(THREAD_NOTIFY_COPY, thread); + return 0; } diff --git a/trunk/arch/arm/kernel/ptrace.c b/trunk/arch/arm/kernel/ptrace.c index 2bf27f364d09..8182f45ca493 100644 --- a/trunk/arch/arm/kernel/ptrace.c +++ b/trunk/arch/arm/kernel/ptrace.c @@ -767,12 +767,20 @@ long arch_ptrace(struct task_struct *child, long request, #ifdef CONFIG_HAVE_HW_BREAKPOINT case PTRACE_GETHBPREGS: + if (ptrace_get_breakpoints(child) < 0) + return -ESRCH; + ret = ptrace_gethbpregs(child, addr, (unsigned long __user *)data); + ptrace_put_breakpoints(child); break; case PTRACE_SETHBPREGS: + if (ptrace_get_breakpoints(child) < 0) + return -ESRCH; + ret = ptrace_sethbpregs(child, addr, (unsigned long __user *)data); + ptrace_put_breakpoints(child); break; #endif diff --git a/trunk/arch/arm/kernel/signal.c b/trunk/arch/arm/kernel/signal.c index cb8398317644..0340224cf73c 100644 --- a/trunk/arch/arm/kernel/signal.c +++ b/trunk/arch/arm/kernel/signal.c @@ -597,45 +597,19 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, return err; } -static inline void setup_syscall_restart(struct pt_regs *regs) -{ - regs->ARM_r0 = regs->ARM_ORIG_r0; - regs->ARM_pc -= thumb_mode(regs) ? 
2 : 4; -} - /* * OK, we're invoking a handler */ static int handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, - struct pt_regs * regs, int syscall) + struct pt_regs * regs) { struct thread_info *thread = current_thread_info(); struct task_struct *tsk = current; int usig = sig; int ret; - /* - * If we were from a system call, check for system call restarting... - */ - if (syscall) { - switch (regs->ARM_r0) { - case -ERESTART_RESTARTBLOCK: - case -ERESTARTNOHAND: - regs->ARM_r0 = -EINTR; - break; - case -ERESTARTSYS: - if (!(ka->sa.sa_flags & SA_RESTART)) { - regs->ARM_r0 = -EINTR; - break; - } - /* fallthrough */ - case -ERESTARTNOINTR: - setup_syscall_restart(regs); - } - } - /* * translate the signal */ @@ -685,6 +659,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, */ static void do_signal(struct pt_regs *regs, int syscall) { + unsigned int retval = 0, continue_addr = 0, restart_addr = 0; struct k_sigaction ka; siginfo_t info; int signr; @@ -698,18 +673,61 @@ static void do_signal(struct pt_regs *regs, int syscall) if (!user_mode(regs)) return; + /* + * If we were from a system call, check for system call restarting... + */ + if (syscall) { + continue_addr = regs->ARM_pc; + restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4); + retval = regs->ARM_r0; + + /* + * Prepare for system call restart. We do this here so that a + * debugger will see the already changed PSW. + */ + switch (retval) { + case -ERESTARTNOHAND: + case -ERESTARTSYS: + case -ERESTARTNOINTR: + regs->ARM_r0 = regs->ARM_ORIG_r0; + regs->ARM_pc = restart_addr; + break; + case -ERESTART_RESTARTBLOCK: + regs->ARM_r0 = -EINTR; + break; + } + } + if (try_to_freeze()) goto no_signal; + /* + * Get the signal to deliver. When running under ptrace, at this + * point the debugger may change all our registers ... + */ signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { sigset_t *oldset; + /* + * Depending on the signal settings we may need to revert the + * decision to restart the system call. But skip this if a + * debugger has chosen to restart at a different PC. + */ + if (regs->ARM_pc == restart_addr) { + if (retval == -ERESTARTNOHAND + || (retval == -ERESTARTSYS + && !(ka.sa.sa_flags & SA_RESTART))) { + regs->ARM_r0 = -EINTR; + regs->ARM_pc = continue_addr; + } + } + if (test_thread_flag(TIF_RESTORE_SIGMASK)) oldset = ¤t->saved_sigmask; else oldset = ¤t->blocked; - if (handle_signal(signr, &ka, &info, oldset, regs, syscall) == 0) { + if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { /* * A signal was successfully delivered; the saved * sigmask will have been stored in the signal frame, @@ -723,11 +741,14 @@ static void do_signal(struct pt_regs *regs, int syscall) } no_signal: - /* - * No signal to deliver to the process - restart the syscall. - */ if (syscall) { - if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) { + /* + * Handle restarting a different system call. As above, + * if a debugger has chosen to restart at a different PC, + * ignore the restart. + */ + if (retval == -ERESTART_RESTARTBLOCK + && regs->ARM_pc == continue_addr) { if (thumb_mode(regs)) { regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE; regs->ARM_pc -= 2; @@ -750,11 +771,6 @@ static void do_signal(struct pt_regs *regs, int syscall) #endif } } - if (regs->ARM_r0 == -ERESTARTNOHAND || - regs->ARM_r0 == -ERESTARTSYS || - regs->ARM_r0 == -ERESTARTNOINTR) { - setup_syscall_restart(regs); - } /* If there's no signal to deliver, we just put the saved sigmask * back. 
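
For readers following the do_signal()/handle_signal() rework in the signal.c hunks above, the sketch below condenses the new restart decision into one place. It is illustrative only and not part of the patch: the helper name sketch_syscall_restart() and the has_handler/sa_restart parameters are hypothetical stand-ins for state the real code obtains from get_signal_to_deliver(), while the register fields, thumb_mode() and the -ERESTART* values are used as in arch/arm/kernel/signal.c (the sketch assumes the same kernel headers as that file).

/*
 * Illustrative summary of the restart handling introduced above.
 * Not kernel code; compiles only in the context of signal.c-style
 * includes (linux/errno.h, asm/ptrace.h, asm/unistd.h).
 */
static void sketch_syscall_restart(struct pt_regs *regs, int syscall,
				   int has_handler, int sa_restart)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	unsigned int retval = 0;

	if (syscall) {
		continue_addr = regs->ARM_pc;
		restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
		retval = regs->ARM_r0;

		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			/*
			 * Rewind to the SWI up front, so a ptrace debugger
			 * already sees the restarted state.
			 */
			regs->ARM_r0 = regs->ARM_ORIG_r0;
			regs->ARM_pc = restart_addr;
			break;
		case -ERESTART_RESTARTBLOCK:
			regs->ARM_r0 = -EINTR;
			break;
		}
	}

	/*
	 * If a handler will run and the debugger has not moved the PC,
	 * revert the restart when this signal must interrupt the call.
	 */
	if (has_handler && regs->ARM_pc == restart_addr) {
		if (retval == -ERESTARTNOHAND ||
		    (retval == -ERESTARTSYS && !sa_restart)) {
			regs->ARM_r0 = -EINTR;
			regs->ARM_pc = continue_addr;
		}
	}
}
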
diff --git a/trunk/arch/arm/kernel/smp.c b/trunk/arch/arm/kernel/smp.c index 8fe05ad932e4..007a0a950e75 100644 --- a/trunk/arch/arm/kernel/smp.c +++ b/trunk/arch/arm/kernel/smp.c @@ -479,7 +479,7 @@ static void broadcast_timer_set_mode(enum clock_event_mode mode, { } -static void broadcast_timer_setup(struct clock_event_device *evt) +static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt) { evt->name = "dummy_timer"; evt->features = CLOCK_EVT_FEAT_ONESHOT | @@ -560,10 +560,7 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs) break; case IPI_RESCHEDULE: - /* - * nothing more to do - eveything is - * done on the interrupt return path - */ + scheduler_ipi(); break; case IPI_CALL_FUNC: diff --git a/trunk/arch/arm/kernel/sys_oabi-compat.c b/trunk/arch/arm/kernel/sys_oabi-compat.c index 4ad8da15ef2b..af0aaebf4de6 100644 --- a/trunk/arch/arm/kernel/sys_oabi-compat.c +++ b/trunk/arch/arm/kernel/sys_oabi-compat.c @@ -311,7 +311,7 @@ asmlinkage long sys_oabi_semtimedop(int semid, long err; int i; - if (nsops < 1) + if (nsops < 1 || nsops > SEMOPM) return -EINVAL; sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL); if (!sops) diff --git a/trunk/arch/arm/kernel/time.c b/trunk/arch/arm/kernel/time.c index 1ff46cabc7ef..cb634c3e28e9 100644 --- a/trunk/arch/arm/kernel/time.c +++ b/trunk/arch/arm/kernel/time.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include @@ -115,48 +115,37 @@ void timer_tick(void) #endif #if defined(CONFIG_PM) && !defined(CONFIG_GENERIC_CLOCKEVENTS) -static int timer_suspend(struct sys_device *dev, pm_message_t state) +static int timer_suspend(void) { - struct sys_timer *timer = container_of(dev, struct sys_timer, dev); - - if (timer->suspend != NULL) - timer->suspend(); + if (system_timer->suspend) + system_timer->suspend(); return 0; } -static int timer_resume(struct sys_device *dev) +static void timer_resume(void) { - struct sys_timer *timer = container_of(dev, struct sys_timer, dev); - - if (timer->resume != NULL) - timer->resume(); - - return 0; + if (system_timer->resume) + system_timer->resume(); } #else #define timer_suspend NULL #define timer_resume NULL #endif -static struct sysdev_class timer_sysclass = { - .name = "timer", +static struct syscore_ops timer_syscore_ops = { .suspend = timer_suspend, .resume = timer_resume, }; -static int __init timer_init_sysfs(void) +static int __init timer_init_syscore_ops(void) { - int ret = sysdev_class_register(&timer_sysclass); - if (ret == 0) { - system_timer->dev.cls = &timer_sysclass; - ret = sysdev_register(&system_timer->dev); - } + register_syscore_ops(&timer_syscore_ops); - return ret; + return 0; } -device_initcall(timer_init_sysfs); +device_initcall(timer_init_syscore_ops); void __init time_init(void) { diff --git a/trunk/arch/arm/kernel/traps.c b/trunk/arch/arm/kernel/traps.c index f0000e188c8c..d52eec268b47 100644 --- a/trunk/arch/arm/kernel/traps.c +++ b/trunk/arch/arm/kernel/traps.c @@ -234,7 +234,6 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n", str, err, ++die_counter); - sysfs_printk_last_file(); /* trap and error numbers are mostly meaningless on ARM */ ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV); @@ -410,8 +409,7 @@ static int bad_syscall(int n, struct pt_regs *regs) struct thread_info *thread = current_thread_info(); siginfo_t info; - if (current->personality != PER_LINUX && - current->personality != 
PER_LINUX_32BIT && + if ((current->personality & PER_MASK) != PER_LINUX && thread->exec_domain->handler) { thread->exec_domain->handler(n, regs); return regs->ARM_r0; diff --git a/trunk/arch/arm/mach-at91/Kconfig b/trunk/arch/arm/mach-at91/Kconfig index 19390231a0e9..2d299bf5d72f 100644 --- a/trunk/arch/arm/mach-at91/Kconfig +++ b/trunk/arch/arm/mach-at91/Kconfig @@ -83,6 +83,7 @@ config ARCH_AT91CAP9 select CPU_ARM926T select GENERIC_CLOCKEVENTS select HAVE_FB_ATMEL + select HAVE_NET_MACB config ARCH_AT572D940HF bool "AT572D940HF" diff --git a/trunk/arch/arm/mach-at91/board-eb01.c b/trunk/arch/arm/mach-at91/board-eb01.c index 1f9d3cb64c50..d8df59a3426d 100644 --- a/trunk/arch/arm/mach-at91/board-eb01.c +++ b/trunk/arch/arm/mach-at91/board-eb01.c @@ -30,6 +30,11 @@ #include #include "generic.h" +static void __init at91eb01_init_irq(void) +{ + at91x40_init_interrupts(NULL); +} + static void __init at91eb01_map_io(void) { at91x40_initialize(40000000); @@ -38,7 +43,7 @@ static void __init at91eb01_map_io(void) MACHINE_START(AT91EB01, "Atmel AT91 EB01") /* Maintainer: Greg Ungerer */ .timer = &at91x40_timer, - .init_irq = at91x40_init_interrupts, + .init_irq = at91eb01_init_irq, .map_io = at91eb01_map_io, MACHINE_END diff --git a/trunk/arch/arm/mach-at91/include/mach/cpu.h b/trunk/arch/arm/mach-at91/include/mach/cpu.h index 3bef931d0b1c..0700f2125305 100644 --- a/trunk/arch/arm/mach-at91/include/mach/cpu.h +++ b/trunk/arch/arm/mach-at91/include/mach/cpu.h @@ -27,6 +27,7 @@ #define ARCH_ID_AT91SAM9G45 0x819b05a0 #define ARCH_ID_AT91SAM9G45MRL 0x819b05a2 /* aka 9G45-ES2 & non ES lots */ #define ARCH_ID_AT91SAM9G45ES 0x819b05a1 /* 9G45-ES (Engineering Sample) */ +#define ARCH_ID_AT91SAM9X5 0x819a05a0 #define ARCH_ID_AT91CAP9 0x039A03A0 #define ARCH_ID_AT91SAM9XE128 0x329973a0 @@ -55,6 +56,12 @@ static inline unsigned long at91_cpu_fully_identify(void) #define ARCH_EXID_AT91SAM9G46 0x00000003 #define ARCH_EXID_AT91SAM9G45 0x00000004 +#define ARCH_EXID_AT91SAM9G15 0x00000000 +#define ARCH_EXID_AT91SAM9G35 0x00000001 +#define ARCH_EXID_AT91SAM9X35 0x00000002 +#define ARCH_EXID_AT91SAM9G25 0x00000003 +#define ARCH_EXID_AT91SAM9X25 0x00000004 + static inline unsigned long at91_exid_identify(void) { return at91_sys_read(AT91_DBGU_EXID); @@ -143,6 +150,27 @@ static inline unsigned long at91cap9_rev_identify(void) #define cpu_is_at91sam9m11() (0) #endif +#ifdef CONFIG_ARCH_AT91SAM9X5 +#define cpu_is_at91sam9x5() (at91_cpu_identify() == ARCH_ID_AT91SAM9X5) +#define cpu_is_at91sam9g15() (cpu_is_at91sam9x5() && \ + (at91_exid_identify() == ARCH_EXID_AT91SAM9G15)) +#define cpu_is_at91sam9g35() (cpu_is_at91sam9x5() && \ + (at91_exid_identify() == ARCH_EXID_AT91SAM9G35)) +#define cpu_is_at91sam9x35() (cpu_is_at91sam9x5() && \ + (at91_exid_identify() == ARCH_EXID_AT91SAM9X35)) +#define cpu_is_at91sam9g25() (cpu_is_at91sam9x5() && \ + (at91_exid_identify() == ARCH_EXID_AT91SAM9G25)) +#define cpu_is_at91sam9x25() (cpu_is_at91sam9x5() && \ + (at91_exid_identify() == ARCH_EXID_AT91SAM9X25)) +#else +#define cpu_is_at91sam9x5() (0) +#define cpu_is_at91sam9g15() (0) +#define cpu_is_at91sam9g35() (0) +#define cpu_is_at91sam9x35() (0) +#define cpu_is_at91sam9g25() (0) +#define cpu_is_at91sam9x25() (0) +#endif + #ifdef CONFIG_ARCH_AT91CAP9 #define cpu_is_at91cap9() (at91_cpu_identify() == ARCH_ID_AT91CAP9) #define cpu_is_at91cap9_revB() (at91cap9_rev_identify() == ARCH_REVISION_CAP9_B) diff --git a/trunk/arch/arm/mach-davinci/Kconfig b/trunk/arch/arm/mach-davinci/Kconfig index 32f147998cd9..c0deacae778d 100644 --- 
a/trunk/arch/arm/mach-davinci/Kconfig +++ b/trunk/arch/arm/mach-davinci/Kconfig @@ -63,6 +63,7 @@ config MACH_DAVINCI_EVM depends on ARCH_DAVINCI_DM644x select MISC_DEVICES select EEPROM_AT24 + select I2C help Configure this option to specify the whether the board used for development is a DM644x EVM @@ -72,6 +73,7 @@ config MACH_SFFSDR depends on ARCH_DAVINCI_DM644x select MISC_DEVICES select EEPROM_AT24 + select I2C help Say Y here to select the Lyrtech Small Form Factor Software Defined Radio (SFFSDR) board. @@ -105,6 +107,7 @@ config MACH_DAVINCI_DM6467_EVM select MACH_DAVINCI_DM6467TEVM select MISC_DEVICES select EEPROM_AT24 + select I2C help Configure this option to specify the whether the board used for development is a DM6467 EVM @@ -118,6 +121,7 @@ config MACH_DAVINCI_DM365_EVM depends on ARCH_DAVINCI_DM365 select MISC_DEVICES select EEPROM_AT24 + select I2C help Configure this option to specify whether the board used for development is a DM365 EVM @@ -129,6 +133,7 @@ config MACH_DAVINCI_DA830_EVM select GPIO_PCF857X select MISC_DEVICES select EEPROM_AT24 + select I2C help Say Y here to select the TI DA830/OMAP-L137/AM17x Evaluation Module. @@ -205,6 +210,7 @@ config MACH_MITYOMAPL138 depends on ARCH_DAVINCI_DA850 select MISC_DEVICES select EEPROM_AT24 + select I2C help Say Y here to select the Critical Link MityDSP-L138/MityARM-1808 System on Module. Information on this SoM may be found at diff --git a/trunk/arch/arm/mach-davinci/board-mityomapl138.c b/trunk/arch/arm/mach-davinci/board-mityomapl138.c index 2aa79c54f98e..606a6f27ed6c 100644 --- a/trunk/arch/arm/mach-davinci/board-mityomapl138.c +++ b/trunk/arch/arm/mach-davinci/board-mityomapl138.c @@ -29,7 +29,7 @@ #include #include -#define MITYOMAPL138_PHY_ID "0:03" +#define MITYOMAPL138_PHY_ID "" #define FACTORY_CONFIG_MAGIC 0x012C0138 #define FACTORY_CONFIG_VERSION 0x00010001 @@ -414,7 +414,7 @@ static struct resource mityomapl138_nandflash_resource[] = { static struct platform_device mityomapl138_nandflash_device = { .name = "davinci_nand", - .id = 0, + .id = 1, .dev = { .platform_data = &mityomapl138_nandflash_data, }, diff --git a/trunk/arch/arm/mach-davinci/cpufreq.c b/trunk/arch/arm/mach-davinci/cpufreq.c index 0a95be1512bb..41669ecc1f91 100644 --- a/trunk/arch/arm/mach-davinci/cpufreq.c +++ b/trunk/arch/arm/mach-davinci/cpufreq.c @@ -94,9 +94,7 @@ static int davinci_target(struct cpufreq_policy *policy, if (freqs.old == freqs.new) return ret; - cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, - dev_driver_string(cpufreq.dev), - "transition: %u --> %u\n", freqs.old, freqs.new); + dev_dbg(&cpufreq.dev, "transition: %u --> %u\n", freqs.old, freqs.new); ret = cpufreq_frequency_table_target(policy, pdata->freq_table, freqs.new, relation, &idx); diff --git a/trunk/arch/arm/mach-davinci/devices-da8xx.c b/trunk/arch/arm/mach-davinci/devices-da8xx.c index 625d4b66718b..58a02dc7b15a 100644 --- a/trunk/arch/arm/mach-davinci/devices-da8xx.c +++ b/trunk/arch/arm/mach-davinci/devices-da8xx.c @@ -39,7 +39,8 @@ #define DA8XX_GPIO_BASE 0x01e26000 #define DA8XX_I2C1_BASE 0x01e28000 #define DA8XX_SPI0_BASE 0x01c41000 -#define DA8XX_SPI1_BASE 0x01f0e000 +#define DA830_SPI1_BASE 0x01e12000 +#define DA850_SPI1_BASE 0x01f0e000 #define DA8XX_EMAC_CTRL_REG_OFFSET 0x3000 #define DA8XX_EMAC_MOD_REG_OFFSET 0x2000 @@ -762,8 +763,8 @@ static struct resource da8xx_spi0_resources[] = { static struct resource da8xx_spi1_resources[] = { [0] = { - .start = DA8XX_SPI1_BASE, - .end = DA8XX_SPI1_BASE + SZ_4K - 1, + .start = DA830_SPI1_BASE, + .end = 
DA830_SPI1_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, [1] = { @@ -832,5 +833,10 @@ int __init da8xx_register_spi(int instance, struct spi_board_info *info, da8xx_spi_pdata[instance].num_chipselect = len; + if (instance == 1 && cpu_is_davinci_da850()) { + da8xx_spi1_resources[0].start = DA850_SPI1_BASE; + da8xx_spi1_resources[0].end = DA850_SPI1_BASE + SZ_4K - 1; + } + return platform_device_register(&da8xx_spi_device[instance]); } diff --git a/trunk/arch/arm/mach-davinci/dm355.c b/trunk/arch/arm/mach-davinci/dm355.c index f68012239641..a3a94e9c9378 100644 --- a/trunk/arch/arm/mach-davinci/dm355.c +++ b/trunk/arch/arm/mach-davinci/dm355.c @@ -314,7 +314,7 @@ static struct clk timer2_clk = { .name = "timer2", .parent = &pll1_aux_clk, .lpsc = DAVINCI_LPSC_TIMER2, - .usecount = 1, /* REVISIT: why can't' this be disabled? */ + .usecount = 1, /* REVISIT: why can't this be disabled? */ }; static struct clk timer3_clk = { diff --git a/trunk/arch/arm/mach-davinci/dm644x.c b/trunk/arch/arm/mach-davinci/dm644x.c index 5f8a65424184..4c82c2716293 100644 --- a/trunk/arch/arm/mach-davinci/dm644x.c +++ b/trunk/arch/arm/mach-davinci/dm644x.c @@ -274,7 +274,7 @@ static struct clk timer2_clk = { .name = "timer2", .parent = &pll1_aux_clk, .lpsc = DAVINCI_LPSC_TIMER2, - .usecount = 1, /* REVISIT: why can't' this be disabled? */ + .usecount = 1, /* REVISIT: why can't this be disabled? */ }; static struct clk_lookup dm644x_clks[] = { diff --git a/trunk/arch/arm/mach-davinci/include/mach/debug-macro.S b/trunk/arch/arm/mach-davinci/include/mach/debug-macro.S index 9f1befc5ac38..f8b7ea4f6235 100644 --- a/trunk/arch/arm/mach-davinci/include/mach/debug-macro.S +++ b/trunk/arch/arm/mach-davinci/include/mach/debug-macro.S @@ -24,6 +24,9 @@ #define UART_SHIFT 2 +#define davinci_uart_v2p(x) ((x) - PAGE_OFFSET + PLAT_PHYS_OFFSET) +#define davinci_uart_p2v(x) ((x) - PLAT_PHYS_OFFSET + PAGE_OFFSET) + .pushsection .data davinci_uart_phys: .word 0 davinci_uart_virt: .word 0 @@ -34,7 +37,7 @@ davinci_uart_virt: .word 0 /* Use davinci_uart_phys/virt if already configured */ 10: mrc p15, 0, \rp, c1, c0 tst \rp, #1 @ MMU enabled? - ldreq \rp, =__virt_to_phys(davinci_uart_phys) + ldreq \rp, =davinci_uart_v2p(davinci_uart_phys) ldrne \rp, =davinci_uart_phys add \rv, \rp, #4 @ davinci_uart_virt ldr \rp, [\rp, #0] @@ -48,18 +51,18 @@ davinci_uart_virt: .word 0 tst \rp, #1 @ MMU enabled? /* Copy uart phys address from decompressor uart info */ - ldreq \rv, =__virt_to_phys(davinci_uart_phys) + ldreq \rv, =davinci_uart_v2p(davinci_uart_phys) ldrne \rv, =davinci_uart_phys ldreq \rp, =DAVINCI_UART_INFO - ldrne \rp, =__phys_to_virt(DAVINCI_UART_INFO) + ldrne \rp, =davinci_uart_p2v(DAVINCI_UART_INFO) ldr \rp, [\rp, #0] str \rp, [\rv] /* Copy uart virt address from decompressor uart info */ - ldreq \rv, =__virt_to_phys(davinci_uart_virt) + ldreq \rv, =davinci_uart_v2p(davinci_uart_virt) ldrne \rv, =davinci_uart_virt ldreq \rp, =DAVINCI_UART_INFO - ldrne \rp, =__phys_to_virt(DAVINCI_UART_INFO) + ldrne \rp, =davinci_uart_p2v(DAVINCI_UART_INFO) ldr \rp, [\rp, #4] str \rp, [\rv] diff --git a/trunk/arch/arm/mach-davinci/include/mach/serial.h b/trunk/arch/arm/mach-davinci/include/mach/serial.h index 8051110b8ac3..c9e6ce185a66 100644 --- a/trunk/arch/arm/mach-davinci/include/mach/serial.h +++ b/trunk/arch/arm/mach-davinci/include/mach/serial.h @@ -22,7 +22,7 @@ * * This area sits just below the page tables (see arch/arm/kernel/head.S). 
*/ -#define DAVINCI_UART_INFO (PHYS_OFFSET + 0x3ff8) +#define DAVINCI_UART_INFO (PLAT_PHYS_OFFSET + 0x3ff8) #define DAVINCI_UART0_BASE (IO_PHYS + 0x20000) #define DAVINCI_UART1_BASE (IO_PHYS + 0x20400) diff --git a/trunk/arch/arm/mach-exynos4/pm.c b/trunk/arch/arm/mach-exynos4/pm.c index 10d917d9e3ad..8755ca8dd48d 100644 --- a/trunk/arch/arm/mach-exynos4/pm.c +++ b/trunk/arch/arm/mach-exynos4/pm.c @@ -16,6 +16,7 @@ #include #include +#include #include #include @@ -372,7 +373,27 @@ void exynos4_scu_enable(void __iomem *scu_base) flush_cache_all(); } -static int exynos4_pm_resume(struct sys_device *dev) +static struct sysdev_driver exynos4_pm_driver = { + .add = exynos4_pm_add, +}; + +static __init int exynos4_pm_drvinit(void) +{ + unsigned int tmp; + + s3c_pm_init(); + + /* All wakeup disable */ + + tmp = __raw_readl(S5P_WAKEUP_MASK); + tmp |= ((0xFF << 8) | (0x1F << 1)); + __raw_writel(tmp, S5P_WAKEUP_MASK); + + return sysdev_driver_register(&exynos4_sysclass, &exynos4_pm_driver); +} +arch_initcall(exynos4_pm_drvinit); + +static void exynos4_pm_resume(void) { /* For release retention */ @@ -394,27 +415,15 @@ static int exynos4_pm_resume(struct sys_device *dev) /* enable L2X0*/ writel_relaxed(1, S5P_VA_L2CC + L2X0_CTRL); #endif - - return 0; } -static struct sysdev_driver exynos4_pm_driver = { - .add = exynos4_pm_add, +static struct syscore_ops exynos4_pm_syscore_ops = { .resume = exynos4_pm_resume, }; -static __init int exynos4_pm_drvinit(void) +static __init int exynos4_pm_syscore_init(void) { - unsigned int tmp; - - s3c_pm_init(); - - /* All wakeup disable */ - - tmp = __raw_readl(S5P_WAKEUP_MASK); - tmp |= ((0xFF << 8) | (0x1F << 1)); - __raw_writel(tmp, S5P_WAKEUP_MASK); - - return sysdev_driver_register(&exynos4_sysclass, &exynos4_pm_driver); + register_syscore_ops(&exynos4_pm_syscore_ops); + return 0; } -arch_initcall(exynos4_pm_drvinit); +arch_initcall(exynos4_pm_syscore_init); diff --git a/trunk/arch/arm/mach-footbridge/Kconfig b/trunk/arch/arm/mach-footbridge/Kconfig index bdd257921cfb..46adca068f2c 100644 --- a/trunk/arch/arm/mach-footbridge/Kconfig +++ b/trunk/arch/arm/mach-footbridge/Kconfig @@ -4,6 +4,7 @@ menu "Footbridge Implementations" config ARCH_CATS bool "CATS" + select CLKSRC_I8253 select FOOTBRIDGE_HOST select ISA select ISA_DMA @@ -59,6 +60,7 @@ config ARCH_EBSA285_HOST config ARCH_NETWINDER bool "NetWinder" + select CLKSRC_I8253 select FOOTBRIDGE_HOST select ISA select ISA_DMA diff --git a/trunk/arch/arm/mach-footbridge/isa-timer.c b/trunk/arch/arm/mach-footbridge/isa-timer.c index 441c6ce0d555..7020f1a3feca 100644 --- a/trunk/arch/arm/mach-footbridge/isa-timer.c +++ b/trunk/arch/arm/mach-footbridge/isa-timer.c @@ -10,53 +10,16 @@ #include #include #include +#include #include #include - +#include #include #include "common.h" -#define PIT_MODE 0x43 -#define PIT_CH0 0x40 - -#define PIT_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ) - -static cycle_t pit_read(struct clocksource *cs) -{ - unsigned long flags; - static int old_count; - static u32 old_jifs; - int count; - u32 jifs; - - raw_local_irq_save(flags); - - jifs = jiffies; - outb_p(0x00, PIT_MODE); /* latch the count */ - count = inb_p(PIT_CH0); /* read the latched count */ - count |= inb_p(PIT_CH0) << 8; - - if (count > old_count && jifs == old_jifs) - count = old_count; - - old_count = count; - old_jifs = jifs; - - raw_local_irq_restore(flags); - - count = (PIT_LATCH - 1) - count; - - return (cycle_t)(jifs * PIT_LATCH) + count; -} - -static struct clocksource pit_cs = { - .name = "pit", - .rating = 110, - .read = 
pit_read, - .mask = CLOCKSOURCE_MASK(32), -}; +DEFINE_RAW_SPINLOCK(i8253_lock); static void pit_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) @@ -121,7 +84,7 @@ static void __init isa_timer_init(void) pit_ce.max_delta_ns = clockevent_delta2ns(0x7fff, &pit_ce); pit_ce.min_delta_ns = clockevent_delta2ns(0x000f, &pit_ce); - clocksource_register_hz(&pit_cs, PIT_TICK_RATE); + clocksource_i8253_init(); setup_irq(pit_ce.irq, &pit_timer_irq); clockevents_register_device(&pit_ce); diff --git a/trunk/arch/arm/mach-integrator/integrator_ap.c b/trunk/arch/arm/mach-integrator/integrator_ap.c index 980803ff348c..d3e96451529c 100644 --- a/trunk/arch/arm/mach-integrator/integrator_ap.c +++ b/trunk/arch/arm/mach-integrator/integrator_ap.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include #include @@ -180,13 +180,13 @@ static void __init ap_init_irq(void) #ifdef CONFIG_PM static unsigned long ic_irq_enable; -static int irq_suspend(struct sys_device *dev, pm_message_t state) +static int irq_suspend(void) { ic_irq_enable = readl(VA_IC_BASE + IRQ_ENABLE); return 0; } -static int irq_resume(struct sys_device *dev) +static void irq_resume(void) { /* disable all irq sources */ writel(-1, VA_CMIC_BASE + IRQ_ENABLE_CLEAR); @@ -194,33 +194,25 @@ static int irq_resume(struct sys_device *dev) writel(-1, VA_IC_BASE + FIQ_ENABLE_CLEAR); writel(ic_irq_enable, VA_IC_BASE + IRQ_ENABLE_SET); - return 0; } #else #define irq_suspend NULL #define irq_resume NULL #endif -static struct sysdev_class irq_class = { - .name = "irq", +static struct syscore_ops irq_syscore_ops = { .suspend = irq_suspend, .resume = irq_resume, }; -static struct sys_device irq_device = { - .id = 0, - .cls = &irq_class, -}; - -static int __init irq_init_sysfs(void) +static int __init irq_syscore_init(void) { - int ret = sysdev_class_register(&irq_class); - if (ret == 0) - ret = sysdev_register(&irq_device); - return ret; + register_syscore_ops(&irq_syscore_ops); + + return 0; } -device_initcall(irq_init_sysfs); +device_initcall(irq_syscore_init); /* * Flash handling. diff --git a/trunk/arch/arm/mach-mmp/include/mach/gpio.h b/trunk/arch/arm/mach-mmp/include/mach/gpio.h index ee8b02ed8011..7bfb827f3fe3 100644 --- a/trunk/arch/arm/mach-mmp/include/mach/gpio.h +++ b/trunk/arch/arm/mach-mmp/include/mach/gpio.h @@ -10,7 +10,7 @@ #define BANK_OFF(n) (((n) < 3) ? 
(n) << 2 : 0x100 + (((n) - 3) << 2)) #define GPIO_REG(x) (*((volatile u32 *)(GPIO_REGS_VIRT + (x)))) -#define NR_BUILTIN_GPIO (192) +#define NR_BUILTIN_GPIO IRQ_GPIO_NUM #define gpio_to_bank(gpio) ((gpio) >> 5) #define gpio_to_irq(gpio) (IRQ_GPIO_START + (gpio)) diff --git a/trunk/arch/arm/mach-mmp/include/mach/mfp-pxa168.h b/trunk/arch/arm/mach-mmp/include/mach/mfp-pxa168.h index 4621067c7720..713be155a44d 100644 --- a/trunk/arch/arm/mach-mmp/include/mach/mfp-pxa168.h +++ b/trunk/arch/arm/mach-mmp/include/mach/mfp-pxa168.h @@ -8,6 +8,15 @@ #define MFP_DRIVE_MEDIUM (0x2 << 13) #define MFP_DRIVE_FAST (0x3 << 13) +#undef MFP_CFG +#undef MFP_CFG_DRV + +#define MFP_CFG(pin, af) \ + (MFP_LPM_INPUT | MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_DRIVE_MEDIUM) + +#define MFP_CFG_DRV(pin, af, drv) \ + (MFP_LPM_INPUT | MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_DRIVE_##drv) + /* GPIO */ #define GPIO0_GPIO MFP_CFG(GPIO0, AF5) #define GPIO1_GPIO MFP_CFG(GPIO1, AF5) diff --git a/trunk/arch/arm/mach-msm/board-qsd8x50.c b/trunk/arch/arm/mach-msm/board-qsd8x50.c index 7f568611547e..6a96911b0ad5 100644 --- a/trunk/arch/arm/mach-msm/board-qsd8x50.c +++ b/trunk/arch/arm/mach-msm/board-qsd8x50.c @@ -160,10 +160,7 @@ static struct msm_mmc_platform_data qsd8x50_sdc1_data = { static void __init qsd8x50_init_mmc(void) { - if (machine_is_qsd8x50_ffa() || machine_is_qsd8x50a_ffa()) - vreg_mmc = vreg_get(NULL, "gp6"); - else - vreg_mmc = vreg_get(NULL, "gp5"); + vreg_mmc = vreg_get(NULL, "gp5"); if (IS_ERR(vreg_mmc)) { pr_err("vreg get for vreg_mmc failed (%ld)\n", diff --git a/trunk/arch/arm/mach-msm/timer.c b/trunk/arch/arm/mach-msm/timer.c index 56f920c55b6a..38b95e949d13 100644 --- a/trunk/arch/arm/mach-msm/timer.c +++ b/trunk/arch/arm/mach-msm/timer.c @@ -269,7 +269,7 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt) /* Use existing clock_event for cpu 0 */ if (!smp_processor_id()) - return; + return 0; writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL); diff --git a/trunk/arch/arm/mach-mx3/mach-vpr200.c b/trunk/arch/arm/mach-mx3/mach-vpr200.c index 2cf390fbd980..47a69cbc31a8 100644 --- a/trunk/arch/arm/mach-mx3/mach-vpr200.c +++ b/trunk/arch/arm/mach-mx3/mach-vpr200.c @@ -257,11 +257,16 @@ static const struct fsl_usb2_platform_data otg_device_pdata __initconst = { .workaround = FLS_USB2_WORKAROUND_ENGCM09152, }; +static int vpr200_usbh_init(struct platform_device *pdev) +{ + return mx35_initialize_usb_hw(pdev->id, + MXC_EHCI_INTERFACE_SINGLE_UNI | MXC_EHCI_INTERNAL_PHY); +} + /* USB HOST config */ static const struct mxc_usbh_platform_data usb_host_pdata __initconst = { - .portsc = MXC_EHCI_MODE_SERIAL, - .flags = MXC_EHCI_INTERFACE_SINGLE_UNI | - MXC_EHCI_INTERNAL_PHY, + .init = vpr200_usbh_init, + .portsc = MXC_EHCI_MODE_SERIAL, }; static struct platform_device *devices[] __initdata = { diff --git a/trunk/arch/arm/mach-mx5/board-mx53_loco.c b/trunk/arch/arm/mach-mx5/board-mx53_loco.c index 10a1bea10548..6206b1191fe8 100644 --- a/trunk/arch/arm/mach-mx5/board-mx53_loco.c +++ b/trunk/arch/arm/mach-mx5/board-mx53_loco.c @@ -193,7 +193,7 @@ static iomux_v3_cfg_t mx53_loco_pads[] = { .wakeup = wake, \ } -static const struct gpio_keys_button loco_buttons[] __initconst = { +static struct gpio_keys_button loco_buttons[] = { GPIO_BUTTON(MX53_LOCO_POWER, KEY_POWER, 1, "power", 0), GPIO_BUTTON(MX53_LOCO_UI1, KEY_VOLUMEUP, 1, "volume-up", 0), GPIO_BUTTON(MX53_LOCO_UI2, KEY_VOLUMEDOWN, 1, "volume-down", 0), diff --git a/trunk/arch/arm/mach-mxs/clock-mx28.c b/trunk/arch/arm/mach-mxs/clock-mx28.c index 
1ad97fed1e94..5dcc59d5b9ec 100644 --- a/trunk/arch/arm/mach-mxs/clock-mx28.c +++ b/trunk/arch/arm/mach-mxs/clock-mx28.c @@ -295,11 +295,11 @@ static int name##_set_rate(struct clk *clk, unsigned long rate) \ unsigned long diff, parent_rate, calc_rate; \ int i; \ \ - parent_rate = clk_get_rate(clk->parent); \ div_max = BM_CLKCTRL_##dr##_DIV >> BP_CLKCTRL_##dr##_DIV; \ bm_busy = BM_CLKCTRL_##dr##_BUSY; \ \ if (clk->parent == &ref_xtal_clk) { \ + parent_rate = clk_get_rate(clk->parent); \ div = DIV_ROUND_UP(parent_rate, rate); \ if (clk == &cpu_clk) { \ div_max = BM_CLKCTRL_CPU_DIV_XTAL >> \ @@ -309,6 +309,11 @@ static int name##_set_rate(struct clk *clk, unsigned long rate) \ if (div == 0 || div > div_max) \ return -EINVAL; \ } else { \ + /* \ + * hack alert: this block modifies clk->parent, too, \ + * so the base to use it the grand parent. \ + */ \ + parent_rate = clk_get_rate(clk->parent->parent); \ rate >>= PARENT_RATE_SHIFT; \ parent_rate >>= PARENT_RATE_SHIFT; \ diff = parent_rate; \ diff --git a/trunk/arch/arm/mach-omap1/pm_bus.c b/trunk/arch/arm/mach-omap1/pm_bus.c index 6588c22b8a64..fe31d933f0ed 100644 --- a/trunk/arch/arm/mach-omap1/pm_bus.c +++ b/trunk/arch/arm/mach-omap1/pm_bus.c @@ -24,75 +24,50 @@ #ifdef CONFIG_PM_RUNTIME static int omap1_pm_runtime_suspend(struct device *dev) { - struct clk *iclk, *fclk; - int ret = 0; + int ret; dev_dbg(dev, "%s\n", __func__); ret = pm_generic_runtime_suspend(dev); + if (ret) + return ret; - fclk = clk_get(dev, "fck"); - if (!IS_ERR(fclk)) { - clk_disable(fclk); - clk_put(fclk); - } - - iclk = clk_get(dev, "ick"); - if (!IS_ERR(iclk)) { - clk_disable(iclk); - clk_put(iclk); + ret = pm_runtime_clk_suspend(dev); + if (ret) { + pm_generic_runtime_resume(dev); + return ret; } return 0; -}; +} static int omap1_pm_runtime_resume(struct device *dev) { - struct clk *iclk, *fclk; - dev_dbg(dev, "%s\n", __func__); - iclk = clk_get(dev, "ick"); - if (!IS_ERR(iclk)) { - clk_enable(iclk); - clk_put(iclk); - } + pm_runtime_clk_resume(dev); + return pm_generic_runtime_resume(dev); +} - fclk = clk_get(dev, "fck"); - if (!IS_ERR(fclk)) { - clk_enable(fclk); - clk_put(fclk); - } +static struct dev_power_domain default_power_domain = { + .ops = { + .runtime_suspend = omap1_pm_runtime_suspend, + .runtime_resume = omap1_pm_runtime_resume, + USE_PLATFORM_PM_SLEEP_OPS + }, +}; - return pm_generic_runtime_resume(dev); +static struct pm_clk_notifier_block platform_bus_notifier = { + .pwr_domain = &default_power_domain, + .con_ids = { "ick", "fck", NULL, }, }; static int __init omap1_pm_runtime_init(void) { - const struct dev_pm_ops *pm; - struct dev_pm_ops *omap_pm; - if (!cpu_class_is_omap1()) return -ENODEV; - pm = platform_bus_get_pm_ops(); - if (!pm) { - pr_err("%s: unable to get dev_pm_ops from platform_bus\n", - __func__); - return -ENODEV; - } - - omap_pm = kmemdup(pm, sizeof(struct dev_pm_ops), GFP_KERNEL); - if (!omap_pm) { - pr_err("%s: unable to alloc memory for new dev_pm_ops\n", - __func__); - return -ENOMEM; - } - - omap_pm->runtime_suspend = omap1_pm_runtime_suspend; - omap_pm->runtime_resume = omap1_pm_runtime_resume; - - platform_bus_set_pm_ops(omap_pm); + pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); return 0; } diff --git a/trunk/arch/arm/mach-omap2/Makefile b/trunk/arch/arm/mach-omap2/Makefile index a45cd6409686..66dfbccacd25 100644 --- a/trunk/arch/arm/mach-omap2/Makefile +++ b/trunk/arch/arm/mach-omap2/Makefile @@ -59,16 +59,16 @@ endif # Power Management ifeq ($(CONFIG_PM),y) obj-$(CONFIG_ARCH_OMAP2) += pm24xx.o 
-obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o pm_bus.o +obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o \ - cpuidle34xx.o pm_bus.o -obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o pm_bus.o + cpuidle34xx.o +obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o obj-$(CONFIG_PM_DEBUG) += pm-debug.o obj-$(CONFIG_OMAP_SMARTREFLEX) += sr_device.o smartreflex.o obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3) += smartreflex-class3.o AFLAGS_sleep24xx.o :=-Wa,-march=armv6 -AFLAGS_sleep34xx.o :=-Wa,-march=armv7-a +AFLAGS_sleep34xx.o :=-Wa,-march=armv7-a$(plus_sec) ifeq ($(CONFIG_PM_VERBOSE),y) CFLAGS_pm_bus.o += -DDEBUG diff --git a/trunk/arch/arm/mach-omap2/board-rx51.c b/trunk/arch/arm/mach-omap2/board-rx51.c index e964895b80e8..f8ba20a14e62 100644 --- a/trunk/arch/arm/mach-omap2/board-rx51.c +++ b/trunk/arch/arm/mach-omap2/board-rx51.c @@ -141,14 +141,19 @@ static void __init rx51_init(void) static void __init rx51_map_io(void) { omap2_set_globals_3xxx(); - rx51_video_mem_init(); omap34xx_map_common_io(); } +static void __init rx51_reserve(void) +{ + rx51_video_mem_init(); + omap_reserve(); +} + MACHINE_START(NOKIA_RX51, "Nokia RX-51 board") /* Maintainer: Lauri Leukkunen */ .boot_params = 0x80000100, - .reserve = omap_reserve, + .reserve = rx51_reserve, .map_io = rx51_map_io, .init_early = rx51_init_early, .init_irq = omap_init_irq, diff --git a/trunk/arch/arm/mach-omap2/clkt34xx_dpll3m2.c b/trunk/arch/arm/mach-omap2/clkt34xx_dpll3m2.c index b2b1e37bb6bb..d6e34dd9e7e7 100644 --- a/trunk/arch/arm/mach-omap2/clkt34xx_dpll3m2.c +++ b/trunk/arch/arm/mach-omap2/clkt34xx_dpll3m2.c @@ -115,6 +115,7 @@ int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate) sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla, sdrc_cs0->actim_ctrlb, sdrc_cs0->mr, 0, 0, 0, 0); + clk->rate = rate; return 0; } diff --git a/trunk/arch/arm/mach-omap2/clock44xx_data.c b/trunk/arch/arm/mach-omap2/clock44xx_data.c index 276992d3b7fb..8c965671b4d4 100644 --- a/trunk/arch/arm/mach-omap2/clock44xx_data.c +++ b/trunk/arch/arm/mach-omap2/clock44xx_data.c @@ -3116,14 +3116,9 @@ static struct omap_clk omap44xx_clks[] = { CLK(NULL, "dsp_fck", &dsp_fck, CK_443X), CLK("omapdss_dss", "sys_clk", &dss_sys_clk, CK_443X), CLK("omapdss_dss", "tv_clk", &dss_tv_clk, CK_443X), - CLK("omapdss_dss", "dss_clk", &dss_dss_clk, CK_443X), CLK("omapdss_dss", "video_clk", &dss_48mhz_clk, CK_443X), - CLK("omapdss_dss", "fck", &dss_fck, CK_443X), - /* - * On OMAP4, DSS ick is a dummy clock; this is needed for compatibility - * with OMAP2/3. 
- */ - CLK("omapdss_dss", "ick", &dummy_ck, CK_443X), + CLK("omapdss_dss", "fck", &dss_dss_clk, CK_443X), + CLK("omapdss_dss", "ick", &dss_fck, CK_443X), CLK(NULL, "efuse_ctrl_cust_fck", &efuse_ctrl_cust_fck, CK_443X), CLK(NULL, "emif1_fck", &emif1_fck, CK_443X), CLK(NULL, "emif2_fck", &emif2_fck, CK_443X), diff --git a/trunk/arch/arm/mach-omap2/cm2xxx_3xxx.c b/trunk/arch/arm/mach-omap2/cm2xxx_3xxx.c index 9d0dec806e92..38830d8d4783 100644 --- a/trunk/arch/arm/mach-omap2/cm2xxx_3xxx.c +++ b/trunk/arch/arm/mach-omap2/cm2xxx_3xxx.c @@ -247,6 +247,7 @@ struct omap3_cm_regs { u32 per_cm_clksel; u32 emu_cm_clksel; u32 emu_cm_clkstctrl; + u32 pll_cm_autoidle; u32 pll_cm_autoidle2; u32 pll_cm_clksel4; u32 pll_cm_clksel5; @@ -319,6 +320,15 @@ void omap3_cm_save_context(void) omap2_cm_read_mod_reg(OMAP3430_EMU_MOD, CM_CLKSEL1); cm_context.emu_cm_clkstctrl = omap2_cm_read_mod_reg(OMAP3430_EMU_MOD, OMAP2_CM_CLKSTCTRL); + /* + * As per erratum i671, ROM code does not respect the PER DPLL + * programming scheme if CM_AUTOIDLE_PLL.AUTO_PERIPH_DPLL == 1. + * In this case, even though this register has been saved in + * scratchpad contents, we need to restore AUTO_PERIPH_DPLL + * by ourselves. So, we need to save it anyway. + */ + cm_context.pll_cm_autoidle = + omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE); cm_context.pll_cm_autoidle2 = omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE2); cm_context.pll_cm_clksel4 = @@ -441,6 +451,13 @@ void omap3_cm_restore_context(void) CM_CLKSEL1); omap2_cm_write_mod_reg(cm_context.emu_cm_clkstctrl, OMAP3430_EMU_MOD, OMAP2_CM_CLKSTCTRL); + /* + * As per erratum i671, ROM code does not respect the PER DPLL + * programming scheme if CM_AUTOIDLE_PLL.AUTO_PERIPH_DPLL == 1. + * In this case, we need to restore AUTO_PERIPH_DPLL by ourselves. + */ + omap2_cm_write_mod_reg(cm_context.pll_cm_autoidle, PLL_MOD, + CM_AUTOIDLE); omap2_cm_write_mod_reg(cm_context.pll_cm_autoidle2, PLL_MOD, CM_AUTOIDLE2); omap2_cm_write_mod_reg(cm_context.pll_cm_clksel4, PLL_MOD, diff --git a/trunk/arch/arm/mach-omap2/control.c b/trunk/arch/arm/mach-omap2/control.c index 695279419020..da53ba3917ca 100644 --- a/trunk/arch/arm/mach-omap2/control.c +++ b/trunk/arch/arm/mach-omap2/control.c @@ -316,8 +316,14 @@ void omap3_save_scratchpad_contents(void) omap2_cm_read_mod_reg(WKUP_MOD, CM_CLKSEL); prcm_block_contents.cm_clken_pll = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKEN); + /* + * As per erratum i671, ROM code does not respect the PER DPLL + * programming scheme if CM_AUTOIDLE_PLL..AUTO_PERIPH_DPLL == 1. + * Then, in anycase, clear these bits to avoid extra latencies. 
+ */ prcm_block_contents.cm_autoidle_pll = - omap2_cm_read_mod_reg(PLL_MOD, OMAP3430_CM_AUTOIDLE_PLL); + omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE) & + ~OMAP3430_AUTO_PERIPH_DPLL_MASK; prcm_block_contents.cm_clksel1_pll = omap2_cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL1_PLL); prcm_block_contents.cm_clksel2_pll = diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_2420_data.c index 8eb3ce1bbfbe..c4d0ae87d62a 100644 --- a/trunk/arch/arm/mach-omap2/omap_hwmod_2420_data.c +++ b/trunk/arch/arm/mach-omap2/omap_hwmod_2420_data.c @@ -1639,6 +1639,7 @@ static struct omap_hwmod_ocp_if *omap2420_gpio1_slaves[] = { static struct omap_hwmod omap2420_gpio1_hwmod = { .name = "gpio1", + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap242x_gpio1_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap242x_gpio1_irqs), .main_clk = "gpios_fck", @@ -1669,6 +1670,7 @@ static struct omap_hwmod_ocp_if *omap2420_gpio2_slaves[] = { static struct omap_hwmod omap2420_gpio2_hwmod = { .name = "gpio2", + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap242x_gpio2_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap242x_gpio2_irqs), .main_clk = "gpios_fck", @@ -1699,6 +1701,7 @@ static struct omap_hwmod_ocp_if *omap2420_gpio3_slaves[] = { static struct omap_hwmod omap2420_gpio3_hwmod = { .name = "gpio3", + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap242x_gpio3_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap242x_gpio3_irqs), .main_clk = "gpios_fck", @@ -1729,6 +1732,7 @@ static struct omap_hwmod_ocp_if *omap2420_gpio4_slaves[] = { static struct omap_hwmod omap2420_gpio4_hwmod = { .name = "gpio4", + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap242x_gpio4_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap242x_gpio4_irqs), .main_clk = "gpios_fck", @@ -1782,7 +1786,7 @@ static struct omap_hwmod_irq_info omap2420_dma_system_irqs[] = { static struct omap_hwmod_addr_space omap2420_dma_system_addrs[] = { { .pa_start = 0x48056000, - .pa_end = 0x4a0560ff, + .pa_end = 0x48056fff, .flags = ADDR_TYPE_RT }, }; diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_2430_data.c index e6e3810db77f..9682dd519f8d 100644 --- a/trunk/arch/arm/mach-omap2/omap_hwmod_2430_data.c +++ b/trunk/arch/arm/mach-omap2/omap_hwmod_2430_data.c @@ -1742,6 +1742,7 @@ static struct omap_hwmod_ocp_if *omap2430_gpio1_slaves[] = { static struct omap_hwmod omap2430_gpio1_hwmod = { .name = "gpio1", + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap243x_gpio1_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio1_irqs), .main_clk = "gpios_fck", @@ -1772,6 +1773,7 @@ static struct omap_hwmod_ocp_if *omap2430_gpio2_slaves[] = { static struct omap_hwmod omap2430_gpio2_hwmod = { .name = "gpio2", + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap243x_gpio2_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio2_irqs), .main_clk = "gpios_fck", @@ -1802,6 +1804,7 @@ static struct omap_hwmod_ocp_if *omap2430_gpio3_slaves[] = { static struct omap_hwmod omap2430_gpio3_hwmod = { .name = "gpio3", + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap243x_gpio3_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio3_irqs), .main_clk = "gpios_fck", @@ -1832,6 +1835,7 @@ static struct omap_hwmod_ocp_if *omap2430_gpio4_slaves[] = { static struct omap_hwmod omap2430_gpio4_hwmod = { .name = "gpio4", + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap243x_gpio4_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio4_irqs), .main_clk = "gpios_fck", @@ -1862,6 +1866,7 @@ static struct 
omap_hwmod_ocp_if *omap2430_gpio5_slaves[] = { static struct omap_hwmod omap2430_gpio5_hwmod = { .name = "gpio5", + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap243x_gpio5_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio5_irqs), .main_clk = "gpio5_fck", @@ -1915,7 +1920,7 @@ static struct omap_hwmod_irq_info omap2430_dma_system_irqs[] = { static struct omap_hwmod_addr_space omap2430_dma_system_addrs[] = { { .pa_start = 0x48056000, - .pa_end = 0x4a0560ff, + .pa_end = 0x48056fff, .flags = ADDR_TYPE_RT }, }; diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index b98e2dfcba28..909a84de6682 100644 --- a/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -2141,6 +2141,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio1_slaves[] = { static struct omap_hwmod omap3xxx_gpio1_hwmod = { .name = "gpio1", + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap3xxx_gpio1_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio1_irqs), .main_clk = "gpio1_ick", @@ -2177,6 +2178,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio2_slaves[] = { static struct omap_hwmod omap3xxx_gpio2_hwmod = { .name = "gpio2", + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap3xxx_gpio2_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio2_irqs), .main_clk = "gpio2_ick", @@ -2213,6 +2215,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio3_slaves[] = { static struct omap_hwmod omap3xxx_gpio3_hwmod = { .name = "gpio3", + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap3xxx_gpio3_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio3_irqs), .main_clk = "gpio3_ick", @@ -2249,6 +2252,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio4_slaves[] = { static struct omap_hwmod omap3xxx_gpio4_hwmod = { .name = "gpio4", + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap3xxx_gpio4_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio4_irqs), .main_clk = "gpio4_ick", @@ -2285,6 +2289,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio5_slaves[] = { static struct omap_hwmod omap3xxx_gpio5_hwmod = { .name = "gpio5", + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap3xxx_gpio5_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio5_irqs), .main_clk = "gpio5_ick", @@ -2321,6 +2326,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio6_slaves[] = { static struct omap_hwmod omap3xxx_gpio6_hwmod = { .name = "gpio6", + .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap3xxx_gpio6_irqs, .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio6_irqs), .main_clk = "gpio6_ick", @@ -2386,7 +2392,7 @@ static struct omap_hwmod_irq_info omap3xxx_dma_system_irqs[] = { static struct omap_hwmod_addr_space omap3xxx_dma_system_addrs[] = { { .pa_start = 0x48056000, - .pa_end = 0x4a0560ff, + .pa_end = 0x48056fff, .flags = ADDR_TYPE_RT }, }; diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 3e88dd3f8ef3..abc548a0c98d 100644 --- a/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -885,7 +885,7 @@ static struct omap_hwmod_ocp_if *omap44xx_dma_system_masters[] = { static struct omap_hwmod_addr_space omap44xx_dma_system_addrs[] = { { .pa_start = 0x4a056000, - .pa_end = 0x4a0560ff, + .pa_end = 0x4a056fff, .flags = ADDR_TYPE_RT }, }; diff --git a/trunk/arch/arm/mach-omap2/omap_l3_smx.c b/trunk/arch/arm/mach-omap2/omap_l3_smx.c index 5f2da7565b68..4321e7938929 100644 --- a/trunk/arch/arm/mach-omap2/omap_l3_smx.c +++ 
b/trunk/arch/arm/mach-omap2/omap_l3_smx.c @@ -196,11 +196,11 @@ static irqreturn_t omap3_l3_app_irq(int irq, void *_l3) /* No timeout error for debug sources */ } - base = ((l3->rt) + (*(omap3_l3_bases[int_type] + err_source))); - /* identify the error source */ for (err_source = 0; !(status & (1 << err_source)); err_source++) ; + + base = l3->rt + *(omap3_l3_bases[int_type] + err_source); error = omap3_l3_readll(base, L3_ERROR_LOG); if (error) { diff --git a/trunk/arch/arm/mach-omap2/pm.c b/trunk/arch/arm/mach-omap2/pm.c index 30af3351c2d6..49486f522dca 100644 --- a/trunk/arch/arm/mach-omap2/pm.c +++ b/trunk/arch/arm/mach-omap2/pm.c @@ -89,6 +89,7 @@ static void omap2_init_processor_devices(void) if (cpu_is_omap44xx()) { _init_omap_device("l3_main_1", &l3_dev); _init_omap_device("dsp", &dsp_dev); + _init_omap_device("iva", &iva_dev); } else { _init_omap_device("l3_main", &l3_dev); } diff --git a/trunk/arch/arm/mach-omap2/pm_bus.c b/trunk/arch/arm/mach-omap2/pm_bus.c deleted file mode 100644 index 5acd2ab298b1..000000000000 --- a/trunk/arch/arm/mach-omap2/pm_bus.c +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Runtime PM support code for OMAP - * - * Author: Kevin Hilman, Deep Root Systems, LLC - * - * Copyright (C) 2010 Texas Instruments, Inc. - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - */ -#include -#include -#include -#include -#include -#include - -#include -#include - -#ifdef CONFIG_PM_RUNTIME -static int omap_pm_runtime_suspend(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - int r, ret = 0; - - dev_dbg(dev, "%s\n", __func__); - - ret = pm_generic_runtime_suspend(dev); - - if (!ret && dev->parent == &omap_device_parent) { - r = omap_device_idle(pdev); - WARN_ON(r); - } - - return ret; -}; - -static int omap_pm_runtime_resume(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - int r; - - dev_dbg(dev, "%s\n", __func__); - - if (dev->parent == &omap_device_parent) { - r = omap_device_enable(pdev); - WARN_ON(r); - } - - return pm_generic_runtime_resume(dev); -}; -#else -#define omap_pm_runtime_suspend NULL -#define omap_pm_runtime_resume NULL -#endif /* CONFIG_PM_RUNTIME */ - -static int __init omap_pm_runtime_init(void) -{ - const struct dev_pm_ops *pm; - struct dev_pm_ops *omap_pm; - - pm = platform_bus_get_pm_ops(); - if (!pm) { - pr_err("%s: unable to get dev_pm_ops from platform_bus\n", - __func__); - return -ENODEV; - } - - omap_pm = kmemdup(pm, sizeof(struct dev_pm_ops), GFP_KERNEL); - if (!omap_pm) { - pr_err("%s: unable to alloc memory for new dev_pm_ops\n", - __func__); - return -ENOMEM; - } - - omap_pm->runtime_suspend = omap_pm_runtime_suspend; - omap_pm->runtime_resume = omap_pm_runtime_resume; - - platform_bus_set_pm_ops(omap_pm); - - return 0; -} -core_initcall(omap_pm_runtime_init); diff --git a/trunk/arch/arm/mach-omap2/voltage.c b/trunk/arch/arm/mach-omap2/voltage.c index 6fb520999b6e..0c1552d9d995 100644 --- a/trunk/arch/arm/mach-omap2/voltage.c +++ b/trunk/arch/arm/mach-omap2/voltage.c @@ -114,7 +114,6 @@ static int __init _config_common_vdd_data(struct omap_vdd_info *vdd) sys_clk_speed /= 1000; /* Generic voltage parameters */ - vdd->curr_volt = 1200000; vdd->volt_scale = vp_forceupdate_scale_voltage; vdd->vp_enabled = false; diff --git a/trunk/arch/arm/mach-pxa/balloon3.c b/trunk/arch/arm/mach-pxa/balloon3.c index bfbecec6d05f..810a982a66f8 100644 --- 
a/trunk/arch/arm/mach-pxa/balloon3.c +++ b/trunk/arch/arm/mach-pxa/balloon3.c @@ -15,7 +15,6 @@ #include #include -#include #include #include #include diff --git a/trunk/arch/arm/mach-pxa/clock-pxa2xx.c b/trunk/arch/arm/mach-pxa/clock-pxa2xx.c index 1ce090448493..1d5859d9a0e3 100644 --- a/trunk/arch/arm/mach-pxa/clock-pxa2xx.c +++ b/trunk/arch/arm/mach-pxa/clock-pxa2xx.c @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include @@ -33,32 +33,22 @@ const struct clkops clk_pxa2xx_cken_ops = { #ifdef CONFIG_PM static uint32_t saved_cken; -static int pxa2xx_clock_suspend(struct sys_device *d, pm_message_t state) +static int pxa2xx_clock_suspend(void) { saved_cken = CKEN; return 0; } -static int pxa2xx_clock_resume(struct sys_device *d) +static void pxa2xx_clock_resume(void) { CKEN = saved_cken; - return 0; } #else #define pxa2xx_clock_suspend NULL #define pxa2xx_clock_resume NULL #endif -struct sysdev_class pxa2xx_clock_sysclass = { - .name = "pxa2xx-clock", +struct syscore_ops pxa2xx_clock_syscore_ops = { .suspend = pxa2xx_clock_suspend, .resume = pxa2xx_clock_resume, }; - -static int __init pxa2xx_clock_init(void) -{ - if (cpu_is_pxa2xx()) - return sysdev_class_register(&pxa2xx_clock_sysclass); - return 0; -} -postcore_initcall(pxa2xx_clock_init); diff --git a/trunk/arch/arm/mach-pxa/clock-pxa3xx.c b/trunk/arch/arm/mach-pxa/clock-pxa3xx.c index 3f864cd0bd28..2a37a9a8f621 100644 --- a/trunk/arch/arm/mach-pxa/clock-pxa3xx.c +++ b/trunk/arch/arm/mach-pxa/clock-pxa3xx.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -182,7 +183,7 @@ const struct clkops clk_pxa3xx_pout_ops = { static uint32_t cken[2]; static uint32_t accr; -static int pxa3xx_clock_suspend(struct sys_device *d, pm_message_t state) +static int pxa3xx_clock_suspend(void) { cken[0] = CKENA; cken[1] = CKENB; @@ -190,28 +191,18 @@ static int pxa3xx_clock_suspend(struct sys_device *d, pm_message_t state) return 0; } -static int pxa3xx_clock_resume(struct sys_device *d) +static void pxa3xx_clock_resume(void) { ACCR = accr; CKENA = cken[0]; CKENB = cken[1]; - return 0; } #else #define pxa3xx_clock_suspend NULL #define pxa3xx_clock_resume NULL #endif -struct sysdev_class pxa3xx_clock_sysclass = { - .name = "pxa3xx-clock", +struct syscore_ops pxa3xx_clock_syscore_ops = { .suspend = pxa3xx_clock_suspend, .resume = pxa3xx_clock_resume, }; - -static int __init pxa3xx_clock_init(void) -{ - if (cpu_is_pxa3xx() || cpu_is_pxa95x()) - return sysdev_class_register(&pxa3xx_clock_sysclass); - return 0; -} -postcore_initcall(pxa3xx_clock_init); diff --git a/trunk/arch/arm/mach-pxa/clock.h b/trunk/arch/arm/mach-pxa/clock.h index f9f349a21b54..1f2fb9c43f06 100644 --- a/trunk/arch/arm/mach-pxa/clock.h +++ b/trunk/arch/arm/mach-pxa/clock.h @@ -1,5 +1,5 @@ #include -#include +#include struct clkops { void (*enable)(struct clk *); @@ -54,7 +54,7 @@ extern const struct clkops clk_pxa2xx_cken_ops; void clk_pxa2xx_cken_enable(struct clk *clk); void clk_pxa2xx_cken_disable(struct clk *clk); -extern struct sysdev_class pxa2xx_clock_sysclass; +extern struct syscore_ops pxa2xx_clock_syscore_ops; #if defined(CONFIG_PXA3xx) || defined(CONFIG_PXA95x) #define DEFINE_PXA3_CKEN(_name, _cken, _rate, _delay) \ @@ -74,5 +74,6 @@ extern const struct clkops clk_pxa3xx_smemc_ops; extern void clk_pxa3xx_cken_enable(struct clk *); extern void clk_pxa3xx_cken_disable(struct clk *); -extern struct sysdev_class pxa3xx_clock_sysclass; +extern struct syscore_ops pxa3xx_clock_syscore_ops; + #endif diff --git 
a/trunk/arch/arm/mach-pxa/cm-x270.c b/trunk/arch/arm/mach-pxa/cm-x270.c index b88d601a8090..13518a705399 100644 --- a/trunk/arch/arm/mach-pxa/cm-x270.c +++ b/trunk/arch/arm/mach-pxa/cm-x270.c @@ -10,7 +10,6 @@ */ #include -#include #include #include #include diff --git a/trunk/arch/arm/mach-pxa/cm-x2xx.c b/trunk/arch/arm/mach-pxa/cm-x2xx.c index 8225e2e58c6e..a10996782476 100644 --- a/trunk/arch/arm/mach-pxa/cm-x2xx.c +++ b/trunk/arch/arm/mach-pxa/cm-x2xx.c @@ -10,7 +10,7 @@ */ #include -#include +#include #include #include @@ -388,7 +388,7 @@ static inline void cmx2xx_init_display(void) {} #ifdef CONFIG_PM static unsigned long sleep_save_msc[10]; -static int cmx2xx_suspend(struct sys_device *dev, pm_message_t state) +static int cmx2xx_suspend(void) { cmx2xx_pci_suspend(); @@ -412,7 +412,7 @@ static int cmx2xx_suspend(struct sys_device *dev, pm_message_t state) return 0; } -static int cmx2xx_resume(struct sys_device *dev) +static void cmx2xx_resume(void) { cmx2xx_pci_resume(); @@ -420,27 +420,18 @@ static int cmx2xx_resume(struct sys_device *dev) __raw_writel(sleep_save_msc[0], MSC0); __raw_writel(sleep_save_msc[1], MSC1); __raw_writel(sleep_save_msc[2], MSC2); - - return 0; } -static struct sysdev_class cmx2xx_pm_sysclass = { - .name = "pm", +static struct syscore_ops cmx2xx_pm_syscore_ops = { .resume = cmx2xx_resume, .suspend = cmx2xx_suspend, }; -static struct sys_device cmx2xx_pm_device = { - .cls = &cmx2xx_pm_sysclass, -}; - static int __init cmx2xx_pm_init(void) { - int error; - error = sysdev_class_register(&cmx2xx_pm_sysclass); - if (error == 0) - error = sysdev_register(&cmx2xx_pm_device); - return error; + register_syscore_ops(&cmx2xx_pm_syscore_ops); + + return 0; } #else static int __init cmx2xx_pm_init(void) { return 0; } diff --git a/trunk/arch/arm/mach-pxa/colibri-evalboard.c b/trunk/arch/arm/mach-pxa/colibri-evalboard.c index 81c3c433e2d6..d28e802e2448 100644 --- a/trunk/arch/arm/mach-pxa/colibri-evalboard.c +++ b/trunk/arch/arm/mach-pxa/colibri-evalboard.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/arch/arm/mach-pxa/colibri-pxa270-income.c b/trunk/arch/arm/mach-pxa/colibri-pxa270-income.c index 44c1b77ece67..80538b8806ed 100644 --- a/trunk/arch/arm/mach-pxa/colibri-pxa270-income.c +++ b/trunk/arch/arm/mach-pxa/colibri-pxa270-income.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include diff --git a/trunk/arch/arm/mach-pxa/colibri-pxa270.c b/trunk/arch/arm/mach-pxa/colibri-pxa270.c index 6fc5d328ba7f..7545a48ed88b 100644 --- a/trunk/arch/arm/mach-pxa/colibri-pxa270.c +++ b/trunk/arch/arm/mach-pxa/colibri-pxa270.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include diff --git a/trunk/arch/arm/mach-pxa/generic.h b/trunk/arch/arm/mach-pxa/generic.h index a079d8baa45a..e6c9344a95ae 100644 --- a/trunk/arch/arm/mach-pxa/generic.h +++ b/trunk/arch/arm/mach-pxa/generic.h @@ -61,10 +61,10 @@ extern unsigned pxa3xx_get_clk_frequency_khz(int); #define pxa3xx_get_clk_frequency_khz(x) (0) #endif -extern struct sysdev_class pxa_irq_sysclass; -extern struct sysdev_class pxa_gpio_sysclass; -extern struct sysdev_class pxa2xx_mfp_sysclass; -extern struct sysdev_class pxa3xx_mfp_sysclass; +extern struct syscore_ops pxa_irq_syscore_ops; +extern struct syscore_ops pxa_gpio_syscore_ops; +extern struct syscore_ops pxa2xx_mfp_syscore_ops; +extern struct syscore_ops pxa3xx_mfp_syscore_ops; void __init pxa_set_ffuart_info(void *info); void __init pxa_set_btuart_info(void *info); diff --git 
a/trunk/arch/arm/mach-pxa/hx4700.c b/trunk/arch/arm/mach-pxa/hx4700.c index 6de0ad0eea65..9cdcca597924 100644 --- a/trunk/arch/arm/mach-pxa/hx4700.c +++ b/trunk/arch/arm/mach-pxa/hx4700.c @@ -711,7 +711,7 @@ static struct regulator_consumer_supply bq24022_consumers[] = { static struct regulator_init_data bq24022_init_data = { .constraints = { .max_uA = 500000, - .valid_ops_mask = REGULATOR_CHANGE_CURRENT, + .valid_ops_mask = REGULATOR_CHANGE_CURRENT|REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(bq24022_consumers), .consumer_supplies = bq24022_consumers, diff --git a/trunk/arch/arm/mach-pxa/include/mach/gpio.h b/trunk/arch/arm/mach-pxa/include/mach/gpio.h index b024a8b37439..c4639502efca 100644 --- a/trunk/arch/arm/mach-pxa/include/mach/gpio.h +++ b/trunk/arch/arm/mach-pxa/include/mach/gpio.h @@ -99,11 +99,24 @@ #define GAFR(x) GPIO_REG(0x54 + (((x) & 0x70) >> 2)) -#define NR_BUILTIN_GPIO 128 +#define NR_BUILTIN_GPIO PXA_GPIO_IRQ_NUM #define gpio_to_bank(gpio) ((gpio) >> 5) #define gpio_to_irq(gpio) IRQ_GPIO(gpio) -#define irq_to_gpio(irq) IRQ_TO_GPIO(irq) + +static inline int irq_to_gpio(unsigned int irq) +{ + int gpio; + + if (irq == IRQ_GPIO0 || irq == IRQ_GPIO1) + return irq - IRQ_GPIO0; + + gpio = irq - PXA_GPIO_IRQ_BASE; + if (gpio >= 2 && gpio < NR_BUILTIN_GPIO) + return gpio; + + return -1; +} #ifdef CONFIG_CPU_PXA26x /* GPIO86/87/88/89 on PXA26x have their direction bits in GPDR2 inverted, diff --git a/trunk/arch/arm/mach-pxa/include/mach/irqs.h b/trunk/arch/arm/mach-pxa/include/mach/irqs.h index a4285fc00878..038402404e39 100644 --- a/trunk/arch/arm/mach-pxa/include/mach/irqs.h +++ b/trunk/arch/arm/mach-pxa/include/mach/irqs.h @@ -93,9 +93,6 @@ #define GPIO_2_x_TO_IRQ(x) (PXA_GPIO_IRQ_BASE + (x)) #define IRQ_GPIO(x) (((x) < 2) ? (IRQ_GPIO0 + (x)) : GPIO_2_x_TO_IRQ(x)) -#define IRQ_TO_GPIO_2_x(i) ((i) - PXA_GPIO_IRQ_BASE) -#define IRQ_TO_GPIO(i) (((i) < IRQ_GPIO(2)) ? ((i) - IRQ_GPIO0) : IRQ_TO_GPIO_2_x(i)) - /* * The following interrupts are for board specific purposes. 
Since * the kernel can only run on one machine at a time, we can re-use diff --git a/trunk/arch/arm/mach-pxa/irq.c b/trunk/arch/arm/mach-pxa/irq.c index 6251e3f5c62c..32ed551bf9c5 100644 --- a/trunk/arch/arm/mach-pxa/irq.c +++ b/trunk/arch/arm/mach-pxa/irq.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include @@ -183,7 +183,7 @@ void __init pxa_init_irq(int irq_nr, set_wake_t fn) static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32]; static unsigned long saved_ipr[MAX_INTERNAL_IRQS]; -static int pxa_irq_suspend(struct sys_device *dev, pm_message_t state) +static int pxa_irq_suspend(void) { int i; @@ -202,7 +202,7 @@ static int pxa_irq_suspend(struct sys_device *dev, pm_message_t state) return 0; } -static int pxa_irq_resume(struct sys_device *dev) +static void pxa_irq_resume(void) { int i; @@ -218,22 +218,13 @@ static int pxa_irq_resume(struct sys_device *dev) __raw_writel(saved_ipr[i], IRQ_BASE + IPR(i)); __raw_writel(1, IRQ_BASE + ICCR); - return 0; } #else #define pxa_irq_suspend NULL #define pxa_irq_resume NULL #endif -struct sysdev_class pxa_irq_sysclass = { - .name = "irq", +struct syscore_ops pxa_irq_syscore_ops = { .suspend = pxa_irq_suspend, .resume = pxa_irq_resume, }; - -static int __init pxa_irq_init(void) -{ - return sysdev_class_register(&pxa_irq_sysclass); -} - -core_initcall(pxa_irq_init); diff --git a/trunk/arch/arm/mach-pxa/lpd270.c b/trunk/arch/arm/mach-pxa/lpd270.c index f5de541725b1..6cf8180bf5bd 100644 --- a/trunk/arch/arm/mach-pxa/lpd270.c +++ b/trunk/arch/arm/mach-pxa/lpd270.c @@ -15,7 +15,7 @@ #include #include -#include +#include #include #include #include @@ -159,30 +159,22 @@ static void __init lpd270_init_irq(void) #ifdef CONFIG_PM -static int lpd270_irq_resume(struct sys_device *dev) +static void lpd270_irq_resume(void) { __raw_writew(lpd270_irq_enabled, LPD270_INT_MASK); - return 0; } -static struct sysdev_class lpd270_irq_sysclass = { - .name = "cpld_irq", +static struct syscore_ops lpd270_irq_syscore_ops = { .resume = lpd270_irq_resume, }; -static struct sys_device lpd270_irq_device = { - .cls = &lpd270_irq_sysclass, -}; - static int __init lpd270_irq_device_init(void) { - int ret = -ENODEV; if (machine_is_logicpd_pxa270()) { - ret = sysdev_class_register(&lpd270_irq_sysclass); - if (ret == 0) - ret = sysdev_register(&lpd270_irq_device); + register_syscore_ops(&lpd270_irq_syscore_ops); + return 0; } - return ret; + return -ENODEV; } device_initcall(lpd270_irq_device_init); diff --git a/trunk/arch/arm/mach-pxa/lubbock.c b/trunk/arch/arm/mach-pxa/lubbock.c index 3ede978c83d9..e10ddb827147 100644 --- a/trunk/arch/arm/mach-pxa/lubbock.c +++ b/trunk/arch/arm/mach-pxa/lubbock.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include @@ -176,31 +176,22 @@ static void __init lubbock_init_irq(void) #ifdef CONFIG_PM -static int lubbock_irq_resume(struct sys_device *dev) +static void lubbock_irq_resume(void) { LUB_IRQ_MASK_EN = lubbock_irq_enabled; - return 0; } -static struct sysdev_class lubbock_irq_sysclass = { - .name = "cpld_irq", +static struct syscore_ops lubbock_irq_syscore_ops = { .resume = lubbock_irq_resume, }; -static struct sys_device lubbock_irq_device = { - .cls = &lubbock_irq_sysclass, -}; - static int __init lubbock_irq_device_init(void) { - int ret = -ENODEV; - if (machine_is_lubbock()) { - ret = sysdev_class_register(&lubbock_irq_sysclass); - if (ret == 0) - ret = sysdev_register(&lubbock_irq_device); + register_syscore_ops(&lubbock_irq_syscore_ops); + return 0; } - return ret; + 
return -ENODEV; } device_initcall(lubbock_irq_device_init); diff --git a/trunk/arch/arm/mach-pxa/magician.c b/trunk/arch/arm/mach-pxa/magician.c index a72993dde2b3..9984ef70bd79 100644 --- a/trunk/arch/arm/mach-pxa/magician.c +++ b/trunk/arch/arm/mach-pxa/magician.c @@ -599,7 +599,7 @@ static struct regulator_consumer_supply bq24022_consumers[] = { static struct regulator_init_data bq24022_init_data = { .constraints = { .max_uA = 500000, - .valid_ops_mask = REGULATOR_CHANGE_CURRENT, + .valid_ops_mask = REGULATOR_CHANGE_CURRENT | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(bq24022_consumers), .consumer_supplies = bq24022_consumers, diff --git a/trunk/arch/arm/mach-pxa/mainstone.c b/trunk/arch/arm/mach-pxa/mainstone.c index 95163baca29e..3479e2b3b511 100644 --- a/trunk/arch/arm/mach-pxa/mainstone.c +++ b/trunk/arch/arm/mach-pxa/mainstone.c @@ -15,7 +15,7 @@ #include #include -#include +#include #include #include #include @@ -185,31 +185,21 @@ static void __init mainstone_init_irq(void) #ifdef CONFIG_PM -static int mainstone_irq_resume(struct sys_device *dev) +static void mainstone_irq_resume(void) { MST_INTMSKENA = mainstone_irq_enabled; - return 0; } -static struct sysdev_class mainstone_irq_sysclass = { - .name = "cpld_irq", +static struct syscore_ops mainstone_irq_syscore_ops = { .resume = mainstone_irq_resume, }; -static struct sys_device mainstone_irq_device = { - .cls = &mainstone_irq_sysclass, -}; - static int __init mainstone_irq_device_init(void) { - int ret = -ENODEV; + if (machine_is_mainstone()) + register_syscore_ops(&mainstone_irq_syscore_ops); - if (machine_is_mainstone()) { - ret = sysdev_class_register(&mainstone_irq_sysclass); - if (ret == 0) - ret = sysdev_register(&mainstone_irq_device); - } - return ret; + return 0; } device_initcall(mainstone_irq_device_init); diff --git a/trunk/arch/arm/mach-pxa/mfp-pxa2xx.c b/trunk/arch/arm/mach-pxa/mfp-pxa2xx.c index 1d1419b73457..87ae3129f4f7 100644 --- a/trunk/arch/arm/mach-pxa/mfp-pxa2xx.c +++ b/trunk/arch/arm/mach-pxa/mfp-pxa2xx.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include @@ -338,7 +338,7 @@ static unsigned long saved_gafr[2][4]; static unsigned long saved_gpdr[4]; static unsigned long saved_pgsr[4]; -static int pxa2xx_mfp_suspend(struct sys_device *d, pm_message_t state) +static int pxa2xx_mfp_suspend(void) { int i; @@ -365,7 +365,7 @@ static int pxa2xx_mfp_suspend(struct sys_device *d, pm_message_t state) return 0; } -static int pxa2xx_mfp_resume(struct sys_device *d) +static void pxa2xx_mfp_resume(void) { int i; @@ -376,15 +376,13 @@ static int pxa2xx_mfp_resume(struct sys_device *d) PGSR(i) = saved_pgsr[i]; } PSSR = PSSR_RDH | PSSR_PH; - return 0; } #else #define pxa2xx_mfp_suspend NULL #define pxa2xx_mfp_resume NULL #endif -struct sysdev_class pxa2xx_mfp_sysclass = { - .name = "mfp", +struct syscore_ops pxa2xx_mfp_syscore_ops = { .suspend = pxa2xx_mfp_suspend, .resume = pxa2xx_mfp_resume, }; @@ -409,6 +407,6 @@ static int __init pxa2xx_mfp_init(void) for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) gpdr_lpm[i] = GPDR(i * 32); - return sysdev_class_register(&pxa2xx_mfp_sysclass); + return 0; } postcore_initcall(pxa2xx_mfp_init); diff --git a/trunk/arch/arm/mach-pxa/mfp-pxa3xx.c b/trunk/arch/arm/mach-pxa/mfp-pxa3xx.c index 7a270eecd480..89863a01ecd7 100644 --- a/trunk/arch/arm/mach-pxa/mfp-pxa3xx.c +++ b/trunk/arch/arm/mach-pxa/mfp-pxa3xx.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include @@ -31,13 +31,13 @@ * a pull-down mode if 
they're an active low chip select, and we're * just entering standby. */ -static int pxa3xx_mfp_suspend(struct sys_device *d, pm_message_t state) +static int pxa3xx_mfp_suspend(void) { mfp_config_lpm(); return 0; } -static int pxa3xx_mfp_resume(struct sys_device *d) +static void pxa3xx_mfp_resume(void) { mfp_config_run(); @@ -47,24 +47,13 @@ static int pxa3xx_mfp_resume(struct sys_device *d) * preserve them here in case they will be referenced later */ ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S); - return 0; } #else #define pxa3xx_mfp_suspend NULL #define pxa3xx_mfp_resume NULL #endif -struct sysdev_class pxa3xx_mfp_sysclass = { - .name = "mfp", +struct syscore_ops pxa3xx_mfp_syscore_ops = { .suspend = pxa3xx_mfp_suspend, - .resume = pxa3xx_mfp_resume, + .resume = pxa3xx_mfp_resume, }; - -static int __init mfp_init_devicefs(void) -{ - if (cpu_is_pxa3xx()) - return sysdev_class_register(&pxa3xx_mfp_sysclass); - - return 0; -} -postcore_initcall(mfp_init_devicefs); diff --git a/trunk/arch/arm/mach-pxa/mioa701.c b/trunk/arch/arm/mach-pxa/mioa701.c index 23925db8ff74..e3470137c934 100644 --- a/trunk/arch/arm/mach-pxa/mioa701.c +++ b/trunk/arch/arm/mach-pxa/mioa701.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include #include @@ -488,7 +488,7 @@ static void install_bootstrap(void) } -static int mioa701_sys_suspend(struct sys_device *sysdev, pm_message_t state) +static int mioa701_sys_suspend(void) { int i = 0, is_bt_on; u32 *mem_resume_vector = phys_to_virt(RESUME_VECTOR_ADDR); @@ -514,7 +514,7 @@ static int mioa701_sys_suspend(struct sys_device *sysdev, pm_message_t state) return 0; } -static int mioa701_sys_resume(struct sys_device *sysdev) +static void mioa701_sys_resume(void) { int i = 0; u32 *mem_resume_vector = phys_to_virt(RESUME_VECTOR_ADDR); @@ -527,43 +527,18 @@ static int mioa701_sys_resume(struct sys_device *sysdev) *mem_resume_enabler = save_buffer[i++]; *mem_resume_bt = save_buffer[i++]; *mem_resume_unknown = save_buffer[i++]; - - return 0; } -static struct sysdev_class mioa701_sysclass = { - .name = "mioa701", -}; - -static struct sys_device sysdev_bootstrap = { - .cls = &mioa701_sysclass, -}; - -static struct sysdev_driver driver_bootstrap = { - .suspend = &mioa701_sys_suspend, - .resume = &mioa701_sys_resume, +static struct syscore_ops mioa701_syscore_ops = { + .suspend = mioa701_sys_suspend, + .resume = mioa701_sys_resume, }; static int __init bootstrap_init(void) { - int rc; int save_size = mioa701_bootstrap_lg + (sizeof(u32) * 3); - rc = sysdev_class_register(&mioa701_sysclass); - if (rc) { - printk(KERN_ERR "Failed registering mioa701 sys class\n"); - return -ENODEV; - } - rc = sysdev_register(&sysdev_bootstrap); - if (rc) { - printk(KERN_ERR "Failed registering mioa701 sys device\n"); - return -ENODEV; - } - rc = sysdev_driver_register(&mioa701_sysclass, &driver_bootstrap); - if (rc) { - printk(KERN_ERR "Failed registering PMU sys driver\n"); - return -ENODEV; - } + register_syscore_ops(&mioa701_syscore_ops); save_buffer = kmalloc(save_size, GFP_KERNEL); if (!save_buffer) @@ -576,9 +551,7 @@ static int __init bootstrap_init(void) static void bootstrap_exit(void) { kfree(save_buffer); - sysdev_driver_unregister(&mioa701_sysclass, &driver_bootstrap); - sysdev_unregister(&sysdev_bootstrap); - sysdev_class_unregister(&mioa701_sysclass); + unregister_syscore_ops(&mioa701_syscore_ops); printk(KERN_CRIT "Unregistering mioa701 suspend will hang next" "resume !!!\n"); diff --git a/trunk/arch/arm/mach-pxa/palmld.c 
b/trunk/arch/arm/mach-pxa/palmld.c index a6f898cbfac9..4061ecddee70 100644 --- a/trunk/arch/arm/mach-pxa/palmld.c +++ b/trunk/arch/arm/mach-pxa/palmld.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/arch/arm/mach-pxa/palmtreo.c b/trunk/arch/arm/mach-pxa/palmtreo.c index 8aadad55fbe4..20d1b18b1733 100644 --- a/trunk/arch/arm/mach-pxa/palmtreo.c +++ b/trunk/arch/arm/mach-pxa/palmtreo.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include diff --git a/trunk/arch/arm/mach-pxa/palmz72.c b/trunk/arch/arm/mach-pxa/palmz72.c index 3b8a4f37dbbe..65f24f0b77e8 100644 --- a/trunk/arch/arm/mach-pxa/palmz72.c +++ b/trunk/arch/arm/mach-pxa/palmz72.c @@ -19,7 +19,7 @@ */ #include -#include +#include #include #include #include @@ -233,9 +233,9 @@ static struct palmz72_resume_info palmz72_resume_info = { static unsigned long store_ptr; -/* sys_device for Palm Zire 72 PM */ +/* syscore_ops for Palm Zire 72 PM */ -static int palmz72_pm_suspend(struct sys_device *dev, pm_message_t msg) +static int palmz72_pm_suspend(void) { /* setup the resume_info struct for the original bootloader */ palmz72_resume_info.resume_addr = (u32) cpu_resume; @@ -249,31 +249,23 @@ static int palmz72_pm_suspend(struct sys_device *dev, pm_message_t msg) return 0; } -static int palmz72_pm_resume(struct sys_device *dev) +static void palmz72_pm_resume(void) { *PALMZ72_SAVE_DWORD = store_ptr; - return 0; } -static struct sysdev_class palmz72_pm_sysclass = { - .name = "palmz72_pm", +static struct syscore_ops palmz72_pm_syscore_ops = { .suspend = palmz72_pm_suspend, .resume = palmz72_pm_resume, }; -static struct sys_device palmz72_pm_device = { - .cls = &palmz72_pm_sysclass, -}; - static int __init palmz72_pm_init(void) { - int ret = -ENODEV; if (machine_is_palmz72()) { - ret = sysdev_class_register(&palmz72_pm_sysclass); - if (ret == 0) - ret = sysdev_register(&palmz72_pm_device); + register_syscore_ops(&palmz72_pm_syscore_ops); + return 0; } - return ret; + return -ENODEV; } device_initcall(palmz72_pm_init); diff --git a/trunk/arch/arm/mach-pxa/pxa25x.c b/trunk/arch/arm/mach-pxa/pxa25x.c index 6bde5956358d..fed363cec9c6 100644 --- a/trunk/arch/arm/mach-pxa/pxa25x.c +++ b/trunk/arch/arm/mach-pxa/pxa25x.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include @@ -285,7 +285,7 @@ static inline void pxa25x_init_pm(void) {} static int pxa25x_set_wake(struct irq_data *d, unsigned int on) { - int gpio = IRQ_TO_GPIO(d->irq); + int gpio = irq_to_gpio(d->irq); uint32_t mask = 0; if (gpio >= 0 && gpio < 85) @@ -350,21 +350,9 @@ static struct platform_device *pxa25x_devices[] __initdata = { &pxa_device_asoc_platform, }; -static struct sys_device pxa25x_sysdev[] = { - { - .cls = &pxa_irq_sysclass, - }, { - .cls = &pxa2xx_mfp_sysclass, - }, { - .cls = &pxa_gpio_sysclass, - }, { - .cls = &pxa2xx_clock_sysclass, - } -}; - static int __init pxa25x_init(void) { - int i, ret = 0; + int ret = 0; if (cpu_is_pxa25x()) { @@ -377,11 +365,10 @@ static int __init pxa25x_init(void) pxa25x_init_pm(); - for (i = 0; i < ARRAY_SIZE(pxa25x_sysdev); i++) { - ret = sysdev_register(&pxa25x_sysdev[i]); - if (ret) - pr_err("failed to register sysdev[%d]\n", i); - } + register_syscore_ops(&pxa_irq_syscore_ops); + register_syscore_ops(&pxa2xx_mfp_syscore_ops); + register_syscore_ops(&pxa_gpio_syscore_ops); + register_syscore_ops(&pxa2xx_clock_syscore_ops); ret = platform_add_devices(pxa25x_devices, ARRAY_SIZE(pxa25x_devices)); diff --git a/trunk/arch/arm/mach-pxa/pxa27x.c 
b/trunk/arch/arm/mach-pxa/pxa27x.c index 1cb5d0f9723f..2fecbec58d88 100644 --- a/trunk/arch/arm/mach-pxa/pxa27x.c +++ b/trunk/arch/arm/mach-pxa/pxa27x.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include @@ -345,7 +345,7 @@ static inline void pxa27x_init_pm(void) {} */ static int pxa27x_set_wake(struct irq_data *d, unsigned int on) { - int gpio = IRQ_TO_GPIO(d->irq); + int gpio = irq_to_gpio(d->irq); uint32_t mask; if (gpio >= 0 && gpio < 128) @@ -428,21 +428,9 @@ static struct platform_device *devices[] __initdata = { &pxa27x_device_pwm1, }; -static struct sys_device pxa27x_sysdev[] = { - { - .cls = &pxa_irq_sysclass, - }, { - .cls = &pxa2xx_mfp_sysclass, - }, { - .cls = &pxa_gpio_sysclass, - }, { - .cls = &pxa2xx_clock_sysclass, - } -}; - static int __init pxa27x_init(void) { - int i, ret = 0; + int ret = 0; if (cpu_is_pxa27x()) { @@ -455,11 +443,10 @@ static int __init pxa27x_init(void) pxa27x_init_pm(); - for (i = 0; i < ARRAY_SIZE(pxa27x_sysdev); i++) { - ret = sysdev_register(&pxa27x_sysdev[i]); - if (ret) - pr_err("failed to register sysdev[%d]\n", i); - } + register_syscore_ops(&pxa_irq_syscore_ops); + register_syscore_ops(&pxa2xx_mfp_syscore_ops); + register_syscore_ops(&pxa_gpio_syscore_ops); + register_syscore_ops(&pxa2xx_clock_syscore_ops); ret = platform_add_devices(devices, ARRAY_SIZE(devices)); } diff --git a/trunk/arch/arm/mach-pxa/pxa3xx.c b/trunk/arch/arm/mach-pxa/pxa3xx.c index 8dd107391157..8521d7d6f1da 100644 --- a/trunk/arch/arm/mach-pxa/pxa3xx.c +++ b/trunk/arch/arm/mach-pxa/pxa3xx.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include @@ -427,21 +427,9 @@ static struct platform_device *devices[] __initdata = { &pxa27x_device_pwm1, }; -static struct sys_device pxa3xx_sysdev[] = { - { - .cls = &pxa_irq_sysclass, - }, { - .cls = &pxa3xx_mfp_sysclass, - }, { - .cls = &pxa_gpio_sysclass, - }, { - .cls = &pxa3xx_clock_sysclass, - } -}; - static int __init pxa3xx_init(void) { - int i, ret = 0; + int ret = 0; if (cpu_is_pxa3xx()) { @@ -462,11 +450,10 @@ static int __init pxa3xx_init(void) pxa3xx_init_pm(); - for (i = 0; i < ARRAY_SIZE(pxa3xx_sysdev); i++) { - ret = sysdev_register(&pxa3xx_sysdev[i]); - if (ret) - pr_err("failed to register sysdev[%d]\n", i); - } + register_syscore_ops(&pxa_irq_syscore_ops); + register_syscore_ops(&pxa3xx_mfp_syscore_ops); + register_syscore_ops(&pxa_gpio_syscore_ops); + register_syscore_ops(&pxa3xx_clock_syscore_ops); ret = platform_add_devices(devices, ARRAY_SIZE(devices)); } diff --git a/trunk/arch/arm/mach-pxa/pxa95x.c b/trunk/arch/arm/mach-pxa/pxa95x.c index 23b229bd06e9..ecc82a330fad 100644 --- a/trunk/arch/arm/mach-pxa/pxa95x.c +++ b/trunk/arch/arm/mach-pxa/pxa95x.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include @@ -260,16 +260,6 @@ static struct platform_device *devices[] __initdata = { &pxa27x_device_pwm1, }; -static struct sys_device pxa95x_sysdev[] = { - { - .cls = &pxa_irq_sysclass, - }, { - .cls = &pxa_gpio_sysclass, - }, { - .cls = &pxa3xx_clock_sysclass, - } -}; - static int __init pxa95x_init(void) { int ret = 0, i; @@ -293,11 +283,9 @@ static int __init pxa95x_init(void) if ((ret = pxa_init_dma(IRQ_DMA, 32))) return ret; - for (i = 0; i < ARRAY_SIZE(pxa95x_sysdev); i++) { - ret = sysdev_register(&pxa95x_sysdev[i]); - if (ret) - pr_err("failed to register sysdev[%d]\n", i); - } + register_syscore_ops(&pxa_irq_syscore_ops); + register_syscore_ops(&pxa_gpio_syscore_ops); + register_syscore_ops(&pxa3xx_clock_syscore_ops); 
ret = platform_add_devices(devices, ARRAY_SIZE(devices)); } diff --git a/trunk/arch/arm/mach-pxa/raumfeld.c b/trunk/arch/arm/mach-pxa/raumfeld.c index cd1861351f75..d130f77b6d11 100644 --- a/trunk/arch/arm/mach-pxa/raumfeld.c +++ b/trunk/arch/arm/mach-pxa/raumfeld.c @@ -18,7 +18,6 @@ #include #include -#include #include #include #include diff --git a/trunk/arch/arm/mach-pxa/smemc.c b/trunk/arch/arm/mach-pxa/smemc.c index 232b7316ec08..79923058d10f 100644 --- a/trunk/arch/arm/mach-pxa/smemc.c +++ b/trunk/arch/arm/mach-pxa/smemc.c @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include #include @@ -16,7 +16,7 @@ static unsigned long msc[2]; static unsigned long sxcnfg, memclkcfg; static unsigned long csadrcfg[4]; -static int pxa3xx_smemc_suspend(struct sys_device *dev, pm_message_t state) +static int pxa3xx_smemc_suspend(void) { msc[0] = __raw_readl(MSC0); msc[1] = __raw_readl(MSC1); @@ -30,7 +30,7 @@ static int pxa3xx_smemc_suspend(struct sys_device *dev, pm_message_t state) return 0; } -static int pxa3xx_smemc_resume(struct sys_device *dev) +static void pxa3xx_smemc_resume(void) { __raw_writel(msc[0], MSC0); __raw_writel(msc[1], MSC1); @@ -40,34 +40,19 @@ static int pxa3xx_smemc_resume(struct sys_device *dev) __raw_writel(csadrcfg[1], CSADRCFG1); __raw_writel(csadrcfg[2], CSADRCFG2); __raw_writel(csadrcfg[3], CSADRCFG3); - - return 0; } -static struct sysdev_class smemc_sysclass = { - .name = "smemc", +static struct syscore_ops smemc_syscore_ops = { .suspend = pxa3xx_smemc_suspend, .resume = pxa3xx_smemc_resume, }; -static struct sys_device smemc_sysdev = { - .id = 0, - .cls = &smemc_sysclass, -}; - static int __init smemc_init(void) { - int ret = 0; + if (cpu_is_pxa3xx()) + register_syscore_ops(&smemc_syscore_ops); - if (cpu_is_pxa3xx()) { - ret = sysdev_class_register(&smemc_sysclass); - if (ret) - return ret; - - ret = sysdev_register(&smemc_sysdev); - } - - return ret; + return 0; } subsys_initcall(smemc_init); #endif diff --git a/trunk/arch/arm/mach-pxa/trizeps4.c b/trunk/arch/arm/mach-pxa/trizeps4.c index b9cfbebdfe9c..687417a93698 100644 --- a/trunk/arch/arm/mach-pxa/trizeps4.c +++ b/trunk/arch/arm/mach-pxa/trizeps4.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/arch/arm/mach-pxa/viper.c b/trunk/arch/arm/mach-pxa/viper.c index b523f119e0f0..903218eab56d 100644 --- a/trunk/arch/arm/mach-pxa/viper.c +++ b/trunk/arch/arm/mach-pxa/viper.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include @@ -130,20 +131,19 @@ static u8 viper_hw_version(void) return v1; } -/* CPU sysdev */ -static int viper_cpu_suspend(struct sys_device *sysdev, pm_message_t state) +/* CPU system core operations. */ +static int viper_cpu_suspend(void) { viper_icr_set_bit(VIPER_ICR_R_DIS); return 0; } -static int viper_cpu_resume(struct sys_device *sysdev) +static void viper_cpu_resume(void) { viper_icr_clear_bit(VIPER_ICR_R_DIS); - return 0; } -static struct sysdev_driver viper_cpu_sysdev_driver = { +static struct syscore_ops viper_cpu_syscore_ops = { .suspend = viper_cpu_suspend, .resume = viper_cpu_resume, }; @@ -945,7 +945,7 @@ static void __init viper_init(void) viper_init_vcore_gpios(); viper_init_cpufreq(); - sysdev_driver_register(&cpu_sysdev_class, &viper_cpu_sysdev_driver); + register_syscore_ops(&viper_cpu_syscore_ops); if (version) { pr_info("viper: hardware v%di%d detected. 
" diff --git a/trunk/arch/arm/mach-pxa/vpac270.c b/trunk/arch/arm/mach-pxa/vpac270.c index f71d377c8640..67bd41488bf8 100644 --- a/trunk/arch/arm/mach-pxa/vpac270.c +++ b/trunk/arch/arm/mach-pxa/vpac270.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/arch/arm/mach-realview/include/mach/barriers.h b/trunk/arch/arm/mach-realview/include/mach/barriers.h index 0c5d749d7b5f..9a732195aa1c 100644 --- a/trunk/arch/arm/mach-realview/include/mach/barriers.h +++ b/trunk/arch/arm/mach-realview/include/mach/barriers.h @@ -4,5 +4,5 @@ * operation to deadlock the system. */ #define mb() dsb() -#define rmb() dmb() +#define rmb() dsb() #define wmb() mb() diff --git a/trunk/arch/arm/mach-s3c2410/irq.c b/trunk/arch/arm/mach-s3c2410/irq.c index 5e2f35332056..2854129f8cc7 100644 --- a/trunk/arch/arm/mach-s3c2410/irq.c +++ b/trunk/arch/arm/mach-s3c2410/irq.c @@ -23,38 +23,12 @@ #include #include #include -#include +#include #include #include -static int s3c2410_irq_add(struct sys_device *sysdev) -{ - return 0; -} - -static struct sysdev_driver s3c2410_irq_driver = { - .add = s3c2410_irq_add, +struct syscore_ops s3c24xx_irq_syscore_ops = { .suspend = s3c24xx_irq_suspend, .resume = s3c24xx_irq_resume, }; - -static int __init s3c2410_irq_init(void) -{ - return sysdev_driver_register(&s3c2410_sysclass, &s3c2410_irq_driver); -} - -arch_initcall(s3c2410_irq_init); - -static struct sysdev_driver s3c2410a_irq_driver = { - .add = s3c2410_irq_add, - .suspend = s3c24xx_irq_suspend, - .resume = s3c24xx_irq_resume, -}; - -static int __init s3c2410a_irq_init(void) -{ - return sysdev_driver_register(&s3c2410a_sysclass, &s3c2410a_irq_driver); -} - -arch_initcall(s3c2410a_irq_init); diff --git a/trunk/arch/arm/mach-s3c2410/mach-bast.c b/trunk/arch/arm/mach-s3c2410/mach-bast.c index 2970ea9f7c2b..1e2d536adda9 100644 --- a/trunk/arch/arm/mach-s3c2410/mach-bast.c +++ b/trunk/arch/arm/mach-s3c2410/mach-bast.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include @@ -214,17 +214,16 @@ static struct s3c2410_uartcfg bast_uartcfgs[] __initdata = { /* NAND Flash on BAST board */ #ifdef CONFIG_PM -static int bast_pm_suspend(struct sys_device *sd, pm_message_t state) +static int bast_pm_suspend(void) { /* ensure that an nRESET is not generated on resume. 
*/ gpio_direction_output(S3C2410_GPA(21), 1); return 0; } -static int bast_pm_resume(struct sys_device *sd) +static void bast_pm_resume(void) { s3c_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT); - return 0; } #else @@ -232,16 +231,11 @@ static int bast_pm_resume(struct sys_device *sd) #define bast_pm_resume NULL #endif -static struct sysdev_class bast_pm_sysclass = { - .name = "mach-bast", +static struct syscore_ops bast_pm_syscore_ops = { .suspend = bast_pm_suspend, .resume = bast_pm_resume, }; -static struct sys_device bast_pm_sysdev = { - .cls = &bast_pm_sysclass, -}; - static int smartmedia_map[] = { 0 }; static int chip0_map[] = { 1 }; static int chip1_map[] = { 2 }; @@ -642,8 +636,7 @@ static void __init bast_map_io(void) static void __init bast_init(void) { - sysdev_class_register(&bast_pm_sysclass); - sysdev_register(&bast_pm_sysdev); + register_syscore_ops(&bast_pm_syscore_ops); s3c_i2c0_set_platdata(&bast_i2c_info); s3c_nand_set_platdata(&bast_nand_info); diff --git a/trunk/arch/arm/mach-s3c2410/pm.c b/trunk/arch/arm/mach-s3c2410/pm.c index 725636fc4dc3..4728f9aa7df1 100644 --- a/trunk/arch/arm/mach-s3c2410/pm.c +++ b/trunk/arch/arm/mach-s3c2410/pm.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -92,7 +93,7 @@ static void s3c2410_pm_prepare(void) } } -static int s3c2410_pm_resume(struct sys_device *dev) +static void s3c2410_pm_resume(void) { unsigned long tmp; @@ -104,10 +105,12 @@ static int s3c2410_pm_resume(struct sys_device *dev) if ( machine_is_aml_m5900() ) s3c2410_gpio_setpin(S3C2410_GPF(2), 0); - - return 0; } +struct syscore_ops s3c2410_pm_syscore_ops = { + .resume = s3c2410_pm_resume, +}; + static int s3c2410_pm_add(struct sys_device *dev) { pm_cpu_prep = s3c2410_pm_prepare; @@ -119,7 +122,6 @@ static int s3c2410_pm_add(struct sys_device *dev) #if defined(CONFIG_CPU_S3C2410) static struct sysdev_driver s3c2410_pm_driver = { .add = s3c2410_pm_add, - .resume = s3c2410_pm_resume, }; /* register ourselves */ @@ -133,7 +135,6 @@ arch_initcall(s3c2410_pm_drvinit); static struct sysdev_driver s3c2410a_pm_driver = { .add = s3c2410_pm_add, - .resume = s3c2410_pm_resume, }; static int __init s3c2410a_pm_drvinit(void) @@ -147,7 +148,6 @@ arch_initcall(s3c2410a_pm_drvinit); #if defined(CONFIG_CPU_S3C2440) static struct sysdev_driver s3c2440_pm_driver = { .add = s3c2410_pm_add, - .resume = s3c2410_pm_resume, }; static int __init s3c2440_pm_drvinit(void) @@ -161,7 +161,6 @@ arch_initcall(s3c2440_pm_drvinit); #if defined(CONFIG_CPU_S3C2442) static struct sysdev_driver s3c2442_pm_driver = { .add = s3c2410_pm_add, - .resume = s3c2410_pm_resume, }; static int __init s3c2442_pm_drvinit(void) diff --git a/trunk/arch/arm/mach-s3c2410/s3c2410.c b/trunk/arch/arm/mach-s3c2410/s3c2410.c index adc90a3c5890..f1d3bd8f6f17 100644 --- a/trunk/arch/arm/mach-s3c2410/s3c2410.c +++ b/trunk/arch/arm/mach-s3c2410/s3c2410.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -40,6 +41,7 @@ #include #include #include +#include #include #include @@ -168,6 +170,9 @@ int __init s3c2410_init(void) { printk("S3C2410: Initialising architecture\n"); + register_syscore_ops(&s3c2410_pm_syscore_ops); + register_syscore_ops(&s3c24xx_irq_syscore_ops); + return sysdev_register(&s3c2410_sysdev); } diff --git a/trunk/arch/arm/mach-s3c2412/irq.c b/trunk/arch/arm/mach-s3c2412/irq.c index f3355d2ec634..1a1aa220972b 100644 --- a/trunk/arch/arm/mach-s3c2412/irq.c +++ b/trunk/arch/arm/mach-s3c2412/irq.c @@ -202,8 +202,6 @@ static int s3c2412_irq_add(struct 
sys_device *sysdev) static struct sysdev_driver s3c2412_irq_driver = { .add = s3c2412_irq_add, - .suspend = s3c24xx_irq_suspend, - .resume = s3c24xx_irq_resume, }; static int s3c2412_irq_init(void) diff --git a/trunk/arch/arm/mach-s3c2412/mach-jive.c b/trunk/arch/arm/mach-s3c2412/mach-jive.c index 923e01bdf017..85dcaeb9e62f 100644 --- a/trunk/arch/arm/mach-s3c2412/mach-jive.c +++ b/trunk/arch/arm/mach-s3c2412/mach-jive.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include @@ -486,7 +486,7 @@ static struct s3c2410_udc_mach_info jive_udc_cfg __initdata = { /* Jive power management device */ #ifdef CONFIG_PM -static int jive_pm_suspend(struct sys_device *sd, pm_message_t state) +static int jive_pm_suspend(void) { /* Write the magic value u-boot uses to check for resume into * the INFORM0 register, and ensure INFORM1 is set to the @@ -498,10 +498,9 @@ static int jive_pm_suspend(struct sys_device *sd, pm_message_t state) return 0; } -static int jive_pm_resume(struct sys_device *sd) +static void jive_pm_resume(void) { __raw_writel(0x0, S3C2412_INFORM0); - return 0; } #else @@ -509,16 +508,11 @@ static int jive_pm_resume(struct sys_device *sd) #define jive_pm_resume NULL #endif -static struct sysdev_class jive_pm_sysclass = { - .name = "jive-pm", +static struct syscore_ops jive_pm_syscore_ops = { .suspend = jive_pm_suspend, .resume = jive_pm_resume, }; -static struct sys_device jive_pm_sysdev = { - .cls = &jive_pm_sysclass, -}; - static void __init jive_map_io(void) { s3c24xx_init_io(jive_iodesc, ARRAY_SIZE(jive_iodesc)); @@ -536,10 +530,9 @@ static void jive_power_off(void) static void __init jive_machine_init(void) { - /* register system devices for managing low level suspend */ + /* register system core operations for managing low level suspend */ - sysdev_class_register(&jive_pm_sysclass); - sysdev_register(&jive_pm_sysdev); + register_syscore_ops(&jive_pm_syscore_ops); /* write our sleep configurations for the IO. 
Pull down all unused * IO, ensure that we have turned off all peripherals we do not diff --git a/trunk/arch/arm/mach-s3c2412/pm.c b/trunk/arch/arm/mach-s3c2412/pm.c index a7417c479ffe..752b13a7b3db 100644 --- a/trunk/arch/arm/mach-s3c2412/pm.c +++ b/trunk/arch/arm/mach-s3c2412/pm.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -86,13 +87,24 @@ static struct sleep_save s3c2412_sleep[] = { SAVE_ITEM(S3C2413_GPJSLPCON), }; -static int s3c2412_pm_suspend(struct sys_device *dev, pm_message_t state) +static struct sysdev_driver s3c2412_pm_driver = { + .add = s3c2412_pm_add, +}; + +static __init int s3c2412_pm_init(void) +{ + return sysdev_driver_register(&s3c2412_sysclass, &s3c2412_pm_driver); +} + +arch_initcall(s3c2412_pm_init); + +static int s3c2412_pm_suspend(void) { s3c_pm_do_save(s3c2412_sleep, ARRAY_SIZE(s3c2412_sleep)); return 0; } -static int s3c2412_pm_resume(struct sys_device *dev) +static void s3c2412_pm_resume(void) { unsigned long tmp; @@ -102,18 +114,9 @@ static int s3c2412_pm_resume(struct sys_device *dev) __raw_writel(tmp, S3C2412_PWRCFG); s3c_pm_do_restore(s3c2412_sleep, ARRAY_SIZE(s3c2412_sleep)); - return 0; } -static struct sysdev_driver s3c2412_pm_driver = { - .add = s3c2412_pm_add, +struct syscore_ops s3c2412_pm_syscore_ops = { .suspend = s3c2412_pm_suspend, .resume = s3c2412_pm_resume, }; - -static __init int s3c2412_pm_init(void) -{ - return sysdev_driver_register(&s3c2412_sysclass, &s3c2412_pm_driver); -} - -arch_initcall(s3c2412_pm_init); diff --git a/trunk/arch/arm/mach-s3c2412/s3c2412.c b/trunk/arch/arm/mach-s3c2412/s3c2412.c index 4c6df51ddf33..ef0958d3e5c6 100644 --- a/trunk/arch/arm/mach-s3c2412/s3c2412.c +++ b/trunk/arch/arm/mach-s3c2412/s3c2412.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -244,5 +245,8 @@ int __init s3c2412_init(void) { printk("S3C2412: Initialising architecture\n"); + register_syscore_ops(&s3c2412_pm_syscore_ops); + register_syscore_ops(&s3c24xx_irq_syscore_ops); + return sysdev_register(&s3c2412_sysdev); } diff --git a/trunk/arch/arm/mach-s3c2416/irq.c b/trunk/arch/arm/mach-s3c2416/irq.c index 77b38f2381c1..28ad20d42445 100644 --- a/trunk/arch/arm/mach-s3c2416/irq.c +++ b/trunk/arch/arm/mach-s3c2416/irq.c @@ -236,8 +236,6 @@ static int __init s3c2416_irq_add(struct sys_device *sysdev) static struct sysdev_driver s3c2416_irq_driver = { .add = s3c2416_irq_add, - .suspend = s3c24xx_irq_suspend, - .resume = s3c24xx_irq_resume, }; static int __init s3c2416_irq_init(void) diff --git a/trunk/arch/arm/mach-s3c2416/pm.c b/trunk/arch/arm/mach-s3c2416/pm.c index 4a04205b04d5..41db2b21e213 100644 --- a/trunk/arch/arm/mach-s3c2416/pm.c +++ b/trunk/arch/arm/mach-s3c2416/pm.c @@ -11,6 +11,7 @@ */ #include +#include #include #include @@ -55,30 +56,26 @@ static int s3c2416_pm_add(struct sys_device *sysdev) return 0; } -static int s3c2416_pm_suspend(struct sys_device *dev, pm_message_t state) +static struct sysdev_driver s3c2416_pm_driver = { + .add = s3c2416_pm_add, +}; + +static __init int s3c2416_pm_init(void) { - return 0; + return sysdev_driver_register(&s3c2416_sysclass, &s3c2416_pm_driver); } -static int s3c2416_pm_resume(struct sys_device *dev) +arch_initcall(s3c2416_pm_init); + + +static void s3c2416_pm_resume(void) { /* unset the return-from-sleep amd inform flags */ __raw_writel(0x0, S3C2443_PWRMODE); __raw_writel(0x0, S3C2412_INFORM0); __raw_writel(0x0, S3C2412_INFORM1); - - return 0; } -static struct sysdev_driver s3c2416_pm_driver = { - .add = s3c2416_pm_add, - .suspend = 
s3c2416_pm_suspend, +struct syscore_ops s3c2416_pm_syscore_ops = { .resume = s3c2416_pm_resume, }; - -static __init int s3c2416_pm_init(void) -{ - return sysdev_driver_register(&s3c2416_sysclass, &s3c2416_pm_driver); -} - -arch_initcall(s3c2416_pm_init); diff --git a/trunk/arch/arm/mach-s3c2416/s3c2416.c b/trunk/arch/arm/mach-s3c2416/s3c2416.c index ba7fd8737434..494ce913dc95 100644 --- a/trunk/arch/arm/mach-s3c2416/s3c2416.c +++ b/trunk/arch/arm/mach-s3c2416/s3c2416.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -54,6 +55,7 @@ #include #include #include +#include #include #include @@ -95,6 +97,9 @@ int __init s3c2416_init(void) s3c_fb_setname("s3c2443-fb"); + register_syscore_ops(&s3c2416_pm_syscore_ops); + register_syscore_ops(&s3c24xx_irq_syscore_ops); + return sysdev_register(&s3c2416_sysdev); } diff --git a/trunk/arch/arm/mach-s3c2440/mach-gta02.c b/trunk/arch/arm/mach-s3c2440/mach-gta02.c index 0db2411ef4bb..716662008ce2 100644 --- a/trunk/arch/arm/mach-s3c2440/mach-gta02.c +++ b/trunk/arch/arm/mach-s3c2440/mach-gta02.c @@ -409,6 +409,10 @@ struct platform_device s3c24xx_pwm_device = { .num_resources = 0, }; +static struct platform_device gta02_dfbmcs320_device = { + .name = "dfbmcs320", +}; + static struct i2c_board_info gta02_i2c_devs[] __initdata = { { I2C_BOARD_INFO("pcf50633", 0x73), @@ -523,6 +527,7 @@ static struct platform_device *gta02_devices[] __initdata = { &s3c_device_iis, &samsung_asoc_dma, &s3c_device_i2c0, + &gta02_dfbmcs320_device, &gta02_buttons_device, &s3c_device_adc, &s3c_device_ts, diff --git a/trunk/arch/arm/mach-s3c2440/mach-osiris.c b/trunk/arch/arm/mach-s3c2440/mach-osiris.c index 14dc67897757..d88536393310 100644 --- a/trunk/arch/arm/mach-s3c2440/mach-osiris.c +++ b/trunk/arch/arm/mach-s3c2440/mach-osiris.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include @@ -284,7 +284,7 @@ static struct platform_device osiris_pcmcia = { #ifdef CONFIG_PM static unsigned char pm_osiris_ctrl0; -static int osiris_pm_suspend(struct sys_device *sd, pm_message_t state) +static int osiris_pm_suspend(void) { unsigned int tmp; @@ -304,7 +304,7 @@ static int osiris_pm_suspend(struct sys_device *sd, pm_message_t state) return 0; } -static int osiris_pm_resume(struct sys_device *sd) +static void osiris_pm_resume(void) { if (pm_osiris_ctrl0 & OSIRIS_CTRL0_FIX8) __raw_writeb(OSIRIS_CTRL1_FIX8, OSIRIS_VA_CTRL1); @@ -312,8 +312,6 @@ static int osiris_pm_resume(struct sys_device *sd) __raw_writeb(pm_osiris_ctrl0, OSIRIS_VA_CTRL0); s3c_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT); - - return 0; } #else @@ -321,16 +319,11 @@ static int osiris_pm_resume(struct sys_device *sd) #define osiris_pm_resume NULL #endif -static struct sysdev_class osiris_pm_sysclass = { - .name = "mach-osiris", +static struct syscore_ops osiris_pm_syscore_ops = { .suspend = osiris_pm_suspend, .resume = osiris_pm_resume, }; -static struct sys_device osiris_pm_sysdev = { - .cls = &osiris_pm_sysclass, -}; - /* Link for DVS driver to TPS65011 */ static void osiris_tps_release(struct device *dev) @@ -439,8 +432,7 @@ static void __init osiris_map_io(void) static void __init osiris_init(void) { - sysdev_class_register(&osiris_pm_sysclass); - sysdev_register(&osiris_pm_sysdev); + register_syscore_ops(&osiris_pm_syscore_ops); s3c_i2c0_set_platdata(NULL); s3c_nand_set_platdata(&osiris_nand_info); diff --git a/trunk/arch/arm/mach-s3c2440/s3c2440.c b/trunk/arch/arm/mach-s3c2440/s3c2440.c index f7663f731ea0..ce99ff72838d 100644 ---
a/trunk/arch/arm/mach-s3c2440/s3c2440.c +++ b/trunk/arch/arm/mach-s3c2440/s3c2440.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -33,6 +34,7 @@ #include #include #include +#include #include #include @@ -51,6 +53,12 @@ int __init s3c2440_init(void) s3c_device_wdt.resource[1].start = IRQ_S3C2440_WDT; s3c_device_wdt.resource[1].end = IRQ_S3C2440_WDT; + /* register suspend/resume handlers */ + + register_syscore_ops(&s3c2410_pm_syscore_ops); + register_syscore_ops(&s3c244x_pm_syscore_ops); + register_syscore_ops(&s3c24xx_irq_syscore_ops); + /* register our system device for everything else */ return sysdev_register(&s3c2440_sysdev); diff --git a/trunk/arch/arm/mach-s3c2440/s3c2442.c b/trunk/arch/arm/mach-s3c2440/s3c2442.c index ecf813546554..6224bad4d604 100644 --- a/trunk/arch/arm/mach-s3c2440/s3c2442.c +++ b/trunk/arch/arm/mach-s3c2440/s3c2442.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -45,6 +46,7 @@ #include #include #include +#include #include #include @@ -167,6 +169,10 @@ int __init s3c2442_init(void) { printk("S3C2442: Initialising architecture\n"); + register_syscore_ops(&s3c2410_pm_syscore_ops); + register_syscore_ops(&s3c244x_pm_syscore_ops); + register_syscore_ops(&s3c24xx_irq_syscore_ops); + return sysdev_register(&s3c2442_sysdev); } diff --git a/trunk/arch/arm/mach-s3c2440/s3c244x-irq.c b/trunk/arch/arm/mach-s3c2440/s3c244x-irq.c index de07c2feaa32..c63e8f26d901 100644 --- a/trunk/arch/arm/mach-s3c2440/s3c244x-irq.c +++ b/trunk/arch/arm/mach-s3c2440/s3c244x-irq.c @@ -116,8 +116,6 @@ static int s3c244x_irq_add(struct sys_device *sysdev) static struct sysdev_driver s3c2440_irq_driver = { .add = s3c244x_irq_add, - .suspend = s3c24xx_irq_suspend, - .resume = s3c24xx_irq_resume, }; static int s3c2440_irq_init(void) @@ -129,8 +127,6 @@ arch_initcall(s3c2440_irq_init); static struct sysdev_driver s3c2442_irq_driver = { .add = s3c244x_irq_add, - .suspend = s3c24xx_irq_suspend, - .resume = s3c24xx_irq_resume, }; diff --git a/trunk/arch/arm/mach-s3c2440/s3c244x.c b/trunk/arch/arm/mach-s3c2440/s3c244x.c index 90c1707b9c95..7e8a23d2098a 100644 --- a/trunk/arch/arm/mach-s3c2440/s3c244x.c +++ b/trunk/arch/arm/mach-s3c2440/s3c244x.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -134,45 +135,14 @@ void __init s3c244x_init_clocks(int xtal) s3c2410_baseclk_add(); } -#ifdef CONFIG_PM - -static struct sleep_save s3c244x_sleep[] = { - SAVE_ITEM(S3C2440_DSC0), - SAVE_ITEM(S3C2440_DSC1), - SAVE_ITEM(S3C2440_GPJDAT), - SAVE_ITEM(S3C2440_GPJCON), - SAVE_ITEM(S3C2440_GPJUP) -}; - -static int s3c244x_suspend(struct sys_device *dev, pm_message_t state) -{ - s3c_pm_do_save(s3c244x_sleep, ARRAY_SIZE(s3c244x_sleep)); - return 0; -} - -static int s3c244x_resume(struct sys_device *dev) -{ - s3c_pm_do_restore(s3c244x_sleep, ARRAY_SIZE(s3c244x_sleep)); - return 0; -} - -#else -#define s3c244x_suspend NULL -#define s3c244x_resume NULL -#endif - /* Since the S3C2442 and S3C2440 share items, put both sysclasses here */ struct sysdev_class s3c2440_sysclass = { .name = "s3c2440-core", - .suspend = s3c244x_suspend, - .resume = s3c244x_resume }; struct sysdev_class s3c2442_sysclass = { .name = "s3c2442-core", - .suspend = s3c244x_suspend, - .resume = s3c244x_resume }; /* need to register class before we actually register the device, and @@ -194,3 +164,33 @@ static int __init s3c2442_core_init(void) } core_initcall(s3c2442_core_init); + + +#ifdef CONFIG_PM +static struct sleep_save s3c244x_sleep[] = { + 
SAVE_ITEM(S3C2440_DSC0), + SAVE_ITEM(S3C2440_DSC1), + SAVE_ITEM(S3C2440_GPJDAT), + SAVE_ITEM(S3C2440_GPJCON), + SAVE_ITEM(S3C2440_GPJUP) +}; + +static int s3c244x_suspend(void) +{ + s3c_pm_do_save(s3c244x_sleep, ARRAY_SIZE(s3c244x_sleep)); + return 0; +} + +static void s3c244x_resume(void) +{ + s3c_pm_do_restore(s3c244x_sleep, ARRAY_SIZE(s3c244x_sleep)); +} +#else +#define s3c244x_suspend NULL +#define s3c244x_resume NULL +#endif + +struct syscore_ops s3c244x_pm_syscore_ops = { + .suspend = s3c244x_suspend, + .resume = s3c244x_resume, +}; diff --git a/trunk/arch/arm/mach-s3c64xx/irq-pm.c b/trunk/arch/arm/mach-s3c64xx/irq-pm.c index da1bec64b9da..8bec61e242c7 100644 --- a/trunk/arch/arm/mach-s3c64xx/irq-pm.c +++ b/trunk/arch/arm/mach-s3c64xx/irq-pm.c @@ -13,7 +13,7 @@ */ #include -#include +#include #include #include #include @@ -54,7 +54,7 @@ static struct irq_grp_save { static u32 irq_uart_mask[CONFIG_SERIAL_SAMSUNG_UARTS]; -static int s3c64xx_irq_pm_suspend(struct sys_device *dev, pm_message_t state) +static int s3c64xx_irq_pm_suspend(void) { struct irq_grp_save *grp = eint_grp_save; int i; @@ -75,7 +75,7 @@ static int s3c64xx_irq_pm_suspend(struct sys_device *dev, pm_message_t state) return 0; } -static int s3c64xx_irq_pm_resume(struct sys_device *dev) +static void s3c64xx_irq_pm_resume(void) { struct irq_grp_save *grp = eint_grp_save; int i; @@ -94,18 +94,18 @@ static int s3c64xx_irq_pm_resume(struct sys_device *dev) } S3C_PMDBG("%s: IRQ configuration restored\n", __func__); - return 0; } -static struct sysdev_driver s3c64xx_irq_driver = { +struct syscore_ops s3c64xx_irq_syscore_ops = { .suspend = s3c64xx_irq_pm_suspend, .resume = s3c64xx_irq_pm_resume, }; -static int __init s3c64xx_irq_pm_init(void) +static __init int s3c64xx_syscore_init(void) { - return sysdev_driver_register(&s3c64xx_sysclass, &s3c64xx_irq_driver); -} + register_syscore_ops(&s3c64xx_irq_syscore_ops); -arch_initcall(s3c64xx_irq_pm_init); + return 0; +} +core_initcall(s3c64xx_syscore_init); diff --git a/trunk/arch/arm/mach-s5pv210/pm.c b/trunk/arch/arm/mach-s5pv210/pm.c index 549d7924fd4c..24febae3d4c0 100644 --- a/trunk/arch/arm/mach-s5pv210/pm.c +++ b/trunk/arch/arm/mach-s5pv210/pm.c @@ -16,6 +16,7 @@ #include #include +#include #include #include @@ -140,7 +141,17 @@ static int s5pv210_pm_add(struct sys_device *sysdev) return 0; } -static int s5pv210_pm_resume(struct sys_device *dev) +static struct sysdev_driver s5pv210_pm_driver = { + .add = s5pv210_pm_add, +}; + +static __init int s5pv210_pm_drvinit(void) +{ + return sysdev_driver_register(&s5pv210_sysclass, &s5pv210_pm_driver); +} +arch_initcall(s5pv210_pm_drvinit); + +static void s5pv210_pm_resume(void) { u32 tmp; @@ -150,17 +161,15 @@ static int s5pv210_pm_resume(struct sys_device *dev) __raw_writel(tmp , S5P_OTHERS); s3c_pm_do_restore_core(s5pv210_core_save, ARRAY_SIZE(s5pv210_core_save)); - - return 0; } -static struct sysdev_driver s5pv210_pm_driver = { - .add = s5pv210_pm_add, +static struct syscore_ops s5pv210_pm_syscore_ops = { .resume = s5pv210_pm_resume, }; -static __init int s5pv210_pm_drvinit(void) +static __init int s5pv210_pm_syscore_init(void) { - return sysdev_driver_register(&s5pv210_sysclass, &s5pv210_pm_driver); + register_syscore_ops(&s5pv210_pm_syscore_ops); + return 0; } -arch_initcall(s5pv210_pm_drvinit); +arch_initcall(s5pv210_pm_syscore_init); diff --git a/trunk/arch/arm/mach-sa1100/irq.c b/trunk/arch/arm/mach-sa1100/irq.c index 423ddb3d65e9..dfbf824a69fa 100644 --- a/trunk/arch/arm/mach-sa1100/irq.c +++ 
b/trunk/arch/arm/mach-sa1100/irq.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include @@ -234,7 +234,7 @@ static struct sa1100irq_state { unsigned int iccr; } sa1100irq_state; -static int sa1100irq_suspend(struct sys_device *dev, pm_message_t state) +static int sa1100irq_suspend(void) { struct sa1100irq_state *st = &sa1100irq_state; @@ -264,7 +264,7 @@ static int sa1100irq_suspend(struct sys_device *dev, pm_message_t state) return 0; } -static int sa1100irq_resume(struct sys_device *dev) +static void sa1100irq_resume(void) { struct sa1100irq_state *st = &sa1100irq_state; @@ -277,24 +277,17 @@ static int sa1100irq_resume(struct sys_device *dev) ICMR = st->icmr; } - return 0; } -static struct sysdev_class sa1100irq_sysclass = { - .name = "sa11x0-irq", +static struct syscore_ops sa1100irq_syscore_ops = { .suspend = sa1100irq_suspend, .resume = sa1100irq_resume, }; -static struct sys_device sa1100irq_device = { - .id = 0, - .cls = &sa1100irq_sysclass, -}; - static int __init sa1100irq_init_devicefs(void) { - sysdev_class_register(&sa1100irq_sysclass); - return sysdev_register(&sa1100irq_device); + register_syscore_ops(&sa1100irq_syscore_ops); + return 0; } device_initcall(sa1100irq_init_devicefs); diff --git a/trunk/arch/arm/mach-shmobile/pm_runtime.c b/trunk/arch/arm/mach-shmobile/pm_runtime.c index 94912d3944d3..2d1b67a59e4a 100644 --- a/trunk/arch/arm/mach-shmobile/pm_runtime.c +++ b/trunk/arch/arm/mach-shmobile/pm_runtime.c @@ -18,152 +18,41 @@ #include #include #include +#include #ifdef CONFIG_PM_RUNTIME -#define BIT_ONCE 0 -#define BIT_ACTIVE 1 -#define BIT_CLK_ENABLED 2 -struct pm_runtime_data { - unsigned long flags; - struct clk *clk; -}; - -static void __devres_release(struct device *dev, void *res) -{ - struct pm_runtime_data *prd = res; - - dev_dbg(dev, "__devres_release()\n"); - - if (test_bit(BIT_CLK_ENABLED, &prd->flags)) - clk_disable(prd->clk); - - if (test_bit(BIT_ACTIVE, &prd->flags)) - clk_put(prd->clk); -} - -static struct pm_runtime_data *__to_prd(struct device *dev) -{ - return devres_find(dev, __devres_release, NULL, NULL); -} - -static void platform_pm_runtime_init(struct device *dev, - struct pm_runtime_data *prd) -{ - if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) { - prd->clk = clk_get(dev, NULL); - if (!IS_ERR(prd->clk)) { - set_bit(BIT_ACTIVE, &prd->flags); - dev_info(dev, "clocks managed by runtime pm\n"); - } - } -} - -static void platform_pm_runtime_bug(struct device *dev, - struct pm_runtime_data *prd) -{ - if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) - dev_err(dev, "runtime pm suspend before resume\n"); -} - -int platform_pm_runtime_suspend(struct device *dev) -{ - struct pm_runtime_data *prd = __to_prd(dev); - - dev_dbg(dev, "platform_pm_runtime_suspend()\n"); - - platform_pm_runtime_bug(dev, prd); - - if (prd && test_bit(BIT_ACTIVE, &prd->flags)) { - clk_disable(prd->clk); - clear_bit(BIT_CLK_ENABLED, &prd->flags); - } - - return 0; -} - -int platform_pm_runtime_resume(struct device *dev) -{ - struct pm_runtime_data *prd = __to_prd(dev); - - dev_dbg(dev, "platform_pm_runtime_resume()\n"); - - platform_pm_runtime_init(dev, prd); - - if (prd && test_bit(BIT_ACTIVE, &prd->flags)) { - clk_enable(prd->clk); - set_bit(BIT_CLK_ENABLED, &prd->flags); - } - - return 0; -} - -int platform_pm_runtime_idle(struct device *dev) +static int default_platform_runtime_idle(struct device *dev) { /* suspend synchronously to disable clocks immediately */ return pm_runtime_suspend(dev); } -static int platform_bus_notify(struct 
notifier_block *nb, - unsigned long action, void *data) -{ - struct device *dev = data; - struct pm_runtime_data *prd; - - dev_dbg(dev, "platform_bus_notify() %ld !\n", action); - - if (action == BUS_NOTIFY_BIND_DRIVER) { - prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL); - if (prd) - devres_add(dev, prd); - else - dev_err(dev, "unable to alloc memory for runtime pm\n"); - } - - return 0; -} - -#else /* CONFIG_PM_RUNTIME */ - -static int platform_bus_notify(struct notifier_block *nb, - unsigned long action, void *data) -{ - struct device *dev = data; - struct clk *clk; +static struct dev_power_domain default_power_domain = { + .ops = { + .runtime_suspend = pm_runtime_clk_suspend, + .runtime_resume = pm_runtime_clk_resume, + .runtime_idle = default_platform_runtime_idle, + USE_PLATFORM_PM_SLEEP_OPS + }, +}; - dev_dbg(dev, "platform_bus_notify() %ld !\n", action); +#define DEFAULT_PWR_DOMAIN_PTR (&default_power_domain) - switch (action) { - case BUS_NOTIFY_BIND_DRIVER: - clk = clk_get(dev, NULL); - if (!IS_ERR(clk)) { - clk_enable(clk); - clk_put(clk); - dev_info(dev, "runtime pm disabled, clock forced on\n"); - } - break; - case BUS_NOTIFY_UNBOUND_DRIVER: - clk = clk_get(dev, NULL); - if (!IS_ERR(clk)) { - clk_disable(clk); - clk_put(clk); - dev_info(dev, "runtime pm disabled, clock forced off\n"); - } - break; - } +#else - return 0; -} +#define DEFAULT_PWR_DOMAIN_PTR NULL #endif /* CONFIG_PM_RUNTIME */ -static struct notifier_block platform_bus_notifier = { - .notifier_call = platform_bus_notify +static struct pm_clk_notifier_block platform_bus_notifier = { + .pwr_domain = DEFAULT_PWR_DOMAIN_PTR, + .con_ids = { NULL, }, }; static int __init sh_pm_runtime_init(void) { - bus_register_notifier(&platform_bus_type, &platform_bus_notifier); + pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); return 0; } core_initcall(sh_pm_runtime_init); diff --git a/trunk/arch/arm/mach-tegra/gpio.c b/trunk/arch/arm/mach-tegra/gpio.c index 76a3f654220f..65a1aba6823d 100644 --- a/trunk/arch/arm/mach-tegra/gpio.c +++ b/trunk/arch/arm/mach-tegra/gpio.c @@ -257,7 +257,8 @@ static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) void tegra_gpio_resume(void) { unsigned long flags; - int b, p, i; + int b; + int p; local_irq_save(flags); @@ -280,7 +281,8 @@ void tegra_gpio_resume(void) void tegra_gpio_suspend(void) { unsigned long flags; - int b, p, i; + int b; + int p; local_irq_save(flags); for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) { diff --git a/trunk/arch/arm/mach-tegra/include/mach/barriers.h b/trunk/arch/arm/mach-tegra/include/mach/barriers.h index cc115174899b..425b42e91ef6 100644 --- a/trunk/arch/arm/mach-tegra/include/mach/barriers.h +++ b/trunk/arch/arm/mach-tegra/include/mach/barriers.h @@ -23,7 +23,7 @@ #include -#define rmb() dmb() +#define rmb() dsb() #define wmb() do { dsb(); outer_sync(); } while (0) #define mb() wmb() diff --git a/trunk/arch/arm/mach-tegra/tegra2_clocks.c b/trunk/arch/arm/mach-tegra/tegra2_clocks.c index 6d7c4eea4dcb..4459470c052d 100644 --- a/trunk/arch/arm/mach-tegra/tegra2_clocks.c +++ b/trunk/arch/arm/mach-tegra/tegra2_clocks.c @@ -1362,14 +1362,15 @@ static int tegra_clk_shared_bus_set_rate(struct clk *c, unsigned long rate) { unsigned long flags; int ret; + long new_rate = rate; - rate = clk_round_rate(c->parent, rate); - if (rate < 0) - return rate; + new_rate = clk_round_rate(c->parent, new_rate); + if (new_rate < 0) + return new_rate; spin_lock_irqsave(&c->parent->spinlock, flags); - c->u.shared_bus_user.rate = 
rate; + c->u.shared_bus_user.rate = new_rate; ret = tegra_clk_shared_bus_update(c->parent); spin_unlock_irqrestore(&c->parent->spinlock, flags); diff --git a/trunk/arch/arm/mach-ux500/board-mop500.c b/trunk/arch/arm/mach-ux500/board-mop500.c index af913741e6ec..6e1907fa94f0 100644 --- a/trunk/arch/arm/mach-ux500/board-mop500.c +++ b/trunk/arch/arm/mach-ux500/board-mop500.c @@ -178,16 +178,15 @@ static struct i2c_board_info __initdata mop500_i2c0_devices[] = { .irq = NOMADIK_GPIO_TO_IRQ(217), .platform_data = &mop500_tc35892_data, }, -}; - -/* I2C0 devices only available prior to HREFv60 */ -static struct i2c_board_info __initdata mop500_i2c0_old_devices[] = { + /* I2C0 devices only available prior to HREFv60 */ { I2C_BOARD_INFO("tps61052", 0x33), .platform_data = &mop500_tps61052_data, }, }; +#define NUM_PRE_V60_I2C0_DEVICES 1 + static struct i2c_board_info __initdata mop500_i2c2_devices[] = { { /* lp5521 LED driver, 1st device */ @@ -425,6 +424,8 @@ static void __init mop500_uart_init(void) static void __init mop500_init_machine(void) { + int i2c0_devs; + /* * The HREFv60 board removed a GPIO expander and routed * all these GPIO pins to the internal GPIO controller @@ -448,11 +449,11 @@ static void __init mop500_init_machine(void) platform_device_register(&ab8500_device); - i2c_register_board_info(0, mop500_i2c0_devices, - ARRAY_SIZE(mop500_i2c0_devices)); - if (!machine_is_hrefv60()) - i2c_register_board_info(0, mop500_i2c0_old_devices, - ARRAY_SIZE(mop500_i2c0_old_devices)); + i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices); + if (machine_is_hrefv60()) + i2c0_devs -= NUM_PRE_V60_I2C0_DEVICES; + + i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs); i2c_register_board_info(2, mop500_i2c2_devices, ARRAY_SIZE(mop500_i2c2_devices)); } diff --git a/trunk/arch/arm/mm/init.c b/trunk/arch/arm/mm/init.c index e5f6fc428348..e591513bb53e 100644 --- a/trunk/arch/arm/mm/init.c +++ b/trunk/arch/arm/mm/init.c @@ -392,7 +392,7 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn) * Convert start_pfn/end_pfn to a struct page pointer. */ start_pg = pfn_to_page(start_pfn - 1) + 1; - end_pg = pfn_to_page(end_pfn); + end_pg = pfn_to_page(end_pfn - 1) + 1; /* * Convert to physical addresses, and @@ -426,6 +426,14 @@ static void __init free_unused_memmap(struct meminfo *mi) bank_start = bank_pfn_start(bank); +#ifdef CONFIG_SPARSEMEM + /* + * Take care not to free memmap entries that don't exist + * due to SPARSEMEM sections which aren't present. + */ + bank_start = min(bank_start, + ALIGN(prev_bank_end, PAGES_PER_SECTION)); +#endif /* * If we had a previous bank, and there is a space * between the current bank and the previous, free it. 
@@ -440,6 +448,12 @@ static void __init free_unused_memmap(struct meminfo *mi) */ prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES); } + +#ifdef CONFIG_SPARSEMEM + if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION)) + free_memmap(prev_bank_end, + ALIGN(prev_bank_end, PAGES_PER_SECTION)); +#endif } static void __init free_highpages(void) diff --git a/trunk/arch/arm/mm/mmap.c b/trunk/arch/arm/mm/mmap.c index afe209e1e1f8..74be05f3e03a 100644 --- a/trunk/arch/arm/mm/mmap.c +++ b/trunk/arch/arm/mm/mmap.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -82,7 +83,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, mm->cached_hole_size = 0; } /* 8 bits of randomness in 20 address space bits */ - if (current->flags & PF_RANDOMIZE) + if ((current->flags & PF_RANDOMIZE) && + !(current->personality & ADDR_NO_RANDOMIZE)) addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT; full_search: diff --git a/trunk/arch/arm/mm/proc-arm920.S b/trunk/arch/arm/mm/proc-arm920.S index b46eb21f05c7..bf8a1d1cccb6 100644 --- a/trunk/arch/arm/mm/proc-arm920.S +++ b/trunk/arch/arm/mm/proc-arm920.S @@ -390,7 +390,7 @@ ENTRY(cpu_arm920_set_pte_ext) /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ .globl cpu_arm920_suspend_size .equ cpu_arm920_suspend_size, 4 * 3 -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_arm920_do_suspend) stmfd sp!, {r4 - r7, lr} mrc p15, 0, r4, c13, c0, 0 @ PID diff --git a/trunk/arch/arm/mm/proc-arm926.S b/trunk/arch/arm/mm/proc-arm926.S index 6a4bdb2c94a7..0ed85d930c09 100644 --- a/trunk/arch/arm/mm/proc-arm926.S +++ b/trunk/arch/arm/mm/proc-arm926.S @@ -404,7 +404,7 @@ ENTRY(cpu_arm926_set_pte_ext) /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ .globl cpu_arm926_suspend_size .equ cpu_arm926_suspend_size, 4 * 3 -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_arm926_do_suspend) stmfd sp!, {r4 - r7, lr} mrc p15, 0, r4, c13, c0, 0 @ PID diff --git a/trunk/arch/arm/mm/proc-sa1100.S b/trunk/arch/arm/mm/proc-sa1100.S index 74483d1977fe..184a9c997e36 100644 --- a/trunk/arch/arm/mm/proc-sa1100.S +++ b/trunk/arch/arm/mm/proc-sa1100.S @@ -171,7 +171,7 @@ ENTRY(cpu_sa1100_set_pte_ext) .globl cpu_sa1100_suspend_size .equ cpu_sa1100_suspend_size, 4*4 -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_sa1100_do_suspend) stmfd sp!, {r4 - r7, lr} mrc p15, 0, r4, c3, c0, 0 @ domain ID diff --git a/trunk/arch/arm/mm/proc-v6.S b/trunk/arch/arm/mm/proc-v6.S index bfa0c9f611c5..7c99cb4c8e4f 100644 --- a/trunk/arch/arm/mm/proc-v6.S +++ b/trunk/arch/arm/mm/proc-v6.S @@ -124,7 +124,7 @@ ENTRY(cpu_v6_set_pte_ext) /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ .globl cpu_v6_suspend_size .equ cpu_v6_suspend_size, 4 * 8 -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_v6_do_suspend) stmfd sp!, {r4 - r11, lr} mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID diff --git a/trunk/arch/arm/mm/proc-v7.S b/trunk/arch/arm/mm/proc-v7.S index c35618e42f6f..babfba09c89f 100644 --- a/trunk/arch/arm/mm/proc-v7.S +++ b/trunk/arch/arm/mm/proc-v7.S @@ -211,7 +211,7 @@ cpu_v7_name: /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ .globl cpu_v7_suspend_size .equ cpu_v7_suspend_size, 4 * 8 -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_v7_do_suspend) stmfd sp!, {r4 - r11, lr} mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID diff --git a/trunk/arch/arm/mm/proc-xsc3.S b/trunk/arch/arm/mm/proc-xsc3.S index 63d8b2044e84..596213699f37 100644 --- a/trunk/arch/arm/mm/proc-xsc3.S +++ 
b/trunk/arch/arm/mm/proc-xsc3.S @@ -417,7 +417,7 @@ ENTRY(cpu_xsc3_set_pte_ext) .globl cpu_xsc3_suspend_size .equ cpu_xsc3_suspend_size, 4 * 8 -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_xsc3_do_suspend) stmfd sp!, {r4 - r10, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode diff --git a/trunk/arch/arm/mm/proc-xscale.S b/trunk/arch/arm/mm/proc-xscale.S index 086038cd86ab..42af97664c9d 100644 --- a/trunk/arch/arm/mm/proc-xscale.S +++ b/trunk/arch/arm/mm/proc-xscale.S @@ -395,7 +395,7 @@ ENTRY(xscale_dma_a0_map_area) teq r2, #DMA_TO_DEVICE beq xscale_dma_clean_range b xscale_dma_flush_range -ENDPROC(xscsale_dma_a0_map_area) +ENDPROC(xscale_dma_a0_map_area) /* * dma_unmap_area(start, size, dir) @@ -518,7 +518,7 @@ ENTRY(cpu_xscale_set_pte_ext) .globl cpu_xscale_suspend_size .equ cpu_xscale_suspend_size, 4 * 7 -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_xscale_do_suspend) stmfd sp!, {r4 - r10, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode diff --git a/trunk/arch/arm/plat-mxc/gpio.c b/trunk/arch/arm/plat-mxc/gpio.c index 7a107246fd98..6cd6d7f686f6 100644 --- a/trunk/arch/arm/plat-mxc/gpio.c +++ b/trunk/arch/arm/plat-mxc/gpio.c @@ -295,6 +295,12 @@ static int mxc_gpio_direction_output(struct gpio_chip *chip, return 0; } +/* + * This lock class tells lockdep that GPIO irqs are in a different + * category than their parents, so it won't report false recursion. + */ +static struct lock_class_key gpio_lock_class; + int __init mxc_gpio_init(struct mxc_gpio_port *port, int cnt) { int i, j; @@ -311,6 +317,7 @@ int __init mxc_gpio_init(struct mxc_gpio_port *port, int cnt) __raw_writel(~0, port[i].base + GPIO_ISR); for (j = port[i].virtual_irq_start; j < port[i].virtual_irq_start + 32; j++) { + irq_set_lockdep_class(j, &gpio_lock_class); irq_set_chip_and_handler(j, &gpio_irq_chip, handle_level_irq); set_irq_flags(j, IRQF_VALID); diff --git a/trunk/arch/arm/plat-mxc/ssi-fiq.S b/trunk/arch/arm/plat-mxc/ssi-fiq.S index 4ddce565b353..8397a2dd19f2 100644 --- a/trunk/arch/arm/plat-mxc/ssi-fiq.S +++ b/trunk/arch/arm/plat-mxc/ssi-fiq.S @@ -124,6 +124,8 @@ imx_ssi_fiq_start: 1: @ return from FIQ subs pc, lr, #4 + + .align imx_ssi_fiq_base: .word 0x0 imx_ssi_fiq_rx_buffer: diff --git a/trunk/arch/arm/plat-omap/gpio.c b/trunk/arch/arm/plat-omap/gpio.c index d2adcdda23cf..bd9e32187eab 100644 --- a/trunk/arch/arm/plat-omap/gpio.c +++ b/trunk/arch/arm/plat-omap/gpio.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include @@ -1372,9 +1372,7 @@ static const struct dev_pm_ops omap_mpuio_dev_pm_ops = { .resume_noirq = omap_mpuio_resume_noirq, }; -/* use platform_driver for this, now that there's no longer any - * point to sys_device (other than not disturbing old code). - */ +/* use platform_driver for this. 
*/ static struct platform_driver omap_mpuio_driver = { .driver = { .name = "mpuio", @@ -1745,7 +1743,7 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev) } #if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS) -static int omap_gpio_suspend(struct sys_device *dev, pm_message_t mesg) +static int omap_gpio_suspend(void) { int i; @@ -1795,12 +1793,12 @@ static int omap_gpio_suspend(struct sys_device *dev, pm_message_t mesg) return 0; } -static int omap_gpio_resume(struct sys_device *dev) +static void omap_gpio_resume(void) { int i; if (!cpu_class_is_omap2() && !cpu_is_omap16xx()) - return 0; + return; for (i = 0; i < gpio_bank_count; i++) { struct gpio_bank *bank = &gpio_bank[i]; @@ -1836,21 +1834,13 @@ static int omap_gpio_resume(struct sys_device *dev) __raw_writel(bank->saved_wakeup, wake_set); spin_unlock_irqrestore(&bank->lock, flags); } - - return 0; } -static struct sysdev_class omap_gpio_sysclass = { - .name = "gpio", +static struct syscore_ops omap_gpio_syscore_ops = { .suspend = omap_gpio_suspend, .resume = omap_gpio_resume, }; -static struct sys_device omap_gpio_device = { - .id = 0, - .cls = &omap_gpio_sysclass, -}; - #endif #ifdef CONFIG_ARCH_OMAP2PLUS @@ -2108,21 +2098,14 @@ postcore_initcall(omap_gpio_drv_reg); static int __init omap_gpio_sysinit(void) { - int ret = 0; - mpuio_init(); #if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS) - if (cpu_is_omap16xx() || cpu_class_is_omap2()) { - if (ret == 0) { - ret = sysdev_class_register(&omap_gpio_sysclass); - if (ret == 0) - ret = sysdev_register(&omap_gpio_device); - } - } + if (cpu_is_omap16xx() || cpu_class_is_omap2()) + register_syscore_ops(&omap_gpio_syscore_ops); #endif - return ret; + return 0; } arch_initcall(omap_gpio_sysinit); diff --git a/trunk/arch/arm/plat-omap/iommu.c b/trunk/arch/arm/plat-omap/iommu.c index 8a51fd58f656..34fc31ee9081 100644 --- a/trunk/arch/arm/plat-omap/iommu.c +++ b/trunk/arch/arm/plat-omap/iommu.c @@ -793,6 +793,8 @@ static irqreturn_t iommu_fault_handler(int irq, void *data) clk_enable(obj->clk); errs = iommu_report_fault(obj, &da); clk_disable(obj->clk); + if (errs == 0) + return IRQ_HANDLED; /* Fault callback or TLB/PTE Dynamic loading */ if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv)) diff --git a/trunk/arch/arm/plat-omap/omap_device.c b/trunk/arch/arm/plat-omap/omap_device.c index 9bbda9acb73b..a37b8eb65b76 100644 --- a/trunk/arch/arm/plat-omap/omap_device.c +++ b/trunk/arch/arm/plat-omap/omap_device.c @@ -536,6 +536,28 @@ int omap_early_device_register(struct omap_device *od) return 0; } +static int _od_runtime_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + + return omap_device_idle(pdev); +} + +static int _od_runtime_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + + return omap_device_enable(pdev); +} + +static struct dev_power_domain omap_device_power_domain = { + .ops = { + .runtime_suspend = _od_runtime_suspend, + .runtime_resume = _od_runtime_resume, + USE_PLATFORM_PM_SLEEP_OPS + } +}; + /** * omap_device_register - register an omap_device with one omap_hwmod * @od: struct omap_device * to register @@ -549,6 +571,7 @@ int omap_device_register(struct omap_device *od) pr_debug("omap_device: %s: registering\n", od->pdev.name); od->pdev.dev.parent = &omap_device_parent; + od->pdev.dev.pwr_domain = &omap_device_power_domain; return platform_device_register(&od->pdev); } diff --git a/trunk/arch/arm/plat-pxa/gpio.c b/trunk/arch/arm/plat-pxa/gpio.c 
index dce088f45678..48ebb9479b61 100644 --- a/trunk/arch/arm/plat-pxa/gpio.c +++ b/trunk/arch/arm/plat-pxa/gpio.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include @@ -295,7 +295,7 @@ void __init pxa_init_gpio(int mux_irq, int start, int end, set_wake_t fn) } #ifdef CONFIG_PM -static int pxa_gpio_suspend(struct sys_device *dev, pm_message_t state) +static int pxa_gpio_suspend(void) { struct pxa_gpio_chip *c; int gpio; @@ -312,7 +312,7 @@ static int pxa_gpio_suspend(struct sys_device *dev, pm_message_t state) return 0; } -static int pxa_gpio_resume(struct sys_device *dev) +static void pxa_gpio_resume(void) { struct pxa_gpio_chip *c; int gpio; @@ -326,22 +326,13 @@ static int pxa_gpio_resume(struct sys_device *dev) __raw_writel(c->saved_gfer, c->regbase + GFER_OFFSET); __raw_writel(c->saved_gpdr, c->regbase + GPDR_OFFSET); } - return 0; } #else #define pxa_gpio_suspend NULL #define pxa_gpio_resume NULL #endif -struct sysdev_class pxa_gpio_sysclass = { - .name = "gpio", +struct syscore_ops pxa_gpio_syscore_ops = { .suspend = pxa_gpio_suspend, .resume = pxa_gpio_resume, }; - -static int __init pxa_gpio_init(void) -{ - return sysdev_class_register(&pxa_gpio_sysclass); -} - -core_initcall(pxa_gpio_init); diff --git a/trunk/arch/arm/plat-pxa/mfp.c b/trunk/arch/arm/plat-pxa/mfp.c index a9aa5ad3f4eb..be12eadcce20 100644 --- a/trunk/arch/arm/plat-pxa/mfp.c +++ b/trunk/arch/arm/plat-pxa/mfp.c @@ -17,7 +17,6 @@ #include #include #include -#include #include diff --git a/trunk/arch/arm/plat-s3c24xx/dma.c b/trunk/arch/arm/plat-s3c24xx/dma.c index 27ea852e3370..c10d10c56e2e 100644 --- a/trunk/arch/arm/plat-s3c24xx/dma.c +++ b/trunk/arch/arm/plat-s3c24xx/dma.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include #include @@ -1195,19 +1195,12 @@ int s3c2410_dma_getposition(unsigned int channel, dma_addr_t *src, dma_addr_t *d EXPORT_SYMBOL(s3c2410_dma_getposition); -static inline struct s3c2410_dma_chan *to_dma_chan(struct sys_device *dev) -{ - return container_of(dev, struct s3c2410_dma_chan, dev); -} - -/* system device class */ +/* system core operations */ #ifdef CONFIG_PM -static int s3c2410_dma_suspend(struct sys_device *dev, pm_message_t state) +static void s3c2410_dma_suspend_chan(s3c2410_dma_chan *cp) { - struct s3c2410_dma_chan *cp = to_dma_chan(dev); - printk(KERN_DEBUG "suspending dma channel %d\n", cp->number); if (dma_rdreg(cp, S3C2410_DMA_DMASKTRIG) & S3C2410_DMASKTRIG_ON) { @@ -1222,13 +1215,21 @@ static int s3c2410_dma_suspend(struct sys_device *dev, pm_message_t state) s3c2410_dma_dostop(cp); } +} + +static int s3c2410_dma_suspend(void) +{ + struct s3c2410_dma_chan *cp = s3c2410_chans; + int channel; + + for (channel = 0; channel < dma_channels; cp++, channel++) + s3c2410_dma_suspend_chan(cp); return 0; } -static int s3c2410_dma_resume(struct sys_device *dev) +static void s3c2410_dma_resume_chan(struct s3c2410_dma_chan *cp) { - struct s3c2410_dma_chan *cp = to_dma_chan(dev); unsigned int no = cp->number | DMACH_LOW_LEVEL; /* restore channel's hardware configuration */ @@ -1249,13 +1250,21 @@ static int s3c2410_dma_resume(struct sys_device *dev) return 0; } +static void s3c2410_dma_resume(void) +{ + struct s3c2410_dma_chan *cp = s3c2410_chans + dma_channels - 1; + int channel; + + for (channel = dma_channels - 1; channel >= 0; cp++, channel--) + s3c2410_dma_resume_chan(cp); +} + #else #define s3c2410_dma_suspend NULL #define s3c2410_dma_resume NULL #endif /* CONFIG_PM */ -struct sysdev_class dma_sysclass = { - .name = 
"s3c24xx-dma", +struct syscore_ops dma_syscore_ops = { .suspend = s3c2410_dma_suspend, .resume = s3c2410_dma_resume, }; @@ -1269,39 +1278,14 @@ static void s3c2410_dma_cache_ctor(void *p) /* initialisation code */ -static int __init s3c24xx_dma_sysclass_init(void) +static int __init s3c24xx_dma_syscore_init(void) { - int ret = sysdev_class_register(&dma_sysclass); - - if (ret != 0) - printk(KERN_ERR "dma sysclass registration failed\n"); - - return ret; -} - -core_initcall(s3c24xx_dma_sysclass_init); - -static int __init s3c24xx_dma_sysdev_register(void) -{ - struct s3c2410_dma_chan *cp = s3c2410_chans; - int channel, ret; - - for (channel = 0; channel < dma_channels; cp++, channel++) { - cp->dev.cls = &dma_sysclass; - cp->dev.id = channel; - ret = sysdev_register(&cp->dev); - - if (ret) { - printk(KERN_ERR "error registering dev for dma %d\n", - channel); - return ret; - } - } + register_syscore_ops(&dma_syscore_ops); return 0; } -late_initcall(s3c24xx_dma_sysdev_register); +late_initcall(s3c24xx_dma_syscore_init); int __init s3c24xx_dma_init(unsigned int channels, unsigned int irq, unsigned int stride) diff --git a/trunk/arch/arm/plat-s3c24xx/irq-pm.c b/trunk/arch/arm/plat-s3c24xx/irq-pm.c index c3624d898630..0efb2e2848c8 100644 --- a/trunk/arch/arm/plat-s3c24xx/irq-pm.c +++ b/trunk/arch/arm/plat-s3c24xx/irq-pm.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include @@ -65,7 +64,7 @@ static unsigned long save_extint[3]; static unsigned long save_eintflt[4]; static unsigned long save_eintmask; -int s3c24xx_irq_suspend(struct sys_device *dev, pm_message_t state) +int s3c24xx_irq_suspend(void) { unsigned int i; @@ -81,7 +80,7 @@ int s3c24xx_irq_suspend(struct sys_device *dev, pm_message_t state) return 0; } -int s3c24xx_irq_resume(struct sys_device *dev) +void s3c24xx_irq_resume(void) { unsigned int i; @@ -93,6 +92,4 @@ int s3c24xx_irq_resume(struct sys_device *dev) s3c_pm_do_restore(irq_save, ARRAY_SIZE(irq_save)); __raw_writel(save_eintmask, S3C24XX_EINTMASK); - - return 0; } diff --git a/trunk/arch/arm/plat-s5p/irq-pm.c b/trunk/arch/arm/plat-s5p/irq-pm.c index 5259ad458bc8..327acb3a4464 100644 --- a/trunk/arch/arm/plat-s5p/irq-pm.c +++ b/trunk/arch/arm/plat-s5p/irq-pm.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include @@ -77,17 +76,15 @@ static struct sleep_save eint_save[] = { SAVE_ITEM(S5P_EINT_MASK(3)), }; -int s3c24xx_irq_suspend(struct sys_device *dev, pm_message_t state) +int s3c24xx_irq_suspend(void) { s3c_pm_do_save(eint_save, ARRAY_SIZE(eint_save)); return 0; } -int s3c24xx_irq_resume(struct sys_device *dev) +void s3c24xx_irq_resume(void) { s3c_pm_do_restore(eint_save, ARRAY_SIZE(eint_save)); - - return 0; } diff --git a/trunk/arch/arm/plat-s5p/pm.c b/trunk/arch/arm/plat-s5p/pm.c index d592b6304b48..d15dc47b0e3d 100644 --- a/trunk/arch/arm/plat-s5p/pm.c +++ b/trunk/arch/arm/plat-s5p/pm.c @@ -19,17 +19,6 @@ #define PFX "s5p pm: " -/* s3c_pm_check_resume_pin - * - * check to see if the pin is configured correctly for sleep mode, and - * make any necessary adjustments if it is not -*/ - -static void s3c_pm_check_resume_pin(unsigned int pin, unsigned int irqoffs) -{ - /* nothing here yet */ -} - /* s3c_pm_configure_extint * * configure all external interrupt pins diff --git a/trunk/arch/arm/plat-samsung/include/plat/cpu.h b/trunk/arch/arm/plat-samsung/include/plat/cpu.h index cedfff51c82b..3aedac0034ba 100644 --- a/trunk/arch/arm/plat-samsung/include/plat/cpu.h +++ b/trunk/arch/arm/plat-samsung/include/plat/cpu.h @@ -68,6 +68,12 @@ extern 
void s3c24xx_init_uartdevs(char *name, struct sys_timer; extern struct sys_timer s3c24xx_timer; +extern struct syscore_ops s3c2410_pm_syscore_ops; +extern struct syscore_ops s3c2412_pm_syscore_ops; +extern struct syscore_ops s3c2416_pm_syscore_ops; +extern struct syscore_ops s3c244x_pm_syscore_ops; +extern struct syscore_ops s3c64xx_irq_syscore_ops; + /* system device classes */ extern struct sysdev_class s3c2410_sysclass; diff --git a/trunk/arch/arm/plat-samsung/include/plat/pm.h b/trunk/arch/arm/plat-samsung/include/plat/pm.h index 937cc2ace517..7fb6f6be8c81 100644 --- a/trunk/arch/arm/plat-samsung/include/plat/pm.h +++ b/trunk/arch/arm/plat-samsung/include/plat/pm.h @@ -103,14 +103,16 @@ extern void s3c_pm_do_restore_core(struct sleep_save *ptr, int count); #ifdef CONFIG_PM extern int s3c_irqext_wake(struct irq_data *data, unsigned int state); -extern int s3c24xx_irq_suspend(struct sys_device *dev, pm_message_t state); -extern int s3c24xx_irq_resume(struct sys_device *dev); +extern int s3c24xx_irq_suspend(void); +extern void s3c24xx_irq_resume(void); #else #define s3c_irqext_wake NULL #define s3c24xx_irq_suspend NULL #define s3c24xx_irq_resume NULL #endif +extern struct syscore_ops s3c24xx_irq_syscore_ops; + /* PM debug functions */ #ifdef CONFIG_SAMSUNG_PM_DEBUG diff --git a/trunk/arch/arm/plat-samsung/pm-check.c b/trunk/arch/arm/plat-samsung/pm-check.c index e4baf76f374a..6b733fafe7cd 100644 --- a/trunk/arch/arm/plat-samsung/pm-check.c +++ b/trunk/arch/arm/plat-samsung/pm-check.c @@ -164,7 +164,6 @@ static inline int in_region(void *ptr, int size, void *what, size_t whatsz) */ static u32 *s3c_pm_runcheck(struct resource *res, u32 *val) { - void *save_at = phys_to_virt(s3c_sleep_save_phys); unsigned long addr; unsigned long left; void *stkpage; @@ -192,11 +191,6 @@ static u32 *s3c_pm_runcheck(struct resource *res, u32 *val) goto skip_check; } - if (in_region(ptr, left, save_at, 32*4 )) { - S3C_PMDBG("skipping %08lx, has save block in\n", addr); - goto skip_check; - } - /* calculate and check the checksum */ calc = crc32_le(~0, ptr, left); diff --git a/trunk/arch/arm/plat-samsung/pm.c b/trunk/arch/arm/plat-samsung/pm.c index d5b58d31903c..5c0a440d6e16 100644 --- a/trunk/arch/arm/plat-samsung/pm.c +++ b/trunk/arch/arm/plat-samsung/pm.c @@ -214,8 +214,9 @@ void s3c_pm_do_restore_core(struct sleep_save *ptr, int count) * * print any IRQs asserted at resume time (ie, we woke from) */ -static void s3c_pm_show_resume_irqs(int start, unsigned long which, - unsigned long mask) +static void __maybe_unused s3c_pm_show_resume_irqs(int start, + unsigned long which, + unsigned long mask) { int i; diff --git a/trunk/arch/arm/vfp/vfpmodule.c b/trunk/arch/arm/vfp/vfpmodule.c index bbf3da012afd..f25e7ec89416 100644 --- a/trunk/arch/arm/vfp/vfpmodule.c +++ b/trunk/arch/arm/vfp/vfpmodule.c @@ -78,6 +78,14 @@ static void vfp_thread_exit(struct thread_info *thread) put_cpu(); } +static void vfp_thread_copy(struct thread_info *thread) +{ + struct thread_info *parent = current_thread_info(); + + vfp_sync_hwstate(parent); + thread->vfpstate = parent->vfpstate; +} + /* * When this function is called with the following 'cmd's, the following * is true while this function is being run: @@ -104,12 +112,17 @@ static void vfp_thread_exit(struct thread_info *thread) static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) { struct thread_info *thread = v; + u32 fpexc; +#ifdef CONFIG_SMP + unsigned int cpu; +#endif - if (likely(cmd == THREAD_NOTIFY_SWITCH)) { - u32 fpexc = fmrx(FPEXC); + switch 
(cmd) { + case THREAD_NOTIFY_SWITCH: + fpexc = fmrx(FPEXC); #ifdef CONFIG_SMP - unsigned int cpu = thread->cpu; + cpu = thread->cpu; /* * On SMP, if VFP is enabled, save the old state in @@ -134,13 +147,20 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) * old state. */ fmxr(FPEXC, fpexc & ~FPEXC_EN); - return NOTIFY_DONE; - } + break; - if (cmd == THREAD_NOTIFY_FLUSH) + case THREAD_NOTIFY_FLUSH: vfp_thread_flush(thread); - else + break; + + case THREAD_NOTIFY_EXIT: vfp_thread_exit(thread); + break; + + case THREAD_NOTIFY_COPY: + vfp_thread_copy(thread); + break; + } return NOTIFY_DONE; } @@ -378,9 +398,9 @@ static void vfp_enable(void *unused) } #ifdef CONFIG_PM -#include +#include -static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state) +static int vfp_pm_suspend(void) { struct thread_info *ti = current_thread_info(); u32 fpexc = fmrx(FPEXC); @@ -400,34 +420,25 @@ static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state) return 0; } -static int vfp_pm_resume(struct sys_device *dev) +static void vfp_pm_resume(void) { /* ensure we have access to the vfp */ vfp_enable(NULL); /* and disable it to ensure the next usage restores the state */ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); - - return 0; } -static struct sysdev_class vfp_pm_sysclass = { - .name = "vfp", +static struct syscore_ops vfp_pm_syscore_ops = { .suspend = vfp_pm_suspend, .resume = vfp_pm_resume, }; -static struct sys_device vfp_pm_sysdev = { - .cls = &vfp_pm_sysclass, -}; - static void vfp_pm_init(void) { - sysdev_class_register(&vfp_pm_sysclass); - sysdev_register(&vfp_pm_sysdev); + register_syscore_ops(&vfp_pm_syscore_ops); } - #else static inline void vfp_pm_init(void) { } #endif /* CONFIG_PM */ diff --git a/trunk/arch/avr32/include/asm/setup.h b/trunk/arch/avr32/include/asm/setup.h index ff5b7cf6be4d..160543dbec7e 100644 --- a/trunk/arch/avr32/include/asm/setup.h +++ b/trunk/arch/avr32/include/asm/setup.h @@ -94,6 +94,13 @@ struct tag_ethernet { #define ETH_INVALID_PHY 0xff +/* board information */ +#define ATAG_BOARDINFO 0x54410008 + +struct tag_boardinfo { + u32 board_number; +}; + struct tag { struct tag_header hdr; union { @@ -102,6 +109,7 @@ struct tag { struct tag_cmdline cmdline; struct tag_clock clock; struct tag_ethernet ethernet; + struct tag_boardinfo boardinfo; } u; }; @@ -128,6 +136,7 @@ extern struct tag *bootloader_tags; extern resource_size_t fbmem_start; extern resource_size_t fbmem_size; +extern u32 board_number; void setup_processor(void); diff --git a/trunk/arch/avr32/kernel/setup.c b/trunk/arch/avr32/kernel/setup.c index 5c7083916c33..bb0974cce4ac 100644 --- a/trunk/arch/avr32/kernel/setup.c +++ b/trunk/arch/avr32/kernel/setup.c @@ -390,6 +390,21 @@ static int __init parse_tag_clock(struct tag *tag) } __tagtable(ATAG_CLOCK, parse_tag_clock); +/* + * The board_number corresponds to the bd->bi_board_number in U-Boot. This + * parameter is only available during initialisation and can be used for some + * kind of board identification. + */ +u32 __initdata board_number; + +static int __init parse_tag_boardinfo(struct tag *tag) +{ + board_number = tag->u.boardinfo.board_number; + + return 0; +} +__tagtable(ATAG_BOARDINFO, parse_tag_boardinfo); + /* * Scan the tag table for this tag, and call its parse function.
The * tag table is built by the linker from all the __tagtable diff --git a/trunk/arch/avr32/kernel/traps.c b/trunk/arch/avr32/kernel/traps.c index b91b2044af9c..7aa25756412f 100644 --- a/trunk/arch/avr32/kernel/traps.c +++ b/trunk/arch/avr32/kernel/traps.c @@ -95,28 +95,6 @@ void _exception(long signr, struct pt_regs *regs, int code, info.si_code = code; info.si_addr = (void __user *)addr; force_sig_info(signr, &info, current); - - /* - * Init gets no signals that it doesn't have a handler for. - * That's all very well, but if it has caused a synchronous - * exception and we ignore the resulting signal, it will just - * generate the same exception over and over again and we get - * nowhere. Better to kill it and let the kernel panic. - */ - if (is_global_init(current)) { - __sighandler_t handler; - - spin_lock_irq(¤t->sighand->siglock); - handler = current->sighand->action[signr-1].sa.sa_handler; - spin_unlock_irq(¤t->sighand->siglock); - if (handler == SIG_DFL) { - /* init has generated a synchronous exception - and it doesn't have a handler for the signal */ - printk(KERN_CRIT "init has generated signal %ld " - "but has no handler for it\n", signr); - do_exit(signr); - } - } } asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs) diff --git a/trunk/arch/avr32/mach-at32ap/clock.c b/trunk/arch/avr32/mach-at32ap/clock.c index 442f08c5e641..86925fd6ea5b 100644 --- a/trunk/arch/avr32/mach-at32ap/clock.c +++ b/trunk/arch/avr32/mach-at32ap/clock.c @@ -35,22 +35,30 @@ void at32_clk_register(struct clk *clk) spin_unlock(&clk_list_lock); } -struct clk *clk_get(struct device *dev, const char *id) +static struct clk *__clk_get(struct device *dev, const char *id) { struct clk *clk; - spin_lock(&clk_list_lock); - list_for_each_entry(clk, &at32_clock_list, list) { if (clk->dev == dev && strcmp(id, clk->name) == 0) { - spin_unlock(&clk_list_lock); return clk; } } - spin_unlock(&clk_list_lock); return ERR_PTR(-ENOENT); } + +struct clk *clk_get(struct device *dev, const char *id) +{ + struct clk *clk; + + spin_lock(&clk_list_lock); + clk = __clk_get(dev, id); + spin_unlock(&clk_list_lock); + + return clk; +} + EXPORT_SYMBOL(clk_get); void clk_put(struct clk *clk) @@ -257,15 +265,15 @@ static int clk_show(struct seq_file *s, void *unused) spin_lock(&clk_list_lock); /* show clock tree as derived from the three oscillators */ - clk = clk_get(NULL, "osc32k"); + clk = __clk_get(NULL, "osc32k"); dump_clock(clk, &r); clk_put(clk); - clk = clk_get(NULL, "osc0"); + clk = __clk_get(NULL, "osc0"); dump_clock(clk, &r); clk_put(clk); - clk = clk_get(NULL, "osc1"); + clk = __clk_get(NULL, "osc1"); dump_clock(clk, &r); clk_put(clk); diff --git a/trunk/arch/avr32/mach-at32ap/extint.c b/trunk/arch/avr32/mach-at32ap/extint.c index 47ba4b9b6db1..fbc2aeaebddb 100644 --- a/trunk/arch/avr32/mach-at32ap/extint.c +++ b/trunk/arch/avr32/mach-at32ap/extint.c @@ -61,34 +61,34 @@ struct eic { static struct eic *nmi_eic; static bool nmi_enabled; -static void eic_ack_irq(struct irq_chip *d) +static void eic_ack_irq(struct irq_data *d) { - struct eic *eic = irq_data_get_irq_chip_data(data); + struct eic *eic = irq_data_get_irq_chip_data(d); eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq)); } -static void eic_mask_irq(struct irq_chip *d) +static void eic_mask_irq(struct irq_data *d) { - struct eic *eic = irq_data_get_irq_chip_data(data); + struct eic *eic = irq_data_get_irq_chip_data(d); eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq)); } -static void eic_mask_ack_irq(struct irq_chip *d) +static void 
eic_mask_ack_irq(struct irq_data *d) { - struct eic *eic = irq_data_get_irq_chip_data(data); + struct eic *eic = irq_data_get_irq_chip_data(d); eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq)); eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq)); } -static void eic_unmask_irq(struct irq_chip *d) +static void eic_unmask_irq(struct irq_data *d) { - struct eic *eic = irq_data_get_irq_chip_data(data); + struct eic *eic = irq_data_get_irq_chip_data(d); eic_writel(eic, IER, 1 << (d->irq - eic->first_irq)); } -static int eic_set_irq_type(struct irq_chip *d, unsigned int flow_type) +static int eic_set_irq_type(struct irq_data *d, unsigned int flow_type) { - struct eic *eic = irq_data_get_irq_chip_data(data); + struct eic *eic = irq_data_get_irq_chip_data(d); unsigned int irq = d->irq; unsigned int i = irq - eic->first_irq; u32 mode, edge, level; @@ -191,7 +191,7 @@ static int __init eic_probe(struct platform_device *pdev) regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); int_irq = platform_get_irq(pdev, 0); - if (!regs || !int_irq) { + if (!regs || (int)int_irq <= 0) { dev_dbg(&pdev->dev, "missing regs and/or irq resource\n"); return -ENXIO; } diff --git a/trunk/arch/avr32/mach-at32ap/intc.c b/trunk/arch/avr32/mach-at32ap/intc.c index 21ce35f33aa5..3e3646186c9f 100644 --- a/trunk/arch/avr32/mach-at32ap/intc.c +++ b/trunk/arch/avr32/mach-at32ap/intc.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include @@ -21,7 +21,6 @@ struct intc { void __iomem *regs; struct irq_chip chip; - struct sys_device sysdev; #ifdef CONFIG_PM unsigned long suspend_ipr; unsigned long saved_ipr[64]; @@ -146,9 +145,8 @@ void intc_set_suspend_handler(unsigned long offset) intc0.suspend_ipr = offset; } -static int intc_suspend(struct sys_device *sdev, pm_message_t state) +static int intc_suspend(void) { - struct intc *intc = container_of(sdev, struct intc, sysdev); int i; if (unlikely(!irqs_disabled())) { @@ -156,28 +154,25 @@ static int intc_suspend(struct sys_device *sdev, pm_message_t state) return -EINVAL; } - if (unlikely(!intc->suspend_ipr)) { + if (unlikely(!intc0.suspend_ipr)) { pr_err("intc_suspend: suspend_ipr not initialized\n"); return -EINVAL; } for (i = 0; i < 64; i++) { - intc->saved_ipr[i] = intc_readl(intc, INTPR0 + 4 * i); - intc_writel(intc, INTPR0 + 4 * i, intc->suspend_ipr); + intc0.saved_ipr[i] = intc_readl(&intc0, INTPR0 + 4 * i); + intc_writel(&intc0, INTPR0 + 4 * i, intc0.suspend_ipr); } return 0; } -static int intc_resume(struct sys_device *sdev) +static int intc_resume(void) { - struct intc *intc = container_of(sdev, struct intc, sysdev); int i; - WARN_ON(!irqs_disabled()); - for (i = 0; i < 64; i++) - intc_writel(intc, INTPR0 + 4 * i, intc->saved_ipr[i]); + intc_writel(&intc0, INTPR0 + 4 * i, intc0.saved_ipr[i]); return 0; } @@ -186,27 +181,18 @@ static int intc_resume(struct sys_device *sdev) #define intc_resume NULL #endif -static struct sysdev_class intc_class = { - .name = "intc", +static struct syscore_ops intc_syscore_ops = { .suspend = intc_suspend, .resume = intc_resume, }; -static int __init intc_init_sysdev(void) +static int __init intc_init_syscore(void) { - int ret; - - ret = sysdev_class_register(&intc_class); - if (ret) - return ret; + register_syscore_ops(&intc_syscore_ops); - intc0.sysdev.id = 0; - intc0.sysdev.cls = &intc_class; - ret = sysdev_register(&intc0.sysdev); - - return ret; + return 0; } -device_initcall(intc_init_sysdev); +device_initcall(intc_init_syscore); unsigned long intc_get_pending(unsigned int group) { diff --git 
a/trunk/arch/avr32/mach-at32ap/pio.c b/trunk/arch/avr32/mach-at32ap/pio.c index f308e1ddc629..2e0aa853a4bc 100644 --- a/trunk/arch/avr32/mach-at32ap/pio.c +++ b/trunk/arch/avr32/mach-at32ap/pio.c @@ -257,7 +257,7 @@ static void gpio_irq_mask(struct irq_data *d) pio_writel(pio, IDR, 1 << (gpio & 0x1f)); } -static void gpio_irq_unmask(struct irq_data *d)) +static void gpio_irq_unmask(struct irq_data *d) { unsigned gpio = irq_to_gpio(d->irq); struct pio_device *pio = &pio_dev[gpio >> 5]; diff --git a/trunk/arch/avr32/mach-at32ap/pm-at32ap700x.S b/trunk/arch/avr32/mach-at32ap/pm-at32ap700x.S index 17503b0ed6c9..f868f4ce761b 100644 --- a/trunk/arch/avr32/mach-at32ap/pm-at32ap700x.S +++ b/trunk/arch/avr32/mach-at32ap/pm-at32ap700x.S @@ -53,7 +53,7 @@ cpu_enter_idle: st.w r8[TI_flags], r9 unmask_interrupts sleep CPU_SLEEP_IDLE - .size cpu_idle_sleep, . - cpu_idle_sleep + .size cpu_enter_idle, . - cpu_enter_idle /* * Common return path for PM functions that don't run from diff --git a/trunk/arch/blackfin/include/asm/system.h b/trunk/arch/blackfin/include/asm/system.h index 19e2c7c3e63a..44bd0cced725 100644 --- a/trunk/arch/blackfin/include/asm/system.h +++ b/trunk/arch/blackfin/include/asm/system.h @@ -19,11 +19,11 @@ * Force strict CPU ordering. */ #define nop() __asm__ __volatile__ ("nop;\n\t" : : ) -#define mb() __asm__ __volatile__ ("" : : : "memory") -#define rmb() __asm__ __volatile__ ("" : : : "memory") -#define wmb() __asm__ __volatile__ ("" : : : "memory") -#define set_mb(var, value) do { (void) xchg(&var, value); } while (0) -#define read_barrier_depends() do { } while(0) +#define smp_mb() mb() +#define smp_rmb() rmb() +#define smp_wmb() wmb() +#define set_mb(var, value) do { var = value; mb(); } while (0) +#define smp_read_barrier_depends() read_barrier_depends() #ifdef CONFIG_SMP asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value); @@ -37,16 +37,16 @@ asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr, unsigned long new, unsigned long old); #ifdef __ARCH_SYNC_CORE_DCACHE -# define smp_mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0) -# define smp_rmb() do { barrier(); smp_check_barrier(); } while (0) -# define smp_wmb() do { barrier(); smp_mark_barrier(); } while (0) -#define smp_read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0) - +/* Force Core data cache coherence */ +# define mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0) +# define rmb() do { barrier(); smp_check_barrier(); } while (0) +# define wmb() do { barrier(); smp_mark_barrier(); } while (0) +# define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0) #else -# define smp_mb() barrier() -# define smp_rmb() barrier() -# define smp_wmb() barrier() -#define smp_read_barrier_depends() barrier() +# define mb() barrier() +# define rmb() barrier() +# define wmb() barrier() +# define read_barrier_depends() do { } while (0) #endif static inline unsigned long __xchg(unsigned long x, volatile void *ptr, @@ -99,10 +99,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, #else /* !CONFIG_SMP */ -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while(0) +#define mb() barrier() +#define rmb() barrier() +#define wmb() barrier() +#define read_barrier_depends() do { } while (0) struct __xchg_dummy { unsigned long a[100]; diff --git a/trunk/arch/blackfin/kernel/gptimers.c 
b/trunk/arch/blackfin/kernel/gptimers.c index cdbe075de1dc..8b81dc04488a 100644 --- a/trunk/arch/blackfin/kernel/gptimers.c +++ b/trunk/arch/blackfin/kernel/gptimers.c @@ -268,7 +268,7 @@ void disable_gptimers(uint16_t mask) _disable_gptimers(mask); for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i) if (mask & (1 << i)) - group_regs[BFIN_TIMER_OCTET(i)]->status |= trun_mask[i]; + group_regs[BFIN_TIMER_OCTET(i)]->status = trun_mask[i]; SSYNC(); } EXPORT_SYMBOL(disable_gptimers); diff --git a/trunk/arch/blackfin/kernel/nmi.c b/trunk/arch/blackfin/kernel/nmi.c index 0b5f72f17fd0..401eb1d8e3b4 100644 --- a/trunk/arch/blackfin/kernel/nmi.c +++ b/trunk/arch/blackfin/kernel/nmi.c @@ -12,7 +12,7 @@ #include #include -#include +#include #include #include #include @@ -196,43 +196,31 @@ void touch_nmi_watchdog(void) /* Suspend/resume support */ #ifdef CONFIG_PM -static int nmi_wdt_suspend(struct sys_device *dev, pm_message_t state) +static int nmi_wdt_suspend(void) { nmi_wdt_stop(); return 0; } -static int nmi_wdt_resume(struct sys_device *dev) +static void nmi_wdt_resume(void) { if (nmi_active) nmi_wdt_start(); - return 0; } -static struct sysdev_class nmi_sysclass = { - .name = DRV_NAME, +static struct syscore_ops nmi_syscore_ops = { .resume = nmi_wdt_resume, .suspend = nmi_wdt_suspend, }; -static struct sys_device device_nmi_wdt = { - .id = 0, - .cls = &nmi_sysclass, -}; - -static int __init init_nmi_wdt_sysfs(void) +static int __init init_nmi_wdt_syscore(void) { - int error; - - if (!nmi_active) - return 0; + if (nmi_active) + register_syscore_ops(&nmi_syscore_ops); - error = sysdev_class_register(&nmi_sysclass); - if (!error) - error = sysdev_register(&device_nmi_wdt); - return error; + return 0; } -late_initcall(init_nmi_wdt_sysfs); +late_initcall(init_nmi_wdt_syscore); #endif /* CONFIG_PM */ diff --git a/trunk/arch/blackfin/kernel/time-ts.c b/trunk/arch/blackfin/kernel/time-ts.c index 8c9a43daf80f..9e9b60d969dc 100644 --- a/trunk/arch/blackfin/kernel/time-ts.c +++ b/trunk/arch/blackfin/kernel/time-ts.c @@ -23,29 +23,6 @@ #include #include -/* Accelerators for sched_clock() - * convert from cycles(64bits) => nanoseconds (64bits) - * basic equation: - * ns = cycles / (freq / ns_per_sec) - * ns = cycles * (ns_per_sec / freq) - * ns = cycles * (10^9 / (cpu_khz * 10^3)) - * ns = cycles * (10^6 / cpu_khz) - * - * Then we use scaling math (suggested by george@mvista.com) to get: - * ns = cycles * (10^6 * SC / cpu_khz) / SC - * ns = cycles * cyc2ns_scale / SC - * - * And since SC is a constant power of two, we can convert the div - * into a shift. - * - * We can use khz divisor instead of mhz to keep a better precision, since - * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. - * (mathieu.desnoyers@polymtl.ca) - * - * -johnstul@us.ibm.com "math is hard, lets go shopping!" 
- */ - -#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ #if defined(CONFIG_CYCLES_CLOCKSOURCE) @@ -63,7 +40,6 @@ static struct clocksource bfin_cs_cycles = { .rating = 400, .read = bfin_read_cycles, .mask = CLOCKSOURCE_MASK(64), - .shift = CYC2NS_SCALE_FACTOR, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -75,10 +51,7 @@ static inline unsigned long long bfin_cs_cycles_sched_clock(void) static int __init bfin_cs_cycles_init(void) { - bfin_cs_cycles.mult = \ - clocksource_hz2mult(get_cclk(), bfin_cs_cycles.shift); - - if (clocksource_register(&bfin_cs_cycles)) + if (clocksource_register_hz(&bfin_cs_cycles, get_cclk())) panic("failed to register clocksource"); return 0; @@ -111,7 +84,6 @@ static struct clocksource bfin_cs_gptimer0 = { .rating = 350, .read = bfin_read_gptimer0, .mask = CLOCKSOURCE_MASK(32), - .shift = CYC2NS_SCALE_FACTOR, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -125,10 +97,7 @@ static int __init bfin_cs_gptimer0_init(void) { setup_gptimer0(); - bfin_cs_gptimer0.mult = \ - clocksource_hz2mult(get_sclk(), bfin_cs_gptimer0.shift); - - if (clocksource_register(&bfin_cs_gptimer0)) + if (clocksource_register_hz(&bfin_cs_gptimer0, get_sclk())) panic("failed to register clocksource"); return 0; @@ -206,8 +175,14 @@ irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id) { struct clock_event_device *evt = dev_id; smp_mb(); - evt->event_handler(evt); + /* + * We want to ACK before we handle so that we can handle smaller timer + * intervals. This way if the timer expires again while we're handling + * things, we're more likely to see that 2nd int rather than swallowing + * it by ACKing the int at the end of this handler. + */ bfin_gptmr0_ack(); + evt->event_handler(evt); return IRQ_HANDLED; } diff --git a/trunk/arch/blackfin/mach-common/dpmc.c b/trunk/arch/blackfin/mach-common/dpmc.c index 382099fd5561..5e4112e518a9 100644 --- a/trunk/arch/blackfin/mach-common/dpmc.c +++ b/trunk/arch/blackfin/mach-common/dpmc.c @@ -19,9 +19,6 @@ #define DRIVER_NAME "bfin dpmc" -#define dprintk(msg...) \ - cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, DRIVER_NAME, msg) - struct bfin_dpmc_platform_data *pdata; /** diff --git a/trunk/arch/blackfin/mach-common/smp.c b/trunk/arch/blackfin/mach-common/smp.c index 6e17a265c4d3..1fbd94c44457 100644 --- a/trunk/arch/blackfin/mach-common/smp.c +++ b/trunk/arch/blackfin/mach-common/smp.c @@ -109,10 +109,23 @@ static void ipi_flush_icache(void *info) struct blackfin_flush_data *fdata = info; /* Invalidate the memory holding the bounds of the flushed region. */ - invalidate_dcache_range((unsigned long)fdata, - (unsigned long)fdata + sizeof(*fdata)); + blackfin_dcache_invalidate_range((unsigned long)fdata, + (unsigned long)fdata + sizeof(*fdata)); + + /* Make sure all write buffers in the data side of the core + * are flushed before trying to invalidate the icache. This + * needs to be after the data flush and before the icache + * flush so that the SSYNC does the right thing in preventing + * the instruction prefetcher from hitting things in cached + * memory at the wrong time -- it runs much further ahead than + * the pipeline. + */ + SSYNC(); - flush_icache_range(fdata->start, fdata->end); + /* ipi_flush_icache is invoked by generic flush_icache_range, + * so call blackfin arch icache flush directly here.
+ */ + blackfin_icache_flush_range(fdata->start, fdata->end); } static void ipi_call_function(unsigned int cpu, struct ipi_message *msg) @@ -164,6 +177,9 @@ static irqreturn_t ipi_handler_int1(int irq, void *dev_instance) while (msg_queue->count) { msg = &msg_queue->ipi_message[msg_queue->head]; switch (msg->type) { + case BFIN_IPI_RESCHEDULE: + scheduler_ipi(); + break; case BFIN_IPI_CALL_FUNC: spin_unlock_irqrestore(&msg_queue->lock, flags); ipi_call_function(cpu, msg); diff --git a/trunk/arch/cris/arch-v32/kernel/smp.c b/trunk/arch/cris/arch-v32/kernel/smp.c index 4c9e3e1ba5d1..66cc75657e2f 100644 --- a/trunk/arch/cris/arch-v32/kernel/smp.c +++ b/trunk/arch/cris/arch-v32/kernel/smp.c @@ -342,15 +342,18 @@ irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id) ipi = REG_RD(intr_vect, irq_regs[smp_processor_id()], rw_ipi); + if (ipi.vector & IPI_SCHEDULE) { + scheduler_ipi(); + } if (ipi.vector & IPI_CALL) { - func(info); + func(info); } if (ipi.vector & IPI_FLUSH_TLB) { - if (flush_mm == FLUSH_ALL) - __flush_tlb_all(); - else if (flush_vma == FLUSH_ALL) + if (flush_mm == FLUSH_ALL) + __flush_tlb_all(); + else if (flush_vma == FLUSH_ALL) __flush_tlb_mm(flush_mm); - else + else __flush_tlb_page(flush_vma, flush_addr); } diff --git a/trunk/arch/ia64/hp/common/sba_iommu.c b/trunk/arch/ia64/hp/common/sba_iommu.c index 4ce8d1358fee..c04dd576f333 100644 --- a/trunk/arch/ia64/hp/common/sba_iommu.c +++ b/trunk/arch/ia64/hp/common/sba_iommu.c @@ -37,6 +37,7 @@ #include #include #include +#include #include /* ia64_get_itc() */ #include diff --git a/trunk/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/trunk/arch/ia64/kernel/cpufreq/acpi-cpufreq.c index 22f61526a8e1..f09b174244d5 100644 --- a/trunk/arch/ia64/kernel/cpufreq/acpi-cpufreq.c +++ b/trunk/arch/ia64/kernel/cpufreq/acpi-cpufreq.c @@ -23,8 +23,6 @@ #include #include -#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg) - MODULE_AUTHOR("Venkatesh Pallipadi"); MODULE_DESCRIPTION("ACPI Processor P-States Driver"); MODULE_LICENSE("GPL"); @@ -47,12 +45,12 @@ processor_set_pstate ( { s64 retval; - dprintk("processor_set_pstate\n"); + pr_debug("processor_set_pstate\n"); retval = ia64_pal_set_pstate((u64)value); if (retval) { - dprintk("Failed to set freq to 0x%x, with error 0x%lx\n", + pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n", value, retval); return -ENODEV; } @@ -67,14 +65,14 @@ processor_get_pstate ( u64 pstate_index = 0; s64 retval; - dprintk("processor_get_pstate\n"); + pr_debug("processor_get_pstate\n"); retval = ia64_pal_get_pstate(&pstate_index, PAL_GET_PSTATE_TYPE_INSTANT); *value = (u32) pstate_index; if (retval) - dprintk("Failed to get current freq with " + pr_debug("Failed to get current freq with " "error 0x%lx, idx 0x%x\n", retval, *value); return (int)retval; @@ -90,7 +88,7 @@ extract_clock ( { unsigned long i; - dprintk("extract_clock\n"); + pr_debug("extract_clock\n"); for (i = 0; i < data->acpi_data.state_count; i++) { if (value == data->acpi_data.states[i].status) @@ -110,7 +108,7 @@ processor_get_freq ( cpumask_t saved_mask; unsigned long clock_freq; - dprintk("processor_get_freq\n"); + pr_debug("processor_get_freq\n"); saved_mask = current->cpus_allowed; set_cpus_allowed_ptr(current, cpumask_of(cpu)); @@ -148,7 +146,7 @@ processor_set_freq ( cpumask_t saved_mask; int retval; - dprintk("processor_set_freq\n"); + pr_debug("processor_set_freq\n"); saved_mask = current->cpus_allowed; set_cpus_allowed_ptr(current, cpumask_of(cpu)); @@ -159,16 +157,16 @@ processor_set_freq ( if (state == data->acpi_data.state) { if (unlikely(data->resume)) { - dprintk("Called after resume, resetting to P%d\n", state); + pr_debug("Called after resume, resetting to P%d\n", state); data->resume = 0; } else { - dprintk("Already at target state (P%d)\n", state); + pr_debug("Already at target state (P%d)\n", state); retval = 0; goto migrate_end; } } - dprintk("Transitioning from P%d to P%d\n", + pr_debug("Transitioning from P%d to P%d\n", data->acpi_data.state, state); /* cpufreq frequency struct */ @@ -186,7 +184,7 @@ processor_set_freq ( value = (u32) data->acpi_data.states[state].control; - dprintk("Transitioning to state: 0x%08x\n", value); + pr_debug("Transitioning to state: 0x%08x\n", value); ret = processor_set_pstate(value); if (ret) { @@ -219,7 +217,7 @@ acpi_cpufreq_get ( { struct cpufreq_acpi_io *data = acpi_io_data[cpu]; - dprintk("acpi_cpufreq_get\n"); + pr_debug("acpi_cpufreq_get\n"); return processor_get_freq(data, cpu); } @@ -235,7 +233,7 @@ acpi_cpufreq_target ( unsigned int next_state = 0; unsigned int result = 0; - dprintk("acpi_cpufreq_setpolicy\n"); + pr_debug("acpi_cpufreq_setpolicy\n"); result = cpufreq_frequency_table_target(policy, data->freq_table, target_freq, relation, &next_state); @@ -255,7 +253,7 @@ acpi_cpufreq_verify ( unsigned int result = 0; struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; - dprintk("acpi_cpufreq_verify\n"); + pr_debug("acpi_cpufreq_verify\n"); result = cpufreq_frequency_table_verify(policy, data->freq_table); @@ -273,7 +271,7 @@ acpi_cpufreq_cpu_init ( struct cpufreq_acpi_io *data; unsigned int result = 0; - dprintk("acpi_cpufreq_cpu_init\n"); + pr_debug("acpi_cpufreq_cpu_init\n"); data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); if (!data) @@ -288,7 +286,7 @@ acpi_cpufreq_cpu_init ( /* capability check */ if (data->acpi_data.state_count <= 1) { - dprintk("No 
P-States\n"); + pr_debug("No P-States\n"); result = -ENODEV; goto err_unreg; } @@ -297,7 +295,7 @@ acpi_cpufreq_cpu_init ( ACPI_ADR_SPACE_FIXED_HARDWARE) || (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { - dprintk("Unsupported address space [%d, %d]\n", + pr_debug("Unsupported address space [%d, %d]\n", (u32) (data->acpi_data.control_register.space_id), (u32) (data->acpi_data.status_register.space_id)); result = -ENODEV; @@ -348,7 +346,7 @@ acpi_cpufreq_cpu_init ( "activated.\n", cpu); for (i = 0; i < data->acpi_data.state_count; i++) - dprintk(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n", + pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n", (i == data->acpi_data.state?'*':' '), i, (u32) data->acpi_data.states[i].core_frequency, (u32) data->acpi_data.states[i].power, @@ -383,7 +381,7 @@ acpi_cpufreq_cpu_exit ( { struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; - dprintk("acpi_cpufreq_cpu_exit\n"); + pr_debug("acpi_cpufreq_cpu_exit\n"); if (data) { cpufreq_frequency_table_put_attr(policy->cpu); @@ -418,7 +416,7 @@ static struct cpufreq_driver acpi_cpufreq_driver = { static int __init acpi_cpufreq_init (void) { - dprintk("acpi_cpufreq_init\n"); + pr_debug("acpi_cpufreq_init\n"); return cpufreq_register_driver(&acpi_cpufreq_driver); } @@ -427,7 +425,7 @@ acpi_cpufreq_init (void) static void __exit acpi_cpufreq_exit (void) { - dprintk("acpi_cpufreq_exit\n"); + pr_debug("acpi_cpufreq_exit\n"); cpufreq_unregister_driver(&acpi_cpufreq_driver); return; diff --git a/trunk/arch/ia64/kernel/cyclone.c b/trunk/arch/ia64/kernel/cyclone.c index 1b811c61bdc6..f64097b5118a 100644 --- a/trunk/arch/ia64/kernel/cyclone.c +++ b/trunk/arch/ia64/kernel/cyclone.c @@ -31,8 +31,6 @@ static struct clocksource clocksource_cyclone = { .rating = 300, .read = read_cyclone, .mask = (1LL << 40) - 1, - .mult = 0, /*to be calculated*/ - .shift = 16, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -118,9 +116,7 @@ int __init init_cyclone_clock(void) /* initialize last tick */ cyclone_mc = cyclone_timer; clocksource_cyclone.fsys_mmio = cyclone_timer; - clocksource_cyclone.mult = clocksource_hz2mult(CYCLONE_TIMER_FREQ, - clocksource_cyclone.shift); - clocksource_register(&clocksource_cyclone); + clocksource_register_hz(&clocksource_cyclone, CYCLONE_TIMER_FREQ); return 0; } diff --git a/trunk/arch/ia64/kernel/irq_ia64.c b/trunk/arch/ia64/kernel/irq_ia64.c index 5b704740f160..782c3a357f24 100644 --- a/trunk/arch/ia64/kernel/irq_ia64.c +++ b/trunk/arch/ia64/kernel/irq_ia64.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -496,6 +497,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs) smp_local_flush_tlb(); kstat_incr_irqs_this_cpu(irq, desc); } else if (unlikely(IS_RESCHEDULE(vector))) { + scheduler_ipi(); kstat_incr_irqs_this_cpu(irq, desc); } else { ia64_setreg(_IA64_REG_CR_TPR, vector); diff --git a/trunk/arch/ia64/kernel/time.c b/trunk/arch/ia64/kernel/time.c index 156ad803d5b7..04440cc09b40 100644 --- a/trunk/arch/ia64/kernel/time.c +++ b/trunk/arch/ia64/kernel/time.c @@ -73,8 +73,6 @@ static struct clocksource clocksource_itc = { .rating = 350, .read = itc_get_cycles, .mask = CLOCKSOURCE_MASK(64), - .mult = 0, /*to be calculated*/ - .shift = 16, .flags = CLOCK_SOURCE_IS_CONTINUOUS, #ifdef CONFIG_PARAVIRT .resume = paravirt_clocksource_resume, @@ -365,11 +363,8 @@ ia64_init_itm (void) ia64_cpu_local_tick(); if (!itc_clocksource) { - /* Sort out mult/shift values: */ - clocksource_itc.mult = - 
clocksource_hz2mult(local_cpu_data->itc_freq, - clocksource_itc.shift); - clocksource_register(&clocksource_itc); + clocksource_register_hz(&clocksource_itc, + local_cpu_data->itc_freq); itc_clocksource = &clocksource_itc; } } diff --git a/trunk/arch/ia64/kernel/vmlinux.lds.S b/trunk/arch/ia64/kernel/vmlinux.lds.S index 787de4a77d82..53c0ba004e9e 100644 --- a/trunk/arch/ia64/kernel/vmlinux.lds.S +++ b/trunk/arch/ia64/kernel/vmlinux.lds.S @@ -209,6 +209,7 @@ SECTIONS { data : { } :data .data : AT(ADDR(.data) - LOAD_OFFSET) { + _sdata = .; INIT_TASK_DATA(PAGE_SIZE) CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES) READ_MOSTLY_DATA(SMP_CACHE_BYTES) diff --git a/trunk/arch/ia64/mm/fault.c b/trunk/arch/ia64/mm/fault.c index 0799fea4c588..20b359376128 100644 --- a/trunk/arch/ia64/mm/fault.c +++ b/trunk/arch/ia64/mm/fault.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include diff --git a/trunk/arch/ia64/oprofile/backtrace.c b/trunk/arch/ia64/oprofile/backtrace.c index 5cdd7e4a597c..f7b798993cea 100644 --- a/trunk/arch/ia64/oprofile/backtrace.c +++ b/trunk/arch/ia64/oprofile/backtrace.c @@ -29,7 +29,7 @@ typedef struct unsigned int depth; struct pt_regs *regs; struct unw_frame_info frame; - u64 *prev_pfs_loc; /* state for WAR for old spinlock ool code */ + unsigned long *prev_pfs_loc; /* state for WAR for old spinlock ool code */ } ia64_backtrace_t; /* Returns non-zero if the PC is in the Interrupt Vector Table */ diff --git a/trunk/arch/ia64/sn/kernel/sn2/timer.c b/trunk/arch/ia64/sn/kernel/sn2/timer.c index 21d6f09e3447..c34efda122e1 100644 --- a/trunk/arch/ia64/sn/kernel/sn2/timer.c +++ b/trunk/arch/ia64/sn/kernel/sn2/timer.c @@ -33,8 +33,6 @@ static struct clocksource clocksource_sn2 = { .rating = 450, .read = read_sn2, .mask = (1LL << 55) - 1, - .mult = 0, - .shift = 10, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -57,9 +55,7 @@ ia64_sn_udelay (unsigned long usecs) void __init sn_timer_init(void) { clocksource_sn2.fsys_mmio = RTC_COUNTER_ADDR; - clocksource_sn2.mult = clocksource_hz2mult(sn_rtc_cycles_per_second, - clocksource_sn2.shift); - clocksource_register(&clocksource_sn2); + clocksource_register_hz(&clocksource_sn2, sn_rtc_cycles_per_second); ia64_udelay = &ia64_sn_udelay; } diff --git a/trunk/arch/ia64/xen/irq_xen.c b/trunk/arch/ia64/xen/irq_xen.c index 108bb858acf2..b279e142c633 100644 --- a/trunk/arch/ia64/xen/irq_xen.c +++ b/trunk/arch/ia64/xen/irq_xen.c @@ -92,6 +92,8 @@ static unsigned short saved_irq_cnt; static int xen_slab_ready; #ifdef CONFIG_SMP +#include + /* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ, * it ends up to issue several memory accesses upon percpu data and * thus adds unnecessary traffic to other paths. @@ -99,7 +101,13 @@ static int xen_slab_ready; static irqreturn_t xen_dummy_handler(int irq, void *dev_id) { + return IRQ_HANDLED; +} +static irqreturn_t +xen_resched_handler(int irq, void *dev_id) +{ + scheduler_ipi(); return IRQ_HANDLED; } @@ -110,7 +118,7 @@ static struct irqaction xen_ipi_irqaction = { }; static struct irqaction xen_resched_irqaction = { - .handler = xen_dummy_handler, + .handler = xen_resched_handler, .flags = IRQF_DISABLED, .name = "resched" }; diff --git a/trunk/arch/m32r/kernel/smp.c b/trunk/arch/m32r/kernel/smp.c index 31cef20b2996..fc10b39893d4 100644 --- a/trunk/arch/m32r/kernel/smp.c +++ b/trunk/arch/m32r/kernel/smp.c @@ -122,8 +122,6 @@ void smp_send_reschedule(int cpu_id) * * Description: This routine executes on CPU which received * 'RESCHEDULE_IPI'. 
- * Rescheduling is processed at the exit of interrupt - * operation. * * Born on Date: 2002.02.05 * @@ -138,7 +136,7 @@ void smp_send_reschedule(int cpu_id) *==========================================================================*/ void smp_reschedule_interrupt(void) { - /* nothing to do */ + scheduler_ipi(); } /*==========================================================================* diff --git a/trunk/arch/m32r/kernel/vmlinux.lds.S b/trunk/arch/m32r/kernel/vmlinux.lds.S index c194d64cdbb9..cf95aec77460 100644 --- a/trunk/arch/m32r/kernel/vmlinux.lds.S +++ b/trunk/arch/m32r/kernel/vmlinux.lds.S @@ -44,6 +44,7 @@ SECTIONS EXCEPTION_TABLE(16) NOTES + _sdata = .; /* Start of data section */ RODATA RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE) _edata = .; /* End of data section */ diff --git a/trunk/arch/m68k/atari/atakeyb.c b/trunk/arch/m68k/atari/atakeyb.c index b995513d527f..95022b04b62d 100644 --- a/trunk/arch/m68k/atari/atakeyb.c +++ b/trunk/arch/m68k/atari/atakeyb.c @@ -36,13 +36,10 @@ /* Hook for MIDI serial driver */ void (*atari_MIDI_interrupt_hook) (void); -/* Hook for mouse driver */ -void (*atari_mouse_interrupt_hook) (char *); /* Hook for keyboard inputdev driver */ void (*atari_input_keyboard_interrupt_hook) (unsigned char, char); /* Hook for mouse inputdev driver */ void (*atari_input_mouse_interrupt_hook) (char *); -EXPORT_SYMBOL(atari_mouse_interrupt_hook); EXPORT_SYMBOL(atari_input_keyboard_interrupt_hook); EXPORT_SYMBOL(atari_input_mouse_interrupt_hook); @@ -263,8 +260,8 @@ static irqreturn_t atari_keyboard_interrupt(int irq, void *dummy) kb_state.buf[kb_state.len++] = scancode; if (kb_state.len == 3) { kb_state.state = KEYBOARD; - if (atari_mouse_interrupt_hook) - atari_mouse_interrupt_hook(kb_state.buf); + if (atari_input_mouse_interrupt_hook) + atari_input_mouse_interrupt_hook(kb_state.buf); } break; @@ -575,7 +572,7 @@ int atari_keyb_init(void) kb_state.len = 0; error = request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt, - IRQ_TYPE_SLOW, "keyboard/mouse/MIDI", + IRQ_TYPE_SLOW, "keyboard,mouse,MIDI", atari_keyboard_interrupt); if (error) return error; diff --git a/trunk/arch/m68k/atari/stdma.c b/trunk/arch/m68k/atari/stdma.c index 604329fafbb8..ddbf43ca8858 100644 --- a/trunk/arch/m68k/atari/stdma.c +++ b/trunk/arch/m68k/atari/stdma.c @@ -180,7 +180,7 @@ void __init stdma_init(void) { stdma_isr = NULL; if (request_irq(IRQ_MFP_FDC, stdma_int, IRQ_TYPE_SLOW | IRQF_SHARED, - "ST-DMA: floppy/ACSI/IDE/Falcon-SCSI", stdma_int)) + "ST-DMA floppy,ACSI,IDE,Falcon-SCSI", stdma_int)) pr_err("Couldn't register ST-DMA interrupt\n"); } diff --git a/trunk/arch/m68k/include/asm/atarikb.h b/trunk/arch/m68k/include/asm/atarikb.h index 546e7da5804f..68f3622bf591 100644 --- a/trunk/arch/m68k/include/asm/atarikb.h +++ b/trunk/arch/m68k/include/asm/atarikb.h @@ -34,8 +34,6 @@ void ikbd_joystick_disable(void); /* Hook for MIDI serial driver */ extern void (*atari_MIDI_interrupt_hook) (void); -/* Hook for mouse driver */ -extern void (*atari_mouse_interrupt_hook) (char *); /* Hook for keyboard inputdev driver */ extern void (*atari_input_keyboard_interrupt_hook) (unsigned char, char); /* Hook for mouse inputdev driver */ diff --git a/trunk/arch/m68k/include/asm/bitops_mm.h b/trunk/arch/m68k/include/asm/bitops_mm.h index 9d69f6e62365..e9020f88a748 100644 --- a/trunk/arch/m68k/include/asm/bitops_mm.h +++ b/trunk/arch/m68k/include/asm/bitops_mm.h @@ -181,14 +181,15 @@ static inline int find_first_zero_bit(const unsigned long *vaddr, { const unsigned long *p = vaddr; int res = 32; + unsigned 
int words; unsigned long num; if (!size) return 0; - size = (size + 31) >> 5; + words = (size + 31) >> 5; while (!(num = ~*p++)) { - if (!--size) + if (!--words) goto out; } @@ -196,7 +197,8 @@ static inline int find_first_zero_bit(const unsigned long *vaddr, : "=d" (res) : "d" (num & -num)); res ^= 31; out: - return ((long)p - (long)vaddr - 4) * 8 + res; + res += ((long)p - (long)vaddr - 4) * 8; + return res < size ? res : size; } static inline int find_next_zero_bit(const unsigned long *vaddr, int size, @@ -215,27 +217,32 @@ static inline int find_next_zero_bit(const unsigned long *vaddr, int size, /* Look for zero in first longword */ __asm__ __volatile__ ("bfffo %1{#0,#0},%0" : "=d" (res) : "d" (num & -num)); - if (res < 32) - return offset + (res ^ 31); + if (res < 32) { + offset += res ^ 31; + return offset < size ? offset : size; + } offset += 32; + + if (offset >= size) + return size; } /* No zero yet, search remaining full bytes for a zero */ - res = find_first_zero_bit(p, size - ((long)p - (long)vaddr) * 8); - return offset + res; + return offset + find_first_zero_bit(p, size - offset); } static inline int find_first_bit(const unsigned long *vaddr, unsigned size) { const unsigned long *p = vaddr; int res = 32; + unsigned int words; unsigned long num; if (!size) return 0; - size = (size + 31) >> 5; + words = (size + 31) >> 5; while (!(num = *p++)) { - if (!--size) + if (!--words) goto out; } @@ -243,7 +250,8 @@ static inline int find_first_bit(const unsigned long *vaddr, unsigned size) : "=d" (res) : "d" (num & -num)); res ^= 31; out: - return ((long)p - (long)vaddr - 4) * 8 + res; + res += ((long)p - (long)vaddr - 4) * 8; + return res < size ? res : size; } static inline int find_next_bit(const unsigned long *vaddr, int size, @@ -262,13 +270,17 @@ static inline int find_next_bit(const unsigned long *vaddr, int size, /* Look for one in first longword */ __asm__ __volatile__ ("bfffo %1{#0,#0},%0" : "=d" (res) : "d" (num & -num)); - if (res < 32) - return offset + (res ^ 31); + if (res < 32) { + offset += res ^ 31; + return offset < size ? offset : size; + } offset += 32; + + if (offset >= size) + return size; } /* No one yet, search remaining full bytes for a one */ - res = find_first_bit(p, size - ((long)p - (long)vaddr) * 8); - return offset + res; + return offset + find_first_bit(p, size - offset); } /* @@ -366,23 +378,25 @@ static inline int test_bit_le(int nr, const void *vaddr) static inline int find_first_zero_bit_le(const void *vaddr, unsigned size) { const unsigned long *p = vaddr, *addr = vaddr; - int res; + int res = 0; + unsigned int words; if (!size) return 0; - size = (size >> 5) + ((size & 31) > 0); - while (*p++ == ~0UL) - { - if (--size == 0) - return (p - addr) << 5; + words = (size >> 5) + ((size & 31) > 0); + while (*p++ == ~0UL) { + if (--words == 0) + goto out; } --p; for (res = 0; res < 32; res++) if (!test_bit_le(res, p)) break; - return (p - addr) * 32 + res; +out: + res += (p - addr) * 32; + return res < size ? res : size; } static inline unsigned long find_next_zero_bit_le(const void *addr, @@ -400,10 +414,15 @@ static inline unsigned long find_next_zero_bit_le(const void *addr, offset -= bit; /* Look for zero in first longword */ for (res = bit; res < 32; res++) - if (!test_bit_le(res, p)) - return offset + res; + if (!test_bit_le(res, p)) { + offset += res; + return offset < size ? 
offset : size; + } p++; offset += 32; + + if (offset >= size) + return size; } /* No zero yet, search remaining full bytes for a zero */ return offset + find_first_zero_bit_le(p, size - offset); @@ -412,22 +431,25 @@ static inline unsigned long find_next_zero_bit_le(const void *addr, static inline int find_first_bit_le(const void *vaddr, unsigned size) { const unsigned long *p = vaddr, *addr = vaddr; - int res; + int res = 0; + unsigned int words; if (!size) return 0; - size = (size >> 5) + ((size & 31) > 0); + words = (size >> 5) + ((size & 31) > 0); while (*p++ == 0UL) { - if (--size == 0) - return (p - addr) << 5; + if (--words == 0) + goto out; } --p; for (res = 0; res < 32; res++) if (test_bit_le(res, p)) break; - return (p - addr) * 32 + res; +out: + res += (p - addr) * 32; + return res < size ? res : size; } static inline unsigned long find_next_bit_le(const void *addr, @@ -445,10 +467,15 @@ static inline unsigned long find_next_bit_le(const void *addr, offset -= bit; /* Look for one in first longword */ for (res = bit; res < 32; res++) - if (test_bit_le(res, p)) - return offset + res; + if (test_bit_le(res, p)) { + offset += res; + return offset < size ? offset : size; + } p++; offset += 32; + + if (offset >= size) + return size; } /* No set bit yet, search remaining full bytes for a set bit */ return offset + find_first_bit_le(p, size - offset); diff --git a/trunk/arch/m68k/include/asm/unistd.h b/trunk/arch/m68k/include/asm/unistd.h index 26d851d385bb..f3b649de2a1b 100644 --- a/trunk/arch/m68k/include/asm/unistd.h +++ b/trunk/arch/m68k/include/asm/unistd.h @@ -22,7 +22,7 @@ #define __NR_mknod 14 #define __NR_chmod 15 #define __NR_chown 16 -#define __NR_break 17 +/*#define __NR_break 17*/ #define __NR_oldstat 18 #define __NR_lseek 19 #define __NR_getpid 20 @@ -36,11 +36,11 @@ #define __NR_oldfstat 28 #define __NR_pause 29 #define __NR_utime 30 -#define __NR_stty 31 -#define __NR_gtty 32 +/*#define __NR_stty 31*/ +/*#define __NR_gtty 32*/ #define __NR_access 33 #define __NR_nice 34 -#define __NR_ftime 35 +/*#define __NR_ftime 35*/ #define __NR_sync 36 #define __NR_kill 37 #define __NR_rename 38 @@ -49,7 +49,7 @@ #define __NR_dup 41 #define __NR_pipe 42 #define __NR_times 43 -#define __NR_prof 44 +/*#define __NR_prof 44*/ #define __NR_brk 45 #define __NR_setgid 46 #define __NR_getgid 47 @@ -58,13 +58,13 @@ #define __NR_getegid 50 #define __NR_acct 51 #define __NR_umount2 52 -#define __NR_lock 53 +/*#define __NR_lock 53*/ #define __NR_ioctl 54 #define __NR_fcntl 55 -#define __NR_mpx 56 +/*#define __NR_mpx 56*/ #define __NR_setpgid 57 -#define __NR_ulimit 58 -#define __NR_oldolduname 59 +/*#define __NR_ulimit 58*/ +/*#define __NR_oldolduname 59*/ #define __NR_umask 60 #define __NR_chroot 61 #define __NR_ustat 62 @@ -103,10 +103,10 @@ #define __NR_fchown 95 #define __NR_getpriority 96 #define __NR_setpriority 97 -#define __NR_profil 98 +/*#define __NR_profil 98*/ #define __NR_statfs 99 #define __NR_fstatfs 100 -#define __NR_ioperm 101 +/*#define __NR_ioperm 101*/ #define __NR_socketcall 102 #define __NR_syslog 103 #define __NR_setitimer 104 @@ -114,11 +114,11 @@ #define __NR_stat 106 #define __NR_lstat 107 #define __NR_fstat 108 -#define __NR_olduname 109 -#define __NR_iopl /* 110 */ not supported +/*#define __NR_olduname 109*/ +/*#define __NR_iopl 110*/ /* not supported */ #define __NR_vhangup 111 -#define __NR_idle /* 112 */ Obsolete -#define __NR_vm86 /* 113 */ not supported +/*#define __NR_idle 112*/ /* Obsolete */ +/*#define __NR_vm86 113*/ /* not supported */ #define __NR_wait4 
114 #define __NR_swapoff 115 #define __NR_sysinfo 116 @@ -132,17 +132,17 @@ #define __NR_adjtimex 124 #define __NR_mprotect 125 #define __NR_sigprocmask 126 -#define __NR_create_module 127 +/*#define __NR_create_module 127*/ #define __NR_init_module 128 #define __NR_delete_module 129 -#define __NR_get_kernel_syms 130 +/*#define __NR_get_kernel_syms 130*/ #define __NR_quotactl 131 #define __NR_getpgid 132 #define __NR_fchdir 133 #define __NR_bdflush 134 #define __NR_sysfs 135 #define __NR_personality 136 -#define __NR_afs_syscall 137 /* Syscall for Andrew File System */ +/*#define __NR_afs_syscall 137*/ /* Syscall for Andrew File System */ #define __NR_setfsuid 138 #define __NR_setfsgid 139 #define __NR__llseek 140 @@ -172,7 +172,7 @@ #define __NR_setresuid 164 #define __NR_getresuid 165 #define __NR_getpagesize 166 -#define __NR_query_module 167 +/*#define __NR_query_module 167*/ #define __NR_poll 168 #define __NR_nfsservctl 169 #define __NR_setresgid 170 @@ -193,8 +193,8 @@ #define __NR_capset 185 #define __NR_sigaltstack 186 #define __NR_sendfile 187 -#define __NR_getpmsg 188 /* some people actually want streams */ -#define __NR_putpmsg 189 /* some people actually want streams */ +/*#define __NR_getpmsg 188*/ /* some people actually want streams */ +/*#define __NR_putpmsg 189*/ /* some people actually want streams */ #define __NR_vfork 190 #define __NR_ugetrlimit 191 #define __NR_mmap2 192 @@ -223,6 +223,8 @@ #define __NR_setfsuid32 215 #define __NR_setfsgid32 216 #define __NR_pivot_root 217 +/* 218*/ +/* 219*/ #define __NR_getdents64 220 #define __NR_gettid 221 #define __NR_tkill 222 @@ -281,7 +283,7 @@ #define __NR_mq_notify 275 #define __NR_mq_getsetattr 276 #define __NR_waitid 277 -#define __NR_vserver 278 +/*#define __NR_vserver 278*/ #define __NR_add_key 279 #define __NR_request_key 280 #define __NR_keyctl 281 @@ -343,10 +345,14 @@ #define __NR_fanotify_init 337 #define __NR_fanotify_mark 338 #define __NR_prlimit64 339 +#define __NR_name_to_handle_at 340 +#define __NR_open_by_handle_at 341 +#define __NR_clock_adjtime 342 +#define __NR_syncfs 343 #ifdef __KERNEL__ -#define NR_syscalls 340 +#define NR_syscalls 344 #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR diff --git a/trunk/arch/m68k/kernel/Makefile_mm b/trunk/arch/m68k/kernel/Makefile_mm index 55d5d6b680a2..aced67804579 100644 --- a/trunk/arch/m68k/kernel/Makefile_mm +++ b/trunk/arch/m68k/kernel/Makefile_mm @@ -10,7 +10,7 @@ endif extra-y += vmlinux.lds obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o module.o \ - sys_m68k.o time.o setup.o m68k_ksyms.o devres.o + sys_m68k.o time.o setup.o m68k_ksyms.o devres.o syscalltable.o devres-y = ../../../kernel/irq/devres.o diff --git a/trunk/arch/m68k/kernel/entry_mm.S b/trunk/arch/m68k/kernel/entry_mm.S index 1559dea36e55..bd0ec05263b2 100644 --- a/trunk/arch/m68k/kernel/entry_mm.S +++ b/trunk/arch/m68k/kernel/entry_mm.S @@ -407,347 +407,3 @@ resume: rts -.data -ALIGN -sys_call_table: - .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ - .long sys_exit - .long sys_fork - .long sys_read - .long sys_write - .long sys_open /* 5 */ - .long sys_close - .long sys_waitpid - .long sys_creat - .long sys_link - .long sys_unlink /* 10 */ - .long sys_execve - .long sys_chdir - .long sys_time - .long sys_mknod - .long sys_chmod /* 15 */ - .long sys_chown16 - .long sys_ni_syscall /* old break syscall holder */ - .long sys_stat - .long sys_lseek - .long sys_getpid /* 20 */ - .long sys_mount - .long sys_oldumount - .long sys_setuid16 - 
.long sys_getuid16 - .long sys_stime /* 25 */ - .long sys_ptrace - .long sys_alarm - .long sys_fstat - .long sys_pause - .long sys_utime /* 30 */ - .long sys_ni_syscall /* old stty syscall holder */ - .long sys_ni_syscall /* old gtty syscall holder */ - .long sys_access - .long sys_nice - .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */ - .long sys_sync - .long sys_kill - .long sys_rename - .long sys_mkdir - .long sys_rmdir /* 40 */ - .long sys_dup - .long sys_pipe - .long sys_times - .long sys_ni_syscall /* old prof syscall holder */ - .long sys_brk /* 45 */ - .long sys_setgid16 - .long sys_getgid16 - .long sys_signal - .long sys_geteuid16 - .long sys_getegid16 /* 50 */ - .long sys_acct - .long sys_umount /* recycled never used phys() */ - .long sys_ni_syscall /* old lock syscall holder */ - .long sys_ioctl - .long sys_fcntl /* 55 */ - .long sys_ni_syscall /* old mpx syscall holder */ - .long sys_setpgid - .long sys_ni_syscall /* old ulimit syscall holder */ - .long sys_ni_syscall - .long sys_umask /* 60 */ - .long sys_chroot - .long sys_ustat - .long sys_dup2 - .long sys_getppid - .long sys_getpgrp /* 65 */ - .long sys_setsid - .long sys_sigaction - .long sys_sgetmask - .long sys_ssetmask - .long sys_setreuid16 /* 70 */ - .long sys_setregid16 - .long sys_sigsuspend - .long sys_sigpending - .long sys_sethostname - .long sys_setrlimit /* 75 */ - .long sys_old_getrlimit - .long sys_getrusage - .long sys_gettimeofday - .long sys_settimeofday - .long sys_getgroups16 /* 80 */ - .long sys_setgroups16 - .long sys_old_select - .long sys_symlink - .long sys_lstat - .long sys_readlink /* 85 */ - .long sys_uselib - .long sys_swapon - .long sys_reboot - .long sys_old_readdir - .long sys_old_mmap /* 90 */ - .long sys_munmap - .long sys_truncate - .long sys_ftruncate - .long sys_fchmod - .long sys_fchown16 /* 95 */ - .long sys_getpriority - .long sys_setpriority - .long sys_ni_syscall /* old profil syscall holder */ - .long sys_statfs - .long sys_fstatfs /* 100 */ - .long sys_ni_syscall /* ioperm for i386 */ - .long sys_socketcall - .long sys_syslog - .long sys_setitimer - .long sys_getitimer /* 105 */ - .long sys_newstat - .long sys_newlstat - .long sys_newfstat - .long sys_ni_syscall - .long sys_ni_syscall /* 110 */ /* iopl for i386 */ - .long sys_vhangup - .long sys_ni_syscall /* obsolete idle() syscall */ - .long sys_ni_syscall /* vm86old for i386 */ - .long sys_wait4 - .long sys_swapoff /* 115 */ - .long sys_sysinfo - .long sys_ipc - .long sys_fsync - .long sys_sigreturn - .long sys_clone /* 120 */ - .long sys_setdomainname - .long sys_newuname - .long sys_cacheflush /* modify_ldt for i386 */ - .long sys_adjtimex - .long sys_mprotect /* 125 */ - .long sys_sigprocmask - .long sys_ni_syscall /* old "create_module" */ - .long sys_init_module - .long sys_delete_module - .long sys_ni_syscall /* 130 - old "get_kernel_syms" */ - .long sys_quotactl - .long sys_getpgid - .long sys_fchdir - .long sys_bdflush - .long sys_sysfs /* 135 */ - .long sys_personality - .long sys_ni_syscall /* for afs_syscall */ - .long sys_setfsuid16 - .long sys_setfsgid16 - .long sys_llseek /* 140 */ - .long sys_getdents - .long sys_select - .long sys_flock - .long sys_msync - .long sys_readv /* 145 */ - .long sys_writev - .long sys_getsid - .long sys_fdatasync - .long sys_sysctl - .long sys_mlock /* 150 */ - .long sys_munlock - .long sys_mlockall - .long sys_munlockall - .long sys_sched_setparam - .long sys_sched_getparam /* 155 */ - .long sys_sched_setscheduler - .long sys_sched_getscheduler - .long sys_sched_yield - 
.long sys_sched_get_priority_max - .long sys_sched_get_priority_min /* 160 */ - .long sys_sched_rr_get_interval - .long sys_nanosleep - .long sys_mremap - .long sys_setresuid16 - .long sys_getresuid16 /* 165 */ - .long sys_getpagesize - .long sys_ni_syscall /* old sys_query_module */ - .long sys_poll - .long sys_nfsservctl - .long sys_setresgid16 /* 170 */ - .long sys_getresgid16 - .long sys_prctl - .long sys_rt_sigreturn - .long sys_rt_sigaction - .long sys_rt_sigprocmask /* 175 */ - .long sys_rt_sigpending - .long sys_rt_sigtimedwait - .long sys_rt_sigqueueinfo - .long sys_rt_sigsuspend - .long sys_pread64 /* 180 */ - .long sys_pwrite64 - .long sys_lchown16; - .long sys_getcwd - .long sys_capget - .long sys_capset /* 185 */ - .long sys_sigaltstack - .long sys_sendfile - .long sys_ni_syscall /* streams1 */ - .long sys_ni_syscall /* streams2 */ - .long sys_vfork /* 190 */ - .long sys_getrlimit - .long sys_mmap2 - .long sys_truncate64 - .long sys_ftruncate64 - .long sys_stat64 /* 195 */ - .long sys_lstat64 - .long sys_fstat64 - .long sys_chown - .long sys_getuid - .long sys_getgid /* 200 */ - .long sys_geteuid - .long sys_getegid - .long sys_setreuid - .long sys_setregid - .long sys_getgroups /* 205 */ - .long sys_setgroups - .long sys_fchown - .long sys_setresuid - .long sys_getresuid - .long sys_setresgid /* 210 */ - .long sys_getresgid - .long sys_lchown - .long sys_setuid - .long sys_setgid - .long sys_setfsuid /* 215 */ - .long sys_setfsgid - .long sys_pivot_root - .long sys_ni_syscall - .long sys_ni_syscall - .long sys_getdents64 /* 220 */ - .long sys_gettid - .long sys_tkill - .long sys_setxattr - .long sys_lsetxattr - .long sys_fsetxattr /* 225 */ - .long sys_getxattr - .long sys_lgetxattr - .long sys_fgetxattr - .long sys_listxattr - .long sys_llistxattr /* 230 */ - .long sys_flistxattr - .long sys_removexattr - .long sys_lremovexattr - .long sys_fremovexattr - .long sys_futex /* 235 */ - .long sys_sendfile64 - .long sys_mincore - .long sys_madvise - .long sys_fcntl64 - .long sys_readahead /* 240 */ - .long sys_io_setup - .long sys_io_destroy - .long sys_io_getevents - .long sys_io_submit - .long sys_io_cancel /* 245 */ - .long sys_fadvise64 - .long sys_exit_group - .long sys_lookup_dcookie - .long sys_epoll_create - .long sys_epoll_ctl /* 250 */ - .long sys_epoll_wait - .long sys_remap_file_pages - .long sys_set_tid_address - .long sys_timer_create - .long sys_timer_settime /* 255 */ - .long sys_timer_gettime - .long sys_timer_getoverrun - .long sys_timer_delete - .long sys_clock_settime - .long sys_clock_gettime /* 260 */ - .long sys_clock_getres - .long sys_clock_nanosleep - .long sys_statfs64 - .long sys_fstatfs64 - .long sys_tgkill /* 265 */ - .long sys_utimes - .long sys_fadvise64_64 - .long sys_mbind - .long sys_get_mempolicy - .long sys_set_mempolicy /* 270 */ - .long sys_mq_open - .long sys_mq_unlink - .long sys_mq_timedsend - .long sys_mq_timedreceive - .long sys_mq_notify /* 275 */ - .long sys_mq_getsetattr - .long sys_waitid - .long sys_ni_syscall /* for sys_vserver */ - .long sys_add_key - .long sys_request_key /* 280 */ - .long sys_keyctl - .long sys_ioprio_set - .long sys_ioprio_get - .long sys_inotify_init - .long sys_inotify_add_watch /* 285 */ - .long sys_inotify_rm_watch - .long sys_migrate_pages - .long sys_openat - .long sys_mkdirat - .long sys_mknodat /* 290 */ - .long sys_fchownat - .long sys_futimesat - .long sys_fstatat64 - .long sys_unlinkat - .long sys_renameat /* 295 */ - .long sys_linkat - .long sys_symlinkat - .long sys_readlinkat - .long sys_fchmodat 
- .long sys_faccessat /* 300 */ - .long sys_ni_syscall /* Reserved for pselect6 */ - .long sys_ni_syscall /* Reserved for ppoll */ - .long sys_unshare - .long sys_set_robust_list - .long sys_get_robust_list /* 305 */ - .long sys_splice - .long sys_sync_file_range - .long sys_tee - .long sys_vmsplice - .long sys_move_pages /* 310 */ - .long sys_sched_setaffinity - .long sys_sched_getaffinity - .long sys_kexec_load - .long sys_getcpu - .long sys_epoll_pwait /* 315 */ - .long sys_utimensat - .long sys_signalfd - .long sys_timerfd_create - .long sys_eventfd - .long sys_fallocate /* 320 */ - .long sys_timerfd_settime - .long sys_timerfd_gettime - .long sys_signalfd4 - .long sys_eventfd2 - .long sys_epoll_create1 /* 325 */ - .long sys_dup3 - .long sys_pipe2 - .long sys_inotify_init1 - .long sys_preadv - .long sys_pwritev /* 330 */ - .long sys_rt_tgsigqueueinfo - .long sys_perf_event_open - .long sys_get_thread_area - .long sys_set_thread_area - .long sys_atomic_cmpxchg_32 /* 335 */ - .long sys_atomic_barrier - .long sys_fanotify_init - .long sys_fanotify_mark - .long sys_prlimit64 - diff --git a/trunk/arch/m68k/kernel/syscalltable.S b/trunk/arch/m68k/kernel/syscalltable.S index 79b1ed198c07..5909e392cb1e 100644 --- a/trunk/arch/m68k/kernel/syscalltable.S +++ b/trunk/arch/m68k/kernel/syscalltable.S @@ -1,6 +1,4 @@ /* - * linux/arch/m68knommu/kernel/syscalltable.S - * * Copyright (C) 2002, Greg Ungerer (gerg@snapgear.com) * * Based on older entry.S files, the following copyrights apply: @@ -9,171 +7,176 @@ * Kenneth Albanowski , * Copyright (C) 2000 Lineo Inc. (www.lineo.com) * Copyright (C) 1991, 1992 Linus Torvalds + * + * Linux/m68k support by Hamish Macdonald */ #include #include -#include -.text +#ifndef CONFIG_MMU +#define sys_mmap2 sys_mmap_pgoff +#endif + +.section .rodata ALIGN ENTRY(sys_call_table) - .long sys_restart_syscall /* 0 - old "setup()" system call */ + .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ .long sys_exit .long sys_fork .long sys_read .long sys_write - .long sys_open /* 5 */ + .long sys_open /* 5 */ .long sys_close .long sys_waitpid .long sys_creat .long sys_link - .long sys_unlink /* 10 */ + .long sys_unlink /* 10 */ .long sys_execve .long sys_chdir .long sys_time .long sys_mknod - .long sys_chmod /* 15 */ + .long sys_chmod /* 15 */ .long sys_chown16 - .long sys_ni_syscall /* old break syscall holder */ + .long sys_ni_syscall /* old break syscall holder */ .long sys_stat .long sys_lseek - .long sys_getpid /* 20 */ + .long sys_getpid /* 20 */ .long sys_mount .long sys_oldumount .long sys_setuid16 .long sys_getuid16 - .long sys_stime /* 25 */ + .long sys_stime /* 25 */ .long sys_ptrace .long sys_alarm .long sys_fstat .long sys_pause - .long sys_utime /* 30 */ - .long sys_ni_syscall /* old stty syscall holder */ - .long sys_ni_syscall /* old gtty syscall holder */ + .long sys_utime /* 30 */ + .long sys_ni_syscall /* old stty syscall holder */ + .long sys_ni_syscall /* old gtty syscall holder */ .long sys_access .long sys_nice - .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */ + .long sys_ni_syscall /* 35 - old ftime syscall holder */ .long sys_sync .long sys_kill .long sys_rename .long sys_mkdir - .long sys_rmdir /* 40 */ + .long sys_rmdir /* 40 */ .long sys_dup .long sys_pipe .long sys_times - .long sys_ni_syscall /* old prof syscall holder */ - .long sys_brk /* 45 */ + .long sys_ni_syscall /* old prof syscall holder */ + .long sys_brk /* 45 */ .long sys_setgid16 .long sys_getgid16 .long sys_signal .long sys_geteuid16 - 
.long sys_getegid16 /* 50 */ + .long sys_getegid16 /* 50 */ .long sys_acct - .long sys_umount /* recycled never used phys() */ - .long sys_ni_syscall /* old lock syscall holder */ + .long sys_umount /* recycled never used phys() */ + .long sys_ni_syscall /* old lock syscall holder */ .long sys_ioctl - .long sys_fcntl /* 55 */ - .long sys_ni_syscall /* old mpx syscall holder */ + .long sys_fcntl /* 55 */ + .long sys_ni_syscall /* old mpx syscall holder */ .long sys_setpgid - .long sys_ni_syscall /* old ulimit syscall holder */ + .long sys_ni_syscall /* old ulimit syscall holder */ .long sys_ni_syscall - .long sys_umask /* 60 */ + .long sys_umask /* 60 */ .long sys_chroot .long sys_ustat .long sys_dup2 .long sys_getppid - .long sys_getpgrp /* 65 */ + .long sys_getpgrp /* 65 */ .long sys_setsid .long sys_sigaction .long sys_sgetmask .long sys_ssetmask - .long sys_setreuid16 /* 70 */ + .long sys_setreuid16 /* 70 */ .long sys_setregid16 .long sys_sigsuspend .long sys_sigpending .long sys_sethostname - .long sys_setrlimit /* 75 */ + .long sys_setrlimit /* 75 */ .long sys_old_getrlimit .long sys_getrusage .long sys_gettimeofday .long sys_settimeofday - .long sys_getgroups16 /* 80 */ + .long sys_getgroups16 /* 80 */ .long sys_setgroups16 .long sys_old_select .long sys_symlink .long sys_lstat - .long sys_readlink /* 85 */ + .long sys_readlink /* 85 */ .long sys_uselib - .long sys_ni_syscall /* sys_swapon */ + .long sys_swapon .long sys_reboot .long sys_old_readdir - .long sys_old_mmap /* 90 */ + .long sys_old_mmap /* 90 */ .long sys_munmap .long sys_truncate .long sys_ftruncate .long sys_fchmod - .long sys_fchown16 /* 95 */ + .long sys_fchown16 /* 95 */ .long sys_getpriority .long sys_setpriority - .long sys_ni_syscall /* old profil syscall holder */ + .long sys_ni_syscall /* old profil syscall holder */ .long sys_statfs - .long sys_fstatfs /* 100 */ - .long sys_ni_syscall /* ioperm for i386 */ + .long sys_fstatfs /* 100 */ + .long sys_ni_syscall /* ioperm for i386 */ .long sys_socketcall .long sys_syslog .long sys_setitimer - .long sys_getitimer /* 105 */ + .long sys_getitimer /* 105 */ .long sys_newstat .long sys_newlstat .long sys_newfstat .long sys_ni_syscall - .long sys_ni_syscall /* iopl for i386 */ /* 110 */ + .long sys_ni_syscall /* 110 - iopl for i386 */ .long sys_vhangup - .long sys_ni_syscall /* obsolete idle() syscall */ - .long sys_ni_syscall /* vm86old for i386 */ + .long sys_ni_syscall /* obsolete idle() syscall */ + .long sys_ni_syscall /* vm86old for i386 */ .long sys_wait4 - .long sys_ni_syscall /* 115 */ /* sys_swapoff */ + .long sys_swapoff /* 115 */ .long sys_sysinfo .long sys_ipc .long sys_fsync .long sys_sigreturn - .long sys_clone /* 120 */ + .long sys_clone /* 120 */ .long sys_setdomainname .long sys_newuname - .long sys_cacheflush /* modify_ldt for i386 */ + .long sys_cacheflush /* modify_ldt for i386 */ .long sys_adjtimex - .long sys_ni_syscall /* 125 */ /* sys_mprotect */ + .long sys_mprotect /* 125 */ .long sys_sigprocmask - .long sys_ni_syscall /* old "creat_module" */ + .long sys_ni_syscall /* old "create_module" */ .long sys_init_module .long sys_delete_module - .long sys_ni_syscall /* 130: old "get_kernel_syms" */ + .long sys_ni_syscall /* 130 - old "get_kernel_syms" */ .long sys_quotactl .long sys_getpgid .long sys_fchdir .long sys_bdflush - .long sys_sysfs /* 135 */ + .long sys_sysfs /* 135 */ .long sys_personality - .long sys_ni_syscall /* for afs_syscall */ + .long sys_ni_syscall /* for afs_syscall */ .long sys_setfsuid16 .long sys_setfsgid16 - .long sys_llseek 
/* 140 */ + .long sys_llseek /* 140 */ .long sys_getdents .long sys_select .long sys_flock - .long sys_ni_syscall /* sys_msync */ - .long sys_readv /* 145 */ + .long sys_msync + .long sys_readv /* 145 */ .long sys_writev .long sys_getsid .long sys_fdatasync .long sys_sysctl - .long sys_ni_syscall /* 150 */ /* sys_mlock */ - .long sys_ni_syscall /* sys_munlock */ - .long sys_ni_syscall /* sys_mlockall */ - .long sys_ni_syscall /* sys_munlockall */ + .long sys_mlock /* 150 */ + .long sys_munlock + .long sys_mlockall + .long sys_munlockall .long sys_sched_setparam - .long sys_sched_getparam /* 155 */ + .long sys_sched_getparam /* 155 */ .long sys_sched_setscheduler .long sys_sched_getscheduler .long sys_sched_yield @@ -181,124 +184,124 @@ ENTRY(sys_call_table) .long sys_sched_get_priority_min /* 160 */ .long sys_sched_rr_get_interval .long sys_nanosleep - .long sys_ni_syscall /* sys_mremap */ + .long sys_mremap .long sys_setresuid16 - .long sys_getresuid16 /* 165 */ - .long sys_getpagesize /* sys_getpagesize */ - .long sys_ni_syscall /* old "query_module" */ + .long sys_getresuid16 /* 165 */ + .long sys_getpagesize + .long sys_ni_syscall /* old "query_module" */ .long sys_poll - .long sys_ni_syscall /* sys_nfsservctl */ - .long sys_setresgid16 /* 170 */ + .long sys_nfsservctl + .long sys_setresgid16 /* 170 */ .long sys_getresgid16 .long sys_prctl .long sys_rt_sigreturn .long sys_rt_sigaction - .long sys_rt_sigprocmask /* 175 */ + .long sys_rt_sigprocmask /* 175 */ .long sys_rt_sigpending .long sys_rt_sigtimedwait .long sys_rt_sigqueueinfo .long sys_rt_sigsuspend - .long sys_pread64 /* 180 */ + .long sys_pread64 /* 180 */ .long sys_pwrite64 .long sys_lchown16 .long sys_getcwd .long sys_capget - .long sys_capset /* 185 */ + .long sys_capset /* 185 */ .long sys_sigaltstack .long sys_sendfile - .long sys_ni_syscall /* streams1 */ - .long sys_ni_syscall /* streams2 */ - .long sys_vfork /* 190 */ + .long sys_ni_syscall /* streams1 */ + .long sys_ni_syscall /* streams2 */ + .long sys_vfork /* 190 */ .long sys_getrlimit - .long sys_mmap_pgoff + .long sys_mmap2 .long sys_truncate64 .long sys_ftruncate64 - .long sys_stat64 /* 195 */ + .long sys_stat64 /* 195 */ .long sys_lstat64 .long sys_fstat64 .long sys_chown .long sys_getuid - .long sys_getgid /* 200 */ + .long sys_getgid /* 200 */ .long sys_geteuid .long sys_getegid .long sys_setreuid .long sys_setregid - .long sys_getgroups /* 205 */ + .long sys_getgroups /* 205 */ .long sys_setgroups .long sys_fchown .long sys_setresuid .long sys_getresuid - .long sys_setresgid /* 210 */ + .long sys_setresgid /* 210 */ .long sys_getresgid .long sys_lchown .long sys_setuid .long sys_setgid - .long sys_setfsuid /* 215 */ + .long sys_setfsuid /* 215 */ .long sys_setfsgid .long sys_pivot_root .long sys_ni_syscall .long sys_ni_syscall - .long sys_getdents64 /* 220 */ + .long sys_getdents64 /* 220 */ .long sys_gettid .long sys_tkill .long sys_setxattr .long sys_lsetxattr - .long sys_fsetxattr /* 225 */ + .long sys_fsetxattr /* 225 */ .long sys_getxattr .long sys_lgetxattr .long sys_fgetxattr .long sys_listxattr - .long sys_llistxattr /* 230 */ + .long sys_llistxattr /* 230 */ .long sys_flistxattr .long sys_removexattr .long sys_lremovexattr .long sys_fremovexattr - .long sys_futex /* 235 */ + .long sys_futex /* 235 */ .long sys_sendfile64 - .long sys_ni_syscall /* sys_mincore */ - .long sys_ni_syscall /* sys_madvise */ + .long sys_mincore + .long sys_madvise .long sys_fcntl64 - .long sys_readahead /* 240 */ + .long sys_readahead /* 240 */ .long sys_io_setup .long 
sys_io_destroy .long sys_io_getevents .long sys_io_submit - .long sys_io_cancel /* 245 */ + .long sys_io_cancel /* 245 */ .long sys_fadvise64 .long sys_exit_group .long sys_lookup_dcookie .long sys_epoll_create - .long sys_epoll_ctl /* 250 */ + .long sys_epoll_ctl /* 250 */ .long sys_epoll_wait - .long sys_ni_syscall /* sys_remap_file_pages */ + .long sys_remap_file_pages .long sys_set_tid_address .long sys_timer_create - .long sys_timer_settime /* 255 */ + .long sys_timer_settime /* 255 */ .long sys_timer_gettime .long sys_timer_getoverrun .long sys_timer_delete .long sys_clock_settime - .long sys_clock_gettime /* 260 */ + .long sys_clock_gettime /* 260 */ .long sys_clock_getres .long sys_clock_nanosleep .long sys_statfs64 .long sys_fstatfs64 - .long sys_tgkill /* 265 */ + .long sys_tgkill /* 265 */ .long sys_utimes .long sys_fadvise64_64 - .long sys_mbind + .long sys_mbind .long sys_get_mempolicy - .long sys_set_mempolicy /* 270 */ + .long sys_set_mempolicy /* 270 */ .long sys_mq_open .long sys_mq_unlink .long sys_mq_timedsend .long sys_mq_timedreceive - .long sys_mq_notify /* 275 */ + .long sys_mq_notify /* 275 */ .long sys_mq_getsetattr .long sys_waitid - .long sys_ni_syscall /* for sys_vserver */ + .long sys_ni_syscall /* for sys_vserver */ .long sys_add_key - .long sys_request_key /* 280 */ + .long sys_request_key /* 280 */ .long sys_keyctl .long sys_ioprio_set .long sys_ioprio_get @@ -319,8 +322,8 @@ ENTRY(sys_call_table) .long sys_readlinkat .long sys_fchmodat .long sys_faccessat /* 300 */ - .long sys_ni_syscall /* Reserved for pselect6 */ - .long sys_ni_syscall /* Reserved for ppoll */ + .long sys_pselect6 + .long sys_ppoll .long sys_unshare .long sys_set_robust_list .long sys_get_robust_list /* 305 */ @@ -358,8 +361,8 @@ ENTRY(sys_call_table) .long sys_fanotify_init .long sys_fanotify_mark .long sys_prlimit64 - - .rept NR_syscalls-(.-sys_call_table)/4 - .long sys_ni_syscall - .endr + .long sys_name_to_handle_at /* 340 */ + .long sys_open_by_handle_at + .long sys_clock_adjtime + .long sys_syncfs diff --git a/trunk/arch/m68k/kernel/vmlinux-std.lds b/trunk/arch/m68k/kernel/vmlinux-std.lds index 878be5f38cad..d0993594f558 100644 --- a/trunk/arch/m68k/kernel/vmlinux-std.lds +++ b/trunk/arch/m68k/kernel/vmlinux-std.lds @@ -25,6 +25,8 @@ SECTIONS EXCEPTION_TABLE(16) + _sdata = .; /* Start of data section */ + RODATA RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE) diff --git a/trunk/arch/m68k/kernel/vmlinux-sun3.lds b/trunk/arch/m68k/kernel/vmlinux-sun3.lds index 1ad6b7ad2c17..8080469ee6c1 100644 --- a/trunk/arch/m68k/kernel/vmlinux-sun3.lds +++ b/trunk/arch/m68k/kernel/vmlinux-sun3.lds @@ -25,6 +25,7 @@ SECTIONS _etext = .; /* End of text section */ EXCEPTION_TABLE(16) :data + _sdata = .; /* Start of rw data section */ RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE) :data /* End of data goes *here* so that freeing init code works properly. 
*/ _edata = .; diff --git a/trunk/arch/m68k/mm/motorola.c b/trunk/arch/m68k/mm/motorola.c index 02b7a03e4226..8b3db1c587fc 100644 --- a/trunk/arch/m68k/mm/motorola.c +++ b/trunk/arch/m68k/mm/motorola.c @@ -300,6 +300,8 @@ void __init paging_init(void) zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT; free_area_init_node(i, zones_size, m68k_memory[i].addr >> PAGE_SHIFT, NULL); + if (node_present_pages(i)) + node_set_state(i, N_NORMAL_MEMORY); } } diff --git a/trunk/arch/microblaze/Kconfig b/trunk/arch/microblaze/Kconfig index 851b3bf6e962..eccdefe70d4e 100644 --- a/trunk/arch/microblaze/Kconfig +++ b/trunk/arch/microblaze/Kconfig @@ -6,7 +6,6 @@ config MICROBLAZE select HAVE_FUNCTION_GRAPH_TRACER select HAVE_DYNAMIC_FTRACE select HAVE_FTRACE_MCOUNT_RECORD - select USB_ARCH_HAS_EHCI select ARCH_WANT_OPTIONAL_GPIOLIB select HAVE_OPROFILE select HAVE_ARCH_KGDB diff --git a/trunk/arch/microblaze/kernel/timer.c b/trunk/arch/microblaze/kernel/timer.c index d8a214f11ac2..e5550ce4e0eb 100644 --- a/trunk/arch/microblaze/kernel/timer.c +++ b/trunk/arch/microblaze/kernel/timer.c @@ -217,16 +217,12 @@ static struct clocksource clocksource_microblaze = { .rating = 300, .read = microblaze_read, .mask = CLOCKSOURCE_MASK(32), - .shift = 8, /* I can shift it */ .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; static int __init microblaze_clocksource_init(void) { - clocksource_microblaze.mult = - clocksource_hz2mult(timer_clock_freq, - clocksource_microblaze.shift); - if (clocksource_register(&clocksource_microblaze)) + if (clocksource_register_hz(&clocksource_microblaze, timer_clock_freq)) panic("failed to register clocksource"); /* stop timer1 */ diff --git a/trunk/arch/mips/Kbuild.platforms b/trunk/arch/mips/Kbuild.platforms index 7ff9b5492041..aef6c917b45a 100644 --- a/trunk/arch/mips/Kbuild.platforms +++ b/trunk/arch/mips/Kbuild.platforms @@ -11,6 +11,7 @@ platforms += dec platforms += emma platforms += jazz platforms += jz4740 +platforms += lantiq platforms += lasat platforms += loongson platforms += mipssim diff --git a/trunk/arch/mips/Kconfig b/trunk/arch/mips/Kconfig index 8e256cc5dcd9..cef1a854487d 100644 --- a/trunk/arch/mips/Kconfig +++ b/trunk/arch/mips/Kconfig @@ -212,6 +212,24 @@ config MACH_JZ4740 select HAVE_PWM select HAVE_CLK +config LANTIQ + bool "Lantiq based platforms" + select DMA_NONCOHERENT + select IRQ_CPU + select CEVT_R4K + select CSRC_R4K + select SYS_HAS_CPU_MIPS32_R1 + select SYS_HAS_CPU_MIPS32_R2 + select SYS_SUPPORTS_BIG_ENDIAN + select SYS_SUPPORTS_32BIT_KERNEL + select SYS_SUPPORTS_MULTITHREADING + select SYS_HAS_EARLY_PRINTK + select ARCH_REQUIRE_GPIOLIB + select SWAP_IO_SPACE + select BOOT_RAW + select HAVE_CLK + select MIPS_MACHINE + config LASAT bool "LASAT Networks platforms" select CEVT_R4K @@ -736,6 +754,33 @@ config CAVIUM_OCTEON_REFERENCE_BOARD Hikari Say Y here for most Octeon reference boards. +config NLM_XLR_BOARD + bool "Netlogic XLR/XLS based systems" + depends on EXPERIMENTAL + select BOOT_ELF32 + select NLM_COMMON + select NLM_XLR + select SYS_HAS_CPU_XLR + select SYS_SUPPORTS_SMP + select HW_HAS_PCI + select SWAP_IO_SPACE + select SYS_SUPPORTS_32BIT_KERNEL + select SYS_SUPPORTS_64BIT_KERNEL + select 64BIT_PHYS_ADDR + select SYS_SUPPORTS_BIG_ENDIAN + select SYS_SUPPORTS_HIGHMEM + select DMA_COHERENT + select NR_CPUS_DEFAULT_32 + select CEVT_R4K + select CSRC_R4K + select IRQ_CPU + select ZONE_DMA if 64BIT + select SYNC_R4K + select SYS_HAS_EARLY_PRINTK + help + Support for systems based on Netlogic XLR and XLS processors. 
+ Say Y here if you have a XLR or XLS based board. + endchoice source "arch/mips/alchemy/Kconfig" @@ -743,6 +788,7 @@ source "arch/mips/ath79/Kconfig" source "arch/mips/bcm63xx/Kconfig" source "arch/mips/jazz/Kconfig" source "arch/mips/jz4740/Kconfig" +source "arch/mips/lantiq/Kconfig" source "arch/mips/lasat/Kconfig" source "arch/mips/pmc-sierra/Kconfig" source "arch/mips/powertv/Kconfig" @@ -752,6 +798,7 @@ source "arch/mips/txx9/Kconfig" source "arch/mips/vr41xx/Kconfig" source "arch/mips/cavium-octeon/Kconfig" source "arch/mips/loongson/Kconfig" +source "arch/mips/netlogic/Kconfig" endmenu @@ -997,9 +1044,6 @@ config IRQ_GT641XX config IRQ_GIC bool -config IRQ_CPU_OCTEON - bool - config MIPS_BOARDS_GEN bool @@ -1359,8 +1403,6 @@ config CPU_SB1 config CPU_CAVIUM_OCTEON bool "Cavium Octeon processor" depends on SYS_HAS_CPU_CAVIUM_OCTEON - select IRQ_CPU - select IRQ_CPU_OCTEON select CPU_HAS_PREFETCH select CPU_SUPPORTS_64BIT_KERNEL select SYS_SUPPORTS_SMP @@ -1425,6 +1467,17 @@ config CPU_BMIPS5000 help Broadcom BMIPS5000 processors. +config CPU_XLR + bool "Netlogic XLR SoC" + depends on SYS_HAS_CPU_XLR + select CPU_SUPPORTS_32BIT_KERNEL + select CPU_SUPPORTS_64BIT_KERNEL + select CPU_SUPPORTS_HIGHMEM + select WEAK_ORDERING + select WEAK_REORDERING_BEYOND_LLSC + select CPU_SUPPORTS_HUGEPAGES + help + Netlogic Microsystems XLR/XLS processors. endchoice if CPU_LOONGSON2F @@ -1555,6 +1608,9 @@ config SYS_HAS_CPU_BMIPS4380 config SYS_HAS_CPU_BMIPS5000 bool +config SYS_HAS_CPU_XLR + bool + # # CPU may reorder R->R, R->W, W->R, W->W # Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC @@ -2339,6 +2395,7 @@ config MMU config I8253 bool + select CLKSRC_I8253 select MIPS_EXTERNAL_TIMER config ZONE_DMA32 diff --git a/trunk/arch/mips/Makefile b/trunk/arch/mips/Makefile index 53e3514ba10e..884819cd0607 100644 --- a/trunk/arch/mips/Makefile +++ b/trunk/arch/mips/Makefile @@ -191,6 +191,18 @@ endif # include $(srctree)/arch/mips/Kbuild.platforms +# +# NETLOGIC SOC Common (common) +# +cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/mach-netlogic +cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/netlogic + +# +# NETLOGIC XLR/XLS SoC, Simulator and boards +# +core-$(CONFIG_NLM_XLR) += arch/mips/netlogic/xlr/ +load-$(CONFIG_NLM_XLR_BOARD) += 0xffffffff84000000 + cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic drivers-$(CONFIG_PCI) += arch/mips/pci/ diff --git a/trunk/arch/mips/alchemy/common/dbdma.c b/trunk/arch/mips/alchemy/common/dbdma.c index ca0506a8585a..3a5abb54d505 100644 --- a/trunk/arch/mips/alchemy/common/dbdma.c +++ b/trunk/arch/mips/alchemy/common/dbdma.c @@ -36,7 +36,7 @@ #include #include #include -#include +#include #include #include @@ -58,7 +58,8 @@ static DEFINE_SPINLOCK(au1xxx_dbdma_spin_lock); /* I couldn't find a macro that did this... 
*/ #define ALIGN_ADDR(x, a) ((((u32)(x)) + (a-1)) & ~(a-1)) -static dbdma_global_t *dbdma_gptr = (dbdma_global_t *)DDMA_GLOBAL_BASE; +static dbdma_global_t *dbdma_gptr = + (dbdma_global_t *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR); static int dbdma_initialized; static dbdev_tab_t dbdev_tab[] = { @@ -299,7 +300,7 @@ u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid, if (ctp != NULL) { memset(ctp, 0, sizeof(chan_tab_t)); ctp->chan_index = chan = i; - dcp = DDMA_CHANNEL_BASE; + dcp = KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR); dcp += (0x0100 * chan); ctp->chan_ptr = (au1x_dma_chan_t *)dcp; cp = (au1x_dma_chan_t *)dcp; @@ -958,105 +959,75 @@ u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr) } -struct alchemy_dbdma_sysdev { - struct sys_device sysdev; - u32 pm_regs[NUM_DBDMA_CHANS + 1][6]; -}; +static unsigned long alchemy_dbdma_pm_data[NUM_DBDMA_CHANS + 1][6]; -static int alchemy_dbdma_suspend(struct sys_device *dev, - pm_message_t state) +static int alchemy_dbdma_suspend(void) { - struct alchemy_dbdma_sysdev *sdev = - container_of(dev, struct alchemy_dbdma_sysdev, sysdev); int i; - u32 addr; + void __iomem *addr; - addr = DDMA_GLOBAL_BASE; - sdev->pm_regs[0][0] = au_readl(addr + 0x00); - sdev->pm_regs[0][1] = au_readl(addr + 0x04); - sdev->pm_regs[0][2] = au_readl(addr + 0x08); - sdev->pm_regs[0][3] = au_readl(addr + 0x0c); + addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR); + alchemy_dbdma_pm_data[0][0] = __raw_readl(addr + 0x00); + alchemy_dbdma_pm_data[0][1] = __raw_readl(addr + 0x04); + alchemy_dbdma_pm_data[0][2] = __raw_readl(addr + 0x08); + alchemy_dbdma_pm_data[0][3] = __raw_readl(addr + 0x0c); /* save channel configurations */ - for (i = 1, addr = DDMA_CHANNEL_BASE; i <= NUM_DBDMA_CHANS; i++) { - sdev->pm_regs[i][0] = au_readl(addr + 0x00); - sdev->pm_regs[i][1] = au_readl(addr + 0x04); - sdev->pm_regs[i][2] = au_readl(addr + 0x08); - sdev->pm_regs[i][3] = au_readl(addr + 0x0c); - sdev->pm_regs[i][4] = au_readl(addr + 0x10); - sdev->pm_regs[i][5] = au_readl(addr + 0x14); + addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR); + for (i = 1; i <= NUM_DBDMA_CHANS; i++) { + alchemy_dbdma_pm_data[i][0] = __raw_readl(addr + 0x00); + alchemy_dbdma_pm_data[i][1] = __raw_readl(addr + 0x04); + alchemy_dbdma_pm_data[i][2] = __raw_readl(addr + 0x08); + alchemy_dbdma_pm_data[i][3] = __raw_readl(addr + 0x0c); + alchemy_dbdma_pm_data[i][4] = __raw_readl(addr + 0x10); + alchemy_dbdma_pm_data[i][5] = __raw_readl(addr + 0x14); /* halt channel */ - au_writel(sdev->pm_regs[i][0] & ~1, addr + 0x00); - au_sync(); - while (!(au_readl(addr + 0x14) & 1)) - au_sync(); + __raw_writel(alchemy_dbdma_pm_data[i][0] & ~1, addr + 0x00); + wmb(); + while (!(__raw_readl(addr + 0x14) & 1)) + wmb(); addr += 0x100; /* next channel base */ } /* disable channel interrupts */ - au_writel(0, DDMA_GLOBAL_BASE + 0x0c); - au_sync(); + addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR); + __raw_writel(0, addr + 0x0c); + wmb(); return 0; } -static int alchemy_dbdma_resume(struct sys_device *dev) +static void alchemy_dbdma_resume(void) { - struct alchemy_dbdma_sysdev *sdev = - container_of(dev, struct alchemy_dbdma_sysdev, sysdev); int i; - u32 addr; + void __iomem *addr; - addr = DDMA_GLOBAL_BASE; - au_writel(sdev->pm_regs[0][0], addr + 0x00); - au_writel(sdev->pm_regs[0][1], addr + 0x04); - au_writel(sdev->pm_regs[0][2], addr + 0x08); - au_writel(sdev->pm_regs[0][3], addr + 0x0c); + addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR); + __raw_writel(alchemy_dbdma_pm_data[0][0], addr + 
0x00); + __raw_writel(alchemy_dbdma_pm_data[0][1], addr + 0x04); + __raw_writel(alchemy_dbdma_pm_data[0][2], addr + 0x08); + __raw_writel(alchemy_dbdma_pm_data[0][3], addr + 0x0c); /* restore channel configurations */ - for (i = 1, addr = DDMA_CHANNEL_BASE; i <= NUM_DBDMA_CHANS; i++) { - au_writel(sdev->pm_regs[i][0], addr + 0x00); - au_writel(sdev->pm_regs[i][1], addr + 0x04); - au_writel(sdev->pm_regs[i][2], addr + 0x08); - au_writel(sdev->pm_regs[i][3], addr + 0x0c); - au_writel(sdev->pm_regs[i][4], addr + 0x10); - au_writel(sdev->pm_regs[i][5], addr + 0x14); - au_sync(); + addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR); + for (i = 1; i <= NUM_DBDMA_CHANS; i++) { + __raw_writel(alchemy_dbdma_pm_data[i][0], addr + 0x00); + __raw_writel(alchemy_dbdma_pm_data[i][1], addr + 0x04); + __raw_writel(alchemy_dbdma_pm_data[i][2], addr + 0x08); + __raw_writel(alchemy_dbdma_pm_data[i][3], addr + 0x0c); + __raw_writel(alchemy_dbdma_pm_data[i][4], addr + 0x10); + __raw_writel(alchemy_dbdma_pm_data[i][5], addr + 0x14); + wmb(); addr += 0x100; /* next channel base */ } - - return 0; } -static struct sysdev_class alchemy_dbdma_sysdev_class = { - .name = "dbdma", +static struct syscore_ops alchemy_dbdma_syscore_ops = { .suspend = alchemy_dbdma_suspend, .resume = alchemy_dbdma_resume, }; -static int __init alchemy_dbdma_sysdev_init(void) -{ - struct alchemy_dbdma_sysdev *sdev; - int ret; - - ret = sysdev_class_register(&alchemy_dbdma_sysdev_class); - if (ret) - return ret; - - sdev = kzalloc(sizeof(struct alchemy_dbdma_sysdev), GFP_KERNEL); - if (!sdev) - return -ENOMEM; - - sdev->sysdev.id = -1; - sdev->sysdev.cls = &alchemy_dbdma_sysdev_class; - ret = sysdev_register(&sdev->sysdev); - if (ret) - kfree(sdev); - - return ret; -} - static int __init au1xxx_dbdma_init(void) { int irq_nr, ret; @@ -1084,11 +1055,7 @@ static int __init au1xxx_dbdma_init(void) else { dbdma_initialized = 1; printk(KERN_INFO "Alchemy DBDMA initialized\n"); - ret = alchemy_dbdma_sysdev_init(); - if (ret) { - printk(KERN_ERR "DBDMA PM init failed\n"); - ret = 0; - } + register_syscore_ops(&alchemy_dbdma_syscore_ops); } return ret; diff --git a/trunk/arch/mips/alchemy/common/dma.c b/trunk/arch/mips/alchemy/common/dma.c index d5278877891d..347980e79a89 100644 --- a/trunk/arch/mips/alchemy/common/dma.c +++ b/trunk/arch/mips/alchemy/common/dma.c @@ -58,6 +58,9 @@ * returned from request_dma. 
*/ +/* DMA Channel register block spacing */ +#define DMA_CHANNEL_LEN 0x00000100 + DEFINE_SPINLOCK(au1000_dma_spin_lock); struct dma_chan au1000_dma_table[NUM_AU1000_DMA_CHANNELS] = { @@ -77,22 +80,23 @@ static const struct dma_dev { unsigned int fifo_addr; unsigned int dma_mode; } dma_dev_table[DMA_NUM_DEV] = { - {UART0_ADDR + UART_TX, 0}, - {UART0_ADDR + UART_RX, 0}, - {0, 0}, - {0, 0}, - {AC97C_DATA, DMA_DW16 }, /* coherent */ - {AC97C_DATA, DMA_DR | DMA_DW16 }, /* coherent */ - {UART3_ADDR + UART_TX, DMA_DW8 | DMA_NC}, - {UART3_ADDR + UART_RX, DMA_DR | DMA_DW8 | DMA_NC}, - {USBD_EP0RD, DMA_DR | DMA_DW8 | DMA_NC}, - {USBD_EP0WR, DMA_DW8 | DMA_NC}, - {USBD_EP2WR, DMA_DW8 | DMA_NC}, - {USBD_EP3WR, DMA_DW8 | DMA_NC}, - {USBD_EP4RD, DMA_DR | DMA_DW8 | DMA_NC}, - {USBD_EP5RD, DMA_DR | DMA_DW8 | DMA_NC}, - {I2S_DATA, DMA_DW32 | DMA_NC}, - {I2S_DATA, DMA_DR | DMA_DW32 | DMA_NC} + { AU1000_UART0_PHYS_ADDR + 0x04, DMA_DW8 }, /* UART0_TX */ + { AU1000_UART0_PHYS_ADDR + 0x00, DMA_DW8 | DMA_DR }, /* UART0_RX */ + { 0, 0 }, /* DMA_REQ0 */ + { 0, 0 }, /* DMA_REQ1 */ + { AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 }, /* AC97 TX c */ + { AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 | DMA_DR }, /* AC97 RX c */ + { AU1000_UART3_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC }, /* UART3_TX */ + { AU1000_UART3_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* UART3_RX */ + { AU1000_USBD_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* EP0RD */ + { AU1000_USBD_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC }, /* EP0WR */ + { AU1000_USBD_PHYS_ADDR + 0x08, DMA_DW8 | DMA_NC }, /* EP2WR */ + { AU1000_USBD_PHYS_ADDR + 0x0c, DMA_DW8 | DMA_NC }, /* EP3WR */ + { AU1000_USBD_PHYS_ADDR + 0x10, DMA_DW8 | DMA_NC | DMA_DR }, /* EP4RD */ + { AU1000_USBD_PHYS_ADDR + 0x14, DMA_DW8 | DMA_NC | DMA_DR }, /* EP5RD */ + /* on Au1500, these 2 are DMA_REQ2/3 (GPIO208/209) instead! 
*/ + { AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC}, /* I2S TX */ + { AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC | DMA_DR}, /* I2S RX */ }; int au1000_dma_read_proc(char *buf, char **start, off_t fpos, @@ -123,10 +127,10 @@ int au1000_dma_read_proc(char *buf, char **start, off_t fpos, /* Device FIFO addresses and default DMA modes - 2nd bank */ static const struct dma_dev dma_dev_table_bank2[DMA_NUM_DEV_BANK2] = { - { SD0_XMIT_FIFO, DMA_DS | DMA_DW8 }, /* coherent */ - { SD0_RECV_FIFO, DMA_DS | DMA_DR | DMA_DW8 }, /* coherent */ - { SD1_XMIT_FIFO, DMA_DS | DMA_DW8 }, /* coherent */ - { SD1_RECV_FIFO, DMA_DS | DMA_DR | DMA_DW8 } /* coherent */ + { AU1100_SD0_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 }, /* coherent */ + { AU1100_SD0_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR }, /* coherent */ + { AU1100_SD1_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 }, /* coherent */ + { AU1100_SD1_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR } /* coherent */ }; void dump_au1000_dma_channel(unsigned int dmanr) @@ -202,7 +206,7 @@ int request_au1000_dma(int dev_id, const char *dev_str, } /* fill it in */ - chan->io = DMA_CHANNEL_BASE + i * DMA_CHANNEL_LEN; + chan->io = KSEG1ADDR(AU1000_DMA_PHYS_ADDR) + i * DMA_CHANNEL_LEN; chan->dev_id = dev_id; chan->dev_str = dev_str; chan->fifo_addr = dev->fifo_addr; diff --git a/trunk/arch/mips/alchemy/common/irq.c b/trunk/arch/mips/alchemy/common/irq.c index 55dd7c888517..8b60ba0675e2 100644 --- a/trunk/arch/mips/alchemy/common/irq.c +++ b/trunk/arch/mips/alchemy/common/irq.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include #include @@ -39,6 +39,36 @@ #include #endif +/* Interrupt Controller register offsets */ +#define IC_CFG0RD 0x40 +#define IC_CFG0SET 0x40 +#define IC_CFG0CLR 0x44 +#define IC_CFG1RD 0x48 +#define IC_CFG1SET 0x48 +#define IC_CFG1CLR 0x4C +#define IC_CFG2RD 0x50 +#define IC_CFG2SET 0x50 +#define IC_CFG2CLR 0x54 +#define IC_REQ0INT 0x54 +#define IC_SRCRD 0x58 +#define IC_SRCSET 0x58 +#define IC_SRCCLR 0x5C +#define IC_REQ1INT 0x5C +#define IC_ASSIGNRD 0x60 +#define IC_ASSIGNSET 0x60 +#define IC_ASSIGNCLR 0x64 +#define IC_WAKERD 0x68 +#define IC_WAKESET 0x68 +#define IC_WAKECLR 0x6C +#define IC_MASKRD 0x70 +#define IC_MASKSET 0x70 +#define IC_MASKCLR 0x74 +#define IC_RISINGRD 0x78 +#define IC_RISINGCLR 0x78 +#define IC_FALLINGRD 0x7C +#define IC_FALLINGCLR 0x7C +#define IC_TESTBIT 0x80 + static int au1x_ic_settype(struct irq_data *d, unsigned int flow_type); /* NOTE on interrupt priorities: The original writers of this code said: @@ -221,89 +251,101 @@ struct au1xxx_irqmap au1200_irqmap[] __initdata = { static void au1x_ic0_unmask(struct irq_data *d) { unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; - au_writel(1 << bit, IC0_MASKSET); - au_writel(1 << bit, IC0_WAKESET); - au_sync(); + void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR); + + __raw_writel(1 << bit, base + IC_MASKSET); + __raw_writel(1 << bit, base + IC_WAKESET); + wmb(); } static void au1x_ic1_unmask(struct irq_data *d) { unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; - au_writel(1 << bit, IC1_MASKSET); - au_writel(1 << bit, IC1_WAKESET); + void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR); + + __raw_writel(1 << bit, base + IC_MASKSET); + __raw_writel(1 << bit, base + IC_WAKESET); /* very hacky. does the pb1000 cpld auto-disable this int? * nowhere in the current kernel sources is it disabled. 
--mlau */ #if defined(CONFIG_MIPS_PB1000) if (d->irq == AU1000_GPIO15_INT) - au_writel(0x4000, PB1000_MDR); /* enable int */ + __raw_writel(0x4000, (void __iomem *)PB1000_MDR); /* enable int */ #endif - au_sync(); + wmb(); } static void au1x_ic0_mask(struct irq_data *d) { unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; - au_writel(1 << bit, IC0_MASKCLR); - au_writel(1 << bit, IC0_WAKECLR); - au_sync(); + void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR); + + __raw_writel(1 << bit, base + IC_MASKCLR); + __raw_writel(1 << bit, base + IC_WAKECLR); + wmb(); } static void au1x_ic1_mask(struct irq_data *d) { unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; - au_writel(1 << bit, IC1_MASKCLR); - au_writel(1 << bit, IC1_WAKECLR); - au_sync(); + void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR); + + __raw_writel(1 << bit, base + IC_MASKCLR); + __raw_writel(1 << bit, base + IC_WAKECLR); + wmb(); } static void au1x_ic0_ack(struct irq_data *d) { unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; + void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR); /* * This may assume that we don't get interrupts from * both edges at once, or if we do, that we don't care. */ - au_writel(1 << bit, IC0_FALLINGCLR); - au_writel(1 << bit, IC0_RISINGCLR); - au_sync(); + __raw_writel(1 << bit, base + IC_FALLINGCLR); + __raw_writel(1 << bit, base + IC_RISINGCLR); + wmb(); } static void au1x_ic1_ack(struct irq_data *d) { unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; + void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR); /* * This may assume that we don't get interrupts from * both edges at once, or if we do, that we don't care. */ - au_writel(1 << bit, IC1_FALLINGCLR); - au_writel(1 << bit, IC1_RISINGCLR); - au_sync(); + __raw_writel(1 << bit, base + IC_FALLINGCLR); + __raw_writel(1 << bit, base + IC_RISINGCLR); + wmb(); } static void au1x_ic0_maskack(struct irq_data *d) { unsigned int bit = d->irq - AU1000_INTC0_INT_BASE; + void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR); - au_writel(1 << bit, IC0_WAKECLR); - au_writel(1 << bit, IC0_MASKCLR); - au_writel(1 << bit, IC0_RISINGCLR); - au_writel(1 << bit, IC0_FALLINGCLR); - au_sync(); + __raw_writel(1 << bit, base + IC_WAKECLR); + __raw_writel(1 << bit, base + IC_MASKCLR); + __raw_writel(1 << bit, base + IC_RISINGCLR); + __raw_writel(1 << bit, base + IC_FALLINGCLR); + wmb(); } static void au1x_ic1_maskack(struct irq_data *d) { unsigned int bit = d->irq - AU1000_INTC1_INT_BASE; + void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR); - au_writel(1 << bit, IC1_WAKECLR); - au_writel(1 << bit, IC1_MASKCLR); - au_writel(1 << bit, IC1_RISINGCLR); - au_writel(1 << bit, IC1_FALLINGCLR); - au_sync(); + __raw_writel(1 << bit, base + IC_WAKECLR); + __raw_writel(1 << bit, base + IC_MASKCLR); + __raw_writel(1 << bit, base + IC_RISINGCLR); + __raw_writel(1 << bit, base + IC_FALLINGCLR); + wmb(); } static int au1x_ic1_setwake(struct irq_data *d, unsigned int on) @@ -318,13 +360,13 @@ static int au1x_ic1_setwake(struct irq_data *d, unsigned int on) return -EINVAL; local_irq_save(flags); - wakemsk = au_readl(SYS_WAKEMSK); + wakemsk = __raw_readl((void __iomem *)SYS_WAKEMSK); if (on) wakemsk |= 1 << bit; else wakemsk &= ~(1 << bit); - au_writel(wakemsk, SYS_WAKEMSK); - au_sync(); + __raw_writel(wakemsk, (void __iomem *)SYS_WAKEMSK); + wmb(); local_irq_restore(flags); return 0; @@ -356,81 +398,74 @@ static struct irq_chip au1x_ic1_chip = { static int au1x_ic_settype(struct 
irq_data *d, unsigned int flow_type) { struct irq_chip *chip; - unsigned long icr[6]; - unsigned int bit, ic, irq = d->irq; + unsigned int bit, irq = d->irq; irq_flow_handler_t handler = NULL; unsigned char *name = NULL; + void __iomem *base; int ret; if (irq >= AU1000_INTC1_INT_BASE) { bit = irq - AU1000_INTC1_INT_BASE; chip = &au1x_ic1_chip; - ic = 1; + base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR); } else { bit = irq - AU1000_INTC0_INT_BASE; chip = &au1x_ic0_chip; - ic = 0; + base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR); } if (bit > 31) return -EINVAL; - icr[0] = ic ? IC1_CFG0SET : IC0_CFG0SET; - icr[1] = ic ? IC1_CFG1SET : IC0_CFG1SET; - icr[2] = ic ? IC1_CFG2SET : IC0_CFG2SET; - icr[3] = ic ? IC1_CFG0CLR : IC0_CFG0CLR; - icr[4] = ic ? IC1_CFG1CLR : IC0_CFG1CLR; - icr[5] = ic ? IC1_CFG2CLR : IC0_CFG2CLR; - ret = 0; switch (flow_type) { /* cfgregs 2:1:0 */ case IRQ_TYPE_EDGE_RISING: /* 0:0:1 */ - au_writel(1 << bit, icr[5]); - au_writel(1 << bit, icr[4]); - au_writel(1 << bit, icr[0]); + __raw_writel(1 << bit, base + IC_CFG2CLR); + __raw_writel(1 << bit, base + IC_CFG1CLR); + __raw_writel(1 << bit, base + IC_CFG0SET); handler = handle_edge_irq; name = "riseedge"; break; case IRQ_TYPE_EDGE_FALLING: /* 0:1:0 */ - au_writel(1 << bit, icr[5]); - au_writel(1 << bit, icr[1]); - au_writel(1 << bit, icr[3]); + __raw_writel(1 << bit, base + IC_CFG2CLR); + __raw_writel(1 << bit, base + IC_CFG1SET); + __raw_writel(1 << bit, base + IC_CFG0CLR); handler = handle_edge_irq; name = "falledge"; break; case IRQ_TYPE_EDGE_BOTH: /* 0:1:1 */ - au_writel(1 << bit, icr[5]); - au_writel(1 << bit, icr[1]); - au_writel(1 << bit, icr[0]); + __raw_writel(1 << bit, base + IC_CFG2CLR); + __raw_writel(1 << bit, base + IC_CFG1SET); + __raw_writel(1 << bit, base + IC_CFG0SET); handler = handle_edge_irq; name = "bothedge"; break; case IRQ_TYPE_LEVEL_HIGH: /* 1:0:1 */ - au_writel(1 << bit, icr[2]); - au_writel(1 << bit, icr[4]); - au_writel(1 << bit, icr[0]); + __raw_writel(1 << bit, base + IC_CFG2SET); + __raw_writel(1 << bit, base + IC_CFG1CLR); + __raw_writel(1 << bit, base + IC_CFG0SET); handler = handle_level_irq; name = "hilevel"; break; case IRQ_TYPE_LEVEL_LOW: /* 1:1:0 */ - au_writel(1 << bit, icr[2]); - au_writel(1 << bit, icr[1]); - au_writel(1 << bit, icr[3]); + __raw_writel(1 << bit, base + IC_CFG2SET); + __raw_writel(1 << bit, base + IC_CFG1SET); + __raw_writel(1 << bit, base + IC_CFG0CLR); handler = handle_level_irq; name = "lowlevel"; break; case IRQ_TYPE_NONE: /* 0:0:0 */ - au_writel(1 << bit, icr[5]); - au_writel(1 << bit, icr[4]); - au_writel(1 << bit, icr[3]); + __raw_writel(1 << bit, base + IC_CFG2CLR); + __raw_writel(1 << bit, base + IC_CFG1CLR); + __raw_writel(1 << bit, base + IC_CFG0CLR); break; default: ret = -EINVAL; } __irq_set_chip_handler_name_locked(d->irq, chip, handler, name); - au_sync(); + wmb(); return ret; } @@ -444,21 +479,21 @@ asmlinkage void plat_irq_dispatch(void) off = MIPS_CPU_IRQ_BASE + 7; goto handle; } else if (pending & CAUSEF_IP2) { - s = IC0_REQ0INT; + s = KSEG1ADDR(AU1000_IC0_PHYS_ADDR) + IC_REQ0INT; off = AU1000_INTC0_INT_BASE; } else if (pending & CAUSEF_IP3) { - s = IC0_REQ1INT; + s = KSEG1ADDR(AU1000_IC0_PHYS_ADDR) + IC_REQ1INT; off = AU1000_INTC0_INT_BASE; } else if (pending & CAUSEF_IP4) { - s = IC1_REQ0INT; + s = KSEG1ADDR(AU1000_IC1_PHYS_ADDR) + IC_REQ0INT; off = AU1000_INTC1_INT_BASE; } else if (pending & CAUSEF_IP5) { - s = IC1_REQ1INT; + s = KSEG1ADDR(AU1000_IC1_PHYS_ADDR) + IC_REQ1INT; off = AU1000_INTC1_INT_BASE; } else goto spurious; - s = 
au_readl(s); + s = __raw_readl((void __iomem *)s); if (unlikely(!s)) { spurious: spurious_interrupt(); @@ -469,48 +504,42 @@ asmlinkage void plat_irq_dispatch(void) do_IRQ(off); } + +static inline void ic_init(void __iomem *base) +{ + /* initialize interrupt controller to a safe state */ + __raw_writel(0xffffffff, base + IC_CFG0CLR); + __raw_writel(0xffffffff, base + IC_CFG1CLR); + __raw_writel(0xffffffff, base + IC_CFG2CLR); + __raw_writel(0xffffffff, base + IC_MASKCLR); + __raw_writel(0xffffffff, base + IC_ASSIGNCLR); + __raw_writel(0xffffffff, base + IC_WAKECLR); + __raw_writel(0xffffffff, base + IC_SRCSET); + __raw_writel(0xffffffff, base + IC_FALLINGCLR); + __raw_writel(0xffffffff, base + IC_RISINGCLR); + __raw_writel(0x00000000, base + IC_TESTBIT); + wmb(); +} + static void __init au1000_init_irq(struct au1xxx_irqmap *map) { unsigned int bit, irq_nr; - int i; - - /* - * Initialize interrupt controllers to a safe state. - */ - au_writel(0xffffffff, IC0_CFG0CLR); - au_writel(0xffffffff, IC0_CFG1CLR); - au_writel(0xffffffff, IC0_CFG2CLR); - au_writel(0xffffffff, IC0_MASKCLR); - au_writel(0xffffffff, IC0_ASSIGNCLR); - au_writel(0xffffffff, IC0_WAKECLR); - au_writel(0xffffffff, IC0_SRCSET); - au_writel(0xffffffff, IC0_FALLINGCLR); - au_writel(0xffffffff, IC0_RISINGCLR); - au_writel(0x00000000, IC0_TESTBIT); - - au_writel(0xffffffff, IC1_CFG0CLR); - au_writel(0xffffffff, IC1_CFG1CLR); - au_writel(0xffffffff, IC1_CFG2CLR); - au_writel(0xffffffff, IC1_MASKCLR); - au_writel(0xffffffff, IC1_ASSIGNCLR); - au_writel(0xffffffff, IC1_WAKECLR); - au_writel(0xffffffff, IC1_SRCSET); - au_writel(0xffffffff, IC1_FALLINGCLR); - au_writel(0xffffffff, IC1_RISINGCLR); - au_writel(0x00000000, IC1_TESTBIT); + void __iomem *base; + ic_init((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR)); + ic_init((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR)); mips_cpu_irq_init(); /* register all 64 possible IC0+IC1 irq sources as type "none". * Use set_irq_type() to set edge/level behaviour at runtime. */ - for (i = AU1000_INTC0_INT_BASE; - (i < AU1000_INTC0_INT_BASE + 32); i++) - au1x_ic_settype(irq_get_irq_data(i), IRQ_TYPE_NONE); + for (irq_nr = AU1000_INTC0_INT_BASE; + (irq_nr < AU1000_INTC0_INT_BASE + 32); irq_nr++) + au1x_ic_settype(irq_get_irq_data(irq_nr), IRQ_TYPE_NONE); - for (i = AU1000_INTC1_INT_BASE; - (i < AU1000_INTC1_INT_BASE + 32); i++) - au1x_ic_settype(irq_get_irq_data(i), IRQ_TYPE_NONE); + for (irq_nr = AU1000_INTC1_INT_BASE; + (irq_nr < AU1000_INTC1_INT_BASE + 32); irq_nr++) + au1x_ic_settype(irq_get_irq_data(irq_nr), IRQ_TYPE_NONE); /* * Initialize IC0, which is fixed per processor. 
@@ -520,13 +549,13 @@ static void __init au1000_init_irq(struct au1xxx_irqmap *map) if (irq_nr >= AU1000_INTC1_INT_BASE) { bit = irq_nr - AU1000_INTC1_INT_BASE; - if (map->im_request) - au_writel(1 << bit, IC1_ASSIGNSET); + base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR); } else { bit = irq_nr - AU1000_INTC0_INT_BASE; - if (map->im_request) - au_writel(1 << bit, IC0_ASSIGNSET); + base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR); } + if (map->im_request) + __raw_writel(1 << bit, base + IC_ASSIGNSET); au1x_ic_settype(irq_get_irq_data(irq_nr), map->im_type); ++map; @@ -556,90 +585,62 @@ void __init arch_init_irq(void) } } -struct alchemy_ic_sysdev { - struct sys_device sysdev; - void __iomem *base; - unsigned long pmdata[7]; -}; -static int alchemy_ic_suspend(struct sys_device *dev, pm_message_t state) -{ - struct alchemy_ic_sysdev *icdev = - container_of(dev, struct alchemy_ic_sysdev, sysdev); +static unsigned long alchemy_ic_pmdata[7 * 2]; - icdev->pmdata[0] = __raw_readl(icdev->base + IC_CFG0RD); - icdev->pmdata[1] = __raw_readl(icdev->base + IC_CFG1RD); - icdev->pmdata[2] = __raw_readl(icdev->base + IC_CFG2RD); - icdev->pmdata[3] = __raw_readl(icdev->base + IC_SRCRD); - icdev->pmdata[4] = __raw_readl(icdev->base + IC_ASSIGNRD); - icdev->pmdata[5] = __raw_readl(icdev->base + IC_WAKERD); - icdev->pmdata[6] = __raw_readl(icdev->base + IC_MASKRD); - - return 0; +static inline void alchemy_ic_suspend_one(void __iomem *base, unsigned long *d) +{ + d[0] = __raw_readl(base + IC_CFG0RD); + d[1] = __raw_readl(base + IC_CFG1RD); + d[2] = __raw_readl(base + IC_CFG2RD); + d[3] = __raw_readl(base + IC_SRCRD); + d[4] = __raw_readl(base + IC_ASSIGNRD); + d[5] = __raw_readl(base + IC_WAKERD); + d[6] = __raw_readl(base + IC_MASKRD); + ic_init(base); /* shut it up too while at it */ } -static int alchemy_ic_resume(struct sys_device *dev) +static inline void alchemy_ic_resume_one(void __iomem *base, unsigned long *d) { - struct alchemy_ic_sysdev *icdev = - container_of(dev, struct alchemy_ic_sysdev, sysdev); - - __raw_writel(0xffffffff, icdev->base + IC_MASKCLR); - __raw_writel(0xffffffff, icdev->base + IC_CFG0CLR); - __raw_writel(0xffffffff, icdev->base + IC_CFG1CLR); - __raw_writel(0xffffffff, icdev->base + IC_CFG2CLR); - __raw_writel(0xffffffff, icdev->base + IC_SRCCLR); - __raw_writel(0xffffffff, icdev->base + IC_ASSIGNCLR); - __raw_writel(0xffffffff, icdev->base + IC_WAKECLR); - __raw_writel(0xffffffff, icdev->base + IC_RISINGCLR); - __raw_writel(0xffffffff, icdev->base + IC_FALLINGCLR); - __raw_writel(0x00000000, icdev->base + IC_TESTBIT); - wmb(); - __raw_writel(icdev->pmdata[0], icdev->base + IC_CFG0SET); - __raw_writel(icdev->pmdata[1], icdev->base + IC_CFG1SET); - __raw_writel(icdev->pmdata[2], icdev->base + IC_CFG2SET); - __raw_writel(icdev->pmdata[3], icdev->base + IC_SRCSET); - __raw_writel(icdev->pmdata[4], icdev->base + IC_ASSIGNSET); - __raw_writel(icdev->pmdata[5], icdev->base + IC_WAKESET); + ic_init(base); + + __raw_writel(d[0], base + IC_CFG0SET); + __raw_writel(d[1], base + IC_CFG1SET); + __raw_writel(d[2], base + IC_CFG2SET); + __raw_writel(d[3], base + IC_SRCSET); + __raw_writel(d[4], base + IC_ASSIGNSET); + __raw_writel(d[5], base + IC_WAKESET); wmb(); - __raw_writel(icdev->pmdata[6], icdev->base + IC_MASKSET); + __raw_writel(d[6], base + IC_MASKSET); wmb(); +} +static int alchemy_ic_suspend(void) +{ + alchemy_ic_suspend_one((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR), + alchemy_ic_pmdata); + alchemy_ic_suspend_one((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR), + 
&alchemy_ic_pmdata[7]); return 0; } -static struct sysdev_class alchemy_ic_sysdev_class = { - .name = "ic", +static void alchemy_ic_resume(void) +{ + alchemy_ic_resume_one((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR), + &alchemy_ic_pmdata[7]); + alchemy_ic_resume_one((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR), + alchemy_ic_pmdata); +} + +static struct syscore_ops alchemy_ic_syscore_ops = { .suspend = alchemy_ic_suspend, .resume = alchemy_ic_resume, }; -static int __init alchemy_ic_sysdev_init(void) +static int __init alchemy_ic_pm_init(void) { - struct alchemy_ic_sysdev *icdev; - unsigned long icbase[2] = { IC0_PHYS_ADDR, IC1_PHYS_ADDR }; - int err, i; - - err = sysdev_class_register(&alchemy_ic_sysdev_class); - if (err) - return err; - - for (i = 0; i < 2; i++) { - icdev = kzalloc(sizeof(struct alchemy_ic_sysdev), GFP_KERNEL); - if (!icdev) - return -ENOMEM; - - icdev->base = ioremap(icbase[i], 0x1000); - - icdev->sysdev.id = i; - icdev->sysdev.cls = &alchemy_ic_sysdev_class; - err = sysdev_register(&icdev->sysdev); - if (err) { - kfree(icdev); - return err; - } - } - + register_syscore_ops(&alchemy_ic_syscore_ops); return 0; } -device_initcall(alchemy_ic_sysdev_init); +device_initcall(alchemy_ic_pm_init); diff --git a/trunk/arch/mips/alchemy/common/platform.c b/trunk/arch/mips/alchemy/common/platform.c index 9e7814db3d03..3b2c18b14341 100644 --- a/trunk/arch/mips/alchemy/common/platform.c +++ b/trunk/arch/mips/alchemy/common/platform.c @@ -13,9 +13,10 @@ #include #include +#include #include #include -#include +#include #include #include @@ -30,21 +31,12 @@ static void alchemy_8250_pm(struct uart_port *port, unsigned int state, #ifdef CONFIG_SERIAL_8250 switch (state) { case 0: - if ((__raw_readl(port->membase + UART_MOD_CNTRL) & 3) != 3) { - /* power-on sequence as suggested in the databooks */ - __raw_writel(0, port->membase + UART_MOD_CNTRL); - wmb(); - __raw_writel(1, port->membase + UART_MOD_CNTRL); - wmb(); - } - __raw_writel(3, port->membase + UART_MOD_CNTRL); /* full on */ - wmb(); + alchemy_uart_enable(CPHYSADDR(port->membase)); serial8250_do_pm(port, state, old_state); break; case 3: /* power off */ serial8250_do_pm(port, state, old_state); - __raw_writel(0, port->membase + UART_MOD_CNTRL); - wmb(); + alchemy_uart_disable(CPHYSADDR(port->membase)); break; default: serial8250_do_pm(port, state, old_state); @@ -65,38 +57,60 @@ static void alchemy_8250_pm(struct uart_port *port, unsigned int state, .pm = alchemy_8250_pm, \ } -static struct plat_serial8250_port au1x00_uart_data[] = { -#if defined(CONFIG_SOC_AU1000) - PORT(UART0_PHYS_ADDR, AU1000_UART0_INT), - PORT(UART1_PHYS_ADDR, AU1000_UART1_INT), - PORT(UART2_PHYS_ADDR, AU1000_UART2_INT), - PORT(UART3_PHYS_ADDR, AU1000_UART3_INT), -#elif defined(CONFIG_SOC_AU1500) - PORT(UART0_PHYS_ADDR, AU1500_UART0_INT), - PORT(UART3_PHYS_ADDR, AU1500_UART3_INT), -#elif defined(CONFIG_SOC_AU1100) - PORT(UART0_PHYS_ADDR, AU1100_UART0_INT), - PORT(UART1_PHYS_ADDR, AU1100_UART1_INT), - PORT(UART3_PHYS_ADDR, AU1100_UART3_INT), -#elif defined(CONFIG_SOC_AU1550) - PORT(UART0_PHYS_ADDR, AU1550_UART0_INT), - PORT(UART1_PHYS_ADDR, AU1550_UART1_INT), - PORT(UART3_PHYS_ADDR, AU1550_UART3_INT), -#elif defined(CONFIG_SOC_AU1200) - PORT(UART0_PHYS_ADDR, AU1200_UART0_INT), - PORT(UART1_PHYS_ADDR, AU1200_UART1_INT), -#endif - { }, +static struct plat_serial8250_port au1x00_uart_data[][4] __initdata = { + [ALCHEMY_CPU_AU1000] = { + PORT(AU1000_UART0_PHYS_ADDR, AU1000_UART0_INT), + PORT(AU1000_UART1_PHYS_ADDR, AU1000_UART1_INT), + 
PORT(AU1000_UART2_PHYS_ADDR, AU1000_UART2_INT), + PORT(AU1000_UART3_PHYS_ADDR, AU1000_UART3_INT), + }, + [ALCHEMY_CPU_AU1500] = { + PORT(AU1000_UART0_PHYS_ADDR, AU1500_UART0_INT), + PORT(AU1000_UART3_PHYS_ADDR, AU1500_UART3_INT), + }, + [ALCHEMY_CPU_AU1100] = { + PORT(AU1000_UART0_PHYS_ADDR, AU1100_UART0_INT), + PORT(AU1000_UART1_PHYS_ADDR, AU1100_UART1_INT), + PORT(AU1000_UART3_PHYS_ADDR, AU1100_UART3_INT), + }, + [ALCHEMY_CPU_AU1550] = { + PORT(AU1000_UART0_PHYS_ADDR, AU1550_UART0_INT), + PORT(AU1000_UART1_PHYS_ADDR, AU1550_UART1_INT), + PORT(AU1000_UART3_PHYS_ADDR, AU1550_UART3_INT), + }, + [ALCHEMY_CPU_AU1200] = { + PORT(AU1000_UART0_PHYS_ADDR, AU1200_UART0_INT), + PORT(AU1000_UART1_PHYS_ADDR, AU1200_UART1_INT), + }, }; static struct platform_device au1xx0_uart_device = { .name = "serial8250", .id = PLAT8250_DEV_AU1X00, - .dev = { - .platform_data = au1x00_uart_data, - }, }; +static void __init alchemy_setup_uarts(int ctype) +{ + unsigned int uartclk = get_au1x00_uart_baud_base() * 16; + int s = sizeof(struct plat_serial8250_port); + int c = alchemy_get_uarts(ctype); + struct plat_serial8250_port *ports; + + ports = kzalloc(s * (c + 1), GFP_KERNEL); + if (!ports) { + printk(KERN_INFO "Alchemy: no memory for UART data\n"); + return; + } + memcpy(ports, au1x00_uart_data[ctype], s * c); + au1xx0_uart_device.dev.platform_data = ports; + + /* Fill up uartclk. */ + for (s = 0; s < c; s++) + ports[s].uartclk = uartclk; + if (platform_device_register(&au1xx0_uart_device)) + printk(KERN_INFO "Alchemy: failed to register UARTs\n"); +} + /* OHCI (USB full speed host controller) */ static struct resource au1xxx_usb_ohci_resources[] = { [0] = { @@ -269,8 +283,8 @@ extern struct au1xmmc_platform_data au1xmmc_platdata[2]; static struct resource au1200_mmc0_resources[] = { [0] = { - .start = SD0_PHYS_ADDR, - .end = SD0_PHYS_ADDR + 0x7ffff, + .start = AU1100_SD0_PHYS_ADDR, + .end = AU1100_SD0_PHYS_ADDR + 0xfff, .flags = IORESOURCE_MEM, }, [1] = { @@ -305,8 +319,8 @@ static struct platform_device au1200_mmc0_device = { #ifndef CONFIG_MIPS_DB1200 static struct resource au1200_mmc1_resources[] = { [0] = { - .start = SD1_PHYS_ADDR, - .end = SD1_PHYS_ADDR + 0x7ffff, + .start = AU1100_SD1_PHYS_ADDR, + .end = AU1100_SD1_PHYS_ADDR + 0xfff, .flags = IORESOURCE_MEM, }, [1] = { @@ -359,15 +373,16 @@ static struct platform_device pbdb_smbus_device = { #endif /* Macro to help defining the Ethernet MAC resources */ +#define MAC_RES_COUNT 3 /* MAC regs base, MAC enable reg, MAC INT */ #define MAC_RES(_base, _enable, _irq) \ { \ - .start = CPHYSADDR(_base), \ - .end = CPHYSADDR(_base + 0xffff), \ + .start = _base, \ + .end = _base + 0xffff, \ .flags = IORESOURCE_MEM, \ }, \ { \ - .start = CPHYSADDR(_enable), \ - .end = CPHYSADDR(_enable + 0x3), \ + .start = _enable, \ + .end = _enable + 0x3, \ .flags = IORESOURCE_MEM, \ }, \ { \ @@ -376,19 +391,29 @@ static struct platform_device pbdb_smbus_device = { .flags = IORESOURCE_IRQ \ } -static struct resource au1xxx_eth0_resources[] = { -#if defined(CONFIG_SOC_AU1000) - MAC_RES(AU1000_ETH0_BASE, AU1000_MAC0_ENABLE, AU1000_MAC0_DMA_INT), -#elif defined(CONFIG_SOC_AU1100) - MAC_RES(AU1100_ETH0_BASE, AU1100_MAC0_ENABLE, AU1100_MAC0_DMA_INT), -#elif defined(CONFIG_SOC_AU1550) - MAC_RES(AU1550_ETH0_BASE, AU1550_MAC0_ENABLE, AU1550_MAC0_DMA_INT), -#elif defined(CONFIG_SOC_AU1500) - MAC_RES(AU1500_ETH0_BASE, AU1500_MAC0_ENABLE, AU1500_MAC0_DMA_INT), -#endif +static struct resource au1xxx_eth0_resources[][MAC_RES_COUNT] __initdata = { + [ALCHEMY_CPU_AU1000] = { + 
MAC_RES(AU1000_MAC0_PHYS_ADDR, + AU1000_MACEN_PHYS_ADDR, + AU1000_MAC0_DMA_INT) + }, + [ALCHEMY_CPU_AU1500] = { + MAC_RES(AU1500_MAC0_PHYS_ADDR, + AU1500_MACEN_PHYS_ADDR, + AU1500_MAC0_DMA_INT) + }, + [ALCHEMY_CPU_AU1100] = { + MAC_RES(AU1000_MAC0_PHYS_ADDR, + AU1000_MACEN_PHYS_ADDR, + AU1100_MAC0_DMA_INT) + }, + [ALCHEMY_CPU_AU1550] = { + MAC_RES(AU1000_MAC0_PHYS_ADDR, + AU1000_MACEN_PHYS_ADDR, + AU1550_MAC0_DMA_INT) + }, }; - static struct au1000_eth_platform_data au1xxx_eth0_platform_data = { .phy1_search_mac0 = 1, }; @@ -396,20 +421,26 @@ static struct au1000_eth_platform_data au1xxx_eth0_platform_data = { static struct platform_device au1xxx_eth0_device = { .name = "au1000-eth", .id = 0, - .num_resources = ARRAY_SIZE(au1xxx_eth0_resources), - .resource = au1xxx_eth0_resources, + .num_resources = MAC_RES_COUNT, .dev.platform_data = &au1xxx_eth0_platform_data, }; -#ifndef CONFIG_SOC_AU1100 -static struct resource au1xxx_eth1_resources[] = { -#if defined(CONFIG_SOC_AU1000) - MAC_RES(AU1000_ETH1_BASE, AU1000_MAC1_ENABLE, AU1000_MAC1_DMA_INT), -#elif defined(CONFIG_SOC_AU1550) - MAC_RES(AU1550_ETH1_BASE, AU1550_MAC1_ENABLE, AU1550_MAC1_DMA_INT), -#elif defined(CONFIG_SOC_AU1500) - MAC_RES(AU1500_ETH1_BASE, AU1500_MAC1_ENABLE, AU1500_MAC1_DMA_INT), -#endif +static struct resource au1xxx_eth1_resources[][MAC_RES_COUNT] __initdata = { + [ALCHEMY_CPU_AU1000] = { + MAC_RES(AU1000_MAC1_PHYS_ADDR, + AU1000_MACEN_PHYS_ADDR + 4, + AU1000_MAC1_DMA_INT) + }, + [ALCHEMY_CPU_AU1500] = { + MAC_RES(AU1500_MAC1_PHYS_ADDR, + AU1500_MACEN_PHYS_ADDR + 4, + AU1500_MAC1_DMA_INT) + }, + [ALCHEMY_CPU_AU1550] = { + MAC_RES(AU1000_MAC1_PHYS_ADDR, + AU1000_MACEN_PHYS_ADDR + 4, + AU1550_MAC1_DMA_INT) + }, }; static struct au1000_eth_platform_data au1xxx_eth1_platform_data = { @@ -419,11 +450,9 @@ static struct au1000_eth_platform_data au1xxx_eth1_platform_data = { static struct platform_device au1xxx_eth1_device = { .name = "au1000-eth", .id = 1, - .num_resources = ARRAY_SIZE(au1xxx_eth1_resources), - .resource = au1xxx_eth1_resources, + .num_resources = MAC_RES_COUNT, .dev.platform_data = &au1xxx_eth1_platform_data, }; -#endif void __init au1xxx_override_eth_cfg(unsigned int port, struct au1000_eth_platform_data *eth_data) @@ -434,15 +463,65 @@ void __init au1xxx_override_eth_cfg(unsigned int port, if (port == 0) memcpy(&au1xxx_eth0_platform_data, eth_data, sizeof(struct au1000_eth_platform_data)); -#ifndef CONFIG_SOC_AU1100 else memcpy(&au1xxx_eth1_platform_data, eth_data, sizeof(struct au1000_eth_platform_data)); -#endif +} + +static void __init alchemy_setup_macs(int ctype) +{ + int ret, i; + unsigned char ethaddr[6]; + struct resource *macres; + + /* Handle 1st MAC */ + if (alchemy_get_macs(ctype) < 1) + return; + + macres = kmalloc(sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL); + if (!macres) { + printk(KERN_INFO "Alchemy: no memory for MAC0 resources\n"); + return; + } + memcpy(macres, au1xxx_eth0_resources[ctype], + sizeof(struct resource) * MAC_RES_COUNT); + au1xxx_eth0_device.resource = macres; + + i = prom_get_ethernet_addr(ethaddr); + if (!i && !is_valid_ether_addr(au1xxx_eth0_platform_data.mac)) + memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6); + + ret = platform_device_register(&au1xxx_eth0_device); + if (ret) + printk(KERN_INFO "Alchemy: failed to register MAC0\n"); + + + /* Handle 2nd MAC */ + if (alchemy_get_macs(ctype) < 2) + return; + + macres = kmalloc(sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL); + if (!macres) { + printk(KERN_INFO "Alchemy: no memory for MAC1 resources\n"); 
return; + } + memcpy(macres, au1xxx_eth1_resources[ctype], + sizeof(struct resource) * MAC_RES_COUNT); + au1xxx_eth1_device.resource = macres; + + ethaddr[5] += 1; /* next addr for 2nd MAC */ + if (!i && !is_valid_ether_addr(au1xxx_eth1_platform_data.mac)) + memcpy(au1xxx_eth1_platform_data.mac, ethaddr, 6); + + /* Register second MAC if enabled in pinfunc */ + if (!(au_readl(SYS_PINFUNC) & (u32)SYS_PF_NI2)) { + ret = platform_device_register(&au1xxx_eth1_device); + if (ret) + printk(KERN_INFO "Alchemy: failed to register MAC1\n"); + } } static struct platform_device *au1xxx_platform_devices[] __initdata = { - &au1xx0_uart_device, &au1xxx_usb_ohci_device, #ifdef CONFIG_FB_AU1100 &au1100_lcd_device, @@ -460,36 +539,17 @@ static struct platform_device *au1xxx_platform_devices[] __initdata = { #ifdef SMBUS_PSC_BASE &pbdb_smbus_device, #endif - &au1xxx_eth0_device, }; static int __init au1xxx_platform_init(void) { - unsigned int uartclk = get_au1x00_uart_baud_base() * 16; - int err, i; - unsigned char ethaddr[6]; + int err, ctype = alchemy_get_cputype(); - /* Fill up uartclk. */ - for (i = 0; au1x00_uart_data[i].flags; i++) - au1x00_uart_data[i].uartclk = uartclk; - - /* use firmware-provided mac addr if available and necessary */ - i = prom_get_ethernet_addr(ethaddr); - if (!i && !is_valid_ether_addr(au1xxx_eth0_platform_data.mac)) - memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6); + alchemy_setup_uarts(ctype); + alchemy_setup_macs(ctype); err = platform_add_devices(au1xxx_platform_devices, ARRAY_SIZE(au1xxx_platform_devices)); -#ifndef CONFIG_SOC_AU1100 - ethaddr[5] += 1; /* next addr for 2nd MAC */ - if (!i && !is_valid_ether_addr(au1xxx_eth1_platform_data.mac)) - memcpy(au1xxx_eth1_platform_data.mac, ethaddr, 6); - - /* Register second MAC if enabled in pinfunc */ - if (!err && !(au_readl(SYS_PINFUNC) & (u32)SYS_PF_NI2)) - err = platform_device_register(&au1xxx_eth1_device); -#endif - return err; } diff --git a/trunk/arch/mips/alchemy/common/setup.c b/trunk/arch/mips/alchemy/common/setup.c index 561e5da2658b..1b887c868417 100644 --- a/trunk/arch/mips/alchemy/common/setup.c +++ b/trunk/arch/mips/alchemy/common/setup.c @@ -52,8 +52,6 @@ void __init plat_mem_setup(void) /* this is faster than wasting cycles trying to approximate it */ preset_lpj = (est_freq >> 1) / HZ; - board_setup(); /* board specific setup */ - if (au1xxx_cpu_needs_config_od()) /* Various early Au1xx0 errata corrected by this */ set_c0_config(1 << 19); /* Set Config[OD] */ @@ -61,6 +59,8 @@ void __init plat_mem_setup(void) /* Clear to obtain best system bus performance */ clear_c0_config(1 << 19); /* Clear Config[OD] */ + board_setup(); /* board specific setup */ + /* IO/MEM resources. 
*/ set_io_port_base(0); ioport_resource.start = IOPORT_RESOURCE_START; diff --git a/trunk/arch/mips/alchemy/common/time.c b/trunk/arch/mips/alchemy/common/time.c index 2aecb2fdf982..d5da6adbf634 100644 --- a/trunk/arch/mips/alchemy/common/time.c +++ b/trunk/arch/mips/alchemy/common/time.c @@ -141,8 +141,7 @@ static int __init alchemy_time_init(unsigned int m2int) goto cntr_err; /* register counter1 clocksource and event device */ - clocksource_set_clock(&au1x_counter1_clocksource, 32768); - clocksource_register(&au1x_counter1_clocksource); + clocksource_register_hz(&au1x_counter1_clocksource, 32768); cd->shift = 32; cd->mult = div_sc(32768, NSEC_PER_SEC, cd->shift); diff --git a/trunk/arch/mips/alchemy/devboards/db1200/setup.c b/trunk/arch/mips/alchemy/devboards/db1200/setup.c index 4a8980027ecf..1dac4f27d334 100644 --- a/trunk/arch/mips/alchemy/devboards/db1200/setup.c +++ b/trunk/arch/mips/alchemy/devboards/db1200/setup.c @@ -23,6 +23,13 @@ void __init board_setup(void) unsigned long freq0, clksrc, div, pfc; unsigned short whoami; + /* Set Config[OD] (disable overlapping bus transaction): + * This gets rid of a _lot_ of spurious interrupts (especially + * wrt. IDE); but incurs ~10% performance hit in some + * cpu-bound applications. + */ + set_c0_config(1 << 19); + bcsr_init(DB1200_BCSR_PHYS_ADDR, DB1200_BCSR_PHYS_ADDR + DB1200_BCSR_HEXLED_OFS); diff --git a/trunk/arch/mips/alchemy/devboards/db1x00/board_setup.c b/trunk/arch/mips/alchemy/devboards/db1x00/board_setup.c index 05f120ff90f9..5c956fe8760f 100644 --- a/trunk/arch/mips/alchemy/devboards/db1x00/board_setup.c +++ b/trunk/arch/mips/alchemy/devboards/db1x00/board_setup.c @@ -127,13 +127,10 @@ const char *get_system_type(void) void __init board_setup(void) { unsigned long bcsr1, bcsr2; - u32 pin_func; bcsr1 = DB1000_BCSR_PHYS_ADDR; bcsr2 = DB1000_BCSR_PHYS_ADDR + DB1000_BCSR_HEXLED_OFS; - pin_func = 0; - #ifdef CONFIG_MIPS_DB1000 printk(KERN_INFO "AMD Alchemy Au1000/Db1000 Board\n"); #endif @@ -164,12 +161,16 @@ void __init board_setup(void) /* Not valid for Au1550 */ #if defined(CONFIG_IRDA) && \ (defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1100)) - /* Set IRFIRSEL instead of GPIO15 */ - pin_func = au_readl(SYS_PINFUNC) | SYS_PF_IRF; - au_writel(pin_func, SYS_PINFUNC); - /* Power off until the driver is in use */ - bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK, - BCSR_RESETS_IRDA_MODE_OFF); + { + u32 pin_func; + + /* Set IRFIRSEL instead of GPIO15 */ + pin_func = au_readl(SYS_PINFUNC) | SYS_PF_IRF; + au_writel(pin_func, SYS_PINFUNC); + /* Power off until the driver is in use */ + bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK, + BCSR_RESETS_IRDA_MODE_OFF); + } #endif bcsr_write(BCSR_PCMCIA, 0); /* turn off PCMCIA power */ @@ -177,31 +178,35 @@ void __init board_setup(void) alchemy_gpio1_input_enable(); #ifdef CONFIG_MIPS_MIRAGE - /* GPIO[20] is output */ - alchemy_gpio_direction_output(20, 0); + { + u32 pin_func; - /* Set GPIO[210:208] instead of SSI_0 */ - pin_func = au_readl(SYS_PINFUNC) | SYS_PF_S0; + /* GPIO[20] is output */ + alchemy_gpio_direction_output(20, 0); - /* Set GPIO[215:211] for LEDs */ - pin_func |= 5 << 2; + /* Set GPIO[210:208] instead of SSI_0 */ + pin_func = au_readl(SYS_PINFUNC) | SYS_PF_S0; - /* Set GPIO[214:213] for more LEDs */ - pin_func |= 5 << 12; + /* Set GPIO[215:211] for LEDs */ + pin_func |= 5 << 2; - /* Set GPIO[207:200] instead of PCMCIA/LCD */ - pin_func |= SYS_PF_LCD | SYS_PF_PC; - au_writel(pin_func, SYS_PINFUNC); + /* Set GPIO[214:213] for more LEDs */ + pin_func |= 5 << 12; - /* - 
* Enable speaker amplifier. This should - * be part of the audio driver. - */ - alchemy_gpio_direction_output(209, 1); + /* Set GPIO[207:200] instead of PCMCIA/LCD */ + pin_func |= SYS_PF_LCD | SYS_PF_PC; + au_writel(pin_func, SYS_PINFUNC); - pm_power_off = mirage_power_off; - _machine_halt = mirage_power_off; - _machine_restart = (void(*)(char *))mips_softreset; + /* + * Enable speaker amplifier. This should + * be part of the audio driver. + */ + alchemy_gpio_direction_output(209, 1); + + pm_power_off = mirage_power_off; + _machine_halt = mirage_power_off; + _machine_restart = (void(*)(char *))mips_softreset; + } #endif #ifdef CONFIG_MIPS_BOSPORUS diff --git a/trunk/arch/mips/alchemy/devboards/pb1000/board_setup.c b/trunk/arch/mips/alchemy/devboards/pb1000/board_setup.c index 2d85c4b5be09..e64fdcbf75d0 100644 --- a/trunk/arch/mips/alchemy/devboards/pb1000/board_setup.c +++ b/trunk/arch/mips/alchemy/devboards/pb1000/board_setup.c @@ -65,7 +65,7 @@ void __init board_setup(void) /* Set AUX clock to 12 MHz * 8 = 96 MHz */ au_writel(8, SYS_AUXPLL); - au_writel(0, SYS_PINSTATERD); + alchemy_gpio1_input_enable(); udelay(100); #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) diff --git a/trunk/arch/mips/alchemy/devboards/pb1500/board_setup.c b/trunk/arch/mips/alchemy/devboards/pb1500/board_setup.c index 83f46215eb0c..3b4fa3206969 100644 --- a/trunk/arch/mips/alchemy/devboards/pb1500/board_setup.c +++ b/trunk/arch/mips/alchemy/devboards/pb1500/board_setup.c @@ -56,7 +56,7 @@ void __init board_setup(void) sys_clksrc = sys_freqctrl = pin_func = 0; /* Set AUX clock to 12 MHz * 8 = 96 MHz */ au_writel(8, SYS_AUXPLL); - au_writel(0, SYS_PINSTATERD); + alchemy_gpio1_input_enable(); udelay(100); /* GPIO201 is input for PCMCIA card detect */ diff --git a/trunk/arch/mips/alchemy/devboards/prom.c b/trunk/arch/mips/alchemy/devboards/prom.c index baeb21385058..e5306b56da6d 100644 --- a/trunk/arch/mips/alchemy/devboards/prom.c +++ b/trunk/arch/mips/alchemy/devboards/prom.c @@ -62,5 +62,5 @@ void __init prom_init(void) void prom_putchar(unsigned char c) { - alchemy_uart_putchar(UART0_PHYS_ADDR, c); + alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); } diff --git a/trunk/arch/mips/alchemy/gpr/board_setup.c b/trunk/arch/mips/alchemy/gpr/board_setup.c index ad2e3f137933..5f8f0691ed2d 100644 --- a/trunk/arch/mips/alchemy/gpr/board_setup.c +++ b/trunk/arch/mips/alchemy/gpr/board_setup.c @@ -36,9 +36,6 @@ #include -#define UART1_ADDR KSEG1ADDR(UART1_PHYS_ADDR) -#define UART3_ADDR KSEG1ADDR(UART3_PHYS_ADDR) - char irq_tab_alchemy[][5] __initdata = { [0] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, 0xff, 0xff }, }; @@ -67,18 +64,15 @@ static void gpr_power_off(void) void __init board_setup(void) { - printk(KERN_INFO "Tarpeze ITS GPR board\n"); + printk(KERN_INFO "Trapeze ITS GPR board\n"); pm_power_off = gpr_power_off; _machine_halt = gpr_power_off; _machine_restart = gpr_reset; - /* Enable UART3 */ - au_writel(0x1, UART3_ADDR + UART_MOD_CNTRL);/* clock enable (CE) */ - au_writel(0x3, UART3_ADDR + UART_MOD_CNTRL); /* CE and "enable" */ - /* Enable UART1 */ - au_writel(0x1, UART1_ADDR + UART_MOD_CNTRL); /* clock enable (CE) */ - au_writel(0x3, UART1_ADDR + UART_MOD_CNTRL); /* CE and "enable" */ + /* Enable UART1/3 */ + alchemy_uart_enable(AU1000_UART3_PHYS_ADDR); + alchemy_uart_enable(AU1000_UART1_PHYS_ADDR); /* Take away Reset of UMTS-card */ alchemy_gpio_direction_output(215, 1); diff --git a/trunk/arch/mips/alchemy/gpr/init.c b/trunk/arch/mips/alchemy/gpr/init.c index f044f4c541d7..229aafae680c 
100644 --- a/trunk/arch/mips/alchemy/gpr/init.c +++ b/trunk/arch/mips/alchemy/gpr/init.c @@ -59,5 +59,5 @@ void __init prom_init(void) void prom_putchar(unsigned char c) { - alchemy_uart_putchar(UART0_PHYS_ADDR, c); + alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); } diff --git a/trunk/arch/mips/alchemy/mtx-1/board_setup.c b/trunk/arch/mips/alchemy/mtx-1/board_setup.c index cf436ab679ae..3ae984cf98cf 100644 --- a/trunk/arch/mips/alchemy/mtx-1/board_setup.c +++ b/trunk/arch/mips/alchemy/mtx-1/board_setup.c @@ -87,7 +87,7 @@ void __init board_setup(void) au_writel(SYS_PF_NI2, SYS_PINFUNC); /* Initialize GPIO */ - au_writel(0xFFFFFFFF, SYS_TRIOUTCLR); + au_writel(~0, KSEG1ADDR(AU1000_SYS_PHYS_ADDR) + SYS_TRIOUTCLR); alchemy_gpio_direction_output(0, 0); /* Disable M66EN (PCI 66MHz) */ alchemy_gpio_direction_output(3, 1); /* Disable PCI CLKRUN# */ alchemy_gpio_direction_output(1, 1); /* Enable EXT_IO3 */ diff --git a/trunk/arch/mips/alchemy/mtx-1/init.c b/trunk/arch/mips/alchemy/mtx-1/init.c index f8d25575fa05..2e81cc7f3422 100644 --- a/trunk/arch/mips/alchemy/mtx-1/init.c +++ b/trunk/arch/mips/alchemy/mtx-1/init.c @@ -62,5 +62,5 @@ void __init prom_init(void) void prom_putchar(unsigned char c) { - alchemy_uart_putchar(UART0_PHYS_ADDR, c); + alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); } diff --git a/trunk/arch/mips/alchemy/mtx-1/platform.c b/trunk/arch/mips/alchemy/mtx-1/platform.c index 956f946218c5..55628e390fd7 100644 --- a/trunk/arch/mips/alchemy/mtx-1/platform.c +++ b/trunk/arch/mips/alchemy/mtx-1/platform.c @@ -53,8 +53,8 @@ static struct platform_device mtx1_button = { static struct resource mtx1_wdt_res[] = { [0] = { - .start = 15, - .end = 15, + .start = 215, + .end = 215, .name = "mtx1-wdt-gpio", .flags = IORESOURCE_IRQ, } diff --git a/trunk/arch/mips/alchemy/xxs1500/board_setup.c b/trunk/arch/mips/alchemy/xxs1500/board_setup.c index febfb0fb0896..81e57fad07ab 100644 --- a/trunk/arch/mips/alchemy/xxs1500/board_setup.c +++ b/trunk/arch/mips/alchemy/xxs1500/board_setup.c @@ -66,13 +66,10 @@ void __init board_setup(void) au_writel(pin_func, SYS_PINFUNC); /* Enable UART */ - au_writel(0x01, UART3_ADDR + UART_MOD_CNTRL); /* clock enable (CE) */ - mdelay(10); - au_writel(0x03, UART3_ADDR + UART_MOD_CNTRL); /* CE and "enable" */ - mdelay(10); - - /* Enable DTR = USB power up */ - au_writel(0x01, UART3_ADDR + UART_MCR); /* UART_MCR_DTR is 0x01??? 
*/ + alchemy_uart_enable(AU1000_UART3_PHYS_ADDR); + /* Enable DTR (MCR bit 0) = USB power up */ + __raw_writel(1, (void __iomem *)KSEG1ADDR(AU1000_UART3_PHYS_ADDR + 0x18)); + wmb(); #ifdef CONFIG_PCI #if defined(__MIPSEB__) diff --git a/trunk/arch/mips/alchemy/xxs1500/init.c b/trunk/arch/mips/alchemy/xxs1500/init.c index 15125c2fda7d..0ee02cfa989d 100644 --- a/trunk/arch/mips/alchemy/xxs1500/init.c +++ b/trunk/arch/mips/alchemy/xxs1500/init.c @@ -51,14 +51,13 @@ void __init prom_init(void) prom_init_cmdline(); memsize_str = prom_getenv("memsize"); - if (!memsize_str) + if (!memsize_str || strict_strtoul(memsize_str, 0, &memsize)) memsize = 0x04000000; - else - strict_strtoul(memsize_str, 0, &memsize); + add_memory_region(0, memsize, BOOT_MEM_RAM); } void prom_putchar(unsigned char c) { - alchemy_uart_putchar(UART0_PHYS_ADDR, c); + alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); } diff --git a/trunk/arch/mips/ar7/gpio.c b/trunk/arch/mips/ar7/gpio.c index 425dfa5d6e12..bb571bcdb8f2 100644 --- a/trunk/arch/mips/ar7/gpio.c +++ b/trunk/arch/mips/ar7/gpio.c @@ -325,9 +325,7 @@ int __init ar7_gpio_init(void) size = 0x1f; } - gpch->regs = ioremap_nocache(AR7_REGS_GPIO, - AR7_REGS_GPIO + 0x10); - + gpch->regs = ioremap_nocache(AR7_REGS_GPIO, size); if (!gpch->regs) { printk(KERN_ERR "%s: failed to ioremap regs\n", gpch->chip.label); diff --git a/trunk/arch/mips/bcm47xx/nvram.c b/trunk/arch/mips/bcm47xx/nvram.c index e5b6615731e5..54db815bc86c 100644 --- a/trunk/arch/mips/bcm47xx/nvram.c +++ b/trunk/arch/mips/bcm47xx/nvram.c @@ -3,6 +3,7 @@ * * Copyright (C) 2005 Broadcom Corporation * Copyright (C) 2006 Felix Fietkau + * Copyright (C) 2010-2011 Hauke Mehrtens * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -23,7 +24,7 @@ static char nvram_buf[NVRAM_SPACE]; /* Probe for NVRAM header */ -static void __init early_nvram_init(void) +static void early_nvram_init(void) { struct ssb_mipscore *mcore = &ssb_bcm47xx.mipscore; struct nvram_header *header; diff --git a/trunk/arch/mips/bcm47xx/setup.c b/trunk/arch/mips/bcm47xx/setup.c index c95f90bf734c..73b529b57433 100644 --- a/trunk/arch/mips/bcm47xx/setup.c +++ b/trunk/arch/mips/bcm47xx/setup.c @@ -3,6 +3,7 @@ * Copyright (C) 2006 Felix Fietkau * Copyright (C) 2006 Michael Buesch * Copyright (C) 2010 Waldemar Brodkorb + * Copyright (C) 2010-2011 Hauke Mehrtens * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -57,10 +58,49 @@ static void bcm47xx_machine_halt(void) } #define READ_FROM_NVRAM(_outvar, name, buf) \ - if (nvram_getenv(name, buf, sizeof(buf)) >= 0)\ + if (nvram_getprefix(prefix, name, buf, sizeof(buf)) >= 0)\ sprom->_outvar = simple_strtoul(buf, NULL, 0); -static void bcm47xx_fill_sprom(struct ssb_sprom *sprom) +#define READ_FROM_NVRAM2(_outvar, name1, name2, buf) \ + if (nvram_getprefix(prefix, name1, buf, sizeof(buf)) >= 0 || \ + nvram_getprefix(prefix, name2, buf, sizeof(buf)) >= 0)\ + sprom->_outvar = simple_strtoul(buf, NULL, 0); + +static inline int nvram_getprefix(const char *prefix, char *name, + char *buf, int len) +{ + if (prefix) { + char key[100]; + + snprintf(key, sizeof(key), "%s%s", prefix, name); + return nvram_getenv(key, buf, len); + } + + return nvram_getenv(name, buf, len); +} + +static u32 nvram_getu32(const char *name, char *buf, int len) +{ + int rv; + char key[100]; + u16 var0, var1; + + snprintf(key, sizeof(key), "%s0", 
name); + rv = nvram_getenv(key, buf, len); + /* return 0 here so this looks like unset */ + if (rv < 0) + return 0; + var0 = simple_strtoul(buf, NULL, 0); + + snprintf(key, sizeof(key), "%s1", name); + rv = nvram_getenv(key, buf, len); + if (rv < 0) + return 0; + var1 = simple_strtoul(buf, NULL, 0); + return var1 << 16 | var0; +} + +static void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix) { char buf[100]; u32 boardflags; @@ -69,11 +109,12 @@ static void bcm47xx_fill_sprom(struct ssb_sprom *sprom) sprom->revision = 1; /* Fallback: Old hardware does not define this. */ READ_FROM_NVRAM(revision, "sromrev", buf); - if (nvram_getenv("il0macaddr", buf, sizeof(buf)) >= 0) + if (nvram_getprefix(prefix, "il0macaddr", buf, sizeof(buf)) >= 0 || + nvram_getprefix(prefix, "macaddr", buf, sizeof(buf)) >= 0) nvram_parse_macaddr(buf, sprom->il0mac); - if (nvram_getenv("et0macaddr", buf, sizeof(buf)) >= 0) + if (nvram_getprefix(prefix, "et0macaddr", buf, sizeof(buf)) >= 0) nvram_parse_macaddr(buf, sprom->et0mac); - if (nvram_getenv("et1macaddr", buf, sizeof(buf)) >= 0) + if (nvram_getprefix(prefix, "et1macaddr", buf, sizeof(buf)) >= 0) nvram_parse_macaddr(buf, sprom->et1mac); READ_FROM_NVRAM(et0phyaddr, "et0phyaddr", buf); READ_FROM_NVRAM(et1phyaddr, "et1phyaddr", buf); @@ -95,20 +136,36 @@ static void bcm47xx_fill_sprom(struct ssb_sprom *sprom) READ_FROM_NVRAM(pa1hib0, "pa1hib0", buf); READ_FROM_NVRAM(pa1hib2, "pa1hib1", buf); READ_FROM_NVRAM(pa1hib1, "pa1hib2", buf); - READ_FROM_NVRAM(gpio0, "wl0gpio0", buf); - READ_FROM_NVRAM(gpio1, "wl0gpio1", buf); - READ_FROM_NVRAM(gpio2, "wl0gpio2", buf); - READ_FROM_NVRAM(gpio3, "wl0gpio3", buf); - READ_FROM_NVRAM(maxpwr_bg, "pa0maxpwr", buf); - READ_FROM_NVRAM(maxpwr_al, "pa1lomaxpwr", buf); - READ_FROM_NVRAM(maxpwr_a, "pa1maxpwr", buf); - READ_FROM_NVRAM(maxpwr_ah, "pa1himaxpwr", buf); - READ_FROM_NVRAM(itssi_a, "pa1itssit", buf); - READ_FROM_NVRAM(itssi_bg, "pa0itssit", buf); + READ_FROM_NVRAM2(gpio0, "ledbh0", "wl0gpio0", buf); + READ_FROM_NVRAM2(gpio1, "ledbh1", "wl0gpio1", buf); + READ_FROM_NVRAM2(gpio2, "ledbh2", "wl0gpio2", buf); + READ_FROM_NVRAM2(gpio3, "ledbh3", "wl0gpio3", buf); + READ_FROM_NVRAM2(maxpwr_bg, "maxp2ga0", "pa0maxpwr", buf); + READ_FROM_NVRAM2(maxpwr_al, "maxp5gla0", "pa1lomaxpwr", buf); + READ_FROM_NVRAM2(maxpwr_a, "maxp5ga0", "pa1maxpwr", buf); + READ_FROM_NVRAM2(maxpwr_ah, "maxp5gha0", "pa1himaxpwr", buf); + READ_FROM_NVRAM2(itssi_bg, "itt5ga0", "pa0itssit", buf); + READ_FROM_NVRAM2(itssi_a, "itt2ga0", "pa1itssit", buf); READ_FROM_NVRAM(tri2g, "tri2g", buf); READ_FROM_NVRAM(tri5gl, "tri5gl", buf); READ_FROM_NVRAM(tri5g, "tri5g", buf); READ_FROM_NVRAM(tri5gh, "tri5gh", buf); + READ_FROM_NVRAM(txpid2g[0], "txpid2ga0", buf); + READ_FROM_NVRAM(txpid2g[1], "txpid2ga1", buf); + READ_FROM_NVRAM(txpid2g[2], "txpid2ga2", buf); + READ_FROM_NVRAM(txpid2g[3], "txpid2ga3", buf); + READ_FROM_NVRAM(txpid5g[0], "txpid5ga0", buf); + READ_FROM_NVRAM(txpid5g[1], "txpid5ga1", buf); + READ_FROM_NVRAM(txpid5g[2], "txpid5ga2", buf); + READ_FROM_NVRAM(txpid5g[3], "txpid5ga3", buf); + READ_FROM_NVRAM(txpid5gl[0], "txpid5gla0", buf); + READ_FROM_NVRAM(txpid5gl[1], "txpid5gla1", buf); + READ_FROM_NVRAM(txpid5gl[2], "txpid5gla2", buf); + READ_FROM_NVRAM(txpid5gl[3], "txpid5gla3", buf); + READ_FROM_NVRAM(txpid5gh[0], "txpid5gha0", buf); + READ_FROM_NVRAM(txpid5gh[1], "txpid5gha1", buf); + READ_FROM_NVRAM(txpid5gh[2], "txpid5gha2", buf); + READ_FROM_NVRAM(txpid5gh[3], "txpid5gha3", buf); READ_FROM_NVRAM(rxpo2g, "rxpo2g", buf); 
READ_FROM_NVRAM(rxpo5g, "rxpo5g", buf); READ_FROM_NVRAM(rssisav2g, "rssisav2g", buf); @@ -120,19 +177,27 @@ static void bcm47xx_fill_sprom(struct ssb_sprom *sprom) READ_FROM_NVRAM(rssismf5g, "rssismf5g", buf); READ_FROM_NVRAM(bxa5g, "bxa5g", buf); READ_FROM_NVRAM(cck2gpo, "cck2gpo", buf); - READ_FROM_NVRAM(ofdm2gpo, "ofdm2gpo", buf); - READ_FROM_NVRAM(ofdm5glpo, "ofdm5glpo", buf); - READ_FROM_NVRAM(ofdm5gpo, "ofdm5gpo", buf); - READ_FROM_NVRAM(ofdm5ghpo, "ofdm5ghpo", buf); - if (nvram_getenv("boardflags", buf, sizeof(buf)) >= 0) { + sprom->ofdm2gpo = nvram_getu32("ofdm2gpo", buf, sizeof(buf)); + sprom->ofdm5glpo = nvram_getu32("ofdm5glpo", buf, sizeof(buf)); + sprom->ofdm5gpo = nvram_getu32("ofdm5gpo", buf, sizeof(buf)); + sprom->ofdm5ghpo = nvram_getu32("ofdm5ghpo", buf, sizeof(buf)); + + READ_FROM_NVRAM(antenna_gain.ghz24.a0, "ag0", buf); + READ_FROM_NVRAM(antenna_gain.ghz24.a1, "ag1", buf); + READ_FROM_NVRAM(antenna_gain.ghz24.a2, "ag2", buf); + READ_FROM_NVRAM(antenna_gain.ghz24.a3, "ag3", buf); + memcpy(&sprom->antenna_gain.ghz5, &sprom->antenna_gain.ghz24, + sizeof(sprom->antenna_gain.ghz5)); + + if (nvram_getprefix(prefix, "boardflags", buf, sizeof(buf)) >= 0) { boardflags = simple_strtoul(buf, NULL, 0); if (boardflags) { sprom->boardflags_lo = (boardflags & 0x0000FFFFU); sprom->boardflags_hi = (boardflags & 0xFFFF0000U) >> 16; } } - if (nvram_getenv("boardflags2", buf, sizeof(buf)) >= 0) { + if (nvram_getprefix(prefix, "boardflags2", buf, sizeof(buf)) >= 0) { boardflags = simple_strtoul(buf, NULL, 0); if (boardflags) { sprom->boardflags2_lo = (boardflags & 0x0000FFFFU); @@ -141,6 +206,22 @@ static void bcm47xx_fill_sprom(struct ssb_sprom *sprom) } } +int bcm47xx_get_sprom(struct ssb_bus *bus, struct ssb_sprom *out) +{ + char prefix[10]; + + if (bus->bustype == SSB_BUSTYPE_PCI) { + snprintf(prefix, sizeof(prefix), "pci/%u/%u/", + bus->host_pci->bus->number + 1, + PCI_SLOT(bus->host_pci->devfn)); + bcm47xx_fill_sprom(out, prefix); + return 0; + } else { + printk(KERN_WARNING "bcm47xx: unable to fill SPROM for given bustype.\n"); + return -EINVAL; + } +} + static int bcm47xx_get_invariants(struct ssb_bus *bus, struct ssb_init_invariants *iv) { @@ -158,7 +239,7 @@ static int bcm47xx_get_invariants(struct ssb_bus *bus, if (nvram_getenv("boardrev", buf, sizeof(buf)) >= 0) iv->boardinfo.rev = (u16)simple_strtoul(buf, NULL, 0); - bcm47xx_fill_sprom(&iv->sprom); + bcm47xx_fill_sprom(&iv->sprom, NULL); if (nvram_getenv("cardbus", buf, sizeof(buf)) >= 0) iv->has_cardbus_slot = !!simple_strtoul(buf, NULL, 10); @@ -172,6 +253,11 @@ void __init plat_mem_setup(void) char buf[100]; struct ssb_mipscore *mcore; + err = ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom); + if (err) + printk(KERN_WARNING "bcm47xx: someone else already registered" + " a ssb SPROM callback handler (err %d)\n", err); + err = ssb_bus_ssbbus_register(&ssb_bcm47xx, SSB_ENUM_BASE, bcm47xx_get_invariants); if (err) diff --git a/trunk/arch/mips/bcm63xx/boards/board_bcm963xx.c b/trunk/arch/mips/bcm63xx/boards/board_bcm963xx.c index 8dba8cfb752f..40b223b603be 100644 --- a/trunk/arch/mips/bcm63xx/boards/board_bcm963xx.c +++ b/trunk/arch/mips/bcm63xx/boards/board_bcm963xx.c @@ -643,6 +643,17 @@ static struct ssb_sprom bcm63xx_sprom = { .boardflags_lo = 0x2848, .boardflags_hi = 0x0000, }; + +int bcm63xx_get_fallback_sprom(struct ssb_bus *bus, struct ssb_sprom *out) +{ + if (bus->bustype == SSB_BUSTYPE_PCI) { + memcpy(out, &bcm63xx_sprom, sizeof(struct ssb_sprom)); + return 0; + } else { + printk(KERN_ERR PFX "unable to fill SPROM 
for given bustype.\n"); + return -EINVAL; + } +} #endif /* @@ -793,8 +804,9 @@ void __init board_prom_init(void) if (!board_get_mac_address(bcm63xx_sprom.il0mac)) { memcpy(bcm63xx_sprom.et0mac, bcm63xx_sprom.il0mac, ETH_ALEN); memcpy(bcm63xx_sprom.et1mac, bcm63xx_sprom.il0mac, ETH_ALEN); - if (ssb_arch_set_fallback_sprom(&bcm63xx_sprom) < 0) - printk(KERN_ERR "failed to register fallback SPROM\n"); + if (ssb_arch_register_fallback_sprom( + &bcm63xx_get_fallback_sprom) < 0) + printk(KERN_ERR PFX "failed to register fallback SPROM\n"); } #endif } diff --git a/trunk/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/trunk/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c index 88c9d963be88..9a6243676e22 100644 --- a/trunk/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c +++ b/trunk/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c @@ -16,8 +16,8 @@ int main(int argc, char *argv[]) { + unsigned long long vmlinux_size, vmlinux_load_addr, vmlinuz_load_addr; struct stat sb; - uint64_t vmlinux_size, vmlinux_load_addr, vmlinuz_load_addr; if (argc != 3) { fprintf(stderr, "Usage: %s \n", diff --git a/trunk/arch/mips/boot/compressed/uart-alchemy.c b/trunk/arch/mips/boot/compressed/uart-alchemy.c index 1bff22fa089b..eb063e6dead9 100644 --- a/trunk/arch/mips/boot/compressed/uart-alchemy.c +++ b/trunk/arch/mips/boot/compressed/uart-alchemy.c @@ -3,5 +3,5 @@ void putc(char c) { /* all current (Jan. 2010) in-kernel boards */ - alchemy_uart_putchar(UART0_PHYS_ADDR, c); + alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); } diff --git a/trunk/arch/mips/cavium-octeon/Kconfig b/trunk/arch/mips/cavium-octeon/Kconfig index caae22858163..cad555ebeca3 100644 --- a/trunk/arch/mips/cavium-octeon/Kconfig +++ b/trunk/arch/mips/cavium-octeon/Kconfig @@ -1,11 +1,7 @@ -config CAVIUM_OCTEON_SPECIFIC_OPTIONS - bool "Enable Octeon specific options" - depends on CPU_CAVIUM_OCTEON - default "y" +if CPU_CAVIUM_OCTEON config CAVIUM_CN63XXP1 bool "Enable CN63XXP1 errata worarounds" - depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS default "n" help The CN63XXP1 chip requires build time workarounds to @@ -16,7 +12,6 @@ config CAVIUM_CN63XXP1 config CAVIUM_OCTEON_2ND_KERNEL bool "Build the kernel to be used as a 2nd kernel on the same chip" - depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS default "n" help This option configures this kernel to be linked at a different @@ -26,7 +21,6 @@ config CAVIUM_OCTEON_2ND_KERNEL config CAVIUM_OCTEON_HW_FIX_UNALIGNED bool "Enable hardware fixups of unaligned loads and stores" - depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS default "y" help Configure the Octeon hardware to automatically fix unaligned loads @@ -38,7 +32,6 @@ config CAVIUM_OCTEON_HW_FIX_UNALIGNED config CAVIUM_OCTEON_CVMSEG_SIZE int "Number of L1 cache lines reserved for CVMSEG memory" - depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS range 0 54 default 1 help @@ -50,7 +43,6 @@ config CAVIUM_OCTEON_CVMSEG_SIZE config CAVIUM_OCTEON_LOCK_L2 bool "Lock often used kernel code in the L2" - depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS default "y" help Enable locking parts of the kernel into the L2 cache. 
@@ -93,7 +85,6 @@ config CAVIUM_OCTEON_LOCK_L2_MEMCPY config ARCH_SPARSEMEM_ENABLE def_bool y select SPARSEMEM_STATIC - depends on CPU_CAVIUM_OCTEON config CAVIUM_OCTEON_HELPER def_bool y @@ -107,6 +98,8 @@ config NEED_SG_DMA_LENGTH config SWIOTLB def_bool y - depends on CPU_CAVIUM_OCTEON select IOMMU_HELPER select NEED_SG_DMA_LENGTH + + +endif # CPU_CAVIUM_OCTEON diff --git a/trunk/arch/mips/cavium-octeon/csrc-octeon.c b/trunk/arch/mips/cavium-octeon/csrc-octeon.c index 26bf71130bf8..29d56afbb02d 100644 --- a/trunk/arch/mips/cavium-octeon/csrc-octeon.c +++ b/trunk/arch/mips/cavium-octeon/csrc-octeon.c @@ -105,8 +105,7 @@ unsigned long long notrace sched_clock(void) void __init plat_time_init(void) { clocksource_mips.rating = 300; - clocksource_set_clock(&clocksource_mips, octeon_get_clock_rate()); - clocksource_register(&clocksource_mips); + clocksource_register_hz(&clocksource_mips, octeon_get_clock_rate()); } static u64 octeon_udelay_factor; diff --git a/trunk/arch/mips/cavium-octeon/setup.c b/trunk/arch/mips/cavium-octeon/setup.c index 0707fae3f0ee..2d9028f1474c 100644 --- a/trunk/arch/mips/cavium-octeon/setup.c +++ b/trunk/arch/mips/cavium-octeon/setup.c @@ -288,7 +288,6 @@ void octeon_user_io_init(void) union octeon_cvmemctl cvmmemctl; union cvmx_iob_fau_timeout fau_timeout; union cvmx_pow_nw_tim nm_tim; - uint64_t cvmctl; /* Get the current settings for CP0_CVMMEMCTL_REG */ cvmmemctl.u64 = read_c0_cvmmemctl(); @@ -392,12 +391,6 @@ void octeon_user_io_init(void) CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128); - /* Move the performance counter interrupts to IRQ 6 */ - cvmctl = read_c0_cvmctl(); - cvmctl &= ~(7 << 7); - cvmctl |= 6 << 7; - write_c0_cvmctl(cvmctl); - /* Set a default for the hardware timeouts */ fau_timeout.u64 = 0; fau_timeout.s.tout_val = 0xfff; diff --git a/trunk/arch/mips/cavium-octeon/smp.c b/trunk/arch/mips/cavium-octeon/smp.c index ba78b21cc8d0..8b606423bbd7 100644 --- a/trunk/arch/mips/cavium-octeon/smp.c +++ b/trunk/arch/mips/cavium-octeon/smp.c @@ -37,13 +37,15 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id) uint64_t action; /* Load the mailbox register to figure out what we're supposed to do */ - action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)); + action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)) & 0xffff; /* Clear the mailbox to clear the interrupt */ cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action); if (action & SMP_CALL_FUNCTION) smp_call_function_interrupt(); + if (action & SMP_RESCHEDULE_YOURSELF) + scheduler_ipi(); /* Check if we've been told to flush the icache */ if (action & SMP_ICACHE_FLUSH) @@ -200,16 +202,15 @@ void octeon_prepare_cpus(unsigned int max_cpus) if (labi->labi_signature != LABI_SIGNATURE) panic("The bootloader version on this board is incorrect."); #endif - - cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff); + /* + * Only the low order mailbox bits are used for IPIs, leave + * the other bits alone. 
+ */ + cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff); if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED, - "mailbox0", mailbox_interrupt)) { + "SMP-IPI", mailbox_interrupt)) { panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n"); } - if (request_irq(OCTEON_IRQ_MBOX1, mailbox_interrupt, IRQF_DISABLED, - "mailbox1", mailbox_interrupt)) { - panic("Cannot request_irq(OCTEON_IRQ_MBOX1)\n"); - } } /** diff --git a/trunk/arch/mips/configs/lemote2f_defconfig b/trunk/arch/mips/configs/lemote2f_defconfig index 167c1d07b809..b6acd2f256b6 100644 --- a/trunk/arch/mips/configs/lemote2f_defconfig +++ b/trunk/arch/mips/configs/lemote2f_defconfig @@ -86,8 +86,8 @@ CONFIG_NET_SCHED=y CONFIG_NET_EMATCH=y CONFIG_NET_CLS_ACT=y CONFIG_BT=m -CONFIG_BT_L2CAP=m -CONFIG_BT_SCO=m +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y CONFIG_BT_RFCOMM=m CONFIG_BT_RFCOMM_TTY=y CONFIG_BT_BNEP=m @@ -329,7 +329,7 @@ CONFIG_USB_LED=m CONFIG_USB_GADGET=m CONFIG_USB_GADGET_M66592=y CONFIG_MMC=m -CONFIG_LEDS_CLASS=m +CONFIG_LEDS_CLASS=y CONFIG_STAGING=y # CONFIG_STAGING_EXCLUDE_BUILD is not set CONFIG_FB_SM7XX=y diff --git a/trunk/arch/mips/configs/malta_defconfig b/trunk/arch/mips/configs/malta_defconfig index 7270f3183bda..5527abbb7dea 100644 --- a/trunk/arch/mips/configs/malta_defconfig +++ b/trunk/arch/mips/configs/malta_defconfig @@ -374,7 +374,7 @@ CONFIG_FB_CIRRUS=y # CONFIG_VGA_CONSOLE is not set CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_HID=m -CONFIG_LEDS_CLASS=m +CONFIG_LEDS_CLASS=y CONFIG_LEDS_TRIGGER_TIMER=m CONFIG_LEDS_TRIGGER_IDE_DISK=y CONFIG_LEDS_TRIGGER_HEARTBEAT=m diff --git a/trunk/arch/mips/configs/mtx1_defconfig b/trunk/arch/mips/configs/mtx1_defconfig index a97a42c6b2c8..37862b2ce363 100644 --- a/trunk/arch/mips/configs/mtx1_defconfig +++ b/trunk/arch/mips/configs/mtx1_defconfig @@ -225,8 +225,8 @@ CONFIG_TOSHIBA_FIR=m CONFIG_VLSI_FIR=m CONFIG_MCS_FIR=m CONFIG_BT=m -CONFIG_BT_L2CAP=m -CONFIG_BT_SCO=m +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y CONFIG_BT_RFCOMM=m CONFIG_BT_RFCOMM_TTY=y CONFIG_BT_BNEP=m diff --git a/trunk/arch/mips/configs/nlm_xlr_defconfig b/trunk/arch/mips/configs/nlm_xlr_defconfig new file mode 100644 index 000000000000..e4b399fdaa61 --- /dev/null +++ b/trunk/arch/mips/configs/nlm_xlr_defconfig @@ -0,0 +1,574 @@ +CONFIG_NLM_XLR_BOARD=y +CONFIG_HIGHMEM=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 +CONFIG_SMP=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT_VOLUNTARY=y +CONFIG_KEXEC=y +CONFIG_EXPERIMENTAL=y +CONFIG_CROSS_COMPILE="mips64-unknown-linux-gnu-" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_AUDIT=y +CONFIG_NAMESPACES=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="usr/dev_file_list usr/rootfs" +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_INITRAMFS_COMPRESSION_GZIP=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_EXPERT=y +CONFIG_KALLSYMS_ALL=y +# CONFIG_ELF_CORE is not set +# CONFIG_PCSPKR_PLATFORM is not set +# CONFIG_PERF_EVENTS is not set +# CONFIG_COMPAT_BRK is not set +CONFIG_PROFILING=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BINFMT_MISC=m +CONFIG_PM_RUNTIME=y +CONFIG_PM_DEBUG=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=m +CONFIG_NET_KEY=m +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y 
+CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_NET_IPIP=m +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_MODE_TRANSPORT=m +CONFIG_INET_XFRM_MODE_TUNNEL=m +CONFIG_INET_XFRM_MODE_BEET=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_INET6_XFRM_MODE_TRANSPORT=m +CONFIG_INET6_XFRM_MODE_TUNNEL=m +CONFIG_INET6_XFRM_MODE_BEET=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_NETLABEL=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_UDPLITE=m +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NETFILTER_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +CONFIG_IP_VS_FTP=m +CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_IP_NF_QUEUE=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_LOG=m +CONFIG_IP_NF_TARGET_ULOG=m 
+CONFIG_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_IP6_NF_QUEUE=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_TARGET_LOG=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_DECNET_NF_GRABULATOR=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_ULOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_IP_DCCP=m +CONFIG_RDS=m +CONFIG_RDS_TCP=m +CONFIG_TIPC=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_DECNET=m +CONFIG_LLC2=m +CONFIG_IPX=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_IPDDP_DECAP=y +CONFIG_X25=m +CONFIG_LAPB=m +CONFIG_ECONET=m +CONFIG_ECONET_AUNUDP=y +CONFIG_ECONET_NATIVE=y +CONFIG_WAN_ROUTER=m +CONFIG_PHONET=m +CONFIG_IEEE802154=m +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_ATM=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_DCB=y +CONFIG_NET_PKTGEN=m +# CONFIG_WIRELESS is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_STANDALONE is not set +CONFIG_CONNECTOR=y +CONFIG_MTD=m +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_OSD=m +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=65536 +CONFIG_CDROM_PKTCDVD=y +CONFIG_MISC_DEVICES=y +CONFIG_RAID_ATTRS=m +CONFIG_SCSI=y +CONFIG_SCSI_TGT=m +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +CONFIG_CHR_DEV_OSST=m +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_MULTI_LUN=y +CONFIG_SCSI_CONSTANTS=y 
+CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_TGT_ATTRS=y +CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SCSI_SRP_TGT_ATTRS=y +CONFIG_ISCSI_TCP=m +CONFIG_LIBFCOE=m +CONFIG_SCSI_DEBUG=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=m +CONFIG_SCSI_DH_HP_SW=m +CONFIG_SCSI_DH_EMC=m +CONFIG_SCSI_DH_ALUA=m +CONFIG_SCSI_OSD_INITIATOR=m +CONFIG_SCSI_OSD_ULD=m +# CONFIG_INPUT_MOUSEDEV is not set +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_EVBUG=m +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO_I8042 is not set +CONFIG_SERIO_SERPORT=m +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_RAW=m +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_DEVPTS_MULTIPLE_INSTANCES=y +CONFIG_LEGACY_PTY_COUNT=0 +CONFIG_SERIAL_NONSTANDARD=y +CONFIG_N_HDLC=m +# CONFIG_DEVKMEM is not set +CONFIG_STALDRV=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=48 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_SERIAL_8250_RSA=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +CONFIG_RAW_DRIVER=m +# CONFIG_HWMON is not set +# CONFIG_VGA_CONSOLE is not set +# CONFIG_HID_SUPPORT is not set +# CONFIG_USB_SUPPORT is not set +CONFIG_UIO=y +CONFIG_UIO_PDRV=m +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y +CONFIG_OCFS2_FS=m +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +CONFIG_NILFS2_FS=m +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_QFMT_V1=m +CONFIG_QFMT_V2=m +CONFIG_AUTOFS4_FS=m +CONFIG_FUSE_FS=y +CONFIG_CUSE=m +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +CONFIG_FSCACHE_HISTOGRAM=y +CONFIG_CACHEFILES=m +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_NTFS_FS=m +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_CONFIGFS_FS=y +CONFIG_ADFS_FS=m +CONFIG_AFFS_FS=m +CONFIG_ECRYPT_FS=y +CONFIG_HFS_FS=m +CONFIG_HFSPLUS_FS=m +CONFIG_BEFS_FS=m +CONFIG_BFS_FS=m +CONFIG_EFS_FS=m +CONFIG_CRAMFS=m +CONFIG_SQUASHFS=m +CONFIG_VXFS_FS=m +CONFIG_MINIX_FS=m +CONFIG_OMFS_FS=m +CONFIG_HPFS_FS=m +CONFIG_QNX4FS_FS=m +CONFIG_ROMFS_FS=m +CONFIG_SYSV_FS=m +CONFIG_UFS_FS=m +CONFIG_EXOFS_FS=m +CONFIG_NFS_FS=m +CONFIG_NFS_V3=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_FSCACHE=y +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_CIFS=m +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_DFS_UPCALL=y +CONFIG_CIFS_EXPERIMENTAL=y +CONFIG_NCP_FS=m +CONFIG_NCPFS_PACKET_SIGNING=y +CONFIG_NCPFS_IOCTL_LOCKING=y +CONFIG_NCPFS_STRONG=y +CONFIG_NCPFS_NFS_NS=y +CONFIG_NCPFS_OS2_NS=y +CONFIG_NCPFS_NLS=y +CONFIG_NCPFS_EXTRAS=y +CONFIG_CODA_FS=m +CONFIG_AFS_FS=m +CONFIG_PARTITION_ADVANCED=y +CONFIG_ACORN_PARTITION=y +CONFIG_ACORN_PARTITION_ICS=y +CONFIG_ACORN_PARTITION_RISCIX=y +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +CONFIG_ATARI_PARTITION=y +CONFIG_MAC_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_EFI_PARTITION=y +CONFIG_SYSV68_PARTITION=y +CONFIG_NLS=y 
+CONFIG_NLS_DEFAULT="cp437" +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_PRINTK_TIME=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_UNUSED_SYMBOLS=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_SYSCTL_SYSCALL_CHECK=y +CONFIG_SCHED_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_KGDB=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_LSM_MMAP_MIN_ADDR=0 +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_SECURITY_SMACK=y +CONFIG_SECURITY_TOMOYO=y +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_SHA256=m +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_ZLIB=m +CONFIG_CRYPTO_LZO=m +CONFIG_CRC_CCITT=m +CONFIG_CRC7=m diff --git a/trunk/arch/mips/include/asm/cache.h b/trunk/arch/mips/include/asm/cache.h index 650ac9ba734c..b4db69fbc40c 100644 --- a/trunk/arch/mips/include/asm/cache.h +++ b/trunk/arch/mips/include/asm/cache.h @@ -17,6 +17,6 @@ #define SMP_CACHE_SHIFT L1_CACHE_SHIFT #define SMP_CACHE_BYTES L1_CACHE_BYTES -#define __read_mostly __attribute__((__section__(".data.read_mostly"))) +#define __read_mostly __attribute__((__section__(".data..read_mostly"))) #endif /* _ASM_CACHE_H */ diff --git a/trunk/arch/mips/include/asm/cevt-r4k.h b/trunk/arch/mips/include/asm/cevt-r4k.h index fa4328f9124f..65f9bdd02f1f 100644 --- a/trunk/arch/mips/include/asm/cevt-r4k.h +++ b/trunk/arch/mips/include/asm/cevt-r4k.h @@ -14,6 +14,9 @@ #ifndef __ASM_CEVT_R4K_H #define __ASM_CEVT_R4K_H +#include +#include + DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); void mips_event_handler(struct clock_event_device *dev); diff --git a/trunk/arch/mips/include/asm/cpu.h b/trunk/arch/mips/include/asm/cpu.h index 86877539c6e8..34c0d3cb116f 100644 --- a/trunk/arch/mips/include/asm/cpu.h +++ 
b/trunk/arch/mips/include/asm/cpu.h @@ -33,6 +33,7 @@ #define PRID_COMP_TOSHIBA 0x070000 #define PRID_COMP_LSI 0x080000 #define PRID_COMP_LEXRA 0x0b0000 +#define PRID_COMP_NETLOGIC 0x0c0000 #define PRID_COMP_CAVIUM 0x0d0000 #define PRID_COMP_INGENIC 0xd00000 @@ -141,6 +142,31 @@ #define PRID_IMP_JZRISC 0x0200 +/* + * These are the PRID's for when 23:16 == PRID_COMP_NETLOGIC + */ +#define PRID_IMP_NETLOGIC_XLR732 0x0000 +#define PRID_IMP_NETLOGIC_XLR716 0x0200 +#define PRID_IMP_NETLOGIC_XLR532 0x0900 +#define PRID_IMP_NETLOGIC_XLR308 0x0600 +#define PRID_IMP_NETLOGIC_XLR532C 0x0800 +#define PRID_IMP_NETLOGIC_XLR516C 0x0a00 +#define PRID_IMP_NETLOGIC_XLR508C 0x0b00 +#define PRID_IMP_NETLOGIC_XLR308C 0x0f00 +#define PRID_IMP_NETLOGIC_XLS608 0x8000 +#define PRID_IMP_NETLOGIC_XLS408 0x8800 +#define PRID_IMP_NETLOGIC_XLS404 0x8c00 +#define PRID_IMP_NETLOGIC_XLS208 0x8e00 +#define PRID_IMP_NETLOGIC_XLS204 0x8f00 +#define PRID_IMP_NETLOGIC_XLS108 0xce00 +#define PRID_IMP_NETLOGIC_XLS104 0xcf00 +#define PRID_IMP_NETLOGIC_XLS616B 0x4000 +#define PRID_IMP_NETLOGIC_XLS608B 0x4a00 +#define PRID_IMP_NETLOGIC_XLS416B 0x4400 +#define PRID_IMP_NETLOGIC_XLS412B 0x4c00 +#define PRID_IMP_NETLOGIC_XLS408B 0x4e00 +#define PRID_IMP_NETLOGIC_XLS404B 0x4f00 + /* * Definitions for 7:0 on legacy processors */ @@ -234,6 +260,7 @@ enum cpu_type_enum { */ CPU_5KC, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2, + CPU_XLR, CPU_LAST }; diff --git a/trunk/arch/mips/include/asm/dma-mapping.h b/trunk/arch/mips/include/asm/dma-mapping.h index 655f849bd08d..7aa37ddfca4b 100644 --- a/trunk/arch/mips/include/asm/dma-mapping.h +++ b/trunk/arch/mips/include/asm/dma-mapping.h @@ -5,7 +5,9 @@ #include #include +#ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */ #include +#endif extern struct dma_map_ops *mips_dma_map_ops; diff --git a/trunk/arch/mips/include/asm/hugetlb.h b/trunk/arch/mips/include/asm/hugetlb.h index f5e856015329..c565b7c3f0b5 100644 --- a/trunk/arch/mips/include/asm/hugetlb.h +++ b/trunk/arch/mips/include/asm/hugetlb.h @@ -70,6 +70,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { + flush_tlb_mm(vma->vm_mm); } static inline int huge_pte_none(pte_t pte) diff --git a/trunk/arch/mips/include/asm/i8253.h b/trunk/arch/mips/include/asm/i8253.h index 48bb82372994..9ad011366f73 100644 --- a/trunk/arch/mips/include/asm/i8253.h +++ b/trunk/arch/mips/include/asm/i8253.h @@ -12,8 +12,13 @@ #define PIT_CH0 0x40 #define PIT_CH2 0x42 +#define PIT_LATCH LATCH + extern raw_spinlock_t i8253_lock; extern void setup_pit_timer(void); +#define inb_pit inb_p +#define outb_pit outb_p + #endif /* __ASM_I8253_H */ diff --git a/trunk/arch/mips/include/asm/jump_label.h b/trunk/arch/mips/include/asm/jump_label.h index 7622ccf75076..1881b316ca45 100644 --- a/trunk/arch/mips/include/asm/jump_label.h +++ b/trunk/arch/mips/include/asm/jump_label.h @@ -20,16 +20,18 @@ #define WORD_INSN ".word" #endif -#define JUMP_LABEL(key, label) \ - do { \ - asm goto("1:\tnop\n\t" \ - "nop\n\t" \ - ".pushsection __jump_table, \"a\"\n\t" \ - WORD_INSN " 1b, %l[" #label "], %0\n\t" \ - ".popsection\n\t" \ - : : "i" (key) : : label); \ - } while (0) - +static __always_inline bool arch_static_branch(struct jump_label_key *key) +{ + asm goto("1:\tnop\n\t" + "nop\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + WORD_INSN " 1b, %l[l_yes], %0\n\t" + 
".popsection\n\t" + : : "i" (key) : : l_yes); + return false; +l_yes: + return true; +} #endif /* __KERNEL__ */ diff --git a/trunk/arch/mips/include/asm/mach-au1x00/au1000.h b/trunk/arch/mips/include/asm/mach-au1x00/au1000.h index a6976619160a..f260ebed713b 100644 --- a/trunk/arch/mips/include/asm/mach-au1x00/au1000.h +++ b/trunk/arch/mips/include/asm/mach-au1x00/au1000.h @@ -161,6 +161,45 @@ static inline int alchemy_get_cputype(void) return ALCHEMY_CPU_UNKNOWN; } +/* return number of uarts on a given cputype */ +static inline int alchemy_get_uarts(int type) +{ + switch (type) { + case ALCHEMY_CPU_AU1000: + return 4; + case ALCHEMY_CPU_AU1500: + case ALCHEMY_CPU_AU1200: + return 2; + case ALCHEMY_CPU_AU1100: + case ALCHEMY_CPU_AU1550: + return 3; + } + return 0; +} + +/* enable an UART block if it isn't already */ +static inline void alchemy_uart_enable(u32 uart_phys) +{ + void __iomem *addr = (void __iomem *)KSEG1ADDR(uart_phys); + + /* reset, enable clock, deassert reset */ + if ((__raw_readl(addr + 0x100) & 3) != 3) { + __raw_writel(0, addr + 0x100); + wmb(); + __raw_writel(1, addr + 0x100); + wmb(); + } + __raw_writel(3, addr + 0x100); + wmb(); +} + +static inline void alchemy_uart_disable(u32 uart_phys) +{ + void __iomem *addr = (void __iomem *)KSEG1ADDR(uart_phys); + __raw_writel(0, addr + 0x100); /* UART_MOD_CNTRL */ + wmb(); +} + static inline void alchemy_uart_putchar(u32 uart_phys, u8 c) { void __iomem *base = (void __iomem *)KSEG1ADDR(uart_phys); @@ -180,6 +219,20 @@ static inline void alchemy_uart_putchar(u32 uart_phys, u8 c) wmb(); } +/* return number of ethernet MACs on a given cputype */ +static inline int alchemy_get_macs(int type) +{ + switch (type) { + case ALCHEMY_CPU_AU1000: + case ALCHEMY_CPU_AU1500: + case ALCHEMY_CPU_AU1550: + return 2; + case ALCHEMY_CPU_AU1100: + return 1; + } + return 0; +} + /* arch/mips/au1000/common/clocks.c */ extern void set_au1x00_speed(unsigned int new_freq); extern unsigned int get_au1x00_speed(void); @@ -630,38 +683,42 @@ enum soc_au1200_ints { /* * Physical base addresses for integrated peripherals + * 0..au1000 1..au1500 2..au1100 3..au1550 4..au1200 */ +#define AU1000_AC97_PHYS_ADDR 0x10000000 /* 012 */ +#define AU1000_USBD_PHYS_ADDR 0x10200000 /* 0123 */ +#define AU1000_IC0_PHYS_ADDR 0x10400000 /* 01234 */ +#define AU1000_MAC0_PHYS_ADDR 0x10500000 /* 023 */ +#define AU1000_MAC1_PHYS_ADDR 0x10510000 /* 023 */ +#define AU1000_MACEN_PHYS_ADDR 0x10520000 /* 023 */ +#define AU1100_SD0_PHYS_ADDR 0x10600000 /* 24 */ +#define AU1100_SD1_PHYS_ADDR 0x10680000 /* 24 */ +#define AU1000_I2S_PHYS_ADDR 0x11000000 /* 02 */ +#define AU1500_MAC0_PHYS_ADDR 0x11500000 /* 1 */ +#define AU1500_MAC1_PHYS_ADDR 0x11510000 /* 1 */ +#define AU1500_MACEN_PHYS_ADDR 0x11520000 /* 1 */ +#define AU1000_UART0_PHYS_ADDR 0x11100000 /* 01234 */ +#define AU1000_UART1_PHYS_ADDR 0x11200000 /* 0234 */ +#define AU1000_UART2_PHYS_ADDR 0x11300000 /* 0 */ +#define AU1000_UART3_PHYS_ADDR 0x11400000 /* 0123 */ +#define AU1500_GPIO2_PHYS_ADDR 0x11700000 /* 1234 */ +#define AU1000_IC1_PHYS_ADDR 0x11800000 /* 01234 */ +#define AU1000_SYS_PHYS_ADDR 0x11900000 /* 01234 */ +#define AU1000_DMA_PHYS_ADDR 0x14002000 /* 012 */ +#define AU1550_DBDMA_PHYS_ADDR 0x14002000 /* 34 */ +#define AU1550_DBDMA_CONF_PHYS_ADDR 0x14003000 /* 34 */ +#define AU1000_MACDMA0_PHYS_ADDR 0x14004000 /* 0123 */ +#define AU1000_MACDMA1_PHYS_ADDR 0x14004200 /* 0123 */ + + #ifdef CONFIG_SOC_AU1000 #define MEM_PHYS_ADDR 0x14000000 #define STATIC_MEM_PHYS_ADDR 0x14001000 -#define DMA0_PHYS_ADDR 0x14002000 -#define 
DMA1_PHYS_ADDR 0x14002100 -#define DMA2_PHYS_ADDR 0x14002200 -#define DMA3_PHYS_ADDR 0x14002300 -#define DMA4_PHYS_ADDR 0x14002400 -#define DMA5_PHYS_ADDR 0x14002500 -#define DMA6_PHYS_ADDR 0x14002600 -#define DMA7_PHYS_ADDR 0x14002700 -#define IC0_PHYS_ADDR 0x10400000 -#define IC1_PHYS_ADDR 0x11800000 -#define AC97_PHYS_ADDR 0x10000000 #define USBH_PHYS_ADDR 0x10100000 -#define USBD_PHYS_ADDR 0x10200000 #define IRDA_PHYS_ADDR 0x10300000 -#define MAC0_PHYS_ADDR 0x10500000 -#define MAC1_PHYS_ADDR 0x10510000 -#define MACEN_PHYS_ADDR 0x10520000 -#define MACDMA0_PHYS_ADDR 0x14004000 -#define MACDMA1_PHYS_ADDR 0x14004200 -#define I2S_PHYS_ADDR 0x11000000 -#define UART0_PHYS_ADDR 0x11100000 -#define UART1_PHYS_ADDR 0x11200000 -#define UART2_PHYS_ADDR 0x11300000 -#define UART3_PHYS_ADDR 0x11400000 #define SSI0_PHYS_ADDR 0x11600000 #define SSI1_PHYS_ADDR 0x11680000 -#define SYS_PHYS_ADDR 0x11900000 #define PCMCIA_IO_PHYS_ADDR 0xF00000000ULL #define PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL #define PCMCIA_MEM_PHYS_ADDR 0xF80000000ULL @@ -672,30 +729,8 @@ enum soc_au1200_ints { #ifdef CONFIG_SOC_AU1500 #define MEM_PHYS_ADDR 0x14000000 #define STATIC_MEM_PHYS_ADDR 0x14001000 -#define DMA0_PHYS_ADDR 0x14002000 -#define DMA1_PHYS_ADDR 0x14002100 -#define DMA2_PHYS_ADDR 0x14002200 -#define DMA3_PHYS_ADDR 0x14002300 -#define DMA4_PHYS_ADDR 0x14002400 -#define DMA5_PHYS_ADDR 0x14002500 -#define DMA6_PHYS_ADDR 0x14002600 -#define DMA7_PHYS_ADDR 0x14002700 -#define IC0_PHYS_ADDR 0x10400000 -#define IC1_PHYS_ADDR 0x11800000 -#define AC97_PHYS_ADDR 0x10000000 #define USBH_PHYS_ADDR 0x10100000 -#define USBD_PHYS_ADDR 0x10200000 #define PCI_PHYS_ADDR 0x14005000 -#define MAC0_PHYS_ADDR 0x11500000 -#define MAC1_PHYS_ADDR 0x11510000 -#define MACEN_PHYS_ADDR 0x11520000 -#define MACDMA0_PHYS_ADDR 0x14004000 -#define MACDMA1_PHYS_ADDR 0x14004200 -#define I2S_PHYS_ADDR 0x11000000 -#define UART0_PHYS_ADDR 0x11100000 -#define UART3_PHYS_ADDR 0x11400000 -#define GPIO2_PHYS_ADDR 0x11700000 -#define SYS_PHYS_ADDR 0x11900000 #define PCI_MEM_PHYS_ADDR 0x400000000ULL #define PCI_IO_PHYS_ADDR 0x500000000ULL #define PCI_CONFIG0_PHYS_ADDR 0x600000000ULL @@ -710,34 +745,10 @@ enum soc_au1200_ints { #ifdef CONFIG_SOC_AU1100 #define MEM_PHYS_ADDR 0x14000000 #define STATIC_MEM_PHYS_ADDR 0x14001000 -#define DMA0_PHYS_ADDR 0x14002000 -#define DMA1_PHYS_ADDR 0x14002100 -#define DMA2_PHYS_ADDR 0x14002200 -#define DMA3_PHYS_ADDR 0x14002300 -#define DMA4_PHYS_ADDR 0x14002400 -#define DMA5_PHYS_ADDR 0x14002500 -#define DMA6_PHYS_ADDR 0x14002600 -#define DMA7_PHYS_ADDR 0x14002700 -#define IC0_PHYS_ADDR 0x10400000 -#define SD0_PHYS_ADDR 0x10600000 -#define SD1_PHYS_ADDR 0x10680000 -#define IC1_PHYS_ADDR 0x11800000 -#define AC97_PHYS_ADDR 0x10000000 #define USBH_PHYS_ADDR 0x10100000 -#define USBD_PHYS_ADDR 0x10200000 #define IRDA_PHYS_ADDR 0x10300000 -#define MAC0_PHYS_ADDR 0x10500000 -#define MACEN_PHYS_ADDR 0x10520000 -#define MACDMA0_PHYS_ADDR 0x14004000 -#define MACDMA1_PHYS_ADDR 0x14004200 -#define I2S_PHYS_ADDR 0x11000000 -#define UART0_PHYS_ADDR 0x11100000 -#define UART1_PHYS_ADDR 0x11200000 -#define UART3_PHYS_ADDR 0x11400000 #define SSI0_PHYS_ADDR 0x11600000 #define SSI1_PHYS_ADDR 0x11680000 -#define GPIO2_PHYS_ADDR 0x11700000 -#define SYS_PHYS_ADDR 0x11900000 #define LCD_PHYS_ADDR 0x15000000 #define PCMCIA_IO_PHYS_ADDR 0xF00000000ULL #define PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL @@ -749,22 +760,8 @@ enum soc_au1200_ints { #ifdef CONFIG_SOC_AU1550 #define MEM_PHYS_ADDR 0x14000000 #define STATIC_MEM_PHYS_ADDR 0x14001000 -#define 
IC0_PHYS_ADDR 0x10400000 -#define IC1_PHYS_ADDR 0x11800000 #define USBH_PHYS_ADDR 0x14020000 -#define USBD_PHYS_ADDR 0x10200000 #define PCI_PHYS_ADDR 0x14005000 -#define MAC0_PHYS_ADDR 0x10500000 -#define MAC1_PHYS_ADDR 0x10510000 -#define MACEN_PHYS_ADDR 0x10520000 -#define MACDMA0_PHYS_ADDR 0x14004000 -#define MACDMA1_PHYS_ADDR 0x14004200 -#define UART0_PHYS_ADDR 0x11100000 -#define UART1_PHYS_ADDR 0x11200000 -#define UART3_PHYS_ADDR 0x11400000 -#define GPIO2_PHYS_ADDR 0x11700000 -#define SYS_PHYS_ADDR 0x11900000 -#define DDMA_PHYS_ADDR 0x14002000 #define PE_PHYS_ADDR 0x14008000 #define PSC0_PHYS_ADDR 0x11A00000 #define PSC1_PHYS_ADDR 0x11B00000 @@ -786,19 +783,10 @@ enum soc_au1200_ints { #define STATIC_MEM_PHYS_ADDR 0x14001000 #define AES_PHYS_ADDR 0x10300000 #define CIM_PHYS_ADDR 0x14004000 -#define IC0_PHYS_ADDR 0x10400000 -#define IC1_PHYS_ADDR 0x11800000 #define USBM_PHYS_ADDR 0x14020000 #define USBH_PHYS_ADDR 0x14020100 -#define UART0_PHYS_ADDR 0x11100000 -#define UART1_PHYS_ADDR 0x11200000 -#define GPIO2_PHYS_ADDR 0x11700000 -#define SYS_PHYS_ADDR 0x11900000 -#define DDMA_PHYS_ADDR 0x14002000 #define PSC0_PHYS_ADDR 0x11A00000 #define PSC1_PHYS_ADDR 0x11B00000 -#define SD0_PHYS_ADDR 0x10600000 -#define SD1_PHYS_ADDR 0x10680000 #define LCD_PHYS_ADDR 0x15000000 #define SWCNT_PHYS_ADDR 0x1110010C #define MAEFE_PHYS_ADDR 0x14012000 @@ -835,183 +823,43 @@ enum soc_au1200_ints { #endif -/* Interrupt Controller register offsets */ -#define IC_CFG0RD 0x40 -#define IC_CFG0SET 0x40 -#define IC_CFG0CLR 0x44 -#define IC_CFG1RD 0x48 -#define IC_CFG1SET 0x48 -#define IC_CFG1CLR 0x4C -#define IC_CFG2RD 0x50 -#define IC_CFG2SET 0x50 -#define IC_CFG2CLR 0x54 -#define IC_REQ0INT 0x54 -#define IC_SRCRD 0x58 -#define IC_SRCSET 0x58 -#define IC_SRCCLR 0x5C -#define IC_REQ1INT 0x5C -#define IC_ASSIGNRD 0x60 -#define IC_ASSIGNSET 0x60 -#define IC_ASSIGNCLR 0x64 -#define IC_WAKERD 0x68 -#define IC_WAKESET 0x68 -#define IC_WAKECLR 0x6C -#define IC_MASKRD 0x70 -#define IC_MASKSET 0x70 -#define IC_MASKCLR 0x74 -#define IC_RISINGRD 0x78 -#define IC_RISINGCLR 0x78 -#define IC_FALLINGRD 0x7C -#define IC_FALLINGCLR 0x7C -#define IC_TESTBIT 0x80 - - -/* Interrupt Controller 0 */ -#define IC0_CFG0RD 0xB0400040 -#define IC0_CFG0SET 0xB0400040 -#define IC0_CFG0CLR 0xB0400044 - -#define IC0_CFG1RD 0xB0400048 -#define IC0_CFG1SET 0xB0400048 -#define IC0_CFG1CLR 0xB040004C - -#define IC0_CFG2RD 0xB0400050 -#define IC0_CFG2SET 0xB0400050 -#define IC0_CFG2CLR 0xB0400054 - -#define IC0_REQ0INT 0xB0400054 -#define IC0_SRCRD 0xB0400058 -#define IC0_SRCSET 0xB0400058 -#define IC0_SRCCLR 0xB040005C -#define IC0_REQ1INT 0xB040005C - -#define IC0_ASSIGNRD 0xB0400060 -#define IC0_ASSIGNSET 0xB0400060 -#define IC0_ASSIGNCLR 0xB0400064 - -#define IC0_WAKERD 0xB0400068 -#define IC0_WAKESET 0xB0400068 -#define IC0_WAKECLR 0xB040006C - -#define IC0_MASKRD 0xB0400070 -#define IC0_MASKSET 0xB0400070 -#define IC0_MASKCLR 0xB0400074 - -#define IC0_RISINGRD 0xB0400078 -#define IC0_RISINGCLR 0xB0400078 -#define IC0_FALLINGRD 0xB040007C -#define IC0_FALLINGCLR 0xB040007C - -#define IC0_TESTBIT 0xB0400080 - -/* Interrupt Controller 1 */ -#define IC1_CFG0RD 0xB1800040 -#define IC1_CFG0SET 0xB1800040 -#define IC1_CFG0CLR 0xB1800044 - -#define IC1_CFG1RD 0xB1800048 -#define IC1_CFG1SET 0xB1800048 -#define IC1_CFG1CLR 0xB180004C - -#define IC1_CFG2RD 0xB1800050 -#define IC1_CFG2SET 0xB1800050 -#define IC1_CFG2CLR 0xB1800054 - -#define IC1_REQ0INT 0xB1800054 -#define IC1_SRCRD 0xB1800058 -#define IC1_SRCSET 0xB1800058 -#define IC1_SRCCLR 
0xB180005C -#define IC1_REQ1INT 0xB180005C - -#define IC1_ASSIGNRD 0xB1800060 -#define IC1_ASSIGNSET 0xB1800060 -#define IC1_ASSIGNCLR 0xB1800064 - -#define IC1_WAKERD 0xB1800068 -#define IC1_WAKESET 0xB1800068 -#define IC1_WAKECLR 0xB180006C - -#define IC1_MASKRD 0xB1800070 -#define IC1_MASKSET 0xB1800070 -#define IC1_MASKCLR 0xB1800074 - -#define IC1_RISINGRD 0xB1800078 -#define IC1_RISINGCLR 0xB1800078 -#define IC1_FALLINGRD 0xB180007C -#define IC1_FALLINGCLR 0xB180007C - -#define IC1_TESTBIT 0xB1800080 /* Au1000 */ #ifdef CONFIG_SOC_AU1000 -#define UART0_ADDR 0xB1100000 -#define UART3_ADDR 0xB1400000 - #define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */ #define USB_HOST_CONFIG 0xB017FFFC #define FOR_PLATFORM_C_USB_HOST_INT AU1000_USB_HOST_INT - -#define AU1000_ETH0_BASE 0xB0500000 -#define AU1000_ETH1_BASE 0xB0510000 -#define AU1000_MAC0_ENABLE 0xB0520000 -#define AU1000_MAC1_ENABLE 0xB0520004 -#define NUM_ETH_INTERFACES 2 #endif /* CONFIG_SOC_AU1000 */ /* Au1500 */ #ifdef CONFIG_SOC_AU1500 -#define UART0_ADDR 0xB1100000 -#define UART3_ADDR 0xB1400000 - #define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */ #define USB_HOST_CONFIG 0xB017fffc #define FOR_PLATFORM_C_USB_HOST_INT AU1500_USB_HOST_INT - -#define AU1500_ETH0_BASE 0xB1500000 -#define AU1500_ETH1_BASE 0xB1510000 -#define AU1500_MAC0_ENABLE 0xB1520000 -#define AU1500_MAC1_ENABLE 0xB1520004 -#define NUM_ETH_INTERFACES 2 #endif /* CONFIG_SOC_AU1500 */ /* Au1100 */ #ifdef CONFIG_SOC_AU1100 -#define UART0_ADDR 0xB1100000 -#define UART3_ADDR 0xB1400000 - #define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */ #define USB_HOST_CONFIG 0xB017FFFC #define FOR_PLATFORM_C_USB_HOST_INT AU1100_USB_HOST_INT - -#define AU1100_ETH0_BASE 0xB0500000 -#define AU1100_MAC0_ENABLE 0xB0520000 -#define NUM_ETH_INTERFACES 1 #endif /* CONFIG_SOC_AU1100 */ #ifdef CONFIG_SOC_AU1550 -#define UART0_ADDR 0xB1100000 #define USB_OHCI_BASE 0x14020000 /* phys addr for ioremap */ #define USB_OHCI_LEN 0x00060000 #define USB_HOST_CONFIG 0xB4027ffc #define FOR_PLATFORM_C_USB_HOST_INT AU1550_USB_HOST_INT - -#define AU1550_ETH0_BASE 0xB0500000 -#define AU1550_ETH1_BASE 0xB0510000 -#define AU1550_MAC0_ENABLE 0xB0520000 -#define AU1550_MAC1_ENABLE 0xB0520004 -#define NUM_ETH_INTERFACES 2 #endif /* CONFIG_SOC_AU1550 */ #ifdef CONFIG_SOC_AU1200 -#define UART0_ADDR 0xB1100000 - #define USB_UOC_BASE 0x14020020 #define USB_UOC_LEN 0x20 #define USB_OHCI_BASE 0x14020100 @@ -1504,22 +1352,6 @@ enum soc_au1200_ints { #define SYS_PINFUNC_S1B (1 << 2) #endif -#define SYS_TRIOUTRD 0xB1900100 -#define SYS_TRIOUTCLR 0xB1900100 -#define SYS_OUTPUTRD 0xB1900108 -#define SYS_OUTPUTSET 0xB1900108 -#define SYS_OUTPUTCLR 0xB190010C -#define SYS_PINSTATERD 0xB1900110 -#define SYS_PININPUTEN 0xB1900110 - -/* GPIO2, Au1500, Au1550 only */ -#define GPIO2_BASE 0xB1700000 -#define GPIO2_DIR (GPIO2_BASE + 0) -#define GPIO2_OUTPUT (GPIO2_BASE + 8) -#define GPIO2_PINSTATE (GPIO2_BASE + 0xC) -#define GPIO2_INTENABLE (GPIO2_BASE + 0x10) -#define GPIO2_ENABLE (GPIO2_BASE + 0x14) - /* Power Management */ #define SYS_SCRATCH0 0xB1900018 #define SYS_SCRATCH1 0xB190001C @@ -1635,12 +1467,6 @@ enum soc_au1200_ints { # define AC97C_RS (1 << 1) # define AC97C_CE (1 << 0) -/* Secure Digital (SD) Controller */ -#define SD0_XMIT_FIFO 0xB0600000 -#define SD0_RECV_FIFO 0xB0600004 -#define SD1_XMIT_FIFO 0xB0680000 -#define SD1_RECV_FIFO 0xB0680004 - #if defined(CONFIG_SOC_AU1500) || defined(CONFIG_SOC_AU1550) /* Au1500 PCI Controller */ #define Au1500_CFG_BASE 0xB4005000 /* virtual, KSEG1 addr 
*/ diff --git a/trunk/arch/mips/include/asm/mach-au1x00/au1000_dma.h b/trunk/arch/mips/include/asm/mach-au1x00/au1000_dma.h index c333b4e1cd44..59f5b55b2200 100644 --- a/trunk/arch/mips/include/asm/mach-au1x00/au1000_dma.h +++ b/trunk/arch/mips/include/asm/mach-au1x00/au1000_dma.h @@ -37,10 +37,6 @@ #define NUM_AU1000_DMA_CHANNELS 8 -/* DMA Channel Base Addresses */ -#define DMA_CHANNEL_BASE 0xB4002000 -#define DMA_CHANNEL_LEN 0x00000100 - /* DMA Channel Register Offsets */ #define DMA_MODE_SET 0x00000000 #define DMA_MODE_READ DMA_MODE_SET diff --git a/trunk/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h b/trunk/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h index c8a553a36ba4..2fdacfe85e23 100644 --- a/trunk/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h +++ b/trunk/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h @@ -37,14 +37,6 @@ #ifndef _LANGUAGE_ASSEMBLY -/* - * The DMA base addresses. - * The channels are every 256 bytes (0x0100) from the channel 0 base. - * Interrupt status/enable is bits 15:0 for channels 15 to zero. - */ -#define DDMA_GLOBAL_BASE 0xb4003000 -#define DDMA_CHANNEL_BASE 0xb4002000 - typedef volatile struct dbdma_global { u32 ddma_config; u32 ddma_intstat; diff --git a/trunk/arch/mips/include/asm/mach-au1x00/gpio-au1000.h b/trunk/arch/mips/include/asm/mach-au1x00/gpio-au1000.h index 62d2f136d941..1f41a522906d 100644 --- a/trunk/arch/mips/include/asm/mach-au1x00/gpio-au1000.h +++ b/trunk/arch/mips/include/asm/mach-au1x00/gpio-au1000.h @@ -24,6 +24,23 @@ #define MAKE_IRQ(intc, off) (AU1000_INTC##intc##_INT_BASE + (off)) +/* GPIO1 registers within SYS_ area */ +#define SYS_TRIOUTRD 0x100 +#define SYS_TRIOUTCLR 0x100 +#define SYS_OUTPUTRD 0x108 +#define SYS_OUTPUTSET 0x108 +#define SYS_OUTPUTCLR 0x10C +#define SYS_PINSTATERD 0x110 +#define SYS_PININPUTEN 0x110 + +/* register offsets within GPIO2 block */ +#define GPIO2_DIR 0x00 +#define GPIO2_OUTPUT 0x08 +#define GPIO2_PINSTATE 0x0C +#define GPIO2_INTENABLE 0x10 +#define GPIO2_ENABLE 0x14 + +struct gpio; static inline int au1000_gpio1_to_irq(int gpio) { @@ -200,23 +217,26 @@ static inline int au1200_irq_to_gpio(int irq) */ static inline void alchemy_gpio1_set_value(int gpio, int v) { + void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR); unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); unsigned long r = v ? 
SYS_OUTPUTSET : SYS_OUTPUTCLR; - au_writel(mask, r); - au_sync(); + __raw_writel(mask, base + r); + wmb(); } static inline int alchemy_gpio1_get_value(int gpio) { + void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR); unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); - return au_readl(SYS_PINSTATERD) & mask; + return __raw_readl(base + SYS_PINSTATERD) & mask; } static inline int alchemy_gpio1_direction_input(int gpio) { + void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR); unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); - au_writel(mask, SYS_TRIOUTCLR); - au_sync(); + __raw_writel(mask, base + SYS_TRIOUTCLR); + wmb(); return 0; } @@ -257,27 +277,31 @@ static inline int alchemy_gpio1_to_irq(int gpio) */ static inline void __alchemy_gpio2_mod_dir(int gpio, int to_out) { + void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); unsigned long mask = 1 << (gpio - ALCHEMY_GPIO2_BASE); - unsigned long d = au_readl(GPIO2_DIR); + unsigned long d = __raw_readl(base + GPIO2_DIR); + if (to_out) d |= mask; else d &= ~mask; - au_writel(d, GPIO2_DIR); - au_sync(); + __raw_writel(d, base + GPIO2_DIR); + wmb(); } static inline void alchemy_gpio2_set_value(int gpio, int v) { + void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); unsigned long mask; mask = ((v) ? 0x00010001 : 0x00010000) << (gpio - ALCHEMY_GPIO2_BASE); - au_writel(mask, GPIO2_OUTPUT); - au_sync(); + __raw_writel(mask, base + GPIO2_OUTPUT); + wmb(); } static inline int alchemy_gpio2_get_value(int gpio) { - return au_readl(GPIO2_PINSTATE) & (1 << (gpio - ALCHEMY_GPIO2_BASE)); + void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); + return __raw_readl(base + GPIO2_PINSTATE) & (1 << (gpio - ALCHEMY_GPIO2_BASE)); } static inline int alchemy_gpio2_direction_input(int gpio) @@ -329,21 +353,23 @@ static inline int alchemy_gpio2_to_irq(int gpio) */ static inline void alchemy_gpio1_input_enable(void) { - au_writel(0, SYS_PININPUTEN); /* the write op is key */ - au_sync(); + void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR); + __raw_writel(0, base + SYS_PININPUTEN); /* the write op is key */ + wmb(); } /* GPIO2 shared interrupts and control */ static inline void __alchemy_gpio2_mod_int(int gpio2, int en) { - unsigned long r = au_readl(GPIO2_INTENABLE); + void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); + unsigned long r = __raw_readl(base + GPIO2_INTENABLE); if (en) r |= 1 << gpio2; else r &= ~(1 << gpio2); - au_writel(r, GPIO2_INTENABLE); - au_sync(); + __raw_writel(r, base + GPIO2_INTENABLE); + wmb(); } /** @@ -418,10 +444,11 @@ static inline void alchemy_gpio2_disable_int(int gpio2) */ static inline void alchemy_gpio2_enable(void) { - au_writel(3, GPIO2_ENABLE); /* reset, clock enabled */ - au_sync(); - au_writel(1, GPIO2_ENABLE); /* clock enabled */ - au_sync(); + void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); + __raw_writel(3, base + GPIO2_ENABLE); /* reset, clock enabled */ + wmb(); + __raw_writel(1, base + GPIO2_ENABLE); /* clock enabled */ + wmb(); } /** @@ -431,8 +458,9 @@ static inline void alchemy_gpio2_enable(void) */ static inline void alchemy_gpio2_disable(void) { - au_writel(2, GPIO2_ENABLE); /* reset, clock disabled */ - au_sync(); + void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_GPIO2_PHYS_ADDR); + __raw_writel(2, base + GPIO2_ENABLE); /* reset, clock disabled */ + wmb(); } /**********************************************************************/ @@ -556,6 +584,16 
@@ static inline void gpio_set_value(int gpio, int v) alchemy_gpio_set_value(gpio, v); } +static inline int gpio_get_value_cansleep(unsigned gpio) +{ + return gpio_get_value(gpio); +} + +static inline void gpio_set_value_cansleep(unsigned gpio, int value) +{ + gpio_set_value(gpio, value); +} + static inline int gpio_is_valid(int gpio) { return alchemy_gpio_is_valid(gpio); @@ -581,10 +619,50 @@ static inline int gpio_request(unsigned gpio, const char *label) return 0; } +static inline int gpio_request_one(unsigned gpio, + unsigned long flags, const char *label) +{ + return 0; +} + +static inline int gpio_request_array(struct gpio *array, size_t num) +{ + return 0; +} + static inline void gpio_free(unsigned gpio) { } +static inline void gpio_free_array(struct gpio *array, size_t num) +{ +} + +static inline int gpio_set_debounce(unsigned gpio, unsigned debounce) +{ + return -ENOSYS; +} + +static inline int gpio_export(unsigned gpio, bool direction_may_change) +{ + return -ENOSYS; +} + +static inline int gpio_export_link(struct device *dev, const char *name, + unsigned gpio) +{ + return -ENOSYS; +} + +static inline int gpio_sysfs_set_active_low(unsigned gpio, int value) +{ + return -ENOSYS; +} + +static inline void gpio_unexport(unsigned gpio) +{ +} + #endif /* !CONFIG_ALCHEMY_GPIO_INDIRECT */ diff --git a/trunk/arch/mips/include/asm/mach-bcm47xx/nvram.h b/trunk/arch/mips/include/asm/mach-bcm47xx/nvram.h index 9759588ba3cf..184d5ecb5f51 100644 --- a/trunk/arch/mips/include/asm/mach-bcm47xx/nvram.h +++ b/trunk/arch/mips/include/asm/mach-bcm47xx/nvram.h @@ -39,8 +39,16 @@ extern int nvram_getenv(char *name, char *val, size_t val_len); static inline void nvram_parse_macaddr(char *buf, u8 *macaddr) { - sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0], &macaddr[1], - &macaddr[2], &macaddr[3], &macaddr[4], &macaddr[5]); + if (strchr(buf, ':')) + sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0], + &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4], + &macaddr[5]); + else if (strchr(buf, '-')) + sscanf(buf, "%hhx-%hhx-%hhx-%hhx-%hhx-%hhx", &macaddr[0], + &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4], + &macaddr[5]); + else + printk(KERN_WARNING "Can not parse mac address: %s\n", buf); } #endif diff --git a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h index 32978d32561a..ed72e6a26b73 100644 --- a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h +++ b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h @@ -88,7 +88,7 @@ struct bcm_tag { char kernel_crc[CRC_LEN]; /* 228-235: Unused at present */ char reserved1[8]; - /* 236-239: CRC32 of header excluding tagVersion */ + /* 236-239: CRC32 of header excluding last 20 bytes */ char header_crc[CRC_LEN]; /* 240-255: Unused at present */ char reserved2[16]; diff --git a/trunk/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/trunk/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h index 0b2b5eb22e9b..dedef7d2b01f 100644 --- a/trunk/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h +++ b/trunk/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h @@ -63,6 +63,11 @@ # CN30XX Disable instruction prefetching or v0, v0, 0x2000 skip: + # First clear off CvmCtl[IPPCI] bit and move the performance + # counters interrupt to IRQ 6 + li v1, ~(7 << 7) + and v0, v0, v1 + ori v0, v0, (6 << 7) # Write the cavium control register dmtc0 v0, CP0_CVMCTL_REG sync diff --git a/trunk/arch/mips/include/asm/mach-lantiq/lantiq.h 
b/trunk/arch/mips/include/asm/mach-lantiq/lantiq.h new file mode 100644 index 000000000000..ce2f02929d22 --- /dev/null +++ b/trunk/arch/mips/include/asm/mach-lantiq/lantiq.h @@ -0,0 +1,63 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2010 John Crispin + */ +#ifndef _LANTIQ_H__ +#define _LANTIQ_H__ + +#include + +/* generic reg access functions */ +#define ltq_r32(reg) __raw_readl(reg) +#define ltq_w32(val, reg) __raw_writel(val, reg) +#define ltq_w32_mask(clear, set, reg) \ + ltq_w32((ltq_r32(reg) & ~(clear)) | (set), reg) +#define ltq_r8(reg) __raw_readb(reg) +#define ltq_w8(val, reg) __raw_writeb(val, reg) + +/* register access macros for EBU and CGU */ +#define ltq_ebu_w32(x, y) ltq_w32((x), ltq_ebu_membase + (y)) +#define ltq_ebu_r32(x) ltq_r32(ltq_ebu_membase + (x)) +#define ltq_cgu_w32(x, y) ltq_w32((x), ltq_cgu_membase + (y)) +#define ltq_cgu_r32(x) ltq_r32(ltq_cgu_membase + (x)) + +extern __iomem void *ltq_ebu_membase; +extern __iomem void *ltq_cgu_membase; + +extern unsigned int ltq_get_cpu_ver(void); +extern unsigned int ltq_get_soc_type(void); + +/* clock speeds */ +#define CLOCK_60M 60000000 +#define CLOCK_83M 83333333 +#define CLOCK_111M 111111111 +#define CLOCK_133M 133333333 +#define CLOCK_167M 166666667 +#define CLOCK_200M 200000000 +#define CLOCK_266M 266666666 +#define CLOCK_333M 333333333 +#define CLOCK_400M 400000000 + +/* spinlock all ebu i/o */ +extern spinlock_t ebu_lock; + +/* some irq helpers */ +extern void ltq_disable_irq(struct irq_data *data); +extern void ltq_mask_and_ack_irq(struct irq_data *data); +extern void ltq_enable_irq(struct irq_data *data); + +/* find out what caused the last cpu reset */ +extern int ltq_reset_cause(void); +#define LTQ_RST_CAUSE_WDTRST 0x20 + +#define IOPORT_RESOURCE_START 0x10000000 +#define IOPORT_RESOURCE_END 0xffffffff +#define IOMEM_RESOURCE_START 0x10000000 +#define IOMEM_RESOURCE_END 0xffffffff +#define LTQ_FLASH_START 0x10000000 +#define LTQ_FLASH_MAX 0x04000000 + +#endif diff --git a/trunk/arch/mips/include/asm/mach-lantiq/lantiq_platform.h b/trunk/arch/mips/include/asm/mach-lantiq/lantiq_platform.h new file mode 100644 index 000000000000..a305f1d0259e --- /dev/null +++ b/trunk/arch/mips/include/asm/mach-lantiq/lantiq_platform.h @@ -0,0 +1,53 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
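The ltq_r32()/ltq_w32() helpers above are thin wrappers around __raw_readl()/__raw_writel(), and ltq_w32_mask() folds a read-modify-write into one step. A minimal sketch of how a driver might use them together with ebu_lock; the include path and the choice of an EBU register offset are illustrative assumptions, not part of this patch:

#include <linux/types.h>
#include <linux/spinlock.h>
#include <lantiq.h>                     /* assumed include path for the helpers above */

#define EXAMPLE_EBU_BUSCON1     0x0064  /* example EBU register offset */

/* Set the given bits in BUSCON1 without disturbing the rest of the
 * register, serialized against other EBU users by ebu_lock. */
static void example_ebu_set_bits(u32 bits)
{
        unsigned long flags;

        spin_lock_irqsave(&ebu_lock, flags);
        ltq_w32_mask(0, bits, ltq_ebu_membase + EXAMPLE_EBU_BUSCON1);
        spin_unlock_irqrestore(&ebu_lock, flags);
}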
+ * + * Copyright (C) 2010 John Crispin + */ + +#ifndef _LANTIQ_PLATFORM_H__ +#define _LANTIQ_PLATFORM_H__ + +#include +#include + +/* struct used to pass info to the pci core */ +enum { + PCI_CLOCK_INT = 0, + PCI_CLOCK_EXT +}; + +#define PCI_EXIN0 0x0001 +#define PCI_EXIN1 0x0002 +#define PCI_EXIN2 0x0004 +#define PCI_EXIN3 0x0008 +#define PCI_EXIN4 0x0010 +#define PCI_EXIN5 0x0020 +#define PCI_EXIN_MAX 6 + +#define PCI_GNT1 0x0040 +#define PCI_GNT2 0x0080 +#define PCI_GNT3 0x0100 +#define PCI_GNT4 0x0200 + +#define PCI_REQ1 0x0400 +#define PCI_REQ2 0x0800 +#define PCI_REQ3 0x1000 +#define PCI_REQ4 0x2000 +#define PCI_REQ_SHIFT 10 +#define PCI_REQ_MASK 0xf + +struct ltq_pci_data { + int clock; + int gpio; + int irq[16]; +}; + +/* struct used to pass info to network drivers */ +struct ltq_eth_data { + struct sockaddr mac; + int mii_mode; +}; + +#endif diff --git a/trunk/arch/mips/include/asm/mach-lantiq/war.h b/trunk/arch/mips/include/asm/mach-lantiq/war.h new file mode 100644 index 000000000000..01b08ef368d1 --- /dev/null +++ b/trunk/arch/mips/include/asm/mach-lantiq/war.h @@ -0,0 +1,24 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + */ +#ifndef __ASM_MIPS_MACH_LANTIQ_WAR_H +#define __ASM_MIPS_MACH_LANTIQ_WAR_H + +#define R4600_V1_INDEX_ICACHEOP_WAR 0 +#define R4600_V1_HIT_CACHEOP_WAR 0 +#define R4600_V2_HIT_CACHEOP_WAR 0 +#define R5432_CP0_INTERRUPT_WAR 0 +#define BCM1250_M3_WAR 0 +#define SIBYTE_1956_WAR 0 +#define MIPS4K_ICACHE_REFILL_WAR 0 +#define MIPS_CACHE_SYNC_WAR 0 +#define TX49XX_ICACHE_INDEX_INV_WAR 0 +#define RM9000_CDEX_SMP_WAR 0 +#define ICACHE_REFILLS_WORKAROUND_WAR 0 +#define R10000_LLSC_WAR 0 +#define MIPS34K_MISSED_ITLB_WAR 0 + +#endif diff --git a/trunk/arch/mips/include/asm/mach-lantiq/xway/irq.h b/trunk/arch/mips/include/asm/mach-lantiq/xway/irq.h new file mode 100644 index 000000000000..a1471d2dd0d2 --- /dev/null +++ b/trunk/arch/mips/include/asm/mach-lantiq/xway/irq.h @@ -0,0 +1,18 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2010 John Crispin + */ + +#ifndef __LANTIQ_IRQ_H +#define __LANTIQ_IRQ_H + +#include + +#define NR_IRQS 256 + +#include_next + +#endif diff --git a/trunk/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h b/trunk/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h new file mode 100644 index 000000000000..b4465a888e20 --- /dev/null +++ b/trunk/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h @@ -0,0 +1,66 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
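struct ltq_eth_data above is the platform data a board file hands to the ETOP network driver. A hedged sketch of filling it in; the zero mii_mode value and the memcpy into the sockaddr are assumptions about how a board might use the structure:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/if_ether.h>
#include <lantiq_platform.h>            /* assumed include path */

static struct ltq_eth_data example_eth_data = {
        .mii_mode = 0,                  /* board specific: MII, reverse MII, ... */
};

/* Copy a 6-byte MAC address into the platform data before the device
 * that carries it is registered. */
static void example_set_mac(const u8 *mac)
{
        memcpy(example_eth_data.mac.sa_data, mac, ETH_ALEN);
}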
+ * + * Copyright (C) 2010 John Crispin + */ + +#ifndef _LANTIQ_XWAY_IRQ_H__ +#define _LANTIQ_XWAY_IRQ_H__ + +#define INT_NUM_IRQ0 8 +#define INT_NUM_IM0_IRL0 (INT_NUM_IRQ0 + 0) +#define INT_NUM_IM1_IRL0 (INT_NUM_IRQ0 + 32) +#define INT_NUM_IM2_IRL0 (INT_NUM_IRQ0 + 64) +#define INT_NUM_IM3_IRL0 (INT_NUM_IRQ0 + 96) +#define INT_NUM_IM4_IRL0 (INT_NUM_IRQ0 + 128) +#define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0) + +#define LTQ_ASC_TIR(x) (INT_NUM_IM3_IRL0 + (x * 8)) +#define LTQ_ASC_RIR(x) (INT_NUM_IM3_IRL0 + (x * 8) + 1) +#define LTQ_ASC_EIR(x) (INT_NUM_IM3_IRL0 + (x * 8) + 2) + +#define LTQ_ASC_ASE_TIR INT_NUM_IM2_IRL0 +#define LTQ_ASC_ASE_RIR (INT_NUM_IM2_IRL0 + 2) +#define LTQ_ASC_ASE_EIR (INT_NUM_IM2_IRL0 + 3) + +#define LTQ_SSC_TIR (INT_NUM_IM0_IRL0 + 15) +#define LTQ_SSC_RIR (INT_NUM_IM0_IRL0 + 14) +#define LTQ_SSC_EIR (INT_NUM_IM0_IRL0 + 16) + +#define LTQ_MEI_DYING_GASP_INT (INT_NUM_IM1_IRL0 + 21) +#define LTQ_MEI_INT (INT_NUM_IM1_IRL0 + 23) + +#define LTQ_TIMER6_INT (INT_NUM_IM1_IRL0 + 23) +#define LTQ_USB_INT (INT_NUM_IM1_IRL0 + 22) +#define LTQ_USB_OC_INT (INT_NUM_IM4_IRL0 + 23) + +#define MIPS_CPU_TIMER_IRQ 7 + +#define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0) +#define LTQ_DMA_CH1_INT (INT_NUM_IM2_IRL0 + 1) +#define LTQ_DMA_CH2_INT (INT_NUM_IM2_IRL0 + 2) +#define LTQ_DMA_CH3_INT (INT_NUM_IM2_IRL0 + 3) +#define LTQ_DMA_CH4_INT (INT_NUM_IM2_IRL0 + 4) +#define LTQ_DMA_CH5_INT (INT_NUM_IM2_IRL0 + 5) +#define LTQ_DMA_CH6_INT (INT_NUM_IM2_IRL0 + 6) +#define LTQ_DMA_CH7_INT (INT_NUM_IM2_IRL0 + 7) +#define LTQ_DMA_CH8_INT (INT_NUM_IM2_IRL0 + 8) +#define LTQ_DMA_CH9_INT (INT_NUM_IM2_IRL0 + 9) +#define LTQ_DMA_CH10_INT (INT_NUM_IM2_IRL0 + 10) +#define LTQ_DMA_CH11_INT (INT_NUM_IM2_IRL0 + 11) +#define LTQ_DMA_CH12_INT (INT_NUM_IM2_IRL0 + 25) +#define LTQ_DMA_CH13_INT (INT_NUM_IM2_IRL0 + 26) +#define LTQ_DMA_CH14_INT (INT_NUM_IM2_IRL0 + 27) +#define LTQ_DMA_CH15_INT (INT_NUM_IM2_IRL0 + 28) +#define LTQ_DMA_CH16_INT (INT_NUM_IM2_IRL0 + 29) +#define LTQ_DMA_CH17_INT (INT_NUM_IM2_IRL0 + 30) +#define LTQ_DMA_CH18_INT (INT_NUM_IM2_IRL0 + 16) +#define LTQ_DMA_CH19_INT (INT_NUM_IM2_IRL0 + 21) + +#define LTQ_PPE_MBOX_INT (INT_NUM_IM2_IRL0 + 24) + +#define INT_NUM_IM4_IRL14 (INT_NUM_IM4_IRL0 + 14) + +#endif diff --git a/trunk/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h b/trunk/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h new file mode 100644 index 000000000000..8a3c6be669d2 --- /dev/null +++ b/trunk/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h @@ -0,0 +1,141 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
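The interrupt numbers above are ordinary Linux IRQ numbers, so a driver can pass them straight to request_irq(). A minimal sketch for DMA channel 0; the handler body and the device name string are illustrative assumptions:

#include <linux/interrupt.h>
#include <lantiq_irq.h>                 /* assumed include path for LTQ_DMA_CH0_INT */

static irqreturn_t example_dma_isr(int irq, void *dev_id)
{
        /* acknowledge and service the channel here */
        return IRQ_HANDLED;
}

static int example_request_dma_irq(void)
{
        return request_irq(LTQ_DMA_CH0_INT, example_dma_isr, 0,
                           "example-dma", NULL);
}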
+ * + * Copyright (C) 2010 John Crispin + */ + +#ifndef _LTQ_XWAY_H__ +#define _LTQ_XWAY_H__ + +#ifdef CONFIG_SOC_TYPE_XWAY + +#include + +/* Chip IDs */ +#define SOC_ID_DANUBE1 0x129 +#define SOC_ID_DANUBE2 0x12B +#define SOC_ID_TWINPASS 0x12D +#define SOC_ID_AMAZON_SE 0x152 +#define SOC_ID_ARX188 0x16C +#define SOC_ID_ARX168 0x16D +#define SOC_ID_ARX182 0x16F + +/* SoC Types */ +#define SOC_TYPE_DANUBE 0x01 +#define SOC_TYPE_TWINPASS 0x02 +#define SOC_TYPE_AR9 0x03 +#define SOC_TYPE_VR9 0x04 +#define SOC_TYPE_AMAZON_SE 0x05 + +/* ASC0/1 - serial port */ +#define LTQ_ASC0_BASE_ADDR 0x1E100400 +#define LTQ_ASC1_BASE_ADDR 0x1E100C00 +#define LTQ_ASC_SIZE 0x400 + +/* RCU - reset control unit */ +#define LTQ_RCU_BASE_ADDR 0x1F203000 +#define LTQ_RCU_SIZE 0x1000 + +/* GPTU - general purpose timer unit */ +#define LTQ_GPTU_BASE_ADDR 0x18000300 +#define LTQ_GPTU_SIZE 0x100 + +/* EBU - external bus unit */ +#define LTQ_EBU_GPIO_START 0x14000000 +#define LTQ_EBU_GPIO_SIZE 0x1000 + +#define LTQ_EBU_BASE_ADDR 0x1E105300 +#define LTQ_EBU_SIZE 0x100 + +#define LTQ_EBU_BUSCON0 0x0060 +#define LTQ_EBU_PCC_CON 0x0090 +#define LTQ_EBU_PCC_IEN 0x00A4 +#define LTQ_EBU_PCC_ISTAT 0x00A0 +#define LTQ_EBU_BUSCON1 0x0064 +#define LTQ_EBU_ADDRSEL1 0x0024 +#define EBU_WRDIS 0x80000000 + +/* CGU - clock generation unit */ +#define LTQ_CGU_BASE_ADDR 0x1F103000 +#define LTQ_CGU_SIZE 0x1000 + +/* ICU - interrupt control unit */ +#define LTQ_ICU_BASE_ADDR 0x1F880200 +#define LTQ_ICU_SIZE 0x100 + +/* EIU - external interrupt unit */ +#define LTQ_EIU_BASE_ADDR 0x1F101000 +#define LTQ_EIU_SIZE 0x1000 + +/* PMU - power management unit */ +#define LTQ_PMU_BASE_ADDR 0x1F102000 +#define LTQ_PMU_SIZE 0x1000 + +#define PMU_DMA 0x0020 +#define PMU_USB 0x8041 +#define PMU_LED 0x0800 +#define PMU_GPT 0x1000 +#define PMU_PPE 0x2000 +#define PMU_FPI 0x4000 +#define PMU_SWITCH 0x10000000 + +/* ETOP - ethernet */ +#define LTQ_ETOP_BASE_ADDR 0x1E180000 +#define LTQ_ETOP_SIZE 0x40000 + +/* DMA */ +#define LTQ_DMA_BASE_ADDR 0x1E104100 +#define LTQ_DMA_SIZE 0x800 + +/* PCI */ +#define PCI_CR_BASE_ADDR 0x1E105400 +#define PCI_CR_SIZE 0x400 + +/* WDT */ +#define LTQ_WDT_BASE_ADDR 0x1F8803F0 +#define LTQ_WDT_SIZE 0x10 + +/* STP - serial to parallel conversion unit */ +#define LTQ_STP_BASE_ADDR 0x1E100BB0 +#define LTQ_STP_SIZE 0x40 + +/* GPIO */ +#define LTQ_GPIO0_BASE_ADDR 0x1E100B10 +#define LTQ_GPIO1_BASE_ADDR 0x1E100B40 +#define LTQ_GPIO2_BASE_ADDR 0x1E100B70 +#define LTQ_GPIO_SIZE 0x30 + +/* SSC */ +#define LTQ_SSC_BASE_ADDR 0x1e100800 +#define LTQ_SSC_SIZE 0x100 + +/* MEI - dsl core */ +#define LTQ_MEI_BASE_ADDR 0x1E116000 + +/* DEU - data encryption unit */ +#define LTQ_DEU_BASE_ADDR 0x1E103100 + +/* MPS - multi processor unit (voice) */ +#define LTQ_MPS_BASE_ADDR (KSEG1 + 0x1F107000) +#define LTQ_MPS_CHIPID ((u32 *)(LTQ_MPS_BASE_ADDR + 0x0344)) + +/* request a non-gpio and set the PIO config */ +extern int ltq_gpio_request(unsigned int pin, unsigned int alt0, + unsigned int alt1, unsigned int dir, const char *name); +extern void ltq_pmu_enable(unsigned int module); +extern void ltq_pmu_disable(unsigned int module); + +static inline int ltq_is_ar9(void) +{ + return (ltq_get_soc_type() == SOC_TYPE_AR9); +} + +static inline int ltq_is_vr9(void) +{ + return (ltq_get_soc_type() == SOC_TYPE_VR9); +} + +#endif /* CONFIG_SOC_TYPE_XWAY */ +#endif /* _LTQ_XWAY_H__ */ diff --git a/trunk/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h b/trunk/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h new file mode 100644 index 000000000000..872943a4b90e 
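ltq_pmu_enable() and ltq_gpio_request() above are the two hooks board code typically calls before registering a device. A sketch under the assumption that pin 42 with ALT0=1/ALT1=0 is a valid output mux setting on the target board:

#include <lantiq_soc.h>                 /* assumed include path */

/* Power up the DMA engine and claim one pin in its alternate function.
 * Pin number and ALT/dir values are board-specific placeholders. */
static int example_claim_dma_pin(void)
{
        ltq_pmu_enable(PMU_DMA);
        return ltq_gpio_request(42, 1, 0, 1, "example-dma-pin");
}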
--- /dev/null +++ b/trunk/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h @@ -0,0 +1,60 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) 2011 John Crispin + */ + +#ifndef LTQ_DMA_H__ +#define LTQ_DMA_H__ + +#define LTQ_DESC_SIZE 0x08 /* each descriptor is 64bit */ +#define LTQ_DESC_NUM 0x40 /* 64 descriptors / channel */ + +#define LTQ_DMA_OWN BIT(31) /* owner bit */ +#define LTQ_DMA_C BIT(30) /* complete bit */ +#define LTQ_DMA_SOP BIT(29) /* start of packet */ +#define LTQ_DMA_EOP BIT(28) /* end of packet */ +#define LTQ_DMA_TX_OFFSET(x) ((x & 0x1f) << 23) /* data bytes offset */ +#define LTQ_DMA_RX_OFFSET(x) ((x & 0x7) << 23) /* data bytes offset */ +#define LTQ_DMA_SIZE_MASK (0xffff) /* the size field is 16 bit */ + +struct ltq_dma_desc { + u32 ctl; + u32 addr; +}; + +struct ltq_dma_channel { + int nr; /* the channel number */ + int irq; /* the mapped irq */ + int desc; /* the current descriptor */ + struct ltq_dma_desc *desc_base; /* the descriptor base */ + int phys; /* physical addr */ +}; + +enum { + DMA_PORT_ETOP = 0, + DMA_PORT_DEU, +}; + +extern void ltq_dma_enable_irq(struct ltq_dma_channel *ch); +extern void ltq_dma_disable_irq(struct ltq_dma_channel *ch); +extern void ltq_dma_ack_irq(struct ltq_dma_channel *ch); +extern void ltq_dma_open(struct ltq_dma_channel *ch); +extern void ltq_dma_close(struct ltq_dma_channel *ch); +extern void ltq_dma_alloc_tx(struct ltq_dma_channel *ch); +extern void ltq_dma_alloc_rx(struct ltq_dma_channel *ch); +extern void ltq_dma_free(struct ltq_dma_channel *ch); +extern void ltq_dma_init_port(int p); + +#endif diff --git a/trunk/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h b/trunk/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h new file mode 100644 index 000000000000..3b728275b9b0 --- /dev/null +++ b/trunk/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h @@ -0,0 +1,47 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
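Each ltq_dma_desc above is a 64-bit descriptor whose ctl word carries the ownership and packet-boundary flags. A sketch of queueing one single-fragment TX buffer on an already-opened channel; the dma_map_single() usage, the wmb() placement, and the 32-bit DMA address assignment are assumptions about how a driver would drive this layout:

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <xway_dma.h>                   /* assumed include path for the structs above */

/* Hand one buffer of 'len' bytes to the hardware on channel 'ch'. */
static void example_dma_queue_tx(struct device *dev,
                                 struct ltq_dma_channel *ch,
                                 void *buf, size_t len)
{
        struct ltq_dma_desc *desc = &ch->desc_base[ch->desc];

        /* assumes a 32-bit dma_addr_t, as on these MIPS32 SoCs */
        desc->addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        wmb();                          /* descriptor body before ownership flip */
        desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
                    (len & LTQ_DMA_SIZE_MASK);

        ch->desc = (ch->desc + 1) % LTQ_DESC_NUM;
}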
+ * + * Copyright (C) 2011 Netlogic Microsystems + * Copyright (C) 2003 Ralf Baechle + */ +#ifndef __ASM_MACH_NETLOGIC_CPU_FEATURE_OVERRIDES_H +#define __ASM_MACH_NETLOGIC_CPU_FEATURE_OVERRIDES_H + +#define cpu_has_4kex 1 +#define cpu_has_4k_cache 1 +#define cpu_has_watch 1 +#define cpu_has_mips16 0 +#define cpu_has_counter 1 +#define cpu_has_divec 1 +#define cpu_has_vce 0 +#define cpu_has_cache_cdex_p 0 +#define cpu_has_cache_cdex_s 0 +#define cpu_has_prefetch 1 +#define cpu_has_mcheck 1 +#define cpu_has_ejtag 1 + +#define cpu_has_llsc 1 +#define cpu_has_vtag_icache 0 +#define cpu_has_dc_aliases 0 +#define cpu_has_ic_fills_f_dc 0 +#define cpu_has_dsp 0 +#define cpu_has_mipsmt 0 +#define cpu_has_userlocal 0 +#define cpu_icache_snoops_remote_store 0 + +#define cpu_has_nofpuex 0 +#define cpu_has_64bits 1 + +#define cpu_has_mips32r1 1 +#define cpu_has_mips32r2 0 +#define cpu_has_mips64r1 1 +#define cpu_has_mips64r2 0 + +#define cpu_has_inclusive_pcaches 0 + +#define cpu_dcache_line_size() 32 +#define cpu_icache_line_size() 32 + +#endif /* __ASM_MACH_NETLOGIC_CPU_FEATURE_OVERRIDES_H */ diff --git a/trunk/arch/mips/include/asm/mach-netlogic/irq.h b/trunk/arch/mips/include/asm/mach-netlogic/irq.h new file mode 100644 index 000000000000..b5902458e7c1 --- /dev/null +++ b/trunk/arch/mips/include/asm/mach-netlogic/irq.h @@ -0,0 +1,14 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2011 Netlogic Microsystems. + */ +#ifndef __ASM_NETLOGIC_IRQ_H +#define __ASM_NETLOGIC_IRQ_H + +#define NR_IRQS 64 +#define MIPS_CPU_IRQ_BASE 0 + +#endif /* __ASM_NETLOGIC_IRQ_H */ diff --git a/trunk/arch/mips/include/asm/mach-netlogic/war.h b/trunk/arch/mips/include/asm/mach-netlogic/war.h new file mode 100644 index 000000000000..22da89327352 --- /dev/null +++ b/trunk/arch/mips/include/asm/mach-netlogic/war.h @@ -0,0 +1,26 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2011 Netlogic Microsystems. 
+ * Copyright (C) 2002, 2004, 2007 by Ralf Baechle + */ +#ifndef __ASM_MIPS_MACH_NLM_WAR_H +#define __ASM_MIPS_MACH_NLM_WAR_H + +#define R4600_V1_INDEX_ICACHEOP_WAR 0 +#define R4600_V1_HIT_CACHEOP_WAR 0 +#define R4600_V2_HIT_CACHEOP_WAR 0 +#define R5432_CP0_INTERRUPT_WAR 0 +#define BCM1250_M3_WAR 0 +#define SIBYTE_1956_WAR 0 +#define MIPS4K_ICACHE_REFILL_WAR 0 +#define MIPS_CACHE_SYNC_WAR 0 +#define TX49XX_ICACHE_INDEX_INV_WAR 0 +#define RM9000_CDEX_SMP_WAR 0 +#define ICACHE_REFILLS_WORKAROUND_WAR 0 +#define R10000_LLSC_WAR 0 +#define MIPS34K_MISSED_ITLB_WAR 0 + +#endif /* __ASM_MIPS_MACH_NLM_WAR_H */ diff --git a/trunk/arch/mips/include/asm/module.h b/trunk/arch/mips/include/asm/module.h index d94085a3eafb..bc01a02cacd8 100644 --- a/trunk/arch/mips/include/asm/module.h +++ b/trunk/arch/mips/include/asm/module.h @@ -118,6 +118,8 @@ search_module_dbetables(unsigned long addr) #define MODULE_PROC_FAMILY "LOONGSON2 " #elif defined CONFIG_CPU_CAVIUM_OCTEON #define MODULE_PROC_FAMILY "OCTEON " +#elif defined CONFIG_CPU_XLR +#define MODULE_PROC_FAMILY "XLR " #else #error MODULE_PROC_FAMILY undefined for your processor configuration #endif diff --git a/trunk/arch/mips/include/asm/netlogic/interrupt.h b/trunk/arch/mips/include/asm/netlogic/interrupt.h new file mode 100644 index 000000000000..a85aadb6cfd7 --- /dev/null +++ b/trunk/arch/mips/include/asm/netlogic/interrupt.h @@ -0,0 +1,45 @@ +/* + * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights + * reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the NetLogic + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _ASM_NLM_INTERRUPT_H +#define _ASM_NLM_INTERRUPT_H + +/* Defines for the IRQ numbers */ + +#define IRQ_IPI_SMP_FUNCTION 3 +#define IRQ_IPI_SMP_RESCHEDULE 4 +#define IRQ_MSGRING 6 +#define IRQ_TIMER 7 + +#endif diff --git a/trunk/arch/mips/include/asm/netlogic/mips-extns.h b/trunk/arch/mips/include/asm/netlogic/mips-extns.h new file mode 100644 index 000000000000..8c53d0ba4bf2 --- /dev/null +++ b/trunk/arch/mips/include/asm/netlogic/mips-extns.h @@ -0,0 +1,76 @@ +/* + * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights + * reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the NetLogic + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _ASM_NLM_MIPS_EXTS_H +#define _ASM_NLM_MIPS_EXTS_H + +/* + * XLR and XLP interrupt request and interrupt mask registers + */ +#define read_c0_eirr() __read_64bit_c0_register($9, 6) +#define read_c0_eimr() __read_64bit_c0_register($9, 7) +#define write_c0_eirr(val) __write_64bit_c0_register($9, 6, val) + +/* + * Writing EIMR in 32 bit is a special case, the lower 8 bit of the + * EIMR is shadowed in the status register, so we cannot save and + * restore status register for split read. 
+ */ +#define write_c0_eimr(val) \ +do { \ + if (sizeof(unsigned long) == 4) { \ + unsigned long __flags; \ + \ + local_irq_save(__flags); \ + __asm__ __volatile__( \ + ".set\tmips64\n\t" \ + "dsll\t%L0, %L0, 32\n\t" \ + "dsrl\t%L0, %L0, 32\n\t" \ + "dsll\t%M0, %M0, 32\n\t" \ + "or\t%L0, %L0, %M0\n\t" \ + "dmtc0\t%L0, $9, 7\n\t" \ + ".set\tmips0" \ + : : "r" (val)); \ + __flags = (__flags & 0xffff00ff) | (((val) & 0xff) << 8);\ + local_irq_restore(__flags); \ + } else \ + __write_64bit_c0_register($9, 7, (val)); \ +} while (0) + +static inline int hard_smp_processor_id(void) +{ + return __read_32bit_c0_register($15, 1) & 0x3ff; +} + +#endif /*_ASM_NLM_MIPS_EXTS_H */ diff --git a/trunk/arch/mips/include/asm/netlogic/psb-bootinfo.h b/trunk/arch/mips/include/asm/netlogic/psb-bootinfo.h new file mode 100644 index 000000000000..6878307f0ee6 --- /dev/null +++ b/trunk/arch/mips/include/asm/netlogic/psb-bootinfo.h @@ -0,0 +1,109 @@ +/* + * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights + * reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the NetLogic + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
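Given the read_c0_eimr()/write_c0_eimr() helpers above, masking a single extended-interrupt line is a read-modify-write on EIMR. A minimal sketch; wrapping the sequence in local_irq_save() is an assumption about what callers need on top of the macro's own 32-bit handling:

#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/netlogic/mips-extns.h>    /* assumed include path for the helpers above */

/* Clear one bit in the extended interrupt mask register (EIMR),
 * i.e. stop this CPU from taking that interrupt. */
static void example_mask_eimr_bit(int bit)
{
        unsigned long flags;
        u64 eimr;

        local_irq_save(flags);
        eimr = read_c0_eimr();
        write_c0_eimr(eimr & ~(1ULL << bit));
        local_irq_restore(flags);
}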
+ */ + +#ifndef _ASM_NETLOGIC_BOOTINFO_H +#define _ASM_NETLOGIC_BOOTINFO_H + +struct psb_info { + uint64_t boot_level; + uint64_t io_base; + uint64_t output_device; + uint64_t uart_print; + uint64_t led_output; + uint64_t init; + uint64_t exit; + uint64_t warm_reset; + uint64_t wakeup; + uint64_t online_cpu_map; + uint64_t master_reentry_sp; + uint64_t master_reentry_gp; + uint64_t master_reentry_fn; + uint64_t slave_reentry_fn; + uint64_t magic_dword; + uint64_t uart_putchar; + uint64_t size; + uint64_t uart_getchar; + uint64_t nmi_handler; + uint64_t psb_version; + uint64_t mac_addr; + uint64_t cpu_frequency; + uint64_t board_version; + uint64_t malloc; + uint64_t free; + uint64_t global_shmem_addr; + uint64_t global_shmem_size; + uint64_t psb_os_cpu_map; + uint64_t userapp_cpu_map; + uint64_t wakeup_os; + uint64_t psb_mem_map; + uint64_t board_major_version; + uint64_t board_minor_version; + uint64_t board_manf_revision; + uint64_t board_serial_number; + uint64_t psb_physaddr_map; + uint64_t xlr_loaderip_config; + uint64_t bldr_envp; + uint64_t avail_mem_map; +}; + +enum { + NETLOGIC_IO_SPACE = 0x10, + PCIX_IO_SPACE, + PCIX_CFG_SPACE, + PCIX_MEMORY_SPACE, + HT_IO_SPACE, + HT_CFG_SPACE, + HT_MEMORY_SPACE, + SRAM_SPACE, + FLASH_CONTROLLER_SPACE +}; + +#define NLM_MAX_ARGS 64 +#define NLM_MAX_ENVS 32 + +/* This is what netlboot passes and linux boot_mem_map is subtly different */ +#define NLM_BOOT_MEM_MAP_MAX 32 +struct nlm_boot_mem_map { + int nr_map; + struct nlm_boot_mem_map_entry { + uint64_t addr; /* start of memory segment */ + uint64_t size; /* size of memory segment */ + uint32_t type; /* type of memory segment */ + } map[NLM_BOOT_MEM_MAP_MAX]; +}; + +/* Pointer to saved boot loader info */ +extern struct psb_info nlm_prom_info; + +#endif diff --git a/trunk/arch/mips/include/asm/netlogic/xlr/gpio.h b/trunk/arch/mips/include/asm/netlogic/xlr/gpio.h new file mode 100644 index 000000000000..51f6ad4aeb14 --- /dev/null +++ b/trunk/arch/mips/include/asm/netlogic/xlr/gpio.h @@ -0,0 +1,73 @@ +/* + * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights + * reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the NetLogic + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
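nlm_boot_mem_map above mirrors what netlboot hands over, and early setup code has to translate it into the kernel's own memory map. A sketch, under the assumption that the entry type values line up with the BOOT_MEM_* constants from asm/bootinfo.h:

#include <linux/init.h>
#include <asm/bootinfo.h>
#include <asm/netlogic/psb-bootinfo.h>  /* assumed include path */

/* Register every RAM segment the boot loader reported. */
static void __init example_register_boot_mem(const struct nlm_boot_mem_map *map)
{
        int i;

        for (i = 0; i < map->nr_map; i++) {
                if (map->map[i].type != BOOT_MEM_RAM)
                        continue;
                add_memory_region(map->map[i].addr, map->map[i].size,
                                  BOOT_MEM_RAM);
        }
}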
IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _ASM_NLM_GPIO_H +#define _ASM_NLM_GPIO_H + +#define NETLOGIC_GPIO_INT_EN_REG 0 +#define NETLOGIC_GPIO_INPUT_INVERSION_REG 1 +#define NETLOGIC_GPIO_IO_DIR_REG 2 +#define NETLOGIC_GPIO_IO_DATA_WR_REG 3 +#define NETLOGIC_GPIO_IO_DATA_RD_REG 4 + +#define NETLOGIC_GPIO_SWRESET_REG 8 +#define NETLOGIC_GPIO_DRAM1_CNTRL_REG 9 +#define NETLOGIC_GPIO_DRAM1_RATIO_REG 10 +#define NETLOGIC_GPIO_DRAM1_RESET_REG 11 +#define NETLOGIC_GPIO_DRAM1_STATUS_REG 12 +#define NETLOGIC_GPIO_DRAM2_CNTRL_REG 13 +#define NETLOGIC_GPIO_DRAM2_RATIO_REG 14 +#define NETLOGIC_GPIO_DRAM2_RESET_REG 15 +#define NETLOGIC_GPIO_DRAM2_STATUS_REG 16 + +#define NETLOGIC_GPIO_PWRON_RESET_CFG_REG 21 +#define NETLOGIC_GPIO_BIST_ALL_GO_STATUS_REG 24 +#define NETLOGIC_GPIO_BIST_CPU_GO_STATUS_REG 25 +#define NETLOGIC_GPIO_BIST_DEV_GO_STATUS_REG 26 + +#define NETLOGIC_GPIO_FUSE_BANK_REG 35 +#define NETLOGIC_GPIO_CPU_RESET_REG 40 +#define NETLOGIC_GPIO_RNG_REG 43 + +#define NETLOGIC_PWRON_RESET_PCMCIA_BOOT 17 +#define NETLOGIC_GPIO_LED_BITMAP 0x1700000 +#define NETLOGIC_GPIO_LED_0_SHIFT 20 +#define NETLOGIC_GPIO_LED_1_SHIFT 24 + +#define NETLOGIC_GPIO_LED_OUTPUT_CODE_RESET 0x01 +#define NETLOGIC_GPIO_LED_OUTPUT_CODE_HARD_RESET 0x02 +#define NETLOGIC_GPIO_LED_OUTPUT_CODE_SOFT_RESET 0x03 +#define NETLOGIC_GPIO_LED_OUTPUT_CODE_MAIN 0x04 + +#endif diff --git a/trunk/arch/mips/include/asm/netlogic/xlr/iomap.h b/trunk/arch/mips/include/asm/netlogic/xlr/iomap.h new file mode 100644 index 000000000000..2e3a4dd53045 --- /dev/null +++ b/trunk/arch/mips/include/asm/netlogic/xlr/iomap.h @@ -0,0 +1,131 @@ +/* + * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights + * reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the NetLogic + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _ASM_NLM_IOMAP_H +#define _ASM_NLM_IOMAP_H + +#define DEFAULT_NETLOGIC_IO_BASE CKSEG1ADDR(0x1ef00000) +#define NETLOGIC_IO_DDR2_CHN0_OFFSET 0x01000 +#define NETLOGIC_IO_DDR2_CHN1_OFFSET 0x02000 +#define NETLOGIC_IO_DDR2_CHN2_OFFSET 0x03000 +#define NETLOGIC_IO_DDR2_CHN3_OFFSET 0x04000 +#define NETLOGIC_IO_PIC_OFFSET 0x08000 +#define NETLOGIC_IO_UART_0_OFFSET 0x14000 +#define NETLOGIC_IO_UART_1_OFFSET 0x15100 + +#define NETLOGIC_IO_SIZE 0x1000 + +#define NETLOGIC_IO_BRIDGE_OFFSET 0x00000 + +#define NETLOGIC_IO_RLD2_CHN0_OFFSET 0x05000 +#define NETLOGIC_IO_RLD2_CHN1_OFFSET 0x06000 + +#define NETLOGIC_IO_SRAM_OFFSET 0x07000 + +#define NETLOGIC_IO_PCIX_OFFSET 0x09000 +#define NETLOGIC_IO_HT_OFFSET 0x0A000 + +#define NETLOGIC_IO_SECURITY_OFFSET 0x0B000 + +#define NETLOGIC_IO_GMAC_0_OFFSET 0x0C000 +#define NETLOGIC_IO_GMAC_1_OFFSET 0x0D000 +#define NETLOGIC_IO_GMAC_2_OFFSET 0x0E000 +#define NETLOGIC_IO_GMAC_3_OFFSET 0x0F000 + +/* XLS devices */ +#define NETLOGIC_IO_GMAC_4_OFFSET 0x20000 +#define NETLOGIC_IO_GMAC_5_OFFSET 0x21000 +#define NETLOGIC_IO_GMAC_6_OFFSET 0x22000 +#define NETLOGIC_IO_GMAC_7_OFFSET 0x23000 + +#define NETLOGIC_IO_PCIE_0_OFFSET 0x1E000 +#define NETLOGIC_IO_PCIE_1_OFFSET 0x1F000 +#define NETLOGIC_IO_SRIO_0_OFFSET 0x1E000 +#define NETLOGIC_IO_SRIO_1_OFFSET 0x1F000 + +#define NETLOGIC_IO_USB_0_OFFSET 0x24000 +#define NETLOGIC_IO_USB_1_OFFSET 0x25000 + +#define NETLOGIC_IO_COMP_OFFSET 0x1D000 +/* end XLS devices */ + +/* XLR devices */ +#define NETLOGIC_IO_SPI4_0_OFFSET 0x10000 +#define NETLOGIC_IO_XGMAC_0_OFFSET 0x11000 +#define NETLOGIC_IO_SPI4_1_OFFSET 0x12000 +#define NETLOGIC_IO_XGMAC_1_OFFSET 0x13000 +/* end XLR devices */ + +#define NETLOGIC_IO_I2C_0_OFFSET 0x16000 +#define NETLOGIC_IO_I2C_1_OFFSET 0x17000 + +#define NETLOGIC_IO_GPIO_OFFSET 0x18000 +#define NETLOGIC_IO_FLASH_OFFSET 0x19000 +#define NETLOGIC_IO_TB_OFFSET 0x1C000 + +#define NETLOGIC_CPLD_OFFSET KSEG1ADDR(0x1d840000) + +/* + * Base Address (Virtual) of the PCI Config address space + * For now, choose 256M phys in kseg1 = 0xA0000000 + (1<<28) + * Config space spans 256 (num of buses) * 256 (num functions) * 256 bytes + * ie 1<<24 = 16M + */ +#define DEFAULT_PCI_CONFIG_BASE 0x18000000 +#define DEFAULT_HT_TYPE0_CFG_BASE 0x16000000 +#define DEFAULT_HT_TYPE1_CFG_BASE 0x17000000 + +#ifndef __ASSEMBLY__ +#include +#include + +typedef volatile __u32 nlm_reg_t; +extern unsigned long netlogic_io_base; + +/* FIXME read once in write_reg */ +#ifdef CONFIG_CPU_LITTLE_ENDIAN +#define netlogic_read_reg(base, offset) ((base)[(offset)]) +#define netlogic_write_reg(base, offset, value) ((base)[(offset)] = (value)) +#else +#define netlogic_read_reg(base, offset) (be32_to_cpu((base)[(offset)])) +#define netlogic_write_reg(base, offset, value) \ + ((base)[(offset)] = cpu_to_be32((value))) +#endif + +#define netlogic_read_reg_le32(base, offset) (le32_to_cpu((base)[(offset)])) +#define netlogic_write_reg_le32(base, offset, value) \ + ((base)[(offset)] = cpu_to_le32((value))) +#define 
netlogic_io_mmio(offset) ((nlm_reg_t *)(netlogic_io_base+(offset))) +#endif /* __ASSEMBLY__ */ +#endif diff --git a/trunk/arch/mips/include/asm/netlogic/xlr/pic.h b/trunk/arch/mips/include/asm/netlogic/xlr/pic.h new file mode 100644 index 000000000000..5cceb746f080 --- /dev/null +++ b/trunk/arch/mips/include/asm/netlogic/xlr/pic.h @@ -0,0 +1,231 @@ +/* + * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights + * reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the NetLogic + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
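The nlm_reg_t accessors above index registers in 32-bit words rather than bytes, which is why the XLR GPIO register numbers earlier in this patch are small integers. A sketch of reading the GPIO input-data register through them; the include paths are assumptions:

#include <linux/types.h>
#include <asm/netlogic/xlr/iomap.h>     /* assumed include paths */
#include <asm/netlogic/xlr/gpio.h>

/* Return the raw input state of the XLR GPIO block. */
static u32 example_read_gpio_inputs(void)
{
        nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_GPIO_OFFSET);

        return netlogic_read_reg(mmio, NETLOGIC_GPIO_IO_DATA_RD_REG);
}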
+ */ + +#ifndef _ASM_NLM_XLR_PIC_H +#define _ASM_NLM_XLR_PIC_H + +#define PIC_CLKS_PER_SEC 66666666ULL +/* PIC hardware interrupt numbers */ +#define PIC_IRT_WD_INDEX 0 +#define PIC_IRT_TIMER_0_INDEX 1 +#define PIC_IRT_TIMER_1_INDEX 2 +#define PIC_IRT_TIMER_2_INDEX 3 +#define PIC_IRT_TIMER_3_INDEX 4 +#define PIC_IRT_TIMER_4_INDEX 5 +#define PIC_IRT_TIMER_5_INDEX 6 +#define PIC_IRT_TIMER_6_INDEX 7 +#define PIC_IRT_TIMER_7_INDEX 8 +#define PIC_IRT_CLOCK_INDEX PIC_IRT_TIMER_7_INDEX +#define PIC_IRT_UART_0_INDEX 9 +#define PIC_IRT_UART_1_INDEX 10 +#define PIC_IRT_I2C_0_INDEX 11 +#define PIC_IRT_I2C_1_INDEX 12 +#define PIC_IRT_PCMCIA_INDEX 13 +#define PIC_IRT_GPIO_INDEX 14 +#define PIC_IRT_HYPER_INDEX 15 +#define PIC_IRT_PCIX_INDEX 16 +/* XLS */ +#define PIC_IRT_CDE_INDEX 15 +#define PIC_IRT_BRIDGE_TB_XLS_INDEX 16 +/* XLS */ +#define PIC_IRT_GMAC0_INDEX 17 +#define PIC_IRT_GMAC1_INDEX 18 +#define PIC_IRT_GMAC2_INDEX 19 +#define PIC_IRT_GMAC3_INDEX 20 +#define PIC_IRT_XGS0_INDEX 21 +#define PIC_IRT_XGS1_INDEX 22 +#define PIC_IRT_HYPER_FATAL_INDEX 23 +#define PIC_IRT_PCIX_FATAL_INDEX 24 +#define PIC_IRT_BRIDGE_AERR_INDEX 25 +#define PIC_IRT_BRIDGE_BERR_INDEX 26 +#define PIC_IRT_BRIDGE_TB_XLR_INDEX 27 +#define PIC_IRT_BRIDGE_AERR_NMI_INDEX 28 +/* XLS */ +#define PIC_IRT_GMAC4_INDEX 21 +#define PIC_IRT_GMAC5_INDEX 22 +#define PIC_IRT_GMAC6_INDEX 23 +#define PIC_IRT_GMAC7_INDEX 24 +#define PIC_IRT_BRIDGE_ERR_INDEX 25 +#define PIC_IRT_PCIE_LINK0_INDEX 26 +#define PIC_IRT_PCIE_LINK1_INDEX 27 +#define PIC_IRT_PCIE_LINK2_INDEX 23 +#define PIC_IRT_PCIE_LINK3_INDEX 24 +#define PIC_IRT_PCIE_XLSB0_LINK2_INDEX 28 +#define PIC_IRT_PCIE_XLSB0_LINK3_INDEX 29 +#define PIC_IRT_SRIO_LINK0_INDEX 26 +#define PIC_IRT_SRIO_LINK1_INDEX 27 +#define PIC_IRT_SRIO_LINK2_INDEX 28 +#define PIC_IRT_SRIO_LINK3_INDEX 29 +#define PIC_IRT_PCIE_INT_INDEX 28 +#define PIC_IRT_PCIE_FATAL_INDEX 29 +#define PIC_IRT_GPIO_B_INDEX 30 +#define PIC_IRT_USB_INDEX 31 +/* XLS */ +#define PIC_NUM_IRTS 32 + + +#define PIC_CLOCK_TIMER 7 + +/* PIC Registers */ +#define PIC_CTRL 0x00 +#define PIC_IPI 0x04 +#define PIC_INT_ACK 0x06 + +#define WD_MAX_VAL_0 0x08 +#define WD_MAX_VAL_1 0x09 +#define WD_MASK_0 0x0a +#define WD_MASK_1 0x0b +#define WD_HEARBEAT_0 0x0c +#define WD_HEARBEAT_1 0x0d + +#define PIC_IRT_0_BASE 0x40 +#define PIC_IRT_1_BASE 0x80 +#define PIC_TIMER_MAXVAL_0_BASE 0x100 +#define PIC_TIMER_MAXVAL_1_BASE 0x110 +#define PIC_TIMER_COUNT_0_BASE 0x120 +#define PIC_TIMER_COUNT_1_BASE 0x130 + +#define PIC_IRT_0(picintr) (PIC_IRT_0_BASE + (picintr)) +#define PIC_IRT_1(picintr) (PIC_IRT_1_BASE + (picintr)) + +#define PIC_TIMER_MAXVAL_0(i) (PIC_TIMER_MAXVAL_0_BASE + (i)) +#define PIC_TIMER_MAXVAL_1(i) (PIC_TIMER_MAXVAL_1_BASE + (i)) +#define PIC_TIMER_COUNT_0(i) (PIC_TIMER_COUNT_0_BASE + (i)) +#define PIC_TIMER_COUNT_1(i) (PIC_TIMER_COUNT_0_BASE + (i)) + +/* + * Mapping between hardware interrupt numbers and IRQs on CPU + * we use a simple scheme to map PIC interrupts 0-31 to IRQs + * 8-39. 
This leaves the IRQ 0-7 for cpu interrupts like + * count/compare and FMN + */ +#define PIC_IRQ_BASE 8 +#define PIC_INTR_TO_IRQ(i) (PIC_IRQ_BASE + (i)) +#define PIC_IRQ_TO_INTR(i) ((i) - PIC_IRQ_BASE) + +#define PIC_IRT_FIRST_IRQ PIC_IRQ_BASE +#define PIC_WD_IRQ PIC_INTR_TO_IRQ(PIC_IRT_WD_INDEX) +#define PIC_TIMER_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_0_INDEX) +#define PIC_TIMER_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_1_INDEX) +#define PIC_TIMER_2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_2_INDEX) +#define PIC_TIMER_3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_3_INDEX) +#define PIC_TIMER_4_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_4_INDEX) +#define PIC_TIMER_5_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_5_INDEX) +#define PIC_TIMER_6_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_6_INDEX) +#define PIC_TIMER_7_IRQ PIC_INTR_TO_IRQ(PIC_IRT_TIMER_7_INDEX) +#define PIC_CLOCK_IRQ (PIC_TIMER_7_IRQ) +#define PIC_UART_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_UART_0_INDEX) +#define PIC_UART_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_UART_1_INDEX) +#define PIC_I2C_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_I2C_0_INDEX) +#define PIC_I2C_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_I2C_1_INDEX) +#define PIC_PCMCIA_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCMCIA_INDEX) +#define PIC_GPIO_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GPIO_INDEX) +#define PIC_HYPER_IRQ PIC_INTR_TO_IRQ(PIC_IRT_HYPER_INDEX) +#define PIC_PCIX_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIX_INDEX) +/* XLS */ +#define PIC_CDE_IRQ PIC_INTR_TO_IRQ(PIC_IRT_CDE_INDEX) +#define PIC_BRIDGE_TB_XLS_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_TB_XLS_INDEX) +/* end XLS */ +#define PIC_GMAC_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC0_INDEX) +#define PIC_GMAC_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC1_INDEX) +#define PIC_GMAC_2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC2_INDEX) +#define PIC_GMAC_3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC3_INDEX) +#define PIC_XGS_0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_XGS0_INDEX) +#define PIC_XGS_1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_XGS1_INDEX) +#define PIC_HYPER_FATAL_IRQ PIC_INTR_TO_IRQ(PIC_IRT_HYPER_FATAL_INDEX) +#define PIC_PCIX_FATAL_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIX_FATAL_INDEX) +#define PIC_BRIDGE_AERR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_AERR_INDEX) +#define PIC_BRIDGE_BERR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_BERR_INDEX) +#define PIC_BRIDGE_TB_XLR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_TB_XLR_INDEX) +#define PIC_BRIDGE_AERR_NMI_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_AERR_NMI_INDEX) +/* XLS defines */ +#define PIC_GMAC_4_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC4_INDEX) +#define PIC_GMAC_5_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC5_INDEX) +#define PIC_GMAC_6_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC6_INDEX) +#define PIC_GMAC_7_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC7_INDEX) +#define PIC_BRIDGE_ERR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_ERR_INDEX) +#define PIC_PCIE_LINK0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_LINK0_INDEX) +#define PIC_PCIE_LINK1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_LINK1_INDEX) +#define PIC_PCIE_LINK2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_LINK2_INDEX) +#define PIC_PCIE_LINK3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_LINK3_INDEX) +#define PIC_PCIE_XLSB0_LINK2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_XLSB0_LINK2_INDEX) +#define PIC_PCIE_XLSB0_LINK3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_XLSB0_LINK3_INDEX) +#define PIC_SRIO_LINK0_IRQ PIC_INTR_TO_IRQ(PIC_IRT_SRIO_LINK0_INDEX) +#define PIC_SRIO_LINK1_IRQ PIC_INTR_TO_IRQ(PIC_IRT_SRIO_LINK1_INDEX) +#define PIC_SRIO_LINK2_IRQ PIC_INTR_TO_IRQ(PIC_IRT_SRIO_LINK2_INDEX) +#define PIC_SRIO_LINK3_IRQ PIC_INTR_TO_IRQ(PIC_IRT_SRIO_LINK3_INDEX) +#define PIC_PCIE_INT_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_INT__INDEX) +#define PIC_PCIE_FATAL_IRQ PIC_INTR_TO_IRQ(PIC_IRT_PCIE_FATAL_INDEX) +#define PIC_GPIO_B_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GPIO_B_INDEX) 
+#define PIC_USB_IRQ PIC_INTR_TO_IRQ(PIC_IRT_USB_INDEX) +#define PIC_IRT_LAST_IRQ PIC_USB_IRQ +/* end XLS */ + +#ifndef __ASSEMBLY__ +static inline void pic_send_ipi(u32 ipi) +{ + nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); + + netlogic_write_reg(mmio, PIC_IPI, ipi); +} + +static inline u32 pic_read_control(void) +{ + nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); + + return netlogic_read_reg(mmio, PIC_CTRL); +} + +static inline void pic_write_control(u32 control) +{ + nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); + + netlogic_write_reg(mmio, PIC_CTRL, control); +} + +static inline void pic_update_control(u32 control) +{ + nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); + + netlogic_write_reg(mmio, PIC_CTRL, + (control | netlogic_read_reg(mmio, PIC_CTRL))); +} + +#define PIC_IRQ_IS_EDGE_TRIGGERED(irq) (((irq) >= PIC_TIMER_0_IRQ) && \ + ((irq) <= PIC_TIMER_7_IRQ)) +#define PIC_IRQ_IS_IRT(irq) (((irq) >= PIC_IRT_FIRST_IRQ) && \ + ((irq) <= PIC_IRT_LAST_IRQ)) +#endif + +#endif /* _ASM_NLM_XLR_PIC_H */ diff --git a/trunk/arch/mips/include/asm/netlogic/xlr/xlr.h b/trunk/arch/mips/include/asm/netlogic/xlr/xlr.h new file mode 100644 index 000000000000..3e6372692a04 --- /dev/null +++ b/trunk/arch/mips/include/asm/netlogic/xlr/xlr.h @@ -0,0 +1,75 @@ +/* + * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights + * reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the NetLogic + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
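With the PIC_INTR_TO_IRQ()/PIC_IRQ_TO_INTR() mapping above, Linux IRQ numbers 8 to 39 correspond one-to-one to IRT entries 0 to 31. A sketch of going from a Linux IRQ back to the IRT index before touching a per-entry register; treating PIC_INT_ACK as a bitmask write is an assumption about the acknowledge path:

#include <asm/netlogic/xlr/iomap.h>     /* assumed include paths */
#include <asm/netlogic/xlr/pic.h>

/* Acknowledge one PIC-routed interrupt, identified by its Linux IRQ number. */
static void example_pic_ack(int irq)
{
        nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET);

        if (!PIC_IRQ_IS_IRT(irq))
                return;
        netlogic_write_reg(mmio, PIC_INT_ACK, 1 << PIC_IRQ_TO_INTR(irq));
}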
+ */ + +#ifndef _ASM_NLM_XLR_H +#define _ASM_NLM_XLR_H + +/* Platform UART functions */ +struct uart_port; +unsigned int nlm_xlr_uart_in(struct uart_port *, int); +void nlm_xlr_uart_out(struct uart_port *, int, int); + +/* SMP support functions */ +struct irq_desc; +void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc); +void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc); +int nlm_wakeup_secondary_cpus(u32 wakeup_mask); +void nlm_smp_irq_init(void); +void nlm_boot_smp_nmi(void); +void prom_pre_boot_secondary_cpus(void); + +extern struct plat_smp_ops nlm_smp_ops; +extern unsigned long nlm_common_ebase; + +/* XLS B silicon "Rook" */ +static inline unsigned int nlm_chip_is_xls_b(void) +{ + uint32_t prid = read_c0_prid(); + + return ((prid & 0xf000) == 0x4000); +} + +/* + * XLR chip types + */ + /* The XLS product line has chip versions 0x[48c]? */ +static inline unsigned int nlm_chip_is_xls(void) +{ + uint32_t prid = read_c0_prid(); + + return ((prid & 0xf000) == 0x8000 || (prid & 0xf000) == 0x4000 || + (prid & 0xf000) == 0xc000); +} + +#endif /* _ASM_NLM_XLR_H */ diff --git a/trunk/arch/mips/include/asm/ptrace.h b/trunk/arch/mips/include/asm/ptrace.h index 9f1b8dba2c81..de39b1f343ea 100644 --- a/trunk/arch/mips/include/asm/ptrace.h +++ b/trunk/arch/mips/include/asm/ptrace.h @@ -141,7 +141,8 @@ extern int ptrace_set_watch_regs(struct task_struct *child, #define instruction_pointer(regs) ((regs)->cp0_epc) #define profile_pc(regs) instruction_pointer(regs) -extern asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit); +extern asmlinkage void syscall_trace_enter(struct pt_regs *regs); +extern asmlinkage void syscall_trace_leave(struct pt_regs *regs); extern NORET_TYPE void die(const char *, struct pt_regs *) ATTRIB_NORET; diff --git a/trunk/arch/mips/include/asm/thread_info.h b/trunk/arch/mips/include/asm/thread_info.h index d71160de4d10..97f8bf6639e7 100644 --- a/trunk/arch/mips/include/asm/thread_info.h +++ b/trunk/arch/mips/include/asm/thread_info.h @@ -149,6 +149,9 @@ register struct thread_info *__current_thread_info __asm__("$28"); #define _TIF_FPUBOUND (1< 0xffffff) { if (vdma_debug) @@ -228,8 +228,7 @@ int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size) return -EINVAL; /* invalid physical address */ } - npages = pages = - (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1; + pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1; first = laddr >> 12; if (vdma_debug) printk("vdma_remap: first=%x, pages=%x\n", first, pages); diff --git a/trunk/arch/mips/jz4740/dma.c b/trunk/arch/mips/jz4740/dma.c index 5ebe75a68350..d7feb898692c 100644 --- a/trunk/arch/mips/jz4740/dma.c +++ b/trunk/arch/mips/jz4740/dma.c @@ -242,9 +242,7 @@ EXPORT_SYMBOL_GPL(jz4740_dma_get_residue); static void jz4740_dma_chan_irq(struct jz4740_dma_chan *dma) { - uint32_t status; - - status = jz4740_dma_read(JZ_REG_DMA_STATUS_CTRL(dma->id)); + (void) jz4740_dma_read(JZ_REG_DMA_STATUS_CTRL(dma->id)); jz4740_dma_write_mask(JZ_REG_DMA_STATUS_CTRL(dma->id), 0, JZ_DMA_STATUS_CTRL_ENABLE | JZ_DMA_STATUS_CTRL_TRANSFER_DONE); diff --git a/trunk/arch/mips/jz4740/setup.c b/trunk/arch/mips/jz4740/setup.c index 6a9e14dab91e..d97cfbf882f5 100644 --- a/trunk/arch/mips/jz4740/setup.c +++ b/trunk/arch/mips/jz4740/setup.c @@ -1,5 +1,6 @@ /* * Copyright (C) 2009-2010, Lars-Peter Clausen + * Copyright (C) 2011, Maarten ter Huurne * JZ4740 setup code * * This program is free software; you can redistribute it and/or modify it @@ -14,13 +15,44 @@ */ 
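Note: nlm_chip_is_xls() and nlm_chip_is_xls_b() above key purely off bits 15:12 of the MIPS PRId register. A minimal user-space model of the same check; the sample PRId values are made up for illustration and are not taken from any datasheet:

    #include <stdint.h>
    #include <stdio.h>

    /* mirrors the mask logic of nlm_chip_is_xls(): bits 15:12 of c0_prid */
    static int prid_is_xls(uint32_t prid)
    {
        uint32_t imp = prid & 0xf000;

        return imp == 0x8000 || imp == 0x4000 || imp == 0xc000;
    }

    int main(void)
    {
        uint32_t samples[] = { 0x00008a00, 0x00004a00, 0x0000c600, 0x00000c00 }; /* hypothetical */
        unsigned i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            printf("prid 0x%08x -> %s\n", samples[i],
                   prid_is_xls(samples[i]) ? "XLS" : "not XLS");
        return 0;
    }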
#include +#include #include +#include + +#include + #include "reset.h" + +#define JZ4740_EMC_SDRAM_CTRL 0x80 + + +static void __init jz4740_detect_mem(void) +{ + void __iomem *jz_emc_base; + u32 ctrl, bus, bank, rows, cols; + phys_t size; + + jz_emc_base = ioremap(JZ4740_EMC_BASE_ADDR, 0x100); + ctrl = readl(jz_emc_base + JZ4740_EMC_SDRAM_CTRL); + bus = 2 - ((ctrl >> 31) & 1); + bank = 1 + ((ctrl >> 19) & 1); + cols = 8 + ((ctrl >> 26) & 7); + rows = 11 + ((ctrl >> 20) & 3); + printk(KERN_DEBUG + "SDRAM preconfigured: bus:%u bank:%u rows:%u cols:%u\n", + bus, bank, rows, cols); + iounmap(jz_emc_base); + + size = 1 << (bus + bank + cols + rows); + add_memory_region(0, size, BOOT_MEM_RAM); +} + void __init plat_mem_setup(void) { jz4740_reset_init(); + jz4740_detect_mem(); } const char *get_system_type(void) diff --git a/trunk/arch/mips/jz4740/time.c b/trunk/arch/mips/jz4740/time.c index fe01678d94fd..f83c2dd07a27 100644 --- a/trunk/arch/mips/jz4740/time.c +++ b/trunk/arch/mips/jz4740/time.c @@ -89,7 +89,7 @@ static int jz4740_clockevent_set_next(unsigned long evt, static struct clock_event_device jz4740_clockevent = { .name = "jz4740-timer", - .features = CLOCK_EVT_FEAT_PERIODIC, + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .set_next_event = jz4740_clockevent_set_next, .set_mode = jz4740_clockevent_set_mode, .rating = 200, @@ -121,8 +121,7 @@ void __init plat_time_init(void) clockevents_register_device(&jz4740_clockevent); - clocksource_set_clock(&jz4740_clocksource, clk_rate); - ret = clocksource_register(&jz4740_clocksource); + ret = clocksource_register_hz(&jz4740_clocksource, clk_rate); if (ret) printk(KERN_ERR "Failed to register clocksource: %d\n", ret); diff --git a/trunk/arch/mips/jz4740/timer.c b/trunk/arch/mips/jz4740/timer.c index b2c015129055..654d5c3900b6 100644 --- a/trunk/arch/mips/jz4740/timer.c +++ b/trunk/arch/mips/jz4740/timer.c @@ -27,11 +27,13 @@ void jz4740_timer_enable_watchdog(void) { writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_CLEAR); } +EXPORT_SYMBOL_GPL(jz4740_timer_enable_watchdog); void jz4740_timer_disable_watchdog(void) { writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_SET); } +EXPORT_SYMBOL_GPL(jz4740_timer_disable_watchdog); void __init jz4740_timer_init(void) { diff --git a/trunk/arch/mips/kernel/Makefile b/trunk/arch/mips/kernel/Makefile index cedee2bcbd18..83bba332bbfc 100644 --- a/trunk/arch/mips/kernel/Makefile +++ b/trunk/arch/mips/kernel/Makefile @@ -52,6 +52,7 @@ obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o obj-$(CONFIG_CPU_TX49XX) += r4k_fpu.o r4k_switch.o obj-$(CONFIG_CPU_VR41XX) += r4k_fpu.o r4k_switch.o obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o +obj-$(CONFIG_CPU_XLR) += r4k_fpu.o r4k_switch.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SMP_UP) += smp-up.o diff --git a/trunk/arch/mips/kernel/cevt-txx9.c b/trunk/arch/mips/kernel/cevt-txx9.c index 0b7377361e22..f0ab92a1b057 100644 --- a/trunk/arch/mips/kernel/cevt-txx9.c +++ b/trunk/arch/mips/kernel/cevt-txx9.c @@ -51,8 +51,7 @@ void __init txx9_clocksource_init(unsigned long baseaddr, { struct txx9_tmr_reg __iomem *tmrptr; - clocksource_set_clock(&txx9_clocksource.cs, TIMER_CLK(imbusclk)); - clocksource_register(&txx9_clocksource.cs); + clocksource_register_hz(&txx9_clocksource.cs, TIMER_CLK(imbusclk)); tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg)); __raw_writel(TCR_BASE, &tmrptr->tcr); diff --git a/trunk/arch/mips/kernel/cpu-probe.c b/trunk/arch/mips/kernel/cpu-probe.c index f65d4c8c65a6..bb133d10b145 100644 --- 
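Note: jz4740_detect_mem() above recovers the SDRAM size from the bus-width, bank, column and row fields of the EMC SDRAM control register; the total is simply 1 << (bus + bank + cols + rows). The same decode in isolation, with the field layout copied from the code and a made-up register value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t ctrl = 0x23a66420;              /* hypothetical EMC_SDRAM_CTRL value */
        uint32_t bus  = 2 - ((ctrl >> 31) & 1);  /* log2 of bus width in bytes (16/32-bit) */
        uint32_t bank = 1 + ((ctrl >> 19) & 1);
        uint32_t cols = 8 + ((ctrl >> 26) & 7);
        uint32_t rows = 11 + ((ctrl >> 20) & 3);
        uint64_t size = 1ULL << (bus + bank + cols + rows);

        printf("bus=%u bank=%u rows=%u cols=%u -> %llu MiB\n",
               bus, bank, rows, cols, (unsigned long long)(size >> 20));
        return 0;
    }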
a/trunk/arch/mips/kernel/cpu-probe.c +++ b/trunk/arch/mips/kernel/cpu-probe.c @@ -291,6 +291,12 @@ static inline int cpu_has_confreg(void) #endif } +static inline void set_elf_platform(int cpu, const char *plat) +{ + if (cpu == 0) + __elf_platform = plat; +} + /* * Get the FPU Implementation/Revision. */ @@ -614,6 +620,16 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) case PRID_IMP_LOONGSON2: c->cputype = CPU_LOONGSON2; __cpu_name[cpu] = "ICT Loongson-2"; + + switch (c->processor_id & PRID_REV_MASK) { + case PRID_REV_LOONGSON2E: + set_elf_platform(cpu, "loongson2e"); + break; + case PRID_REV_LOONGSON2F: + set_elf_platform(cpu, "loongson2f"); + break; + } + c->isa_level = MIPS_CPU_ISA_III; c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC | @@ -911,12 +927,14 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu) case PRID_IMP_BMIPS32_REV8: c->cputype = CPU_BMIPS32; __cpu_name[cpu] = "Broadcom BMIPS32"; + set_elf_platform(cpu, "bmips32"); break; case PRID_IMP_BMIPS3300: case PRID_IMP_BMIPS3300_ALT: case PRID_IMP_BMIPS3300_BUG: c->cputype = CPU_BMIPS3300; __cpu_name[cpu] = "Broadcom BMIPS3300"; + set_elf_platform(cpu, "bmips3300"); break; case PRID_IMP_BMIPS43XX: { int rev = c->processor_id & 0xff; @@ -925,15 +943,18 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu) rev <= PRID_REV_BMIPS4380_HI) { c->cputype = CPU_BMIPS4380; __cpu_name[cpu] = "Broadcom BMIPS4380"; + set_elf_platform(cpu, "bmips4380"); } else { c->cputype = CPU_BMIPS4350; __cpu_name[cpu] = "Broadcom BMIPS4350"; + set_elf_platform(cpu, "bmips4350"); } break; } case PRID_IMP_BMIPS5000: c->cputype = CPU_BMIPS5000; __cpu_name[cpu] = "Broadcom BMIPS5000"; + set_elf_platform(cpu, "bmips5000"); c->options |= MIPS_CPU_ULRI; break; } @@ -956,14 +977,12 @@ static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_CAVIUM_OCTEON_PLUS; __cpu_name[cpu] = "Cavium Octeon+"; platform: - if (cpu == 0) - __elf_platform = "octeon"; + set_elf_platform(cpu, "octeon"); break; case PRID_IMP_CAVIUM_CN63XX: c->cputype = CPU_CAVIUM_OCTEON2; __cpu_name[cpu] = "Cavium Octeon II"; - if (cpu == 0) - __elf_platform = "octeon2"; + set_elf_platform(cpu, "octeon2"); break; default: printk(KERN_INFO "Unknown Octeon chip!\n"); @@ -988,6 +1007,59 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu) } } +static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu) +{ + decode_configs(c); + + c->options = (MIPS_CPU_TLB | + MIPS_CPU_4KEX | + MIPS_CPU_COUNTER | + MIPS_CPU_DIVEC | + MIPS_CPU_WATCH | + MIPS_CPU_EJTAG | + MIPS_CPU_LLSC); + + switch (c->processor_id & 0xff00) { + case PRID_IMP_NETLOGIC_XLR732: + case PRID_IMP_NETLOGIC_XLR716: + case PRID_IMP_NETLOGIC_XLR532: + case PRID_IMP_NETLOGIC_XLR308: + case PRID_IMP_NETLOGIC_XLR532C: + case PRID_IMP_NETLOGIC_XLR516C: + case PRID_IMP_NETLOGIC_XLR508C: + case PRID_IMP_NETLOGIC_XLR308C: + c->cputype = CPU_XLR; + __cpu_name[cpu] = "Netlogic XLR"; + break; + + case PRID_IMP_NETLOGIC_XLS608: + case PRID_IMP_NETLOGIC_XLS408: + case PRID_IMP_NETLOGIC_XLS404: + case PRID_IMP_NETLOGIC_XLS208: + case PRID_IMP_NETLOGIC_XLS204: + case PRID_IMP_NETLOGIC_XLS108: + case PRID_IMP_NETLOGIC_XLS104: + case PRID_IMP_NETLOGIC_XLS616B: + case PRID_IMP_NETLOGIC_XLS608B: + case PRID_IMP_NETLOGIC_XLS416B: + case PRID_IMP_NETLOGIC_XLS412B: + case PRID_IMP_NETLOGIC_XLS408B: + case PRID_IMP_NETLOGIC_XLS404B: + c->cputype = CPU_XLR; + __cpu_name[cpu] = "Netlogic XLS"; + 
break; + + default: + printk(KERN_INFO "Unknown Netlogic chip id [%02x]!\n", + c->processor_id); + c->cputype = CPU_XLR; + break; + } + + c->isa_level = MIPS_CPU_ISA_M64R1; + c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1; +} + #ifdef CONFIG_64BIT /* For use by uaccess.h */ u64 __ua_limit; @@ -1035,6 +1107,9 @@ __cpuinit void cpu_probe(void) case PRID_COMP_INGENIC: cpu_probe_ingenic(c, cpu); break; + case PRID_COMP_NETLOGIC: + cpu_probe_netlogic(c, cpu); + break; } BUG_ON(!__cpu_name[cpu]); diff --git a/trunk/arch/mips/kernel/csrc-bcm1480.c b/trunk/arch/mips/kernel/csrc-bcm1480.c index 51489f8a825e..f96f99c794a3 100644 --- a/trunk/arch/mips/kernel/csrc-bcm1480.c +++ b/trunk/arch/mips/kernel/csrc-bcm1480.c @@ -49,6 +49,5 @@ void __init sb1480_clocksource_init(void) plldiv = G_BCM1480_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG))); zbbus = ((plldiv >> 1) * 50000000) + ((plldiv & 1) * 25000000); - clocksource_set_clock(cs, zbbus); - clocksource_register(cs); + clocksource_register_hz(cs, zbbus); } diff --git a/trunk/arch/mips/kernel/csrc-ioasic.c b/trunk/arch/mips/kernel/csrc-ioasic.c index 23da108506b0..46bd7fa98d6c 100644 --- a/trunk/arch/mips/kernel/csrc-ioasic.c +++ b/trunk/arch/mips/kernel/csrc-ioasic.c @@ -59,7 +59,5 @@ void __init dec_ioasic_clocksource_init(void) printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq); clocksource_dec.rating = 200 + freq / 10000000; - clocksource_set_clock(&clocksource_dec, freq); - - clocksource_register(&clocksource_dec); + clocksource_register_hz(&clocksource_dec, freq); } diff --git a/trunk/arch/mips/kernel/csrc-powertv.c b/trunk/arch/mips/kernel/csrc-powertv.c index a27c16c8690e..2e7c5232da8d 100644 --- a/trunk/arch/mips/kernel/csrc-powertv.c +++ b/trunk/arch/mips/kernel/csrc-powertv.c @@ -78,9 +78,7 @@ static void __init powertv_c0_hpt_clocksource_init(void) clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000; - clocksource_set_clock(&clocksource_mips, mips_hpt_frequency); - - clocksource_register(&clocksource_mips); + clocksource_register_hz(&clocksource_mips, mips_hpt_frequency); } /** @@ -130,43 +128,16 @@ static struct clocksource clocksource_tim_c = { /** * powertv_tim_c_clocksource_init - set up a clock source for the TIM_C clock * - * The hard part here is coming up with a constant k and shift s such that - * the 48-bit TIM_C value multiplied by k doesn't overflow and that value, - * when shifted right by s, yields the corresponding number of nanoseconds. * We know that TIM_C counts at 27 MHz/8, so each cycle corresponds to - * 1 / (27,000,000/8) seconds. Multiply that by a billion and you get the - * number of nanoseconds. Since the TIM_C value has 48 bits and the math is - * done in 64 bits, avoiding an overflow means that k must be less than - * 64 - 48 = 16 bits. + * 1 / (27,000,000/8) seconds. 
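Note: the new set_elf_platform() helper only latches the platform string on CPU 0; that string is what user space later sees as AT_PLATFORM in the auxiliary vector. A small illustrative reader, assuming a glibc 2.16+ toolchain for getauxval():

    #include <stdio.h>
    #include <sys/auxv.h>    /* getauxval(), glibc 2.16+ */

    int main(void)
    {
        const char *plat = (const char *)getauxval(AT_PLATFORM);

        /* on the CPUs probed above this would print e.g. "loongson2f" or "octeon2" */
        printf("AT_PLATFORM: %s\n", plat ? plat : "(not provided)");
        return 0;
    }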
*/ static void __init powertv_tim_c_clocksource_init(void) { - int prescale; - unsigned long dividend; - unsigned long k; - int s; - const int max_k_bits = (64 - 48) - 1; - const unsigned long billion = 1000000000; const unsigned long counts_per_second = 27000000 / 8; - prescale = BITS_PER_LONG - ilog2(billion) - 1; - dividend = billion << prescale; - k = dividend / counts_per_second; - s = ilog2(k) - max_k_bits; - - if (s < 0) - s = prescale; - - else { - k >>= s; - s += prescale; - } - - clocksource_tim_c.mult = k; - clocksource_tim_c.shift = s; clocksource_tim_c.rating = 200; - clocksource_register(&clocksource_tim_c); + clocksource_register_hz(&clocksource_tim_c, counts_per_second); tim_c = (struct tim_c *) asic_reg_addr(tim_ch); } diff --git a/trunk/arch/mips/kernel/csrc-r4k.c b/trunk/arch/mips/kernel/csrc-r4k.c index e95a3cd48eea..decd1fa38d55 100644 --- a/trunk/arch/mips/kernel/csrc-r4k.c +++ b/trunk/arch/mips/kernel/csrc-r4k.c @@ -30,9 +30,7 @@ int __init init_r4k_clocksource(void) /* Calculate a somewhat reasonable rating value */ clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000; - clocksource_set_clock(&clocksource_mips, mips_hpt_frequency); - - clocksource_register(&clocksource_mips); + clocksource_register_hz(&clocksource_mips, mips_hpt_frequency); return 0; } diff --git a/trunk/arch/mips/kernel/csrc-sb1250.c b/trunk/arch/mips/kernel/csrc-sb1250.c index d14d3d1907fa..e9606d907685 100644 --- a/trunk/arch/mips/kernel/csrc-sb1250.c +++ b/trunk/arch/mips/kernel/csrc-sb1250.c @@ -65,6 +65,5 @@ void __init sb1250_clocksource_init(void) IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CFG))); - clocksource_set_clock(cs, V_SCD_TIMER_FREQ); - clocksource_register(cs); + clocksource_register_hz(cs, V_SCD_TIMER_FREQ); } diff --git a/trunk/arch/mips/kernel/entry.S b/trunk/arch/mips/kernel/entry.S index ffa331029e08..37acfa036d44 100644 --- a/trunk/arch/mips/kernel/entry.S +++ b/trunk/arch/mips/kernel/entry.S @@ -167,14 +167,13 @@ work_notifysig: # deal with pending signals and FEXPORT(syscall_exit_work_partial) SAVE_STATIC syscall_exit_work: - li t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT + li t0, _TIF_WORK_SYSCALL_EXIT and t0, a2 # a2 is preloaded with TI_FLAGS beqz t0, work_pending # trace bit set? 
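Note: all of the clocksource conversions in this series drop the hand-rolled mult/shift setup in favour of clocksource_register_hz(), which computes a suitable pair internally. The underlying relation is unchanged: ns = (cycles * mult) >> shift, and the removed powertv comment described the overflow constraint, namely that for a 48-bit counter mult has to fit in roughly 16 bits. A rough standalone illustration (not the kernel's exact algorithm; shift value chosen by hand):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
        uint64_t freq   = 27000000 / 8;    /* the TIM_C rate used above */
        uint32_t shift  = 6;               /* small, so mult fits in 16 bits for 48-bit deltas */
        uint32_t mult   = (uint32_t)((NSEC_PER_SEC << shift) / freq);
        uint64_t cycles = freq;            /* one second worth of ticks */

        /* prints roughly one second; the small error is integer rounding in mult */
        printf("mult=%u shift=%u -> %llu ns per %llu cycles\n",
               mult, shift,
               (unsigned long long)((cycles * mult) >> shift),
               (unsigned long long)cycles);
        return 0;
    }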
- local_irq_enable # could let do_syscall_trace() + local_irq_enable # could let syscall_trace_leave() # call schedule() instead move a0, sp - li a1, 1 - jal do_syscall_trace + jal syscall_trace_leave b resume_userspace #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT) diff --git a/trunk/arch/mips/kernel/ftrace.c b/trunk/arch/mips/kernel/ftrace.c index 94ca2b018af7..feb8021a305f 100644 --- a/trunk/arch/mips/kernel/ftrace.c +++ b/trunk/arch/mips/kernel/ftrace.c @@ -23,6 +23,7 @@ #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ #define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ +#define JUMP_RANGE_MASK ((1UL << 28) - 1) #define INSN_NOP 0x00000000 /* nop */ #define INSN_JAL(addr) \ @@ -44,12 +45,12 @@ static inline void ftrace_dyn_arch_init_insns(void) /* jal (ftrace_caller + 8), jump over the first two instruction */ buf = (u32 *)&insn_jal_ftrace_caller; - uasm_i_jal(&buf, (FTRACE_ADDR + 8)); + uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK); #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* j ftrace_graph_caller */ buf = (u32 *)&insn_j_ftrace_graph_caller; - uasm_i_j(&buf, (unsigned long)ftrace_graph_caller); + uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK); #endif } diff --git a/trunk/arch/mips/kernel/i8253.c b/trunk/arch/mips/kernel/i8253.c index 2392a7a296d4..391221b6a6aa 100644 --- a/trunk/arch/mips/kernel/i8253.c +++ b/trunk/arch/mips/kernel/i8253.c @@ -125,87 +125,11 @@ void __init setup_pit_timer(void) setup_irq(0, &irq0); } -/* - * Since the PIT overflows every tick, its not very useful - * to just read by itself. So use jiffies to emulate a free - * running counter: - */ -static cycle_t pit_read(struct clocksource *cs) -{ - unsigned long flags; - int count; - u32 jifs; - static int old_count; - static u32 old_jifs; - - raw_spin_lock_irqsave(&i8253_lock, flags); - /* - * Although our caller may have the read side of xtime_lock, - * this is now a seqlock, and we are cheating in this routine - * by having side effects on state that we cannot undo if - * there is a collision on the seqlock and our caller has to - * retry. (Namely, old_jifs and old_count.) So we must treat - * jiffies as volatile despite the lock. We read jiffies - * before latching the timer count to guarantee that although - * the jiffies value might be older than the count (that is, - * the counter may underflow between the last point where - * jiffies was incremented and the point where we latch the - * count), it cannot be newer. - */ - jifs = jiffies; - outb_p(0x00, PIT_MODE); /* latch the count ASAP */ - count = inb_p(PIT_CH0); /* read the latched count */ - count |= inb_p(PIT_CH0) << 8; - - /* VIA686a test code... reset the latch if count > max + 1 */ - if (count > LATCH) { - outb_p(0x34, PIT_MODE); - outb_p(LATCH & 0xff, PIT_CH0); - outb(LATCH >> 8, PIT_CH0); - count = LATCH - 1; - } - - /* - * It's possible for count to appear to go the wrong way for a - * couple of reasons: - * - * 1. The timer counter underflows, but we haven't handled the - * resulting interrupt and incremented jiffies yet. - * 2. Hardware problem with the timer, not giving us continuous time, - * the counter does small "jumps" upwards on some Pentium systems, - * (see c't 95/10 page 335 for Neptun bug.) - * - * Previous attempts to handle these cases intelligently were - * buggy, so we just do the simple thing now. 
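Note on the ftrace change above: MIPS j/jal instructions carry only a 26-bit word index, so the target must lie in the same 256 MB segment as the call site; JUMP_RANGE_MASK keeps the address handed to uasm inside that segment. A sketch of the encoding, with the masks copied from the code and a hypothetical text address:

    #include <stdint.h>
    #include <stdio.h>

    #define JAL             0x0c000000
    #define ADDR_MASK       0x03ffffff          /* 26-bit word index */
    #define JUMP_RANGE_MASK ((1UL << 28) - 1)   /* 256 MB segment */

    int main(void)
    {
        unsigned long ftrace_addr = 0x80123454UL;            /* hypothetical target */
        unsigned long in_range    = ftrace_addr & JUMP_RANGE_MASK;
        uint32_t insn             = JAL | ((in_range >> 2) & ADDR_MASK);

        printf("jal encodes word index 0x%07x for in-segment target 0x%08lx\n",
               insn & ADDR_MASK, in_range);
        return 0;
    }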
- */ - if (count > old_count && jifs == old_jifs) { - count = old_count; - } - old_count = count; - old_jifs = jifs; - - raw_spin_unlock_irqrestore(&i8253_lock, flags); - - count = (LATCH - 1) - count; - - return (cycle_t)(jifs * LATCH) + count; -} - -static struct clocksource clocksource_pit = { - .name = "pit", - .rating = 110, - .read = pit_read, - .mask = CLOCKSOURCE_MASK(32), - .mult = 0, - .shift = 20, -}; - static int __init init_pit_clocksource(void) { if (num_possible_cpus() > 1) /* PIT does not scale! */ return 0; - clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20); - return clocksource_register(&clocksource_pit); + return clocksource_i8253_init(); } arch_initcall(init_pit_clocksource); diff --git a/trunk/arch/mips/kernel/ptrace.c b/trunk/arch/mips/kernel/ptrace.c index d21c388c0116..4e6ea1ffad46 100644 --- a/trunk/arch/mips/kernel/ptrace.c +++ b/trunk/arch/mips/kernel/ptrace.c @@ -533,15 +533,10 @@ static inline int audit_arch(void) * Notification of system call entry/exit * - triggered by current->work.syscall_trace */ -asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit) +asmlinkage void syscall_trace_enter(struct pt_regs *regs) { /* do the secure computing check first */ - if (!entryexit) - secure_computing(regs->regs[2]); - - if (unlikely(current->audit_context) && entryexit) - audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]), - regs->regs[2]); + secure_computing(regs->regs[2]); if (!(current->ptrace & PT_PTRACED)) goto out; @@ -565,8 +560,40 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit) } out: - if (unlikely(current->audit_context) && !entryexit) + if (unlikely(current->audit_context)) audit_syscall_entry(audit_arch(), regs->regs[2], regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]); } + +/* + * Notification of system call entry/exit + * - triggered by current->work.syscall_trace + */ +asmlinkage void syscall_trace_leave(struct pt_regs *regs) +{ + if (unlikely(current->audit_context)) + audit_syscall_exit(AUDITSC_RESULT(regs->regs[7]), + -regs->regs[2]); + + if (!(current->ptrace & PT_PTRACED)) + return; + + if (!test_thread_flag(TIF_SYSCALL_TRACE)) + return; + + /* The 0x80 provides a way for the tracing parent to distinguish + between a syscall stop and SIGTRAP delivery */ + ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? + 0x80 : 0)); + + /* + * this isn't the same as continuing with a signal, but it will do + * for normal use. strace only continues with a signal if the + * stopping signal is not SIGTRAP. 
-brl + */ + if (current->exit_code) { + send_sig(current->exit_code, current, 1); + current->exit_code = 0; + } +} diff --git a/trunk/arch/mips/kernel/scall32-o32.S b/trunk/arch/mips/kernel/scall32-o32.S index 7f5468b38d4c..7a8e1dd7f6f2 100644 --- a/trunk/arch/mips/kernel/scall32-o32.S +++ b/trunk/arch/mips/kernel/scall32-o32.S @@ -88,8 +88,7 @@ syscall_trace_entry: SAVE_STATIC move s0, t2 move a0, sp - li a1, 0 - jal do_syscall_trace + jal syscall_trace_enter move t0, s0 RESTORE_STATIC @@ -565,7 +564,7 @@ einval: li v0, -ENOSYS sys sys_ioprio_get 2 /* 4315 */ sys sys_utimensat 4 sys sys_signalfd 3 - sys sys_ni_syscall 0 + sys sys_ni_syscall 0 /* was timerfd */ sys sys_eventfd 1 sys sys_fallocate 6 /* 4320 */ sys sys_timerfd_create 2 diff --git a/trunk/arch/mips/kernel/scall64-64.S b/trunk/arch/mips/kernel/scall64-64.S index a2e1fcbc41dc..2d31c83224f9 100644 --- a/trunk/arch/mips/kernel/scall64-64.S +++ b/trunk/arch/mips/kernel/scall64-64.S @@ -91,8 +91,7 @@ syscall_trace_entry: SAVE_STATIC move s0, t2 move a0, sp - li a1, 0 - jal do_syscall_trace + jal syscall_trace_enter move t0, s0 RESTORE_STATIC @@ -404,7 +403,7 @@ sys_call_table: PTR sys_ioprio_get PTR sys_utimensat /* 5275 */ PTR sys_signalfd - PTR sys_ni_syscall + PTR sys_ni_syscall /* was timerfd */ PTR sys_eventfd PTR sys_fallocate PTR sys_timerfd_create /* 5280 */ diff --git a/trunk/arch/mips/kernel/scall64-n32.S b/trunk/arch/mips/kernel/scall64-n32.S index b2c7624995b8..38a0503b9a4a 100644 --- a/trunk/arch/mips/kernel/scall64-n32.S +++ b/trunk/arch/mips/kernel/scall64-n32.S @@ -89,8 +89,7 @@ n32_syscall_trace_entry: SAVE_STATIC move s0, t2 move a0, sp - li a1, 0 - jal do_syscall_trace + jal syscall_trace_enter move t0, s0 RESTORE_STATIC @@ -403,7 +402,7 @@ EXPORT(sysn32_call_table) PTR sys_ioprio_get PTR compat_sys_utimensat PTR compat_sys_signalfd /* 6280 */ - PTR sys_ni_syscall + PTR sys_ni_syscall /* was timerfd */ PTR sys_eventfd PTR sys_fallocate PTR sys_timerfd_create diff --git a/trunk/arch/mips/kernel/scall64-o32.S b/trunk/arch/mips/kernel/scall64-o32.S index 049a9c8c49a0..91ea5e4041dd 100644 --- a/trunk/arch/mips/kernel/scall64-o32.S +++ b/trunk/arch/mips/kernel/scall64-o32.S @@ -123,8 +123,7 @@ trace_a_syscall: move s0, t2 # Save syscall pointer move a0, sp - li a1, 0 - jal do_syscall_trace + jal syscall_trace_enter move t0, s0 RESTORE_STATIC @@ -522,7 +521,7 @@ sys_call_table: PTR sys_ioprio_get /* 4315 */ PTR compat_sys_utimensat PTR compat_sys_signalfd - PTR sys_ni_syscall + PTR sys_ni_syscall /* was timerfd */ PTR sys_eventfd PTR sys32_fallocate /* 4320 */ PTR sys_timerfd_create diff --git a/trunk/arch/mips/kernel/smtc.c b/trunk/arch/mips/kernel/smtc.c index 5a88cc4ccd5a..cedac4633741 100644 --- a/trunk/arch/mips/kernel/smtc.c +++ b/trunk/arch/mips/kernel/smtc.c @@ -929,7 +929,7 @@ static void post_direct_ipi(int cpu, struct smtc_ipi *pipi) static void ipi_resched_interrupt(void) { - /* Return from interrupt should be enough to cause scheduler check */ + scheduler_ipi(); } static void ipi_call_interrupt(void) diff --git a/trunk/arch/mips/kernel/syscall.c b/trunk/arch/mips/kernel/syscall.c index 58beabf50b3c..d02765708ddb 100644 --- a/trunk/arch/mips/kernel/syscall.c +++ b/trunk/arch/mips/kernel/syscall.c @@ -10,12 +10,9 @@ #include #include #include -#include #include #include -#include #include -#include #include #include #include @@ -25,11 +22,9 @@ #include #include #include -#include #include #include #include -#include #include #include @@ -66,121 +61,6 @@ asmlinkage int sysm_pipe(nabi_no_regargs volatile 
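Note: with the enter/leave split above, a traced task still stops twice per system call, and a tracer that sets PTRACE_O_TRACESYSGOOD sees those stops as SIGTRAP | 0x80, exactly as the comment in syscall_trace_leave() says. A minimal user-space tracer loop using the standard ptrace API (error handling omitted, /bin/true chosen only as a convenient tracee):

    #include <signal.h>
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t pid = fork();

        if (pid == 0) {
            ptrace(PTRACE_TRACEME, 0, NULL, NULL);
            raise(SIGSTOP);                      /* let the parent set options first */
            execl("/bin/true", "true", (char *)NULL);
            _exit(1);
        }

        int status;
        waitpid(pid, &status, 0);                /* initial SIGSTOP */
        ptrace(PTRACE_SETOPTIONS, pid, NULL, (void *)PTRACE_O_TRACESYSGOOD);

        for (;;) {
            ptrace(PTRACE_SYSCALL, pid, NULL, NULL);   /* run to next entry or exit */
            waitpid(pid, &status, 0);
            if (WIFEXITED(status))
                break;
            if (WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80))
                puts("syscall stop (entry or exit)");
        }
        return 0;
    }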
struct pt_regs regs) return res; } -unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ - -EXPORT_SYMBOL(shm_align_mask); - -#define COLOUR_ALIGN(addr,pgoff) \ - ((((addr) + shm_align_mask) & ~shm_align_mask) + \ - (((pgoff) << PAGE_SHIFT) & shm_align_mask)) - -unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, unsigned long flags) -{ - struct vm_area_struct * vmm; - int do_color_align; - unsigned long task_size; - -#ifdef CONFIG_32BIT - task_size = TASK_SIZE; -#else /* Must be CONFIG_64BIT*/ - task_size = test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE; -#endif - - if (len > task_size) - return -ENOMEM; - - if (flags & MAP_FIXED) { - /* Even MAP_FIXED mappings must reside within task_size. */ - if (task_size - len < addr) - return -EINVAL; - - /* - * We do not accept a shared mapping if it would violate - * cache aliasing constraints. - */ - if ((flags & MAP_SHARED) && - ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) - return -EINVAL; - return addr; - } - - do_color_align = 0; - if (filp || (flags & MAP_SHARED)) - do_color_align = 1; - if (addr) { - if (do_color_align) - addr = COLOUR_ALIGN(addr, pgoff); - else - addr = PAGE_ALIGN(addr); - vmm = find_vma(current->mm, addr); - if (task_size - len >= addr && - (!vmm || addr + len <= vmm->vm_start)) - return addr; - } - addr = current->mm->mmap_base; - if (do_color_align) - addr = COLOUR_ALIGN(addr, pgoff); - else - addr = PAGE_ALIGN(addr); - - for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) { - /* At this point: (!vmm || addr < vmm->vm_end). */ - if (task_size - len < addr) - return -ENOMEM; - if (!vmm || addr + len <= vmm->vm_start) - return addr; - addr = vmm->vm_end; - if (do_color_align) - addr = COLOUR_ALIGN(addr, pgoff); - } -} - -void arch_pick_mmap_layout(struct mm_struct *mm) -{ - unsigned long random_factor = 0UL; - - if (current->flags & PF_RANDOMIZE) { - random_factor = get_random_int(); - random_factor = random_factor << PAGE_SHIFT; - if (TASK_IS_32BIT_ADDR) - random_factor &= 0xfffffful; - else - random_factor &= 0xffffffful; - } - - mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; - mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; -} - -static inline unsigned long brk_rnd(void) -{ - unsigned long rnd = get_random_int(); - - rnd = rnd << PAGE_SHIFT; - /* 8MB for 32bit, 256MB for 64bit */ - if (TASK_IS_32BIT_ADDR) - rnd = rnd & 0x7ffffful; - else - rnd = rnd & 0xffffffful; - - return rnd; -} - -unsigned long arch_randomize_brk(struct mm_struct *mm) -{ - unsigned long base = mm->brk; - unsigned long ret; - - ret = PAGE_ALIGN(base + brk_rnd()); - - if (ret < mm->brk) - return mm->brk; - - return ret; -} - SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, off_t, offset) diff --git a/trunk/arch/mips/kernel/traps.c b/trunk/arch/mips/kernel/traps.c index 71350f7f2d88..e9b3af27d844 100644 --- a/trunk/arch/mips/kernel/traps.c +++ b/trunk/arch/mips/kernel/traps.c @@ -374,7 +374,8 @@ void __noreturn die(const char *str, struct pt_regs *regs) unsigned long dvpret = dvpe(); #endif /* CONFIG_MIPS_MT_SMTC */ - notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV); + if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP) + sig = 0; console_verbose(); spin_lock_irq(&die_lock); @@ -383,9 +384,6 @@ void __noreturn die(const char *str, struct pt_regs *regs) mips_mt_regdump(dvpret); 
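Note: the shm_align_mask/COLOUR_ALIGN logic removed from syscall.c above exists so that shared mappings land at addresses whose low bits agree with the file offset, preventing the same page from appearing under two conflicting cache colours (presumably the code moves to a common MIPS mmap helper elsewhere in the series rather than being dropped). The alignment arithmetic on its own, with a hypothetical 16 KB alias mask:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long shm_align_mask = 0x3fff;   /* hypothetical 16 KB aliasing granularity */
        unsigned long addr  = 0x2aaab123;        /* requested address */
        unsigned long pgoff = 5;                 /* file offset in pages */

        unsigned long aligned = (((addr + shm_align_mask) & ~shm_align_mask) +
                                 ((pgoff << PAGE_SHIFT) & shm_align_mask));

        /* low bits of the result now match the low bits of the file offset */
        printf("addr 0x%lx, pgoff %lu -> colour-aligned 0x%lx\n", addr, pgoff, aligned);
        return 0;
    }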
#endif /* CONFIG_MIPS_MT_SMTC */ - if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP) - sig = 0; - printk("%s[#%d]:\n", str, ++die_counter); show_registers(regs); add_taint(TAINT_DIE); diff --git a/trunk/arch/mips/kernel/vmlinux.lds.S b/trunk/arch/mips/kernel/vmlinux.lds.S index 832afbb87588..01af3876cf90 100644 --- a/trunk/arch/mips/kernel/vmlinux.lds.S +++ b/trunk/arch/mips/kernel/vmlinux.lds.S @@ -65,15 +65,18 @@ SECTIONS NOTES :text :note .dummy : { *(.dummy) } :text + _sdata = .; /* Start of data section */ RODATA /* writeable */ + _sdata = .; /* Start of data section */ .data : { /* Data */ . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */ INIT_TASK_DATA(PAGE_SIZE) NOSAVE_DATA CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) + READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) DATA_DATA CONSTRUCTORS } diff --git a/trunk/arch/mips/lantiq/Kconfig b/trunk/arch/mips/lantiq/Kconfig new file mode 100644 index 000000000000..3fccf2104513 --- /dev/null +++ b/trunk/arch/mips/lantiq/Kconfig @@ -0,0 +1,23 @@ +if LANTIQ + +config SOC_TYPE_XWAY + bool + default n + +choice + prompt "SoC Type" + default SOC_XWAY + +config SOC_AMAZON_SE + bool "Amazon SE" + select SOC_TYPE_XWAY + +config SOC_XWAY + bool "XWAY" + select SOC_TYPE_XWAY + select HW_HAS_PCI +endchoice + +source "arch/mips/lantiq/xway/Kconfig" + +endif diff --git a/trunk/arch/mips/lantiq/Makefile b/trunk/arch/mips/lantiq/Makefile new file mode 100644 index 000000000000..e5dae0e24b00 --- /dev/null +++ b/trunk/arch/mips/lantiq/Makefile @@ -0,0 +1,11 @@ +# Copyright (C) 2010 John Crispin +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 as published +# by the Free Software Foundation. + +obj-y := irq.o setup.o clk.o prom.o devices.o + +obj-$(CONFIG_EARLY_PRINTK) += early_printk.o + +obj-$(CONFIG_SOC_TYPE_XWAY) += xway/ diff --git a/trunk/arch/mips/lantiq/Platform b/trunk/arch/mips/lantiq/Platform new file mode 100644 index 000000000000..f3dff05722de --- /dev/null +++ b/trunk/arch/mips/lantiq/Platform @@ -0,0 +1,8 @@ +# +# Lantiq +# + +platform-$(CONFIG_LANTIQ) += lantiq/ +cflags-$(CONFIG_LANTIQ) += -I$(srctree)/arch/mips/include/asm/mach-lantiq +load-$(CONFIG_LANTIQ) = 0xffffffff80002000 +cflags-$(CONFIG_SOC_TYPE_XWAY) += -I$(srctree)/arch/mips/include/asm/mach-lantiq/xway diff --git a/trunk/arch/mips/lantiq/clk.c b/trunk/arch/mips/lantiq/clk.c new file mode 100644 index 000000000000..94560899d13e --- /dev/null +++ b/trunk/arch/mips/lantiq/clk.c @@ -0,0 +1,140 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Copyright (C) 2010 Thomas Langer + * Copyright (C) 2010 John Crispin + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "clk.h" + +struct clk { + const char *name; + unsigned long rate; + unsigned long (*get_rate) (void); +}; + +static struct clk *cpu_clk; +static int cpu_clk_cnt; + +/* lantiq socs have 3 static clocks */ +static struct clk cpu_clk_generic[] = { + { + .name = "cpu", + .get_rate = ltq_get_cpu_hz, + }, { + .name = "fpi", + .get_rate = ltq_get_fpi_hz, + }, { + .name = "io", + .get_rate = ltq_get_io_region_clock, + }, +}; + +static struct resource ltq_cgu_resource = { + .name = "cgu", + .start = LTQ_CGU_BASE_ADDR, + .end = LTQ_CGU_BASE_ADDR + LTQ_CGU_SIZE - 1, + .flags = IORESOURCE_MEM, +}; + +/* remapped clock register range */ +void __iomem *ltq_cgu_membase; + +void clk_init(void) +{ + cpu_clk = cpu_clk_generic; + cpu_clk_cnt = ARRAY_SIZE(cpu_clk_generic); +} + +static inline int clk_good(struct clk *clk) +{ + return clk && !IS_ERR(clk); +} + +unsigned long clk_get_rate(struct clk *clk) +{ + if (unlikely(!clk_good(clk))) + return 0; + + if (clk->rate != 0) + return clk->rate; + + if (clk->get_rate != NULL) + return clk->get_rate(); + + return 0; +} +EXPORT_SYMBOL(clk_get_rate); + +struct clk *clk_get(struct device *dev, const char *id) +{ + int i; + + for (i = 0; i < cpu_clk_cnt; i++) + if (!strcmp(id, cpu_clk[i].name)) + return &cpu_clk[i]; + BUG(); + return ERR_PTR(-ENOENT); +} +EXPORT_SYMBOL(clk_get); + +void clk_put(struct clk *clk) +{ + /* not used */ +} +EXPORT_SYMBOL(clk_put); + +static inline u32 ltq_get_counter_resolution(void) +{ + u32 res; + + __asm__ __volatile__( + ".set push\n" + ".set mips32r2\n" + "rdhwr %0, $3\n" + ".set pop\n" + : "=&r" (res) + : /* no input */ + : "memory"); + + return res; +} + +void __init plat_time_init(void) +{ + struct clk *clk; + + if (insert_resource(&iomem_resource, <q_cgu_resource) < 0) + panic("Failed to insert cgu memory\n"); + + if (request_mem_region(ltq_cgu_resource.start, + resource_size(<q_cgu_resource), "cgu") < 0) + panic("Failed to request cgu memory\n"); + + ltq_cgu_membase = ioremap_nocache(ltq_cgu_resource.start, + resource_size(<q_cgu_resource)); + if (!ltq_cgu_membase) { + pr_err("Failed to remap cgu memory\n"); + unreachable(); + } + clk = clk_get(0, "cpu"); + mips_hpt_frequency = clk_get_rate(clk) / ltq_get_counter_resolution(); + write_c0_compare(read_c0_count()); + clk_put(clk); +} diff --git a/trunk/arch/mips/lantiq/clk.h b/trunk/arch/mips/lantiq/clk.h new file mode 100644 index 000000000000..3328925f2c3f --- /dev/null +++ b/trunk/arch/mips/lantiq/clk.h @@ -0,0 +1,18 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
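Note: plat_time_init() above divides the CPU clock by the value read with rdhwr $3 (the CP0 counter-resolution hardware register: 1 if c0_count ticks every cycle, 2 if every other cycle) to get mips_hpt_frequency. In plain terms, with hypothetical numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned long cpu_hz     = 333000000;  /* hypothetical ltq_get_cpu_hz() result */
        unsigned int  resolution = 2;          /* typical rdhwr $3 value on cores that tick c0_count every other cycle */
        unsigned long hpt_hz     = cpu_hz / resolution;

        printf("c0_count runs at %lu Hz\n", hpt_hz);
        return 0;
    }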
+ * + * Copyright (C) 2010 John Crispin + */ + +#ifndef _LTQ_CLK_H__ +#define _LTQ_CLK_H__ + +extern void clk_init(void); + +extern unsigned long ltq_get_cpu_hz(void); +extern unsigned long ltq_get_fpi_hz(void); +extern unsigned long ltq_get_io_region_clock(void); + +#endif diff --git a/trunk/arch/mips/lantiq/devices.c b/trunk/arch/mips/lantiq/devices.c new file mode 100644 index 000000000000..7b82c34cb169 --- /dev/null +++ b/trunk/arch/mips/lantiq/devices.c @@ -0,0 +1,122 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2010 John Crispin + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include "devices.h" + +/* nor flash */ +static struct resource ltq_nor_resource = { + .name = "nor", + .start = LTQ_FLASH_START, + .end = LTQ_FLASH_START + LTQ_FLASH_MAX - 1, + .flags = IORESOURCE_MEM, +}; + +static struct platform_device ltq_nor = { + .name = "ltq_nor", + .resource = <q_nor_resource, + .num_resources = 1, +}; + +void __init ltq_register_nor(struct physmap_flash_data *data) +{ + ltq_nor.dev.platform_data = data; + platform_device_register(<q_nor); +} + +/* watchdog */ +static struct resource ltq_wdt_resource = { + .name = "watchdog", + .start = LTQ_WDT_BASE_ADDR, + .end = LTQ_WDT_BASE_ADDR + LTQ_WDT_SIZE - 1, + .flags = IORESOURCE_MEM, +}; + +void __init ltq_register_wdt(void) +{ + platform_device_register_simple("ltq_wdt", 0, <q_wdt_resource, 1); +} + +/* asc ports */ +static struct resource ltq_asc0_resources[] = { + { + .name = "asc0", + .start = LTQ_ASC0_BASE_ADDR, + .end = LTQ_ASC0_BASE_ADDR + LTQ_ASC_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + IRQ_RES(tx, LTQ_ASC_TIR(0)), + IRQ_RES(rx, LTQ_ASC_RIR(0)), + IRQ_RES(err, LTQ_ASC_EIR(0)), +}; + +static struct resource ltq_asc1_resources[] = { + { + .name = "asc1", + .start = LTQ_ASC1_BASE_ADDR, + .end = LTQ_ASC1_BASE_ADDR + LTQ_ASC_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + IRQ_RES(tx, LTQ_ASC_TIR(1)), + IRQ_RES(rx, LTQ_ASC_RIR(1)), + IRQ_RES(err, LTQ_ASC_EIR(1)), +}; + +void __init ltq_register_asc(int port) +{ + switch (port) { + case 0: + platform_device_register_simple("ltq_asc", 0, + ltq_asc0_resources, ARRAY_SIZE(ltq_asc0_resources)); + break; + case 1: + platform_device_register_simple("ltq_asc", 1, + ltq_asc1_resources, ARRAY_SIZE(ltq_asc1_resources)); + break; + default: + break; + } +} + +#ifdef CONFIG_PCI +/* pci */ +static struct platform_device ltq_pci = { + .name = "ltq_pci", + .num_resources = 0, +}; + +void __init ltq_register_pci(struct ltq_pci_data *data) +{ + ltq_pci.dev.platform_data = data; + platform_device_register(<q_pci); +} +#else +void __init ltq_register_pci(struct ltq_pci_data *data) +{ + pr_err("kernel is compiled without PCI support\n"); +} +#endif diff --git a/trunk/arch/mips/lantiq/devices.h b/trunk/arch/mips/lantiq/devices.h new file mode 100644 index 000000000000..2947bb19a528 --- /dev/null +++ b/trunk/arch/mips/lantiq/devices.h @@ -0,0 +1,23 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Copyright (C) 2010 John Crispin + */ + +#ifndef _LTQ_DEVICES_H__ +#define _LTQ_DEVICES_H__ + +#include +#include + +#define IRQ_RES(resname, irq) \ + {.name = #resname, .start = (irq), .flags = IORESOURCE_IRQ} + +extern void ltq_register_nor(struct physmap_flash_data *data); +extern void ltq_register_wdt(void); +extern void ltq_register_asc(int port); +extern void ltq_register_pci(struct ltq_pci_data *data); + +#endif diff --git a/trunk/arch/mips/lantiq/early_printk.c b/trunk/arch/mips/lantiq/early_printk.c new file mode 100644 index 000000000000..972e05f87631 --- /dev/null +++ b/trunk/arch/mips/lantiq/early_printk.c @@ -0,0 +1,33 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2010 John Crispin + */ + +#include +#include + +#include +#include + +/* no ioremap possible at this early stage, lets use KSEG1 instead */ +#define LTQ_ASC_BASE KSEG1ADDR(LTQ_ASC1_BASE_ADDR) +#define ASC_BUF 1024 +#define LTQ_ASC_FSTAT ((u32 *)(LTQ_ASC_BASE + 0x0048)) +#define LTQ_ASC_TBUF ((u32 *)(LTQ_ASC_BASE + 0x0020)) +#define TXMASK 0x3F00 +#define TXOFFSET 8 + +void prom_putchar(char c) +{ + unsigned long flags; + + local_irq_save(flags); + do { } while ((ltq_r32(LTQ_ASC_FSTAT) & TXMASK) >> TXOFFSET); + if (c == '\n') + ltq_w32('\r', LTQ_ASC_TBUF); + ltq_w32(c, LTQ_ASC_TBUF); + local_irq_restore(flags); +} diff --git a/trunk/arch/mips/lantiq/irq.c b/trunk/arch/mips/lantiq/irq.c new file mode 100644 index 000000000000..fc89795cafdb --- /dev/null +++ b/trunk/arch/mips/lantiq/irq.c @@ -0,0 +1,326 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
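Note: prom_putchar() above is the classic polled early console: spin until the TX FIFO fill field reads zero, emit a carriage return before each newline, then store the byte. The same shape as a self-contained sketch, with fake registers standing in for the memory-mapped FIFO status/data pair:

    #include <stdint.h>
    #include <stdio.h>

    #define TXMASK   0x3F00
    #define TXOFFSET 8

    /* fake "registers" so the sketch runs without hardware */
    static uint32_t fstat;                         /* pretend the FIFO is always empty */
    static void tbuf_write(uint32_t c) { putchar((int)c); }

    static void early_putchar(char c)
    {
        while ((fstat & TXMASK) >> TXOFFSET)
            ;                                      /* wait for room in the TX FIFO */
        if (c == '\n')
            tbuf_write('\r');                      /* serial consoles want CR before LF */
        tbuf_write((uint32_t)c);
    }

    int main(void)
    {
        const char *s = "early console up\n";
        while (*s)
            early_putchar(*s++);
        return 0;
    }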
+ * + * Copyright (C) 2010 John Crispin + * Copyright (C) 2010 Thomas Langer + */ + +#include +#include + +#include +#include + +#include +#include + +/* register definitions */ +#define LTQ_ICU_IM0_ISR 0x0000 +#define LTQ_ICU_IM0_IER 0x0008 +#define LTQ_ICU_IM0_IOSR 0x0010 +#define LTQ_ICU_IM0_IRSR 0x0018 +#define LTQ_ICU_IM0_IMR 0x0020 +#define LTQ_ICU_IM1_ISR 0x0028 +#define LTQ_ICU_OFFSET (LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR) + +#define LTQ_EIU_EXIN_C 0x0000 +#define LTQ_EIU_EXIN_INIC 0x0004 +#define LTQ_EIU_EXIN_INEN 0x000C + +/* irq numbers used by the external interrupt unit (EIU) */ +#define LTQ_EIU_IR0 (INT_NUM_IM4_IRL0 + 30) +#define LTQ_EIU_IR1 (INT_NUM_IM3_IRL0 + 31) +#define LTQ_EIU_IR2 (INT_NUM_IM1_IRL0 + 26) +#define LTQ_EIU_IR3 INT_NUM_IM1_IRL0 +#define LTQ_EIU_IR4 (INT_NUM_IM1_IRL0 + 1) +#define LTQ_EIU_IR5 (INT_NUM_IM1_IRL0 + 2) +#define LTQ_EIU_IR6 (INT_NUM_IM2_IRL0 + 30) + +#define MAX_EIU 6 + +/* irqs generated by device attached to the EBU need to be acked in + * a special manner + */ +#define LTQ_ICU_EBU_IRQ 22 + +#define ltq_icu_w32(x, y) ltq_w32((x), ltq_icu_membase + (y)) +#define ltq_icu_r32(x) ltq_r32(ltq_icu_membase + (x)) + +#define ltq_eiu_w32(x, y) ltq_w32((x), ltq_eiu_membase + (y)) +#define ltq_eiu_r32(x) ltq_r32(ltq_eiu_membase + (x)) + +static unsigned short ltq_eiu_irq[MAX_EIU] = { + LTQ_EIU_IR0, + LTQ_EIU_IR1, + LTQ_EIU_IR2, + LTQ_EIU_IR3, + LTQ_EIU_IR4, + LTQ_EIU_IR5, +}; + +static struct resource ltq_icu_resource = { + .name = "icu", + .start = LTQ_ICU_BASE_ADDR, + .end = LTQ_ICU_BASE_ADDR + LTQ_ICU_SIZE - 1, + .flags = IORESOURCE_MEM, +}; + +static struct resource ltq_eiu_resource = { + .name = "eiu", + .start = LTQ_EIU_BASE_ADDR, + .end = LTQ_EIU_BASE_ADDR + LTQ_ICU_SIZE - 1, + .flags = IORESOURCE_MEM, +}; + +static void __iomem *ltq_icu_membase; +static void __iomem *ltq_eiu_membase; + +void ltq_disable_irq(struct irq_data *d) +{ + u32 ier = LTQ_ICU_IM0_IER; + int irq_nr = d->irq - INT_NUM_IRQ0; + + ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); + irq_nr %= INT_NUM_IM_OFFSET; + ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier); +} + +void ltq_mask_and_ack_irq(struct irq_data *d) +{ + u32 ier = LTQ_ICU_IM0_IER; + u32 isr = LTQ_ICU_IM0_ISR; + int irq_nr = d->irq - INT_NUM_IRQ0; + + ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); + isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); + irq_nr %= INT_NUM_IM_OFFSET; + ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier); + ltq_icu_w32((1 << irq_nr), isr); +} + +static void ltq_ack_irq(struct irq_data *d) +{ + u32 isr = LTQ_ICU_IM0_ISR; + int irq_nr = d->irq - INT_NUM_IRQ0; + + isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); + irq_nr %= INT_NUM_IM_OFFSET; + ltq_icu_w32((1 << irq_nr), isr); +} + +void ltq_enable_irq(struct irq_data *d) +{ + u32 ier = LTQ_ICU_IM0_IER; + int irq_nr = d->irq - INT_NUM_IRQ0; + + ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); + irq_nr %= INT_NUM_IM_OFFSET; + ltq_icu_w32(ltq_icu_r32(ier) | (1 << irq_nr), ier); +} + +static unsigned int ltq_startup_eiu_irq(struct irq_data *d) +{ + int i; + int irq_nr = d->irq - INT_NUM_IRQ0; + + ltq_enable_irq(d); + for (i = 0; i < MAX_EIU; i++) { + if (irq_nr == ltq_eiu_irq[i]) { + /* low level - we should really handle set_type */ + ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) | + (0x6 << (i * 4)), LTQ_EIU_EXIN_C); + /* clear all pending */ + ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~(1 << i), + LTQ_EIU_EXIN_INIC); + /* enable */ + ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | (1 << i), + LTQ_EIU_EXIN_INEN); + break; + 
} + } + + return 0; +} + +static void ltq_shutdown_eiu_irq(struct irq_data *d) +{ + int i; + int irq_nr = d->irq - INT_NUM_IRQ0; + + ltq_disable_irq(d); + for (i = 0; i < MAX_EIU; i++) { + if (irq_nr == ltq_eiu_irq[i]) { + /* disable */ + ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i), + LTQ_EIU_EXIN_INEN); + break; + } + } +} + +static struct irq_chip ltq_irq_type = { + "icu", + .irq_enable = ltq_enable_irq, + .irq_disable = ltq_disable_irq, + .irq_unmask = ltq_enable_irq, + .irq_ack = ltq_ack_irq, + .irq_mask = ltq_disable_irq, + .irq_mask_ack = ltq_mask_and_ack_irq, +}; + +static struct irq_chip ltq_eiu_type = { + "eiu", + .irq_startup = ltq_startup_eiu_irq, + .irq_shutdown = ltq_shutdown_eiu_irq, + .irq_enable = ltq_enable_irq, + .irq_disable = ltq_disable_irq, + .irq_unmask = ltq_enable_irq, + .irq_ack = ltq_ack_irq, + .irq_mask = ltq_disable_irq, + .irq_mask_ack = ltq_mask_and_ack_irq, +}; + +static void ltq_hw_irqdispatch(int module) +{ + u32 irq; + + irq = ltq_icu_r32(LTQ_ICU_IM0_IOSR + (module * LTQ_ICU_OFFSET)); + if (irq == 0) + return; + + /* silicon bug causes only the msb set to 1 to be valid. all + * other bits might be bogus + */ + irq = __fls(irq); + do_IRQ((int)irq + INT_NUM_IM0_IRL0 + (INT_NUM_IM_OFFSET * module)); + + /* if this is a EBU irq, we need to ack it or get a deadlock */ + if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0)) + ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10, + LTQ_EBU_PCC_ISTAT); +} + +#define DEFINE_HWx_IRQDISPATCH(x) \ + static void ltq_hw ## x ## _irqdispatch(void) \ + { \ + ltq_hw_irqdispatch(x); \ + } +DEFINE_HWx_IRQDISPATCH(0) +DEFINE_HWx_IRQDISPATCH(1) +DEFINE_HWx_IRQDISPATCH(2) +DEFINE_HWx_IRQDISPATCH(3) +DEFINE_HWx_IRQDISPATCH(4) + +static void ltq_hw5_irqdispatch(void) +{ + do_IRQ(MIPS_CPU_TIMER_IRQ); +} + +asmlinkage void plat_irq_dispatch(void) +{ + unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM; + unsigned int i; + + if (pending & CAUSEF_IP7) { + do_IRQ(MIPS_CPU_TIMER_IRQ); + goto out; + } else { + for (i = 0; i < 5; i++) { + if (pending & (CAUSEF_IP2 << i)) { + ltq_hw_irqdispatch(i); + goto out; + } + } + } + pr_alert("Spurious IRQ: CAUSE=0x%08x\n", read_c0_status()); + +out: + return; +} + +static struct irqaction cascade = { + .handler = no_action, + .flags = IRQF_DISABLED, + .name = "cascade", +}; + +void __init arch_init_irq(void) +{ + int i; + + if (insert_resource(&iomem_resource, <q_icu_resource) < 0) + panic("Failed to insert icu memory\n"); + + if (request_mem_region(ltq_icu_resource.start, + resource_size(<q_icu_resource), "icu") < 0) + panic("Failed to request icu memory\n"); + + ltq_icu_membase = ioremap_nocache(ltq_icu_resource.start, + resource_size(<q_icu_resource)); + if (!ltq_icu_membase) + panic("Failed to remap icu memory\n"); + + if (insert_resource(&iomem_resource, <q_eiu_resource) < 0) + panic("Failed to insert eiu memory\n"); + + if (request_mem_region(ltq_eiu_resource.start, + resource_size(<q_eiu_resource), "eiu") < 0) + panic("Failed to request eiu memory\n"); + + ltq_eiu_membase = ioremap_nocache(ltq_eiu_resource.start, + resource_size(<q_eiu_resource)); + if (!ltq_eiu_membase) + panic("Failed to remap eiu memory\n"); + + /* make sure all irqs are turned off by default */ + for (i = 0; i < 5; i++) + ltq_icu_w32(0, LTQ_ICU_IM0_IER + (i * LTQ_ICU_OFFSET)); + + /* clear all possibly pending interrupts */ + ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET)); + + mips_cpu_irq_init(); + + for (i = 2; i <= 6; i++) + setup_irq(i, &cascade); + + if (cpu_has_vint) { + pr_info("Setting 
up vectored interrupts\n"); + set_vi_handler(2, ltq_hw0_irqdispatch); + set_vi_handler(3, ltq_hw1_irqdispatch); + set_vi_handler(4, ltq_hw2_irqdispatch); + set_vi_handler(5, ltq_hw3_irqdispatch); + set_vi_handler(6, ltq_hw4_irqdispatch); + set_vi_handler(7, ltq_hw5_irqdispatch); + } + + for (i = INT_NUM_IRQ0; + i <= (INT_NUM_IRQ0 + (5 * INT_NUM_IM_OFFSET)); i++) + if ((i == LTQ_EIU_IR0) || (i == LTQ_EIU_IR1) || + (i == LTQ_EIU_IR2)) + irq_set_chip_and_handler(i, <q_eiu_type, + handle_level_irq); + /* EIU3-5 only exist on ar9 and vr9 */ + else if (((i == LTQ_EIU_IR3) || (i == LTQ_EIU_IR4) || + (i == LTQ_EIU_IR5)) && (ltq_is_ar9() || ltq_is_vr9())) + irq_set_chip_and_handler(i, <q_eiu_type, + handle_level_irq); + else + irq_set_chip_and_handler(i, <q_irq_type, + handle_level_irq); + +#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) + set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | + IE_IRQ3 | IE_IRQ4 | IE_IRQ5); +#else + set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 | + IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5); +#endif +} + +unsigned int __cpuinit get_c0_compare_int(void) +{ + return CP0_LEGACY_COMPARE_IRQ; +} diff --git a/trunk/arch/mips/lantiq/machtypes.h b/trunk/arch/mips/lantiq/machtypes.h new file mode 100644 index 000000000000..7e01b8c484eb --- /dev/null +++ b/trunk/arch/mips/lantiq/machtypes.h @@ -0,0 +1,20 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2010 John Crispin + */ + +#ifndef _LANTIQ_MACH_H__ +#define _LANTIQ_MACH_H__ + +#include + +enum lantiq_mach_type { + LTQ_MACH_GENERIC = 0, + LTQ_MACH_EASY50712, /* Danube evaluation board */ + LTQ_MACH_EASY50601, /* Amazon SE evaluation board */ +}; + +#endif diff --git a/trunk/arch/mips/lantiq/prom.c b/trunk/arch/mips/lantiq/prom.c new file mode 100644 index 000000000000..56ba007bf1e5 --- /dev/null +++ b/trunk/arch/mips/lantiq/prom.c @@ -0,0 +1,71 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
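Note: every register access in the ICU code above follows one pattern: the Linux IRQ number is split into a module index (selecting one of the IM0..IM4 register blocks, each LTQ_ICU_OFFSET bytes apart) and a bit position within that module. Assuming INT_NUM_IM_OFFSET is 32 lines per module, the split is plain division and modulo:

    #include <stdio.h>

    #define INT_NUM_IM_OFFSET 32     /* assumed: 32 interrupt lines per module */
    #define LTQ_ICU_OFFSET    0x28   /* register stride, taken from the code above */

    int main(void)
    {
        int irq_nr = 86;             /* hypothetical offset from INT_NUM_IRQ0 */
        int module = irq_nr / INT_NUM_IM_OFFSET;
        int bit    = irq_nr % INT_NUM_IM_OFFSET;

        printf("irq %d -> IM%d, IER at ICU base + 0x%x, mask bit %d (0x%08x)\n",
               irq_nr, module, 0x8 + module * LTQ_ICU_OFFSET, bit, 1u << bit);
        return 0;
    }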
+ * + * Copyright (C) 2010 John Crispin + */ + +#include +#include +#include +#include + +#include + +#include "prom.h" +#include "clk.h" + +static struct ltq_soc_info soc_info; + +unsigned int ltq_get_cpu_ver(void) +{ + return soc_info.rev; +} +EXPORT_SYMBOL(ltq_get_cpu_ver); + +unsigned int ltq_get_soc_type(void) +{ + return soc_info.type; +} +EXPORT_SYMBOL(ltq_get_soc_type); + +const char *get_system_type(void) +{ + return soc_info.sys_type; +} + +void prom_free_prom_memory(void) +{ +} + +static void __init prom_init_cmdline(void) +{ + int argc = fw_arg0; + char **argv = (char **) KSEG1ADDR(fw_arg1); + int i; + + for (i = 0; i < argc; i++) { + char *p = (char *) KSEG1ADDR(argv[i]); + + if (p && *p) { + strlcat(arcs_cmdline, p, sizeof(arcs_cmdline)); + strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline)); + } + } +} + +void __init prom_init(void) +{ + struct clk *clk; + + ltq_soc_detect(&soc_info); + clk_init(); + clk = clk_get(0, "cpu"); + snprintf(soc_info.sys_type, LTQ_SYS_TYPE_LEN - 1, "%s rev1.%d", + soc_info.name, soc_info.rev); + clk_put(clk); + soc_info.sys_type[LTQ_SYS_TYPE_LEN - 1] = '\0'; + pr_info("SoC: %s\n", soc_info.sys_type); + prom_init_cmdline(); +} diff --git a/trunk/arch/mips/lantiq/prom.h b/trunk/arch/mips/lantiq/prom.h new file mode 100644 index 000000000000..b4229d94280f --- /dev/null +++ b/trunk/arch/mips/lantiq/prom.h @@ -0,0 +1,25 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2010 John Crispin + */ + +#ifndef _LTQ_PROM_H__ +#define _LTQ_PROM_H__ + +#define LTQ_SYS_TYPE_LEN 0x100 + +struct ltq_soc_info { + unsigned char *name; + unsigned int rev; + unsigned int partnum; + unsigned int type; + unsigned char sys_type[LTQ_SYS_TYPE_LEN]; +}; + +extern void ltq_soc_detect(struct ltq_soc_info *i); +extern void ltq_soc_setup(void); + +#endif diff --git a/trunk/arch/mips/lantiq/setup.c b/trunk/arch/mips/lantiq/setup.c new file mode 100644 index 000000000000..9b8af77ed0f9 --- /dev/null +++ b/trunk/arch/mips/lantiq/setup.c @@ -0,0 +1,66 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Copyright (C) 2010 John Crispin + */ + +#include +#include +#include +#include +#include + +#include + +#include "machtypes.h" +#include "devices.h" +#include "prom.h" + +void __init plat_mem_setup(void) +{ + /* assume 16M as default incase uboot fails to pass proper ramsize */ + unsigned long memsize = 16; + char **envp = (char **) KSEG1ADDR(fw_arg2); + + ioport_resource.start = IOPORT_RESOURCE_START; + ioport_resource.end = IOPORT_RESOURCE_END; + iomem_resource.start = IOMEM_RESOURCE_START; + iomem_resource.end = IOMEM_RESOURCE_END; + + set_io_port_base((unsigned long) KSEG1); + + while (*envp) { + char *e = (char *)KSEG1ADDR(*envp); + if (!strncmp(e, "memsize=", 8)) { + e += 8; + if (strict_strtoul(e, 0, &memsize)) + pr_warn("bad memsize specified\n"); + } + envp++; + } + memsize *= 1024 * 1024; + add_memory_region(0x00000000, memsize, BOOT_MEM_RAM); +} + +static int __init +lantiq_setup(void) +{ + ltq_soc_setup(); + mips_machine_setup(); + return 0; +} + +arch_initcall(lantiq_setup); + +static void __init +lantiq_generic_init(void) +{ + /* Nothing to do */ +} + +MIPS_MACHINE(LTQ_MACH_GENERIC, + "Generic", + "Generic Lantiq based board", + lantiq_generic_init); diff --git a/trunk/arch/mips/lantiq/xway/Kconfig b/trunk/arch/mips/lantiq/xway/Kconfig new file mode 100644 index 000000000000..2b857de36620 --- /dev/null +++ b/trunk/arch/mips/lantiq/xway/Kconfig @@ -0,0 +1,23 @@ +if SOC_XWAY + +menu "MIPS Machine" + +config LANTIQ_MACH_EASY50712 + bool "Easy50712 - Danube" + default y + +endmenu + +endif + +if SOC_AMAZON_SE + +menu "MIPS Machine" + +config LANTIQ_MACH_EASY50601 + bool "Easy50601 - Amazon SE" + default y + +endmenu + +endif diff --git a/trunk/arch/mips/lantiq/xway/Makefile b/trunk/arch/mips/lantiq/xway/Makefile new file mode 100644 index 000000000000..c517f2e77563 --- /dev/null +++ b/trunk/arch/mips/lantiq/xway/Makefile @@ -0,0 +1,7 @@ +obj-y := pmu.o ebu.o reset.o gpio.o gpio_stp.o gpio_ebu.o devices.o dma.o + +obj-$(CONFIG_SOC_XWAY) += clk-xway.o prom-xway.o setup-xway.o +obj-$(CONFIG_SOC_AMAZON_SE) += clk-ase.o prom-ase.o setup-ase.o + +obj-$(CONFIG_LANTIQ_MACH_EASY50712) += mach-easy50712.o +obj-$(CONFIG_LANTIQ_MACH_EASY50601) += mach-easy50601.o diff --git a/trunk/arch/mips/lantiq/xway/clk-ase.c b/trunk/arch/mips/lantiq/xway/clk-ase.c new file mode 100644 index 000000000000..22d823acd536 --- /dev/null +++ b/trunk/arch/mips/lantiq/xway/clk-ase.c @@ -0,0 +1,48 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
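Note: plat_mem_setup() above walks the environment array handed over by u-boot and treats "memsize=" as megabytes, falling back to 16 MB when nothing usable is found. The parsing step on its own; strict_strtoul is the old kernel spelling of what is now kstrtoul, and plain strtoul stands in for it here:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        const char *env[] = { "console=ttyLTQ0", "memsize=64", NULL };  /* hypothetical u-boot env */
        unsigned long memsize = 16;                                     /* default, in MB */
        int i;

        for (i = 0; env[i]; i++)
            if (!strncmp(env[i], "memsize=", 8))
                memsize = strtoul(env[i] + 8, NULL, 0);

        printf("registering %lu bytes of RAM\n", memsize * 1024 * 1024);
        return 0;
    }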
+ * + * Copyright (C) 2011 John Crispin + */ + +#include +#include +#include +#include + +#include +#include +#include + +#include + +/* cgu registers */ +#define LTQ_CGU_SYS 0x0010 + +unsigned int ltq_get_io_region_clock(void) +{ + return CLOCK_133M; +} +EXPORT_SYMBOL(ltq_get_io_region_clock); + +unsigned int ltq_get_fpi_bus_clock(int fpi) +{ + return CLOCK_133M; +} +EXPORT_SYMBOL(ltq_get_fpi_bus_clock); + +unsigned int ltq_get_cpu_hz(void) +{ + if (ltq_cgu_r32(LTQ_CGU_SYS) & (1 << 5)) + return CLOCK_266M; + else + return CLOCK_133M; +} +EXPORT_SYMBOL(ltq_get_cpu_hz); + +unsigned int ltq_get_fpi_hz(void) +{ + return CLOCK_133M; +} +EXPORT_SYMBOL(ltq_get_fpi_hz); diff --git a/trunk/arch/mips/lantiq/xway/clk-xway.c b/trunk/arch/mips/lantiq/xway/clk-xway.c new file mode 100644 index 000000000000..ddd39593c581 --- /dev/null +++ b/trunk/arch/mips/lantiq/xway/clk-xway.c @@ -0,0 +1,223 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2010 John Crispin + */ + +#include +#include +#include +#include + +#include +#include +#include + +#include + +static unsigned int ltq_ram_clocks[] = { + CLOCK_167M, CLOCK_133M, CLOCK_111M, CLOCK_83M }; +#define DDR_HZ ltq_ram_clocks[ltq_cgu_r32(LTQ_CGU_SYS) & 0x3] + +#define BASIC_FREQUENCY_1 35328000 +#define BASIC_FREQUENCY_2 36000000 +#define BASIS_REQUENCY_USB 12000000 + +#define GET_BITS(x, msb, lsb) \ + (((x) & ((1 << ((msb) + 1)) - 1)) >> (lsb)) + +#define LTQ_CGU_PLL0_CFG 0x0004 +#define LTQ_CGU_PLL1_CFG 0x0008 +#define LTQ_CGU_PLL2_CFG 0x000C +#define LTQ_CGU_SYS 0x0010 +#define LTQ_CGU_UPDATE 0x0014 +#define LTQ_CGU_IF_CLK 0x0018 +#define LTQ_CGU_OSC_CON 0x001C +#define LTQ_CGU_SMD 0x0020 +#define LTQ_CGU_CT1SR 0x0028 +#define LTQ_CGU_CT2SR 0x002C +#define LTQ_CGU_PCMCR 0x0030 +#define LTQ_CGU_PCI_CR 0x0034 +#define LTQ_CGU_PD_PC 0x0038 +#define LTQ_CGU_FMR 0x003C + +#define CGU_PLL0_PHASE_DIVIDER_ENABLE \ + (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 31)) +#define CGU_PLL0_BYPASS \ + (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 30)) +#define CGU_PLL0_CFG_DSMSEL \ + (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 28)) +#define CGU_PLL0_CFG_FRAC_EN \ + (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 27)) +#define CGU_PLL1_SRC \ + (ltq_cgu_r32(LTQ_CGU_PLL1_CFG) & (1 << 31)) +#define CGU_PLL2_PHASE_DIVIDER_ENABLE \ + (ltq_cgu_r32(LTQ_CGU_PLL2_CFG) & (1 << 20)) +#define CGU_SYS_FPI_SEL (1 << 6) +#define CGU_SYS_DDR_SEL 0x3 +#define CGU_PLL0_SRC (1 << 29) + +#define CGU_PLL0_CFG_PLLK GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 26, 17) +#define CGU_PLL0_CFG_PLLN GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 12, 6) +#define CGU_PLL0_CFG_PLLM GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 5, 2) +#define CGU_PLL2_SRC GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL2_CFG), 18, 17) +#define CGU_PLL2_CFG_INPUT_DIV GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL2_CFG), 16, 13) + +static unsigned int ltq_get_pll0_fdiv(void); + +static inline unsigned int get_input_clock(int pll) +{ + switch (pll) { + case 0: + if (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & CGU_PLL0_SRC) + return BASIS_REQUENCY_USB; + else if (CGU_PLL0_PHASE_DIVIDER_ENABLE) + return BASIC_FREQUENCY_1; + else + return BASIC_FREQUENCY_2; + case 1: + if (CGU_PLL1_SRC) + return BASIS_REQUENCY_USB; + else if (CGU_PLL0_PHASE_DIVIDER_ENABLE) + return BASIC_FREQUENCY_1; + else + return BASIC_FREQUENCY_2; + case 2: + switch (CGU_PLL2_SRC) { + case 0: + return ltq_get_pll0_fdiv(); + case 1: + return CGU_PLL2_PHASE_DIVIDER_ENABLE ? 
+ BASIC_FREQUENCY_1 : + BASIC_FREQUENCY_2; + case 2: + return BASIS_REQUENCY_USB; + } + default: + return 0; + } +} + +static inline unsigned int cal_dsm(int pll, unsigned int num, unsigned int den) +{ + u64 res, clock = get_input_clock(pll); + + res = num * clock; + do_div(res, den); + return res; +} + +static inline unsigned int mash_dsm(int pll, unsigned int M, unsigned int N, + unsigned int K) +{ + unsigned int num = ((N + 1) << 10) + K; + unsigned int den = (M + 1) << 10; + + return cal_dsm(pll, num, den); +} + +static inline unsigned int ssff_dsm_1(int pll, unsigned int M, unsigned int N, + unsigned int K) +{ + unsigned int num = ((N + 1) << 11) + K + 512; + unsigned int den = (M + 1) << 11; + + return cal_dsm(pll, num, den); +} + +static inline unsigned int ssff_dsm_2(int pll, unsigned int M, unsigned int N, + unsigned int K) +{ + unsigned int num = K >= 512 ? + ((N + 1) << 12) + K - 512 : ((N + 1) << 12) + K + 3584; + unsigned int den = (M + 1) << 12; + + return cal_dsm(pll, num, den); +} + +static inline unsigned int dsm(int pll, unsigned int M, unsigned int N, + unsigned int K, unsigned int dsmsel, unsigned int phase_div_en) +{ + if (!dsmsel) + return mash_dsm(pll, M, N, K); + else if (!phase_div_en) + return mash_dsm(pll, M, N, K); + else + return ssff_dsm_2(pll, M, N, K); +} + +static inline unsigned int ltq_get_pll0_fosc(void) +{ + if (CGU_PLL0_BYPASS) + return get_input_clock(0); + else + return !CGU_PLL0_CFG_FRAC_EN + ? dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN, 0, + CGU_PLL0_CFG_DSMSEL, + CGU_PLL0_PHASE_DIVIDER_ENABLE) + : dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN, + CGU_PLL0_CFG_PLLK, CGU_PLL0_CFG_DSMSEL, + CGU_PLL0_PHASE_DIVIDER_ENABLE); +} + +static unsigned int ltq_get_pll0_fdiv(void) +{ + unsigned int div = CGU_PLL2_CFG_INPUT_DIV + 1; + + return (ltq_get_pll0_fosc() + (div >> 1)) / div; +} + +unsigned int ltq_get_io_region_clock(void) +{ + unsigned int ret = ltq_get_pll0_fosc(); + + switch (ltq_cgu_r32(LTQ_CGU_PLL2_CFG) & CGU_SYS_DDR_SEL) { + default: + case 0: + return (ret + 1) / 2; + case 1: + return (ret * 2 + 2) / 5; + case 2: + return (ret + 1) / 3; + case 3: + return (ret + 2) / 4; + } +} +EXPORT_SYMBOL(ltq_get_io_region_clock); + +unsigned int ltq_get_fpi_bus_clock(int fpi) +{ + unsigned int ret = ltq_get_io_region_clock(); + + if ((fpi == 2) && (ltq_cgu_r32(LTQ_CGU_SYS) & CGU_SYS_FPI_SEL)) + ret >>= 1; + return ret; +} +EXPORT_SYMBOL(ltq_get_fpi_bus_clock); + +unsigned int ltq_get_cpu_hz(void) +{ + switch (ltq_cgu_r32(LTQ_CGU_SYS) & 0xc) { + case 0: + return CLOCK_333M; + case 4: + return DDR_HZ; + case 8: + return DDR_HZ << 1; + default: + return DDR_HZ >> 1; + } +} +EXPORT_SYMBOL(ltq_get_cpu_hz); + +unsigned int ltq_get_fpi_hz(void) +{ + unsigned int ddr_clock = DDR_HZ; + + if (ltq_cgu_r32(LTQ_CGU_SYS) & 0x40) + return ddr_clock >> 1; + return ddr_clock; +} +EXPORT_SYMBOL(ltq_get_fpi_hz); diff --git a/trunk/arch/mips/lantiq/xway/devices.c b/trunk/arch/mips/lantiq/xway/devices.c new file mode 100644 index 000000000000..e09e789dfc27 --- /dev/null +++ b/trunk/arch/mips/lantiq/xway/devices.c @@ -0,0 +1,121 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Copyright (C) 2010 John Crispin + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include "devices.h" + +/* gpio */ +static struct resource ltq_gpio_resource[] = { + { + .name = "gpio0", + .start = LTQ_GPIO0_BASE_ADDR, + .end = LTQ_GPIO0_BASE_ADDR + LTQ_GPIO_SIZE - 1, + .flags = IORESOURCE_MEM, + }, { + .name = "gpio1", + .start = LTQ_GPIO1_BASE_ADDR, + .end = LTQ_GPIO1_BASE_ADDR + LTQ_GPIO_SIZE - 1, + .flags = IORESOURCE_MEM, + }, { + .name = "gpio2", + .start = LTQ_GPIO2_BASE_ADDR, + .end = LTQ_GPIO2_BASE_ADDR + LTQ_GPIO_SIZE - 1, + .flags = IORESOURCE_MEM, + } +}; + +void __init ltq_register_gpio(void) +{ + platform_device_register_simple("ltq_gpio", 0, + <q_gpio_resource[0], 1); + platform_device_register_simple("ltq_gpio", 1, + <q_gpio_resource[1], 1); + + /* AR9 and VR9 have an extra gpio block */ + if (ltq_is_ar9() || ltq_is_vr9()) { + platform_device_register_simple("ltq_gpio", 2, + <q_gpio_resource[2], 1); + } +} + +/* serial to parallel conversion */ +static struct resource ltq_stp_resource = { + .name = "stp", + .start = LTQ_STP_BASE_ADDR, + .end = LTQ_STP_BASE_ADDR + LTQ_STP_SIZE - 1, + .flags = IORESOURCE_MEM, +}; + +void __init ltq_register_gpio_stp(void) +{ + platform_device_register_simple("ltq_stp", 0, <q_stp_resource, 1); +} + +/* asc ports - amazon se has its own serial mapping */ +static struct resource ltq_ase_asc_resources[] = { + { + .name = "asc0", + .start = LTQ_ASC1_BASE_ADDR, + .end = LTQ_ASC1_BASE_ADDR + LTQ_ASC_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + IRQ_RES(tx, LTQ_ASC_ASE_TIR), + IRQ_RES(rx, LTQ_ASC_ASE_RIR), + IRQ_RES(err, LTQ_ASC_ASE_EIR), +}; + +void __init ltq_register_ase_asc(void) +{ + platform_device_register_simple("ltq_asc", 0, + ltq_ase_asc_resources, ARRAY_SIZE(ltq_ase_asc_resources)); +} + +/* ethernet */ +static struct resource ltq_etop_resources = { + .name = "etop", + .start = LTQ_ETOP_BASE_ADDR, + .end = LTQ_ETOP_BASE_ADDR + LTQ_ETOP_SIZE - 1, + .flags = IORESOURCE_MEM, +}; + +static struct platform_device ltq_etop = { + .name = "ltq_etop", + .resource = <q_etop_resources, + .num_resources = 1, +}; + +void __init +ltq_register_etop(struct ltq_eth_data *eth) +{ + if (eth) { + ltq_etop.dev.platform_data = eth; + platform_device_register(<q_etop); + } +} diff --git a/trunk/arch/mips/lantiq/xway/devices.h b/trunk/arch/mips/lantiq/xway/devices.h new file mode 100644 index 000000000000..e90493471bc1 --- /dev/null +++ b/trunk/arch/mips/lantiq/xway/devices.h @@ -0,0 +1,20 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Copyright (C) 2010 John Crispin + */ + +#ifndef _LTQ_DEVICES_XWAY_H__ +#define _LTQ_DEVICES_XWAY_H__ + +#include "../devices.h" +#include + +extern void ltq_register_gpio(void); +extern void ltq_register_gpio_stp(void); +extern void ltq_register_ase_asc(void); +extern void ltq_register_etop(struct ltq_eth_data *eth); + +#endif diff --git a/trunk/arch/mips/lantiq/xway/dma.c b/trunk/arch/mips/lantiq/xway/dma.c new file mode 100644 index 000000000000..4278a459d6c4 --- /dev/null +++ b/trunk/arch/mips/lantiq/xway/dma.c @@ -0,0 +1,253 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) 2011 John Crispin + */ + +#include +#include +#include +#include + +#include +#include + +#define LTQ_DMA_CTRL 0x10 +#define LTQ_DMA_CPOLL 0x14 +#define LTQ_DMA_CS 0x18 +#define LTQ_DMA_CCTRL 0x1C +#define LTQ_DMA_CDBA 0x20 +#define LTQ_DMA_CDLEN 0x24 +#define LTQ_DMA_CIS 0x28 +#define LTQ_DMA_CIE 0x2C +#define LTQ_DMA_PS 0x40 +#define LTQ_DMA_PCTRL 0x44 +#define LTQ_DMA_IRNEN 0xf4 + +#define DMA_DESCPT BIT(3) /* descriptor complete irq */ +#define DMA_TX BIT(8) /* TX channel direction */ +#define DMA_CHAN_ON BIT(0) /* channel on / off bit */ +#define DMA_PDEN BIT(6) /* enable packet drop */ +#define DMA_CHAN_RST BIT(1) /* channel on / off bit */ +#define DMA_RESET BIT(0) /* channel on / off bit */ +#define DMA_IRQ_ACK 0x7e /* IRQ status register */ +#define DMA_POLL BIT(31) /* turn on channel polling */ +#define DMA_CLK_DIV4 BIT(6) /* polling clock divider */ +#define DMA_2W_BURST BIT(1) /* 2 word burst length */ +#define DMA_MAX_CHANNEL 20 /* the soc has 20 channels */ +#define DMA_ETOP_ENDIANESS (0xf << 8) /* endianess swap etop channels */ +#define DMA_WEIGHT (BIT(17) | BIT(16)) /* default channel wheight */ + +#define ltq_dma_r32(x) ltq_r32(ltq_dma_membase + (x)) +#define ltq_dma_w32(x, y) ltq_w32(x, ltq_dma_membase + (y)) +#define ltq_dma_w32_mask(x, y, z) ltq_w32_mask(x, y, \ + ltq_dma_membase + (z)) + +static struct resource ltq_dma_resource = { + .name = "dma", + .start = LTQ_DMA_BASE_ADDR, + .end = LTQ_DMA_BASE_ADDR + LTQ_DMA_SIZE - 1, + .flags = IORESOURCE_MEM, +}; + +static void __iomem *ltq_dma_membase; + +void +ltq_dma_enable_irq(struct ltq_dma_channel *ch) +{ + unsigned long flags; + + local_irq_save(flags); + ltq_dma_w32(ch->nr, LTQ_DMA_CS); + ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN); + local_irq_restore(flags); +} +EXPORT_SYMBOL_GPL(ltq_dma_enable_irq); + +void +ltq_dma_disable_irq(struct ltq_dma_channel *ch) +{ + unsigned long flags; + + local_irq_save(flags); + ltq_dma_w32(ch->nr, LTQ_DMA_CS); + ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN); + local_irq_restore(flags); +} +EXPORT_SYMBOL_GPL(ltq_dma_disable_irq); + +void +ltq_dma_ack_irq(struct ltq_dma_channel *ch) +{ + unsigned long flags; + + local_irq_save(flags); + ltq_dma_w32(ch->nr, LTQ_DMA_CS); + ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS); + local_irq_restore(flags); +} 
+EXPORT_SYMBOL_GPL(ltq_dma_ack_irq); + +void +ltq_dma_open(struct ltq_dma_channel *ch) +{ + unsigned long flag; + + local_irq_save(flag); + ltq_dma_w32(ch->nr, LTQ_DMA_CS); + ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL); + ltq_dma_enable_irq(ch); + local_irq_restore(flag); +} +EXPORT_SYMBOL_GPL(ltq_dma_open); + +void +ltq_dma_close(struct ltq_dma_channel *ch) +{ + unsigned long flag; + + local_irq_save(flag); + ltq_dma_w32(ch->nr, LTQ_DMA_CS); + ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL); + ltq_dma_disable_irq(ch); + local_irq_restore(flag); +} +EXPORT_SYMBOL_GPL(ltq_dma_close); + +static void +ltq_dma_alloc(struct ltq_dma_channel *ch) +{ + unsigned long flags; + + ch->desc = 0; + ch->desc_base = dma_alloc_coherent(NULL, + LTQ_DESC_NUM * LTQ_DESC_SIZE, + &ch->phys, GFP_ATOMIC); + memset(ch->desc_base, 0, LTQ_DESC_NUM * LTQ_DESC_SIZE); + + local_irq_save(flags); + ltq_dma_w32(ch->nr, LTQ_DMA_CS); + ltq_dma_w32(ch->phys, LTQ_DMA_CDBA); + ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN); + ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL); + wmb(); + ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL); + while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST) + ; + local_irq_restore(flags); +} + +void +ltq_dma_alloc_tx(struct ltq_dma_channel *ch) +{ + unsigned long flags; + + ltq_dma_alloc(ch); + + local_irq_save(flags); + ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE); + ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN); + ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL); + local_irq_restore(flags); +} +EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx); + +void +ltq_dma_alloc_rx(struct ltq_dma_channel *ch) +{ + unsigned long flags; + + ltq_dma_alloc(ch); + + local_irq_save(flags); + ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE); + ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN); + ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL); + local_irq_restore(flags); +} +EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx); + +void +ltq_dma_free(struct ltq_dma_channel *ch) +{ + if (!ch->desc_base) + return; + ltq_dma_close(ch); + dma_free_coherent(NULL, LTQ_DESC_NUM * LTQ_DESC_SIZE, + ch->desc_base, ch->phys); +} +EXPORT_SYMBOL_GPL(ltq_dma_free); + +void +ltq_dma_init_port(int p) +{ + ltq_dma_w32(p, LTQ_DMA_PS); + switch (p) { + case DMA_PORT_ETOP: + /* + * Tell the DMA engine to swap the endianess of data frames and + * drop packets if the channel arbitration fails. 
+ */ + ltq_dma_w32_mask(0, DMA_ETOP_ENDIANESS | DMA_PDEN, + LTQ_DMA_PCTRL); + break; + + case DMA_PORT_DEU: + ltq_dma_w32((DMA_2W_BURST << 4) | (DMA_2W_BURST << 2), + LTQ_DMA_PCTRL); + break; + + default: + break; + } +} +EXPORT_SYMBOL_GPL(ltq_dma_init_port); + +int __init +ltq_dma_init(void) +{ + int i; + + /* insert and request the memory region */ + if (insert_resource(&iomem_resource, <q_dma_resource) < 0) + panic("Failed to insert dma memory\n"); + + if (request_mem_region(ltq_dma_resource.start, + resource_size(<q_dma_resource), "dma") < 0) + panic("Failed to request dma memory\n"); + + /* remap dma register range */ + ltq_dma_membase = ioremap_nocache(ltq_dma_resource.start, + resource_size(<q_dma_resource)); + if (!ltq_dma_membase) + panic("Failed to remap dma memory\n"); + + /* power up and reset the dma engine */ + ltq_pmu_enable(PMU_DMA); + ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL); + + /* disable all interrupts */ + ltq_dma_w32(0, LTQ_DMA_IRNEN); + + /* reset/configure each channel */ + for (i = 0; i < DMA_MAX_CHANNEL; i++) { + ltq_dma_w32(i, LTQ_DMA_CS); + ltq_dma_w32(DMA_CHAN_RST, LTQ_DMA_CCTRL); + ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL); + ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL); + } + return 0; +} + +postcore_initcall(ltq_dma_init); diff --git a/trunk/arch/mips/lantiq/xway/ebu.c b/trunk/arch/mips/lantiq/xway/ebu.c new file mode 100644 index 000000000000..66eb52fa50a1 --- /dev/null +++ b/trunk/arch/mips/lantiq/xway/ebu.c @@ -0,0 +1,53 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * EBU - the external bus unit attaches PCI, NOR and NAND + * + * Copyright (C) 2010 John Crispin + */ + +#include +#include +#include +#include + +#include + +/* all access to the ebu must be locked */ +DEFINE_SPINLOCK(ebu_lock); +EXPORT_SYMBOL_GPL(ebu_lock); + +static struct resource ltq_ebu_resource = { + .name = "ebu", + .start = LTQ_EBU_BASE_ADDR, + .end = LTQ_EBU_BASE_ADDR + LTQ_EBU_SIZE - 1, + .flags = IORESOURCE_MEM, +}; + +/* remapped base addr of the clock unit and external bus unit */ +void __iomem *ltq_ebu_membase; + +static int __init lantiq_ebu_init(void) +{ + /* insert and request the memory region */ + if (insert_resource(&iomem_resource, <q_ebu_resource) < 0) + panic("Failed to insert ebu memory\n"); + + if (request_mem_region(ltq_ebu_resource.start, + resource_size(<q_ebu_resource), "ebu") < 0) + panic("Failed to request ebu memory\n"); + + /* remap ebu register range */ + ltq_ebu_membase = ioremap_nocache(ltq_ebu_resource.start, + resource_size(<q_ebu_resource)); + if (!ltq_ebu_membase) + panic("Failed to remap ebu memory\n"); + + /* make sure to unprotect the memory region where flash is located */ + ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0); + return 0; +} + +postcore_initcall(lantiq_ebu_init); diff --git a/trunk/arch/mips/lantiq/xway/gpio.c b/trunk/arch/mips/lantiq/xway/gpio.c new file mode 100644 index 000000000000..a321451a5455 --- /dev/null +++ b/trunk/arch/mips/lantiq/xway/gpio.c @@ -0,0 +1,195 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Copyright (C) 2010 John Crispin + */ + +#include +#include +#include +#include +#include +#include + +#include + +#define LTQ_GPIO_OUT 0x00 +#define LTQ_GPIO_IN 0x04 +#define LTQ_GPIO_DIR 0x08 +#define LTQ_GPIO_ALTSEL0 0x0C +#define LTQ_GPIO_ALTSEL1 0x10 +#define LTQ_GPIO_OD 0x14 + +#define PINS_PER_PORT 16 +#define MAX_PORTS 3 + +#define ltq_gpio_getbit(m, r, p) (!!(ltq_r32(m + r) & (1 << p))) +#define ltq_gpio_setbit(m, r, p) ltq_w32_mask(0, (1 << p), m + r) +#define ltq_gpio_clearbit(m, r, p) ltq_w32_mask((1 << p), 0, m + r) + +struct ltq_gpio { + void __iomem *membase; + struct gpio_chip chip; +}; + +static struct ltq_gpio ltq_gpio_port[MAX_PORTS]; + +int gpio_to_irq(unsigned int gpio) +{ + return -EINVAL; +} +EXPORT_SYMBOL(gpio_to_irq); + +int irq_to_gpio(unsigned int gpio) +{ + return -EINVAL; +} +EXPORT_SYMBOL(irq_to_gpio); + +int ltq_gpio_request(unsigned int pin, unsigned int alt0, + unsigned int alt1, unsigned int dir, const char *name) +{ + int id = 0; + + if (pin >= (MAX_PORTS * PINS_PER_PORT)) + return -EINVAL; + if (gpio_request(pin, name)) { + pr_err("failed to setup lantiq gpio: %s\n", name); + return -EBUSY; + } + if (dir) + gpio_direction_output(pin, 1); + else + gpio_direction_input(pin); + while (pin >= PINS_PER_PORT) { + pin -= PINS_PER_PORT; + id++; + } + if (alt0) + ltq_gpio_setbit(ltq_gpio_port[id].membase, + LTQ_GPIO_ALTSEL0, pin); + else + ltq_gpio_clearbit(ltq_gpio_port[id].membase, + LTQ_GPIO_ALTSEL0, pin); + if (alt1) + ltq_gpio_setbit(ltq_gpio_port[id].membase, + LTQ_GPIO_ALTSEL1, pin); + else + ltq_gpio_clearbit(ltq_gpio_port[id].membase, + LTQ_GPIO_ALTSEL1, pin); + return 0; +} +EXPORT_SYMBOL(ltq_gpio_request); + +static void ltq_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) +{ + struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip); + + if (value) + ltq_gpio_setbit(ltq_gpio->membase, LTQ_GPIO_OUT, offset); + else + ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_OUT, offset); +} + +static int ltq_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip); + + return ltq_gpio_getbit(ltq_gpio->membase, LTQ_GPIO_IN, offset); +} + +static int ltq_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) +{ + struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip); + + ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_OD, offset); + ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_DIR, offset); + + return 0; +} + +static int ltq_gpio_direction_output(struct gpio_chip *chip, + unsigned int offset, int value) +{ + struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip); + + ltq_gpio_setbit(ltq_gpio->membase, LTQ_GPIO_OD, offset); + ltq_gpio_setbit(ltq_gpio->membase, LTQ_GPIO_DIR, offset); + ltq_gpio_set(chip, offset, value); + + return 0; +} + +static int ltq_gpio_req(struct gpio_chip *chip, unsigned offset) +{ + struct ltq_gpio *ltq_gpio = container_of(chip, struct ltq_gpio, chip); + + ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_ALTSEL0, offset); + ltq_gpio_clearbit(ltq_gpio->membase, LTQ_GPIO_ALTSEL1, offset); + return 0; +} + +static int ltq_gpio_probe(struct platform_device *pdev) +{ + struct resource *res; + + if (pdev->id >= MAX_PORTS) { + dev_err(&pdev->dev, "invalid gpio port %d\n", + pdev->id); + return -EINVAL; + } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "failed to get memory for gpio port %d\n", + pdev->id); + return -ENOENT; + } + res = 
devm_request_mem_region(&pdev->dev, res->start, + resource_size(res), dev_name(&pdev->dev)); + if (!res) { + dev_err(&pdev->dev, + "failed to request memory for gpio port %d\n", + pdev->id); + return -EBUSY; + } + ltq_gpio_port[pdev->id].membase = devm_ioremap_nocache(&pdev->dev, + res->start, resource_size(res)); + if (!ltq_gpio_port[pdev->id].membase) { + dev_err(&pdev->dev, "failed to remap memory for gpio port %d\n", + pdev->id); + return -ENOMEM; + } + ltq_gpio_port[pdev->id].chip.label = "ltq_gpio"; + ltq_gpio_port[pdev->id].chip.direction_input = ltq_gpio_direction_input; + ltq_gpio_port[pdev->id].chip.direction_output = + ltq_gpio_direction_output; + ltq_gpio_port[pdev->id].chip.get = ltq_gpio_get; + ltq_gpio_port[pdev->id].chip.set = ltq_gpio_set; + ltq_gpio_port[pdev->id].chip.request = ltq_gpio_req; + ltq_gpio_port[pdev->id].chip.base = PINS_PER_PORT * pdev->id; + ltq_gpio_port[pdev->id].chip.ngpio = PINS_PER_PORT; + platform_set_drvdata(pdev, <q_gpio_port[pdev->id]); + return gpiochip_add(<q_gpio_port[pdev->id].chip); +} + +static struct platform_driver +ltq_gpio_driver = { + .probe = ltq_gpio_probe, + .driver = { + .name = "ltq_gpio", + .owner = THIS_MODULE, + }, +}; + +int __init ltq_gpio_init(void) +{ + int ret = platform_driver_register(<q_gpio_driver); + + if (ret) + pr_info("ltq_gpio : Error registering platfom driver!"); + return ret; +} + +postcore_initcall(ltq_gpio_init); diff --git a/trunk/arch/mips/lantiq/xway/gpio_ebu.c b/trunk/arch/mips/lantiq/xway/gpio_ebu.c new file mode 100644 index 000000000000..a479355abdb9 --- /dev/null +++ b/trunk/arch/mips/lantiq/xway/gpio_ebu.c @@ -0,0 +1,126 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2010 John Crispin + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * By attaching hardware latches to the EBU it is possible to create output + * only gpios. This driver configures a special memory address, which when + * written to outputs 16 bit to the latches. 
+ */ + +#define LTQ_EBU_BUSCON 0x1e7ff /* 16 bit access, slowest timing */ +#define LTQ_EBU_WP 0x80000000 /* write protect bit */ + +/* we keep a shadow value of the last value written to the ebu */ +static int ltq_ebu_gpio_shadow = 0x0; +static void __iomem *ltq_ebu_gpio_membase; + +static void ltq_ebu_apply(void) +{ + unsigned long flags; + + spin_lock_irqsave(&ebu_lock, flags); + ltq_ebu_w32(LTQ_EBU_BUSCON, LTQ_EBU_BUSCON1); + *((__u16 *)ltq_ebu_gpio_membase) = ltq_ebu_gpio_shadow; + ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1); + spin_unlock_irqrestore(&ebu_lock, flags); +} + +static void ltq_ebu_set(struct gpio_chip *chip, unsigned offset, int value) +{ + if (value) + ltq_ebu_gpio_shadow |= (1 << offset); + else + ltq_ebu_gpio_shadow &= ~(1 << offset); + ltq_ebu_apply(); +} + +static int ltq_ebu_direction_output(struct gpio_chip *chip, unsigned offset, + int value) +{ + ltq_ebu_set(chip, offset, value); + + return 0; +} + +static struct gpio_chip ltq_ebu_chip = { + .label = "ltq_ebu", + .direction_output = ltq_ebu_direction_output, + .set = ltq_ebu_set, + .base = 72, + .ngpio = 16, + .can_sleep = 1, + .owner = THIS_MODULE, +}; + +static int ltq_ebu_probe(struct platform_device *pdev) +{ + int ret = 0; + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + if (!res) { + dev_err(&pdev->dev, "failed to get memory resource\n"); + return -ENOENT; + } + + res = devm_request_mem_region(&pdev->dev, res->start, + resource_size(res), dev_name(&pdev->dev)); + if (!res) { + dev_err(&pdev->dev, "failed to request memory resource\n"); + return -EBUSY; + } + + ltq_ebu_gpio_membase = devm_ioremap_nocache(&pdev->dev, res->start, + resource_size(res)); + if (!ltq_ebu_gpio_membase) { + dev_err(&pdev->dev, "Failed to ioremap mem region\n"); + return -ENOMEM; + } + + /* grab the default shadow value passed form the platform code */ + ltq_ebu_gpio_shadow = (unsigned int) pdev->dev.platform_data; + + /* tell the ebu controller which memory address we will be using */ + ltq_ebu_w32(pdev->resource->start | 0x1, LTQ_EBU_ADDRSEL1); + + /* write protect the region */ + ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1); + + ret = gpiochip_add(<q_ebu_chip); + if (!ret) + ltq_ebu_apply(); + return ret; +} + +static struct platform_driver ltq_ebu_driver = { + .probe = ltq_ebu_probe, + .driver = { + .name = "ltq_ebu", + .owner = THIS_MODULE, + }, +}; + +static int __init ltq_ebu_init(void) +{ + int ret = platform_driver_register(<q_ebu_driver); + + if (ret) + pr_info("ltq_ebu : Error registering platfom driver!"); + return ret; +} + +postcore_initcall(ltq_ebu_init); diff --git a/trunk/arch/mips/lantiq/xway/gpio_stp.c b/trunk/arch/mips/lantiq/xway/gpio_stp.c new file mode 100644 index 000000000000..67d59d690340 --- /dev/null +++ b/trunk/arch/mips/lantiq/xway/gpio_stp.c @@ -0,0 +1,157 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Copyright (C) 2007 John Crispin + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define LTQ_STP_CON0 0x00 +#define LTQ_STP_CON1 0x04 +#define LTQ_STP_CPU0 0x08 +#define LTQ_STP_CPU1 0x0C +#define LTQ_STP_AR 0x10 + +#define LTQ_STP_CON_SWU (1 << 31) +#define LTQ_STP_2HZ 0 +#define LTQ_STP_4HZ (1 << 23) +#define LTQ_STP_8HZ (2 << 23) +#define LTQ_STP_10HZ (3 << 23) +#define LTQ_STP_SPEED_MASK (0xf << 23) +#define LTQ_STP_UPD_FPI (1 << 31) +#define LTQ_STP_UPD_MASK (3 << 30) +#define LTQ_STP_ADSL_SRC (3 << 24) + +#define LTQ_STP_GROUP0 (1 << 0) + +#define LTQ_STP_RISING 0 +#define LTQ_STP_FALLING (1 << 26) +#define LTQ_STP_EDGE_MASK (1 << 26) + +#define ltq_stp_r32(reg) __raw_readl(ltq_stp_membase + reg) +#define ltq_stp_w32(val, reg) __raw_writel(val, ltq_stp_membase + reg) +#define ltq_stp_w32_mask(clear, set, reg) \ + ltq_w32((ltq_r32(ltq_stp_membase + reg) & ~(clear)) | (set), \ + ltq_stp_membase + (reg)) + +static int ltq_stp_shadow = 0xffff; +static void __iomem *ltq_stp_membase; + +static void ltq_stp_set(struct gpio_chip *chip, unsigned offset, int value) +{ + if (value) + ltq_stp_shadow |= (1 << offset); + else + ltq_stp_shadow &= ~(1 << offset); + ltq_stp_w32(ltq_stp_shadow, LTQ_STP_CPU0); +} + +static int ltq_stp_direction_output(struct gpio_chip *chip, unsigned offset, + int value) +{ + ltq_stp_set(chip, offset, value); + + return 0; +} + +static struct gpio_chip ltq_stp_chip = { + .label = "ltq_stp", + .direction_output = ltq_stp_direction_output, + .set = ltq_stp_set, + .base = 48, + .ngpio = 24, + .can_sleep = 1, + .owner = THIS_MODULE, +}; + +static int ltq_stp_hw_init(void) +{ + /* the 3 pins used to control the external stp */ + ltq_gpio_request(4, 1, 0, 1, "stp-st"); + ltq_gpio_request(5, 1, 0, 1, "stp-d"); + ltq_gpio_request(6, 1, 0, 1, "stp-sh"); + + /* sane defaults */ + ltq_stp_w32(0, LTQ_STP_AR); + ltq_stp_w32(0, LTQ_STP_CPU0); + ltq_stp_w32(0, LTQ_STP_CPU1); + ltq_stp_w32(LTQ_STP_CON_SWU, LTQ_STP_CON0); + ltq_stp_w32(0, LTQ_STP_CON1); + + /* rising or falling edge */ + ltq_stp_w32_mask(LTQ_STP_EDGE_MASK, LTQ_STP_FALLING, LTQ_STP_CON0); + + /* per default stp 15-0 are set */ + ltq_stp_w32_mask(0, LTQ_STP_GROUP0, LTQ_STP_CON1); + + /* stp are update periodically by the FPI bus */ + ltq_stp_w32_mask(LTQ_STP_UPD_MASK, LTQ_STP_UPD_FPI, LTQ_STP_CON1); + + /* set stp update speed */ + ltq_stp_w32_mask(LTQ_STP_SPEED_MASK, LTQ_STP_8HZ, LTQ_STP_CON1); + + /* tell the hardware that pin (led) 0 and 1 are controlled + * by the dsl arc + */ + ltq_stp_w32_mask(0, LTQ_STP_ADSL_SRC, LTQ_STP_CON0); + + ltq_pmu_enable(PMU_LED); + return 0; +} + +static int __devinit ltq_stp_probe(struct platform_device *pdev) +{ + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + int ret = 0; + + if (!res) + return -ENOENT; + res = devm_request_mem_region(&pdev->dev, res->start, + resource_size(res), dev_name(&pdev->dev)); + if (!res) { + dev_err(&pdev->dev, "failed to request STP memory\n"); + return -EBUSY; + } + ltq_stp_membase = devm_ioremap_nocache(&pdev->dev, res->start, + resource_size(res)); + if (!ltq_stp_membase) { + dev_err(&pdev->dev, "failed to remap STP memory\n"); + return -ENOMEM; + } + ret = gpiochip_add(<q_stp_chip); + if (!ret) + ret = ltq_stp_hw_init(); + + return ret; +} + +static struct platform_driver ltq_stp_driver = { + .probe = ltq_stp_probe, + .driver = { + .name = "ltq_stp", + .owner = THIS_MODULE, + }, +}; + +int __init ltq_stp_init(void) +{ + int ret = 
platform_driver_register(<q_stp_driver); + + if (ret) + pr_info("ltq_stp: error registering platfom driver"); + return ret; +} + +postcore_initcall(ltq_stp_init); diff --git a/trunk/arch/mips/lantiq/xway/mach-easy50601.c b/trunk/arch/mips/lantiq/xway/mach-easy50601.c new file mode 100644 index 000000000000..d5aaf637ab19 --- /dev/null +++ b/trunk/arch/mips/lantiq/xway/mach-easy50601.c @@ -0,0 +1,57 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2010 John Crispin + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "../machtypes.h" +#include "devices.h" + +static struct mtd_partition easy50601_partitions[] = { + { + .name = "uboot", + .offset = 0x0, + .size = 0x10000, + }, + { + .name = "uboot_env", + .offset = 0x10000, + .size = 0x10000, + }, + { + .name = "linux", + .offset = 0x20000, + .size = 0xE0000, + }, + { + .name = "rootfs", + .offset = 0x100000, + .size = 0x300000, + }, +}; + +static struct physmap_flash_data easy50601_flash_data = { + .nr_parts = ARRAY_SIZE(easy50601_partitions), + .parts = easy50601_partitions, +}; + +static void __init easy50601_init(void) +{ + ltq_register_nor(&easy50601_flash_data); +} + +MIPS_MACHINE(LTQ_MACH_EASY50601, + "EASY50601", + "EASY50601 Eval Board", + easy50601_init); diff --git a/trunk/arch/mips/lantiq/xway/mach-easy50712.c b/trunk/arch/mips/lantiq/xway/mach-easy50712.c new file mode 100644 index 000000000000..ea5027b3239d --- /dev/null +++ b/trunk/arch/mips/lantiq/xway/mach-easy50712.c @@ -0,0 +1,74 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2010 John Crispin + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "../machtypes.h" +#include "devices.h" + +static struct mtd_partition easy50712_partitions[] = { + { + .name = "uboot", + .offset = 0x0, + .size = 0x10000, + }, + { + .name = "uboot_env", + .offset = 0x10000, + .size = 0x10000, + }, + { + .name = "linux", + .offset = 0x20000, + .size = 0xe0000, + }, + { + .name = "rootfs", + .offset = 0x100000, + .size = 0x300000, + }, +}; + +static struct physmap_flash_data easy50712_flash_data = { + .nr_parts = ARRAY_SIZE(easy50712_partitions), + .parts = easy50712_partitions, +}; + +static struct ltq_pci_data ltq_pci_data = { + .clock = PCI_CLOCK_INT, + .gpio = PCI_GNT1 | PCI_REQ1, + .irq = { + [14] = INT_NUM_IM0_IRL0 + 22, + }, +}; + +static struct ltq_eth_data ltq_eth_data = { + .mii_mode = PHY_INTERFACE_MODE_MII, +}; + +static void __init easy50712_init(void) +{ + ltq_register_gpio_stp(); + ltq_register_nor(&easy50712_flash_data); + ltq_register_pci(<q_pci_data); + ltq_register_etop(<q_eth_data); +} + +MIPS_MACHINE(LTQ_MACH_EASY50712, + "EASY50712", + "EASY50712 Eval Board", + easy50712_init); diff --git a/trunk/arch/mips/lantiq/xway/pmu.c b/trunk/arch/mips/lantiq/xway/pmu.c new file mode 100644 index 000000000000..9d69f01e352b --- /dev/null +++ b/trunk/arch/mips/lantiq/xway/pmu.c @@ -0,0 +1,70 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Copyright (C) 2010 John Crispin + */ + +#include +#include +#include +#include + +#include + +/* PMU - the power management unit allows us to turn part of the core + * on and off + */ + +/* the enable / disable registers */ +#define LTQ_PMU_PWDCR 0x1C +#define LTQ_PMU_PWDSR 0x20 + +#define ltq_pmu_w32(x, y) ltq_w32((x), ltq_pmu_membase + (y)) +#define ltq_pmu_r32(x) ltq_r32(ltq_pmu_membase + (x)) + +static struct resource ltq_pmu_resource = { + .name = "pmu", + .start = LTQ_PMU_BASE_ADDR, + .end = LTQ_PMU_BASE_ADDR + LTQ_PMU_SIZE - 1, + .flags = IORESOURCE_MEM, +}; + +static void __iomem *ltq_pmu_membase; + +void ltq_pmu_enable(unsigned int module) +{ + int err = 1000000; + + ltq_pmu_w32(ltq_pmu_r32(LTQ_PMU_PWDCR) & ~module, LTQ_PMU_PWDCR); + do {} while (--err && (ltq_pmu_r32(LTQ_PMU_PWDSR) & module)); + + if (!err) + panic("activating PMU module failed!\n"); +} +EXPORT_SYMBOL(ltq_pmu_enable); + +void ltq_pmu_disable(unsigned int module) +{ + ltq_pmu_w32(ltq_pmu_r32(LTQ_PMU_PWDCR) | module, LTQ_PMU_PWDCR); +} +EXPORT_SYMBOL(ltq_pmu_disable); + +int __init ltq_pmu_init(void) +{ + if (insert_resource(&iomem_resource, <q_pmu_resource) < 0) + panic("Failed to insert pmu memory\n"); + + if (request_mem_region(ltq_pmu_resource.start, + resource_size(<q_pmu_resource), "pmu") < 0) + panic("Failed to request pmu memory\n"); + + ltq_pmu_membase = ioremap_nocache(ltq_pmu_resource.start, + resource_size(<q_pmu_resource)); + if (!ltq_pmu_membase) + panic("Failed to remap pmu memory\n"); + return 0; +} + +core_initcall(ltq_pmu_init); diff --git a/trunk/arch/mips/lantiq/xway/prom-ase.c b/trunk/arch/mips/lantiq/xway/prom-ase.c new file mode 100644 index 000000000000..abe49f4db57f --- /dev/null +++ b/trunk/arch/mips/lantiq/xway/prom-ase.c @@ -0,0 +1,39 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2010 John Crispin + */ + +#include +#include +#include +#include + +#include + +#include "../prom.h" + +#define SOC_AMAZON_SE "Amazon_SE" + +#define PART_SHIFT 12 +#define PART_MASK 0x0FFFFFFF +#define REV_SHIFT 28 +#define REV_MASK 0xF0000000 + +void __init ltq_soc_detect(struct ltq_soc_info *i) +{ + i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT; + i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT; + switch (i->partnum) { + case SOC_ID_AMAZON_SE: + i->name = SOC_AMAZON_SE; + i->type = SOC_TYPE_AMAZON_SE; + break; + + default: + unreachable(); + break; + } +} diff --git a/trunk/arch/mips/lantiq/xway/prom-xway.c b/trunk/arch/mips/lantiq/xway/prom-xway.c new file mode 100644 index 000000000000..1686692ac24d --- /dev/null +++ b/trunk/arch/mips/lantiq/xway/prom-xway.c @@ -0,0 +1,54 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Copyright (C) 2010 John Crispin + */ + +#include +#include +#include +#include + +#include + +#include "../prom.h" + +#define SOC_DANUBE "Danube" +#define SOC_TWINPASS "Twinpass" +#define SOC_AR9 "AR9" + +#define PART_SHIFT 12 +#define PART_MASK 0x0FFFFFFF +#define REV_SHIFT 28 +#define REV_MASK 0xF0000000 + +void __init ltq_soc_detect(struct ltq_soc_info *i) +{ + i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT; + i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT; + switch (i->partnum) { + case SOC_ID_DANUBE1: + case SOC_ID_DANUBE2: + i->name = SOC_DANUBE; + i->type = SOC_TYPE_DANUBE; + break; + + case SOC_ID_TWINPASS: + i->name = SOC_TWINPASS; + i->type = SOC_TYPE_DANUBE; + break; + + case SOC_ID_ARX188: + case SOC_ID_ARX168: + case SOC_ID_ARX182: + i->name = SOC_AR9; + i->type = SOC_TYPE_AR9; + break; + + default: + unreachable(); + break; + } +} diff --git a/trunk/arch/mips/lantiq/xway/reset.c b/trunk/arch/mips/lantiq/xway/reset.c new file mode 100644 index 000000000000..a1be36d0e490 --- /dev/null +++ b/trunk/arch/mips/lantiq/xway/reset.c @@ -0,0 +1,91 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2010 John Crispin + */ + +#include +#include +#include +#include +#include +#include + +#include + +#define ltq_rcu_w32(x, y) ltq_w32((x), ltq_rcu_membase + (y)) +#define ltq_rcu_r32(x) ltq_r32(ltq_rcu_membase + (x)) + +/* register definitions */ +#define LTQ_RCU_RST 0x0010 +#define LTQ_RCU_RST_ALL 0x40000000 + +#define LTQ_RCU_RST_STAT 0x0014 +#define LTQ_RCU_STAT_SHIFT 26 + +static struct resource ltq_rcu_resource = { + .name = "rcu", + .start = LTQ_RCU_BASE_ADDR, + .end = LTQ_RCU_BASE_ADDR + LTQ_RCU_SIZE - 1, + .flags = IORESOURCE_MEM, +}; + +/* remapped base addr of the reset control unit */ +static void __iomem *ltq_rcu_membase; + +/* This function is used by the watchdog driver */ +int ltq_reset_cause(void) +{ + u32 val = ltq_rcu_r32(LTQ_RCU_RST_STAT); + return val >> LTQ_RCU_STAT_SHIFT; +} +EXPORT_SYMBOL_GPL(ltq_reset_cause); + +static void ltq_machine_restart(char *command) +{ + pr_notice("System restart\n"); + local_irq_disable(); + ltq_rcu_w32(ltq_rcu_r32(LTQ_RCU_RST) | LTQ_RCU_RST_ALL, LTQ_RCU_RST); + unreachable(); +} + +static void ltq_machine_halt(void) +{ + pr_notice("System halted.\n"); + local_irq_disable(); + unreachable(); +} + +static void ltq_machine_power_off(void) +{ + pr_notice("Please turn off the power now.\n"); + local_irq_disable(); + unreachable(); +} + +static int __init mips_reboot_setup(void) +{ + /* insert and request the memory region */ + if (insert_resource(&iomem_resource, <q_rcu_resource) < 0) + panic("Failed to insert rcu memory\n"); + + if (request_mem_region(ltq_rcu_resource.start, + resource_size(<q_rcu_resource), "rcu") < 0) + panic("Failed to request rcu memory\n"); + + /* remap rcu register range */ + ltq_rcu_membase = ioremap_nocache(ltq_rcu_resource.start, + resource_size(<q_rcu_resource)); + if (!ltq_rcu_membase) + panic("Failed to remap rcu memory\n"); + + _machine_restart = ltq_machine_restart; + _machine_halt = ltq_machine_halt; + pm_power_off = ltq_machine_power_off; + + return 0; +} + +arch_initcall(mips_reboot_setup); diff --git a/trunk/arch/mips/lantiq/xway/setup-ase.c b/trunk/arch/mips/lantiq/xway/setup-ase.c new file mode 100644 index 000000000000..f6f326798a39 --- /dev/null +++ b/trunk/arch/mips/lantiq/xway/setup-ase.c @@ -0,0 +1,19 
@@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2011 John Crispin + */ + +#include + +#include "../prom.h" +#include "devices.h" + +void __init ltq_soc_setup(void) +{ + ltq_register_ase_asc(); + ltq_register_gpio(); + ltq_register_wdt(); +} diff --git a/trunk/arch/mips/lantiq/xway/setup-xway.c b/trunk/arch/mips/lantiq/xway/setup-xway.c new file mode 100644 index 000000000000..c292f643a858 --- /dev/null +++ b/trunk/arch/mips/lantiq/xway/setup-xway.c @@ -0,0 +1,20 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2011 John Crispin + */ + +#include + +#include "../prom.h" +#include "devices.h" + +void __init ltq_soc_setup(void) +{ + ltq_register_asc(0); + ltq_register_asc(1); + ltq_register_gpio(); + ltq_register_wdt(); +} diff --git a/trunk/arch/mips/lib/Makefile b/trunk/arch/mips/lib/Makefile index 2adead5a8a37..b2cad4fd5fc4 100644 --- a/trunk/arch/mips/lib/Makefile +++ b/trunk/arch/mips/lib/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_CPU_TX39XX) += r3k_dump_tlb.o obj-$(CONFIG_CPU_TX49XX) += dump_tlb.o obj-$(CONFIG_CPU_VR41XX) += dump_tlb.o obj-$(CONFIG_CPU_CAVIUM_OCTEON) += dump_tlb.o +obj-$(CONFIG_CPU_XLR) += dump_tlb.o # libgcc-style stuff needed in the kernel obj-y += ashldi3.o ashrdi3.o cmpdi2.o lshrdi3.o ucmpdi2.o diff --git a/trunk/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c b/trunk/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c index 8c807c965199..0cb1b9760e34 100644 --- a/trunk/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c +++ b/trunk/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c @@ -201,8 +201,6 @@ static struct clocksource clocksource_mfgpt = { .rating = 120, /* Functional for real use, but not desired */ .read = mfgpt_read, .mask = CLOCKSOURCE_MASK(32), - .mult = 0, - .shift = 22, }; int __init init_mfgpt_clocksource(void) @@ -210,8 +208,7 @@ int __init init_mfgpt_clocksource(void) if (num_possible_cpus() > 1) /* MFGPT does not scale! 
*/ return 0; - clocksource_mfgpt.mult = clocksource_hz2mult(MFGPT_TICK_RATE, 22); - return clocksource_register(&clocksource_mfgpt); + return clocksource_register_hz(&clocksource_mfgpt, MFGPT_TICK_RATE); } arch_initcall(init_mfgpt_clocksource); diff --git a/trunk/arch/mips/loongson/common/env.c b/trunk/arch/mips/loongson/common/env.c index 11b193f848f8..d93830ad6113 100644 --- a/trunk/arch/mips/loongson/common/env.c +++ b/trunk/arch/mips/loongson/common/env.c @@ -29,9 +29,10 @@ unsigned long memsize, highmemsize; #define parse_even_earlier(res, option, p) \ do { \ - int ret; \ + unsigned int tmp __maybe_unused; \ + \ if (strncmp(option, (char *)p, strlen(option)) == 0) \ - ret = strict_strtol((char *)p + strlen(option"="), 10, &res); \ + tmp = strict_strtol((char *)p + strlen(option"="), 10, &res); \ } while (0) void __init prom_init_env(void) diff --git a/trunk/arch/mips/mm/Makefile b/trunk/arch/mips/mm/Makefile index d679c772d082..4d8c1623eee2 100644 --- a/trunk/arch/mips/mm/Makefile +++ b/trunk/arch/mips/mm/Makefile @@ -3,7 +3,8 @@ # obj-y += cache.o dma-default.o extable.o fault.o \ - init.o tlbex.o tlbex-fault.o uasm.o page.o + init.o mmap.o tlbex.o tlbex-fault.o uasm.o \ + page.o obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o obj-$(CONFIG_64BIT) += pgtable-64.o @@ -29,6 +30,7 @@ obj-$(CONFIG_CPU_TX39XX) += c-tx39.o tlb-r3k.o obj-$(CONFIG_CPU_TX49XX) += c-r4k.o cex-gen.o tlb-r4k.o obj-$(CONFIG_CPU_VR41XX) += c-r4k.o cex-gen.o tlb-r4k.o obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o +obj-$(CONFIG_CPU_XLR) += c-r4k.o tlb-r4k.o cex-gen.o obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o diff --git a/trunk/arch/mips/mm/c-r4k.c b/trunk/arch/mips/mm/c-r4k.c index b4923a75cb4b..d9bc5d3593b6 100644 --- a/trunk/arch/mips/mm/c-r4k.c +++ b/trunk/arch/mips/mm/c-r4k.c @@ -1006,6 +1006,7 @@ static void __cpuinit probe_pcache(void) case CPU_25KF: case CPU_SB1: case CPU_SB1A: + case CPU_XLR: c->dcache.flags |= MIPS_CACHE_PINDEX; break; @@ -1075,7 +1076,6 @@ static int __cpuinit probe_scache(void) unsigned long flags, addr, begin, end, pow2; unsigned int config = read_c0_config(); struct cpuinfo_mips *c = ¤t_cpu_data; - int tmp; if (config & CONF_SC) return 0; @@ -1108,7 +1108,6 @@ static int __cpuinit probe_scache(void) /* Now search for the wrap around point. */ pow2 = (128 * 1024); - tmp = 0; for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) { cache_op(Index_Load_Tag_SD, addr); __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */ diff --git a/trunk/arch/mips/mm/mmap.c b/trunk/arch/mips/mm/mmap.c new file mode 100644 index 000000000000..ae3c20a9556e --- /dev/null +++ b/trunk/arch/mips/mm/mmap.c @@ -0,0 +1,122 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 2011 Wind River Systems, + * written by Ralf Baechle + */ +#include +#include +#include +#include +#include +#include + +unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ + +EXPORT_SYMBOL(shm_align_mask); + +#define COLOUR_ALIGN(addr,pgoff) \ + ((((addr) + shm_align_mask) & ~shm_align_mask) + \ + (((pgoff) << PAGE_SHIFT) & shm_align_mask)) + +unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct vm_area_struct * vmm; + int do_color_align; + + if (len > TASK_SIZE) + return -ENOMEM; + + if (flags & MAP_FIXED) { + /* Even MAP_FIXED mappings must reside within TASK_SIZE. */ + if (TASK_SIZE - len < addr) + return -EINVAL; + + /* + * We do not accept a shared mapping if it would violate + * cache aliasing constraints. + */ + if ((flags & MAP_SHARED) && + ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) + return -EINVAL; + return addr; + } + + do_color_align = 0; + if (filp || (flags & MAP_SHARED)) + do_color_align = 1; + if (addr) { + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); + else + addr = PAGE_ALIGN(addr); + vmm = find_vma(current->mm, addr); + if (TASK_SIZE - len >= addr && + (!vmm || addr + len <= vmm->vm_start)) + return addr; + } + addr = current->mm->mmap_base; + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); + else + addr = PAGE_ALIGN(addr); + + for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) { + /* At this point: (!vmm || addr < vmm->vm_end). */ + if (TASK_SIZE - len < addr) + return -ENOMEM; + if (!vmm || addr + len <= vmm->vm_start) + return addr; + addr = vmm->vm_end; + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); + } +} + +void arch_pick_mmap_layout(struct mm_struct *mm) +{ + unsigned long random_factor = 0UL; + + if (current->flags & PF_RANDOMIZE) { + random_factor = get_random_int(); + random_factor = random_factor << PAGE_SHIFT; + if (TASK_IS_32BIT_ADDR) + random_factor &= 0xfffffful; + else + random_factor &= 0xffffffful; + } + + mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; +} + +static inline unsigned long brk_rnd(void) +{ + unsigned long rnd = get_random_int(); + + rnd = rnd << PAGE_SHIFT; + /* 8MB for 32bit, 256MB for 64bit */ + if (TASK_IS_32BIT_ADDR) + rnd = rnd & 0x7ffffful; + else + rnd = rnd & 0xffffffful; + + return rnd; +} + +unsigned long arch_randomize_brk(struct mm_struct *mm) +{ + unsigned long base = mm->brk; + unsigned long ret; + + ret = PAGE_ALIGN(base + brk_rnd()); + + if (ret < mm->brk) + return mm->brk; + + return ret; +} diff --git a/trunk/arch/mips/mm/tlbex.c b/trunk/arch/mips/mm/tlbex.c index 5ef294fbb6e7..424ed4b92e6d 100644 --- a/trunk/arch/mips/mm/tlbex.c +++ b/trunk/arch/mips/mm/tlbex.c @@ -404,6 +404,7 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l, case CPU_5KC: case CPU_TX49XX: case CPU_PR4450: + case CPU_XLR: uasm_i_nop(p); tlbw(p); break; @@ -1151,8 +1152,8 @@ static void __cpuinit build_r4000_tlb_refill_handler(void) struct uasm_reloc *r = relocs; u32 *f; unsigned int final_len; - struct mips_huge_tlb_info htlb_info; - enum vmalloc64_mode vmalloc_mode; + struct mips_huge_tlb_info htlb_info __maybe_unused; + enum vmalloc64_mode vmalloc_mode __maybe_unused; memset(tlb_handler, 0, sizeof(tlb_handler)); memset(labels, 0, sizeof(labels)); diff --git a/trunk/arch/mips/mti-malta/malta-init.c b/trunk/arch/mips/mti-malta/malta-init.c index 
414f0c99b196..31180c321a1a 100644 --- a/trunk/arch/mips/mti-malta/malta-init.c +++ b/trunk/arch/mips/mti-malta/malta-init.c @@ -193,8 +193,6 @@ extern struct plat_smp_ops msmtc_smp_ops; void __init prom_init(void) { - int result; - prom_argc = fw_arg0; _prom_argv = (int *) fw_arg1; _prom_envp = (int *) fw_arg2; @@ -360,20 +358,14 @@ void __init prom_init(void) #ifdef CONFIG_SERIAL_8250_CONSOLE console_config(); #endif - /* Early detection of CMP support */ - result = gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ); - #ifdef CONFIG_MIPS_CMP - if (result) + /* Early detection of CMP support */ + if (gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ)) register_smp_ops(&cmp_smp_ops); + else #endif #ifdef CONFIG_MIPS_MT_SMP -#ifdef CONFIG_MIPS_CMP - if (!result) register_smp_ops(&vsmp_smp_ops); -#else - register_smp_ops(&vsmp_smp_ops); -#endif #endif #ifdef CONFIG_MIPS_MT_SMTC register_smp_ops(&msmtc_smp_ops); diff --git a/trunk/arch/mips/mti-malta/malta-int.c b/trunk/arch/mips/mti-malta/malta-int.c index 9027061f0ead..1d36c511a7a5 100644 --- a/trunk/arch/mips/mti-malta/malta-int.c +++ b/trunk/arch/mips/mti-malta/malta-int.c @@ -56,7 +56,6 @@ static DEFINE_RAW_SPINLOCK(mips_irq_lock); static inline int mips_pcibios_iack(void) { int irq; - u32 dummy; /* * Determine highest priority pending interrupt by performing @@ -83,7 +82,7 @@ static inline int mips_pcibios_iack(void) BONITO_PCIMAP_CFG = 0x20000; /* Flush Bonito register block */ - dummy = BONITO_PCIMAP_CFG; + (void) BONITO_PCIMAP_CFG; iob(); /* sync */ irq = __raw_readl((u32 *)_pcictrl_bonito_pcicfg); @@ -309,6 +308,8 @@ static void ipi_call_dispatch(void) static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) { + scheduler_ipi(); + return IRQ_HANDLED; } diff --git a/trunk/arch/mips/netlogic/Kconfig b/trunk/arch/mips/netlogic/Kconfig new file mode 100644 index 000000000000..a5ca743613f2 --- /dev/null +++ b/trunk/arch/mips/netlogic/Kconfig @@ -0,0 +1,5 @@ +config NLM_COMMON + bool + +config NLM_XLR + bool diff --git a/trunk/arch/mips/netlogic/xlr/Makefile b/trunk/arch/mips/netlogic/xlr/Makefile new file mode 100644 index 000000000000..9bd3f731f62e --- /dev/null +++ b/trunk/arch/mips/netlogic/xlr/Makefile @@ -0,0 +1,5 @@ +obj-y += setup.o platform.o irq.o setup.o time.o +obj-$(CONFIG_SMP) += smp.o smpboot.o +obj-$(CONFIG_EARLY_PRINTK) += xlr_console.o + +EXTRA_CFLAGS += -Werror diff --git a/trunk/arch/mips/netlogic/xlr/irq.c b/trunk/arch/mips/netlogic/xlr/irq.c new file mode 100644 index 000000000000..1446d58e364c --- /dev/null +++ b/trunk/arch/mips/netlogic/xlr/irq.c @@ -0,0 +1,300 @@ +/* + * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights + * reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the NetLogic + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include +#include + +static u64 nlm_irq_mask; +static DEFINE_SPINLOCK(nlm_pic_lock); + +static void xlr_pic_enable(struct irq_data *d) +{ + nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); + unsigned long flags; + nlm_reg_t reg; + int irq = d->irq; + + WARN(!PIC_IRQ_IS_IRT(irq), "Bad irq %d", irq); + + spin_lock_irqsave(&nlm_pic_lock, flags); + reg = netlogic_read_reg(mmio, PIC_IRT_1_BASE + irq - PIC_IRQ_BASE); + netlogic_write_reg(mmio, PIC_IRT_1_BASE + irq - PIC_IRQ_BASE, + reg | (1 << 6) | (1 << 30) | (1 << 31)); + spin_unlock_irqrestore(&nlm_pic_lock, flags); +} + +static void xlr_pic_mask(struct irq_data *d) +{ + nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); + unsigned long flags; + nlm_reg_t reg; + int irq = d->irq; + + WARN(!PIC_IRQ_IS_IRT(irq), "Bad irq %d", irq); + + spin_lock_irqsave(&nlm_pic_lock, flags); + reg = netlogic_read_reg(mmio, PIC_IRT_1_BASE + irq - PIC_IRQ_BASE); + netlogic_write_reg(mmio, PIC_IRT_1_BASE + irq - PIC_IRQ_BASE, + reg | (1 << 6) | (1 << 30) | (0 << 31)); + spin_unlock_irqrestore(&nlm_pic_lock, flags); +} + +#ifdef CONFIG_PCI +/* Extra ACK needed for XLR on chip PCI controller */ +static void xlr_pci_ack(struct irq_data *d) +{ + nlm_reg_t *pci_mmio = netlogic_io_mmio(NETLOGIC_IO_PCIX_OFFSET); + + netlogic_read_reg(pci_mmio, (0x140 >> 2)); +} + +/* Extra ACK needed for XLS on chip PCIe controller */ +static void xls_pcie_ack(struct irq_data *d) +{ + nlm_reg_t *pcie_mmio_le = netlogic_io_mmio(NETLOGIC_IO_PCIE_1_OFFSET); + + switch (d->irq) { + case PIC_PCIE_LINK0_IRQ: + netlogic_write_reg(pcie_mmio_le, (0x90 >> 2), 0xffffffff); + break; + case PIC_PCIE_LINK1_IRQ: + netlogic_write_reg(pcie_mmio_le, (0x94 >> 2), 0xffffffff); + break; + case PIC_PCIE_LINK2_IRQ: + netlogic_write_reg(pcie_mmio_le, (0x190 >> 2), 0xffffffff); + break; + case PIC_PCIE_LINK3_IRQ: + netlogic_write_reg(pcie_mmio_le, (0x194 >> 2), 0xffffffff); + break; + } +} + +/* For XLS B silicon, the 3,4 PCI interrupts are different */ +static void xls_pcie_ack_b(struct irq_data *d) +{ + nlm_reg_t *pcie_mmio_le = netlogic_io_mmio(NETLOGIC_IO_PCIE_1_OFFSET); + + switch (d->irq) { + case PIC_PCIE_LINK0_IRQ: + netlogic_write_reg(pcie_mmio_le, (0x90 >> 2), 0xffffffff); + break; + case PIC_PCIE_LINK1_IRQ: + netlogic_write_reg(pcie_mmio_le, (0x94 >> 2), 0xffffffff); + break; + case PIC_PCIE_XLSB0_LINK2_IRQ: + netlogic_write_reg(pcie_mmio_le, (0x190 >> 2), 0xffffffff); + break; + case PIC_PCIE_XLSB0_LINK3_IRQ: + netlogic_write_reg(pcie_mmio_le, (0x194 >> 2), 0xffffffff); + break; + } +} +#endif + +static void xlr_pic_ack(struct irq_data *d) +{ + unsigned long flags; + nlm_reg_t *mmio; + int irq = d->irq; + void *hd = 
irq_data_get_irq_handler_data(d); + + WARN(!PIC_IRQ_IS_IRT(irq), "Bad irq %d", irq); + + if (hd) { + void (*extra_ack)(void *) = hd; + extra_ack(d); + } + mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); + spin_lock_irqsave(&nlm_pic_lock, flags); + netlogic_write_reg(mmio, PIC_INT_ACK, (1 << (irq - PIC_IRQ_BASE))); + spin_unlock_irqrestore(&nlm_pic_lock, flags); +} + +/* + * This chip definition handles interrupts routed thru the XLR + * hardware PIC, currently IRQs 8-39 are mapped to hardware intr + * 0-31 wired the XLR PIC + */ +static struct irq_chip xlr_pic = { + .name = "XLR-PIC", + .irq_enable = xlr_pic_enable, + .irq_mask = xlr_pic_mask, + .irq_ack = xlr_pic_ack, +}; + +static void rsvd_irq_handler(struct irq_data *d) +{ + WARN(d->irq >= PIC_IRQ_BASE, "Bad irq %d", d->irq); +} + +/* + * Chip definition for CPU originated interrupts(timer, msg) and + * IPIs + */ +struct irq_chip nlm_cpu_intr = { + .name = "XLR-CPU-INTR", + .irq_enable = rsvd_irq_handler, + .irq_mask = rsvd_irq_handler, + .irq_ack = rsvd_irq_handler, +}; + +void __init init_xlr_irqs(void) +{ + nlm_reg_t *mmio = netlogic_io_mmio(NETLOGIC_IO_PIC_OFFSET); + uint32_t thread_mask = 1; + int level, i; + + pr_info("Interrupt thread mask [%x]\n", thread_mask); + for (i = 0; i < PIC_NUM_IRTS; i++) { + level = PIC_IRQ_IS_EDGE_TRIGGERED(i); + + /* Bind all PIC irqs to boot cpu */ + netlogic_write_reg(mmio, PIC_IRT_0_BASE + i, thread_mask); + + /* + * Use local scheduling and high polarity for all IRTs + * Invalidate all IRTs, by default + */ + netlogic_write_reg(mmio, PIC_IRT_1_BASE + i, + (level << 30) | (1 << 6) | (PIC_IRQ_BASE + i)); + } + + /* Make all IRQs as level triggered by default */ + for (i = 0; i < NR_IRQS; i++) { + if (PIC_IRQ_IS_IRT(i)) + irq_set_chip_and_handler(i, &xlr_pic, handle_level_irq); + else + irq_set_chip_and_handler(i, &nlm_cpu_intr, + handle_level_irq); + } +#ifdef CONFIG_SMP + irq_set_chip_and_handler(IRQ_IPI_SMP_FUNCTION, &nlm_cpu_intr, + nlm_smp_function_ipi_handler); + irq_set_chip_and_handler(IRQ_IPI_SMP_RESCHEDULE, &nlm_cpu_intr, + nlm_smp_resched_ipi_handler); + nlm_irq_mask |= + ((1ULL << IRQ_IPI_SMP_FUNCTION) | (1ULL << IRQ_IPI_SMP_RESCHEDULE)); +#endif + +#ifdef CONFIG_PCI + /* + * For PCI interrupts, we need to ack the PIC controller too, overload + * irq handler data to do this + */ + if (nlm_chip_is_xls()) { + if (nlm_chip_is_xls_b()) { + irq_set_handler_data(PIC_PCIE_LINK0_IRQ, + xls_pcie_ack_b); + irq_set_handler_data(PIC_PCIE_LINK1_IRQ, + xls_pcie_ack_b); + irq_set_handler_data(PIC_PCIE_XLSB0_LINK2_IRQ, + xls_pcie_ack_b); + irq_set_handler_data(PIC_PCIE_XLSB0_LINK3_IRQ, + xls_pcie_ack_b); + } else { + irq_set_handler_data(PIC_PCIE_LINK0_IRQ, xls_pcie_ack); + irq_set_handler_data(PIC_PCIE_LINK1_IRQ, xls_pcie_ack); + irq_set_handler_data(PIC_PCIE_LINK2_IRQ, xls_pcie_ack); + irq_set_handler_data(PIC_PCIE_LINK3_IRQ, xls_pcie_ack); + } + } else { + /* XLR PCI controller ACK */ + irq_set_handler_data(PIC_PCIE_XLSB0_LINK3_IRQ, xlr_pci_ack); + } +#endif + /* unmask all PIC related interrupts. 
If no handler is installed by the + * drivers, it'll just ack the interrupt and return + */ + for (i = PIC_IRT_FIRST_IRQ; i <= PIC_IRT_LAST_IRQ; i++) + nlm_irq_mask |= (1ULL << i); + + nlm_irq_mask |= (1ULL << IRQ_TIMER); +} + +void __init arch_init_irq(void) +{ + /* Initialize the irq descriptors */ + init_xlr_irqs(); + write_c0_eimr(nlm_irq_mask); +} + +void __cpuinit nlm_smp_irq_init(void) +{ + /* set interrupt mask for non-zero cpus */ + write_c0_eimr(nlm_irq_mask); +} + +asmlinkage void plat_irq_dispatch(void) +{ + uint64_t eirr; + int i; + + eirr = read_c0_eirr() & read_c0_eimr(); + if (!eirr) + return; + + /* no need of EIRR here, writing compare clears interrupt */ + if (eirr & (1 << IRQ_TIMER)) { + do_IRQ(IRQ_TIMER); + return; + } + + /* use dcltz: optimize below code */ + for (i = 63; i != -1; i--) { + if (eirr & (1ULL << i)) + break; + } + if (i == -1) { + pr_err("no interrupt !!\n"); + return; + } + + /* Ack eirr */ + write_c0_eirr(1ULL << i); + + do_IRQ(i); +} diff --git a/trunk/arch/mips/netlogic/xlr/platform.c b/trunk/arch/mips/netlogic/xlr/platform.c new file mode 100644 index 000000000000..609ec2534642 --- /dev/null +++ b/trunk/arch/mips/netlogic/xlr/platform.c @@ -0,0 +1,98 @@ +/* + * Copyright 2011, Netlogic Microsystems. + * Copyright 2004, Matt Porter + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +unsigned int nlm_xlr_uart_in(struct uart_port *p, int offset) +{ + nlm_reg_t *mmio; + unsigned int value; + + /* XLR uart does not need any mapping of regs */ + mmio = (nlm_reg_t *)(p->membase + (offset << p->regshift)); + value = netlogic_read_reg(mmio, 0); + + /* See XLR/XLS errata */ + if (offset == UART_MSR) + value ^= 0xF0; + else if (offset == UART_MCR) + value ^= 0x3; + + return value; +} + +void nlm_xlr_uart_out(struct uart_port *p, int offset, int value) +{ + nlm_reg_t *mmio; + + /* XLR uart does not need any mapping of regs */ + mmio = (nlm_reg_t *)(p->membase + (offset << p->regshift)); + + /* See XLR/XLS errata */ + if (offset == UART_MSR) + value ^= 0xF0; + else if (offset == UART_MCR) + value ^= 0x3; + + netlogic_write_reg(mmio, 0, value); +} + +#define PORT(_irq) \ + { \ + .irq = _irq, \ + .regshift = 2, \ + .iotype = UPIO_MEM32, \ + .flags = (UPF_SKIP_TEST | \ + UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF),\ + .uartclk = PIC_CLKS_PER_SEC, \ + .type = PORT_16550A, \ + .serial_in = nlm_xlr_uart_in, \ + .serial_out = nlm_xlr_uart_out, \ + } + +static struct plat_serial8250_port xlr_uart_data[] = { + PORT(PIC_UART_0_IRQ), + PORT(PIC_UART_1_IRQ), + {}, +}; + +static struct platform_device uart_device = { + .name = "serial8250", + .id = PLAT8250_DEV_PLATFORM, + .dev = { + .platform_data = xlr_uart_data, + }, +}; + +static int __init nlm_uart_init(void) +{ + nlm_reg_t *mmio; + + mmio = netlogic_io_mmio(NETLOGIC_IO_UART_0_OFFSET); + xlr_uart_data[0].membase = (void __iomem *)mmio; + xlr_uart_data[0].mapbase = CPHYSADDR((unsigned long)mmio); + + mmio = netlogic_io_mmio(NETLOGIC_IO_UART_1_OFFSET); + xlr_uart_data[1].membase = (void __iomem *)mmio; + xlr_uart_data[1].mapbase = CPHYSADDR((unsigned long)mmio); + + return platform_device_register(&uart_device); +} + +arch_initcall(nlm_uart_init); diff --git a/trunk/arch/mips/netlogic/xlr/setup.c b/trunk/arch/mips/netlogic/xlr/setup.c new file mode 100644 index 
000000000000..482802569e74 --- /dev/null +++ b/trunk/arch/mips/netlogic/xlr/setup.c @@ -0,0 +1,188 @@ +/* + * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights + * reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the NetLogic + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +unsigned long netlogic_io_base = (unsigned long)(DEFAULT_NETLOGIC_IO_BASE); +unsigned long nlm_common_ebase = 0x0; +struct psb_info nlm_prom_info; + +static void nlm_early_serial_setup(void) +{ + struct uart_port s; + nlm_reg_t *uart_base; + + uart_base = netlogic_io_mmio(NETLOGIC_IO_UART_0_OFFSET); + memset(&s, 0, sizeof(s)); + s.flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST; + s.iotype = UPIO_MEM32; + s.regshift = 2; + s.irq = PIC_UART_0_IRQ; + s.uartclk = PIC_CLKS_PER_SEC; + s.serial_in = nlm_xlr_uart_in; + s.serial_out = nlm_xlr_uart_out; + s.mapbase = (unsigned long)uart_base; + s.membase = (unsigned char __iomem *)uart_base; + early_serial_setup(&s); +} + +static void nlm_linux_exit(void) +{ + nlm_reg_t *mmio; + + mmio = netlogic_io_mmio(NETLOGIC_IO_GPIO_OFFSET); + /* trigger a chip reset by writing 1 to GPIO_SWRESET_REG */ + netlogic_write_reg(mmio, NETLOGIC_GPIO_SWRESET_REG, 1); + for ( ; ; ) + cpu_wait(); +} + +void __init plat_mem_setup(void) +{ + panic_timeout = 5; + _machine_restart = (void (*)(char *))nlm_linux_exit; + _machine_halt = nlm_linux_exit; + pm_power_off = nlm_linux_exit; +} + +const char *get_system_type(void) +{ + return "Netlogic XLR/XLS Series"; +} + +void __init prom_free_prom_memory(void) +{ + /* Nothing yet */ +} + +static void build_arcs_cmdline(int *argv) +{ + int i, remain, len; + char *arg; + + remain = sizeof(arcs_cmdline) - 1; + arcs_cmdline[0] = '\0'; + for (i = 0; argv[i] != 0; i++) { + arg = (char *)(long)argv[i]; + len = strlen(arg); + if (len + 1 > remain) + break; + strcat(arcs_cmdline, arg); + strcat(arcs_cmdline, " "); + remain -= len + 1; + } + + /* Add the default options here */ + if ((strstr(arcs_cmdline, "console=")) == NULL) { + arg = "console=ttyS0,38400 "; + len = strlen(arg); + if (len > remain) + goto fail; + strcat(arcs_cmdline, arg); + remain -= len; + } +#ifdef CONFIG_BLK_DEV_INITRD + if ((strstr(arcs_cmdline, "rdinit=")) == NULL) { + arg = "rdinit=/sbin/init "; + len = strlen(arg); + if (len > remain) + goto fail; + strcat(arcs_cmdline, arg); + remain -= len; + } +#endif + return; +fail: + panic("Cannot add %s, command line too big!", arg); +} + +static void prom_add_memory(void) +{ + struct nlm_boot_mem_map *bootm; + u64 start, size; + u64 pref_backup = 512; /* avoid pref walking beyond end */ + int i; + + bootm = (void *)(long)nlm_prom_info.psb_mem_map; + for (i = 0; i < bootm->nr_map; i++) { + if (bootm->map[i].type != BOOT_MEM_RAM) + continue; + start = bootm->map[i].addr; + size = bootm->map[i].size; + + /* Work around for using bootloader mem */ + if (i == 0 && start == 0 && size == 0x0c000000) + size = 0x0ff00000; + + add_memory_region(start, size - pref_backup, BOOT_MEM_RAM); + } +} + +void __init prom_init(void) +{ + int *argv, *envp; /* passed as 32 bit ptrs */ + struct psb_info *prom_infop; + + /* truncate to 32 bit and sign extend all args */ + argv = (int *)(long)(int)fw_arg1; + envp = (int *)(long)(int)fw_arg2; + prom_infop = (struct psb_info *)(long)(int)fw_arg3; + + nlm_prom_info = *prom_infop; + + nlm_early_serial_setup(); + build_arcs_cmdline(argv); + nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1)); + prom_add_memory(); + +#ifdef CONFIG_SMP + nlm_wakeup_secondary_cpus(nlm_prom_info.online_cpu_map); + register_smp_ops(&nlm_smp_ops); +#endif +} diff --git a/trunk/arch/mips/netlogic/xlr/smp.c b/trunk/arch/mips/netlogic/xlr/smp.c new file mode 100644 index 
000000000000..b495a7f1433b --- /dev/null +++ b/trunk/arch/mips/netlogic/xlr/smp.c @@ -0,0 +1,225 @@ +/* + * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights + * reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the NetLogic + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include +#include + +void core_send_ipi(int logical_cpu, unsigned int action) +{ + int cpu = cpu_logical_map(logical_cpu); + u32 tid = cpu & 0x3; + u32 pid = (cpu >> 2) & 0x07; + u32 ipi = (tid << 16) | (pid << 20); + + if (action & SMP_CALL_FUNCTION) + ipi |= IRQ_IPI_SMP_FUNCTION; + else if (action & SMP_RESCHEDULE_YOURSELF) + ipi |= IRQ_IPI_SMP_RESCHEDULE; + else + return; + + pic_send_ipi(ipi); +} + +void nlm_send_ipi_single(int cpu, unsigned int action) +{ + core_send_ipi(cpu, action); +} + +void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action) +{ + int cpu; + + for_each_cpu(cpu, mask) { + core_send_ipi(cpu, action); + } +} + +/* IRQ_IPI_SMP_FUNCTION Handler */ +void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc) +{ + smp_call_function_interrupt(); +} + +/* IRQ_IPI_SMP_RESCHEDULE handler */ +void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc) +{ + set_need_resched(); +} + +void nlm_common_ipi_handler(int irq, struct pt_regs *regs) +{ + if (irq == IRQ_IPI_SMP_FUNCTION) { + smp_call_function_interrupt(); + } else { + /* Announce that we are for reschduling */ + set_need_resched(); + } +} + +/* + * Called before going into mips code, early cpu init + */ +void nlm_early_init_secondary(void) +{ + write_c0_ebase((uint32_t)nlm_common_ebase); + /* TLB partition here later */ +} + +/* + * Code to run on secondary just after probing the CPU + */ +static void __cpuinit nlm_init_secondary(void) +{ + nlm_smp_irq_init(); +} + +void nlm_smp_finish(void) +{ +#ifdef notyet + nlm_common_msgring_cpu_init(); +#endif +} + +void nlm_cpus_done(void) +{ +} + +/* + * Boot all other cpus in the system, 
initialize them, and bring them into + * the boot function + */ +int nlm_cpu_unblock[NR_CPUS]; +int nlm_cpu_ready[NR_CPUS]; +unsigned long nlm_next_gp; +unsigned long nlm_next_sp; +cpumask_t phys_cpu_present_map; + +void nlm_boot_secondary(int logical_cpu, struct task_struct *idle) +{ + unsigned long gp = (unsigned long)task_thread_info(idle); + unsigned long sp = (unsigned long)__KSTK_TOS(idle); + int cpu = cpu_logical_map(logical_cpu); + + nlm_next_sp = sp; + nlm_next_gp = gp; + + /* barrier */ + __sync(); + nlm_cpu_unblock[cpu] = 1; +} + +void __init nlm_smp_setup(void) +{ + unsigned int boot_cpu; + int num_cpus, i; + + boot_cpu = hard_smp_processor_id(); + cpus_clear(phys_cpu_present_map); + + cpu_set(boot_cpu, phys_cpu_present_map); + __cpu_number_map[boot_cpu] = 0; + __cpu_logical_map[0] = boot_cpu; + cpu_set(0, cpu_possible_map); + + num_cpus = 1; + for (i = 0; i < NR_CPUS; i++) { + if (nlm_cpu_ready[i]) { + cpu_set(i, phys_cpu_present_map); + __cpu_number_map[i] = num_cpus; + __cpu_logical_map[num_cpus] = i; + cpu_set(num_cpus, cpu_possible_map); + ++num_cpus; + } + } + + pr_info("Phys CPU present map: %lx, possible map %lx\n", + (unsigned long)phys_cpu_present_map.bits[0], + (unsigned long)cpu_possible_map.bits[0]); + + pr_info("Detected %i Slave CPU(s)\n", num_cpus); +} + +void nlm_prepare_cpus(unsigned int max_cpus) +{ +} + +struct plat_smp_ops nlm_smp_ops = { + .send_ipi_single = nlm_send_ipi_single, + .send_ipi_mask = nlm_send_ipi_mask, + .init_secondary = nlm_init_secondary, + .smp_finish = nlm_smp_finish, + .cpus_done = nlm_cpus_done, + .boot_secondary = nlm_boot_secondary, + .smp_setup = nlm_smp_setup, + .prepare_cpus = nlm_prepare_cpus, +}; + +unsigned long secondary_entry_point; + +int nlm_wakeup_secondary_cpus(u32 wakeup_mask) +{ + unsigned int tid, pid, ipi, i, boot_cpu; + void *reset_vec; + + secondary_entry_point = (unsigned long)prom_pre_boot_secondary_cpus; + reset_vec = (void *)CKSEG1ADDR(0x1fc00000); + memcpy(reset_vec, nlm_boot_smp_nmi, 0x80); + boot_cpu = hard_smp_processor_id(); + + for (i = 0; i < NR_CPUS; i++) { + if (i == boot_cpu) + continue; + if (wakeup_mask & (1u << i)) { + tid = i & 0x3; + pid = (i >> 2) & 0x7; + ipi = (tid << 16) | (pid << 20) | (1 << 8); + pic_send_ipi(ipi); + } + } + + return 0; +} diff --git a/trunk/arch/mips/netlogic/xlr/smpboot.S b/trunk/arch/mips/netlogic/xlr/smpboot.S new file mode 100644 index 000000000000..b8e074402c99 --- /dev/null +++ b/trunk/arch/mips/netlogic/xlr/smpboot.S @@ -0,0 +1,94 @@ +/* + * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights + * reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the NetLogic + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
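core_send_ipi() and nlm_wakeup_secondary_cpus() above build the PIC IPI word from the physical CPU number; a hedged sketch of that bit layout (the helper name is illustrative, the field positions follow the code):

static inline u32 example_xlr_ipi_word(int phys_cpu, unsigned int irq, int nmi)
{
        u32 tid = phys_cpu & 0x3;               /* hardware thread within the core */
        u32 pid = (phys_cpu >> 2) & 0x7;        /* core number */

        /* bit 8 requests NMI delivery; the low bits carry the IRQ/vector */
        return (tid << 16) | (pid << 20) | (nmi ? (1 << 8) : 0) | irq;
}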
+ * + * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include + + +/* Don't jump to linux function from Bootloader stack. Change it + * here. Kernel might allocate bootloader memory before all the CPUs are + * brought up (eg: Inode cache region) and we better don't overwrite this + * memory + */ +NESTED(prom_pre_boot_secondary_cpus, 16, sp) + .set mips64 + mfc0 t0, $15, 1 # read ebase + andi t0, 0x1f # t0 has the processor_id() + sll t0, 2 # offset in cpu array + + PTR_LA t1, nlm_cpu_ready # mark CPU ready + PTR_ADDU t1, t0 + li t2, 1 + sw t2, 0(t1) + + PTR_LA t1, nlm_cpu_unblock + PTR_ADDU t1, t0 +1: lw t2, 0(t1) # wait till unblocked + beqz t2, 1b + nop + + PTR_LA t1, nlm_next_sp + PTR_L sp, 0(t1) + PTR_LA t1, nlm_next_gp + PTR_L gp, 0(t1) + + PTR_LA t0, nlm_early_init_secondary + jalr t0 + nop + + PTR_LA t0, smp_bootstrap + jr t0 + nop +END(prom_pre_boot_secondary_cpus) + +NESTED(nlm_boot_smp_nmi, 0, sp) + .set push + .set noat + .set mips64 + .set noreorder + + /* Clear the NMI and BEV bits */ + MFC0 k0, CP0_STATUS + li k1, 0xffb7ffff + and k0, k0, k1 + MTC0 k0, CP0_STATUS + + PTR_LA k1, secondary_entry_point + PTR_L k0, 0(k1) + jr k0 + nop + .set pop +END(nlm_boot_smp_nmi) diff --git a/trunk/arch/mips/netlogic/xlr/time.c b/trunk/arch/mips/netlogic/xlr/time.c new file mode 100644 index 000000000000..0d81b262593c --- /dev/null +++ b/trunk/arch/mips/netlogic/xlr/time.c @@ -0,0 +1,51 @@ +/* + * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights + * reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the NetLogic + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
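A C-level sketch (illustrative only) of the wake-up handshake that prom_pre_boot_secondary_cpus implements in assembly above, paired with nlm_boot_secondary(); the function name is hypothetical and volatile accesses stand in for the explicit __sync() barrier:

static void example_secondary_handshake(int cpu)
{
        nlm_cpu_ready[cpu] = 1;                         /* advertise this CPU to nlm_smp_setup() */

        while (*(volatile int *)&nlm_cpu_unblock[cpu] == 0)
                ;                                       /* spin until nlm_boot_secondary() releases us */

        /* switch to nlm_next_sp/nlm_next_gp, call nlm_early_init_secondary(),
         * then jump to smp_bootstrap -- kept in assembly because it changes the stack */
}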
IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +#include +#include +#include + +unsigned int __cpuinit get_c0_compare_int(void) +{ + return IRQ_TIMER; +} + +void __init plat_time_init(void) +{ + mips_hpt_frequency = nlm_prom_info.cpu_frequency; + pr_info("MIPS counter frequency [%ld]\n", + (unsigned long)mips_hpt_frequency); +} diff --git a/trunk/arch/mips/netlogic/xlr/xlr_console.c b/trunk/arch/mips/netlogic/xlr/xlr_console.c new file mode 100644 index 000000000000..759df0692201 --- /dev/null +++ b/trunk/arch/mips/netlogic/xlr/xlr_console.c @@ -0,0 +1,46 @@ +/* + * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights + * reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the NetLogic + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include + +void prom_putchar(char c) +{ + nlm_reg_t *mmio; + + mmio = netlogic_io_mmio(NETLOGIC_IO_UART_0_OFFSET); + while (netlogic_read_reg(mmio, 0x5) == 0) + ; + netlogic_write_reg(mmio, 0x0, c); +} diff --git a/trunk/arch/mips/pci/Makefile b/trunk/arch/mips/pci/Makefile index c9209ca6c8e7..4df879937446 100644 --- a/trunk/arch/mips/pci/Makefile +++ b/trunk/arch/mips/pci/Makefile @@ -41,6 +41,7 @@ obj-$(CONFIG_SIBYTE_SB1250) += fixup-sb1250.o pci-sb1250.o obj-$(CONFIG_SIBYTE_BCM112X) += fixup-sb1250.o pci-sb1250.o obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1480.o pci-bcm1480ht.o obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o +obj-$(CONFIG_SOC_XWAY) += pci-lantiq.o ops-lantiq.o obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o obj-$(CONFIG_TANBAC_TB0287) += fixup-tb0287.o @@ -55,6 +56,7 @@ obj-$(CONFIG_ZAO_CAPCELLA) += fixup-capcella.o obj-$(CONFIG_WR_PPMC) += fixup-wrppmc.o obj-$(CONFIG_MIKROTIK_RB532) += pci-rc32434.o ops-rc32434.o fixup-rc32434.o obj-$(CONFIG_CPU_CAVIUM_OCTEON) += pci-octeon.o pcie-octeon.o +obj-$(CONFIG_NLM_XLR) += pci-xlr.o ifdef CONFIG_PCI_MSI obj-$(CONFIG_CPU_CAVIUM_OCTEON) += msi-octeon.o diff --git a/trunk/arch/mips/pci/ops-lantiq.c b/trunk/arch/mips/pci/ops-lantiq.c new file mode 100644 index 000000000000..1f2afb55cc71 --- /dev/null +++ b/trunk/arch/mips/pci/ops-lantiq.c @@ -0,0 +1,116 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2010 John Crispin + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "pci-lantiq.h" + +#define LTQ_PCI_CFG_BUSNUM_SHF 16 +#define LTQ_PCI_CFG_DEVNUM_SHF 11 +#define LTQ_PCI_CFG_FUNNUM_SHF 8 + +#define PCI_ACCESS_READ 0 +#define PCI_ACCESS_WRITE 1 + +static int ltq_pci_config_access(unsigned char access_type, struct pci_bus *bus, + unsigned int devfn, unsigned int where, u32 *data) +{ + unsigned long cfg_base; + unsigned long flags; + u32 temp; + + /* we support slot from 0 to 15 dev_fn & 0x68 (AD29) is the + SoC itself */ + if ((bus->number != 0) || ((devfn & 0xf8) > 0x78) + || ((devfn & 0xf8) == 0) || ((devfn & 0xf8) == 0x68)) + return 1; + + spin_lock_irqsave(&ebu_lock, flags); + + cfg_base = (unsigned long) ltq_pci_mapped_cfg; + cfg_base |= (bus->number << LTQ_PCI_CFG_BUSNUM_SHF) | (devfn << + LTQ_PCI_CFG_FUNNUM_SHF) | (where & ~0x3); + + /* Perform access */ + if (access_type == PCI_ACCESS_WRITE) { + ltq_w32(swab32(*data), ((u32 *)cfg_base)); + } else { + *data = ltq_r32(((u32 *)(cfg_base))); + *data = swab32(*data); + } + wmb(); + + /* clean possible Master abort */ + cfg_base = (unsigned long) ltq_pci_mapped_cfg; + cfg_base |= (0x0 << LTQ_PCI_CFG_FUNNUM_SHF) + 4; + temp = ltq_r32(((u32 *)(cfg_base))); + temp = swab32(temp); + cfg_base = (unsigned long) ltq_pci_mapped_cfg; + cfg_base |= (0x68 << LTQ_PCI_CFG_FUNNUM_SHF) + 4; + ltq_w32(temp, ((u32 *)cfg_base)); + + spin_unlock_irqrestore(&ebu_lock, flags); + + if (((*data) == 0xffffffff) && (access_type == PCI_ACCESS_READ)) + return 1; + + return 0; +} + +int ltq_pci_read_config_dword(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + u32 data = 0; + + if (ltq_pci_config_access(PCI_ACCESS_READ, bus, devfn, where, &data)) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (size == 1) + *val = (data >> ((where & 3) << 3)) & 0xff; + else if (size == 2) + *val = (data >> ((where & 3) 
<< 3)) & 0xffff; + else + *val = data; + + return PCIBIOS_SUCCESSFUL; +} + +int ltq_pci_write_config_dword(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + u32 data = 0; + + if (size == 4) { + data = val; + } else { + if (ltq_pci_config_access(PCI_ACCESS_READ, bus, + devfn, where, &data)) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (size == 1) + data = (data & ~(0xff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + else if (size == 2) + data = (data & ~(0xffff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + } + + if (ltq_pci_config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data)) + return PCIBIOS_DEVICE_NOT_FOUND; + + return PCIBIOS_SUCCESSFUL; +} diff --git a/trunk/arch/mips/pci/pci-lantiq.c b/trunk/arch/mips/pci/pci-lantiq.c new file mode 100644 index 000000000000..603d7493e966 --- /dev/null +++ b/trunk/arch/mips/pci/pci-lantiq.c @@ -0,0 +1,297 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2010 John Crispin + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include "pci-lantiq.h" + +#define LTQ_PCI_CFG_BASE 0x17000000 +#define LTQ_PCI_CFG_SIZE 0x00008000 +#define LTQ_PCI_MEM_BASE 0x18000000 +#define LTQ_PCI_MEM_SIZE 0x02000000 +#define LTQ_PCI_IO_BASE 0x1AE00000 +#define LTQ_PCI_IO_SIZE 0x00200000 + +#define PCI_CR_FCI_ADDR_MAP0 0x00C0 +#define PCI_CR_FCI_ADDR_MAP1 0x00C4 +#define PCI_CR_FCI_ADDR_MAP2 0x00C8 +#define PCI_CR_FCI_ADDR_MAP3 0x00CC +#define PCI_CR_FCI_ADDR_MAP4 0x00D0 +#define PCI_CR_FCI_ADDR_MAP5 0x00D4 +#define PCI_CR_FCI_ADDR_MAP6 0x00D8 +#define PCI_CR_FCI_ADDR_MAP7 0x00DC +#define PCI_CR_CLK_CTRL 0x0000 +#define PCI_CR_PCI_MOD 0x0030 +#define PCI_CR_PC_ARB 0x0080 +#define PCI_CR_FCI_ADDR_MAP11hg 0x00E4 +#define PCI_CR_BAR11MASK 0x0044 +#define PCI_CR_BAR12MASK 0x0048 +#define PCI_CR_BAR13MASK 0x004C +#define PCI_CS_BASE_ADDR1 0x0010 +#define PCI_CR_PCI_ADDR_MAP11 0x0064 +#define PCI_CR_FCI_BURST_LENGTH 0x00E8 +#define PCI_CR_PCI_EOI 0x002C +#define PCI_CS_STS_CMD 0x0004 + +#define PCI_MASTER0_REQ_MASK_2BITS 8 +#define PCI_MASTER1_REQ_MASK_2BITS 10 +#define PCI_MASTER2_REQ_MASK_2BITS 12 +#define INTERNAL_ARB_ENABLE_BIT 0 + +#define LTQ_CGU_IFCCR 0x0018 +#define LTQ_CGU_PCICR 0x0034 + +#define ltq_pci_w32(x, y) ltq_w32((x), ltq_pci_membase + (y)) +#define ltq_pci_r32(x) ltq_r32(ltq_pci_membase + (x)) + +#define ltq_pci_cfg_w32(x, y) ltq_w32((x), ltq_pci_mapped_cfg + (y)) +#define ltq_pci_cfg_r32(x) ltq_r32(ltq_pci_mapped_cfg + (x)) + +struct ltq_pci_gpio_map { + int pin; + int alt0; + int alt1; + int dir; + char *name; +}; + +/* the pci core can make use of the following gpios */ +static struct ltq_pci_gpio_map ltq_pci_gpio_map[] = { + { 0, 1, 0, 0, "pci-exin0" }, + { 1, 1, 0, 0, "pci-exin1" }, + { 2, 1, 0, 0, "pci-exin2" }, + { 39, 1, 0, 0, "pci-exin3" }, + { 10, 1, 0, 0, "pci-exin4" }, + { 9, 1, 0, 0, "pci-exin5" }, + { 30, 1, 0, 1, "pci-gnt1" }, + { 23, 1, 0, 1, "pci-gnt2" }, + { 19, 1, 0, 1, "pci-gnt3" }, + { 38, 1, 0, 1, "pci-gnt4" }, + { 29, 1, 0, 0, "pci-req1" }, + { 31, 1, 0, 0, "pci-req2" }, + { 3, 1, 0, 0, "pci-req3" }, + { 37, 1, 0, 0, "pci-req4" }, +}; + +__iomem void *ltq_pci_mapped_cfg; +static __iomem void *ltq_pci_membase; + +int (*ltqpci_plat_dev_init)(struct pci_dev *dev) = NULL; + +/* Since the PCI REQ pins can be reused for other 
functionality, make it + possible to exclude those from interpretation by the PCI controller */ +static int ltq_pci_req_mask = 0xf; + +static int *ltq_pci_irq_map; + +struct pci_ops ltq_pci_ops = { + .read = ltq_pci_read_config_dword, + .write = ltq_pci_write_config_dword +}; + +static struct resource pci_io_resource = { + .name = "pci io space", + .start = LTQ_PCI_IO_BASE, + .end = LTQ_PCI_IO_BASE + LTQ_PCI_IO_SIZE - 1, + .flags = IORESOURCE_IO +}; + +static struct resource pci_mem_resource = { + .name = "pci memory space", + .start = LTQ_PCI_MEM_BASE, + .end = LTQ_PCI_MEM_BASE + LTQ_PCI_MEM_SIZE - 1, + .flags = IORESOURCE_MEM +}; + +static struct pci_controller ltq_pci_controller = { + .pci_ops = <q_pci_ops, + .mem_resource = &pci_mem_resource, + .mem_offset = 0x00000000UL, + .io_resource = &pci_io_resource, + .io_offset = 0x00000000UL, +}; + +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + if (ltqpci_plat_dev_init) + return ltqpci_plat_dev_init(dev); + + return 0; +} + +static u32 ltq_calc_bar11mask(void) +{ + u32 mem, bar11mask; + + /* BAR11MASK value depends on available memory on system. */ + mem = num_physpages * PAGE_SIZE; + bar11mask = (0x0ffffff0 & ~((1 << (fls(mem) - 1)) - 1)) | 8; + + return bar11mask; +} + +static void ltq_pci_setup_gpio(int gpio) +{ + int i; + for (i = 0; i < ARRAY_SIZE(ltq_pci_gpio_map); i++) { + if (gpio & (1 << i)) { + ltq_gpio_request(ltq_pci_gpio_map[i].pin, + ltq_pci_gpio_map[i].alt0, + ltq_pci_gpio_map[i].alt1, + ltq_pci_gpio_map[i].dir, + ltq_pci_gpio_map[i].name); + } + } + ltq_gpio_request(21, 0, 0, 1, "pci-reset"); + ltq_pci_req_mask = (gpio >> PCI_REQ_SHIFT) & PCI_REQ_MASK; +} + +static int __devinit ltq_pci_startup(struct ltq_pci_data *conf) +{ + u32 temp_buffer; + + /* set clock to 33Mhz */ + ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR); + ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR); + + /* external or internal clock ? */ + if (conf->clock) { + ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~(1 << 16), + LTQ_CGU_IFCCR); + ltq_cgu_w32((1 << 30), LTQ_CGU_PCICR); + } else { + ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | (1 << 16), + LTQ_CGU_IFCCR); + ltq_cgu_w32((1 << 31) | (1 << 30), LTQ_CGU_PCICR); + } + + /* setup pci clock and gpis used by pci */ + ltq_pci_setup_gpio(conf->gpio); + + /* enable auto-switching between PCI and EBU */ + ltq_pci_w32(0xa, PCI_CR_CLK_CTRL); + + /* busy, i.e. 
configuration is not done, PCI access has to be retried */ + ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_MOD) & ~(1 << 24), PCI_CR_PCI_MOD); + wmb(); + /* BUS Master/IO/MEM access */ + ltq_pci_cfg_w32(ltq_pci_cfg_r32(PCI_CS_STS_CMD) | 7, PCI_CS_STS_CMD); + + /* enable external 2 PCI masters */ + temp_buffer = ltq_pci_r32(PCI_CR_PC_ARB); + temp_buffer &= (~(ltq_pci_req_mask << 16)); + /* enable internal arbiter */ + temp_buffer |= (1 << INTERNAL_ARB_ENABLE_BIT); + /* enable internal PCI master reqest */ + temp_buffer &= (~(3 << PCI_MASTER0_REQ_MASK_2BITS)); + + /* enable EBU request */ + temp_buffer &= (~(3 << PCI_MASTER1_REQ_MASK_2BITS)); + + /* enable all external masters request */ + temp_buffer &= (~(3 << PCI_MASTER2_REQ_MASK_2BITS)); + ltq_pci_w32(temp_buffer, PCI_CR_PC_ARB); + wmb(); + + /* setup BAR memory regions */ + ltq_pci_w32(0x18000000, PCI_CR_FCI_ADDR_MAP0); + ltq_pci_w32(0x18400000, PCI_CR_FCI_ADDR_MAP1); + ltq_pci_w32(0x18800000, PCI_CR_FCI_ADDR_MAP2); + ltq_pci_w32(0x18c00000, PCI_CR_FCI_ADDR_MAP3); + ltq_pci_w32(0x19000000, PCI_CR_FCI_ADDR_MAP4); + ltq_pci_w32(0x19400000, PCI_CR_FCI_ADDR_MAP5); + ltq_pci_w32(0x19800000, PCI_CR_FCI_ADDR_MAP6); + ltq_pci_w32(0x19c00000, PCI_CR_FCI_ADDR_MAP7); + ltq_pci_w32(0x1ae00000, PCI_CR_FCI_ADDR_MAP11hg); + ltq_pci_w32(ltq_calc_bar11mask(), PCI_CR_BAR11MASK); + ltq_pci_w32(0, PCI_CR_PCI_ADDR_MAP11); + ltq_pci_w32(0, PCI_CS_BASE_ADDR1); + /* both TX and RX endian swap are enabled */ + ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_EOI) | 3, PCI_CR_PCI_EOI); + wmb(); + ltq_pci_w32(ltq_pci_r32(PCI_CR_BAR12MASK) | 0x80000000, + PCI_CR_BAR12MASK); + ltq_pci_w32(ltq_pci_r32(PCI_CR_BAR13MASK) | 0x80000000, + PCI_CR_BAR13MASK); + /*use 8 dw burst length */ + ltq_pci_w32(0x303, PCI_CR_FCI_BURST_LENGTH); + ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_MOD) | (1 << 24), PCI_CR_PCI_MOD); + wmb(); + + /* setup irq line */ + ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_CON) | 0xc, LTQ_EBU_PCC_CON); + ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, LTQ_EBU_PCC_IEN); + + /* toggle reset pin */ + __gpio_set_value(21, 0); + wmb(); + mdelay(1); + __gpio_set_value(21, 1); + return 0; +} + +int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + if (ltq_pci_irq_map[slot]) + return ltq_pci_irq_map[slot]; + printk(KERN_ERR "lq_pci: trying to map irq for unknown slot %d\n", + slot); + + return 0; +} + +static int __devinit ltq_pci_probe(struct platform_device *pdev) +{ + struct ltq_pci_data *ltq_pci_data = + (struct ltq_pci_data *) pdev->dev.platform_data; + pci_probe_only = 0; + ltq_pci_irq_map = ltq_pci_data->irq; + ltq_pci_membase = ioremap_nocache(PCI_CR_BASE_ADDR, PCI_CR_SIZE); + ltq_pci_mapped_cfg = + ioremap_nocache(LTQ_PCI_CFG_BASE, LTQ_PCI_CFG_BASE); + ltq_pci_controller.io_map_base = + (unsigned long)ioremap(LTQ_PCI_IO_BASE, LTQ_PCI_IO_SIZE - 1); + ltq_pci_startup(ltq_pci_data); + register_pci_controller(<q_pci_controller); + + return 0; +} + +static struct platform_driver +ltq_pci_driver = { + .probe = ltq_pci_probe, + .driver = { + .name = "ltq_pci", + .owner = THIS_MODULE, + }, +}; + +int __init pcibios_init(void) +{ + int ret = platform_driver_register(<q_pci_driver); + if (ret) + printk(KERN_INFO "ltq_pci: Error registering platfom driver!"); + return ret; +} + +arch_initcall(pcibios_init); diff --git a/trunk/arch/mips/pci/pci-lantiq.h b/trunk/arch/mips/pci/pci-lantiq.h new file mode 100644 index 000000000000..66bf6cd6be3c --- /dev/null +++ b/trunk/arch/mips/pci/pci-lantiq.h @@ -0,0 +1,18 @@ +/* + * This program is free software; you can redistribute it and/or 
modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2010 John Crispin + */ + +#ifndef _LTQ_PCI_H__ +#define _LTQ_PCI_H__ + +extern __iomem void *ltq_pci_mapped_cfg; +extern int ltq_pci_read_config_dword(struct pci_bus *bus, + unsigned int devfn, int where, int size, u32 *val); +extern int ltq_pci_write_config_dword(struct pci_bus *bus, + unsigned int devfn, int where, int size, u32 val); + +#endif diff --git a/trunk/arch/mips/pci/pci-xlr.c b/trunk/arch/mips/pci/pci-xlr.c new file mode 100644 index 000000000000..38fece16c435 --- /dev/null +++ b/trunk/arch/mips/pci/pci-xlr.c @@ -0,0 +1,214 @@ +/* + * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights + * reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the NetLogic + * license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
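Both the Lantiq and XLR configuration-space writers in this patch handle 1- and 2-byte writes the same way: read the aligned 32-bit dword, clear the addressed byte lanes, merge the new value, and write the dword back. A minimal sketch of that mask arithmetic (the helper name is illustrative):

static u32 example_merge_cfg_write(u32 dword, int where, int size, u32 val)
{
        int shift = (where & 3) << 3;           /* byte offset within the dword -> bit offset */
        u32 mask = (size == 1) ? 0xff : 0xffff;

        return (dword & ~(mask << shift)) | ((val & mask) << shift);
}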
+ */ + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +static void *pci_config_base; + +#define pci_cfg_addr(bus, devfn, off) (((bus) << 16) | ((devfn) << 8) | (off)) + +/* PCI ops */ +static inline u32 pci_cfg_read_32bit(struct pci_bus *bus, unsigned int devfn, + int where) +{ + u32 data; + u32 *cfgaddr; + + cfgaddr = (u32 *)(pci_config_base + + pci_cfg_addr(bus->number, devfn, where & ~3)); + data = *cfgaddr; + return cpu_to_le32(data); +} + +static inline void pci_cfg_write_32bit(struct pci_bus *bus, unsigned int devfn, + int where, u32 data) +{ + u32 *cfgaddr; + + cfgaddr = (u32 *)(pci_config_base + + pci_cfg_addr(bus->number, devfn, where & ~3)); + *cfgaddr = cpu_to_le32(data); +} + +static int nlm_pcibios_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + u32 data; + + if ((size == 2) && (where & 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + else if ((size == 4) && (where & 3)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + data = pci_cfg_read_32bit(bus, devfn, where); + + if (size == 1) + *val = (data >> ((where & 3) << 3)) & 0xff; + else if (size == 2) + *val = (data >> ((where & 3) << 3)) & 0xffff; + else + *val = data; + + return PCIBIOS_SUCCESSFUL; +} + + +static int nlm_pcibios_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + u32 data; + + if ((size == 2) && (where & 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + else if ((size == 4) && (where & 3)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + data = pci_cfg_read_32bit(bus, devfn, where); + + if (size == 1) + data = (data & ~(0xff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + else if (size == 2) + data = (data & ~(0xffff << ((where & 3) << 3))) | + (val << ((where & 3) << 3)); + else + data = val; + + pci_cfg_write_32bit(bus, devfn, where, data); + + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops nlm_pci_ops = { + .read = nlm_pcibios_read, + .write = nlm_pcibios_write +}; + +static struct resource nlm_pci_mem_resource = { + .name = "XLR PCI MEM", + .start = 0xd0000000UL, /* 256MB PCI mem @ 0xd000_0000 */ + .end = 0xdfffffffUL, + .flags = IORESOURCE_MEM, +}; + +static struct resource nlm_pci_io_resource = { + .name = "XLR IO MEM", + .start = 0x10000000UL, /* 16MB PCI IO @ 0x1000_0000 */ + .end = 0x100fffffUL, + .flags = IORESOURCE_IO, +}; + +struct pci_controller nlm_pci_controller = { + .index = 0, + .pci_ops = &nlm_pci_ops, + .mem_resource = &nlm_pci_mem_resource, + .mem_offset = 0x00000000UL, + .io_resource = &nlm_pci_io_resource, + .io_offset = 0x00000000UL, +}; + +int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + if (!nlm_chip_is_xls()) + return PIC_PCIX_IRQ; /* for XLR just one IRQ*/ + + /* + * For XLS PCIe, there is an IRQ per Link, find out which + * link the device is on to assign interrupts + */ + if (dev->bus->self == NULL) + return 0; + + switch (dev->bus->self->devfn) { + case 0x0: + return PIC_PCIE_LINK0_IRQ; + case 0x8: + return PIC_PCIE_LINK1_IRQ; + case 0x10: + if (nlm_chip_is_xls_b()) + return PIC_PCIE_XLSB0_LINK2_IRQ; + else + return PIC_PCIE_LINK2_IRQ; + case 0x18: + if (nlm_chip_is_xls_b()) + return PIC_PCIE_XLSB0_LINK3_IRQ; + else + return PIC_PCIE_LINK3_IRQ; + } + WARN(1, "Unexpected devfn %d\n", dev->bus->self->devfn); + return 0; +} + +/* Do platform specific device initialization at pci_enable_device() time */ +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + return 0; +} + +static int __init pcibios_init(void) +{ + /* PSB assigns PCI 
resources */ + pci_probe_only = 1; + pci_config_base = ioremap(DEFAULT_PCI_CONFIG_BASE, 16 << 20); + + /* Extend IO port for memory mapped io */ + ioport_resource.start = 0; + ioport_resource.end = ~0; + + set_io_port_base(CKSEG1); + nlm_pci_controller.io_map_base = CKSEG1; + + pr_info("Registering XLR/XLS PCIX/PCIE Controller.\n"); + register_pci_controller(&nlm_pci_controller); + + return 0; +} + +arch_initcall(pcibios_init); + +struct pci_fixup pcibios_fixups[] = { + {0} +}; diff --git a/trunk/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c b/trunk/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c index f9b9dcdfa9dd..98fd0099d964 100644 --- a/trunk/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c +++ b/trunk/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c @@ -97,7 +97,7 @@ static int msp_per_irq_set_affinity(struct irq_data *d, static struct irq_chip msp_per_irq_controller = { .name = "MSP_PER", - .irq_enable = unmask_per_irq. + .irq_enable = unmask_per_irq, .irq_disable = mask_per_irq, .irq_ack = msp_per_irq_ack, #ifdef CONFIG_SMP diff --git a/trunk/arch/mips/pmc-sierra/yosemite/smp.c b/trunk/arch/mips/pmc-sierra/yosemite/smp.c index efc9e889b349..2608752898c0 100644 --- a/trunk/arch/mips/pmc-sierra/yosemite/smp.c +++ b/trunk/arch/mips/pmc-sierra/yosemite/smp.c @@ -55,6 +55,8 @@ void titan_mailbox_irq(void) if (status & 0x2) smp_call_function_interrupt(); + if (status & 0x4) + scheduler_ipi(); break; case 1: @@ -63,6 +65,8 @@ void titan_mailbox_irq(void) if (status & 0x2) smp_call_function_interrupt(); + if (status & 0x4) + scheduler_ipi(); break; } } diff --git a/trunk/arch/mips/power/hibernate.S b/trunk/arch/mips/power/hibernate.S index dbb5c7b4b70f..f8a751c03282 100644 --- a/trunk/arch/mips/power/hibernate.S +++ b/trunk/arch/mips/power/hibernate.S @@ -35,7 +35,7 @@ LEAF(swsusp_arch_resume) 0: PTR_L t1, PBE_ADDRESS(t0) /* source */ PTR_L t2, PBE_ORIG_ADDRESS(t0) /* destination */ - PTR_ADDIU t3, t1, PAGE_SIZE + PTR_ADDU t3, t1, PAGE_SIZE 1: REG_L t8, (t1) REG_S t8, (t2) diff --git a/trunk/arch/mips/rb532/gpio.c b/trunk/arch/mips/rb532/gpio.c index 37de05d595e7..6c47dfeb7be3 100644 --- a/trunk/arch/mips/rb532/gpio.c +++ b/trunk/arch/mips/rb532/gpio.c @@ -185,7 +185,7 @@ int __init rb532_gpio_init(void) struct resource *r; r = rb532_gpio_reg0_res; - rb532_gpio_chip->regbase = ioremap_nocache(r->start, r->end - r->start); + rb532_gpio_chip->regbase = ioremap_nocache(r->start, resource_size(r)); if (!rb532_gpio_chip->regbase) { printk(KERN_ERR "rb532: cannot remap GPIO register 0\n"); diff --git a/trunk/arch/mips/sgi-ip22/ip22-platform.c b/trunk/arch/mips/sgi-ip22/ip22-platform.c index deddbf0ebe5c..698904daf901 100644 --- a/trunk/arch/mips/sgi-ip22/ip22-platform.c +++ b/trunk/arch/mips/sgi-ip22/ip22-platform.c @@ -132,7 +132,7 @@ static struct platform_device eth1_device = { */ static int __init sgiseeq_devinit(void) { - unsigned int tmp; + unsigned int pbdma __maybe_unused; int res, i; eth0_pd.hpc = hpc3c0; @@ -151,7 +151,7 @@ static int __init sgiseeq_devinit(void) /* Second HPC is missing? 
*/ if (ip22_is_fullhouse() || - get_dbe(tmp, (unsigned int *)&hpc3c1->pbdma[1])) + get_dbe(pbdma, (unsigned int *)&hpc3c1->pbdma[1])) return 0; sgimc->giopar |= SGIMC_GIOPAR_MASTEREXP1 | SGIMC_GIOPAR_EXP164 | diff --git a/trunk/arch/mips/sgi-ip22/ip22-time.c b/trunk/arch/mips/sgi-ip22/ip22-time.c index 603fc91c1030..1a94c9894188 100644 --- a/trunk/arch/mips/sgi-ip22/ip22-time.c +++ b/trunk/arch/mips/sgi-ip22/ip22-time.c @@ -32,7 +32,7 @@ static unsigned long dosample(void) { u32 ct0, ct1; - u8 msb, lsb; + u8 msb; /* Start the counter. */ sgint->tcword = (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL | @@ -46,7 +46,7 @@ static unsigned long dosample(void) /* Latch and spin until top byte of counter2 is zero */ do { writeb(SGINT_TCWORD_CNT2 | SGINT_TCWORD_CLAT, &sgint->tcword); - lsb = readb(&sgint->tcnt2); + (void) readb(&sgint->tcnt2); msb = readb(&sgint->tcnt2); ct1 = read_c0_count(); } while (msb); diff --git a/trunk/arch/mips/sgi-ip27/ip27-hubio.c b/trunk/arch/mips/sgi-ip27/ip27-hubio.c index a1fa4abb3f6a..cd0d5b06cd83 100644 --- a/trunk/arch/mips/sgi-ip27/ip27-hubio.c +++ b/trunk/arch/mips/sgi-ip27/ip27-hubio.c @@ -29,7 +29,6 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget, unsigned long xtalk_addr, size_t size) { nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); - volatile hubreg_t junk; unsigned i; /* use small-window mapping if possible */ @@ -64,7 +63,7 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget, * after we write it. */ IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr); - junk = HUB_L(IIO_ITTE_GET(nasid, i)); + (void) HUB_L(IIO_ITTE_GET(nasid, i)); return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE); } diff --git a/trunk/arch/mips/sgi-ip27/ip27-irq.c b/trunk/arch/mips/sgi-ip27/ip27-irq.c index 0a04603d577c..b18b04e48577 100644 --- a/trunk/arch/mips/sgi-ip27/ip27-irq.c +++ b/trunk/arch/mips/sgi-ip27/ip27-irq.c @@ -147,8 +147,10 @@ static void ip27_do_irq_mask0(void) #ifdef CONFIG_SMP if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) { LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ); + scheduler_ipi(); } else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) { LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ); + scheduler_ipi(); } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) { LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ); smp_call_function_interrupt(); diff --git a/trunk/arch/mips/sgi-ip27/ip27-klnuma.c b/trunk/arch/mips/sgi-ip27/ip27-klnuma.c index c3d30a88daf3..1d1919a44e88 100644 --- a/trunk/arch/mips/sgi-ip27/ip27-klnuma.c +++ b/trunk/arch/mips/sgi-ip27/ip27-klnuma.c @@ -54,11 +54,8 @@ void __init setup_replication_mask(void) static __init void set_ktext_source(nasid_t client_nasid, nasid_t server_nasid) { - cnodeid_t client_cnode; kern_vars_t *kvp; - client_cnode = NASID_TO_COMPACT_NODEID(client_nasid); - kvp = &hub_data(client_nasid)->kern_vars; KERN_VARS_ADDR(client_nasid) = (unsigned long)kvp; diff --git a/trunk/arch/mips/sgi-ip27/ip27-timer.c b/trunk/arch/mips/sgi-ip27/ip27-timer.c index a152538d3c97..ef74f3267f91 100644 --- a/trunk/arch/mips/sgi-ip27/ip27-timer.c +++ b/trunk/arch/mips/sgi-ip27/ip27-timer.c @@ -66,18 +66,7 @@ static int rt_next_event(unsigned long delta, struct clock_event_device *evt) static void rt_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { - switch (mode) { - case CLOCK_EVT_MODE_ONESHOT: - /* The only mode supported */ - break; - - case CLOCK_EVT_MODE_PERIODIC: - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_RESUME: - /* Nothing to do */ - break; - } + /* Nothing to do ... 
*/ } int rt_timer_irq; @@ -174,8 +163,7 @@ static void __init hub_rt_clocksource_init(void) { struct clocksource *cs = &hub_rt_clocksource; - clocksource_set_clock(cs, CYCLES_PER_SEC); - clocksource_register(cs); + clocksource_register_hz(cs, CYCLES_PER_SEC); } void __init plat_time_init(void) diff --git a/trunk/arch/mips/sibyte/bcm1480/smp.c b/trunk/arch/mips/sibyte/bcm1480/smp.c index 47b347c992ea..d667875be564 100644 --- a/trunk/arch/mips/sibyte/bcm1480/smp.c +++ b/trunk/arch/mips/sibyte/bcm1480/smp.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -189,10 +190,8 @@ void bcm1480_mailbox_interrupt(void) /* Clear the mailbox to clear the interrupt */ __raw_writeq(((u64)action)<<48, mailbox_0_clear_regs[cpu]); - /* - * Nothing to do for SMP_RESCHEDULE_YOURSELF; returning from the - * interrupt will do the reschedule for us - */ + if (action & SMP_RESCHEDULE_YOURSELF) + scheduler_ipi(); if (action & SMP_CALL_FUNCTION) smp_call_function_interrupt(); diff --git a/trunk/arch/mips/sibyte/sb1250/smp.c b/trunk/arch/mips/sibyte/sb1250/smp.c index c00a5cb1128d..38e7f6bd7922 100644 --- a/trunk/arch/mips/sibyte/sb1250/smp.c +++ b/trunk/arch/mips/sibyte/sb1250/smp.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -177,10 +178,8 @@ void sb1250_mailbox_interrupt(void) /* Clear the mailbox to clear the interrupt */ ____raw_writeq(((u64)action) << 48, mailbox_clear_regs[cpu]); - /* - * Nothing to do for SMP_RESCHEDULE_YOURSELF; returning from the - * interrupt will do the reschedule for us - */ + if (action & SMP_RESCHEDULE_YOURSELF) + scheduler_ipi(); if (action & SMP_CALL_FUNCTION) smp_call_function_interrupt(); diff --git a/trunk/arch/mips/sni/time.c b/trunk/arch/mips/sni/time.c index c76151b56568..0904d4d30cb3 100644 --- a/trunk/arch/mips/sni/time.c +++ b/trunk/arch/mips/sni/time.c @@ -95,7 +95,7 @@ static void __init sni_a20r_timer_setup(void) static __init unsigned long dosample(void) { u32 ct0, ct1; - volatile u8 msb, lsb; + volatile u8 msb; /* Start the counter. */ outb_p(0x34, 0x43); @@ -108,7 +108,7 @@ static __init unsigned long dosample(void) /* Latch and spin until top byte of counter0 is zero */ do { outb(0x00, 0x43); - lsb = inb(0x40); + (void) inb(0x40); msb = inb(0x40); ct1 = read_c0_count(); } while (msb); diff --git a/trunk/arch/mn10300/kernel/smp.c b/trunk/arch/mn10300/kernel/smp.c index 226c826a2194..83fb27912231 100644 --- a/trunk/arch/mn10300/kernel/smp.c +++ b/trunk/arch/mn10300/kernel/smp.c @@ -494,14 +494,11 @@ void smp_send_stop(void) * @irq: The interrupt number. * @dev_id: The device ID. * - * We need do nothing here, since the scheduling will be effected on our way - * back through entry.S. - * * Returns IRQ_HANDLED to indicate we handled the interrupt successfully. */ static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id) { - /* do nothing */ + scheduler_ipi(); return IRQ_HANDLED; } diff --git a/trunk/arch/parisc/kernel/smp.c b/trunk/arch/parisc/kernel/smp.c index 69d63d354ef0..828305f19cff 100644 --- a/trunk/arch/parisc/kernel/smp.c +++ b/trunk/arch/parisc/kernel/smp.c @@ -155,10 +155,7 @@ ipi_interrupt(int irq, void *dev_id) case IPI_RESCHEDULE: smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu); - /* - * Reschedule callback. Everything to be - * done is done by the interrupt return path. 
- */ + scheduler_ipi(); break; case IPI_CALL_FUNC: diff --git a/trunk/arch/parisc/kernel/vmlinux.lds.S b/trunk/arch/parisc/kernel/vmlinux.lds.S index 8f1e4efd143e..2d9a5c7c76f5 100644 --- a/trunk/arch/parisc/kernel/vmlinux.lds.S +++ b/trunk/arch/parisc/kernel/vmlinux.lds.S @@ -69,6 +69,9 @@ SECTIONS /* End of text section */ _etext = .; + /* Start of data section */ + _sdata = .; + RODATA /* writeable */ diff --git a/trunk/arch/parisc/mm/init.c b/trunk/arch/parisc/mm/init.c index b7ed8d7a9b33..b1d126258dee 100644 --- a/trunk/arch/parisc/mm/init.c +++ b/trunk/arch/parisc/mm/init.c @@ -266,8 +266,10 @@ static void __init setup_bootmem(void) } memset(pfnnid_map, 0xff, sizeof(pfnnid_map)); - for (i = 0; i < npmem_ranges; i++) + for (i = 0; i < npmem_ranges; i++) { + node_set_state(i, N_NORMAL_MEMORY); node_set_online(i); + } #endif /* diff --git a/trunk/arch/powerpc/Kconfig b/trunk/arch/powerpc/Kconfig index b6ff882f695b..a3128ca0fe11 100644 --- a/trunk/arch/powerpc/Kconfig +++ b/trunk/arch/powerpc/Kconfig @@ -193,6 +193,12 @@ config SYS_SUPPORTS_APM_EMULATION default y if PMAC_APM_EMU bool +config EPAPR_BOOT + bool + help + Used to allow a board to specify it wants an ePAPR compliant wrapper. + default n + config DEFAULT_UIMAGE bool help @@ -209,7 +215,7 @@ config ARCH_HIBERNATION_POSSIBLE config ARCH_SUSPEND_POSSIBLE def_bool y depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \ - PPC_85xx || PPC_86xx || PPC_PSERIES || 44x || 40x + (PPC_85xx && !SMP) || PPC_86xx || PPC_PSERIES || 44x || 40x config PPC_DCR_NATIVE bool diff --git a/trunk/arch/powerpc/Kconfig.debug b/trunk/arch/powerpc/Kconfig.debug index 2d38a50e66ba..a597dd77b903 100644 --- a/trunk/arch/powerpc/Kconfig.debug +++ b/trunk/arch/powerpc/Kconfig.debug @@ -267,6 +267,11 @@ config PPC_EARLY_DEBUG_USBGECKO Select this to enable early debugging for Nintendo GameCube/Wii consoles via an external USB Gecko adapter. 
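The reschedule-IPI hunks above (yosemite, ip27, sibyte, mn10300, parisc) all make the same change: the mailbox or IPI handler now calls scheduler_ipi() explicitly instead of relying on the interrupt return path. A generic sketch of the resulting handler shape (the action word would come from the platform mailbox; the function name is illustrative):

static irqreturn_t example_smp_ipi_handler(unsigned int action)
{
        if (action & SMP_RESCHEDULE_YOURSELF)
                scheduler_ipi();                /* rescheduling is no longer implicit */
        if (action & SMP_CALL_FUNCTION)
                smp_call_function_interrupt();

        return IRQ_HANDLED;
}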
+config PPC_EARLY_DEBUG_WSP + bool "Early debugging via WSP's internal UART" + depends on PPC_WSP + select PPC_UDBG_16550 + endchoice config PPC_EARLY_DEBUG_44x_PHYSLOW diff --git a/trunk/arch/powerpc/boot/Makefile b/trunk/arch/powerpc/boot/Makefile index 89178164af5e..c26200b40a47 100644 --- a/trunk/arch/powerpc/boot/Makefile +++ b/trunk/arch/powerpc/boot/Makefile @@ -69,7 +69,8 @@ src-wlib := string.S crt0.S crtsavres.S stdio.c main.c \ cpm-serial.c stdlib.c mpc52xx-psc.c planetcore.c uartlite.c \ fsl-soc.c mpc8xx.c pq2.c ugecon.c src-plat := of.c cuboot-52xx.c cuboot-824x.c cuboot-83xx.c cuboot-85xx.c holly.c \ - cuboot-ebony.c cuboot-hotfoot.c treeboot-ebony.c prpmc2800.c \ + cuboot-ebony.c cuboot-hotfoot.c epapr.c treeboot-ebony.c \ + prpmc2800.c \ ps3-head.S ps3-hvcall.S ps3.c treeboot-bamboo.c cuboot-8xx.c \ cuboot-pq2.c cuboot-sequoia.c treeboot-walnut.c \ cuboot-bamboo.c cuboot-mpc7448hpc2.c cuboot-taishan.c \ @@ -127,7 +128,7 @@ quiet_cmd_bootas = BOOTAS $@ cmd_bootas = $(CROSS32CC) -Wp,-MD,$(depfile) $(BOOTAFLAGS) -c -o $@ $< quiet_cmd_bootar = BOOTAR $@ - cmd_bootar = $(CROSS32AR) -cr $@.$$$$ $(filter-out FORCE,$^); mv $@.$$$$ $@ + cmd_bootar = $(CROSS32AR) -cr$(KBUILD_ARFLAGS) $@.$$$$ $(filter-out FORCE,$^); mv $@.$$$$ $@ $(obj-libfdt): $(obj)/%.o: $(srctree)/scripts/dtc/libfdt/%.c FORCE $(call if_changed_dep,bootcc) @@ -182,6 +183,7 @@ image-$(CONFIG_PPC_HOLLY) += dtbImage.holly image-$(CONFIG_PPC_PRPMC2800) += dtbImage.prpmc2800 image-$(CONFIG_PPC_ISERIES) += zImage.iseries image-$(CONFIG_DEFAULT_UIMAGE) += uImage +image-$(CONFIG_EPAPR_BOOT) += zImage.epapr # # Targets which embed a device tree blob diff --git a/trunk/arch/powerpc/boot/crt0.S b/trunk/arch/powerpc/boot/crt0.S index f1c4dfc635be..0f7428a37efb 100644 --- a/trunk/arch/powerpc/boot/crt0.S +++ b/trunk/arch/powerpc/boot/crt0.S @@ -6,16 +6,28 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * - * NOTE: this code runs in 32 bit mode and is packaged as ELF32. + * NOTE: this code runs in 32 bit mode, is position-independent, + * and is packaged as ELF32. */ #include "ppc_asm.h" .text - /* a procedure descriptor used when booting this as a COFF file */ + /* A procedure descriptor used when booting this as a COFF file. + * When making COFF, this comes first in the link and we're + * linked at 0x500000. + */ .globl _zimage_start_opd _zimage_start_opd: - .long _zimage_start, 0, 0, 0 + .long 0x500000, 0, 0, 0 + +p_start: .long _start +p_etext: .long _etext +p_bss_start: .long __bss_start +p_end: .long _end + + .weak _platform_stack_top +p_pstack: .long _platform_stack_top .weak _zimage_start .globl _zimage_start @@ -24,37 +36,65 @@ _zimage_start: _zimage_start_lib: /* Work out the offset between the address we were linked at and the address where we're running. */ - bl 1f -1: mflr r0 - lis r9,1b@ha - addi r9,r9,1b@l - subf. r0,r9,r0 - beq 3f /* if running at same address as linked */ + bl .+4 +p_base: mflr r10 /* r10 now points to runtime addr of p_base */ + /* grab the link address of the dynamic section in r11 */ + addis r11,r10,(_GLOBAL_OFFSET_TABLE_-p_base)@ha + lwz r11,(_GLOBAL_OFFSET_TABLE_-p_base)@l(r11) + cmpwi r11,0 + beq 3f /* if not linked -pie */ + /* get the runtime address of the dynamic section in r12 */ + .weak __dynamic_start + addis r12,r10,(__dynamic_start-p_base)@ha + addi r12,r12,(__dynamic_start-p_base)@l + subf r11,r11,r12 /* runtime - linktime offset */ + + /* The dynamic section contains a series of tagged entries. 
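The same walk can be sketched in C for reference; this is illustrative only (simplified structures, kernel-style u32 assumed), mirroring the DT_RELA/DT_RELACOUNT lookup and R_PPC_RELATIVE fixups that the assembly below performs:

struct example_dyn  { u32 d_tag; u32 d_val; };                   /* simplified Elf32_Dyn */
struct example_rela { u32 r_offset; u32 r_info; u32 r_addend; }; /* simplified Elf32_Rela */

static void example_apply_relative_relocs(struct example_dyn *dyn, u32 offset)
{
        struct example_rela *rela = 0;
        u32 count = 0;

        for (; dyn->d_tag != 0; dyn++) {                /* DT_NULL ends the list */
                if (dyn->d_tag == 7)                    /* DT_RELA */
                        rela = (struct example_rela *)(dyn->d_val + offset);
                else if (dyn->d_tag == 0x6ffffff9)      /* DT_RELACOUNT */
                        count = dyn->d_val;
        }

        while (rela && count--) {
                if ((rela->r_info & 0xff) == 22)        /* R_PPC_RELATIVE */
                        *(u32 *)(rela->r_offset + offset) = rela->r_addend + offset;
                rela++;
        }
}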
+ * We need the RELA and RELACOUNT entries. */ +RELA = 7 +RELACOUNT = 0x6ffffff9 + li r9,0 + li r0,0 +9: lwz r8,0(r12) /* get tag */ + cmpwi r8,0 + beq 10f /* end of list */ + cmpwi r8,RELA + bne 11f + lwz r9,4(r12) /* get RELA pointer in r9 */ + b 12f +11: addis r8,r8,(-RELACOUNT)@ha + cmpwi r8,RELACOUNT@l + bne 12f + lwz r0,4(r12) /* get RELACOUNT value in r0 */ +12: addi r12,r12,8 + b 9b - /* The .got2 section contains a list of addresses, so add - the address offset onto each entry. */ - lis r9,__got2_start@ha - addi r9,r9,__got2_start@l - lis r8,__got2_end@ha - addi r8,r8,__got2_end@l - subf. r8,r9,r8 + /* The relocation section contains a list of relocations. + * We now do the R_PPC_RELATIVE ones, which point to words + * which need to be initialized with addend + offset. + * The R_PPC_RELATIVE ones come first and there are RELACOUNT + * of them. */ +10: /* skip relocation if we don't have both */ + cmpwi r0,0 beq 3f - srwi. r8,r8,2 - mtctr r8 - add r9,r0,r9 -2: lwz r8,0(r9) - add r8,r8,r0 - stw r8,0(r9) - addi r9,r9,4 + cmpwi r9,0 + beq 3f + + add r9,r9,r11 /* Relocate RELA pointer */ + mtctr r0 +2: lbz r0,4+3(r9) /* ELF32_R_INFO(reloc->r_info) */ + cmpwi r0,22 /* R_PPC_RELATIVE */ + bne 3f + lwz r12,0(r9) /* reloc->r_offset */ + lwz r0,8(r9) /* reloc->r_addend */ + add r0,r0,r11 + stwx r0,r11,r12 + addi r9,r9,12 bdnz 2b /* Do a cache flush for our text, in case the loader didn't */ -3: lis r9,_start@ha - addi r9,r9,_start@l - add r9,r0,r9 - lis r8,_etext@ha - addi r8,r8,_etext@l - add r8,r0,r8 +3: lwz r9,p_start-p_base(r10) /* note: these are relocated now */ + lwz r8,p_etext-p_base(r10) 4: dcbf r0,r9 icbi r0,r9 addi r9,r9,0x20 @@ -64,27 +104,19 @@ _zimage_start_lib: isync /* Clear the BSS */ - lis r9,__bss_start@ha - addi r9,r9,__bss_start@l - add r9,r0,r9 - lis r8,_end@ha - addi r8,r8,_end@l - add r8,r0,r8 - li r10,0 -5: stw r10,0(r9) + lwz r9,p_bss_start-p_base(r10) + lwz r8,p_end-p_base(r10) + li r0,0 +5: stw r0,0(r9) addi r9,r9,4 cmplw cr0,r9,r8 blt 5b /* Possibly set up a custom stack */ -.weak _platform_stack_top - lis r8,_platform_stack_top@ha - addi r8,r8,_platform_stack_top@l + lwz r8,p_pstack-p_base(r10) cmpwi r8,0 beq 6f - add r8,r0,r8 lwz r1,0(r8) - add r1,r0,r1 li r0,0 stwu r0,-16(r1) /* establish a stack frame */ 6: diff --git a/trunk/arch/powerpc/boot/dts/p1020rdb.dts b/trunk/arch/powerpc/boot/dts/p1020rdb.dts index e0668f877794..d6a8ae458137 100644 --- a/trunk/arch/powerpc/boot/dts/p1020rdb.dts +++ b/trunk/arch/powerpc/boot/dts/p1020rdb.dts @@ -9,12 +9,11 @@ * option) any later version. 
*/ -/dts-v1/; +/include/ "p1020si.dtsi" + / { - model = "fsl,P1020"; + model = "fsl,P1020RDB"; compatible = "fsl,P1020RDB"; - #address-cells = <2>; - #size-cells = <2>; aliases { serial0 = &serial0; @@ -26,34 +25,11 @@ pci1 = &pci1; }; - cpus { - #address-cells = <1>; - #size-cells = <0>; - - PowerPC,P1020@0 { - device_type = "cpu"; - reg = <0x0>; - next-level-cache = <&L2>; - }; - - PowerPC,P1020@1 { - device_type = "cpu"; - reg = <0x1>; - next-level-cache = <&L2>; - }; - }; - memory { device_type = "memory"; }; localbus@ffe05000 { - #address-cells = <2>; - #size-cells = <1>; - compatible = "fsl,p1020-elbc", "fsl,elbc", "simple-bus"; - reg = <0 0xffe05000 0 0x1000>; - interrupts = <19 2>; - interrupt-parent = <&mpic>; /* NOR, NAND Flashes and Vitesse 5 port L2 switch */ ranges = <0x0 0x0 0x0 0xef000000 0x01000000 @@ -165,88 +141,14 @@ }; soc@ffe00000 { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - compatible = "fsl,p1020-immr", "simple-bus"; - ranges = <0x0 0x0 0xffe00000 0x100000>; - bus-frequency = <0>; // Filled out by uboot. - - ecm-law@0 { - compatible = "fsl,ecm-law"; - reg = <0x0 0x1000>; - fsl,num-laws = <12>; - }; - - ecm@1000 { - compatible = "fsl,p1020-ecm", "fsl,ecm"; - reg = <0x1000 0x1000>; - interrupts = <16 2>; - interrupt-parent = <&mpic>; - }; - - memory-controller@2000 { - compatible = "fsl,p1020-memory-controller"; - reg = <0x2000 0x1000>; - interrupt-parent = <&mpic>; - interrupts = <16 2>; - }; - i2c@3000 { - #address-cells = <1>; - #size-cells = <0>; - cell-index = <0>; - compatible = "fsl-i2c"; - reg = <0x3000 0x100>; - interrupts = <43 2>; - interrupt-parent = <&mpic>; - dfsrr; rtc@68 { compatible = "dallas,ds1339"; reg = <0x68>; }; }; - i2c@3100 { - #address-cells = <1>; - #size-cells = <0>; - cell-index = <1>; - compatible = "fsl-i2c"; - reg = <0x3100 0x100>; - interrupts = <43 2>; - interrupt-parent = <&mpic>; - dfsrr; - }; - - serial0: serial@4500 { - cell-index = <0>; - device_type = "serial"; - compatible = "ns16550"; - reg = <0x4500 0x100>; - clock-frequency = <0>; - interrupts = <42 2>; - interrupt-parent = <&mpic>; - }; - - serial1: serial@4600 { - cell-index = <1>; - device_type = "serial"; - compatible = "ns16550"; - reg = <0x4600 0x100>; - clock-frequency = <0>; - interrupts = <42 2>; - interrupt-parent = <&mpic>; - }; - spi@7000 { - cell-index = <0>; - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,espi"; - reg = <0x7000 0x1000>; - interrupts = <59 0x2>; - interrupt-parent = <&mpic>; - mode = "cpu"; fsl_m25p80@0 { #address-cells = <1>; @@ -294,66 +196,7 @@ }; }; - gpio: gpio-controller@f000 { - #gpio-cells = <2>; - compatible = "fsl,mpc8572-gpio"; - reg = <0xf000 0x100>; - interrupts = <47 0x2>; - interrupt-parent = <&mpic>; - gpio-controller; - }; - - L2: l2-cache-controller@20000 { - compatible = "fsl,p1020-l2-cache-controller"; - reg = <0x20000 0x1000>; - cache-line-size = <32>; // 32 bytes - cache-size = <0x40000>; // L2,256K - interrupt-parent = <&mpic>; - interrupts = <16 2>; - }; - - dma@21300 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,eloplus-dma"; - reg = <0x21300 0x4>; - ranges = <0x0 0x21100 0x200>; - cell-index = <0>; - dma-channel@0 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x0 0x80>; - cell-index = <0>; - interrupt-parent = <&mpic>; - interrupts = <20 2>; - }; - dma-channel@80 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x80 0x80>; - cell-index = <1>; - interrupt-parent = <&mpic>; - interrupts = <21 2>; - }; - dma-channel@100 { - compatible = 
"fsl,eloplus-dma-channel"; - reg = <0x100 0x80>; - cell-index = <2>; - interrupt-parent = <&mpic>; - interrupts = <22 2>; - }; - dma-channel@180 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x180 0x80>; - cell-index = <3>; - interrupt-parent = <&mpic>; - interrupts = <23 2>; - }; - }; - mdio@24000 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,etsec2-mdio"; - reg = <0x24000 0x1000 0xb0030 0x4>; phy0: ethernet-phy@0 { interrupt-parent = <&mpic>; @@ -369,10 +212,6 @@ }; mdio@25000 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,etsec2-tbi"; - reg = <0x25000 0x1000 0xb1030 0x4>; tbi0: tbi-phy@11 { reg = <0x11>; @@ -381,97 +220,25 @@ }; enet0: ethernet@b0000 { - #address-cells = <1>; - #size-cells = <1>; - device_type = "network"; - model = "eTSEC"; - compatible = "fsl,etsec2"; - fsl,num_rx_queues = <0x8>; - fsl,num_tx_queues = <0x8>; - local-mac-address = [ 00 00 00 00 00 00 ]; - interrupt-parent = <&mpic>; fixed-link = <1 1 1000 0 0>; phy-connection-type = "rgmii-id"; - queue-group@0 { - #address-cells = <1>; - #size-cells = <1>; - reg = <0xb0000 0x1000>; - interrupts = <29 2 30 2 34 2>; - }; - - queue-group@1 { - #address-cells = <1>; - #size-cells = <1>; - reg = <0xb4000 0x1000>; - interrupts = <17 2 18 2 24 2>; - }; }; enet1: ethernet@b1000 { - #address-cells = <1>; - #size-cells = <1>; - device_type = "network"; - model = "eTSEC"; - compatible = "fsl,etsec2"; - fsl,num_rx_queues = <0x8>; - fsl,num_tx_queues = <0x8>; - local-mac-address = [ 00 00 00 00 00 00 ]; - interrupt-parent = <&mpic>; phy-handle = <&phy0>; tbi-handle = <&tbi0>; phy-connection-type = "sgmii"; - queue-group@0 { - #address-cells = <1>; - #size-cells = <1>; - reg = <0xb1000 0x1000>; - interrupts = <35 2 36 2 40 2>; - }; - - queue-group@1 { - #address-cells = <1>; - #size-cells = <1>; - reg = <0xb5000 0x1000>; - interrupts = <51 2 52 2 67 2>; - }; }; enet2: ethernet@b2000 { - #address-cells = <1>; - #size-cells = <1>; - device_type = "network"; - model = "eTSEC"; - compatible = "fsl,etsec2"; - fsl,num_rx_queues = <0x8>; - fsl,num_tx_queues = <0x8>; - local-mac-address = [ 00 00 00 00 00 00 ]; - interrupt-parent = <&mpic>; phy-handle = <&phy1>; phy-connection-type = "rgmii-id"; - queue-group@0 { - #address-cells = <1>; - #size-cells = <1>; - reg = <0xb2000 0x1000>; - interrupts = <31 2 32 2 33 2>; - }; - - queue-group@1 { - #address-cells = <1>; - #size-cells = <1>; - reg = <0xb6000 0x1000>; - interrupts = <25 2 26 2 27 2>; - }; }; usb@22000 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl-usb2-dr"; - reg = <0x22000 0x1000>; - interrupt-parent = <&mpic>; - interrupts = <28 0x2>; phy_type = "ulpi"; }; @@ -481,82 +248,23 @@ it enables USB2. OTOH, U-Boot does create a new node when there isn't any. So, just comment it out. 
usb@23000 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl-usb2-dr"; - reg = <0x23000 0x1000>; - interrupt-parent = <&mpic>; - interrupts = <46 0x2>; phy_type = "ulpi"; }; */ - sdhci@2e000 { - compatible = "fsl,p1020-esdhc", "fsl,esdhc"; - reg = <0x2e000 0x1000>; - interrupts = <72 0x2>; - interrupt-parent = <&mpic>; - /* Filled in by U-Boot */ - clock-frequency = <0>; - }; - - crypto@30000 { - compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4", - "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0"; - reg = <0x30000 0x10000>; - interrupts = <45 2 58 2>; - interrupt-parent = <&mpic>; - fsl,num-channels = <4>; - fsl,channel-fifo-len = <24>; - fsl,exec-units-mask = <0xbfe>; - fsl,descriptor-types-mask = <0x3ab0ebf>; - }; - - mpic: pic@40000 { - interrupt-controller; - #address-cells = <0>; - #interrupt-cells = <2>; - reg = <0x40000 0x40000>; - compatible = "chrp,open-pic"; - device_type = "open-pic"; - }; - - msi@41600 { - compatible = "fsl,p1020-msi", "fsl,mpic-msi"; - reg = <0x41600 0x80>; - msi-available-ranges = <0 0x100>; - interrupts = < - 0xe0 0 - 0xe1 0 - 0xe2 0 - 0xe3 0 - 0xe4 0 - 0xe5 0 - 0xe6 0 - 0xe7 0>; - interrupt-parent = <&mpic>; - }; - - global-utilities@e0000 { //global utilities block - compatible = "fsl,p1020-guts"; - reg = <0xe0000 0x1000>; - fsl,has-rstcr; - }; }; pci0: pcie@ffe09000 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - reg = <0 0xffe09000 0 0x1000>; - bus-range = <0 255>; ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; - clock-frequency = <33333333>; - interrupt-parent = <&mpic>; - interrupts = <16 2>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x4 0x1 + 0000 0x0 0x0 0x2 &mpic 0x5 0x1 + 0000 0x0 0x0 0x3 &mpic 0x6 0x1 + 0000 0x0 0x0 0x4 &mpic 0x7 0x1 + >; pcie@0 { reg = <0x0 0x0 0x0 0x0 0x0>; #size-cells = <2>; @@ -573,18 +281,16 @@ }; pci1: pcie@ffe0a000 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - reg = <0 0xffe0a000 0 0x1000>; - bus-range = <0 255>; ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; - clock-frequency = <33333333>; - interrupt-parent = <&mpic>; - interrupts = <16 2>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x0 0x1 + 0000 0x0 0x0 0x2 &mpic 0x1 0x1 + 0000 0x0 0x0 0x3 &mpic 0x2 0x1 + 0000 0x0 0x0 0x4 &mpic 0x3 0x1 + >; pcie@0 { reg = <0x0 0x0 0x0 0x0 0x0>; #size-cells = <2>; diff --git a/trunk/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts b/trunk/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts new file mode 100644 index 000000000000..f0bf7f42f097 --- /dev/null +++ b/trunk/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts @@ -0,0 +1,213 @@ +/* + * P1020 RDB Core0 Device Tree Source in CAMP mode. + * + * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache + * can be shared, all the other devices must be assigned to one core only. + * This dts file allows core0 to have memory, l2, i2c, spi, gpio, tdm, dma, usb, + * eth1, eth2, sdhc, crypto, global-util, message, pci0, pci1, msi. + * + * Please note to add "-b 0" for core0's dts compiling. + * + * Copyright 2011 Freescale Semiconductor Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +/include/ "p1020si.dtsi" + +/ { + model = "fsl,P1020RDB"; + compatible = "fsl,P1020RDB", "fsl,MPC85XXRDB-CAMP"; + + aliases { + ethernet1 = &enet1; + ethernet2 = &enet2; + serial0 = &serial0; + pci0 = &pci0; + pci1 = &pci1; + }; + + cpus { + PowerPC,P1020@1 { + status = "disabled"; + }; + }; + + memory { + device_type = "memory"; + }; + + localbus@ffe05000 { + status = "disabled"; + }; + + soc@ffe00000 { + i2c@3000 { + rtc@68 { + compatible = "dallas,ds1339"; + reg = <0x68>; + }; + }; + + serial1: serial@4600 { + status = "disabled"; + }; + + spi@7000 { + fsl_m25p80@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,espi-flash"; + reg = <0>; + linux,modalias = "fsl_m25p80"; + spi-max-frequency = <40000000>; + + partition@0 { + /* 512KB for u-boot Bootloader Image */ + reg = <0x0 0x00080000>; + label = "SPI (RO) U-Boot Image"; + read-only; + }; + + partition@80000 { + /* 512KB for DTB Image */ + reg = <0x00080000 0x00080000>; + label = "SPI (RO) DTB Image"; + read-only; + }; + + partition@100000 { + /* 4MB for Linux Kernel Image */ + reg = <0x00100000 0x00400000>; + label = "SPI (RO) Linux Kernel Image"; + read-only; + }; + + partition@500000 { + /* 4MB for Compressed RFS Image */ + reg = <0x00500000 0x00400000>; + label = "SPI (RO) Compressed RFS Image"; + read-only; + }; + + partition@900000 { + /* 7MB for JFFS2 based RFS */ + reg = <0x00900000 0x00700000>; + label = "SPI (RW) JFFS2 RFS"; + }; + }; + }; + + mdio@24000 { + phy0: ethernet-phy@0 { + interrupt-parent = <&mpic>; + interrupts = <3 1>; + reg = <0x0>; + }; + phy1: ethernet-phy@1 { + interrupt-parent = <&mpic>; + interrupts = <2 1>; + reg = <0x1>; + }; + }; + + mdio@25000 { + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; + }; + + enet0: ethernet@b0000 { + status = "disabled"; + }; + + enet1: ethernet@b1000 { + phy-handle = <&phy0>; + tbi-handle = <&tbi0>; + phy-connection-type = "sgmii"; + }; + + enet2: ethernet@b2000 { + phy-handle = <&phy1>; + phy-connection-type = "rgmii-id"; + }; + + usb@22000 { + phy_type = "ulpi"; + }; + + /* USB2 is shared with localbus, so it must be disabled + by default. We can't put 'status = "disabled";' here + since U-Boot doesn't clear the status property when + it enables USB2. OTOH, U-Boot does create a new node + when there isn't any. So, just comment it out. 
+ usb@23000 { + phy_type = "ulpi"; + }; + */ + + mpic: pic@40000 { + protected-sources = < + 42 29 30 34 /* serial1, enet0-queue-group0 */ + 17 18 24 45 /* enet0-queue-group1, crypto */ + >; + }; + + }; + + pci0: pcie@ffe09000 { + ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 + 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x4 0x1 + 0000 0x0 0x0 0x2 &mpic 0x5 0x1 + 0000 0x0 0x0 0x3 &mpic 0x6 0x1 + 0000 0x0 0x0 0x4 &mpic 0x7 0x1 + >; + pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + ranges = <0x2000000 0x0 0xa0000000 + 0x2000000 0x0 0xa0000000 + 0x0 0x20000000 + + 0x1000000 0x0 0x0 + 0x1000000 0x0 0x0 + 0x0 0x100000>; + }; + }; + + pci1: pcie@ffe0a000 { + ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 + 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x0 0x1 + 0000 0x0 0x0 0x2 &mpic 0x1 0x1 + 0000 0x0 0x0 0x3 &mpic 0x2 0x1 + 0000 0x0 0x0 0x4 &mpic 0x3 0x1 + >; + pcie@0 { + reg = <0x0 0x0 0x0 0x0 0x0>; + #size-cells = <2>; + #address-cells = <3>; + device_type = "pci"; + ranges = <0x2000000 0x0 0x80000000 + 0x2000000 0x0 0x80000000 + 0x0 0x20000000 + + 0x1000000 0x0 0x0 + 0x1000000 0x0 0x0 + 0x0 0x100000>; + }; + }; +}; diff --git a/trunk/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts b/trunk/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts new file mode 100644 index 000000000000..6ec02204a44e --- /dev/null +++ b/trunk/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts @@ -0,0 +1,148 @@ +/* + * P1020 RDB Core1 Device Tree Source in CAMP mode. + * + * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache + * can be shared, all the other devices must be assigned to one core only. + * This dts allows core1 to have l2, eth0, crypto. + * + * Please note to add "-b 1" for core1's dts compiling. + * + * Copyright 2011 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +/include/ "p1020si.dtsi" + +/ { + model = "fsl,P1020RDB"; + compatible = "fsl,P1020RDB", "fsl,MPC85XXRDB-CAMP"; + + aliases { + ethernet0 = &enet0; + serial0 = &serial1; + }; + + cpus { + PowerPC,P1020@0 { + status = "disabled"; + }; + }; + + memory { + device_type = "memory"; + }; + + localbus@ffe05000 { + status = "disabled"; + }; + + soc@ffe00000 { + ecm-law@0 { + status = "disabled"; + }; + + ecm@1000 { + status = "disabled"; + }; + + memory-controller@2000 { + status = "disabled"; + }; + + i2c@3000 { + status = "disabled"; + }; + + i2c@3100 { + status = "disabled"; + }; + + serial0: serial@4500 { + status = "disabled"; + }; + + spi@7000 { + status = "disabled"; + }; + + gpio: gpio-controller@f000 { + status = "disabled"; + }; + + dma@21300 { + status = "disabled"; + }; + + mdio@24000 { + status = "disabled"; + }; + + mdio@25000 { + status = "disabled"; + }; + + enet0: ethernet@b0000 { + fixed-link = <1 1 1000 0 0>; + phy-connection-type = "rgmii-id"; + + }; + + enet1: ethernet@b1000 { + status = "disabled"; + }; + + enet2: ethernet@b2000 { + status = "disabled"; + }; + + usb@22000 { + status = "disabled"; + }; + + sdhci@2e000 { + status = "disabled"; + }; + + mpic: pic@40000 { + protected-sources = < + 16 /* ecm, mem, L2, pci0, pci1 */ + 43 42 59 /* i2c, serial0, spi */ + 47 63 62 /* gpio, tdm */ + 20 21 22 23 /* dma */ + 03 02 /* mdio */ + 35 36 40 /* enet1-queue-group0 */ + 51 52 67 /* enet1-queue-group1 */ + 31 32 33 /* enet2-queue-group0 */ + 25 26 27 /* enet2-queue-group1 */ + 28 72 58 /* usb, sdhci, crypto */ + 0xb0 0xb1 0xb2 /* message */ + 0xb3 0xb4 0xb5 + 0xb6 0xb7 + 0xe0 0xe1 0xe2 /* msi */ + 0xe3 0xe4 0xe5 + 0xe6 0xe7 /* sdhci, crypto , pci */ + >; + }; + + msi@41600 { + status = "disabled"; + }; + + global-utilities@e0000 { //global utilities block + status = "disabled"; + }; + + }; + + pci0: pcie@ffe09000 { + status = "disabled"; + }; + + pci1: pcie@ffe0a000 { + status = "disabled"; + }; +}; diff --git a/trunk/arch/powerpc/boot/dts/p1020si.dtsi b/trunk/arch/powerpc/boot/dts/p1020si.dtsi new file mode 100644 index 000000000000..5c5acb66c3fc --- /dev/null +++ b/trunk/arch/powerpc/boot/dts/p1020si.dtsi @@ -0,0 +1,377 @@ +/* + * P1020si Device Tree Source + * + * Copyright 2011 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +/dts-v1/; +/ { + compatible = "fsl,P1020"; + #address-cells = <2>; + #size-cells = <2>; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + PowerPC,P1020@0 { + device_type = "cpu"; + reg = <0x0>; + next-level-cache = <&L2>; + }; + + PowerPC,P1020@1 { + device_type = "cpu"; + reg = <0x1>; + next-level-cache = <&L2>; + }; + }; + + localbus@ffe05000 { + #address-cells = <2>; + #size-cells = <1>; + compatible = "fsl,p1020-elbc", "fsl,elbc", "simple-bus"; + reg = <0 0xffe05000 0 0x1000>; + interrupts = <19 2>; + interrupt-parent = <&mpic>; + }; + + soc@ffe00000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "fsl,p1020-immr", "simple-bus"; + ranges = <0x0 0x0 0xffe00000 0x100000>; + bus-frequency = <0>; // Filled out by uboot. 
+ + ecm-law@0 { + compatible = "fsl,ecm-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <12>; + }; + + ecm@1000 { + compatible = "fsl,p1020-ecm", "fsl,ecm"; + reg = <0x1000 0x1000>; + interrupts = <16 2>; + interrupt-parent = <&mpic>; + }; + + memory-controller@2000 { + compatible = "fsl,p1020-memory-controller"; + reg = <0x2000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <16 2>; + }; + + i2c@3000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "fsl-i2c"; + reg = <0x3000 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + + i2c@3100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "fsl-i2c"; + reg = <0x3100 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + + serial0: serial@4500 { + cell-index = <0>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4500 0x100>; + clock-frequency = <0>; + interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; + + serial1: serial@4600 { + cell-index = <1>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4600 0x100>; + clock-frequency = <0>; + interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; + + spi@7000 { + cell-index = <0>; + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,espi"; + reg = <0x7000 0x1000>; + interrupts = <59 0x2>; + interrupt-parent = <&mpic>; + mode = "cpu"; + }; + + gpio: gpio-controller@f000 { + #gpio-cells = <2>; + compatible = "fsl,mpc8572-gpio"; + reg = <0xf000 0x100>; + interrupts = <47 0x2>; + interrupt-parent = <&mpic>; + gpio-controller; + }; + + L2: l2-cache-controller@20000 { + compatible = "fsl,p1020-l2-cache-controller"; + reg = <0x20000 0x1000>; + cache-line-size = <32>; // 32 bytes + cache-size = <0x40000>; // L2,256K + interrupt-parent = <&mpic>; + interrupts = <16 2>; + }; + + dma@21300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,eloplus-dma"; + reg = <0x21300 0x4>; + ranges = <0x0 0x21100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <20 2>; + }; + dma-channel@80 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <21 2>; + }; + dma-channel@100 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <22 2>; + }; + dma-channel@180 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupt-parent = <&mpic>; + interrupts = <23 2>; + }; + }; + + mdio@24000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,etsec2-mdio"; + reg = <0x24000 0x1000 0xb0030 0x4>; + + }; + + mdio@25000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,etsec2-tbi"; + reg = <0x25000 0x1000 0xb1030 0x4>; + + }; + + enet0: ethernet@b0000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "network"; + model = "eTSEC"; + compatible = "fsl,etsec2"; + fsl,num_rx_queues = <0x8>; + fsl,num_tx_queues = <0x8>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupt-parent = <&mpic>; + + queue-group@0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0xb0000 0x1000>; + interrupts = <29 2 30 2 34 2>; + }; + + queue-group@1 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0xb4000 0x1000>; + interrupts = <17 2 18 2 24 2>; + }; + }; + + enet1: ethernet@b1000 { + #address-cells = <1>; + #size-cells = 
<1>; + device_type = "network"; + model = "eTSEC"; + compatible = "fsl,etsec2"; + fsl,num_rx_queues = <0x8>; + fsl,num_tx_queues = <0x8>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupt-parent = <&mpic>; + + queue-group@0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0xb1000 0x1000>; + interrupts = <35 2 36 2 40 2>; + }; + + queue-group@1 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0xb5000 0x1000>; + interrupts = <51 2 52 2 67 2>; + }; + }; + + enet2: ethernet@b2000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "network"; + model = "eTSEC"; + compatible = "fsl,etsec2"; + fsl,num_rx_queues = <0x8>; + fsl,num_tx_queues = <0x8>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupt-parent = <&mpic>; + + queue-group@0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0xb2000 0x1000>; + interrupts = <31 2 32 2 33 2>; + }; + + queue-group@1 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0xb6000 0x1000>; + interrupts = <25 2 26 2 27 2>; + }; + }; + + usb@22000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl-usb2-dr"; + reg = <0x22000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <28 0x2>; + }; + + /* USB2 is shared with localbus, so it must be disabled + by default. We can't put 'status = "disabled";' here + since U-Boot doesn't clear the status property when + it enables USB2. OTOH, U-Boot does create a new node + when there isn't any. So, just comment it out. + usb@23000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl-usb2-dr"; + reg = <0x23000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <46 0x2>; + phy_type = "ulpi"; + }; + */ + + sdhci@2e000 { + compatible = "fsl,p1020-esdhc", "fsl,esdhc"; + reg = <0x2e000 0x1000>; + interrupts = <72 0x2>; + interrupt-parent = <&mpic>; + /* Filled in by U-Boot */ + clock-frequency = <0>; + }; + + crypto@30000 { + compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4", + "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0"; + reg = <0x30000 0x10000>; + interrupts = <45 2 58 2>; + interrupt-parent = <&mpic>; + fsl,num-channels = <4>; + fsl,channel-fifo-len = <24>; + fsl,exec-units-mask = <0xbfe>; + fsl,descriptor-types-mask = <0x3ab0ebf>; + }; + + mpic: pic@40000 { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <2>; + reg = <0x40000 0x40000>; + compatible = "chrp,open-pic"; + device_type = "open-pic"; + }; + + msi@41600 { + compatible = "fsl,p1020-msi", "fsl,mpic-msi"; + reg = <0x41600 0x80>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe0 0 + 0xe1 0 + 0xe2 0 + 0xe3 0 + 0xe4 0 + 0xe5 0 + 0xe6 0 + 0xe7 0>; + interrupt-parent = <&mpic>; + }; + + global-utilities@e0000 { //global utilities block + compatible = "fsl,p1020-guts","fsl,p2020-guts"; + reg = <0xe0000 0x1000>; + fsl,has-rstcr; + }; + }; + + pci0: pcie@ffe09000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0 0xffe09000 0 0x1000>; + bus-range = <0 255>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <16 2>; + }; + + pci1: pcie@ffe0a000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0 0xffe0a000 0 0x1000>; + bus-range = <0 255>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <16 2>; + }; +}; diff --git a/trunk/arch/powerpc/boot/dts/p1022ds.dts b/trunk/arch/powerpc/boot/dts/p1022ds.dts index 59ef405c1c91..4f685a779f4c 100644 
--- a/trunk/arch/powerpc/boot/dts/p1022ds.dts +++ b/trunk/arch/powerpc/boot/dts/p1022ds.dts @@ -52,7 +52,7 @@ #size-cells = <1>; compatible = "fsl,p1022-elbc", "fsl,elbc", "simple-bus"; reg = <0 0xffe05000 0 0x1000>; - interrupts = <19 2>; + interrupts = <19 2 0 0>; ranges = <0x0 0x0 0xf 0xe8000000 0x08000000 0x1 0x0 0xf 0xe0000000 0x08000000 @@ -157,7 +157,7 @@ * IRQ8 is generated if the "EVENT" switch is pressed * and PX_CTL[EVESEL] is set to 00. */ - interrupts = <8 8>; + interrupts = <8 8 0 0>; }; }; @@ -178,13 +178,13 @@ ecm@1000 { compatible = "fsl,p1022-ecm", "fsl,ecm"; reg = <0x1000 0x1000>; - interrupts = <16 2>; + interrupts = <16 2 0 0>; }; memory-controller@2000 { compatible = "fsl,p1022-memory-controller"; reg = <0x2000 0x1000>; - interrupts = <16 2>; + interrupts = <16 2 0 0>; }; i2c@3000 { @@ -193,7 +193,7 @@ cell-index = <0>; compatible = "fsl-i2c"; reg = <0x3000 0x100>; - interrupts = <43 2>; + interrupts = <43 2 0 0>; dfsrr; }; @@ -203,7 +203,7 @@ cell-index = <1>; compatible = "fsl-i2c"; reg = <0x3100 0x100>; - interrupts = <43 2>; + interrupts = <43 2 0 0>; dfsrr; wm8776:codec@1a { @@ -220,7 +220,7 @@ compatible = "ns16550"; reg = <0x4500 0x100>; clock-frequency = <0>; - interrupts = <42 2>; + interrupts = <42 2 0 0>; }; serial1: serial@4600 { @@ -229,7 +229,7 @@ compatible = "ns16550"; reg = <0x4600 0x100>; clock-frequency = <0>; - interrupts = <42 2>; + interrupts = <42 2 0 0>; }; spi@7000 { @@ -238,7 +238,7 @@ #size-cells = <0>; compatible = "fsl,espi"; reg = <0x7000 0x1000>; - interrupts = <59 0x2>; + interrupts = <59 0x2 0 0>; espi,num-ss-bits = <4>; mode = "cpu"; @@ -275,7 +275,7 @@ compatible = "fsl,mpc8610-ssi"; cell-index = <0>; reg = <0x15000 0x100>; - interrupts = <75 2>; + interrupts = <75 2 0 0>; fsl,mode = "i2s-slave"; codec-handle = <&wm8776>; fsl,playback-dma = <&dma00>; @@ -294,25 +294,25 @@ compatible = "fsl,ssi-dma-channel"; reg = <0x0 0x80>; cell-index = <0>; - interrupts = <76 2>; + interrupts = <76 2 0 0>; }; dma01: dma-channel@80 { compatible = "fsl,ssi-dma-channel"; reg = <0x80 0x80>; cell-index = <1>; - interrupts = <77 2>; + interrupts = <77 2 0 0>; }; dma-channel@100 { compatible = "fsl,eloplus-dma-channel"; reg = <0x100 0x80>; cell-index = <2>; - interrupts = <78 2>; + interrupts = <78 2 0 0>; }; dma-channel@180 { compatible = "fsl,eloplus-dma-channel"; reg = <0x180 0x80>; cell-index = <3>; - interrupts = <79 2>; + interrupts = <79 2 0 0>; }; }; @@ -320,7 +320,7 @@ #gpio-cells = <2>; compatible = "fsl,mpc8572-gpio"; reg = <0xf000 0x100>; - interrupts = <47 0x2>; + interrupts = <47 0x2 0 0>; gpio-controller; }; @@ -329,7 +329,7 @@ reg = <0x20000 0x1000>; cache-line-size = <32>; // 32 bytes cache-size = <0x40000>; // L2, 256K - interrupts = <16 2>; + interrupts = <16 2 0 0>; }; dma@21300 { @@ -343,25 +343,25 @@ compatible = "fsl,eloplus-dma-channel"; reg = <0x0 0x80>; cell-index = <0>; - interrupts = <20 2>; + interrupts = <20 2 0 0>; }; dma-channel@80 { compatible = "fsl,eloplus-dma-channel"; reg = <0x80 0x80>; cell-index = <1>; - interrupts = <21 2>; + interrupts = <21 2 0 0>; }; dma-channel@100 { compatible = "fsl,eloplus-dma-channel"; reg = <0x100 0x80>; cell-index = <2>; - interrupts = <22 2>; + interrupts = <22 2 0 0>; }; dma-channel@180 { compatible = "fsl,eloplus-dma-channel"; reg = <0x180 0x80>; cell-index = <3>; - interrupts = <23 2>; + interrupts = <23 2 0 0>; }; }; @@ -370,7 +370,7 @@ #size-cells = <0>; compatible = "fsl-usb2-dr"; reg = <0x22000 0x1000>; - interrupts = <28 0x2>; + interrupts = <28 0x2 0 0>; phy_type = "ulpi"; }; @@ 
-381,11 +381,11 @@ reg = <0x24000 0x1000 0xb0030 0x4>; phy0: ethernet-phy@0 { - interrupts = <3 1>; + interrupts = <3 1 0 0>; reg = <0x1>; }; phy1: ethernet-phy@1 { - interrupts = <9 1>; + interrupts = <9 1 0 0>; reg = <0x2>; }; }; @@ -416,13 +416,13 @@ #address-cells = <1>; #size-cells = <1>; reg = <0xB0000 0x1000>; - interrupts = <29 2 30 2 34 2>; + interrupts = <29 2 0 0 30 2 0 0 34 2 0 0>; }; queue-group@1{ #address-cells = <1>; #size-cells = <1>; reg = <0xB4000 0x1000>; - interrupts = <17 2 18 2 24 2>; + interrupts = <17 2 0 0 18 2 0 0 24 2 0 0>; }; }; @@ -443,20 +443,20 @@ #address-cells = <1>; #size-cells = <1>; reg = <0xB1000 0x1000>; - interrupts = <35 2 36 2 40 2>; + interrupts = <35 2 0 0 36 2 0 0 40 2 0 0>; }; queue-group@1{ #address-cells = <1>; #size-cells = <1>; reg = <0xB5000 0x1000>; - interrupts = <51 2 52 2 67 2>; + interrupts = <51 2 0 0 52 2 0 0 67 2 0 0>; }; }; sdhci@2e000 { compatible = "fsl,p1022-esdhc", "fsl,esdhc"; reg = <0x2e000 0x1000>; - interrupts = <72 0x2>; + interrupts = <72 0x2 0 0>; fsl,sdhci-auto-cmd12; /* Filled in by U-Boot */ clock-frequency = <0>; @@ -467,7 +467,7 @@ "fsl,sec2.4", "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0"; reg = <0x30000 0x10000>; - interrupts = <45 2 58 2>; + interrupts = <45 2 0 0 58 2 0 0>; fsl,num-channels = <4>; fsl,channel-fifo-len = <24>; fsl,exec-units-mask = <0x97c>; @@ -478,14 +478,14 @@ compatible = "fsl,p1022-sata", "fsl,pq-sata-v2"; reg = <0x18000 0x1000>; cell-index = <1>; - interrupts = <74 0x2>; + interrupts = <74 0x2 0 0>; }; sata@19000 { compatible = "fsl,p1022-sata", "fsl,pq-sata-v2"; reg = <0x19000 0x1000>; cell-index = <2>; - interrupts = <41 0x2>; + interrupts = <41 0x2 0 0>; }; power@e0070{ @@ -496,21 +496,33 @@ display@10000 { compatible = "fsl,diu", "fsl,p1022-diu"; reg = <0x10000 1000>; - interrupts = <64 2>; + interrupts = <64 2 0 0>; }; timer@41100 { compatible = "fsl,mpic-global-timer"; - reg = <0x41100 0x204>; - interrupts = <0xf7 0x2>; + reg = <0x41100 0x100 0x41300 4>; + interrupts = <0 0 3 0 + 1 0 3 0 + 2 0 3 0 + 3 0 3 0>; + }; + + timer@42100 { + compatible = "fsl,mpic-global-timer"; + reg = <0x42100 0x100 0x42300 4>; + interrupts = <4 0 3 0 + 5 0 3 0 + 6 0 3 0 + 7 0 3 0>; }; mpic: pic@40000 { interrupt-controller; #address-cells = <0>; - #interrupt-cells = <2>; + #interrupt-cells = <4>; reg = <0x40000 0x40000>; - compatible = "chrp,open-pic"; + compatible = "fsl,mpic"; device_type = "open-pic"; }; @@ -519,14 +531,14 @@ reg = <0x41600 0x80>; msi-available-ranges = <0 0x100>; interrupts = < - 0xe0 0 - 0xe1 0 - 0xe2 0 - 0xe3 0 - 0xe4 0 - 0xe5 0 - 0xe6 0 - 0xe7 0>; + 0xe0 0 0 0 + 0xe1 0 0 0 + 0xe2 0 0 0 + 0xe3 0 0 0 + 0xe4 0 0 0 + 0xe5 0 0 0 + 0xe6 0 0 0 + 0xe7 0 0 0>; }; global-utilities@e0000 { //global utilities block @@ -547,7 +559,7 @@ ranges = <0x2000000 0x0 0xa0000000 0xc 0x20000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>; clock-frequency = <33333333>; - interrupts = <16 2>; + interrupts = <16 2 0 0>; interrupt-map-mask = <0xf800 0 0 7>; interrupt-map = < /* IDSEL 0x0 */ @@ -582,7 +594,7 @@ ranges = <0x2000000 0x0 0xc0000000 0xc 0x40000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0xf 0xffc20000 0x0 0x10000>; clock-frequency = <33333333>; - interrupts = <16 2>; + interrupts = <16 2 0 0>; interrupt-map-mask = <0xf800 0 0 7>; interrupt-map = < /* IDSEL 0x0 */ @@ -618,7 +630,7 @@ ranges = <0x2000000 0x0 0x80000000 0xc 0x00000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>; clock-frequency = <33333333>; - interrupts = <16 2>; + interrupts = <16 2 0 0>; 
interrupt-map-mask = <0xf800 0 0 7>; interrupt-map = < /* IDSEL 0x0 */ diff --git a/trunk/arch/powerpc/boot/dts/p2020ds.dts b/trunk/arch/powerpc/boot/dts/p2020ds.dts index 11019142813c..2bcf3683d223 100644 --- a/trunk/arch/powerpc/boot/dts/p2020ds.dts +++ b/trunk/arch/powerpc/boot/dts/p2020ds.dts @@ -1,7 +1,7 @@ /* * P2020 DS Device Tree Source * - * Copyright 2009 Freescale Semiconductor Inc. + * Copyright 2009-2011 Freescale Semiconductor Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -9,12 +9,11 @@ * option) any later version. */ -/dts-v1/; +/include/ "p2020si.dtsi" + / { - model = "fsl,P2020"; + model = "fsl,P2020DS"; compatible = "fsl,P2020DS"; - #address-cells = <2>; - #size-cells = <2>; aliases { ethernet0 = &enet0; @@ -27,35 +26,13 @@ pci2 = &pci2; }; - cpus { - #address-cells = <1>; - #size-cells = <0>; - - PowerPC,P2020@0 { - device_type = "cpu"; - reg = <0x0>; - next-level-cache = <&L2>; - }; - - PowerPC,P2020@1 { - device_type = "cpu"; - reg = <0x1>; - next-level-cache = <&L2>; - }; - }; memory { device_type = "memory"; }; localbus@ffe05000 { - #address-cells = <2>; - #size-cells = <1>; compatible = "fsl,elbc", "simple-bus"; - reg = <0 0xffe05000 0 0x1000>; - interrupts = <19 2>; - interrupt-parent = <&mpic>; - ranges = <0x0 0x0 0x0 0xe8000000 0x08000000 0x1 0x0 0x0 0xe0000000 0x08000000 0x2 0x0 0x0 0xffa00000 0x00040000 @@ -158,352 +135,77 @@ }; soc@ffe00000 { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - compatible = "fsl,p2020-immr", "simple-bus"; - ranges = <0x0 0 0xffe00000 0x100000>; - bus-frequency = <0>; // Filled out by uboot. - - ecm-law@0 { - compatible = "fsl,ecm-law"; - reg = <0x0 0x1000>; - fsl,num-laws = <12>; - }; - - ecm@1000 { - compatible = "fsl,p2020-ecm", "fsl,ecm"; - reg = <0x1000 0x1000>; - interrupts = <17 2>; - interrupt-parent = <&mpic>; - }; - - memory-controller@2000 { - compatible = "fsl,p2020-memory-controller"; - reg = <0x2000 0x1000>; - interrupt-parent = <&mpic>; - interrupts = <18 2>; - }; - - i2c@3000 { - #address-cells = <1>; - #size-cells = <0>; - cell-index = <0>; - compatible = "fsl-i2c"; - reg = <0x3000 0x100>; - interrupts = <43 2>; - interrupt-parent = <&mpic>; - dfsrr; - }; - - i2c@3100 { - #address-cells = <1>; - #size-cells = <0>; - cell-index = <1>; - compatible = "fsl-i2c"; - reg = <0x3100 0x100>; - interrupts = <43 2>; - interrupt-parent = <&mpic>; - dfsrr; - }; - serial0: serial@4500 { - cell-index = <0>; - device_type = "serial"; - compatible = "ns16550"; - reg = <0x4500 0x100>; - clock-frequency = <0>; - interrupts = <42 2>; - interrupt-parent = <&mpic>; - }; - - serial1: serial@4600 { - cell-index = <1>; - device_type = "serial"; - compatible = "ns16550"; - reg = <0x4600 0x100>; - clock-frequency = <0>; - interrupts = <42 2>; - interrupt-parent = <&mpic>; - }; - - spi@7000 { - compatible = "fsl,espi"; - reg = <0x7000 0x1000>; - interrupts = <59 0x2>; - interrupt-parent = <&mpic>; + usb@22000 { + phy_type = "ulpi"; }; - dma@c300 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,eloplus-dma"; - reg = <0xc300 0x4>; - ranges = <0x0 0xc100 0x200>; - cell-index = <1>; - dma-channel@0 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x0 0x80>; - cell-index = <0>; + mdio@24520 { + phy0: ethernet-phy@0 { interrupt-parent = <&mpic>; - interrupts = <76 2>; + interrupts = <3 1>; + reg = <0x0>; }; - dma-channel@80 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x80 0x80>; - 
cell-index = <1>; + phy1: ethernet-phy@1 { interrupt-parent = <&mpic>; - interrupts = <77 2>; + interrupts = <3 1>; + reg = <0x1>; }; - dma-channel@100 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x100 0x80>; - cell-index = <2>; + phy2: ethernet-phy@2 { interrupt-parent = <&mpic>; - interrupts = <78 2>; + interrupts = <3 1>; + reg = <0x2>; }; - dma-channel@180 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x180 0x80>; - cell-index = <3>; - interrupt-parent = <&mpic>; - interrupts = <79 2>; + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; }; - }; - gpio: gpio-controller@f000 { - #gpio-cells = <2>; - compatible = "fsl,mpc8572-gpio"; - reg = <0xf000 0x100>; - interrupts = <47 0x2>; - interrupt-parent = <&mpic>; - gpio-controller; }; - L2: l2-cache-controller@20000 { - compatible = "fsl,p2020-l2-cache-controller"; - reg = <0x20000 0x1000>; - cache-line-size = <32>; // 32 bytes - cache-size = <0x80000>; // L2, 512k - interrupt-parent = <&mpic>; - interrupts = <16 2>; + mdio@25520 { + tbi1: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; + }; }; - dma@21300 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,eloplus-dma"; - reg = <0x21300 0x4>; - ranges = <0x0 0x21100 0x200>; - cell-index = <0>; - dma-channel@0 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x0 0x80>; - cell-index = <0>; - interrupt-parent = <&mpic>; - interrupts = <20 2>; - }; - dma-channel@80 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x80 0x80>; - cell-index = <1>; - interrupt-parent = <&mpic>; - interrupts = <21 2>; + mdio@26520 { + tbi2: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; }; - dma-channel@100 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x100 0x80>; - cell-index = <2>; - interrupt-parent = <&mpic>; - interrupts = <22 2>; - }; - dma-channel@180 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x180 0x80>; - cell-index = <3>; - interrupt-parent = <&mpic>; - interrupts = <23 2>; - }; - }; - usb@22000 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl-usb2-dr"; - reg = <0x22000 0x1000>; - interrupt-parent = <&mpic>; - interrupts = <28 0x2>; - phy_type = "ulpi"; }; enet0: ethernet@24000 { - #address-cells = <1>; - #size-cells = <1>; - cell-index = <0>; - device_type = "network"; - model = "eTSEC"; - compatible = "gianfar"; - reg = <0x24000 0x1000>; - ranges = <0x0 0x24000 0x1000>; - local-mac-address = [ 00 00 00 00 00 00 ]; - interrupts = <29 2 30 2 34 2>; - interrupt-parent = <&mpic>; tbi-handle = <&tbi0>; phy-handle = <&phy0>; phy-connection-type = "rgmii-id"; - - mdio@520 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,gianfar-mdio"; - reg = <0x520 0x20>; - - phy0: ethernet-phy@0 { - interrupt-parent = <&mpic>; - interrupts = <3 1>; - reg = <0x0>; - }; - phy1: ethernet-phy@1 { - interrupt-parent = <&mpic>; - interrupts = <3 1>; - reg = <0x1>; - }; - phy2: ethernet-phy@2 { - interrupt-parent = <&mpic>; - interrupts = <3 1>; - reg = <0x2>; - }; - tbi0: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; - }; - }; }; enet1: ethernet@25000 { - #address-cells = <1>; - #size-cells = <1>; - cell-index = <1>; - device_type = "network"; - model = "eTSEC"; - compatible = "gianfar"; - reg = <0x25000 0x1000>; - ranges = <0x0 0x25000 0x1000>; - local-mac-address = [ 00 00 00 00 00 00 ]; - interrupts = <35 2 36 2 40 2>; - interrupt-parent = <&mpic>; tbi-handle = <&tbi1>; phy-handle = <&phy1>; phy-connection-type = "rgmii-id"; - mdio@520 { - #address-cells = <1>; - #size-cells = <0>; - compatible = 
"fsl,gianfar-tbi"; - reg = <0x520 0x20>; - - tbi1: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; - }; - }; }; enet2: ethernet@26000 { - #address-cells = <1>; - #size-cells = <1>; - cell-index = <2>; - device_type = "network"; - model = "eTSEC"; - compatible = "gianfar"; - reg = <0x26000 0x1000>; - ranges = <0x0 0x26000 0x1000>; - local-mac-address = [ 00 00 00 00 00 00 ]; - interrupts = <31 2 32 2 33 2>; - interrupt-parent = <&mpic>; tbi-handle = <&tbi2>; phy-handle = <&phy2>; phy-connection-type = "rgmii-id"; - - mdio@520 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,gianfar-tbi"; - reg = <0x520 0x20>; - - tbi2: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; - }; - }; - }; - - sdhci@2e000 { - compatible = "fsl,p2020-esdhc", "fsl,esdhc"; - reg = <0x2e000 0x1000>; - interrupts = <72 0x2>; - interrupt-parent = <&mpic>; - /* Filled in by U-Boot */ - clock-frequency = <0>; - }; - - crypto@30000 { - compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4", - "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0"; - reg = <0x30000 0x10000>; - interrupts = <45 2 58 2>; - interrupt-parent = <&mpic>; - fsl,num-channels = <4>; - fsl,channel-fifo-len = <24>; - fsl,exec-units-mask = <0xbfe>; - fsl,descriptor-types-mask = <0x3ab0ebf>; }; - mpic: pic@40000 { - interrupt-controller; - #address-cells = <0>; - #interrupt-cells = <2>; - reg = <0x40000 0x40000>; - compatible = "chrp,open-pic"; - device_type = "open-pic"; - }; msi@41600 { compatible = "fsl,mpic-msi"; - reg = <0x41600 0x80>; - msi-available-ranges = <0 0x100>; - interrupts = < - 0xe0 0 - 0xe1 0 - 0xe2 0 - 0xe3 0 - 0xe4 0 - 0xe5 0 - 0xe6 0 - 0xe7 0>; - interrupt-parent = <&mpic>; - }; - - global-utilities@e0000 { //global utilities block - compatible = "fsl,p2020-guts"; - reg = <0xe0000 0x1000>; - fsl,has-rstcr; }; }; pci0: pcie@ffe08000 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - reg = <0 0xffe08000 0 0x1000>; - bus-range = <0 255>; ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; - clock-frequency = <33333333>; - interrupt-parent = <&mpic>; - interrupts = <24 2>; interrupt-map-mask = <0xf800 0x0 0x0 0x7>; interrupt-map = < /* IDSEL 0x0 */ @@ -528,18 +230,8 @@ }; pci1: pcie@ffe09000 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - reg = <0 0xffe09000 0 0x1000>; - bus-range = <0 255>; ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; - clock-frequency = <33333333>; - interrupt-parent = <&mpic>; - interrupts = <25 2>; interrupt-map-mask = <0xff00 0x0 0x0 0x7>; interrupt-map = < @@ -667,18 +359,8 @@ }; pci2: pcie@ffe0a000 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - reg = <0 0xffe0a000 0 0x1000>; - bus-range = <0 255>; ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>; - clock-frequency = <33333333>; - interrupt-parent = <&mpic>; - interrupts = <26 2>; interrupt-map-mask = <0xf800 0x0 0x0 0x7>; interrupt-map = < /* IDSEL 0x0 */ diff --git a/trunk/arch/powerpc/boot/dts/p2020rdb.dts b/trunk/arch/powerpc/boot/dts/p2020rdb.dts index e2d48fd4416e..3782a58f13be 100644 --- a/trunk/arch/powerpc/boot/dts/p2020rdb.dts +++ b/trunk/arch/powerpc/boot/dts/p2020rdb.dts @@ -9,12 +9,11 @@ * 
option) any later version. */ -/dts-v1/; +/include/ "p2020si.dtsi" + / { - model = "fsl,P2020"; + model = "fsl,P2020RDB"; compatible = "fsl,P2020RDB"; - #address-cells = <2>; - #size-cells = <2>; aliases { ethernet0 = &enet0; @@ -26,34 +25,11 @@ pci1 = &pci1; }; - cpus { - #address-cells = <1>; - #size-cells = <0>; - - PowerPC,P2020@0 { - device_type = "cpu"; - reg = <0x0>; - next-level-cache = <&L2>; - }; - - PowerPC,P2020@1 { - device_type = "cpu"; - reg = <0x1>; - next-level-cache = <&L2>; - }; - }; - memory { device_type = "memory"; }; localbus@ffe05000 { - #address-cells = <2>; - #size-cells = <1>; - compatible = "fsl,p2020-elbc", "fsl,elbc", "simple-bus"; - reg = <0 0xffe05000 0 0x1000>; - interrupts = <19 2>; - interrupt-parent = <&mpic>; /* NOR and NAND Flashes */ ranges = <0x0 0x0 0x0 0xef000000 0x01000000 @@ -165,90 +141,16 @@ }; soc@ffe00000 { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - compatible = "fsl,p2020-immr", "simple-bus"; - ranges = <0x0 0x0 0xffe00000 0x100000>; - bus-frequency = <0>; // Filled out by uboot. - - ecm-law@0 { - compatible = "fsl,ecm-law"; - reg = <0x0 0x1000>; - fsl,num-laws = <12>; - }; - - ecm@1000 { - compatible = "fsl,p2020-ecm", "fsl,ecm"; - reg = <0x1000 0x1000>; - interrupts = <17 2>; - interrupt-parent = <&mpic>; - }; - - memory-controller@2000 { - compatible = "fsl,p2020-memory-controller"; - reg = <0x2000 0x1000>; - interrupt-parent = <&mpic>; - interrupts = <18 2>; - }; - i2c@3000 { - #address-cells = <1>; - #size-cells = <0>; - cell-index = <0>; - compatible = "fsl-i2c"; - reg = <0x3000 0x100>; - interrupts = <43 2>; - interrupt-parent = <&mpic>; - dfsrr; rtc@68 { compatible = "dallas,ds1339"; reg = <0x68>; }; }; - i2c@3100 { - #address-cells = <1>; - #size-cells = <0>; - cell-index = <1>; - compatible = "fsl-i2c"; - reg = <0x3100 0x100>; - interrupts = <43 2>; - interrupt-parent = <&mpic>; - dfsrr; - }; - - serial0: serial@4500 { - cell-index = <0>; - device_type = "serial"; - compatible = "ns16550"; - reg = <0x4500 0x100>; - clock-frequency = <0>; - interrupts = <42 2>; - interrupt-parent = <&mpic>; - }; - - serial1: serial@4600 { - cell-index = <1>; - device_type = "serial"; - compatible = "ns16550"; - reg = <0x4600 0x100>; - clock-frequency = <0>; - interrupts = <42 2>; - interrupt-parent = <&mpic>; - }; + spi@7000 { - spi@7000 { - cell-index = <0>; - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,espi"; - reg = <0x7000 0x1000>; - interrupts = <59 0x2>; - interrupt-parent = <&mpic>; - mode = "cpu"; - - fsl_m25p80@0 { + fsl_m25p80@0 { #address-cells = <1>; #size-cells = <1>; compatible = "fsl,espi-flash"; @@ -294,254 +196,68 @@ }; }; - dma@c300 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,eloplus-dma"; - reg = <0xc300 0x4>; - ranges = <0x0 0xc100 0x200>; - cell-index = <1>; - dma-channel@0 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x0 0x80>; - cell-index = <0>; - interrupt-parent = <&mpic>; - interrupts = <76 2>; - }; - dma-channel@80 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x80 0x80>; - cell-index = <1>; - interrupt-parent = <&mpic>; - interrupts = <77 2>; - }; - dma-channel@100 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x100 0x80>; - cell-index = <2>; - interrupt-parent = <&mpic>; - interrupts = <78 2>; - }; - dma-channel@180 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x180 0x80>; - cell-index = <3>; - interrupt-parent = <&mpic>; - interrupts = <79 2>; - }; - }; - - gpio: gpio-controller@f000 { - #gpio-cells = <2>; - compatible = 
"fsl,mpc8572-gpio"; - reg = <0xf000 0x100>; - interrupts = <47 0x2>; - interrupt-parent = <&mpic>; - gpio-controller; - }; - - L2: l2-cache-controller@20000 { - compatible = "fsl,p2020-l2-cache-controller"; - reg = <0x20000 0x1000>; - cache-line-size = <32>; // 32 bytes - cache-size = <0x80000>; // L2,512K - interrupt-parent = <&mpic>; - interrupts = <16 2>; + usb@22000 { + phy_type = "ulpi"; }; - dma@21300 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,eloplus-dma"; - reg = <0x21300 0x4>; - ranges = <0x0 0x21100 0x200>; - cell-index = <0>; - dma-channel@0 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x0 0x80>; - cell-index = <0>; + mdio@24520 { + phy0: ethernet-phy@0 { interrupt-parent = <&mpic>; - interrupts = <20 2>; - }; - dma-channel@80 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x80 0x80>; - cell-index = <1>; - interrupt-parent = <&mpic>; - interrupts = <21 2>; - }; - dma-channel@100 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x100 0x80>; - cell-index = <2>; - interrupt-parent = <&mpic>; - interrupts = <22 2>; - }; - dma-channel@180 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x180 0x80>; - cell-index = <3>; + interrupts = <3 1>; + reg = <0x0>; + }; + phy1: ethernet-phy@1 { interrupt-parent = <&mpic>; - interrupts = <23 2>; + interrupts = <3 1>; + reg = <0x1>; + }; + }; + + mdio@25520 { + tbi0: tbi-phy@11 { + reg = <0x11>; + device_type = "tbi-phy"; }; }; - usb@22000 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl-usb2-dr"; - reg = <0x22000 0x1000>; - interrupt-parent = <&mpic>; - interrupts = <28 0x2>; - phy_type = "ulpi"; + mdio@26520 { + status = "disabled"; }; enet0: ethernet@24000 { - #address-cells = <1>; - #size-cells = <1>; - cell-index = <0>; - device_type = "network"; - model = "eTSEC"; - compatible = "gianfar"; - reg = <0x24000 0x1000>; - ranges = <0x0 0x24000 0x1000>; - local-mac-address = [ 00 00 00 00 00 00 ]; - interrupts = <29 2 30 2 34 2>; - interrupt-parent = <&mpic>; fixed-link = <1 1 1000 0 0>; phy-connection-type = "rgmii-id"; - - mdio@520 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,gianfar-mdio"; - reg = <0x520 0x20>; - - phy0: ethernet-phy@0 { - interrupt-parent = <&mpic>; - interrupts = <3 1>; - reg = <0x0>; - }; - phy1: ethernet-phy@1 { - interrupt-parent = <&mpic>; - interrupts = <3 1>; - reg = <0x1>; - }; - }; }; enet1: ethernet@25000 { - #address-cells = <1>; - #size-cells = <1>; - cell-index = <1>; - device_type = "network"; - model = "eTSEC"; - compatible = "gianfar"; - reg = <0x25000 0x1000>; - ranges = <0x0 0x25000 0x1000>; - local-mac-address = [ 00 00 00 00 00 00 ]; - interrupts = <35 2 36 2 40 2>; - interrupt-parent = <&mpic>; tbi-handle = <&tbi0>; phy-handle = <&phy0>; phy-connection-type = "sgmii"; - - mdio@520 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,gianfar-tbi"; - reg = <0x520 0x20>; - - tbi0: tbi-phy@11 { - reg = <0x11>; - device_type = "tbi-phy"; - }; - }; }; enet2: ethernet@26000 { - #address-cells = <1>; - #size-cells = <1>; - cell-index = <2>; - device_type = "network"; - model = "eTSEC"; - compatible = "gianfar"; - reg = <0x26000 0x1000>; - ranges = <0x0 0x26000 0x1000>; - local-mac-address = [ 00 00 00 00 00 00 ]; - interrupts = <31 2 32 2 33 2>; - interrupt-parent = <&mpic>; phy-handle = <&phy1>; phy-connection-type = "rgmii-id"; }; - sdhci@2e000 { - compatible = "fsl,p2020-esdhc", "fsl,esdhc"; - reg = <0x2e000 0x1000>; - interrupts = <72 0x2>; - interrupt-parent = <&mpic>; - /* Filled in by U-Boot */ - 
clock-frequency = <0>; - }; - - crypto@30000 { - compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4", - "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0"; - reg = <0x30000 0x10000>; - interrupts = <45 2 58 2>; - interrupt-parent = <&mpic>; - fsl,num-channels = <4>; - fsl,channel-fifo-len = <24>; - fsl,exec-units-mask = <0xbfe>; - fsl,descriptor-types-mask = <0x3ab0ebf>; - }; - - mpic: pic@40000 { - interrupt-controller; - #address-cells = <0>; - #interrupt-cells = <2>; - reg = <0x40000 0x40000>; - compatible = "chrp,open-pic"; - device_type = "open-pic"; - }; - - msi@41600 { - compatible = "fsl,p2020-msi", "fsl,mpic-msi"; - reg = <0x41600 0x80>; - msi-available-ranges = <0 0x100>; - interrupts = < - 0xe0 0 - 0xe1 0 - 0xe2 0 - 0xe3 0 - 0xe4 0 - 0xe5 0 - 0xe6 0 - 0xe7 0>; - interrupt-parent = <&mpic>; - }; + }; - global-utilities@e0000 { //global utilities block - compatible = "fsl,p2020-guts"; - reg = <0xe0000 0x1000>; - fsl,has-rstcr; - }; + pci0: pcie@ffe08000 { + status = "disabled"; }; - pci0: pcie@ffe09000 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - reg = <0 0xffe09000 0 0x1000>; - bus-range = <0 255>; + pci1: pcie@ffe09000 { ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; - clock-frequency = <33333333>; - interrupt-parent = <&mpic>; - interrupts = <25 2>; - pcie@0 { + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x4 0x1 + 0000 0x0 0x0 0x2 &mpic 0x5 0x1 + 0000 0x0 0x0 0x3 &mpic 0x6 0x1 + 0000 0x0 0x0 0x4 &mpic 0x7 0x1 + >; + pcie@0 { reg = <0x0 0x0 0x0 0x0 0x0>; #size-cells = <2>; #address-cells = <3>; @@ -556,19 +272,17 @@ }; }; - pci1: pcie@ffe0a000 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - reg = <0 0xffe0a000 0 0x1000>; - bus-range = <0 255>; + pci2: pcie@ffe0a000 { ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; - clock-frequency = <33333333>; - interrupt-parent = <&mpic>; - interrupts = <26 2>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x0 0x1 + 0000 0x0 0x0 0x2 &mpic 0x1 0x1 + 0000 0x0 0x0 0x3 &mpic 0x2 0x1 + 0000 0x0 0x0 0x4 &mpic 0x3 0x1 + >; pcie@0 { reg = <0x0 0x0 0x0 0x0 0x0>; #size-cells = <2>; diff --git a/trunk/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts b/trunk/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts index b69c3a5dc858..fc8ddddfccb6 100644 --- a/trunk/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts +++ b/trunk/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts @@ -14,12 +14,11 @@ * option) any later version. */ -/dts-v1/; +/include/ "p2020si.dtsi" + / { - model = "fsl,P2020"; + model = "fsl,P2020RDB"; compatible = "fsl,P2020RDB", "fsl,MPC85XXRDB-CAMP"; - #address-cells = <2>; - #size-cells = <2>; aliases { ethernet1 = &enet1; @@ -29,91 +28,33 @@ }; cpus { - #address-cells = <1>; - #size-cells = <0>; - - PowerPC,P2020@0 { - device_type = "cpu"; - reg = <0x0>; - next-level-cache = <&L2>; + PowerPC,P2020@1 { + status = "disabled"; }; + }; memory { device_type = "memory"; }; - soc@ffe00000 { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - compatible = "fsl,p2020-immr", "simple-bus"; - ranges = <0x0 0x0 0xffe00000 0x100000>; - bus-frequency = <0>; // Filled out by uboot. 
- - ecm-law@0 { - compatible = "fsl,ecm-law"; - reg = <0x0 0x1000>; - fsl,num-laws = <12>; - }; - - ecm@1000 { - compatible = "fsl,p2020-ecm", "fsl,ecm"; - reg = <0x1000 0x1000>; - interrupts = <17 2>; - interrupt-parent = <&mpic>; - }; - - memory-controller@2000 { - compatible = "fsl,p2020-memory-controller"; - reg = <0x2000 0x1000>; - interrupt-parent = <&mpic>; - interrupts = <18 2>; - }; + localbus@ffe05000 { + status = "disabled"; + }; + soc@ffe00000 { i2c@3000 { - #address-cells = <1>; - #size-cells = <0>; - cell-index = <0>; - compatible = "fsl-i2c"; - reg = <0x3000 0x100>; - interrupts = <43 2>; - interrupt-parent = <&mpic>; - dfsrr; rtc@68 { compatible = "dallas,ds1339"; reg = <0x68>; }; }; - i2c@3100 { - #address-cells = <1>; - #size-cells = <0>; - cell-index = <1>; - compatible = "fsl-i2c"; - reg = <0x3100 0x100>; - interrupts = <43 2>; - interrupt-parent = <&mpic>; - dfsrr; - }; - - serial0: serial@4500 { - cell-index = <0>; - device_type = "serial"; - compatible = "ns16550"; - reg = <0x4500 0x100>; - clock-frequency = <0>; + serial1: serial@4600 { + status = "disabled"; }; spi@7000 { - cell-index = <0>; - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,espi"; - reg = <0x7000 0x1000>; - interrupts = <59 0x2>; - interrupt-parent = <&mpic>; - mode = "cpu"; fsl_m25p80@0 { #address-cells = <1>; @@ -161,76 +102,15 @@ }; }; - gpio: gpio-controller@f000 { - #gpio-cells = <2>; - compatible = "fsl,mpc8572-gpio"; - reg = <0xf000 0x100>; - interrupts = <47 0x2>; - interrupt-parent = <&mpic>; - gpio-controller; - }; - - L2: l2-cache-controller@20000 { - compatible = "fsl,p2020-l2-cache-controller"; - reg = <0x20000 0x1000>; - cache-line-size = <32>; // 32 bytes - cache-size = <0x80000>; // L2,512K - interrupt-parent = <&mpic>; - interrupts = <16 2>; - }; - - dma@21300 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,eloplus-dma"; - reg = <0x21300 0x4>; - ranges = <0x0 0x21100 0x200>; - cell-index = <0>; - dma-channel@0 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x0 0x80>; - cell-index = <0>; - interrupt-parent = <&mpic>; - interrupts = <20 2>; - }; - dma-channel@80 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x80 0x80>; - cell-index = <1>; - interrupt-parent = <&mpic>; - interrupts = <21 2>; - }; - dma-channel@100 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x100 0x80>; - cell-index = <2>; - interrupt-parent = <&mpic>; - interrupts = <22 2>; - }; - dma-channel@180 { - compatible = "fsl,eloplus-dma-channel"; - reg = <0x180 0x80>; - cell-index = <3>; - interrupt-parent = <&mpic>; - interrupts = <23 2>; - }; + dma@c300 { + status = "disabled"; }; usb@22000 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl-usb2-dr"; - reg = <0x22000 0x1000>; - interrupt-parent = <&mpic>; - interrupts = <28 0x2>; phy_type = "ulpi"; }; mdio@24520 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,gianfar-mdio"; - reg = <0x24520 0x20>; phy0: ethernet-phy@0 { interrupt-parent = <&mpic>; @@ -245,29 +125,21 @@ }; mdio@25520 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "fsl,gianfar-tbi"; - reg = <0x26520 0x20>; - tbi0: tbi-phy@11 { reg = <0x11>; device_type = "tbi-phy"; }; }; + mdio@26520 { + status = "disabled"; + }; + + enet0: ethernet@24000 { + status = "disabled"; + }; + enet1: ethernet@25000 { - #address-cells = <1>; - #size-cells = <1>; - cell-index = <1>; - device_type = "network"; - model = "eTSEC"; - compatible = "gianfar"; - reg = <0x25000 0x1000>; - ranges = <0x0 0x25000 0x1000>; - 
local-mac-address = [ 00 00 00 00 00 00 ]; - interrupts = <35 2 36 2 40 2>; - interrupt-parent = <&mpic>; tbi-handle = <&tbi0>; phy-handle = <&phy0>; phy-connection-type = "sgmii"; @@ -275,49 +147,12 @@ }; enet2: ethernet@26000 { - #address-cells = <1>; - #size-cells = <1>; - cell-index = <2>; - device_type = "network"; - model = "eTSEC"; - compatible = "gianfar"; - reg = <0x26000 0x1000>; - ranges = <0x0 0x26000 0x1000>; - local-mac-address = [ 00 00 00 00 00 00 ]; - interrupts = <31 2 32 2 33 2>; - interrupt-parent = <&mpic>; phy-handle = <&phy1>; phy-connection-type = "rgmii-id"; }; - sdhci@2e000 { - compatible = "fsl,p2020-esdhc", "fsl,esdhc"; - reg = <0x2e000 0x1000>; - interrupts = <72 0x2>; - interrupt-parent = <&mpic>; - /* Filled in by U-Boot */ - clock-frequency = <0>; - }; - - crypto@30000 { - compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4", - "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0"; - reg = <0x30000 0x10000>; - interrupts = <45 2 58 2>; - interrupt-parent = <&mpic>; - fsl,num-channels = <4>; - fsl,channel-fifo-len = <24>; - fsl,exec-units-mask = <0xbfe>; - fsl,descriptor-types-mask = <0x3ab0ebf>; - }; mpic: pic@40000 { - interrupt-controller; - #address-cells = <0>; - #interrupt-cells = <2>; - reg = <0x40000 0x40000>; - compatible = "chrp,open-pic"; - device_type = "open-pic"; protected-sources = < 42 76 77 78 79 /* serial1 , dma2 */ 29 30 34 26 /* enet0, pci1 */ @@ -326,26 +161,28 @@ >; }; - global-utilities@e0000 { - compatible = "fsl,p2020-guts"; - reg = <0xe0000 0x1000>; - fsl,has-rstcr; + msi@41600 { + status = "disabled"; }; + + }; - pci0: pcie@ffe09000 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - reg = <0 0xffe09000 0 0x1000>; - bus-range = <0 255>; + pci0: pcie@ffe08000 { + status = "disabled"; + }; + + pci1: pcie@ffe09000 { ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; - clock-frequency = <33333333>; - interrupt-parent = <&mpic>; - interrupts = <25 2>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x4 0x1 + 0000 0x0 0x0 0x2 &mpic 0x5 0x1 + 0000 0x0 0x0 0x3 &mpic 0x6 0x1 + 0000 0x0 0x0 0x4 &mpic 0x7 0x1 + >; pcie@0 { reg = <0x0 0x0 0x0 0x0 0x0>; #size-cells = <2>; @@ -360,4 +197,8 @@ 0x0 0x100000>; }; }; + + pci2: pcie@ffe0a000 { + status = "disabled"; + }; }; diff --git a/trunk/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts b/trunk/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts index 7a31d46c01b0..261c34ba45ec 100644 --- a/trunk/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts +++ b/trunk/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts @@ -15,27 +15,21 @@ * option) any later version. 
*/ -/dts-v1/; +/include/ "p2020si.dtsi" + / { - model = "fsl,P2020"; + model = "fsl,P2020RDB"; compatible = "fsl,P2020RDB", "fsl,MPC85XXRDB-CAMP"; - #address-cells = <2>; - #size-cells = <2>; aliases { ethernet0 = &enet0; - serial0 = &serial0; + serial0 = &serial1; pci1 = &pci1; }; cpus { - #address-cells = <1>; - #size-cells = <0>; - - PowerPC,P2020@1 { - device_type = "cpu"; - reg = <0x1>; - next-level-cache = <&L2>; + PowerPC,P2020@0 { + status = "disabled"; }; }; @@ -43,20 +37,37 @@ device_type = "memory"; }; + localbus@ffe05000 { + status = "disabled"; + }; + soc@ffe00000 { - #address-cells = <1>; - #size-cells = <1>; - device_type = "soc"; - compatible = "fsl,p2020-immr", "simple-bus"; - ranges = <0x0 0x0 0xffe00000 0x100000>; - bus-frequency = <0>; // Filled out by uboot. - - serial0: serial@4600 { - cell-index = <1>; - device_type = "serial"; - compatible = "ns16550"; - reg = <0x4600 0x100>; - clock-frequency = <0>; + ecm-law@0 { + status = "disabled"; + }; + + ecm@1000 { + status = "disabled"; + }; + + memory-controller@2000 { + status = "disabled"; + }; + + i2c@3000 { + status = "disabled"; + }; + + i2c@3100 { + status = "disabled"; + }; + + serial0: serial@4500 { + status = "disabled"; + }; + + spi@7000 { + status = "disabled"; }; dma@c300 { @@ -96,6 +107,10 @@ }; }; + gpio: gpio-controller@f000 { + status = "disabled"; + }; + L2: l2-cache-controller@20000 { compatible = "fsl,p2020-l2-cache-controller"; reg = <0x20000 0x1000>; @@ -104,31 +119,49 @@ interrupt-parent = <&mpic>; }; + dma@21300 { + status = "disabled"; + }; + + usb@22000 { + status = "disabled"; + }; + + mdio@24520 { + status = "disabled"; + }; + + mdio@25520 { + status = "disabled"; + }; + + mdio@26520 { + status = "disabled"; + }; enet0: ethernet@24000 { - #address-cells = <1>; - #size-cells = <1>; - cell-index = <0>; - device_type = "network"; - model = "eTSEC"; - compatible = "gianfar"; - reg = <0x24000 0x1000>; - ranges = <0x0 0x24000 0x1000>; - local-mac-address = [ 00 00 00 00 00 00 ]; - interrupts = <29 2 30 2 34 2>; - interrupt-parent = <&mpic>; fixed-link = <1 1 1000 0 0>; phy-connection-type = "rgmii-id"; }; + enet1: ethernet@25000 { + status = "disabled"; + }; + + enet2: ethernet@26000 { + status = "disabled"; + }; + + sdhci@2e000 { + status = "disabled"; + }; + + crypto@30000 { + status = "disabled"; + }; + mpic: pic@40000 { - interrupt-controller; - #address-cells = <0>; - #interrupt-cells = <2>; - reg = <0x40000 0x40000>; - compatible = "chrp,open-pic"; - device_type = "open-pic"; protected-sources = < 17 18 43 42 59 47 /*ecm, mem, i2c, serial0, spi,gpio */ 16 20 21 22 23 28 /* L2, dma1, USB */ @@ -152,21 +185,32 @@ 0xe7 0>; interrupt-parent = <&mpic>; }; + + global-utilities@e0000 { //global utilities block + status = "disabled"; + }; + }; - pci1: pcie@ffe0a000 { - compatible = "fsl,mpc8548-pcie"; - device_type = "pci"; - #interrupt-cells = <1>; - #size-cells = <2>; - #address-cells = <3>; - reg = <0 0xffe0a000 0 0x1000>; - bus-range = <0 255>; + pci0: pcie@ffe08000 { + status = "disabled"; + }; + + pci1: pcie@ffe09000 { + status = "disabled"; + }; + + pci2: pcie@ffe0a000 { ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; - clock-frequency = <33333333>; - interrupt-parent = <&mpic>; - interrupts = <26 2>; + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; + interrupt-map = < + /* IDSEL 0x0 */ + 0000 0x0 0x0 0x1 &mpic 0x0 0x1 + 0000 0x0 0x0 0x2 &mpic 0x1 0x1 + 0000 0x0 0x0 0x3 &mpic 0x2 0x1 + 0000 0x0 0x0 0x4 &mpic 0x3 0x1 + >; pcie@0 { reg = 
<0x0 0x0 0x0 0x0 0x0>; #size-cells = <2>; diff --git a/trunk/arch/powerpc/boot/dts/p2020si.dtsi b/trunk/arch/powerpc/boot/dts/p2020si.dtsi new file mode 100644 index 000000000000..6def17f265d3 --- /dev/null +++ b/trunk/arch/powerpc/boot/dts/p2020si.dtsi @@ -0,0 +1,382 @@ +/* + * P2020 Device Tree Source + * + * Copyright 2011 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +/dts-v1/; +/ { + compatible = "fsl,P2020"; + #address-cells = <2>; + #size-cells = <2>; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + PowerPC,P2020@0 { + device_type = "cpu"; + reg = <0x0>; + next-level-cache = <&L2>; + }; + + PowerPC,P2020@1 { + device_type = "cpu"; + reg = <0x1>; + next-level-cache = <&L2>; + }; + }; + + localbus@ffe05000 { + #address-cells = <2>; + #size-cells = <1>; + compatible = "fsl,p2020-elbc", "fsl,elbc", "simple-bus"; + reg = <0 0xffe05000 0 0x1000>; + interrupts = <19 2>; + interrupt-parent = <&mpic>; + }; + + soc@ffe00000 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "fsl,p2020-immr", "simple-bus"; + ranges = <0x0 0x0 0xffe00000 0x100000>; + bus-frequency = <0>; // Filled out by uboot. + + ecm-law@0 { + compatible = "fsl,ecm-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <12>; + }; + + ecm@1000 { + compatible = "fsl,p2020-ecm", "fsl,ecm"; + reg = <0x1000 0x1000>; + interrupts = <17 2>; + interrupt-parent = <&mpic>; + }; + + memory-controller@2000 { + compatible = "fsl,p2020-memory-controller"; + reg = <0x2000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <18 2>; + }; + + i2c@3000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "fsl-i2c"; + reg = <0x3000 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + + i2c@3100 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "fsl-i2c"; + reg = <0x3100 0x100>; + interrupts = <43 2>; + interrupt-parent = <&mpic>; + dfsrr; + }; + + serial0: serial@4500 { + cell-index = <0>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4500 0x100>; + clock-frequency = <0>; + interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; + + serial1: serial@4600 { + cell-index = <1>; + device_type = "serial"; + compatible = "ns16550"; + reg = <0x4600 0x100>; + clock-frequency = <0>; + interrupts = <42 2>; + interrupt-parent = <&mpic>; + }; + + spi@7000 { + cell-index = <0>; + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,espi"; + reg = <0x7000 0x1000>; + interrupts = <59 0x2>; + interrupt-parent = <&mpic>; + mode = "cpu"; + }; + + dma@c300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,eloplus-dma"; + reg = <0xc300 0x4>; + ranges = <0x0 0xc100 0x200>; + cell-index = <1>; + dma-channel@0 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <76 2>; + }; + dma-channel@80 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <77 2>; + }; + dma-channel@100 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <78 2>; + }; + dma-channel@180 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index 
= <3>; + interrupt-parent = <&mpic>; + interrupts = <79 2>; + }; + }; + + gpio: gpio-controller@f000 { + #gpio-cells = <2>; + compatible = "fsl,mpc8572-gpio"; + reg = <0xf000 0x100>; + interrupts = <47 0x2>; + interrupt-parent = <&mpic>; + gpio-controller; + }; + + L2: l2-cache-controller@20000 { + compatible = "fsl,p2020-l2-cache-controller"; + reg = <0x20000 0x1000>; + cache-line-size = <32>; // 32 bytes + cache-size = <0x80000>; // L2,512K + interrupt-parent = <&mpic>; + interrupts = <16 2>; + }; + + dma@21300 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,eloplus-dma"; + reg = <0x21300 0x4>; + ranges = <0x0 0x21100 0x200>; + cell-index = <0>; + dma-channel@0 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x0 0x80>; + cell-index = <0>; + interrupt-parent = <&mpic>; + interrupts = <20 2>; + }; + dma-channel@80 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x80 0x80>; + cell-index = <1>; + interrupt-parent = <&mpic>; + interrupts = <21 2>; + }; + dma-channel@100 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x100 0x80>; + cell-index = <2>; + interrupt-parent = <&mpic>; + interrupts = <22 2>; + }; + dma-channel@180 { + compatible = "fsl,eloplus-dma-channel"; + reg = <0x180 0x80>; + cell-index = <3>; + interrupt-parent = <&mpic>; + interrupts = <23 2>; + }; + }; + + usb@22000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl-usb2-dr"; + reg = <0x22000 0x1000>; + interrupt-parent = <&mpic>; + interrupts = <28 0x2>; + }; + + mdio@24520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-mdio"; + reg = <0x24520 0x20>; + }; + + mdio@25520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x26520 0x20>; + }; + + mdio@26520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-tbi"; + reg = <0x520 0x20>; + }; + + enet0: ethernet@24000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <0>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x24000 0x1000>; + ranges = <0x0 0x24000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <29 2 30 2 34 2>; + interrupt-parent = <&mpic>; + }; + + enet1: ethernet@25000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <1>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x25000 0x1000>; + ranges = <0x0 0x25000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <35 2 36 2 40 2>; + interrupt-parent = <&mpic>; + + }; + + enet2: ethernet@26000 { + #address-cells = <1>; + #size-cells = <1>; + cell-index = <2>; + device_type = "network"; + model = "eTSEC"; + compatible = "gianfar"; + reg = <0x26000 0x1000>; + ranges = <0x0 0x26000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <31 2 32 2 33 2>; + interrupt-parent = <&mpic>; + + }; + + sdhci@2e000 { + compatible = "fsl,p2020-esdhc", "fsl,esdhc"; + reg = <0x2e000 0x1000>; + interrupts = <72 0x2>; + interrupt-parent = <&mpic>; + /* Filled in by U-Boot */ + clock-frequency = <0>; + }; + + crypto@30000 { + compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4", + "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0"; + reg = <0x30000 0x10000>; + interrupts = <45 2 58 2>; + interrupt-parent = <&mpic>; + fsl,num-channels = <4>; + fsl,channel-fifo-len = <24>; + fsl,exec-units-mask = <0xbfe>; + fsl,descriptor-types-mask = <0x3ab0ebf>; + }; + + mpic: pic@40000 { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <2>; + reg = <0x40000 
0x40000>; + compatible = "chrp,open-pic"; + device_type = "open-pic"; + }; + + msi@41600 { + compatible = "fsl,p2020-msi", "fsl,mpic-msi"; + reg = <0x41600 0x80>; + msi-available-ranges = <0 0x100>; + interrupts = < + 0xe0 0 + 0xe1 0 + 0xe2 0 + 0xe3 0 + 0xe4 0 + 0xe5 0 + 0xe6 0 + 0xe7 0>; + interrupt-parent = <&mpic>; + }; + + global-utilities@e0000 { //global utilities block + compatible = "fsl,p2020-guts"; + reg = <0xe0000 0x1000>; + fsl,has-rstcr; + }; + }; + + pci0: pcie@ffe08000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0 0xffe08000 0 0x1000>; + bus-range = <0 255>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <24 2>; + }; + + pci1: pcie@ffe09000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0 0xffe09000 0 0x1000>; + bus-range = <0 255>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <25 2>; + }; + + pci2: pcie@ffe0a000 { + compatible = "fsl,mpc8548-pcie"; + device_type = "pci"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = <0 0xffe0a000 0 0x1000>; + bus-range = <0 255>; + clock-frequency = <33333333>; + interrupt-parent = <&mpic>; + interrupts = <26 2>; + }; +}; diff --git a/trunk/arch/powerpc/boot/epapr.c b/trunk/arch/powerpc/boot/epapr.c new file mode 100644 index 000000000000..06c1961bd124 --- /dev/null +++ b/trunk/arch/powerpc/boot/epapr.c @@ -0,0 +1,66 @@ +/* + * Bootwrapper for ePAPR compliant firmwares + * + * Copyright 2010 David Gibson , IBM Corporation. + * + * Based on earlier bootwrappers by: + * (c) Benjamin Herrenschmidt , IBM Corp,\ + * and + * Scott Wood + * Copyright (c) 2007 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#include "ops.h" +#include "stdio.h" +#include "io.h" +#include + +BSS_STACK(4096); + +#define EPAPR_SMAGIC 0x65504150 +#define EPAPR_EMAGIC 0x45504150 + +static unsigned epapr_magic; +static unsigned long ima_size; +static unsigned long fdt_addr; + +static void platform_fixups(void) +{ + if ((epapr_magic != EPAPR_EMAGIC) + && (epapr_magic != EPAPR_SMAGIC)) + fatal("r6 contained 0x%08x instead of ePAPR magic number\n", + epapr_magic); + + if (ima_size < (unsigned long)_end) + printf("WARNING: Image loaded outside IMA!" + " (_end=%p, ima_size=0x%lx)\n", _end, ima_size); + if (ima_size < fdt_addr) + printf("WARNING: Device tree address is outside IMA!" + "(fdt_addr=0x%lx, ima_size=0x%lx)\n", fdt_addr, + ima_size); + if (ima_size < fdt_addr + fdt_totalsize((void *)fdt_addr)) + printf("WARNING: Device tree extends outside IMA!" 
+ " (fdt_addr=0x%lx, size=0x%x, ima_size=0x%lx\n", + fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size); +} + +void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7) +{ + epapr_magic = r6; + ima_size = r7; + fdt_addr = r3; + + /* FIXME: we should process reserve entries */ + + simple_alloc_init(_end, ima_size - (unsigned long)_end, 32, 64); + + fdt_init((void *)fdt_addr); + + serial_console_init(); + platform_ops.fixups = platform_fixups; +} diff --git a/trunk/arch/powerpc/boot/wrapper b/trunk/arch/powerpc/boot/wrapper index cb97e7511d7e..c74531af72c0 100755 --- a/trunk/arch/powerpc/boot/wrapper +++ b/trunk/arch/powerpc/boot/wrapper @@ -39,6 +39,7 @@ dts= cacheit= binary= gzip=.gz +pie= # cross-compilation prefix CROSS= @@ -157,9 +158,10 @@ pmac|chrp) platformo=$object/of.o ;; coff) - platformo=$object/of.o + platformo="$object/crt0.o $object/of.o" lds=$object/zImage.coff.lds link_address='0x500000' + pie= ;; miboot|uboot) # miboot and U-boot want just the bare bits, not an ELF binary @@ -208,6 +210,7 @@ ps3) ksection=.kernel:vmlinux.bin isection=.kernel:initrd link_address='' + pie= ;; ep88xc|ep405|ep8248e) platformo="$object/fixed-head.o $object/$platform.o" @@ -244,6 +247,10 @@ gamecube|wii) treeboot-iss4xx-mpic) platformo="$object/treeboot-iss4xx.o" ;; +epapr) + link_address='0x20000000' + pie=-pie + ;; esac vmz="$tmpdir/`basename \"$kernel\"`.$ext" @@ -251,7 +258,7 @@ if [ -z "$cacheit" -o ! -f "$vmz$gzip" -o "$vmz$gzip" -ot "$kernel" ]; then ${CROSS}objcopy $objflags "$kernel" "$vmz.$$" if [ -n "$gzip" ]; then - gzip -f -9 "$vmz.$$" + gzip -n -f -9 "$vmz.$$" fi if [ -n "$cacheit" ]; then @@ -310,9 +317,9 @@ fi if [ "$platform" != "miboot" ]; then if [ -n "$link_address" ] ; then - text_start="-Ttext $link_address --defsym _start=$link_address" + text_start="-Ttext $link_address" fi - ${CROSS}ld -m elf32ppc -T $lds $text_start -o "$ofile" \ + ${CROSS}ld -m elf32ppc -T $lds $text_start $pie -o "$ofile" \ $platformo $tmp $object/wrapper.a rm $tmp fi @@ -336,7 +343,7 @@ coff) $objbin/hack-coff "$ofile" ;; cuboot*) - gzip -f -9 "$ofile" + gzip -n -f -9 "$ofile" ${MKIMAGE} -A ppc -O linux -T kernel -C gzip -a "$base" -e "$entry" \ $uboot_version -d "$ofile".gz "$ofile" ;; @@ -383,6 +390,6 @@ ps3) odir="$(dirname "$ofile.bin")" rm -f "$odir/otheros.bld" - gzip --force -9 --stdout "$ofile.bin" > "$odir/otheros.bld" + gzip -n --force -9 --stdout "$ofile.bin" > "$odir/otheros.bld" ;; esac diff --git a/trunk/arch/powerpc/boot/zImage.coff.lds.S b/trunk/arch/powerpc/boot/zImage.coff.lds.S index 856dc78b14ef..de4c9e3c9344 100644 --- a/trunk/arch/powerpc/boot/zImage.coff.lds.S +++ b/trunk/arch/powerpc/boot/zImage.coff.lds.S @@ -3,13 +3,13 @@ ENTRY(_zimage_start_opd) EXTERN(_zimage_start_opd) SECTIONS { - _start = .; .text : { + _start = .; *(.text) *(.fixup) + _etext = .; } - _etext = .; . = ALIGN(4096); .data : { @@ -17,9 +17,7 @@ SECTIONS *(.data*) *(__builtin_*) *(.sdata*) - __got2_start = .; *(.got2) - __got2_end = .; _dtb_start = .; *(.kernel:dtb) diff --git a/trunk/arch/powerpc/boot/zImage.lds.S b/trunk/arch/powerpc/boot/zImage.lds.S index 0962d62bdb50..2bd8731f1365 100644 --- a/trunk/arch/powerpc/boot/zImage.lds.S +++ b/trunk/arch/powerpc/boot/zImage.lds.S @@ -3,49 +3,64 @@ ENTRY(_zimage_start) EXTERN(_zimage_start) SECTIONS { - _start = .; .text : { + _start = .; *(.text) *(.fixup) + _etext = .; } - _etext = .; . 
= ALIGN(4096); .data : { *(.rodata*) *(.data*) *(.sdata*) - __got2_start = .; *(.got2) - __got2_end = .; } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .dynamic : + { + __dynamic_start = .; + *(.dynamic) + } + .hash : { *(.hash) } + .interp : { *(.interp) } + .rela.dyn : { *(.rela*) } . = ALIGN(8); - _dtb_start = .; - .kernel:dtb : { *(.kernel:dtb) } - _dtb_end = .; - - . = ALIGN(4096); - _vmlinux_start = .; - .kernel:vmlinux.strip : { *(.kernel:vmlinux.strip) } - _vmlinux_end = .; + .kernel:dtb : + { + _dtb_start = .; + *(.kernel:dtb) + _dtb_end = .; + } . = ALIGN(4096); - _initrd_start = .; - .kernel:initrd : { *(.kernel:initrd) } - _initrd_end = .; + .kernel:vmlinux.strip : + { + _vmlinux_start = .; + *(.kernel:vmlinux.strip) + _vmlinux_end = .; + } . = ALIGN(4096); - _edata = .; + .kernel:initrd : + { + _initrd_start = .; + *(.kernel:initrd) + _initrd_end = .; + } . = ALIGN(4096); - __bss_start = .; .bss : { - *(.sbss) - *(.bss) + _edata = .; + __bss_start = .; + *(.sbss) + *(.bss) + *(COMMON) + _end = . ; } - . = ALIGN(4096); - _end = . ; } diff --git a/trunk/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig b/trunk/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig index c683bce4c26e..126ef1b08a01 100644 --- a/trunk/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig +++ b/trunk/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig @@ -104,7 +104,6 @@ CONFIG_ROOT_NFS=y CONFIG_PARTITION_ADVANCED=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y -# CONFIG_DEBUG_BUGVERBOSE is not set # CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_CRYPTO_PCBC=m diff --git a/trunk/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig b/trunk/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig index a721cd3d793f..abcf00ad939e 100644 --- a/trunk/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig +++ b/trunk/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig @@ -101,7 +101,6 @@ CONFIG_ROOT_NFS=y CONFIG_PARTITION_ADVANCED=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y -# CONFIG_DEBUG_BUGVERBOSE is not set # CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_CRYPTO_PCBC=m diff --git a/trunk/arch/powerpc/configs/85xx/mpc8540_ads_defconfig b/trunk/arch/powerpc/configs/85xx/mpc8540_ads_defconfig index 55e0725500dc..11662c217ac0 100644 --- a/trunk/arch/powerpc/configs/85xx/mpc8540_ads_defconfig +++ b/trunk/arch/powerpc/configs/85xx/mpc8540_ads_defconfig @@ -58,7 +58,6 @@ CONFIG_PARTITION_ADVANCED=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_MUTEXES=y -# CONFIG_DEBUG_BUGVERBOSE is not set # CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_SYSCTL_SYSCALL_CHECK=y # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/trunk/arch/powerpc/configs/85xx/mpc8560_ads_defconfig b/trunk/arch/powerpc/configs/85xx/mpc8560_ads_defconfig index d724095530a6..ebe9b30b0721 100644 --- a/trunk/arch/powerpc/configs/85xx/mpc8560_ads_defconfig +++ b/trunk/arch/powerpc/configs/85xx/mpc8560_ads_defconfig @@ -59,7 +59,6 @@ CONFIG_PARTITION_ADVANCED=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_MUTEXES=y -# CONFIG_DEBUG_BUGVERBOSE is not set # CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_SYSCTL_SYSCALL_CHECK=y # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/trunk/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig b/trunk/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig index 4b44beaa21ae..eb25229b387a 100644 --- a/trunk/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig +++ b/trunk/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig @@ -63,7 +63,6 @@ 
CONFIG_PARTITION_ADVANCED=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_MUTEXES=y -# CONFIG_DEBUG_BUGVERBOSE is not set # CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_SYSCTL_SYSCALL_CHECK=y # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/trunk/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig b/trunk/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig index b614508d6fd2..f51c7ebc181e 100644 --- a/trunk/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig +++ b/trunk/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig @@ -168,7 +168,6 @@ CONFIG_MAC_PARTITION=y CONFIG_CRC_T10DIF=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y -# CONFIG_DEBUG_BUGVERBOSE is not set CONFIG_DEBUG_INFO=y # CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_SYSCTL_SYSCALL_CHECK=y diff --git a/trunk/arch/powerpc/configs/c2k_defconfig b/trunk/arch/powerpc/configs/c2k_defconfig index f9e6a3ea5a64..2a84fd7f631c 100644 --- a/trunk/arch/powerpc/configs/c2k_defconfig +++ b/trunk/arch/powerpc/configs/c2k_defconfig @@ -132,8 +132,8 @@ CONFIG_NET_CLS_RSVP=m CONFIG_NET_CLS_RSVP6=m CONFIG_NET_CLS_IND=y CONFIG_BT=m -CONFIG_BT_L2CAP=m -CONFIG_BT_SCO=m +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y CONFIG_BT_RFCOMM=m CONFIG_BT_RFCOMM_TTY=y CONFIG_BT_BNEP=m diff --git a/trunk/arch/powerpc/configs/e55xx_smp_defconfig b/trunk/arch/powerpc/configs/e55xx_smp_defconfig index 9fa1613e5e2b..d32283555b53 100644 --- a/trunk/arch/powerpc/configs/e55xx_smp_defconfig +++ b/trunk/arch/powerpc/configs/e55xx_smp_defconfig @@ -6,10 +6,10 @@ CONFIG_NR_CPUS=2 CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y +CONFIG_SPARSE_IRQ=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_BLK_DEV_INITRD=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y @@ -25,8 +25,32 @@ CONFIG_P5020_DS=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BINFMT_MISC=m -CONFIG_SPARSE_IRQ=y # CONFIG_PCI is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +CONFIG_NET_IPIP=y +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_ARPD=y +CONFIG_INET_ESP=y +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_IPV6=y +CONFIG_IP_SCTP=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_PROC_DEVICETREE=y CONFIG_BLK_DEV_LOOP=y @@ -34,6 +58,9 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=131072 CONFIG_MISC_DEVICES=y CONFIG_EEPROM_LEGACY=y +CONFIG_NETDEVICES=y +CONFIG_DUMMY=y +CONFIG_NET_ETHERNET=y CONFIG_INPUT_FF_MEMLESS=m # CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set @@ -64,22 +91,14 @@ CONFIG_NLS=y CONFIG_NLS_UTF8=m CONFIG_CRC_T10DIF=y CONFIG_CRC_ITU_T=m -CONFIG_LIBCRC32C=m CONFIG_FRAME_WARN=1024 CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y -# CONFIG_DEBUG_BUGVERBOSE is not set CONFIG_DEBUG_INFO=y # CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_VIRQ_DEBUG=y -CONFIG_CRYPTO=y -CONFIG_CRYPTO_CBC=y CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_SHA1=m -CONFIG_CRYPTO_DES=y # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_DEV_TALITOS=y diff --git a/trunk/arch/powerpc/configs/mpc85xx_defconfig b/trunk/arch/powerpc/configs/mpc85xx_defconfig index c06a86c33098..96b89df7752a 100644 --- 
a/trunk/arch/powerpc/configs/mpc85xx_defconfig +++ b/trunk/arch/powerpc/configs/mpc85xx_defconfig @@ -204,7 +204,6 @@ CONFIG_CRC_T10DIF=y CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y -# CONFIG_DEBUG_BUGVERBOSE is not set CONFIG_DEBUG_INFO=y # CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_SYSCTL_SYSCALL_CHECK=y diff --git a/trunk/arch/powerpc/configs/mpc85xx_smp_defconfig b/trunk/arch/powerpc/configs/mpc85xx_smp_defconfig index 942ced90557c..de65841aa04e 100644 --- a/trunk/arch/powerpc/configs/mpc85xx_smp_defconfig +++ b/trunk/arch/powerpc/configs/mpc85xx_smp_defconfig @@ -206,7 +206,6 @@ CONFIG_CRC_T10DIF=y CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y -# CONFIG_DEBUG_BUGVERBOSE is not set CONFIG_DEBUG_INFO=y # CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_SYSCTL_SYSCALL_CHECK=y diff --git a/trunk/arch/powerpc/configs/mpc86xx_defconfig b/trunk/arch/powerpc/configs/mpc86xx_defconfig index 038a308cbfc4..a1cc8179e9fd 100644 --- a/trunk/arch/powerpc/configs/mpc86xx_defconfig +++ b/trunk/arch/powerpc/configs/mpc86xx_defconfig @@ -171,7 +171,6 @@ CONFIG_MAC_PARTITION=y CONFIG_CRC_T10DIF=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y -# CONFIG_DEBUG_BUGVERBOSE is not set CONFIG_DEBUG_INFO=y # CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_SYSCTL_SYSCALL_CHECK=y diff --git a/trunk/arch/powerpc/configs/pmac32_defconfig b/trunk/arch/powerpc/configs/pmac32_defconfig index ac4fc41035f6..f8b394a76ac3 100644 --- a/trunk/arch/powerpc/configs/pmac32_defconfig +++ b/trunk/arch/powerpc/configs/pmac32_defconfig @@ -112,8 +112,8 @@ CONFIG_IRDA_CACHE_LAST_LSAP=y CONFIG_IRDA_FAST_RR=y CONFIG_IRTTY_SIR=m CONFIG_BT=m -CONFIG_BT_L2CAP=m -CONFIG_BT_SCO=m +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y CONFIG_BT_RFCOMM=m CONFIG_BT_RFCOMM_TTY=y CONFIG_BT_BNEP=m diff --git a/trunk/arch/powerpc/configs/ppc6xx_defconfig b/trunk/arch/powerpc/configs/ppc6xx_defconfig index 0a10fb009ef7..214208924a9c 100644 --- a/trunk/arch/powerpc/configs/ppc6xx_defconfig +++ b/trunk/arch/powerpc/configs/ppc6xx_defconfig @@ -351,8 +351,8 @@ CONFIG_VLSI_FIR=m CONFIG_VIA_FIR=m CONFIG_MCS_FIR=m CONFIG_BT=m -CONFIG_BT_L2CAP=m -CONFIG_BT_SCO=m +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y CONFIG_BT_RFCOMM=m CONFIG_BT_RFCOMM_TTY=y CONFIG_BT_BNEP=m diff --git a/trunk/arch/powerpc/configs/ps3_defconfig b/trunk/arch/powerpc/configs/ps3_defconfig index caba919f65d8..6472322bf13b 100644 --- a/trunk/arch/powerpc/configs/ps3_defconfig +++ b/trunk/arch/powerpc/configs/ps3_defconfig @@ -52,8 +52,8 @@ CONFIG_IP_PNP_DHCP=y # CONFIG_INET_DIAG is not set CONFIG_IPV6=y CONFIG_BT=m -CONFIG_BT_L2CAP=m -CONFIG_BT_SCO=m +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y CONFIG_BT_RFCOMM=m CONFIG_BT_RFCOMM_TTY=y CONFIG_BT_BNEP=m diff --git a/trunk/arch/powerpc/configs/pseries_defconfig b/trunk/arch/powerpc/configs/pseries_defconfig index 249ddd0a27cd..7de13865508c 100644 --- a/trunk/arch/powerpc/configs/pseries_defconfig +++ b/trunk/arch/powerpc/configs/pseries_defconfig @@ -146,12 +146,18 @@ CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_FC_ATTRS=y CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_CXGB3_ISCSI=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_BE2ISCSI=m CONFIG_SCSI_IBMVSCSI=y CONFIG_SCSI_IBMVFC=m CONFIG_SCSI_SYM53C8XX_2=y CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0 CONFIG_SCSI_IPR=y CONFIG_SCSI_QLA_FC=m +CONFIG_SCSI_QLA_ISCSI=m CONFIG_SCSI_LPFC=m CONFIG_ATA=y # CONFIG_ATA_SFF is not set @@ -197,6 +203,8 @@ CONFIG_S2IO=m CONFIG_MYRI10GE=m CONFIG_NETXEN_NIC=m CONFIG_MLX4_EN=m +CONFIG_QLGE=m 
+CONFIG_BE2NET=m CONFIG_PPP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m diff --git a/trunk/arch/powerpc/include/asm/cputable.h b/trunk/arch/powerpc/include/asm/cputable.h index be3cdf9134ce..c0d842cfd012 100644 --- a/trunk/arch/powerpc/include/asm/cputable.h +++ b/trunk/arch/powerpc/include/asm/cputable.h @@ -157,6 +157,7 @@ extern const char *powerpc_base_platform; #define CPU_FTR_476_DD2 ASM_CONST(0x0000000000010000) #define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000) #define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000) +#define CPU_FTR_DEBUG_LVL_EXC ASM_CONST(0x0000000000080000) #define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000) #define CPU_FTR_PPC_LE ASM_CONST(0x0000000000200000) #define CPU_FTR_REAL_LE ASM_CONST(0x0000000000400000) @@ -178,22 +179,18 @@ extern const char *powerpc_base_platform; #define LONG_ASM_CONST(x) 0 #endif -#define CPU_FTR_SLB LONG_ASM_CONST(0x0000000100000000) -#define CPU_FTR_16M_PAGE LONG_ASM_CONST(0x0000000200000000) -#define CPU_FTR_TLBIEL LONG_ASM_CONST(0x0000000400000000) + +#define CPU_FTR_HVMODE_206 LONG_ASM_CONST(0x0000000800000000) +#define CPU_FTR_CFAR LONG_ASM_CONST(0x0000001000000000) #define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000) #define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000) #define CPU_FTR_CTRL LONG_ASM_CONST(0x0000008000000000) #define CPU_FTR_SMT LONG_ASM_CONST(0x0000010000000000) -#define CPU_FTR_LOCKLESS_TLBIE LONG_ASM_CONST(0x0000040000000000) -#define CPU_FTR_CI_LARGE_PAGE LONG_ASM_CONST(0x0000100000000000) #define CPU_FTR_PAUSE_ZERO LONG_ASM_CONST(0x0000200000000000) #define CPU_FTR_PURR LONG_ASM_CONST(0x0000400000000000) #define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000800000000000) #define CPU_FTR_SPURR LONG_ASM_CONST(0x0001000000000000) #define CPU_FTR_DSCR LONG_ASM_CONST(0x0002000000000000) -#define CPU_FTR_1T_SEGMENT LONG_ASM_CONST(0x0004000000000000) -#define CPU_FTR_NO_SLBIE_B LONG_ASM_CONST(0x0008000000000000) #define CPU_FTR_VSX LONG_ASM_CONST(0x0010000000000000) #define CPU_FTR_SAO LONG_ASM_CONST(0x0020000000000000) #define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0040000000000000) @@ -202,12 +199,14 @@ extern const char *powerpc_base_platform; #define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0200000000000000) #define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0400000000000000) #define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0800000000000000) +#define CPU_FTR_ICSWX LONG_ASM_CONST(0x1000000000000000) #ifndef __ASSEMBLY__ -#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_SLB | \ - CPU_FTR_TLBIEL | CPU_FTR_NOEXECUTE | \ - CPU_FTR_NODSISRALIGN | CPU_FTR_16M_PAGE) +#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN) + +#define MMU_FTR_PPCAS_ARCH_V2 (MMU_FTR_SLB | MMU_FTR_TLBIEL | \ + MMU_FTR_16M_PAGE) /* We only set the altivec features if the kernel was compiled with altivec * support @@ -382,10 +381,13 @@ extern const char *powerpc_base_platform; #define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \ CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) -#define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ - CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \ +#define CPU_FTRS_E500MC (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \ CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ CPU_FTR_DBELL) +#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \ + CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ + CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ + CPU_FTR_DEBUG_LVL_EXC) #define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | 
CPU_FTR_NODSISRALIGN) /* 64-bit CPUs */ @@ -405,41 +407,46 @@ extern const char *powerpc_base_platform; #define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_MMCRA | CPU_FTR_SMT | \ - CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ - CPU_FTR_PURR | CPU_FTR_STCX_CHECKS_ADDRESS | \ - CPU_FTR_POPCNTB) + CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \ + CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB) #define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_MMCRA | CPU_FTR_SMT | \ - CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ + CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \ - CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB) + CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR) #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ - CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_HVMODE_206 |\ CPU_FTR_MMCRA | CPU_FTR_SMT | \ - CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ + CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ - CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD) + CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ + CPU_FTR_ICSWX | CPU_FTR_CFAR) #define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ - CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | \ - CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \ + CPU_FTR_PAUSE_ZERO | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \ CPU_FTR_UNALIGNED_LD_STD) #define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ - CPU_FTR_PPCAS_ARCH_V2 | \ - CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \ - CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_NO_SLBIE_B) + CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \ + CPU_FTR_PURR | CPU_FTR_REAL_LE) #define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2) +#define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \ + CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN) + #ifdef __powerpc64__ +#ifdef CONFIG_PPC_BOOK3E +#define CPU_FTRS_POSSIBLE (CPU_FTRS_E5500 | CPU_FTRS_A2) +#else #define CPU_FTRS_POSSIBLE \ (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \ CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \ CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \ - CPU_FTR_1T_SEGMENT | CPU_FTR_VSX) + CPU_FTR_VSX) +#endif #else enum { CPU_FTRS_POSSIBLE = @@ -473,16 +480,21 @@ enum { #endif #ifdef CONFIG_E500 CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC | + CPU_FTRS_E5500 | #endif 0, }; #endif /* __powerpc64__ */ #ifdef __powerpc64__ +#ifdef CONFIG_PPC_BOOK3E +#define CPU_FTRS_ALWAYS (CPU_FTRS_E5500 & CPU_FTRS_A2) +#else #define CPU_FTRS_ALWAYS \ (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \ CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 & \ CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE) +#endif #else enum { CPU_FTRS_ALWAYS = @@ -513,6 +525,7 @@ enum { #endif #ifdef CONFIG_E500 CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC & + CPU_FTRS_E5500 & #endif CPU_FTRS_POSSIBLE, }; diff --git a/trunk/arch/powerpc/include/asm/cputhreads.h b/trunk/arch/powerpc/include/asm/cputhreads.h index f71bb4c118b4..ce516e5eb0d3 100644 --- a/trunk/arch/powerpc/include/asm/cputhreads.h +++ 
b/trunk/arch/powerpc/include/asm/cputhreads.h @@ -37,16 +37,16 @@ extern cpumask_t threads_core_mask; * This can typically be used for things like IPI for tlb invalidations * since those need to be done only once per core/TLB */ -static inline cpumask_t cpu_thread_mask_to_cores(cpumask_t threads) +static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads) { cpumask_t tmp, res; int i; - res = CPU_MASK_NONE; + cpumask_clear(&res); for (i = 0; i < NR_CPUS; i += threads_per_core) { - cpus_shift_left(tmp, threads_core_mask, i); - if (cpus_intersects(threads, tmp)) - cpu_set(i, res); + cpumask_shift_left(&tmp, &threads_core_mask, i); + if (cpumask_intersects(threads, &tmp)) + cpumask_set_cpu(i, &res); } return res; } @@ -58,7 +58,7 @@ static inline int cpu_nr_cores(void) static inline cpumask_t cpu_online_cores_map(void) { - return cpu_thread_mask_to_cores(cpu_online_map); + return cpu_thread_mask_to_cores(cpu_online_mask); } #ifdef CONFIG_SMP diff --git a/trunk/arch/powerpc/include/asm/dbell.h b/trunk/arch/powerpc/include/asm/dbell.h index 0893ab9343a6..9c70d0ca96d4 100644 --- a/trunk/arch/powerpc/include/asm/dbell.h +++ b/trunk/arch/powerpc/include/asm/dbell.h @@ -27,9 +27,8 @@ enum ppc_dbell { PPC_G_DBELL_MC = 4, /* guest mcheck doorbell */ }; -extern void doorbell_message_pass(int target, int msg); +extern void doorbell_cause_ipi(int cpu, unsigned long data); extern void doorbell_exception(struct pt_regs *regs); -extern void doorbell_check_self(void); extern void doorbell_setup_this_cpu(void); static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag) diff --git a/trunk/arch/powerpc/include/asm/emulated_ops.h b/trunk/arch/powerpc/include/asm/emulated_ops.h index f0fb4fc1f6e6..45921672b97a 100644 --- a/trunk/arch/powerpc/include/asm/emulated_ops.h +++ b/trunk/arch/powerpc/include/asm/emulated_ops.h @@ -52,6 +52,10 @@ extern struct ppc_emulated { #ifdef CONFIG_VSX struct ppc_emulated_entry vsx; #endif +#ifdef CONFIG_PPC64 + struct ppc_emulated_entry mfdscr; + struct ppc_emulated_entry mtdscr; +#endif } ppc_emulated; extern u32 ppc_warn_emulated; diff --git a/trunk/arch/powerpc/include/asm/exception-64s.h b/trunk/arch/powerpc/include/asm/exception-64s.h index 7778d6f0c878..f5dfe3411f64 100644 --- a/trunk/arch/powerpc/include/asm/exception-64s.h +++ b/trunk/arch/powerpc/include/asm/exception-64s.h @@ -46,6 +46,7 @@ #define EX_CCR 60 #define EX_R3 64 #define EX_LR 72 +#define EX_CFAR 80 /* * We're short on space and time in the exception prolog, so we can't @@ -56,30 +57,40 @@ #define LOAD_HANDLER(reg, label) \ addi reg,reg,(label)-_stext; /* virt addr of handler ... 
*/ -#define EXCEPTION_PROLOG_1(area) \ - mfspr r13,SPRN_SPRG_PACA; /* get paca address into r13 */ \ +/* Exception register prefixes */ +#define EXC_HV H +#define EXC_STD + +#define EXCEPTION_PROLOG_1(area) \ + GET_PACA(r13); \ std r9,area+EX_R9(r13); /* save r9 - r12 */ \ std r10,area+EX_R10(r13); \ std r11,area+EX_R11(r13); \ std r12,area+EX_R12(r13); \ - mfspr r9,SPRN_SPRG_SCRATCH0; \ + BEGIN_FTR_SECTION_NESTED(66); \ + mfspr r10,SPRN_CFAR; \ + std r10,area+EX_CFAR(r13); \ + END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \ + GET_SCRATCH0(r9); \ std r9,area+EX_R13(r13); \ mfcr r9 -#define EXCEPTION_PROLOG_PSERIES_1(label) \ +#define __EXCEPTION_PROLOG_PSERIES_1(label, h) \ ld r12,PACAKBASE(r13); /* get high part of &label */ \ ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \ - mfspr r11,SPRN_SRR0; /* save SRR0 */ \ + mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \ LOAD_HANDLER(r12,label) \ - mtspr SPRN_SRR0,r12; \ - mfspr r12,SPRN_SRR1; /* and SRR1 */ \ - mtspr SPRN_SRR1,r10; \ - rfid; \ + mtspr SPRN_##h##SRR0,r12; \ + mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ + mtspr SPRN_##h##SRR1,r10; \ + h##rfid; \ b . /* prevent speculative execution */ +#define EXCEPTION_PROLOG_PSERIES_1(label, h) \ + __EXCEPTION_PROLOG_PSERIES_1(label, h) -#define EXCEPTION_PROLOG_PSERIES(area, label) \ +#define EXCEPTION_PROLOG_PSERIES(area, label, h) \ EXCEPTION_PROLOG_1(area); \ - EXCEPTION_PROLOG_PSERIES_1(label); + EXCEPTION_PROLOG_PSERIES_1(label, h); /* * The common exception prolog is used for all except a few exceptions @@ -98,10 +109,11 @@ beq- 1f; \ ld r1,PACAKSAVE(r13); /* kernel stack to use */ \ 1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \ - bge- cr1,2f; /* abort if it is */ \ - b 3f; \ -2: li r1,(n); /* will be reloaded later */ \ + blt+ cr1,3f; /* abort if it is */ \ + li r1,(n); /* will be reloaded later */ \ sth r1,PACA_TRAP_SAVE(r13); \ + std r3,area+EX_R3(r13); \ + addi r3,r13,area; /* r3 -> where regs are saved*/ \ b bad_stack; \ 3: std r9,_CCR(r1); /* save CR in stackframe */ \ std r11,_NIP(r1); /* save SRR0 in stackframe */ \ @@ -123,6 +135,10 @@ std r9,GPR11(r1); \ std r10,GPR12(r1); \ std r11,GPR13(r1); \ + BEGIN_FTR_SECTION_NESTED(66); \ + ld r10,area+EX_CFAR(r13); \ + std r10,ORIG_GPR3(r1); \ + END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \ ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ mflr r9; /* save LR in stackframe */ \ std r9,_LINK(r1); \ @@ -143,57 +159,62 @@ /* * Exception vectors. */ -#define STD_EXCEPTION_PSERIES(n, label) \ - . = n; \ +#define STD_EXCEPTION_PSERIES(loc, vec, label) \ + . = loc; \ .globl label##_pSeries; \ label##_pSeries: \ HMT_MEDIUM; \ - DO_KVM n; \ - mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \ - EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) + DO_KVM vec; \ + SET_SCRATCH0(r13); /* save r13 */ \ + EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, EXC_STD) -#define HSTD_EXCEPTION_PSERIES(n, label) \ - . = n; \ - .globl label##_pSeries; \ -label##_pSeries: \ +#define STD_EXCEPTION_HV(loc, vec, label) \ + . 
= loc; \ + .globl label##_hv; \ +label##_hv: \ HMT_MEDIUM; \ - mtspr SPRN_SPRG_SCRATCH0,r20; /* save r20 */ \ - mfspr r20,SPRN_HSRR0; /* copy HSRR0 to SRR0 */ \ - mtspr SPRN_SRR0,r20; \ - mfspr r20,SPRN_HSRR1; /* copy HSRR0 to SRR0 */ \ - mtspr SPRN_SRR1,r20; \ - mfspr r20,SPRN_SPRG_SCRATCH0; /* restore r20 */ \ - mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \ - EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) + DO_KVM vec; \ + SET_SCRATCH0(r13); /* save r13 */ \ + EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, EXC_HV) - -#define MASKABLE_EXCEPTION_PSERIES(n, label) \ - . = n; \ - .globl label##_pSeries; \ -label##_pSeries: \ +#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h) \ HMT_MEDIUM; \ - DO_KVM n; \ - mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \ - mfspr r13,SPRN_SPRG_PACA; /* get paca address into r13 */ \ + DO_KVM vec; \ + SET_SCRATCH0(r13); /* save r13 */ \ + GET_PACA(r13); \ std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \ std r10,PACA_EXGEN+EX_R10(r13); \ lbz r10,PACASOFTIRQEN(r13); \ mfcr r9; \ cmpwi r10,0; \ - beq masked_interrupt; \ - mfspr r10,SPRN_SPRG_SCRATCH0; \ + beq masked_##h##interrupt; \ + GET_SCRATCH0(r10); \ std r10,PACA_EXGEN+EX_R13(r13); \ std r11,PACA_EXGEN+EX_R11(r13); \ std r12,PACA_EXGEN+EX_R12(r13); \ ld r12,PACAKBASE(r13); /* get high part of &label */ \ ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \ - mfspr r11,SPRN_SRR0; /* save SRR0 */ \ + mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \ LOAD_HANDLER(r12,label##_common) \ - mtspr SPRN_SRR0,r12; \ - mfspr r12,SPRN_SRR1; /* and SRR1 */ \ - mtspr SPRN_SRR1,r10; \ - rfid; \ + mtspr SPRN_##h##SRR0,r12; \ + mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ + mtspr SPRN_##h##SRR1,r10; \ + h##rfid; \ b . /* prevent speculative execution */ +#define _MASKABLE_EXCEPTION_PSERIES(vec, label, h) \ + __MASKABLE_EXCEPTION_PSERIES(vec, label, h) + +#define MASKABLE_EXCEPTION_PSERIES(loc, vec, label) \ + . = loc; \ + .globl label##_pSeries; \ +label##_pSeries: \ + _MASKABLE_EXCEPTION_PSERIES(vec, label, EXC_STD) + +#define MASKABLE_EXCEPTION_HV(loc, vec, label) \ + . 
= loc; \ + .globl label##_hv; \ +label##_hv: \ + _MASKABLE_EXCEPTION_PSERIES(vec, label, EXC_HV) #ifdef CONFIG_PPC_ISERIES #define DISABLE_INTS \ diff --git a/trunk/arch/powerpc/include/asm/feature-fixups.h b/trunk/arch/powerpc/include/asm/feature-fixups.h index 921a8470e18a..9a67a38bf7b9 100644 --- a/trunk/arch/powerpc/include/asm/feature-fixups.h +++ b/trunk/arch/powerpc/include/asm/feature-fixups.h @@ -49,7 +49,7 @@ label##5: \ FTR_ENTRY_OFFSET label##2b-label##5b; \ FTR_ENTRY_OFFSET label##3b-label##5b; \ FTR_ENTRY_OFFSET label##4b-label##5b; \ - .ifgt (label##4b-label##3b)-(label##2b-label##1b); \ + .ifgt (label##4b- label##3b)-(label##2b- label##1b); \ .error "Feature section else case larger than body"; \ .endif; \ .popsection; @@ -146,6 +146,19 @@ label##5: \ #ifndef __ASSEMBLY__ +#define ASM_FTR_IF(section_if, section_else, msk, val) \ + stringify_in_c(BEGIN_FTR_SECTION) \ + section_if "; " \ + stringify_in_c(FTR_SECTION_ELSE) \ + section_else "; " \ + stringify_in_c(ALT_FTR_SECTION_END((msk), (val))) + +#define ASM_FTR_IFSET(section_if, section_else, msk) \ + ASM_FTR_IF(section_if, section_else, (msk), (msk)) + +#define ASM_FTR_IFCLR(section_if, section_else, msk) \ + ASM_FTR_IF(section_if, section_else, (msk), 0) + #define ASM_MMU_FTR_IF(section_if, section_else, msk, val) \ stringify_in_c(BEGIN_MMU_FTR_SECTION) \ section_if "; " \ diff --git a/trunk/arch/powerpc/include/asm/firmware.h b/trunk/arch/powerpc/include/asm/firmware.h index 4ef662e4a31d..3a6c586c4e40 100644 --- a/trunk/arch/powerpc/include/asm/firmware.h +++ b/trunk/arch/powerpc/include/asm/firmware.h @@ -47,6 +47,7 @@ #define FW_FEATURE_BEAT ASM_CONST(0x0000000001000000) #define FW_FEATURE_CMO ASM_CONST(0x0000000002000000) #define FW_FEATURE_VPHN ASM_CONST(0x0000000004000000) +#define FW_FEATURE_XCMO ASM_CONST(0x0000000008000000) #ifndef __ASSEMBLY__ @@ -60,7 +61,7 @@ enum { FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN | FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR | FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR | - FW_FEATURE_CMO | FW_FEATURE_VPHN, + FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO, FW_FEATURE_PSERIES_ALWAYS = 0, FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR, FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR, diff --git a/trunk/arch/powerpc/include/asm/hvcall.h b/trunk/arch/powerpc/include/asm/hvcall.h index 8edec710cc6d..852b8c1c09db 100644 --- a/trunk/arch/powerpc/include/asm/hvcall.h +++ b/trunk/arch/powerpc/include/asm/hvcall.h @@ -102,6 +102,7 @@ #define H_ANDCOND (1UL<<(63-33)) #define H_ICACHE_INVALIDATE (1UL<<(63-40)) /* icbi, etc. 
(ignored for IO pages) */ #define H_ICACHE_SYNCHRONIZE (1UL<<(63-41)) /* dcbst, icbi, etc (ignored for IO pages */ +#define H_COALESCE_CAND (1UL<<(63-42)) /* page is a good candidate for coalescing */ #define H_ZERO_PAGE (1UL<<(63-48)) /* zero the page before mapping (ignored for IO pages) */ #define H_COPY_PAGE (1UL<<(63-49)) #define H_N (1UL<<(63-61)) @@ -234,6 +235,7 @@ #define H_GET_MPP 0x2D4 #define H_HOME_NODE_ASSOCIATIVITY 0x2EC #define H_BEST_ENERGY 0x2F4 +#define H_GET_MPP_X 0x314 #define MAX_HCALL_OPCODE H_BEST_ENERGY #ifndef __ASSEMBLY__ @@ -312,6 +314,16 @@ struct hvcall_mpp_data { int h_get_mpp(struct hvcall_mpp_data *); +struct hvcall_mpp_x_data { + unsigned long coalesced_bytes; + unsigned long pool_coalesced_bytes; + unsigned long pool_purr_cycles; + unsigned long pool_spurr_cycles; + unsigned long reserved[3]; +}; + +int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data); + #ifdef CONFIG_PPC_PSERIES extern int CMO_PrPSP; extern int CMO_SecPSP; diff --git a/trunk/arch/powerpc/platforms/cell/io-workarounds.h b/trunk/arch/powerpc/include/asm/io-workarounds.h similarity index 97% rename from trunk/arch/powerpc/platforms/cell/io-workarounds.h rename to trunk/arch/powerpc/include/asm/io-workarounds.h index 6efc7782ebf2..fbae49286926 100644 --- a/trunk/arch/powerpc/platforms/cell/io-workarounds.h +++ b/trunk/arch/powerpc/include/asm/io-workarounds.h @@ -31,7 +31,6 @@ struct iowa_bus { void *private; }; -void __devinit io_workaround_init(void); void __devinit iowa_register_bus(struct pci_controller *, struct ppc_pci_io *, int (*)(struct iowa_bus *, void *), void *); struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR); diff --git a/trunk/arch/powerpc/include/asm/io.h b/trunk/arch/powerpc/include/asm/io.h index 001f2f11c19b..45698d55cd6a 100644 --- a/trunk/arch/powerpc/include/asm/io.h +++ b/trunk/arch/powerpc/include/asm/io.h @@ -2,6 +2,8 @@ #define _ASM_POWERPC_IO_H #ifdef __KERNEL__ +#define ARCH_HAS_IOREMAP_WC + /* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -481,10 +483,16 @@ __do_out_asm(_rec_outl, "stwbrx") _memcpy_fromio(dst,PCI_FIX_ADDR(src),n) #endif /* !CONFIG_EEH */ -#ifdef CONFIG_PPC_INDIRECT_IO -#define DEF_PCI_HOOK(x) x +#ifdef CONFIG_PPC_INDIRECT_PIO +#define DEF_PCI_HOOK_pio(x) x +#else +#define DEF_PCI_HOOK_pio(x) NULL +#endif + +#ifdef CONFIG_PPC_INDIRECT_MMIO +#define DEF_PCI_HOOK_mem(x) x #else -#define DEF_PCI_HOOK(x) NULL +#define DEF_PCI_HOOK_mem(x) NULL #endif /* Structure containing all the hooks */ @@ -504,7 +512,7 @@ extern struct ppc_pci_io { #define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \ static inline ret name at \ { \ - if (DEF_PCI_HOOK(ppc_pci_io.name) != NULL) \ + if (DEF_PCI_HOOK_##space(ppc_pci_io.name) != NULL) \ return ppc_pci_io.name al; \ return __do_##name al; \ } @@ -512,7 +520,7 @@ static inline ret name at \ #define DEF_PCI_AC_NORET(name, at, al, space, aa) \ static inline void name at \ { \ - if (DEF_PCI_HOOK(ppc_pci_io.name) != NULL) \ + if (DEF_PCI_HOOK_##space(ppc_pci_io.name) != NULL) \ ppc_pci_io.name al; \ else \ __do_##name al; \ @@ -616,12 +624,13 @@ static inline void iosync(void) * * ioremap is the standard one and provides non-cacheable guarded mappings * and can be hooked by the platform via ppc_md * - * * ioremap_flags allows to specify the page flags as an argument and can - * also be hooked by the platform via ppc_md. ioremap_prot is the exact - * same thing as ioremap_flags. 
+ * * ioremap_prot allows to specify the page flags as an argument and can + * also be hooked by the platform via ppc_md. * * * ioremap_nocache is identical to ioremap * + * * ioremap_wc enables write combining + * * * iounmap undoes such a mapping and can be hooked * * * __ioremap_at (and the pending __iounmap_at) are low level functions to @@ -629,7 +638,7 @@ static inline void iosync(void) * currently be hooked. Must be page aligned. * * * __ioremap is the low level implementation used by ioremap and - * ioremap_flags and cannot be hooked (but can be used by a hook on one + * ioremap_prot and cannot be hooked (but can be used by a hook on one * of the previous ones) * * * __ioremap_caller is the same as above but takes an explicit caller @@ -640,10 +649,10 @@ static inline void iosync(void) * */ extern void __iomem *ioremap(phys_addr_t address, unsigned long size); -extern void __iomem *ioremap_flags(phys_addr_t address, unsigned long size, - unsigned long flags); +extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size, + unsigned long flags); +extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size); #define ioremap_nocache(addr, size) ioremap((addr), (size)) -#define ioremap_prot(addr, size, prot) ioremap_flags((addr), (size), (prot)) extern void iounmap(volatile void __iomem *addr); diff --git a/trunk/arch/powerpc/include/asm/io_event_irq.h b/trunk/arch/powerpc/include/asm/io_event_irq.h new file mode 100644 index 000000000000..b1a9a1be3c21 --- /dev/null +++ b/trunk/arch/powerpc/include/asm/io_event_irq.h @@ -0,0 +1,54 @@ +/* + * Copyright 2010, 2011 Mark Nelson and Tseng-Hui (Frank) Lin, IBM Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#ifndef _ASM_POWERPC_IO_EVENT_IRQ_H +#define _ASM_POWERPC_IO_EVENT_IRQ_H + +#include +#include + +#define PSERIES_IOEI_RPC_MAX_LEN 216 + +#define PSERIES_IOEI_TYPE_ERR_DETECTED 0x01 +#define PSERIES_IOEI_TYPE_ERR_RECOVERED 0x02 +#define PSERIES_IOEI_TYPE_EVENT 0x03 +#define PSERIES_IOEI_TYPE_RPC_PASS_THRU 0x04 + +#define PSERIES_IOEI_SUBTYPE_NOT_APP 0x00 +#define PSERIES_IOEI_SUBTYPE_REBALANCE_REQ 0x01 +#define PSERIES_IOEI_SUBTYPE_NODE_ONLINE 0x03 +#define PSERIES_IOEI_SUBTYPE_NODE_OFFLINE 0x04 +#define PSERIES_IOEI_SUBTYPE_DUMP_SIZE_CHANGE 0x05 +#define PSERIES_IOEI_SUBTYPE_TORRENT_IRV_UPDATE 0x06 +#define PSERIES_IOEI_SUBTYPE_TORRENT_HFI_CFGED 0x07 + +#define PSERIES_IOEI_SCOPE_NOT_APP 0x00 +#define PSERIES_IOEI_SCOPE_RIO_HUB 0x36 +#define PSERIES_IOEI_SCOPE_RIO_BRIDGE 0x37 +#define PSERIES_IOEI_SCOPE_PHB 0x38 +#define PSERIES_IOEI_SCOPE_EADS_GLOBAL 0x39 +#define PSERIES_IOEI_SCOPE_EADS_SLOT 0x3A +#define PSERIES_IOEI_SCOPE_TORRENT_HUB 0x3B +#define PSERIES_IOEI_SCOPE_SERVICE_PROC 0x51 + +/* Platform Event Log Format, Version 6, data portion of IO event section */ +struct pseries_io_event { + uint8_t event_type; /* 0x00 IO-Event Type */ + uint8_t rpc_data_len; /* 0x01 RPC data length */ + uint8_t scope; /* 0x02 Error/Event Scope */ + uint8_t event_subtype; /* 0x03 I/O-Event Sub-Type */ + uint32_t drc_index; /* 0x04 DRC Index */ + uint8_t rpc_data[PSERIES_IOEI_RPC_MAX_LEN]; + /* 0x08 RPC Data (0-216 bytes, */ + /* padded to 4-byte alignment) */ +}; + +extern struct atomic_notifier_head pseries_ioei_notifier_list; + +#endif /* _ASM_POWERPC_IO_EVENT_IRQ_H */ diff --git a/trunk/arch/powerpc/include/asm/irq.h b/trunk/arch/powerpc/include/asm/irq.h index 67ab5fb7d153..1bff591f7f72 100644 --- a/trunk/arch/powerpc/include/asm/irq.h +++ b/trunk/arch/powerpc/include/asm/irq.h @@ -88,9 +88,6 @@ struct irq_host_ops { /* Dispose of such a mapping */ void (*unmap)(struct irq_host *h, unsigned int virq); - /* Update of such a mapping */ - void (*remap)(struct irq_host *h, unsigned int virq, irq_hw_number_t hw); - /* Translate device-tree interrupt specifier from raw format coming * from the firmware to a irq_hw_number_t (interrupt line number) and * type (sense) that can be passed to set_irq_type(). In the absence @@ -128,19 +125,10 @@ struct irq_host { struct device_node *of_node; }; -/* The main irq map itself is an array of NR_IRQ entries containing the - * associate host and irq number. An entry with a host of NULL is free. - * An entry can be allocated if it's free, the allocator always then sets - * hwirq first to the host's invalid irq number and then fills ops.
- */ -struct irq_map_entry { - irq_hw_number_t hwirq; - struct irq_host *host; -}; - -extern struct irq_map_entry irq_map[NR_IRQS]; - +struct irq_data; +extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d); extern irq_hw_number_t virq_to_hw(unsigned int virq); +extern bool virq_is_host(unsigned int virq, struct irq_host *host); /** * irq_alloc_host - Allocate a new irq_host data structure diff --git a/trunk/arch/powerpc/include/asm/kexec.h b/trunk/arch/powerpc/include/asm/kexec.h index f54408d995b5..8a33698c61bd 100644 --- a/trunk/arch/powerpc/include/asm/kexec.h +++ b/trunk/arch/powerpc/include/asm/kexec.h @@ -76,7 +76,7 @@ extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)); extern cpumask_t cpus_in_sr; static inline int kexec_sr_activated(int cpu) { - return cpu_isset(cpu,cpus_in_sr); + return cpumask_test_cpu(cpu, &cpus_in_sr); } struct kimage; diff --git a/trunk/arch/powerpc/include/asm/kvm_asm.h b/trunk/arch/powerpc/include/asm/kvm_asm.h index 5b7504674397..0951b17f4eb5 100644 --- a/trunk/arch/powerpc/include/asm/kvm_asm.h +++ b/trunk/arch/powerpc/include/asm/kvm_asm.h @@ -59,6 +59,7 @@ #define BOOK3S_INTERRUPT_INST_SEGMENT 0x480 #define BOOK3S_INTERRUPT_EXTERNAL 0x500 #define BOOK3S_INTERRUPT_EXTERNAL_LEVEL 0x501 +#define BOOK3S_INTERRUPT_EXTERNAL_HV 0x502 #define BOOK3S_INTERRUPT_ALIGNMENT 0x600 #define BOOK3S_INTERRUPT_PROGRAM 0x700 #define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800 diff --git a/trunk/arch/powerpc/include/asm/kvm_book3s_asm.h b/trunk/arch/powerpc/include/asm/kvm_book3s_asm.h index 36fdb3aff30b..d5a8a3861635 100644 --- a/trunk/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/trunk/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -34,6 +34,7 @@ (\intno == BOOK3S_INTERRUPT_DATA_SEGMENT) || \ (\intno == BOOK3S_INTERRUPT_INST_SEGMENT) || \ (\intno == BOOK3S_INTERRUPT_EXTERNAL) || \ + (\intno == BOOK3S_INTERRUPT_EXTERNAL_HV) || \ (\intno == BOOK3S_INTERRUPT_ALIGNMENT) || \ (\intno == BOOK3S_INTERRUPT_PROGRAM) || \ (\intno == BOOK3S_INTERRUPT_FP_UNAVAIL) || \ diff --git a/trunk/arch/powerpc/include/asm/lppaca.h b/trunk/arch/powerpc/include/asm/lppaca.h index a077adc0b35e..e0298d26ce5d 100644 --- a/trunk/arch/powerpc/include/asm/lppaca.h +++ b/trunk/arch/powerpc/include/asm/lppaca.h @@ -210,6 +210,8 @@ struct dtl_entry { #define DISPATCH_LOG_BYTES 4096 /* bytes per cpu */ #define N_DISPATCH_LOG (DISPATCH_LOG_BYTES / sizeof(struct dtl_entry)) +extern struct kmem_cache *dtl_cache; + /* * When CONFIG_VIRT_CPU_ACCOUNTING = y, the cpu accounting code controls * reading from the dispatch trace log. 
If other code wants to consume diff --git a/trunk/arch/powerpc/include/asm/machdep.h b/trunk/arch/powerpc/include/asm/machdep.h index e4f01915fbb0..47cacddb14cf 100644 --- a/trunk/arch/powerpc/include/asm/machdep.h +++ b/trunk/arch/powerpc/include/asm/machdep.h @@ -29,21 +29,6 @@ struct file; struct pci_controller; struct kimage; -#ifdef CONFIG_SMP -struct smp_ops_t { - void (*message_pass)(int target, int msg); - int (*probe)(void); - void (*kick_cpu)(int nr); - void (*setup_cpu)(int nr); - void (*bringup_done)(void); - void (*take_timebase)(void); - void (*give_timebase)(void); - int (*cpu_disable)(void); - void (*cpu_die)(unsigned int nr); - int (*cpu_bootable)(unsigned int nr); -}; -#endif - struct machdep_calls { char *name; #ifdef CONFIG_PPC64 @@ -267,6 +252,7 @@ struct machdep_calls { extern void e500_idle(void); extern void power4_idle(void); +extern void power7_idle(void); extern void ppc6xx_idle(void); extern void book3e_idle(void); @@ -311,12 +297,6 @@ extern sys_ctrler_t sys_ctrler; #endif /* CONFIG_PPC_PMAC */ -#ifdef CONFIG_SMP -/* Poor default implementations */ -extern void __devinit smp_generic_give_timebase(void); -extern void __devinit smp_generic_take_timebase(void); -#endif /* CONFIG_SMP */ - /* Functions to produce codes on the leds. * The SRC code should be unique for the message category and should diff --git a/trunk/arch/powerpc/include/asm/mmu-book3e.h b/trunk/arch/powerpc/include/asm/mmu-book3e.h index 17194fcd4040..3ea0f9a259d8 100644 --- a/trunk/arch/powerpc/include/asm/mmu-book3e.h +++ b/trunk/arch/powerpc/include/asm/mmu-book3e.h @@ -43,6 +43,7 @@ #define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000) #define MAS0_ESEL(x) (((x) << 16) & 0x0FFF0000) #define MAS0_NV(x) ((x) & 0x00000FFF) +#define MAS0_ESEL_MASK 0x0FFF0000 #define MAS0_HES 0x00004000 #define MAS0_WQ_ALLWAYS 0x00000000 #define MAS0_WQ_COND 0x00001000 @@ -137,6 +138,21 @@ #define MMUCSR0_TLB2PS 0x00078000 /* TLB2 Page Size */ #define MMUCSR0_TLB3PS 0x00780000 /* TLB3 Page Size */ +/* MMUCFG bits */ +#define MMUCFG_MAVN_NASK 0x00000003 +#define MMUCFG_MAVN_V1_0 0x00000000 +#define MMUCFG_MAVN_V2_0 0x00000001 +#define MMUCFG_NTLB_MASK 0x0000000c +#define MMUCFG_NTLB_SHIFT 2 +#define MMUCFG_PIDSIZE_MASK 0x000007c0 +#define MMUCFG_PIDSIZE_SHIFT 6 +#define MMUCFG_TWC 0x00008000 +#define MMUCFG_LRAT 0x00010000 +#define MMUCFG_RASIZE_MASK 0x00fe0000 +#define MMUCFG_RASIZE_SHIFT 17 +#define MMUCFG_LPIDSIZE_MASK 0x0f000000 +#define MMUCFG_LPIDSIZE_SHIFT 24 + /* TLBnCFG encoding */ #define TLBnCFG_N_ENTRY 0x00000fff /* number of entries */ #define TLBnCFG_HES 0x00002000 /* HW select supported */ @@ -229,6 +245,10 @@ extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; extern int mmu_linear_psize; extern int mmu_vmemmap_psize; +#ifdef CONFIG_PPC64 +extern unsigned long linear_map_top; +#endif + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */ diff --git a/trunk/arch/powerpc/include/asm/mmu-hash64.h b/trunk/arch/powerpc/include/asm/mmu-hash64.h index ae7b3efec8e5..d865bd909c7d 100644 --- a/trunk/arch/powerpc/include/asm/mmu-hash64.h +++ b/trunk/arch/powerpc/include/asm/mmu-hash64.h @@ -408,6 +408,7 @@ static inline void subpage_prot_init_new_context(struct mm_struct *mm) { } #endif /* CONFIG_PPC_SUBPAGE_PROT */ typedef unsigned long mm_context_id_t; +struct spinlock; typedef struct { mm_context_id_t id; @@ -423,6 +424,11 @@ typedef struct { #ifdef CONFIG_PPC_SUBPAGE_PROT struct subpage_prot_table spt; #endif /* CONFIG_PPC_SUBPAGE_PROT */ +#ifdef CONFIG_PPC_ICSWX + struct spinlock 
*cop_lockp; /* guard acop and cop_pid */ + unsigned long acop; /* mask of enabled coprocessor types */ + unsigned int cop_pid; /* pid value used with coprocessors */ +#endif /* CONFIG_PPC_ICSWX */ } mm_context_t; diff --git a/trunk/arch/powerpc/include/asm/mmu.h b/trunk/arch/powerpc/include/asm/mmu.h index bb40a06d3b77..4138b21ae80a 100644 --- a/trunk/arch/powerpc/include/asm/mmu.h +++ b/trunk/arch/powerpc/include/asm/mmu.h @@ -56,11 +56,6 @@ */ #define MMU_FTR_NEED_DTLB_SW_LRU ASM_CONST(0x00200000) -/* This indicates that the processor uses the ISA 2.06 server tlbie - * mnemonics - */ -#define MMU_FTR_TLBIE_206 ASM_CONST(0x00400000) - /* Enable use of TLB reservation. Processor should support tlbsrx. * instruction and MAS0[WQ]. */ @@ -70,6 +65,53 @@ */ #define MMU_FTR_USE_PAIRED_MAS ASM_CONST(0x01000000) +/* MMU is SLB-based + */ +#define MMU_FTR_SLB ASM_CONST(0x02000000) + +/* Support 16M large pages + */ +#define MMU_FTR_16M_PAGE ASM_CONST(0x04000000) + +/* Supports TLBIEL variant + */ +#define MMU_FTR_TLBIEL ASM_CONST(0x08000000) + +/* Supports tlbies w/o locking + */ +#define MMU_FTR_LOCKLESS_TLBIE ASM_CONST(0x10000000) + +/* Large pages can be marked CI + */ +#define MMU_FTR_CI_LARGE_PAGE ASM_CONST(0x20000000) + +/* 1T segments available + */ +#define MMU_FTR_1T_SEGMENT ASM_CONST(0x40000000) + +/* Doesn't support the B bit (1T segment) in SLBIE + */ +#define MMU_FTR_NO_SLBIE_B ASM_CONST(0x80000000) + +/* MMU feature bit sets for various CPUs */ +#define MMU_FTRS_DEFAULT_HPTE_ARCH_V2 \ + MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2 +#define MMU_FTRS_POWER4 MMU_FTRS_DEFAULT_HPTE_ARCH_V2 +#define MMU_FTRS_PPC970 MMU_FTRS_POWER4 +#define MMU_FTRS_POWER5 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE +#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE +#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE +#define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \ + MMU_FTR_CI_LARGE_PAGE +#define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \ + MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B +#define MMU_FTRS_A2 MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX | \ + MMU_FTR_USE_TLBIVAX_BCAST | \ + MMU_FTR_LOCK_BCAST_INVAL | \ + MMU_FTR_USE_TLBRSRV | \ + MMU_FTR_USE_PAIRED_MAS | \ + MMU_FTR_TLBIEL | \ + MMU_FTR_16M_PAGE #ifndef __ASSEMBLY__ #include diff --git a/trunk/arch/powerpc/include/asm/mmu_context.h b/trunk/arch/powerpc/include/asm/mmu_context.h index 81fb41289d6c..a73668a5f30d 100644 --- a/trunk/arch/powerpc/include/asm/mmu_context.h +++ b/trunk/arch/powerpc/include/asm/mmu_context.h @@ -32,6 +32,10 @@ extern void __destroy_context(unsigned long context_id); extern void mmu_context_init(void); #endif +extern void switch_cop(struct mm_struct *next); +extern int use_cop(unsigned long acop, struct mm_struct *mm); +extern void drop_cop(unsigned long acop, struct mm_struct *mm); + /* * switch_mm is the entry point called from the architecture independent * code in kernel/sched.c @@ -55,6 +59,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, if (prev == next) return; +#ifdef CONFIG_PPC_ICSWX + /* Switch coprocessor context only if prev or next uses a coprocessor */ + if (prev->context.acop || next->context.acop) + switch_cop(next); +#endif /* CONFIG_PPC_ICSWX */ + /* We must stop all altivec streams before changing the HW * context */ @@ -67,7 +77,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, * sub architectures. 
*/ #ifdef CONFIG_PPC_STD_MMU_64 - if (cpu_has_feature(CPU_FTR_SLB)) + if (mmu_has_feature(MMU_FTR_SLB)) switch_slb(tsk, next); else switch_stab(tsk, next); diff --git a/trunk/arch/powerpc/include/asm/mpic.h b/trunk/arch/powerpc/include/asm/mpic.h index 7005ee0b074d..df18989e78d4 100644 --- a/trunk/arch/powerpc/include/asm/mpic.h +++ b/trunk/arch/powerpc/include/asm/mpic.h @@ -3,7 +3,6 @@ #ifdef __KERNEL__ #include -#include #include #include @@ -263,6 +262,7 @@ struct mpic #ifdef CONFIG_SMP struct irq_chip hc_ipi; #endif + struct irq_chip hc_tm; const char *name; /* Flags */ unsigned int flags; @@ -281,7 +281,7 @@ struct mpic /* vector numbers used for internal sources (ipi/timers) */ unsigned int ipi_vecs[4]; - unsigned int timer_vecs[4]; + unsigned int timer_vecs[8]; /* Spurious vector to program into unused sources */ unsigned int spurious_vec; @@ -320,8 +320,6 @@ struct mpic /* link */ struct mpic *next; - struct sys_device sysdev; - #ifdef CONFIG_PM struct mpic_irq_save *save_data; #endif @@ -371,6 +369,8 @@ struct mpic * NOTE: This flag trumps MPIC_WANTS_RESET. */ #define MPIC_NO_RESET 0x00004000 +/* Freescale MPIC (compatible includes "fsl,mpic") */ +#define MPIC_FSL 0x00008000 /* MPIC HW modification ID */ #define MPIC_REGSET_MASK 0xf0000000 diff --git a/trunk/arch/powerpc/include/asm/pSeries_reconfig.h b/trunk/arch/powerpc/include/asm/pSeries_reconfig.h index d4b4bfa26fb3..89d2f99c1bf4 100644 --- a/trunk/arch/powerpc/include/asm/pSeries_reconfig.h +++ b/trunk/arch/powerpc/include/asm/pSeries_reconfig.h @@ -18,13 +18,18 @@ extern int pSeries_reconfig_notifier_register(struct notifier_block *); extern void pSeries_reconfig_notifier_unregister(struct notifier_block *); extern struct blocking_notifier_head pSeries_reconfig_chain; +/* Not the best place to put this, will be fixed when we move some + * of the rtas suspend-me stuff to pseries */ +extern void pSeries_coalesce_init(void); #else /* !CONFIG_PPC_PSERIES */ static inline int pSeries_reconfig_notifier_register(struct notifier_block *nb) { return 0; } static inline void pSeries_reconfig_notifier_unregister(struct notifier_block *nb) { } +static inline void pSeries_coalesce_init(void) { } #endif /* CONFIG_PPC_PSERIES */ + #endif /* __KERNEL__ */ #endif /* _PPC64_PSERIES_RECONFIG_H */ diff --git a/trunk/arch/powerpc/include/asm/paca.h b/trunk/arch/powerpc/include/asm/paca.h index ec57540cd7af..74126765106a 100644 --- a/trunk/arch/powerpc/include/asm/paca.h +++ b/trunk/arch/powerpc/include/asm/paca.h @@ -92,9 +92,9 @@ struct paca_struct { * Now, starting in cacheline 2, the exception save areas */ /* used for most interrupts/exceptions */ - u64 exgen[10] __attribute__((aligned(0x80))); - u64 exmc[10]; /* used for machine checks */ - u64 exslb[10]; /* used for SLB/segment table misses + u64 exgen[11] __attribute__((aligned(0x80))); + u64 exmc[11]; /* used for machine checks */ + u64 exslb[11]; /* used for SLB/segment table misses * on the linear mapping */ /* SLB related definitions */ u16 vmalloc_sllp; @@ -106,7 +106,8 @@ struct paca_struct { pgd_t *pgd; /* Current PGD */ pgd_t *kernel_pgd; /* Kernel PGD */ u64 exgen[8] __attribute__((aligned(0x80))); - u64 extlb[EX_TLB_SIZE*3] __attribute__((aligned(0x80))); + /* We can have up to 3 levels of reentrancy in the TLB miss handler */ + u64 extlb[3][EX_TLB_SIZE / sizeof(u64)] __attribute__((aligned(0x80))); u64 exmc[8]; /* used for machine checks */ u64 excrit[8]; /* used for crit interrupts */ u64 exdbg[8]; /* used for debug interrupts */ @@ -125,7 +126,7 @@ struct paca_struct { 
struct task_struct *__current; /* Pointer to current */ u64 kstack; /* Saved Kernel stack addr */ u64 stab_rr; /* stab/slb round-robin counter */ - u64 saved_r1; /* r1 save for RTAS calls */ + u64 saved_r1; /* r1 save for RTAS calls or PM */ u64 saved_msr; /* MSR saved here by enter_rtas */ u16 trap_save; /* Used when bad stack is encountered */ u8 soft_enabled; /* irq soft-enable flag */ diff --git a/trunk/arch/powerpc/include/asm/page_64.h b/trunk/arch/powerpc/include/asm/page_64.h index 812b2cd80aed..9356262fd3cc 100644 --- a/trunk/arch/powerpc/include/asm/page_64.h +++ b/trunk/arch/powerpc/include/asm/page_64.h @@ -59,24 +59,7 @@ static __inline__ void clear_page(void *addr) : "ctr", "memory"); } -extern void copy_4K_page(void *to, void *from); - -#ifdef CONFIG_PPC_64K_PAGES -static inline void copy_page(void *to, void *from) -{ - unsigned int i; - for (i=0; i < (1 << (PAGE_SHIFT - 12)); i++) { - copy_4K_page(to, from); - to += 4096; - from += 4096; - } -} -#else /* CONFIG_PPC_64K_PAGES */ -static inline void copy_page(void *to, void *from) -{ - copy_4K_page(to, from); -} -#endif /* CONFIG_PPC_64K_PAGES */ +extern void copy_page(void *to, void *from); /* Log 2 of page table size */ extern u64 ppc64_pft_size; @@ -130,7 +113,7 @@ extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize); extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start, unsigned long len, unsigned int psize); -#define slice_mm_new_context(mm) ((mm)->context.id == 0) +#define slice_mm_new_context(mm) ((mm)->context.id == MMU_NO_CONTEXT) #endif /* __ASSEMBLY__ */ #else diff --git a/trunk/arch/powerpc/include/asm/pgtable-ppc64.h b/trunk/arch/powerpc/include/asm/pgtable-ppc64.h index 2b09cd522d33..81576ee0cfb1 100644 --- a/trunk/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/trunk/arch/powerpc/include/asm/pgtable-ppc64.h @@ -257,21 +257,20 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - unsigned long old; - if ((pte_val(*ptep) & _PAGE_RW) == 0) - return; - old = pte_update(mm, addr, ptep, _PAGE_RW, 0); + if ((pte_val(*ptep) & _PAGE_RW) == 0) + return; + + pte_update(mm, addr, ptep, _PAGE_RW, 0); } static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - unsigned long old; - if ((pte_val(*ptep) & _PAGE_RW) == 0) return; - old = pte_update(mm, addr, ptep, _PAGE_RW, 1); + + pte_update(mm, addr, ptep, _PAGE_RW, 1); } /* diff --git a/trunk/arch/powerpc/include/asm/ppc-opcode.h b/trunk/arch/powerpc/include/asm/ppc-opcode.h index 1255569387b6..e472659d906c 100644 --- a/trunk/arch/powerpc/include/asm/ppc-opcode.h +++ b/trunk/arch/powerpc/include/asm/ppc-opcode.h @@ -41,6 +41,10 @@ #define PPC_INST_RFCI 0x4c000066 #define PPC_INST_RFDI 0x4c00004e #define PPC_INST_RFMCI 0x4c00004c +#define PPC_INST_MFSPR_DSCR 0x7c1102a6 +#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff +#define PPC_INST_MTSPR_DSCR 0x7c1103a6 +#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff #define PPC_INST_STRING 0x7c00042a #define PPC_INST_STRING_MASK 0xfc0007fe @@ -56,6 +60,17 @@ #define PPC_INST_TLBSRX_DOT 0x7c0006a5 #define PPC_INST_XXLOR 0xf0000510 +#define PPC_INST_NAP 0x4c000364 +#define PPC_INST_SLEEP 0x4c0003a4 + +/* A2 specific instructions */ +#define PPC_INST_ERATWE 0x7c0001a6 +#define PPC_INST_ERATRE 0x7c000166 +#define PPC_INST_ERATILX 0x7c000066 +#define PPC_INST_ERATIVAX 0x7c000666 +#define PPC_INST_ERATSX 0x7c000126 +#define PPC_INST_ERATSX_DOT 0x7c000127 
+ /* macros to insert fields into opcodes */ #define __PPC_RA(a) (((a) & 0x1f) << 16) #define __PPC_RB(b) (((b) & 0x1f) << 11) @@ -67,6 +82,8 @@ #define __PPC_XT(s) __PPC_XS(s) #define __PPC_T_TLB(t) (((t) & 0x3) << 21) #define __PPC_WC(w) (((w) & 0x3) << 21) +#define __PPC_WS(w) (((w) & 0x1f) << 11) + /* * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a * larx with EH set as an illegal instruction. @@ -113,6 +130,21 @@ #define PPC_TLBIVAX(a,b) stringify_in_c(.long PPC_INST_TLBIVAX | \ __PPC_RA(a) | __PPC_RB(b)) +#define PPC_ERATWE(s, a, w) stringify_in_c(.long PPC_INST_ERATWE | \ + __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w)) +#define PPC_ERATRE(s, a, w) stringify_in_c(.long PPC_INST_ERATRE | \ + __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w)) +#define PPC_ERATILX(t, a, b) stringify_in_c(.long PPC_INST_ERATILX | \ + __PPC_T_TLB(t) | __PPC_RA(a) | \ + __PPC_RB(b)) +#define PPC_ERATIVAX(s, a, b) stringify_in_c(.long PPC_INST_ERATIVAX | \ + __PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b)) +#define PPC_ERATSX(t, a, w) stringify_in_c(.long PPC_INST_ERATSX | \ + __PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b)) +#define PPC_ERATSX_DOT(t, a, w) stringify_in_c(.long PPC_INST_ERATSX_DOT | \ + __PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b)) + + /* * Define what the VSX XX1 form instructions will look like, then add * the 128 bit load store instructions based on that. @@ -126,4 +158,7 @@ #define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \ VSX_XX3((t), (a), (b))) +#define PPC_NAP stringify_in_c(.long PPC_INST_NAP) +#define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP) + #endif /* _ASM_POWERPC_PPC_OPCODE_H */ diff --git a/trunk/arch/powerpc/include/asm/ppc_asm.h b/trunk/arch/powerpc/include/asm/ppc_asm.h index 98210067c1cc..1b422381fc16 100644 --- a/trunk/arch/powerpc/include/asm/ppc_asm.h +++ b/trunk/arch/powerpc/include/asm/ppc_asm.h @@ -170,6 +170,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) #define HMT_MEDIUM or 2,2,2 #define HMT_MEDIUM_HIGH or 5,5,5 # medium high priority #define HMT_HIGH or 3,3,3 +#define HMT_EXTRA_HIGH or 7,7,7 # power7 only #ifdef __KERNEL__ #ifdef CONFIG_PPC64 diff --git a/trunk/arch/powerpc/include/asm/processor.h b/trunk/arch/powerpc/include/asm/processor.h index de1967a1ff57..d50c2b6d9bc3 100644 --- a/trunk/arch/powerpc/include/asm/processor.h +++ b/trunk/arch/powerpc/include/asm/processor.h @@ -238,6 +238,10 @@ struct thread_struct { #ifdef CONFIG_KVM_BOOK3S_32_HANDLER void* kvm_shadow_vcpu; /* KVM internal data */ #endif /* CONFIG_KVM_BOOK3S_32_HANDLER */ +#ifdef CONFIG_PPC64 + unsigned long dscr; + int dscr_inherit; +#endif }; #define ARCH_MIN_TASKALIGN 16 diff --git a/trunk/arch/powerpc/include/asm/pte-common.h b/trunk/arch/powerpc/include/asm/pte-common.h index 811f04ac3660..8d1569c29042 100644 --- a/trunk/arch/powerpc/include/asm/pte-common.h +++ b/trunk/arch/powerpc/include/asm/pte-common.h @@ -162,7 +162,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void); * on platforms where such control is possible. 
*/ #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\ - defined(CONFIG_KPROBES) + defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE) #define PAGE_KERNEL_TEXT PAGE_KERNEL_X #else #define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX diff --git a/trunk/arch/powerpc/include/asm/reg.h b/trunk/arch/powerpc/include/asm/reg.h index 7e4abebe76c0..c5cae0dd176c 100644 --- a/trunk/arch/powerpc/include/asm/reg.h +++ b/trunk/arch/powerpc/include/asm/reg.h @@ -99,17 +99,23 @@ #define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */ #if defined(CONFIG_PPC_BOOK3S_64) +#define MSR_64BIT MSR_SF + /* Server variant */ #define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV -#define MSR_KERNEL MSR_ | MSR_SF +#define MSR_KERNEL MSR_ | MSR_64BIT #define MSR_USER32 MSR_ | MSR_PR | MSR_EE -#define MSR_USER64 MSR_USER32 | MSR_SF +#define MSR_USER64 MSR_USER32 | MSR_64BIT #elif defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_8xx) /* Default MSR for kernel mode. */ #define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR) #define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE) #endif +#ifndef MSR_64BIT +#define MSR_64BIT 0 +#endif + /* Floating Point Status and Control Register (FPSCR) Fields */ #define FPSCR_FX 0x80000000 /* FPU exception summary */ #define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */ @@ -182,6 +188,8 @@ #define SPRN_CTR 0x009 /* Count Register */ #define SPRN_DSCR 0x11 +#define SPRN_CFAR 0x1c /* Come From Address Register */ +#define SPRN_ACOP 0x1F /* Available Coprocessor Register */ #define SPRN_CTRLF 0x088 #define SPRN_CTRLT 0x098 #define CTRL_CT 0xc0000000 /* current thread */ @@ -210,8 +218,43 @@ #define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */ #define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */ #define SPRN_SPURR 0x134 /* Scaled PURR */ +#define SPRN_HSPRG0 0x130 /* Hypervisor Scratch 0 */ +#define SPRN_HSPRG1 0x131 /* Hypervisor Scratch 1 */ +#define SPRN_HDSISR 0x132 +#define SPRN_HDAR 0x133 +#define SPRN_HDEC 0x136 /* Hypervisor Decrementer */ #define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */ +#define SPRN_RMOR 0x138 /* Real mode offset register */ +#define SPRN_HRMOR 0x139 /* Real mode offset register */ +#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */ +#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */ #define SPRN_LPCR 0x13E /* LPAR Control Register */ +#define LPCR_VPM0 (1ul << (63-0)) +#define LPCR_VPM1 (1ul << (63-1)) +#define LPCR_ISL (1ul << (63-2)) +#define LPCR_DPFD_SH (63-11) +#define LPCR_VRMA_L (1ul << (63-12)) +#define LPCR_VRMA_LP0 (1ul << (63-15)) +#define LPCR_VRMA_LP1 (1ul << (63-16)) +#define LPCR_RMLS 0x1C000000 /* impl dependent rmo limit sel */ +#define LPCR_ILE 0x02000000 /* !HV irqs set MSR:LE */ +#define LPCR_PECE 0x00007000 /* powersave exit cause enable */ +#define LPCR_PECE0 0x00004000 /* ext. exceptions can cause exit */ +#define LPCR_PECE1 0x00002000 /* decrementer can cause exit */ +#define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */ +#define LPCR_MER 0x00000800 /* Mediated External Exception */ +#define LPCR_LPES0 0x00000008 /* LPAR Env selector 0 */ +#define LPCR_LPES1 0x00000004 /* LPAR Env selector 1 */ +#define LPCR_RMI 0x00000002 /* real mode is cache inhibit */ +#define LPCR_HDICE 0x00000001 /* Hyp Decr enable (HV,PR,EE) */ +#define SPRN_LPID 0x13F /* Logical Partition Identifier */ +#define SPRN_HMER 0x150 /* Hardware m? error recovery */ +#define SPRN_HMEER 0x151 /* Hardware m? 
enable error recovery */ +#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */ +#define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */ +#define SPRN_TLBVPNR 0x155 /* P7 TLB control register */ +#define SPRN_TLBRPNR 0x156 /* P7 TLB control register */ +#define SPRN_TLBLPIDR 0x157 /* P7 TLB control register */ #define SPRN_DBAT0L 0x219 /* Data BAT 0 Lower Register */ #define SPRN_DBAT0U 0x218 /* Data BAT 0 Upper Register */ #define SPRN_DBAT1L 0x21B /* Data BAT 1 Lower Register */ @@ -434,16 +477,23 @@ #define SPRN_SRR0 0x01A /* Save/Restore Register 0 */ #define SPRN_SRR1 0x01B /* Save/Restore Register 1 */ #define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */ -#define SRR1_WAKERESET 0x00380000 /* System reset */ #define SRR1_WAKESYSERR 0x00300000 /* System error */ #define SRR1_WAKEEE 0x00200000 /* External interrupt */ #define SRR1_WAKEMT 0x00280000 /* mtctrl */ +#define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */ #define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */ #define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */ +#define SRR1_WAKERESET 0x00100000 /* System reset */ +#define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask [46:47] */ +#define SRR1_WS_DEEPEST 0x00030000 /* Some resources not maintained, + * may not be recoverable */ +#define SRR1_WS_DEEPER 0x00020000 /* Some resources not maintained */ +#define SRR1_WS_DEEP 0x00010000 /* All resources maintained */ #define SRR1_PROGFPE 0x00100000 /* Floating Point Enabled */ #define SRR1_PROGPRIV 0x00040000 /* Privileged instruction */ #define SRR1_PROGTRAP 0x00020000 /* Trap */ #define SRR1_PROGADDR 0x00010000 /* SRR0 contains subsequent addr */ + #define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */ #define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */ @@ -673,12 +723,15 @@ * SPRG usage: * * All 64-bit: - * - SPRG1 stores PACA pointer + * - SPRG1 stores PACA pointer except 64-bit server in + * HV mode in which case it is HSPRG0 * * 64-bit server: * - SPRG0 unused (reserved for HV on Power4) * - SPRG2 scratch for exception vectors * - SPRG3 unused (user visible) + * - HSPRG0 stores PACA in HV mode + * - HSPRG1 scratch for "HV" exceptions * * 64-bit embedded * - SPRG0 generic exception scratch @@ -741,6 +794,41 @@ #ifdef CONFIG_PPC_BOOK3S_64 #define SPRN_SPRG_SCRATCH0 SPRN_SPRG2 +#define SPRN_SPRG_HPACA SPRN_HSPRG0 +#define SPRN_SPRG_HSCRATCH0 SPRN_HSPRG1 + +#define GET_PACA(rX) \ + BEGIN_FTR_SECTION_NESTED(66); \ + mfspr rX,SPRN_SPRG_PACA; \ + FTR_SECTION_ELSE_NESTED(66); \ + mfspr rX,SPRN_SPRG_HPACA; \ + ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66) + +#define SET_PACA(rX) \ + BEGIN_FTR_SECTION_NESTED(66); \ + mtspr SPRN_SPRG_PACA,rX; \ + FTR_SECTION_ELSE_NESTED(66); \ + mtspr SPRN_SPRG_HPACA,rX; \ + ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66) + +#define GET_SCRATCH0(rX) \ + BEGIN_FTR_SECTION_NESTED(66); \ + mfspr rX,SPRN_SPRG_SCRATCH0; \ + FTR_SECTION_ELSE_NESTED(66); \ + mfspr rX,SPRN_SPRG_HSCRATCH0; \ + ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66) + +#define SET_SCRATCH0(rX) \ + BEGIN_FTR_SECTION_NESTED(66); \ + mtspr SPRN_SPRG_SCRATCH0,rX; \ + FTR_SECTION_ELSE_NESTED(66); \ + mtspr SPRN_SPRG_HSCRATCH0,rX; \ + ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66) + +#else /* CONFIG_PPC_BOOK3S_64 */ +#define GET_SCRATCH0(rX) mfspr rX,SPRN_SPRG_SCRATCH0 +#define SET_SCRATCH0(rX) mtspr SPRN_SPRG_SCRATCH0,rX + #endif #ifdef CONFIG_PPC_BOOK3E_64 @@ -750,6 +838,10 @@ #define SPRN_SPRG_TLB_EXFRAME SPRN_SPRG2 #define SPRN_SPRG_TLB_SCRATCH 
SPRN_SPRG6 #define SPRN_SPRG_GEN_SCRATCH SPRN_SPRG0 + +#define SET_PACA(rX) mtspr SPRN_SPRG_PACA,rX +#define GET_PACA(rX) mfspr rX,SPRN_SPRG_PACA + #endif #ifdef CONFIG_PPC_BOOK3S_32 @@ -800,6 +892,8 @@ #define SPRN_SPRG_SCRATCH1 SPRN_SPRG1 #endif + + /* * An mtfsf instruction with the L bit set. On CPUs that support this a * full 64bits of FPSCR is restored and on other CPUs the L bit is ignored. @@ -894,6 +988,8 @@ #define PV_POWER5p 0x003B #define PV_POWER7 0x003F #define PV_970FX 0x003C +#define PV_POWER6 0x003E +#define PV_POWER7 0x003F #define PV_630 0x0040 #define PV_630p 0x0041 #define PV_970MP 0x0044 diff --git a/trunk/arch/powerpc/include/asm/reg_a2.h b/trunk/arch/powerpc/include/asm/reg_a2.h new file mode 100644 index 000000000000..3d52a1132f3d --- /dev/null +++ b/trunk/arch/powerpc/include/asm/reg_a2.h @@ -0,0 +1,165 @@ +/* + * Register definitions specific to the A2 core + * + * Copyright (C) 2008 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef __ASM_POWERPC_REG_A2_H__ +#define __ASM_POWERPC_REG_A2_H__ + +#define SPRN_TENSR 0x1b5 +#define SPRN_TENS 0x1b6 /* Thread ENable Set */ +#define SPRN_TENC 0x1b7 /* Thread ENable Clear */ + +#define SPRN_A2_CCR0 0x3f0 /* Core Configuration Register 0 */ +#define SPRN_A2_CCR1 0x3f1 /* Core Configuration Register 1 */ +#define SPRN_A2_CCR2 0x3f2 /* Core Configuration Register 2 */ +#define SPRN_MMUCR0 0x3fc /* MMU Control Register 0 */ +#define SPRN_MMUCR1 0x3fd /* MMU Control Register 1 */ +#define SPRN_MMUCR2 0x3fe /* MMU Control Register 2 */ +#define SPRN_MMUCR3 0x3ff /* MMU Control Register 3 */ + +#define SPRN_IAR 0x372 + +#define SPRN_IUCR0 0x3f3 +#define IUCR0_ICBI_ACK 0x1000 + +#define SPRN_XUCR0 0x3f6 /* Execution Unit Config Register 0 */ + +#define A2_IERAT_SIZE 16 +#define A2_DERAT_SIZE 32 + +/* A2 MMUCR0 bits */ +#define MMUCR0_ECL 0x80000000 /* Extended Class for TLB fills */ +#define MMUCR0_TID_NZ 0x40000000 /* TID is non-zero */ +#define MMUCR0_TS 0x10000000 /* Translation space for TLB fills */ +#define MMUCR0_TGS 0x20000000 /* Guest space for TLB fills */ +#define MMUCR0_TLBSEL 0x0c000000 /* TLB or ERAT target for TLB fills */ +#define MMUCR0_TLBSEL_U 0x00000000 /* TLBSEL = UTLB */ +#define MMUCR0_TLBSEL_I 0x08000000 /* TLBSEL = I-ERAT */ +#define MMUCR0_TLBSEL_D 0x0c000000 /* TLBSEL = D-ERAT */ +#define MMUCR0_LOCKSRSH 0x02000000 /* Use TLB lock on tlbsx. 
*/ +#define MMUCR0_TID_MASK 0x000000ff /* TID field */ + +/* A2 MMUCR1 bits */ +#define MMUCR1_IRRE 0x80000000 /* I-ERAT round robin enable */ +#define MMUCR1_DRRE 0x40000000 /* D-ERAT round robin enable */ +#define MMUCR1_REE 0x20000000 /* Reference Exception Enable*/ +#define MMUCR1_CEE 0x10000000 /* Change exception enable */ +#define MMUCR1_CSINV_ALL 0x00000000 /* Inval ERAT on all CS evts */ +#define MMUCR1_CSINV_NISYNC 0x04000000 /* Inval ERAT on all ex isync*/ +#define MMUCR1_CSINV_NEVER 0x0c000000 /* Don't inval ERAT on CS */ +#define MMUCR1_ICTID 0x00080000 /* IERAT class field as TID */ +#define MMUCR1_ITTID 0x00040000 /* IERAT thdid field as TID */ +#define MMUCR1_DCTID 0x00020000 /* DERAT class field as TID */ +#define MMUCR1_DTTID 0x00010000 /* DERAT thdid field as TID */ +#define MMUCR1_DCCD 0x00008000 /* DERAT class ignore */ +#define MMUCR1_TLBWE_BINV 0x00004000 /* back invalidate on tlbwe */ + +/* A2 MMUCR2 bits */ +#define MMUCR2_PSSEL_SHIFT 4 + +/* A2 MMUCR3 bits */ +#define MMUCR3_THID 0x0000000f /* Thread ID */ + +/* *** ERAT TLB bits definitions */ +#define TLB0_EPN_MASK ASM_CONST(0xfffffffffffff000) +#define TLB0_CLASS_MASK ASM_CONST(0x0000000000000c00) +#define TLB0_CLASS_00 ASM_CONST(0x0000000000000000) +#define TLB0_CLASS_01 ASM_CONST(0x0000000000000400) +#define TLB0_CLASS_10 ASM_CONST(0x0000000000000800) +#define TLB0_CLASS_11 ASM_CONST(0x0000000000000c00) +#define TLB0_V ASM_CONST(0x0000000000000200) +#define TLB0_X ASM_CONST(0x0000000000000100) +#define TLB0_SIZE_MASK ASM_CONST(0x00000000000000f0) +#define TLB0_SIZE_4K ASM_CONST(0x0000000000000010) +#define TLB0_SIZE_64K ASM_CONST(0x0000000000000030) +#define TLB0_SIZE_1M ASM_CONST(0x0000000000000050) +#define TLB0_SIZE_16M ASM_CONST(0x0000000000000070) +#define TLB0_SIZE_1G ASM_CONST(0x00000000000000a0) +#define TLB0_THDID_MASK ASM_CONST(0x000000000000000f) +#define TLB0_THDID_0 ASM_CONST(0x0000000000000001) +#define TLB0_THDID_1 ASM_CONST(0x0000000000000002) +#define TLB0_THDID_2 ASM_CONST(0x0000000000000004) +#define TLB0_THDID_3 ASM_CONST(0x0000000000000008) +#define TLB0_THDID_ALL ASM_CONST(0x000000000000000f) + +#define TLB1_RESVATTR ASM_CONST(0x00f0000000000000) +#define TLB1_U0 ASM_CONST(0x0008000000000000) +#define TLB1_U1 ASM_CONST(0x0004000000000000) +#define TLB1_U2 ASM_CONST(0x0002000000000000) +#define TLB1_U3 ASM_CONST(0x0001000000000000) +#define TLB1_R ASM_CONST(0x0000800000000000) +#define TLB1_C ASM_CONST(0x0000400000000000) +#define TLB1_RPN_MASK ASM_CONST(0x000003fffffff000) +#define TLB1_W ASM_CONST(0x0000000000000800) +#define TLB1_I ASM_CONST(0x0000000000000400) +#define TLB1_M ASM_CONST(0x0000000000000200) +#define TLB1_G ASM_CONST(0x0000000000000100) +#define TLB1_E ASM_CONST(0x0000000000000080) +#define TLB1_VF ASM_CONST(0x0000000000000040) +#define TLB1_UX ASM_CONST(0x0000000000000020) +#define TLB1_SX ASM_CONST(0x0000000000000010) +#define TLB1_UW ASM_CONST(0x0000000000000008) +#define TLB1_SW ASM_CONST(0x0000000000000004) +#define TLB1_UR ASM_CONST(0x0000000000000002) +#define TLB1_SR ASM_CONST(0x0000000000000001) + +#ifdef CONFIG_PPC_EARLY_DEBUG_WSP +#define WSP_UART_PHYS 0xffc000c000 +/* This needs to be careful chosen to hit a !0 congruence class + * in the TLB since we bolt it in way 3, which is already occupied + * by our linear mapping primary bolted entry in CC 0. 
+ */ +#define WSP_UART_VIRT 0xf000000000001000 +#endif + +/* A2 erativax attributes definitions */ +#define ERATIVAX_RS_IS_ALL 0x000 +#define ERATIVAX_RS_IS_TID 0x040 +#define ERATIVAX_RS_IS_CLASS 0x080 +#define ERATIVAX_RS_IS_FULLMATCH 0x0c0 +#define ERATIVAX_CLASS_00 0x000 +#define ERATIVAX_CLASS_01 0x010 +#define ERATIVAX_CLASS_10 0x020 +#define ERATIVAX_CLASS_11 0x030 +#define ERATIVAX_PSIZE_4K (TLB_PSIZE_4K >> 1) +#define ERATIVAX_PSIZE_64K (TLB_PSIZE_64K >> 1) +#define ERATIVAX_PSIZE_1M (TLB_PSIZE_1M >> 1) +#define ERATIVAX_PSIZE_16M (TLB_PSIZE_16M >> 1) +#define ERATIVAX_PSIZE_1G (TLB_PSIZE_1G >> 1) + +/* A2 eratilx attributes definitions */ +#define ERATILX_T_ALL 0 +#define ERATILX_T_TID 1 +#define ERATILX_T_TGS 2 +#define ERATILX_T_FULLMATCH 3 +#define ERATILX_T_CLASS0 4 +#define ERATILX_T_CLASS1 5 +#define ERATILX_T_CLASS2 6 +#define ERATILX_T_CLASS3 7 + +/* XUCR0 bits */ +#define XUCR0_TRACE_UM_T0 0x40000000 /* Thread 0 */ +#define XUCR0_TRACE_UM_T1 0x20000000 /* Thread 1 */ +#define XUCR0_TRACE_UM_T2 0x10000000 /* Thread 2 */ +#define XUCR0_TRACE_UM_T3 0x08000000 /* Thread 3 */ + +/* A2 CCR0 register */ +#define A2_CCR0_PME_DISABLED 0x00000000 +#define A2_CCR0_PME_SLEEP 0x40000000 +#define A2_CCR0_PME_RVW 0x80000000 +#define A2_CCR0_PME_DISABLED2 0xc0000000 + +/* A2 CCR2 register */ +#define A2_CCR2_ERAT_ONLY_MODE 0x00000001 +#define A2_CCR2_ENABLE_ICSWX 0x00000002 +#define A2_CCR2_ENABLE_PC 0x20000000 +#define A2_CCR2_ENABLE_TRACE 0x40000000 + +#endif /* __ASM_POWERPC_REG_A2_H__ */ diff --git a/trunk/arch/powerpc/include/asm/reg_booke.h b/trunk/arch/powerpc/include/asm/reg_booke.h index b316794aa2b5..0f0ad9fa01c1 100644 --- a/trunk/arch/powerpc/include/asm/reg_booke.h +++ b/trunk/arch/powerpc/include/asm/reg_booke.h @@ -27,10 +27,12 @@ #define MSR_CM (1<<31) /* Computation Mode (0=32-bit, 1=64-bit) */ #if defined(CONFIG_PPC_BOOK3E_64) +#define MSR_64BIT MSR_CM + #define MSR_ MSR_ME | MSR_CE -#define MSR_KERNEL MSR_ | MSR_CM +#define MSR_KERNEL MSR_ | MSR_64BIT #define MSR_USER32 MSR_ | MSR_PR | MSR_EE | MSR_DE -#define MSR_USER64 MSR_USER32 | MSR_CM | MSR_DE +#define MSR_USER64 MSR_USER32 | MSR_64BIT #elif defined (CONFIG_40x) #define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE) #define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE) @@ -81,6 +83,10 @@ #define SPRN_IVOR13 0x19D /* Interrupt Vector Offset Register 13 */ #define SPRN_IVOR14 0x19E /* Interrupt Vector Offset Register 14 */ #define SPRN_IVOR15 0x19F /* Interrupt Vector Offset Register 15 */ +#define SPRN_IVOR38 0x1B0 /* Interrupt Vector Offset Register 38 */ +#define SPRN_IVOR39 0x1B1 /* Interrupt Vector Offset Register 39 */ +#define SPRN_IVOR40 0x1B2 /* Interrupt Vector Offset Register 40 */ +#define SPRN_IVOR41 0x1B3 /* Interrupt Vector Offset Register 41 */ #define SPRN_SPEFSCR 0x200 /* SPE & Embedded FP Status & Control */ #define SPRN_BBEAR 0x201 /* Branch Buffer Entry Address Register */ #define SPRN_BBTAR 0x202 /* Branch Buffer Target Address Register */ diff --git a/trunk/arch/powerpc/include/asm/rtas.h b/trunk/arch/powerpc/include/asm/rtas.h index 9a1193e30f26..58625d1e7802 100644 --- a/trunk/arch/powerpc/include/asm/rtas.h +++ b/trunk/arch/powerpc/include/asm/rtas.h @@ -158,7 +158,50 @@ struct rtas_error_log { unsigned long target:4; /* Target of failed operation */ unsigned long type:8; /* General event or error*/ unsigned long extended_log_length:32; /* length in bytes */ - unsigned char buffer[1]; + unsigned char buffer[1]; /* Start of extended log */ + /* Variable length. 
*/ +}; + +#define RTAS_V6EXT_LOG_FORMAT_EVENT_LOG 14 + +#define RTAS_V6EXT_COMPANY_ID_IBM (('I' << 24) | ('B' << 16) | ('M' << 8)) + +/* RTAS general extended event log, Version 6. The extended log starts + * from "buffer" field of struct rtas_error_log defined above. + */ +struct rtas_ext_event_log_v6 { + /* Byte 0 */ + uint32_t log_valid:1; /* 1:Log valid */ + uint32_t unrecoverable_error:1; /* 1:Unrecoverable error */ + uint32_t recoverable_error:1; /* 1:recoverable (correctable */ + /* or successfully retried) */ + uint32_t degraded_operation:1; /* 1:Unrecoverable err, bypassed*/ + /* - degraded operation (e.g. */ + /* CPU or mem taken off-line) */ + uint32_t predictive_error:1; + uint32_t new_log:1; /* 1:"New" log (Always 1 for */ + /* data returned from RTAS */ + uint32_t big_endian:1; /* 1: Big endian */ + uint32_t :1; /* reserved */ + /* Byte 1 */ + uint32_t :8; /* reserved */ + /* Byte 2 */ + uint32_t powerpc_format:1; /* Set to 1 (indicating log is */ + /* in PowerPC format */ + uint32_t :3; /* reserved */ + uint32_t log_format:4; /* Log format indicator. Define */ + /* format used for byte 12-2047 */ + /* Byte 3 */ + uint32_t :8; /* reserved */ + /* Byte 4-11 */ + uint8_t reserved[8]; /* reserved */ + /* Byte 12-15 */ + uint32_t company_id; /* Company ID of the company */ + /* that defines the format for */ + /* the vendor specific log type */ + /* Byte 16-end of log */ + uint8_t vendor_log[1]; /* Start of vendor specific log */ + /* Variable length. */ }; /* diff --git a/trunk/arch/powerpc/include/asm/scom.h b/trunk/arch/powerpc/include/asm/scom.h new file mode 100644 index 000000000000..0cabfd7bc2d1 --- /dev/null +++ b/trunk/arch/powerpc/include/asm/scom.h @@ -0,0 +1,156 @@ +/* + * Copyright 2010 Benjamin Herrenschmidt, IBM Corp + * + * and David Gibson, IBM Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ASM_POWERPC_SCOM_H +#define _ASM_POWERPC_SCOM_H + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ +#ifdef CONFIG_PPC_SCOM + +/* + * The SCOM bus is a sideband bus used for accessing various internal + * registers of the processor or the chipset. The implementation details + * differ between processors and platforms, and the access method as + * well. + * + * This API allows "mapping" ranges of SCOM register numbers associated + * with a given SCOM controller. The latter must be represented by a + * device node, though some implementations might support NULL if there + * is no possible ambiguity + * + * Then, scom_read/scom_write can be used to access registers inside + * that range. The argument passed is a register number relative to + * the beginning of the range mapped.
+ */ + +typedef void *scom_map_t; + +/* Value for an invalid SCOM map */ +#define SCOM_MAP_INVALID (NULL) + +/* The scom_controller data structure is what the platform passes + * to the core code in scom_init; it provides the actual implementation + * of all the SCOM functions + */ +struct scom_controller { + scom_map_t (*map)(struct device_node *ctrl_dev, u64 reg, u64 count); + void (*unmap)(scom_map_t map); + + u64 (*read)(scom_map_t map, u32 reg); + void (*write)(scom_map_t map, u32 reg, u64 value); +}; + +extern const struct scom_controller *scom_controller; + +/** + * scom_init - Initialize the SCOM backend, called by the platform + * @controller: The platform SCOM controller + */ +static inline void scom_init(const struct scom_controller *controller) +{ + scom_controller = controller; +} + +/** + * scom_map_ok - Test if a SCOM mapping is successful + * @map: The result of scom_map to test + */ +static inline int scom_map_ok(scom_map_t map) +{ + return map != SCOM_MAP_INVALID; +} + +/** + * scom_map - Map a block of SCOM registers + * @ctrl_dev: Device node of the SCOM controller + * some implementations allow NULL here + * @reg: first SCOM register to map + * @count: Number of SCOM registers to map + */ + +static inline scom_map_t scom_map(struct device_node *ctrl_dev, + u64 reg, u64 count) +{ + return scom_controller->map(ctrl_dev, reg, count); +} + +/** + * scom_find_parent - Find the SCOM controller for a device + * @dev: OF node of the device + * + * This is not meant for general usage, but in combination with + * scom_map() allows mapping registers not represented by the + * device's own scom-reg property. Useful for applying HW workarounds + * on things not properly represented in the device-tree for example. + */ +struct device_node *scom_find_parent(struct device_node *dev); + + +/** + * scom_map_device - Map a device's block of SCOM registers + * @dev: OF node of the device + * @index: Register bank index (index in "scom-reg" property) + * + * This function will use the device-tree binding for SCOM which + * is to follow "scom-parent" properties until it finds a node with + * a "scom-controller" property to find the controller.
It will then + * use the "scom-reg" property which is made of reg/count pairs, + * each of them having a size defined by the controller's #scom-cells + * property + */ +extern scom_map_t scom_map_device(struct device_node *dev, int index); + + +/** + * scom_unmap - Unmap a block of SCOM registers + * @map: Result of scom_map is to be unmapped + */ +static inline void scom_unmap(scom_map_t map) +{ + if (scom_map_ok(map)) + scom_controller->unmap(map); +} + +/** + * scom_read - Read a SCOM register + * @map: Result of scom_map + * @reg: Register index within that map + */ +static inline u64 scom_read(scom_map_t map, u32 reg) +{ + return scom_controller->read(map, reg); +} + +/** + * scom_write - Write to a SCOM register + * @map: Result of scom_map + * @reg: Register index within that map + * @value: Value to write + */ +static inline void scom_write(scom_map_t map, u32 reg, u64 value) +{ + scom_controller->write(map, reg, value); +} + +#endif /* CONFIG_PPC_SCOM */ +#endif /* __ASSEMBLY__ */ +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_SCOM_H */ diff --git a/trunk/arch/powerpc/include/asm/smp.h b/trunk/arch/powerpc/include/asm/smp.h index a902a0d3ae0d..880b8c1e6e53 100644 --- a/trunk/arch/powerpc/include/asm/smp.h +++ b/trunk/arch/powerpc/include/asm/smp.h @@ -20,6 +20,7 @@ #include #include #include +#include #ifndef __ASSEMBLY__ @@ -29,14 +30,32 @@ #include extern int boot_cpuid; +extern int boot_cpu_count; extern void cpu_die(void); #ifdef CONFIG_SMP -extern void smp_send_debugger_break(int cpu); -extern void smp_message_recv(int); +struct smp_ops_t { + void (*message_pass)(int cpu, int msg); +#ifdef CONFIG_PPC_SMP_MUXED_IPI + void (*cause_ipi)(int cpu, unsigned long data); +#endif + int (*probe)(void); + int (*kick_cpu)(int nr); + void (*setup_cpu)(int nr); + void (*bringup_done)(void); + void (*take_timebase)(void); + void (*give_timebase)(void); + int (*cpu_disable)(void); + void (*cpu_die)(unsigned int nr); + int (*cpu_bootable)(unsigned int nr); +}; + +extern void smp_send_debugger_break(void); extern void start_secondary_resume(void); +extern void __devinit smp_generic_give_timebase(void); +extern void __devinit smp_generic_take_timebase(void); DECLARE_PER_CPU(unsigned int, cpu_pvr); @@ -93,13 +112,16 @@ extern int cpu_to_core_id(int cpu); #define PPC_MSG_CALL_FUNC_SINGLE 2 #define PPC_MSG_DEBUGGER_BREAK 3 -/* - * irq controllers that have dedicated ipis per message and don't - * need additional code in the action handler may use this - */ +/* for irq controllers that have dedicated ipis per message (4) */ extern int smp_request_message_ipi(int virq, int message); extern const char *smp_ipi_name[]; +/* for irq controllers with only a single ipi */ +extern void smp_muxed_ipi_set_data(int cpu, unsigned long data); +extern void smp_muxed_ipi_message_pass(int cpu, int msg); +extern void smp_muxed_ipi_resend(void); +extern irqreturn_t smp_ipi_demux(void); + void smp_init_iSeries(void); void smp_init_pSeries(void); void smp_init_cell(void); @@ -149,7 +171,7 @@ extern int smt_enabled_at_boot; extern int smp_mpic_probe(void); extern void smp_mpic_setup_cpu(int cpu); -extern void smp_generic_kick_cpu(int nr); +extern int smp_generic_kick_cpu(int nr); extern void smp_generic_give_timebase(void); extern void smp_generic_take_timebase(void); @@ -169,6 +191,8 @@ extern unsigned long __secondary_hold_spinloop; extern unsigned long __secondary_hold_acknowledge; extern char __secondary_hold; +extern irqreturn_t debug_ipi_action(int irq, void *data); + #endif /* __ASSEMBLY__ */ #endif /* 
__KERNEL__ */ diff --git a/trunk/arch/powerpc/include/asm/system.h b/trunk/arch/powerpc/include/asm/system.h index 5e474ddd2273..2dc595dda03b 100644 --- a/trunk/arch/powerpc/include/asm/system.h +++ b/trunk/arch/powerpc/include/asm/system.h @@ -219,8 +219,6 @@ extern int mem_init_done; /* set on boot once kmalloc can be called */ extern int init_bootmem_done; /* set once bootmem is available */ extern phys_addr_t memory_limit; extern unsigned long klimit; - -extern void *alloc_maybe_bootmem(size_t size, gfp_t mask); extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); extern int powersave_nap; /* set if nap mode can be used in idle loop */ diff --git a/trunk/arch/powerpc/include/asm/tlbflush.h b/trunk/arch/powerpc/include/asm/tlbflush.h index d50a380b2b6f..81143fcbd113 100644 --- a/trunk/arch/powerpc/include/asm/tlbflush.h +++ b/trunk/arch/powerpc/include/asm/tlbflush.h @@ -79,6 +79,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) #elif defined(CONFIG_PPC_STD_MMU_64) +#define MMU_NO_CONTEXT 0 + /* * TLB flushing for 64-bit hash-MMU CPUs */ diff --git a/trunk/arch/powerpc/include/asm/udbg.h b/trunk/arch/powerpc/include/asm/udbg.h index 11ae699135ba..58580e94a2bb 100644 --- a/trunk/arch/powerpc/include/asm/udbg.h +++ b/trunk/arch/powerpc/include/asm/udbg.h @@ -52,6 +52,7 @@ extern void __init udbg_init_44x_as1(void); extern void __init udbg_init_40x_realmode(void); extern void __init udbg_init_cpm(void); extern void __init udbg_init_usbgecko(void); +extern void __init udbg_init_wsp(void); #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_UDBG_H */ diff --git a/trunk/arch/powerpc/include/asm/uninorth.h b/trunk/arch/powerpc/include/asm/uninorth.h index ae9c899c8a6d..d12b11d7641e 100644 --- a/trunk/arch/powerpc/include/asm/uninorth.h +++ b/trunk/arch/powerpc/include/asm/uninorth.h @@ -60,7 +60,7 @@ * * Obviously, the GART is not cache coherent and so any change to it * must be flushed to memory (or maybe just make the GART space non - * cachable). AGP memory itself does't seem to be cache coherent neither. + * cachable). AGP memory itself doesn't seem to be cache coherent either. * * In order to invalidate the GART (which is probably necessary to inval * the bridge internal TLBs), the following sequence has to be written, diff --git a/trunk/arch/powerpc/include/asm/wsp.h b/trunk/arch/powerpc/include/asm/wsp.h new file mode 100644 index 000000000000..c7dc83088a33 --- /dev/null +++ b/trunk/arch/powerpc/include/asm/wsp.h @@ -0,0 +1,14 @@ +/* + * Copyright 2011 Michael Ellerman, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef __ASM_POWERPC_WSP_H +#define __ASM_POWERPC_WSP_H + +extern int wsp_get_chip_id(struct device_node *dn); + +#endif /* __ASM_POWERPC_WSP_H */ diff --git a/trunk/arch/powerpc/include/asm/xics.h b/trunk/arch/powerpc/include/asm/xics.h new file mode 100644 index 000000000000..b183a4062011 --- /dev/null +++ b/trunk/arch/powerpc/include/asm/xics.h @@ -0,0 +1,142 @@ +/* + * Common definitions across all variants of ICP and ICS interrupt + * controllers. + */ + +#ifndef _XICS_H +#define _XICS_H + +#include + +#define XICS_IPI 2 +#define XICS_IRQ_SPURIOUS 0 + +/* Want a priority other than 0. Various HW issues require this.
*/ +#define DEFAULT_PRIORITY 5 + +/* + * Mark IPIs as higher priority so we can take them inside interrupts that + * aren't marked IRQF_DISABLED + */ +#define IPI_PRIORITY 4 + +/* The least favored priority */ +#define LOWEST_PRIORITY 0xFF + +/* The number of priorities defined above */ +#define MAX_NUM_PRIORITIES 3 + +/* Native ICP */ +extern int icp_native_init(void); + +/* PAPR ICP */ +extern int icp_hv_init(void); + +/* ICP ops */ +struct icp_ops { + unsigned int (*get_irq)(void); + void (*eoi)(struct irq_data *d); + void (*set_priority)(unsigned char prio); + void (*teardown_cpu)(void); + void (*flush_ipi)(void); +#ifdef CONFIG_SMP + void (*cause_ipi)(int cpu, unsigned long data); + irq_handler_t ipi_action; +#endif +}; + +extern const struct icp_ops *icp_ops; + +/* Native ICS */ +extern int ics_native_init(void); + +/* RTAS ICS */ +extern int ics_rtas_init(void); + +/* ICS instance, hooked up to chip_data of an irq */ +struct ics { + struct list_head link; + int (*map)(struct ics *ics, unsigned int virq); + void (*mask_unknown)(struct ics *ics, unsigned long vec); + long (*get_server)(struct ics *ics, unsigned long vec); + int (*host_match)(struct ics *ics, struct device_node *node); + char data[]; +}; + +/* Commons */ +extern unsigned int xics_default_server; +extern unsigned int xics_default_distrib_server; +extern unsigned int xics_interrupt_server_size; +extern struct irq_host *xics_host; + +struct xics_cppr { + unsigned char stack[MAX_NUM_PRIORITIES]; + int index; +}; + +DECLARE_PER_CPU(struct xics_cppr, xics_cppr); + +static inline void xics_push_cppr(unsigned int vec) +{ + struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); + + if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1)) + return; + + if (vec == XICS_IPI) + os_cppr->stack[++os_cppr->index] = IPI_PRIORITY; + else + os_cppr->stack[++os_cppr->index] = DEFAULT_PRIORITY; +} + +static inline unsigned char xics_pop_cppr(void) +{ + struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); + + if (WARN_ON(os_cppr->index < 1)) + return LOWEST_PRIORITY; + + return os_cppr->stack[--os_cppr->index]; +} + +static inline void xics_set_base_cppr(unsigned char cppr) +{ + struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); + + /* we only really want to set the priority when there's + * just one cppr value on the stack + */ + WARN_ON(os_cppr->index != 0); + + os_cppr->stack[0] = cppr; +} + +static inline unsigned char xics_cppr_top(void) +{ + struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); + + return os_cppr->stack[os_cppr->index]; +} + +DECLARE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message); + +extern void xics_init(void); +extern void xics_setup_cpu(void); +extern void xics_update_irq_servers(void); +extern void xics_set_cpu_giq(unsigned int gserver, unsigned int join); +extern void xics_mask_unknown_vec(unsigned int vec); +extern irqreturn_t xics_ipi_dispatch(int cpu); +extern int xics_smp_probe(void); +extern void xics_register_ics(struct ics *ics); +extern void xics_teardown_cpu(void); +extern void xics_kexec_teardown_cpu(int secondary); +extern void xics_migrate_irqs_away(void); +#ifdef CONFIG_SMP +extern int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask, + unsigned int strict_check); +#else +#define xics_get_irq_server(virq, cpumask, strict_check) (xics_default_server) +#endif + + +#endif /* _XICS_H */ diff --git a/trunk/arch/powerpc/kernel/Makefile b/trunk/arch/powerpc/kernel/Makefile index 3bb2a3e6a337..9aab36312572 100644 --- a/trunk/arch/powerpc/kernel/Makefile +++
b/trunk/arch/powerpc/kernel/Makefile @@ -38,11 +38,14 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \ paca.o nvram_64.o firmware.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o +obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power7.o obj64-$(CONFIG_RELOCATABLE) += reloc_64.o obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o +obj-$(CONFIG_PPC_A2) += cpu_setup_a2.o obj-$(CONFIG_PPC64) += vdso64/ obj-$(CONFIG_ALTIVEC) += vecemu.o obj-$(CONFIG_PPC_970_NAP) += idle_power4.o +obj-$(CONFIG_PPC_P7_NAP) += idle_power7.o obj-$(CONFIG_PPC_OF) += of_platform.o prom_parse.o obj-$(CONFIG_PPC_CLOCK) += clock.o procfs-y := proc_powerpc.o @@ -75,7 +78,6 @@ obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o dbell.o obj-$(CONFIG_PPC_BOOK3E_64) += dbell.o extra-y := head_$(CONFIG_WORD_SIZE).o -extra-$(CONFIG_PPC_BOOK3E_32) := head_new_booke.o extra-$(CONFIG_40x) := head_40x.o extra-$(CONFIG_44x) := head_44x.o extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o @@ -103,6 +105,8 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o crash.o \ obj-$(CONFIG_AUDIT) += audit.o obj64-$(CONFIG_AUDIT) += compat_audit.o +obj-$(CONFIG_PPC_IO_WORKAROUNDS) += io-workarounds.o + obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o diff --git a/trunk/arch/powerpc/kernel/asm-offsets.c b/trunk/arch/powerpc/kernel/asm-offsets.c index 23e6a93145ab..6887661ac072 100644 --- a/trunk/arch/powerpc/kernel/asm-offsets.c +++ b/trunk/arch/powerpc/kernel/asm-offsets.c @@ -74,6 +74,7 @@ int main(void) DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context)); DEFINE(SIGSEGV, SIGSEGV); DEFINE(NMI_MASK, NMI_MASK); + DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr)); #else DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); #endif /* CONFIG_PPC64 */ diff --git a/trunk/arch/powerpc/kernel/cpu_setup_a2.S b/trunk/arch/powerpc/kernel/cpu_setup_a2.S new file mode 100644 index 000000000000..7f818feaa7a5 --- /dev/null +++ b/trunk/arch/powerpc/kernel/cpu_setup_a2.S @@ -0,0 +1,114 @@ +/* + * A2 specific assembly support code + * + * Copyright 2009 Ben Herrenschmidt, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include + +/* + * Disable thdid and class fields in ERATs to bump PID to full 14 bits capacity. + * This also prevents external LPID accesses but that isn't a problem when not a + * guest. Under PV, this setting will be ignored and MMUCR will return the right + * number of PID bits we can use. + */ +#define MMUCR1_EXTEND_PID \ + (MMUCR1_ICTID | MMUCR1_ITTID | MMUCR1_DCTID | \ + MMUCR1_DTTID | MMUCR1_DCCD) + +/* + * Use extended PIDs if enabled. + * Don't clear the ERATs on context sync events and enable I & D LRU. + * Enable ERAT back invalidate when tlbwe overwrites an entry. 
+ */ +#define INITIAL_MMUCR1 \ + (MMUCR1_EXTEND_PID | MMUCR1_CSINV_NEVER | MMUCR1_IRRE | \ + MMUCR1_DRRE | MMUCR1_TLBWE_BINV) + +_GLOBAL(__setup_cpu_a2) + /* Some of these are actually thread local and some are + * core local but doing it always won't hurt + */ + +#ifdef CONFIG_PPC_WSP_COPRO + /* Make sure ACOP starts out as zero */ + li r3,0 + mtspr SPRN_ACOP,r3 + + /* Enable icswx instruction */ + mfspr r3,SPRN_A2_CCR2 + ori r3,r3,A2_CCR2_ENABLE_ICSWX + mtspr SPRN_A2_CCR2,r3 + + /* Unmask all CTs in HACOP */ + li r3,-1 + mtspr SPRN_HACOP,r3 +#endif /* CONFIG_PPC_WSP_COPRO */ + + /* Enable doorbell */ + mfspr r3,SPRN_A2_CCR2 + oris r3,r3,A2_CCR2_ENABLE_PC@h + mtspr SPRN_A2_CCR2,r3 + isync + + /* Setup CCR0 to disable power saving for now as it's busted + * in the current implementations. Setup CCR1 to wake on + * interrupts normally (we write the default value but who + * knows what FW may have clobbered...) + */ + li r3,0 + mtspr SPRN_A2_CCR0, r3 + LOAD_REG_IMMEDIATE(r3,0x0f0f0f0f) + mtspr SPRN_A2_CCR1, r3 + + /* Initialise MMUCR1 */ + lis r3,INITIAL_MMUCR1@h + ori r3,r3,INITIAL_MMUCR1@l + mtspr SPRN_MMUCR1,r3 + + /* Set MMUCR2 to enable 4K, 64K, 1M, 16M and 1G pages */ + LOAD_REG_IMMEDIATE(r3, 0x000a7531) + mtspr SPRN_MMUCR2,r3 + + /* Set MMUCR3 to write all thids bit to the TLB */ + LOAD_REG_IMMEDIATE(r3, 0x0000000f) + mtspr SPRN_MMUCR3,r3 + + /* Don't do ERAT stuff if running guest mode */ + mfmsr r3 + andis. r0,r3,MSR_GS@h + bne 1f + + /* Now set the I-ERAT watermark to 15 */ + lis r4,(MMUCR0_TLBSEL_I|MMUCR0_ECL)@h + mtspr SPRN_MMUCR0, r4 + li r4,A2_IERAT_SIZE-1 + PPC_ERATWE(r4,r4,3) + + /* Now set the D-ERAT watermark to 31 */ + lis r4,(MMUCR0_TLBSEL_D|MMUCR0_ECL)@h + mtspr SPRN_MMUCR0, r4 + li r4,A2_DERAT_SIZE-1 + PPC_ERATWE(r4,r4,3) + + /* And invalidate the beast just in case. That won't get rid of + * a bolted entry though it will be in LRU and so will go away eventually + * but let's not bother for now + */ + PPC_ERATILX(0,0,0) +1: + blr + +_GLOBAL(__restore_cpu_a2) + b __setup_cpu_a2 diff --git a/trunk/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/trunk/arch/powerpc/kernel/cpu_setup_fsl_booke.S index 913611105c1f..8053db02b85e 100644 --- a/trunk/arch/powerpc/kernel/cpu_setup_fsl_booke.S +++ b/trunk/arch/powerpc/kernel/cpu_setup_fsl_booke.S @@ -88,6 +88,9 @@ _GLOBAL(__setup_cpu_e5500) bl __e500_dcache_setup #ifdef CONFIG_PPC_BOOK3E_64 bl .__setup_base_ivors + bl .setup_perfmon_ivor + bl .setup_doorbell_ivors + bl .setup_ehv_ivors #else bl __setup_e500mc_ivors #endif diff --git a/trunk/arch/powerpc/kernel/cpu_setup_power7.S b/trunk/arch/powerpc/kernel/cpu_setup_power7.S new file mode 100644 index 000000000000..4f9a93fcfe07 --- /dev/null +++ b/trunk/arch/powerpc/kernel/cpu_setup_power7.S @@ -0,0 +1,91 @@ +/* + * This file contains low level CPU setup functions. + * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + */ + +#include +#include +#include +#include +#include +#include + +/* Entry: r3 = crap, r4 = ptr to cputable entry + * + * Note that we can be called twice for pseudo-PVRs + */ +_GLOBAL(__setup_cpu_power7) + mflr r11 + bl __init_hvmode_206 + mtlr r11 + beqlr + li r0,0 + mtspr SPRN_LPID,r0 + bl __init_LPCR + bl __init_TLB + mtlr r11 + blr + +_GLOBAL(__restore_cpu_power7) + mflr r11 + mfmsr r3 + rldicl. r0,r3,4,63 + beqlr + li r0,0 + mtspr SPRN_LPID,r0 + bl __init_LPCR + bl __init_TLB + mtlr r11 + blr + +__init_hvmode_206: + /* Disable CPU_FTR_HVMODE_206 and exit if MSR:HV is not set */ + mfmsr r3 + rldicl. r0,r3,4,63 + bnelr + ld r5,CPU_SPEC_FEATURES(r4) + LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE_206) + xor r5,r5,r6 + std r5,CPU_SPEC_FEATURES(r4) + blr + +__init_LPCR: + /* Setup a sane LPCR: + * + * LPES = 0b01 (HSRR0/1 used for 0x500) + * PECE = 0b111 + * DPFD = 4 + * + * Other bits untouched for now + */ + mfspr r3,SPRN_LPCR + ori r3,r3,(LPCR_LPES0|LPCR_LPES1) + xori r3,r3, LPCR_LPES0 + ori r3,r3,(LPCR_PECE0|LPCR_PECE1|LPCR_PECE2) + li r5,7 + sldi r5,r5,LPCR_DPFD_SH + andc r3,r3,r5 + li r5,4 + sldi r5,r5,LPCR_DPFD_SH + or r3,r3,r5 + mtspr SPRN_LPCR,r3 + isync + blr + +__init_TLB: + /* Clear the TLB */ + li r6,128 + mtctr r6 + li r7,0xc00 /* IS field = 0b11 */ + ptesync +2: tlbiel r7 + addi r7,r7,0x1000 + bdnz 2b + ptesync +1: blr diff --git a/trunk/arch/powerpc/kernel/cputable.c b/trunk/arch/powerpc/kernel/cputable.c index c9b68d07ac4f..34d2722b9451 100644 --- a/trunk/arch/powerpc/kernel/cputable.c +++ b/trunk/arch/powerpc/kernel/cputable.c @@ -62,10 +62,12 @@ extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec* spec); +extern void __setup_cpu_a2(unsigned long offset, struct cpu_spec* spec); extern void __restore_cpu_pa6t(void); extern void __restore_cpu_ppc970(void); extern void __setup_cpu_power7(unsigned long offset, struct cpu_spec* spec); extern void __restore_cpu_power7(void); +extern void __restore_cpu_a2(void); #endif /* CONFIG_PPC64 */ #if defined(CONFIG_E500) extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec); @@ -199,7 +201,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER4 (gp)", .cpu_features = CPU_FTRS_POWER4, .cpu_user_features = COMMON_USER_POWER4, - .mmu_features = MMU_FTR_HPTE_TABLE, + .mmu_features = MMU_FTRS_POWER4, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -214,7 +216,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER4+ (gq)", .cpu_features = CPU_FTRS_POWER4, .cpu_user_features = COMMON_USER_POWER4, - .mmu_features = MMU_FTR_HPTE_TABLE, + .mmu_features = MMU_FTRS_POWER4, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -230,7 +232,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, - .mmu_features = MMU_FTR_HPTE_TABLE, + .mmu_features = MMU_FTRS_PPC970, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -248,7 +250,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, - .mmu_features = MMU_FTR_HPTE_TABLE, + .mmu_features = MMU_FTRS_PPC970, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -284,7 
+286,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, - .mmu_features = MMU_FTR_HPTE_TABLE, + .mmu_features = MMU_FTRS_PPC970, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -302,7 +304,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, - .mmu_features = MMU_FTR_HPTE_TABLE, + .mmu_features = MMU_FTRS_PPC970, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, @@ -318,7 +320,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER5 (gr)", .cpu_features = CPU_FTRS_POWER5, .cpu_user_features = COMMON_USER_POWER5, - .mmu_features = MMU_FTR_HPTE_TABLE, + .mmu_features = MMU_FTRS_POWER5, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, @@ -338,7 +340,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER5+ (gs)", .cpu_features = CPU_FTRS_POWER5, .cpu_user_features = COMMON_USER_POWER5_PLUS, - .mmu_features = MMU_FTR_HPTE_TABLE, + .mmu_features = MMU_FTRS_POWER5, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, @@ -354,7 +356,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER5+ (gs)", .cpu_features = CPU_FTRS_POWER5, .cpu_user_features = COMMON_USER_POWER5_PLUS, - .mmu_features = MMU_FTR_HPTE_TABLE, + .mmu_features = MMU_FTRS_POWER5, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, @@ -371,7 +373,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER5+", .cpu_features = CPU_FTRS_POWER5, .cpu_user_features = COMMON_USER_POWER5_PLUS, - .mmu_features = MMU_FTR_HPTE_TABLE, + .mmu_features = MMU_FTRS_POWER5, .icache_bsize = 128, .dcache_bsize = 128, .oprofile_cpu_type = "ppc64/ibm-compat-v1", @@ -385,7 +387,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_POWER6, .cpu_user_features = COMMON_USER_POWER6 | PPC_FEATURE_POWER6_EXT, - .mmu_features = MMU_FTR_HPTE_TABLE, + .mmu_features = MMU_FTRS_POWER6, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, @@ -404,7 +406,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER6 (architected)", .cpu_features = CPU_FTRS_POWER6, .cpu_user_features = COMMON_USER_POWER6, - .mmu_features = MMU_FTR_HPTE_TABLE, + .mmu_features = MMU_FTRS_POWER6, .icache_bsize = 128, .dcache_bsize = 128, .oprofile_cpu_type = "ppc64/ibm-compat-v1", @@ -417,12 +419,13 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER7 (architected)", .cpu_features = CPU_FTRS_POWER7, .cpu_user_features = COMMON_USER_POWER7, - .mmu_features = MMU_FTR_HPTE_TABLE | - MMU_FTR_TLBIE_206, + .mmu_features = MMU_FTRS_POWER7, .icache_bsize = 128, .dcache_bsize = 128, .oprofile_type = PPC_OPROFILE_POWER4, .oprofile_cpu_type = "ppc64/ibm-compat-v1", + .cpu_setup = __setup_cpu_power7, + .cpu_restore = __restore_cpu_power7, .platform = "power7", }, { /* Power7 */ @@ -431,14 +434,15 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER7 (raw)", .cpu_features = CPU_FTRS_POWER7, .cpu_user_features = COMMON_USER_POWER7, - .mmu_features = MMU_FTR_HPTE_TABLE | - MMU_FTR_TLBIE_206, + .mmu_features = MMU_FTRS_POWER7, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/power7", .oprofile_type = PPC_OPROFILE_POWER4, + .cpu_setup = __setup_cpu_power7, + .cpu_restore = __restore_cpu_power7, .platform = "power7", }, { /* Power7+ */ @@ -447,14 
+451,15 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER7+ (raw)", .cpu_features = CPU_FTRS_POWER7, .cpu_user_features = COMMON_USER_POWER7, - .mmu_features = MMU_FTR_HPTE_TABLE | - MMU_FTR_TLBIE_206, + .mmu_features = MMU_FTRS_POWER7, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/power7", .oprofile_type = PPC_OPROFILE_POWER4, + .cpu_setup = __setup_cpu_power7, + .cpu_restore = __restore_cpu_power7, .platform = "power7+", }, { /* Cell Broadband Engine */ @@ -465,7 +470,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_user_features = COMMON_USER_PPC64 | PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_SMT, - .mmu_features = MMU_FTR_HPTE_TABLE, + .mmu_features = MMU_FTRS_CELL, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 4, @@ -480,7 +485,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "PA6T", .cpu_features = CPU_FTRS_PA6T, .cpu_user_features = COMMON_USER_PA6T, - .mmu_features = MMU_FTR_HPTE_TABLE, + .mmu_features = MMU_FTRS_PA6T, .icache_bsize = 64, .dcache_bsize = 64, .num_pmcs = 6, @@ -497,7 +502,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_name = "POWER4 (compatible)", .cpu_features = CPU_FTRS_COMPATIBLE, .cpu_user_features = COMMON_USER_PPC64, - .mmu_features = MMU_FTR_HPTE_TABLE, + .mmu_features = MMU_FTRS_DEFAULT_HPTE_ARCH_V2, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, @@ -1973,7 +1978,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .pvr_mask = 0xffff0000, .pvr_value = 0x80240000, .cpu_name = "e5500", - .cpu_features = CPU_FTRS_E500MC, + .cpu_features = CPU_FTRS_E5500, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX, @@ -2005,7 +2010,22 @@ static struct cpu_spec __initdata cpu_specs[] = { #endif /* CONFIG_PPC32 */ #endif /* CONFIG_E500 */ -#ifdef CONFIG_PPC_BOOK3E_64 +#ifdef CONFIG_PPC_A2 + { /* Standard A2 (>= DD2) + FPU core */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00480000, + .cpu_name = "A2 (>= DD2)", + .cpu_features = CPU_FTRS_A2, + .cpu_user_features = COMMON_USER_PPC64, + .mmu_features = MMU_FTRS_A2, + .icache_bsize = 64, + .dcache_bsize = 64, + .num_pmcs = 0, + .cpu_setup = __setup_cpu_a2, + .cpu_restore = __restore_cpu_a2, + .machine_check = machine_check_generic, + .platform = "ppca2", + }, { /* This is a default entry to get going, to be replaced by * a real one at some stage */ @@ -2026,7 +2046,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check = machine_check_generic, .platform = "power6", }, -#endif +#endif /* CONFIG_PPC_A2 */ }; static struct cpu_spec the_cpu_spec; diff --git a/trunk/arch/powerpc/kernel/crash.c b/trunk/arch/powerpc/kernel/crash.c index 3d3d416339dd..4e6ee944495a 100644 --- a/trunk/arch/powerpc/kernel/crash.c +++ b/trunk/arch/powerpc/kernel/crash.c @@ -64,9 +64,9 @@ void crash_ipi_callback(struct pt_regs *regs) return; hard_irq_disable(); - if (!cpu_isset(cpu, cpus_in_crash)) + if (!cpumask_test_cpu(cpu, &cpus_in_crash)) crash_save_cpu(regs, cpu); - cpu_set(cpu, cpus_in_crash); + cpumask_set_cpu(cpu, &cpus_in_crash); /* * Entered via soft-reset - could be the kdump @@ -77,8 +77,8 @@ void crash_ipi_callback(struct pt_regs *regs) * Tell the kexec CPU that entered via soft-reset and ready * to go down. 
*/ - if (cpu_isset(cpu, cpus_in_sr)) { - cpu_clear(cpu, cpus_in_sr); + if (cpumask_test_cpu(cpu, &cpus_in_sr)) { + cpumask_clear_cpu(cpu, &cpus_in_sr); atomic_inc(&enter_on_soft_reset); } @@ -87,7 +87,7 @@ void crash_ipi_callback(struct pt_regs *regs) * This barrier is needed to make sure that all CPUs are stopped. * If not, soft-reset will be invoked to bring other CPUs. */ - while (!cpu_isset(crashing_cpu, cpus_in_crash)) + while (!cpumask_test_cpu(crashing_cpu, &cpus_in_crash)) cpu_relax(); if (ppc_md.kexec_cpu_down) @@ -109,7 +109,7 @@ static void crash_soft_reset_check(int cpu) { unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */ - cpu_clear(cpu, cpus_in_sr); + cpumask_clear_cpu(cpu, &cpus_in_sr); while (atomic_read(&enter_on_soft_reset) != ncpus) cpu_relax(); } @@ -132,7 +132,7 @@ static void crash_kexec_prepare_cpus(int cpu) */ printk(KERN_EMERG "Sending IPI to other cpus...\n"); msecs = 10000; - while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) { + while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) { cpu_relax(); mdelay(1); } @@ -144,54 +144,24 @@ static void crash_kexec_prepare_cpus(int cpu) * user to do soft reset such that we get all. * Soft-reset will be used until better mechanism is implemented. */ - if (cpus_weight(cpus_in_crash) < ncpus) { + if (cpumask_weight(&cpus_in_crash) < ncpus) { printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n", - ncpus - cpus_weight(cpus_in_crash)); + ncpus - cpumask_weight(&cpus_in_crash)); printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n"); - cpus_in_sr = CPU_MASK_NONE; + cpumask_clear(&cpus_in_sr); atomic_set(&enter_on_soft_reset, 0); - while (cpus_weight(cpus_in_crash) < ncpus) + while (cpumask_weight(&cpus_in_crash) < ncpus) cpu_relax(); } /* * Make sure all CPUs are entered via soft-reset if the kdump is * invoked using soft-reset. */ - if (cpu_isset(cpu, cpus_in_sr)) + if (cpumask_test_cpu(cpu, &cpus_in_sr)) crash_soft_reset_check(cpu); /* Leave the IPI callback set */ } -/* wait for all the CPUs to hit real mode but timeout if they don't come in */ -#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP) -static void crash_kexec_wait_realmode(int cpu) -{ - unsigned int msecs; - int i; - - msecs = 10000; - for (i=0; i < NR_CPUS && msecs > 0; i++) { - if (i == cpu) - continue; - - while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) { - barrier(); - if (!cpu_possible(i)) { - break; - } - if (!cpu_online(i)) { - break; - } - msecs--; - mdelay(1); - } - } - mb(); -} -#else -static inline void crash_kexec_wait_realmode(int cpu) {} -#endif - /* * This function will be called by secondary cpus or by kexec cpu * if soft-reset is activated to stop some CPUs. @@ -212,7 +182,7 @@ void crash_kexec_secondary(struct pt_regs *regs) * exited using 'x'(exit and recover) or * kexec_should_crash() failed for all running tasks. */ - cpu_clear(cpu, cpus_in_sr); + cpumask_clear_cpu(cpu, &cpus_in_sr); local_irq_restore(flags); return; } @@ -226,7 +196,7 @@ void crash_kexec_secondary(struct pt_regs *regs) * then start kexec boot. */ crash_soft_reset_check(cpu); - cpu_set(crashing_cpu, cpus_in_crash); + cpumask_set_cpu(crashing_cpu, &cpus_in_crash); if (ppc_md.kexec_cpu_down) ppc_md.kexec_cpu_down(1, 0); machine_kexec(kexec_crash_image); @@ -235,7 +205,8 @@ void crash_kexec_secondary(struct pt_regs *regs) crash_ipi_callback(regs); } -#else +#else /* ! 
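The crash.c changes above convert the old cpu_isset()/cpu_set()/cpu_clear()/cpus_weight() calls to the cpumask_test_cpu()/cpumask_set_cpu()/cpumask_clear_cpu()/cpumask_weight() accessors, which take a pointer to the mask rather than the whole mask by value. A rough user-space model of that accessor shape (a fixed-size bitmap plus helpers; names carry a _sketch suffix to make clear they are not the kernel's) is:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS_SKETCH 64
#define BITS_PER_LONG  (CHAR_BIT * sizeof(unsigned long))
#define MASK_LONGS     ((NR_CPUS_SKETCH + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct cpumask_sketch { unsigned long bits[MASK_LONGS]; };

static bool cpumask_test_cpu_sketch(int cpu, const struct cpumask_sketch *m)
{
	return (m->bits[cpu / BITS_PER_LONG] >> (cpu % BITS_PER_LONG)) & 1UL;
}

static void cpumask_set_cpu_sketch(int cpu, struct cpumask_sketch *m)
{
	m->bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
}

static void cpumask_clear_cpu_sketch(int cpu, struct cpumask_sketch *m)
{
	m->bits[cpu / BITS_PER_LONG] &= ~(1UL << (cpu % BITS_PER_LONG));
}

static int cpumask_weight_sketch(const struct cpumask_sketch *m)
{
	int cpu, w = 0;

	for (cpu = 0; cpu < NR_CPUS_SKETCH; cpu++)
		w += cpumask_test_cpu_sketch(cpu, m);
	return w;
}

int main(void)
{
	struct cpumask_sketch cpus_in_crash = { { 0 } };

	cpumask_set_cpu_sketch(0, &cpus_in_crash);
	cpumask_set_cpu_sketch(3, &cpus_in_crash);
	cpumask_clear_cpu_sketch(3, &cpus_in_crash);
	printf("weight=%d, cpu0 set=%d\n",
	       cpumask_weight_sketch(&cpus_in_crash),
	       (int)cpumask_test_cpu_sketch(0, &cpus_in_crash));
	return 0;
}

Passing pointers matters once NR_CPUS grows large: the mask is no longer copied onto the stack at every call, and the same call sites keep working when masks are allocated off-stack.
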
CONFIG_SMP */ + static void crash_kexec_prepare_cpus(int cpu) { /* @@ -253,9 +224,39 @@ static void crash_kexec_prepare_cpus(int cpu) void crash_kexec_secondary(struct pt_regs *regs) { - cpus_in_sr = CPU_MASK_NONE; + cpumask_clear(&cpus_in_sr); } -#endif +#endif /* CONFIG_SMP */ + +/* wait for all the CPUs to hit real mode but timeout if they don't come in */ +#if defined(CONFIG_SMP) && defined(CONFIG_PPC_STD_MMU_64) +static void crash_kexec_wait_realmode(int cpu) +{ + unsigned int msecs; + int i; + + msecs = 10000; + for (i=0; i < nr_cpu_ids && msecs > 0; i++) { + if (i == cpu) + continue; + + while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) { + barrier(); + if (!cpu_possible(i)) { + break; + } + if (!cpu_online(i)) { + break; + } + msecs--; + mdelay(1); + } + } + mb(); +} +#else +static inline void crash_kexec_wait_realmode(int cpu) {} +#endif /* CONFIG_SMP && CONFIG_PPC_STD_MMU_64 */ /* * Register a function to be called on shutdown. Only use this if you @@ -345,7 +346,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs) crashing_cpu = smp_processor_id(); crash_save_cpu(regs, crashing_cpu); crash_kexec_prepare_cpus(crashing_cpu); - cpu_set(crashing_cpu, cpus_in_crash); + cpumask_set_cpu(crashing_cpu, &cpus_in_crash); crash_kexec_wait_realmode(crashing_cpu); machine_kexec_mask_interrupts(); diff --git a/trunk/arch/powerpc/kernel/dbell.c b/trunk/arch/powerpc/kernel/dbell.c index 3307a52d797f..2cc451aaaca7 100644 --- a/trunk/arch/powerpc/kernel/dbell.c +++ b/trunk/arch/powerpc/kernel/dbell.c @@ -13,84 +13,35 @@ #include #include #include -#include +#include #include #include #ifdef CONFIG_SMP -struct doorbell_cpu_info { - unsigned long messages; /* current messages bits */ - unsigned int tag; /* tag value */ -}; - -static DEFINE_PER_CPU(struct doorbell_cpu_info, doorbell_cpu_info); - void doorbell_setup_this_cpu(void) { - struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info); + unsigned long tag = mfspr(SPRN_PIR) & 0x3fff; - info->messages = 0; - info->tag = mfspr(SPRN_PIR) & 0x3fff; + smp_muxed_ipi_set_data(smp_processor_id(), tag); } -void doorbell_message_pass(int target, int msg) +void doorbell_cause_ipi(int cpu, unsigned long data) { - struct doorbell_cpu_info *info; - int i; - - if (target < NR_CPUS) { - info = &per_cpu(doorbell_cpu_info, target); - set_bit(msg, &info->messages); - ppc_msgsnd(PPC_DBELL, 0, info->tag); - } - else if (target == MSG_ALL_BUT_SELF) { - for_each_online_cpu(i) { - if (i == smp_processor_id()) - continue; - info = &per_cpu(doorbell_cpu_info, i); - set_bit(msg, &info->messages); - ppc_msgsnd(PPC_DBELL, 0, info->tag); - } - } - else { /* target == MSG_ALL */ - for_each_online_cpu(i) { - info = &per_cpu(doorbell_cpu_info, i); - set_bit(msg, &info->messages); - } - ppc_msgsnd(PPC_DBELL, PPC_DBELL_MSG_BRDCAST, 0); - } + ppc_msgsnd(PPC_DBELL, 0, data); } void doorbell_exception(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); - struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info); - int msg; - /* Warning: regs can be NULL when called from irq enable */ + irq_enter(); - if (!info->messages || (num_online_cpus() < 2)) - goto out; + smp_ipi_demux(); - for (msg = 0; msg < 4; msg++) - if (test_and_clear_bit(msg, &info->messages)) - smp_message_recv(msg); - -out: + irq_exit(); set_irq_regs(old_regs); } - -void doorbell_check_self(void) -{ - struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info); - - if (!info->messages) - return; - - ppc_msgsnd(PPC_DBELL, 0, info->tag); -} - #else /* CONFIG_SMP */ 
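The dbell.c rewrite above deletes the driver's private per-CPU message word: the doorbell tag (derived from SPRN_PIR) is handed to the generic layer once via smp_muxed_ipi_set_data(), doorbell_cause_ipi() just issues msgsnd with that tag, and the exception handler calls smp_ipi_demux() to fan the pending messages back out. The sketch below models only the mux/demux idea in user space; the function names, message set and per-CPU array are invented for illustration.

#include <stdatomic.h>
#include <stdio.h>

enum ipi_msg { MSG_CALL_FUNCTION, MSG_RESCHEDULE, MSG_DEBUGGER_BREAK, MSG_MAX };

#define MAX_CPUS 4
static atomic_ulong ipi_message[MAX_CPUS];  /* one pending-message word per CPU */

/* Sender: record which logical message is pending, then ring the doorbell. */
static void muxed_ipi_send(int cpu, enum ipi_msg msg)
{
	atomic_fetch_or(&ipi_message[cpu], 1UL << msg);
	/* the real code would now execute msgsnd with the target CPU's tag */
}

/* Receiver: one doorbell exception demultiplexes every pending message. */
static void muxed_ipi_demux(int cpu)
{
	unsigned long pending = atomic_exchange(&ipi_message[cpu], 0UL);
	int msg;

	for (msg = 0; msg < MSG_MAX; msg++)
		if (pending & (1UL << msg))
			printf("cpu%d: handling message %d\n", cpu, msg);
}

int main(void)
{
	muxed_ipi_send(1, MSG_RESCHEDULE);
	muxed_ipi_send(1, MSG_CALL_FUNCTION);
	muxed_ipi_demux(1);     /* a single doorbell delivers both messages */
	return 0;
}

The point of muxing is that one physical doorbell per target can carry any combination of logical IPI messages, so the doorbell driver itself no longer needs to know the message list.
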
void doorbell_exception(struct pt_regs *regs) { diff --git a/trunk/arch/powerpc/kernel/entry_64.S b/trunk/arch/powerpc/kernel/entry_64.S index d82878c4daa6..d834425186ae 100644 --- a/trunk/arch/powerpc/kernel/entry_64.S +++ b/trunk/arch/powerpc/kernel/entry_64.S @@ -421,6 +421,12 @@ BEGIN_FTR_SECTION std r24,THREAD_VRSAVE(r3) END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif /* CONFIG_ALTIVEC */ +#ifdef CONFIG_PPC64 +BEGIN_FTR_SECTION + mfspr r25,SPRN_DSCR + std r25,THREAD_DSCR(r3) +END_FTR_SECTION_IFSET(CPU_FTR_DSCR) +#endif and. r0,r0,r22 beq+ 1f andc r22,r22,r0 @@ -462,10 +468,10 @@ BEGIN_FTR_SECTION FTR_SECTION_ELSE_NESTED(95) clrrdi r6,r8,40 /* get its 1T ESID */ clrrdi r9,r1,40 /* get current sp 1T ESID */ - ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_1T_SEGMENT, 95) + ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_1T_SEGMENT, 95) FTR_SECTION_ELSE b 2f -ALT_FTR_SECTION_END_IFSET(CPU_FTR_SLB) +ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_SLB) clrldi. r0,r6,2 /* is new ESID c00000000? */ cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */ cror eq,4*cr1+eq,eq @@ -479,7 +485,7 @@ BEGIN_FTR_SECTION li r9,MMU_SEGSIZE_1T /* insert B field */ oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0 -END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) +END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) /* Update the last bolted SLB. No write barriers are needed * here, provided we only update the current CPU's SLB shadow @@ -491,7 +497,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */ std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */ - /* No need to check for CPU_FTR_NO_SLBIE_B here, since when + /* No need to check for MMU_FTR_NO_SLBIE_B here, since when * we have 1TB segments, the only CPUs known to have the errata * only support less than 1TB of system memory and we'll never * actually hit this code path. 
@@ -522,6 +528,15 @@ BEGIN_FTR_SECTION mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif /* CONFIG_ALTIVEC */ +#ifdef CONFIG_PPC64 +BEGIN_FTR_SECTION + ld r0,THREAD_DSCR(r4) + cmpd r0,r25 + beq 1f + mtspr SPRN_DSCR,r0 +1: +END_FTR_SECTION_IFSET(CPU_FTR_DSCR) +#endif /* r3-r13 are destroyed -- Cort */ REST_8GPRS(14, r1) @@ -838,7 +853,7 @@ _GLOBAL(enter_rtas) _STATIC(rtas_return_loc) /* relocation is off at this point */ - mfspr r4,SPRN_SPRG_PACA /* Get PACA */ + GET_PACA(r4) clrldi r4,r4,2 /* convert to realmode address */ bcl 20,31,$+4 @@ -869,7 +884,7 @@ _STATIC(rtas_restore_regs) REST_8GPRS(14, r1) /* Restore the non-volatiles */ REST_10GPRS(22, r1) /* ditto */ - mfspr r13,SPRN_SPRG_PACA + GET_PACA(r13) ld r4,_CCR(r1) mtcr r4 diff --git a/trunk/arch/powerpc/kernel/exceptions-64e.S b/trunk/arch/powerpc/kernel/exceptions-64e.S index 9651acc3504a..d24d4400cc79 100644 --- a/trunk/arch/powerpc/kernel/exceptions-64e.S +++ b/trunk/arch/powerpc/kernel/exceptions-64e.S @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -252,9 +253,6 @@ exception_marker: .balign 0x1000 .globl interrupt_base_book3e interrupt_base_book3e: /* fake trap */ - /* Note: If real debug exceptions are supported by the HW, the vector - * below will have to be patched up to point to an appropriate handler - */ EXCEPTION_STUB(0x000, machine_check) /* 0x0200 */ EXCEPTION_STUB(0x020, critical_input) /* 0x0580 */ EXCEPTION_STUB(0x040, debug_crit) /* 0x0d00 */ @@ -271,8 +269,13 @@ interrupt_base_book3e: /* fake trap */ EXCEPTION_STUB(0x1a0, watchdog) /* 0x09f0 */ EXCEPTION_STUB(0x1c0, data_tlb_miss) EXCEPTION_STUB(0x1e0, instruction_tlb_miss) + EXCEPTION_STUB(0x260, perfmon) EXCEPTION_STUB(0x280, doorbell) EXCEPTION_STUB(0x2a0, doorbell_crit) + EXCEPTION_STUB(0x2c0, guest_doorbell) + EXCEPTION_STUB(0x2e0, guest_doorbell_crit) + EXCEPTION_STUB(0x300, hypercall) + EXCEPTION_STUB(0x320, ehpriv) .globl interrupt_end_book3e interrupt_end_book3e: @@ -454,6 +457,70 @@ interrupt_end_book3e: kernel_dbg_exc: b . /* NYI */ +/* Debug exception as a debug interrupt*/ + START_EXCEPTION(debug_debug); + DBG_EXCEPTION_PROLOG(0xd00, PROLOG_ADDITION_2REGS) + + /* + * If there is a single step or branch-taken exception in an + * exception entry sequence, it was probably meant to apply to + * the code where the exception occurred (since exception entry + * doesn't turn off DE automatically). We simulate the effect + * of turning off DE on entry to an exception handler by turning + * off DE in the DSRR1 value and clearing the debug status. + */ + + mfspr r14,SPRN_DBSR /* check single-step/branch taken */ + andis. r15,r14,DBSR_IC@h + beq+ 1f + + LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) + LOAD_REG_IMMEDIATE(r15,interrupt_end_book3e) + cmpld cr0,r10,r14 + cmpld cr1,r10,r15 + blt+ cr0,1f + bge+ cr1,1f + + /* here it looks like we got an inappropriate debug exception. 
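The entry_64.S hunks above save the outgoing task's DSCR into its thread struct at context-switch time and restore the incoming task's value only when it differs from what the register already holds, so the relatively slow mtspr is skipped in the common case. Reduced to plain C with made-up accessors, the pattern is just:

#include <stdio.h>

struct thread_sketch { unsigned long dscr; };

static unsigned long dscr_reg;      /* models the DSCR special-purpose register */
static int mtspr_count;             /* counts how often we touch the register */

static void write_dscr(unsigned long v)
{
	dscr_reg = v;
	mtspr_count++;
}

static void switch_dscr(struct thread_sketch *prev, struct thread_sketch *next)
{
	prev->dscr = dscr_reg;          /* save the outgoing task's value */
	if (next->dscr != prev->dscr)   /* only write the SPR when it changes */
		write_dscr(next->dscr);
}

int main(void)
{
	struct thread_sketch a = { 0 }, b = { 0 };

	switch_dscr(&a, &b);            /* identical values: no register write */
	b.dscr = 16;
	switch_dscr(&a, &b);            /* different value: exactly one write */
	printf("mtspr issued %d time(s)\n", mtspr_count);
	return 0;
}
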
*/ + lis r14,DBSR_IC@h /* clear the IC event */ + rlwinm r11,r11,0,~MSR_DE /* clear DE in the DSRR1 value */ + mtspr SPRN_DBSR,r14 + mtspr SPRN_DSRR1,r11 + lwz r10,PACA_EXDBG+EX_CR(r13) /* restore registers */ + ld r1,PACA_EXDBG+EX_R1(r13) + ld r14,PACA_EXDBG+EX_R14(r13) + ld r15,PACA_EXDBG+EX_R15(r13) + mtcr r10 + ld r10,PACA_EXDBG+EX_R10(r13) /* restore registers */ + ld r11,PACA_EXDBG+EX_R11(r13) + mfspr r13,SPRN_SPRG_DBG_SCRATCH + rfdi + + /* Normal debug exception */ + /* XXX We only handle coming from userspace for now since we can't + * quite save properly an interrupted kernel state yet + */ +1: andi. r14,r11,MSR_PR; /* check for userspace again */ + beq kernel_dbg_exc; /* if from kernel mode */ + + /* Now we mash up things to make it look like we are coming on a + * normal exception + */ + mfspr r15,SPRN_SPRG_DBG_SCRATCH + mtspr SPRN_SPRG_GEN_SCRATCH,r15 + mfspr r14,SPRN_DBSR + EXCEPTION_COMMON(0xd00, PACA_EXDBG, INTS_DISABLE_ALL) + std r14,_DSISR(r1) + addi r3,r1,STACK_FRAME_OVERHEAD + mr r4,r14 + ld r14,PACA_EXDBG+EX_R14(r13) + ld r15,PACA_EXDBG+EX_R15(r13) + bl .save_nvgprs + bl .DebugException + b .ret_from_except + + MASKABLE_EXCEPTION(0x260, perfmon, .performance_monitor_exception, ACK_NONE) + /* Doorbell interrupt */ MASKABLE_EXCEPTION(0x2070, doorbell, .doorbell_exception, ACK_NONE) @@ -468,6 +535,11 @@ kernel_dbg_exc: // b ret_from_crit_except b . + MASKABLE_EXCEPTION(0x2c0, guest_doorbell, .unknown_exception, ACK_NONE) + MASKABLE_EXCEPTION(0x2e0, guest_doorbell_crit, .unknown_exception, ACK_NONE) + MASKABLE_EXCEPTION(0x310, hypercall, .unknown_exception, ACK_NONE) + MASKABLE_EXCEPTION(0x320, ehpriv, .unknown_exception, ACK_NONE) + /* * An interrupt came in while soft-disabled; clear EE in SRR1, @@ -587,7 +659,12 @@ fast_exception_return: BAD_STACK_TRAMPOLINE(0x000) BAD_STACK_TRAMPOLINE(0x100) BAD_STACK_TRAMPOLINE(0x200) +BAD_STACK_TRAMPOLINE(0x260) +BAD_STACK_TRAMPOLINE(0x2c0) +BAD_STACK_TRAMPOLINE(0x2e0) BAD_STACK_TRAMPOLINE(0x300) +BAD_STACK_TRAMPOLINE(0x310) +BAD_STACK_TRAMPOLINE(0x320) BAD_STACK_TRAMPOLINE(0x400) BAD_STACK_TRAMPOLINE(0x500) BAD_STACK_TRAMPOLINE(0x600) @@ -864,8 +941,23 @@ have_hes: * that will have to be made dependent on whether we are running under * a hypervisor I suppose. */ - ori r3,r3,MAS0_HES | MAS0_WQ_ALLWAYS - mtspr SPRN_MAS0,r3 + + /* BEWARE, MAGIC + * This code is called as an ordinary function on the boot CPU. But to + * avoid duplication, this code is also used in SCOM bringup of + * secondary CPUs. We read the code between the initial_tlb_code_start + * and initial_tlb_code_end labels one instruction at a time and RAM it + * into the new core via SCOM. That doesn't process branches, so there + * must be none between those two labels. It also means if this code + * ever takes any parameters, the SCOM code must also be updated to + * provide them. 
+ */ + .globl a2_tlbinit_code_start +a2_tlbinit_code_start: + + ori r11,r3,MAS0_WQ_ALLWAYS + oris r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */ + mtspr SPRN_MAS0,r11 lis r3,(MAS1_VALID | MAS1_IPROT)@h ori r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT mtspr SPRN_MAS1,r3 @@ -879,18 +971,86 @@ have_hes: /* Write the TLB entry */ tlbwe + .globl a2_tlbinit_after_linear_map +a2_tlbinit_after_linear_map: + /* Now we branch the new virtual address mapped by this entry */ LOAD_REG_IMMEDIATE(r3,1f) mtctr r3 bctr 1: /* We are now running at PAGE_OFFSET, clean the TLB of everything - * else (XXX we should scan for bolted crap from the firmware too) + * else (including IPROTed things left by firmware) + * r4 = TLBnCFG + * r3 = current address (more or less) */ + + li r5,0 + mtspr SPRN_MAS6,r5 + tlbsx 0,r3 + + rlwinm r9,r4,0,TLBnCFG_N_ENTRY + rlwinm r10,r4,8,0xff + addi r10,r10,-1 /* Get inner loop mask */ + + li r3,1 + + mfspr r5,SPRN_MAS1 + rlwinm r5,r5,0,(~(MAS1_VALID|MAS1_IPROT)) + + mfspr r6,SPRN_MAS2 + rldicr r6,r6,0,51 /* Extract EPN */ + + mfspr r7,SPRN_MAS0 + rlwinm r7,r7,0,0xffff0fff /* Clear HES and WQ */ + + rlwinm r8,r7,16,0xfff /* Extract ESEL */ + +2: add r4,r3,r8 + and r4,r4,r10 + + rlwimi r7,r4,16,MAS0_ESEL_MASK + + mtspr SPRN_MAS0,r7 + mtspr SPRN_MAS1,r5 + mtspr SPRN_MAS2,r6 + tlbwe + + addi r3,r3,1 + and. r4,r3,r10 + + bne 3f + addis r6,r6,(1<<30)@h +3: + cmpw r3,r9 + blt 2b + + .globl a2_tlbinit_after_iprot_flush +a2_tlbinit_after_iprot_flush: + +#ifdef CONFIG_PPC_EARLY_DEBUG_WSP + /* Now establish early debug mappings if applicable */ + /* Restore the MAS0 we used for linear mapping load */ + mtspr SPRN_MAS0,r11 + + lis r3,(MAS1_VALID | MAS1_IPROT)@h + ori r3,r3,(BOOK3E_PAGESZ_4K << MAS1_TSIZE_SHIFT) + mtspr SPRN_MAS1,r3 + LOAD_REG_IMMEDIATE(r3, WSP_UART_VIRT | MAS2_I | MAS2_G) + mtspr SPRN_MAS2,r3 + LOAD_REG_IMMEDIATE(r3, WSP_UART_PHYS | MAS3_SR | MAS3_SW) + mtspr SPRN_MAS7_MAS3,r3 + /* re-use the MAS8 value from the linear mapping */ + tlbwe +#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */ + PPC_TLBILX(0,0,0) sync isync + .globl a2_tlbinit_code_end +a2_tlbinit_code_end: + /* We translate LR and return */ mflr r3 tovirt(r3,r3) @@ -1040,3 +1200,33 @@ _GLOBAL(__setup_base_ivors) sync blr + +_GLOBAL(setup_perfmon_ivor) + SET_IVOR(35, 0x260) /* Performance Monitor */ + blr + +_GLOBAL(setup_doorbell_ivors) + SET_IVOR(36, 0x280) /* Processor Doorbell */ + SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */ + + /* Check MMUCFG[LPIDSIZE] to determine if we have category E.HV */ + mfspr r10,SPRN_MMUCFG + rlwinm. r10,r10,0,MMUCFG_LPIDSIZE + beqlr + + SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */ + SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */ + blr + +_GLOBAL(setup_ehv_ivors) + /* + * We may be running as a guest and lack E.HV even on a chip + * that normally has it. + */ + mfspr r10,SPRN_MMUCFG + rlwinm. 
r10,r10,0,MMUCFG_LPIDSIZE + beqlr + + SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */ + SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */ + blr diff --git a/trunk/arch/powerpc/kernel/exceptions-64s.S b/trunk/arch/powerpc/kernel/exceptions-64s.S index aeb739e18769..a85f4874cba7 100644 --- a/trunk/arch/powerpc/kernel/exceptions-64s.S +++ b/trunk/arch/powerpc/kernel/exceptions-64s.S @@ -37,23 +37,51 @@ .globl __start_interrupts __start_interrupts: - STD_EXCEPTION_PSERIES(0x100, system_reset) + .globl system_reset_pSeries; +system_reset_pSeries: + HMT_MEDIUM; + DO_KVM 0x100; + SET_SCRATCH0(r13) +#ifdef CONFIG_PPC_P7_NAP +BEGIN_FTR_SECTION + /* Running native on arch 2.06 or later, check if we are + * waking up from nap. We only handle no state loss and + * supervisor state loss. We do -not- handle hypervisor + * state loss at this time. + */ + mfspr r13,SPRN_SRR1 + rlwinm r13,r13,47-31,30,31 + cmpwi cr0,r13,1 + bne 1f + b .power7_wakeup_noloss +1: cmpwi cr0,r13,2 + bne 1f + b .power7_wakeup_loss + /* Total loss of HV state is fatal, we could try to use the + * PIR to locate a PACA, then use an emergency stack etc... + * but for now, let's just stay stuck here + */ +1: cmpwi cr0,r13,3 + beq . +END_FTR_SECTION_IFSET(CPU_FTR_HVMODE_206) +#endif /* CONFIG_PPC_P7_NAP */ + EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD) . = 0x200 _machine_check_pSeries: HMT_MEDIUM DO_KVM 0x200 - mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */ - EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD) . = 0x300 .globl data_access_pSeries data_access_pSeries: HMT_MEDIUM DO_KVM 0x300 - mtspr SPRN_SPRG_SCRATCH0,r13 + SET_SCRATCH0(r13) BEGIN_FTR_SECTION - mfspr r13,SPRN_SPRG_PACA + GET_PACA(r13) std r9,PACA_EXSLB+EX_R9(r13) std r10,PACA_EXSLB+EX_R10(r13) mfspr r10,SPRN_DAR @@ -67,22 +95,22 @@ BEGIN_FTR_SECTION std r11,PACA_EXGEN+EX_R11(r13) ld r11,PACA_EXSLB+EX_R9(r13) std r12,PACA_EXGEN+EX_R12(r13) - mfspr r12,SPRN_SPRG_SCRATCH0 + GET_SCRATCH0(r12) std r10,PACA_EXGEN+EX_R10(r13) std r11,PACA_EXGEN+EX_R9(r13) std r12,PACA_EXGEN+EX_R13(r13) - EXCEPTION_PROLOG_PSERIES_1(data_access_common) + EXCEPTION_PROLOG_PSERIES_1(data_access_common, EXC_STD) FTR_SECTION_ELSE - EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common) -ALT_FTR_SECTION_END_IFCLR(CPU_FTR_SLB) + EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD) +ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB) . = 0x380 .globl data_access_slb_pSeries data_access_slb_pSeries: HMT_MEDIUM DO_KVM 0x380 - mtspr SPRN_SPRG_SCRATCH0,r13 - mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */ + SET_SCRATCH0(r13) + GET_PACA(r13) std r3,PACA_EXSLB+EX_R3(r13) mfspr r3,SPRN_DAR std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ @@ -95,7 +123,7 @@ data_access_slb_pSeries: std r10,PACA_EXSLB+EX_R10(r13) std r11,PACA_EXSLB+EX_R11(r13) std r12,PACA_EXSLB+EX_R12(r13) - mfspr r10,SPRN_SPRG_SCRATCH0 + GET_SCRATCH0(r10) std r10,PACA_EXSLB+EX_R13(r13) mfspr r12,SPRN_SRR1 /* and SRR1 */ #ifndef CONFIG_RELOCATABLE @@ -113,15 +141,15 @@ data_access_slb_pSeries: bctr #endif - STD_EXCEPTION_PSERIES(0x400, instruction_access) + STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access) . 
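The new system_reset_pSeries prologue above decodes a two-bit wake-state field from SRR1 to tell a nap wakeup apart from a normal reset: 1 means no state was lost, 2 means supervisor state was lost (restore from the saved frame), and 3 means hypervisor state was lost, which the code treats as fatal and simply spins on. A C rendering of that dispatch might look like the sketch below; the field position and helper names are illustrative, not architected definitions.

#include <stdio.h>
#include <stdlib.h>

/* Wake-state codes matching the comparisons in the assembly above:
 * 1 = no state lost, 2 = supervisor state lost, 3 = hypervisor state lost.
 * The shift below is only a placeholder for the field the rlwinm extracts.
 */
#define WAKE_STATE(srr1)	(((srr1) >> 16) & 0x3UL)

static void power7_wakeup_noloss(void) { puts("resume: registers still intact"); }
static void power7_wakeup_loss(void)   { puts("resume: restore the saved frame"); }

static void handle_system_reset(unsigned long srr1)
{
	switch (WAKE_STATE(srr1)) {
	case 1:
		power7_wakeup_noloss();
		break;
	case 2:
		power7_wakeup_loss();
		break;
	case 3:
		abort();        /* HV state lost: the assembly just spins */
	default:
		puts("not a nap wakeup: take the normal reset path");
	}
}

int main(void)
{
	handle_system_reset(0x2UL << 16);   /* pretend SRR1 reports "state lost" */
	return 0;
}
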
= 0x480 .globl instruction_access_slb_pSeries instruction_access_slb_pSeries: HMT_MEDIUM DO_KVM 0x480 - mtspr SPRN_SPRG_SCRATCH0,r13 - mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */ + SET_SCRATCH0(r13) + GET_PACA(r13) std r3,PACA_EXSLB+EX_R3(r13) mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ @@ -134,7 +162,7 @@ instruction_access_slb_pSeries: std r10,PACA_EXSLB+EX_R10(r13) std r11,PACA_EXSLB+EX_R11(r13) std r12,PACA_EXSLB+EX_R12(r13) - mfspr r10,SPRN_SPRG_SCRATCH0 + GET_SCRATCH0(r10) std r10,PACA_EXSLB+EX_R13(r13) mfspr r12,SPRN_SRR1 /* and SRR1 */ #ifndef CONFIG_RELOCATABLE @@ -147,13 +175,29 @@ instruction_access_slb_pSeries: bctr #endif - MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt) - STD_EXCEPTION_PSERIES(0x600, alignment) - STD_EXCEPTION_PSERIES(0x700, program_check) - STD_EXCEPTION_PSERIES(0x800, fp_unavailable) - MASKABLE_EXCEPTION_PSERIES(0x900, decrementer) - STD_EXCEPTION_PSERIES(0xa00, trap_0a) - STD_EXCEPTION_PSERIES(0xb00, trap_0b) + /* We open code these as we can't have a ". = x" (even with + * x = "." within a feature section + */ + . = 0x500; + .globl hardware_interrupt_pSeries; + .globl hardware_interrupt_hv; +hardware_interrupt_pSeries: +hardware_interrupt_hv: + BEGIN_FTR_SECTION + _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD) + FTR_SECTION_ELSE + _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV) + ALT_FTR_SECTION_END_IFCLR(CPU_FTR_HVMODE_206) + + STD_EXCEPTION_PSERIES(0x600, 0x600, alignment) + STD_EXCEPTION_PSERIES(0x700, 0x700, program_check) + STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable) + + MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer) + MASKABLE_EXCEPTION_HV(0x980, 0x980, decrementer) + + STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a) + STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b) . = 0xc00 .globl system_call_pSeries @@ -165,13 +209,13 @@ BEGIN_FTR_SECTION beq- 1f END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) mr r9,r13 - mfspr r13,SPRN_SPRG_PACA + GET_PACA(r13) mfspr r11,SPRN_SRR0 - ld r12,PACAKBASE(r13) - ld r10,PACAKMSR(r13) - LOAD_HANDLER(r12, system_call_entry) - mtspr SPRN_SRR0,r12 mfspr r12,SPRN_SRR1 + ld r10,PACAKBASE(r13) + LOAD_HANDLER(r10, system_call_entry) + mtspr SPRN_SRR0,r10 + ld r10,PACAKMSR(r13) mtspr SPRN_SRR1,r10 rfid b . /* prevent speculative execution */ @@ -183,8 +227,21 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) rfid /* return to userspace */ b . - STD_EXCEPTION_PSERIES(0xd00, single_step) - STD_EXCEPTION_PSERIES(0xe00, trap_0e) + STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step) + + /* At 0xe??? we have a bunch of hypervisor exceptions, we branch + * out of line to handle them + */ + . = 0xe00 + b h_data_storage_hv + . = 0xe20 + b h_instr_storage_hv + . = 0xe40 + b emulation_assist_hv + . = 0xe50 + b hmi_exception_hv + . = 0xe60 + b hmi_exception_hv /* We need to deal with the Altivec unavailable exception * here which is at 0xf20, thus in the middle of the @@ -193,39 +250,42 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) */ performance_monitor_pSeries_1: . = 0xf00 - DO_KVM 0xf00 b performance_monitor_pSeries altivec_unavailable_pSeries_1: . = 0xf20 - DO_KVM 0xf20 b altivec_unavailable_pSeries vsx_unavailable_pSeries_1: . 
= 0xf40 - DO_KVM 0xf40 b vsx_unavailable_pSeries #ifdef CONFIG_CBE_RAS - HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error) + STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error) #endif /* CONFIG_CBE_RAS */ - STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint) + STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint) #ifdef CONFIG_CBE_RAS - HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance) + STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance) #endif /* CONFIG_CBE_RAS */ - STD_EXCEPTION_PSERIES(0x1700, altivec_assist) + STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist) #ifdef CONFIG_CBE_RAS - HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal) + STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal) #endif /* CONFIG_CBE_RAS */ . = 0x3000 -/*** pSeries interrupt support ***/ +/*** Out of line interrupts support ***/ + + /* moved from 0xe00 */ + STD_EXCEPTION_HV(., 0xe00, h_data_storage) + STD_EXCEPTION_HV(., 0xe20, h_instr_storage) + STD_EXCEPTION_HV(., 0xe40, emulation_assist) + STD_EXCEPTION_HV(., 0xe60, hmi_exception) /* need to flush cache ? */ /* moved from 0xf00 */ - STD_EXCEPTION_PSERIES(., performance_monitor) - STD_EXCEPTION_PSERIES(., altivec_unavailable) - STD_EXCEPTION_PSERIES(., vsx_unavailable) + STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor) + STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable) + STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable) /* * An interrupt came in while soft-disabled; clear EE in SRR1, @@ -240,17 +300,30 @@ masked_interrupt: rotldi r10,r10,16 mtspr SPRN_SRR1,r10 ld r10,PACA_EXGEN+EX_R10(r13) - mfspr r13,SPRN_SPRG_SCRATCH0 + GET_SCRATCH0(r13) rfid b . +masked_Hinterrupt: + stb r10,PACAHARDIRQEN(r13) + mtcrf 0x80,r9 + ld r9,PACA_EXGEN+EX_R9(r13) + mfspr r10,SPRN_HSRR1 + rldicl r10,r10,48,1 /* clear MSR_EE */ + rotldi r10,r10,16 + mtspr SPRN_HSRR1,r10 + ld r10,PACA_EXGEN+EX_R10(r13) + GET_SCRATCH0(r13) + hrfid + b . 
+ .align 7 do_stab_bolted_pSeries: std r11,PACA_EXSLB+EX_R11(r13) std r12,PACA_EXSLB+EX_R12(r13) - mfspr r10,SPRN_SPRG_SCRATCH0 + GET_SCRATCH0(r10) std r10,PACA_EXSLB+EX_R13(r13) - EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted) + EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD) #ifdef CONFIG_PPC_PSERIES /* @@ -260,15 +333,15 @@ do_stab_bolted_pSeries: .align 7 system_reset_fwnmi: HMT_MEDIUM - mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */ - EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) + SET_SCRATCH0(r13) /* save r13 */ + EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD) .globl machine_check_fwnmi .align 7 machine_check_fwnmi: HMT_MEDIUM - mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */ - EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) + SET_SCRATCH0(r13) /* save r13 */ + EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD) #endif /* CONFIG_PPC_PSERIES */ @@ -282,7 +355,7 @@ slb_miss_user_pseries: std r10,PACA_EXGEN+EX_R10(r13) std r11,PACA_EXGEN+EX_R11(r13) std r12,PACA_EXGEN+EX_R12(r13) - mfspr r10,SPRG_SCRATCH0 + GET_SCRATCH0(r10) ld r11,PACA_EXSLB+EX_R9(r13) ld r12,PACA_EXSLB+EX_R3(r13) std r10,PACA_EXGEN+EX_R13(r13) @@ -342,6 +415,8 @@ machine_check_common: STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) + STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception) + STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception) STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception) STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) #ifdef CONFIG_ALTIVEC @@ -386,9 +461,24 @@ bad_stack: std r12,_XER(r1) SAVE_GPR(0,r1) SAVE_GPR(2,r1) - SAVE_4GPRS(3,r1) - SAVE_2GPRS(7,r1) - SAVE_10GPRS(12,r1) + ld r10,EX_R3(r3) + std r10,GPR3(r1) + SAVE_GPR(4,r1) + SAVE_4GPRS(5,r1) + ld r9,EX_R9(r3) + ld r10,EX_R10(r3) + SAVE_2GPRS(9,r1) + ld r9,EX_R11(r3) + ld r10,EX_R12(r3) + ld r11,EX_R13(r3) + std r9,GPR11(r1) + std r10,GPR12(r1) + std r11,GPR13(r1) +BEGIN_FTR_SECTION + ld r10,EX_CFAR(r3) + std r10,ORIG_GPR3(r1) +END_FTR_SECTION_IFSET(CPU_FTR_CFAR) + SAVE_8GPRS(14,r1) SAVE_10GPRS(22,r1) lhz r12,PACA_TRAP_SAVE(r13) std r12,_TRAP(r1) @@ -397,6 +487,9 @@ bad_stack: li r12,0 std r12,0(r11) ld r2,PACATOC(r13) + ld r11,exception_marker@toc(r2) + std r12,RESULT(r1) + std r11,STACK_FRAME_OVERHEAD-16(r1) 1: addi r3,r1,STACK_FRAME_OVERHEAD bl .kernel_bad_stack b 1b @@ -419,6 +512,19 @@ data_access_common: li r5,0x300 b .do_hash_page /* Try to handle as hpte fault */ + .align 7 + .globl h_data_storage_common +h_data_storage_common: + mfspr r10,SPRN_HDAR + std r10,PACA_EXGEN+EX_DAR(r13) + mfspr r10,SPRN_HDSISR + stw r10,PACA_EXGEN+EX_DSISR(r13) + EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN) + bl .save_nvgprs + addi r3,r1,STACK_FRAME_OVERHEAD + bl .unknown_exception + b .ret_from_except + .align 7 .globl instruction_access_common instruction_access_common: @@ -428,6 +534,8 @@ instruction_access_common: li r5,0x400 b .do_hash_page /* Try to handle as hpte fault */ + STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception) + /* * Here is the common SLB miss user that is used when going to virtual * mode for SLB misses, that is currently not used @@ -750,7 +858,7 @@ _STATIC(do_hash_page) BEGIN_FTR_SECTION andis. r0,r4,0x0020 /* Is it a segment table fault? 
*/ bne- do_ste_alloc /* If so handle it */ -END_FTR_SECTION_IFCLR(CPU_FTR_SLB) +END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB) clrrdi r11,r1,THREAD_SHIFT lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ diff --git a/trunk/arch/powerpc/kernel/head_32.S b/trunk/arch/powerpc/kernel/head_32.S index c5c24beb8387..ba250d505e07 100644 --- a/trunk/arch/powerpc/kernel/head_32.S +++ b/trunk/arch/powerpc/kernel/head_32.S @@ -805,19 +805,6 @@ _ENTRY(copy_and_flush) blr #ifdef CONFIG_SMP -#ifdef CONFIG_GEMINI - .globl __secondary_start_gemini -__secondary_start_gemini: - mfspr r4,SPRN_HID0 - ori r4,r4,HID0_ICFI - li r3,0 - ori r3,r3,HID0_ICE - andc r4,r4,r3 - mtspr SPRN_HID0,r4 - sync - b __secondary_start -#endif /* CONFIG_GEMINI */ - .globl __secondary_start_mpc86xx __secondary_start_mpc86xx: mfspr r3, SPRN_PIR @@ -890,15 +877,6 @@ __secondary_start: mtspr SPRN_SRR1,r4 SYNC RFI - -_GLOBAL(start_secondary_resume) - /* Reset stack */ - rlwinm r1,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ - addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD - li r3,0 - std r3,0(r1) /* Zero the stack frame pointer */ - bl start_secondary - b . #endif /* CONFIG_SMP */ #ifdef CONFIG_KVM_BOOK3S_HANDLER diff --git a/trunk/arch/powerpc/kernel/head_64.S b/trunk/arch/powerpc/kernel/head_64.S index 3a319f9c9d3e..ba504099844a 100644 --- a/trunk/arch/powerpc/kernel/head_64.S +++ b/trunk/arch/powerpc/kernel/head_64.S @@ -147,6 +147,8 @@ __secondary_hold: mtctr r4 mr r3,r24 li r4,0 + /* Make sure that patched code is visible */ + isync bctr #else BUG_OPCODE @@ -216,19 +218,25 @@ generic_secondary_common_init: */ LOAD_REG_ADDR(r13, paca) /* Load paca pointer */ ld r13,0(r13) /* Get base vaddr of paca array */ +#ifndef CONFIG_SMP + addi r13,r13,PACA_SIZE /* know r13 if used accidentally */ + b .kexec_wait /* wait for next kernel if !SMP */ +#else + LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */ + lwz r7,0(r7) /* also the max paca allocated */ li r5,0 /* logical cpu id */ 1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ cmpw r6,r24 /* Compare to our id */ beq 2f addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */ addi r5,r5,1 - cmpwi r5,NR_CPUS + cmpw r5,r7 /* Check if more pacas exist */ blt 1b mr r3,r24 /* not found, copy phys to r3 */ b .kexec_wait /* next kernel might do better */ -2: mtspr SPRN_SPRG_PACA,r13 /* Save vaddr of paca in an SPRG */ +2: SET_PACA(r13) #ifdef CONFIG_PPC_BOOK3E addi r12,r13,PACA_EXTLB /* and TLB exc frame in another */ mtspr SPRN_SPRG_TLB_EXFRAME,r12 @@ -236,34 +244,39 @@ generic_secondary_common_init: /* From now on, r24 is expected to be logical cpuid */ mr r24,r5 -3: HMT_LOW - lbz r23,PACAPROCSTART(r13) /* Test if this processor should */ - /* start. */ - -#ifndef CONFIG_SMP - b 3b /* Never go on non-SMP */ -#else - cmpwi 0,r23,0 - beq 3b /* Loop until told to go */ - - sync /* order paca.run and cur_cpu_spec */ /* See if we need to call a cpu state restore handler */ LOAD_REG_ADDR(r23, cur_cpu_spec) ld r23,0(r23) ld r23,CPU_SPEC_RESTORE(r23) cmpdi 0,r23,0 - beq 4f + beq 3f ld r23,0(r23) mtctr r23 bctrl -4: /* Create a temp kernel stack for use before relocation is on. */ +3: LOAD_REG_ADDR(r3, boot_cpu_count) /* Decrement boot_cpu_count */ + lwarx r4,0,r3 + subi r4,r4,1 + stwcx. r4,0,r3 + bne 3b + isync + +4: HMT_LOW + lbz r23,PACAPROCSTART(r13) /* Test if this processor should */ + /* start. 
*/ + cmpwi 0,r23,0 + beq 4b /* Loop until told to go */ + + sync /* order paca.run and cur_cpu_spec */ + isync /* In case code patching happened */ + + /* Create a temp kernel stack for use before relocation is on. */ ld r1,PACAEMERGSP(r13) subi r1,r1,STACK_FRAME_OVERHEAD b __secondary_start -#endif +#endif /* SMP */ /* * Turn the MMU off. @@ -534,7 +547,7 @@ _GLOBAL(pmac_secondary_start) ld r4,0(r4) /* Get base vaddr of paca array */ mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */ add r13,r13,r4 /* for this processor. */ - mtspr SPRN_SPRG_PACA,r13 /* Save vaddr of paca in an SPRG*/ + SET_PACA(r13) /* Save vaddr of paca in an SPRG*/ /* Mark interrupts soft and hard disabled (they might be enabled * in the PACA when doing hotplug) @@ -645,7 +658,7 @@ _GLOBAL(enable_64b_mode) oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */ mtmsr r11 #else /* CONFIG_PPC_BOOK3E */ - li r12,(MSR_SF | MSR_ISF)@highest + li r12,(MSR_64BIT | MSR_ISF)@highest sldi r12,r12,48 or r11,r11,r12 mtmsrd r11 diff --git a/trunk/arch/powerpc/kernel/ibmebus.c b/trunk/arch/powerpc/kernel/ibmebus.c index c00d4ca1ee15..28581f1ad2c0 100644 --- a/trunk/arch/powerpc/kernel/ibmebus.c +++ b/trunk/arch/powerpc/kernel/ibmebus.c @@ -527,7 +527,7 @@ static int ibmebus_bus_pm_resume_noirq(struct device *dev) #endif /* !CONFIG_SUSPEND */ -#ifdef CONFIG_HIBERNATION +#ifdef CONFIG_HIBERNATE_CALLBACKS static int ibmebus_bus_pm_freeze(struct device *dev) { @@ -665,7 +665,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev) return ret; } -#else /* !CONFIG_HIBERNATION */ +#else /* !CONFIG_HIBERNATE_CALLBACKS */ #define ibmebus_bus_pm_freeze NULL #define ibmebus_bus_pm_thaw NULL @@ -676,7 +676,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev) #define ibmebus_bus_pm_poweroff_noirq NULL #define ibmebus_bus_pm_restore_noirq NULL -#endif /* !CONFIG_HIBERNATION */ +#endif /* !CONFIG_HIBERNATE_CALLBACKS */ static struct dev_pm_ops ibmebus_bus_dev_pm_ops = { .prepare = ibmebus_bus_pm_prepare, diff --git a/trunk/arch/powerpc/kernel/idle_power7.S b/trunk/arch/powerpc/kernel/idle_power7.S new file mode 100644 index 000000000000..f8f0bc7f1d4f --- /dev/null +++ b/trunk/arch/powerpc/kernel/idle_power7.S @@ -0,0 +1,97 @@ +/* + * This file contains the power_save function for 970-family CPUs. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#undef DEBUG + + .text + +_GLOBAL(power7_idle) + /* Now check if user or arch enabled NAP mode */ + LOAD_REG_ADDRBASE(r3,powersave_nap) + lwz r4,ADDROFF(powersave_nap)(r3) + cmpwi 0,r4,0 + beqlr + + /* NAP is a state loss, we create a regs frame on the + * stack, fill it up with the state we care about and + * stick a pointer to it in PACAR1. We really only + * need to save PC, some CR bits and the NV GPRs, + * but for now an interrupt frame will do. + */ + mflr r0 + std r0,16(r1) + stdu r1,-INT_FRAME_SIZE(r1) + std r0,_LINK(r1) + std r0,_NIP(r1) + +#ifndef CONFIG_SMP + /* Make sure FPU, VSX etc... 
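In head_64.S above, generic_secondary_common_init now walks only the pacas that were actually allocated (bounded by nr_cpu_ids rather than NR_CPUS) looking for the entry whose hardware CPU id matches this thread, and sends unmatched or !SMP secondaries to kexec_wait. Stripped of the assembly, that search is nothing more than the loop below (structure and names simplified):

#include <stdio.h>

struct paca_sketch { int hw_cpu_id; };   /* only the field the loop compares */

/* Return the logical CPU whose paca claims this hardware id, or -1 if no
 * allocated paca matches (the caller would then park in kexec_wait).
 */
static int find_logical_cpu(const struct paca_sketch *paca, int nr_cpu_ids,
			    int my_hw_id)
{
	int cpu;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		if (paca[cpu].hw_cpu_id == my_hw_id)
			return cpu;
	return -1;
}

int main(void)
{
	struct paca_sketch paca[] = { { 0 }, { 4 }, { 8 } };  /* three pacas */

	printf("hw id 8  -> logical cpu %d\n", find_logical_cpu(paca, 3, 8));
	printf("hw id 12 -> logical cpu %d\n", find_logical_cpu(paca, 3, 12));
	return 0;
}
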
are flushed as we may lose + * state when going to nap mode + */ + bl .discard_lazy_cpu_state +#endif /* CONFIG_SMP */ + + /* Hard disable interrupts */ + mfmsr r9 + rldicl r9,r9,48,1 + rotldi r9,r9,16 + mtmsrd r9,1 /* hard-disable interrupts */ + li r0,0 + stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */ + stb r0,PACAHARDIRQEN(r13) + + /* Continue saving state */ + SAVE_GPR(2, r1) + SAVE_NVGPRS(r1) + mfcr r3 + std r3,_CCR(r1) + std r9,_MSR(r1) + std r1,PACAR1(r13) + + /* Magic NAP mode enter sequence */ + std r0,0(r1) + ptesync + ld r0,0(r1) +1: cmp cr0,r0,r0 + bne 1b + PPC_NAP + b . + +_GLOBAL(power7_wakeup_loss) + GET_PACA(r13) + ld r1,PACAR1(r13) + REST_NVGPRS(r1) + REST_GPR(2, r1) + ld r3,_CCR(r1) + ld r4,_MSR(r1) + ld r5,_NIP(r1) + addi r1,r1,INT_FRAME_SIZE + mtcr r3 + mtspr SPRN_SRR1,r4 + mtspr SPRN_SRR0,r5 + rfid + +_GLOBAL(power7_wakeup_noloss) + GET_PACA(r13) + ld r1,PACAR1(r13) + ld r4,_MSR(r1) + ld r5,_NIP(r1) + addi r1,r1,INT_FRAME_SIZE + mtspr SPRN_SRR1,r4 + mtspr SPRN_SRR0,r5 + rfid diff --git a/trunk/arch/powerpc/platforms/cell/io-workarounds.c b/trunk/arch/powerpc/kernel/io-workarounds.c similarity index 95% rename from trunk/arch/powerpc/platforms/cell/io-workarounds.c rename to trunk/arch/powerpc/kernel/io-workarounds.c index 5c1118e31940..ffafaea3d261 100644 --- a/trunk/arch/powerpc/platforms/cell/io-workarounds.c +++ b/trunk/arch/powerpc/kernel/io-workarounds.c @@ -17,8 +17,7 @@ #include #include #include - -#include "io-workarounds.h" +#include #define IOWA_MAX_BUS 8 @@ -145,7 +144,19 @@ static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size, return res; } -/* Regist new bus to support workaround */ +/* Enable IO workaround */ +static void __devinit io_workaround_init(void) +{ + static int io_workaround_inited; + + if (io_workaround_inited) + return; + ppc_pci_io = iowa_pci_io; + ppc_md.ioremap = iowa_ioremap; + io_workaround_inited = 1; +} + +/* Register new bus to support workaround */ void __devinit iowa_register_bus(struct pci_controller *phb, struct ppc_pci_io *ops, int (*initfunc)(struct iowa_bus *, void *), void *data) @@ -153,6 +164,8 @@ void __devinit iowa_register_bus(struct pci_controller *phb, struct iowa_bus *bus; struct device_node *np = phb->dn; + io_workaround_init(); + if (iowa_bus_count >= IOWA_MAX_BUS) { pr_err("IOWA:Too many pci bridges, " "workarounds disabled for %s\n", np->full_name); @@ -162,6 +175,7 @@ void __devinit iowa_register_bus(struct pci_controller *phb, bus = &iowa_busses[iowa_bus_count]; bus->phb = phb; bus->ops = ops; + bus->private = data; if (initfunc) if ((*initfunc)(bus, data)) @@ -172,14 +186,3 @@ void __devinit iowa_register_bus(struct pci_controller *phb, pr_debug("IOWA:[%d]Add bus, %s.\n", iowa_bus_count-1, np->full_name); } -/* enable IO workaround */ -void __devinit io_workaround_init(void) -{ - static int io_workaround_inited; - - if (io_workaround_inited) - return; - ppc_pci_io = iowa_pci_io; - ppc_md.ioremap = iowa_ioremap; - io_workaround_inited = 1; -} diff --git a/trunk/arch/powerpc/kernel/irq.c b/trunk/arch/powerpc/kernel/irq.c index f621b7d2d869..a24d37d4cf51 100644 --- a/trunk/arch/powerpc/kernel/irq.c +++ b/trunk/arch/powerpc/kernel/irq.c @@ -66,7 +66,6 @@ #include #include #include -#include #include #ifdef CONFIG_PPC64 @@ -160,7 +159,8 @@ notrace void arch_local_irq_restore(unsigned long en) #if defined(CONFIG_BOOKE) && defined(CONFIG_SMP) /* Check for pending doorbell interrupts and resend to ourself */ - doorbell_check_self(); + if (cpu_has_feature(CPU_FTR_DBELL)) + 
smp_muxed_ipi_resend(); #endif /* @@ -397,24 +397,28 @@ struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly; void exc_lvl_ctx_init(void) { struct thread_info *tp; - int i, hw_cpu; + int i, cpu_nr; for_each_possible_cpu(i) { - hw_cpu = get_hard_smp_processor_id(i); - memset((void *)critirq_ctx[hw_cpu], 0, THREAD_SIZE); - tp = critirq_ctx[hw_cpu]; - tp->cpu = i; +#ifdef CONFIG_PPC64 + cpu_nr = i; +#else + cpu_nr = get_hard_smp_processor_id(i); +#endif + memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE); + tp = critirq_ctx[cpu_nr]; + tp->cpu = cpu_nr; tp->preempt_count = 0; #ifdef CONFIG_BOOKE - memset((void *)dbgirq_ctx[hw_cpu], 0, THREAD_SIZE); - tp = dbgirq_ctx[hw_cpu]; - tp->cpu = i; + memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE); + tp = dbgirq_ctx[cpu_nr]; + tp->cpu = cpu_nr; tp->preempt_count = 0; - memset((void *)mcheckirq_ctx[hw_cpu], 0, THREAD_SIZE); - tp = mcheckirq_ctx[hw_cpu]; - tp->cpu = i; + memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE); + tp = mcheckirq_ctx[cpu_nr]; + tp->cpu = cpu_nr; tp->preempt_count = HARDIRQ_OFFSET; #endif } @@ -477,20 +481,41 @@ void do_softirq(void) * IRQ controller and virtual interrupts */ +/* The main irq map itself is an array of NR_IRQ entries containing the + * associate host and irq number. An entry with a host of NULL is free. + * An entry can be allocated if it's free, the allocator always then sets + * hwirq first to the host's invalid irq number and then fills ops. + */ +struct irq_map_entry { + irq_hw_number_t hwirq; + struct irq_host *host; +}; + static LIST_HEAD(irq_hosts); static DEFINE_RAW_SPINLOCK(irq_big_lock); -static unsigned int revmap_trees_allocated; static DEFINE_MUTEX(revmap_trees_mutex); -struct irq_map_entry irq_map[NR_IRQS]; +static struct irq_map_entry irq_map[NR_IRQS]; static unsigned int irq_virq_count = NR_IRQS; static struct irq_host *irq_default_host; +irq_hw_number_t irqd_to_hwirq(struct irq_data *d) +{ + return irq_map[d->irq].hwirq; +} +EXPORT_SYMBOL_GPL(irqd_to_hwirq); + irq_hw_number_t virq_to_hw(unsigned int virq) { return irq_map[virq].hwirq; } EXPORT_SYMBOL_GPL(virq_to_hw); +bool virq_is_host(unsigned int virq, struct irq_host *host) +{ + return irq_map[virq].host == host; +} +EXPORT_SYMBOL_GPL(virq_is_host); + static int default_irq_host_match(struct irq_host *h, struct device_node *np) { return h->of_node != NULL && h->of_node == np; @@ -511,7 +536,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node, /* Allocate structure and revmap table if using linear mapping */ if (revmap_type == IRQ_HOST_MAP_LINEAR) size += revmap_arg * sizeof(unsigned int); - host = zalloc_maybe_bootmem(size, GFP_KERNEL); + host = kzalloc(size, GFP_KERNEL); if (host == NULL) return NULL; @@ -561,14 +586,14 @@ struct irq_host *irq_alloc_host(struct device_node *of_node, irq_map[i].host = host; smp_wmb(); - /* Clear norequest flags */ - irq_clear_status_flags(i, IRQ_NOREQUEST); - /* Legacy flags are left to default at this point, * one can then use irq_create_mapping() to * explicitly change them */ ops->map(host, i, i); + + /* Clear norequest flags */ + irq_clear_status_flags(i, IRQ_NOREQUEST); } break; case IRQ_HOST_MAP_LINEAR: @@ -579,6 +604,9 @@ struct irq_host *irq_alloc_host(struct device_node *of_node, smp_wmb(); host->revmap_data.linear.revmap = rmap; break; + case IRQ_HOST_MAP_TREE: + INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL); + break; default: break; } @@ -636,8 +664,6 @@ static int irq_setup_virq(struct irq_host *host, unsigned int virq, goto error; } - irq_clear_status_flags(virq, 
IRQ_NOREQUEST); - /* map it */ smp_wmb(); irq_map[virq].hwirq = hwirq; @@ -648,6 +674,8 @@ static int irq_setup_virq(struct irq_host *host, unsigned int virq, goto errdesc; } + irq_clear_status_flags(virq, IRQ_NOREQUEST); + return 0; errdesc: @@ -704,8 +732,6 @@ unsigned int irq_create_mapping(struct irq_host *host, */ virq = irq_find_mapping(host, hwirq); if (virq != NO_IRQ) { - if (host->ops->remap) - host->ops->remap(host, virq, hwirq); pr_debug("irq: -> existing mapping on virq %d\n", virq); return virq; } @@ -786,14 +812,15 @@ void irq_dispose_mapping(unsigned int virq) return; host = irq_map[virq].host; - WARN_ON (host == NULL); - if (host == NULL) + if (WARN_ON(host == NULL)) return; /* Never unmap legacy interrupts */ if (host->revmap_type == IRQ_HOST_MAP_LEGACY) return; + irq_set_status_flags(virq, IRQ_NOREQUEST); + /* remove chip and handler */ irq_set_chip_and_handler(virq, NULL, NULL); @@ -813,13 +840,6 @@ void irq_dispose_mapping(unsigned int virq) host->revmap_data.linear.revmap[hwirq] = NO_IRQ; break; case IRQ_HOST_MAP_TREE: - /* - * Check if radix tree allocated yet, if not then nothing to - * remove. - */ - smp_rmb(); - if (revmap_trees_allocated < 1) - break; mutex_lock(&revmap_trees_mutex); radix_tree_delete(&host->revmap_data.tree, hwirq); mutex_unlock(&revmap_trees_mutex); @@ -830,8 +850,6 @@ void irq_dispose_mapping(unsigned int virq) smp_mb(); irq_map[virq].hwirq = host->inval_irq; - irq_set_status_flags(virq, IRQ_NOREQUEST); - irq_free_descs(virq, 1); /* Free it */ irq_free_virt(virq, 1); @@ -877,16 +895,9 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host, struct irq_map_entry *ptr; unsigned int virq; - WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); - - /* - * Check if the radix tree exists and has bee initialized. - * If not, we fallback to slow mode - */ - if (revmap_trees_allocated < 2) + if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE)) return irq_find_mapping(host, hwirq); - /* Now try to resolve */ /* * No rcu_read_lock(ing) needed, the ptr returned can't go under us * as it's referencing an entry in the static irq_map table. @@ -909,16 +920,7 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host, void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, irq_hw_number_t hwirq) { - - WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); - - /* - * Check if the radix tree exists yet. - * If not, then the irq will be inserted into the tree when it gets - * initialized. - */ - smp_rmb(); - if (revmap_trees_allocated < 1) + if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE)) return; if (virq != NO_IRQ) { @@ -934,7 +936,8 @@ unsigned int irq_linear_revmap(struct irq_host *host, { unsigned int *revmap; - WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR); + if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR)) + return irq_find_mapping(host, hwirq); /* Check revmap bounds */ if (unlikely(hwirq >= host->revmap_data.linear.size)) @@ -1028,53 +1031,6 @@ int arch_early_irq_init(void) return 0; } -/* We need to create the radix trees late */ -static int irq_late_init(void) -{ - struct irq_host *h; - unsigned int i; - - /* - * No mutual exclusion with respect to accessors of the tree is needed - * here as the synchronization is done via the state variable - * revmap_trees_allocated. 
- */ - list_for_each_entry(h, &irq_hosts, link) { - if (h->revmap_type == IRQ_HOST_MAP_TREE) - INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL); - } - - /* - * Make sure the radix trees inits are visible before setting - * the flag - */ - smp_wmb(); - revmap_trees_allocated = 1; - - /* - * Insert the reverse mapping for those interrupts already present - * in irq_map[]. - */ - mutex_lock(&revmap_trees_mutex); - for (i = 0; i < irq_virq_count; i++) { - if (irq_map[i].host && - (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE)) - radix_tree_insert(&irq_map[i].host->revmap_data.tree, - irq_map[i].hwirq, &irq_map[i]); - } - mutex_unlock(&revmap_trees_mutex); - - /* - * Make sure the radix trees insertions are visible before setting - * the flag - */ - smp_wmb(); - revmap_trees_allocated = 2; - - return 0; -} -arch_initcall(irq_late_init); - #ifdef CONFIG_VIRQ_DEBUG static int virq_debug_show(struct seq_file *m, void *private) { @@ -1082,10 +1038,11 @@ static int virq_debug_show(struct seq_file *m, void *private) struct irq_desc *desc; const char *p; static const char none[] = "none"; + void *data; int i; - seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq", - "chip name", "host name"); + seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq", + "chip name", "chip data", "host name"); for (i = 1; i < nr_irqs; i++) { desc = irq_to_desc(i); @@ -1098,7 +1055,7 @@ static int virq_debug_show(struct seq_file *m, void *private) struct irq_chip *chip; seq_printf(m, "%5d ", i); - seq_printf(m, "0x%05lx ", virq_to_hw(i)); + seq_printf(m, "0x%05lx ", irq_map[i].hwirq); chip = irq_desc_get_chip(desc); if (chip && chip->name) @@ -1107,6 +1064,9 @@ static int virq_debug_show(struct seq_file *m, void *private) p = none; seq_printf(m, "%-15s ", p); + data = irq_desc_get_chip_data(desc); + seq_printf(m, "0x%16p ", data); + if (irq_map[i].host && irq_map[i].host->of_node) p = irq_map[i].host->of_node->full_name; else diff --git a/trunk/arch/powerpc/kernel/kgdb.c b/trunk/arch/powerpc/kernel/kgdb.c index 42850ee00ada..bd9d35f59cf4 100644 --- a/trunk/arch/powerpc/kernel/kgdb.c +++ b/trunk/arch/powerpc/kernel/kgdb.c @@ -109,7 +109,7 @@ static int kgdb_call_nmi_hook(struct pt_regs *regs) #ifdef CONFIG_SMP void kgdb_roundup_cpus(unsigned long flags) { - smp_send_debugger_break(MSG_ALL_BUT_SELF); + smp_send_debugger_break(); } #endif diff --git a/trunk/arch/powerpc/kernel/legacy_serial.c b/trunk/arch/powerpc/kernel/legacy_serial.c index c834757bebc0..2b97b80d6d7d 100644 --- a/trunk/arch/powerpc/kernel/legacy_serial.c +++ b/trunk/arch/powerpc/kernel/legacy_serial.c @@ -330,9 +330,11 @@ void __init find_legacy_serial_ports(void) if (!parent) continue; if (of_match_node(legacy_serial_parents, parent) != NULL) { - index = add_legacy_soc_port(np, np); - if (index >= 0 && np == stdout) - legacy_serial_console = index; + if (of_device_is_available(np)) { + index = add_legacy_soc_port(np, np); + if (index >= 0 && np == stdout) + legacy_serial_console = index; + } } of_node_put(parent); } diff --git a/trunk/arch/powerpc/kernel/lparcfg.c b/trunk/arch/powerpc/kernel/lparcfg.c index 301db65f05a1..84daabe2fcba 100644 --- a/trunk/arch/powerpc/kernel/lparcfg.c +++ b/trunk/arch/powerpc/kernel/lparcfg.c @@ -132,34 +132,6 @@ static int iseries_lparcfg_data(struct seq_file *m, void *v) /* * Methods used to fetch LPAR data when running on a pSeries platform. 
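The irq.c changes above initialise a tree-mapped host's radix tree as soon as the host is allocated, so the old revmap_trees_allocated staging counter, its memory barriers and the irq_late_init() initcall that re-inserted existing mappings can all go; insertions now happen directly when a mapping is created, and lookups no longer need a staged slow path. The reverse map itself only answers "which Linux virq does this hardware irq correspond to?" at interrupt time. A toy array-based version of that contract (the kernel uses a per-host radix tree keyed by hwirq) looks like:

#include <stdio.h>

#define NO_IRQ     0u
#define MAX_HWIRQ  1024u

/* Toy reverse map: hwirq -> virq, filled in when a mapping is created. */
static unsigned int revmap[MAX_HWIRQ];

static void revmap_insert(unsigned int virq, unsigned int hwirq)
{
	if (hwirq < MAX_HWIRQ)
		revmap[hwirq] = virq;       /* irq_create_mapping() time */
}

static unsigned int revmap_lookup(unsigned int hwirq)
{
	if (hwirq >= MAX_HWIRQ)
		return NO_IRQ;
	return revmap[hwirq];               /* NO_IRQ if never mapped */
}

int main(void)
{
	revmap_insert(17, 256);
	printf("hwirq 256 -> virq %u\n", revmap_lookup(256));   /* 17 */
	printf("hwirq 300 -> virq %u\n", revmap_lookup(300));   /* 0 == NO_IRQ */
	return 0;
}
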
*/ -/** - * h_get_mpp - * H_GET_MPP hcall returns info in 7 parms - */ -int h_get_mpp(struct hvcall_mpp_data *mpp_data) -{ - int rc; - unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; - - rc = plpar_hcall9(H_GET_MPP, retbuf); - - mpp_data->entitled_mem = retbuf[0]; - mpp_data->mapped_mem = retbuf[1]; - - mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff; - mpp_data->pool_num = retbuf[2] & 0xffff; - - mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff; - mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff; - mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff; - - mpp_data->pool_size = retbuf[4]; - mpp_data->loan_request = retbuf[5]; - mpp_data->backing_mem = retbuf[6]; - - return rc; -} -EXPORT_SYMBOL(h_get_mpp); struct hvcall_ppp_data { u64 entitlement; @@ -345,6 +317,30 @@ static void parse_mpp_data(struct seq_file *m) seq_printf(m, "backing_memory=%ld bytes\n", mpp_data.backing_mem); } +/** + * parse_mpp_x_data + * Parse out data returned from h_get_mpp_x + */ +static void parse_mpp_x_data(struct seq_file *m) +{ + struct hvcall_mpp_x_data mpp_x_data; + + if (!firmware_has_feature(FW_FEATURE_XCMO)) + return; + if (h_get_mpp_x(&mpp_x_data)) + return; + + seq_printf(m, "coalesced_bytes=%ld\n", mpp_x_data.coalesced_bytes); + + if (mpp_x_data.pool_coalesced_bytes) + seq_printf(m, "pool_coalesced_bytes=%ld\n", + mpp_x_data.pool_coalesced_bytes); + if (mpp_x_data.pool_purr_cycles) + seq_printf(m, "coalesce_pool_purr=%ld\n", mpp_x_data.pool_purr_cycles); + if (mpp_x_data.pool_spurr_cycles) + seq_printf(m, "coalesce_pool_spurr=%ld\n", mpp_x_data.pool_spurr_cycles); +} + #define SPLPAR_CHARACTERISTICS_TOKEN 20 #define SPLPAR_MAXLENGTH 1026*(sizeof(char)) @@ -520,6 +516,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) parse_system_parameter_string(m); parse_ppp_data(m); parse_mpp_data(m); + parse_mpp_x_data(m); pseries_cmo_data(m); splpar_dispatch_data(m); diff --git a/trunk/arch/powerpc/kernel/misc_32.S b/trunk/arch/powerpc/kernel/misc_32.S index 094bd9821ad4..402560e957bd 100644 --- a/trunk/arch/powerpc/kernel/misc_32.S +++ b/trunk/arch/powerpc/kernel/misc_32.S @@ -694,6 +694,17 @@ _GLOBAL(kernel_thread) addi r1,r1,16 blr +#ifdef CONFIG_SMP +_GLOBAL(start_secondary_resume) + /* Reset stack */ + rlwinm r1,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ + addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD + li r3,0 + std r3,0(r1) /* Zero the stack frame pointer */ + bl start_secondary + b . +#endif /* CONFIG_SMP */ + /* * This routine is just here to keep GCC happy - sigh... */ diff --git a/trunk/arch/powerpc/kernel/misc_64.S b/trunk/arch/powerpc/kernel/misc_64.S index 206a321a71d3..e89df59cdc5a 100644 --- a/trunk/arch/powerpc/kernel/misc_64.S +++ b/trunk/arch/powerpc/kernel/misc_64.S @@ -462,7 +462,8 @@ _GLOBAL(disable_kernel_fp) * wait for the flag to change, indicating this kernel is going away but * the slave code for the next one is at addresses 0 to 100. * - * This is used by all slaves. + * This is used by all slaves, even those that did not find a matching + * paca in the secondary startup code. * * Physical (hardware) cpu id should be in r3. 
*/ @@ -471,10 +472,6 @@ _GLOBAL(kexec_wait) 1: mflr r5 addi r5,r5,kexec_flag-1b - li r4,KEXEC_STATE_REAL_MODE - stb r4,PACAKEXECSTATE(r13) - SYNC - 99: HMT_LOW #ifdef CONFIG_KEXEC /* use no memory without kexec */ lwz r4,0(r5) @@ -499,11 +496,17 @@ kexec_flag: * * get phys id from paca * switch to real mode + * mark the paca as no longer used * join other cpus in kexec_wait(phys_id) */ _GLOBAL(kexec_smp_wait) lhz r3,PACAHWCPUID(r13) bl real_mode + + li r4,KEXEC_STATE_REAL_MODE + stb r4,PACAKEXECSTATE(r13) + SYNC + b .kexec_wait /* diff --git a/trunk/arch/powerpc/kernel/paca.c b/trunk/arch/powerpc/kernel/paca.c index 10f0aadee95b..efeb88184182 100644 --- a/trunk/arch/powerpc/kernel/paca.c +++ b/trunk/arch/powerpc/kernel/paca.c @@ -7,7 +7,7 @@ * 2 of the License, or (at your option) any later version. */ -#include +#include #include #include @@ -156,18 +156,29 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu) /* Put the paca pointer into r13 and SPRG_PACA */ void setup_paca(struct paca_struct *new_paca) { + /* Setup r13 */ local_paca = new_paca; - mtspr(SPRN_SPRG_PACA, local_paca); + #ifdef CONFIG_PPC_BOOK3E + /* On Book3E, initialize the TLB miss exception frames */ mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb); +#else + /* In HV mode, we setup both HPACA and PACA to avoid problems + * if we do a GET_PACA() before the feature fixups have been + * applied + */ + if (cpu_has_feature(CPU_FTR_HVMODE_206)) + mtspr(SPRN_SPRG_HPACA, local_paca); #endif + mtspr(SPRN_SPRG_PACA, local_paca); + } static int __initdata paca_size; void __init allocate_pacas(void) { - int nr_cpus, cpu, limit; + int cpu, limit; /* * We can't take SLB misses on the paca, and we want to access them @@ -179,23 +190,18 @@ void __init allocate_pacas(void) if (firmware_has_feature(FW_FEATURE_ISERIES)) limit = min(limit, HvPagesToMap * HVPAGESIZE); - nr_cpus = NR_CPUS; - /* On iSeries we know we can never have more than 64 cpus */ - if (firmware_has_feature(FW_FEATURE_ISERIES)) - nr_cpus = min(64, nr_cpus); - - paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpus); + paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids); paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit)); memset(paca, 0, paca_size); printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n", - paca_size, nr_cpus, paca); + paca_size, nr_cpu_ids, paca); - allocate_lppacas(nr_cpus, limit); + allocate_lppacas(nr_cpu_ids, limit); /* Can't use for_each_*_cpu, as they aren't functional yet */ - for (cpu = 0; cpu < nr_cpus; cpu++) + for (cpu = 0; cpu < nr_cpu_ids; cpu++) initialise_paca(&paca[cpu], cpu); } diff --git a/trunk/arch/powerpc/kernel/pci_dn.c b/trunk/arch/powerpc/kernel/pci_dn.c index d225d99fe39d..6baabc13306a 100644 --- a/trunk/arch/powerpc/kernel/pci_dn.c +++ b/trunk/arch/powerpc/kernel/pci_dn.c @@ -43,10 +43,9 @@ void * __devinit update_dn_pci_info(struct device_node *dn, void *data) const u32 *regs; struct pci_dn *pdn; - pdn = alloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL); + pdn = zalloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL); if (pdn == NULL) return NULL; - memset(pdn, 0, sizeof(*pdn)); dn->data = pdn; pdn->node = dn; pdn->phb = phb; diff --git a/trunk/arch/powerpc/kernel/perf_event.c b/trunk/arch/powerpc/kernel/perf_event.c index c4063b7f49a0..822f63008ae1 100644 --- a/trunk/arch/powerpc/kernel/perf_event.c +++ b/trunk/arch/powerpc/kernel/perf_event.c @@ -398,6 +398,25 @@ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[], return 0; } +static u64 check_and_compute_delta(u64 
prev, u64 val) +{ + u64 delta = (val - prev) & 0xfffffffful; + + /* + * POWER7 can roll back counter values, if the new value is smaller + * than the previous value it will cause the delta and the counter to + * have bogus values unless we rolled a counter over. If a coutner is + * rolled back, it will be smaller, but within 256, which is the maximum + * number of events to rollback at once. If we dectect a rollback + * return 0. This can lead to a small lack of precision in the + * counters. + */ + if (prev > val && (prev - val) < 256) + delta = 0; + + return delta; +} + static void power_pmu_read(struct perf_event *event) { s64 val, delta, prev; @@ -416,10 +435,11 @@ static void power_pmu_read(struct perf_event *event) prev = local64_read(&event->hw.prev_count); barrier(); val = read_pmc(event->hw.idx); + delta = check_and_compute_delta(prev, val); + if (!delta) + return; } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); - /* The counters are only 32 bits wide */ - delta = (val - prev) & 0xfffffffful; local64_add(delta, &event->count); local64_sub(delta, &event->hw.period_left); } @@ -449,8 +469,9 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw, val = (event->hw.idx == 5) ? pmc5 : pmc6; prev = local64_read(&event->hw.prev_count); event->hw.idx = 0; - delta = (val - prev) & 0xfffffffful; - local64_add(delta, &event->count); + delta = check_and_compute_delta(prev, val); + if (delta) + local64_add(delta, &event->count); } } @@ -458,14 +479,16 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw, unsigned long pmc5, unsigned long pmc6) { struct perf_event *event; - u64 val; + u64 val, prev; int i; for (i = 0; i < cpuhw->n_limited; ++i) { event = cpuhw->limited_counter[i]; event->hw.idx = cpuhw->limited_hwidx[i]; val = (event->hw.idx == 5) ? pmc5 : pmc6; - local64_set(&event->hw.prev_count, val); + prev = local64_read(&event->hw.prev_count); + if (check_and_compute_delta(prev, val)) + local64_set(&event->hw.prev_count, val); perf_event_update_userpage(event); } } @@ -1197,7 +1220,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, /* we don't have to worry about interrupts here */ prev = local64_read(&event->hw.prev_count); - delta = (val - prev) & 0xfffffffful; + delta = check_and_compute_delta(prev, val); local64_add(delta, &event->count); /* diff --git a/trunk/arch/powerpc/kernel/ppc_ksyms.c b/trunk/arch/powerpc/kernel/ppc_ksyms.c index ef3ef566235e..7d28f540200c 100644 --- a/trunk/arch/powerpc/kernel/ppc_ksyms.c +++ b/trunk/arch/powerpc/kernel/ppc_ksyms.c @@ -54,7 +54,6 @@ extern void single_step_exception(struct pt_regs *regs); extern int sys_sigreturn(struct pt_regs *regs); EXPORT_SYMBOL(clear_pages); -EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(ISA_DMA_THRESHOLD); EXPORT_SYMBOL(DMA_MODE_READ); EXPORT_SYMBOL(DMA_MODE_WRITE); @@ -88,9 +87,7 @@ EXPORT_SYMBOL(__copy_tofrom_user); EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(__strncpy_from_user); EXPORT_SYMBOL(__strnlen_user); -#ifdef CONFIG_PPC64 -EXPORT_SYMBOL(copy_4K_page); -#endif +EXPORT_SYMBOL(copy_page); #if defined(CONFIG_PCI) && defined(CONFIG_PPC32) EXPORT_SYMBOL(isa_io_base); diff --git a/trunk/arch/powerpc/kernel/process.c b/trunk/arch/powerpc/kernel/process.c index f74f355a9617..095043d79946 100644 --- a/trunk/arch/powerpc/kernel/process.c +++ b/trunk/arch/powerpc/kernel/process.c @@ -702,6 +702,8 @@ void prepare_to_copy(struct task_struct *tsk) /* * Copy a thread.. 
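check_and_compute_delta(), added in the perf_event.c hunks above, protects against POWER7 occasionally stepping a PMC backwards by up to 256 events: a small negative step is treated as a rollback and yields a delta of 0, while a genuine 32-bit wrap still produces the right count. The logic is pure arithmetic, so it can be exercised stand-alone; the program below is a user-space copy for illustration, not the kernel file itself.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* User-space mirror of the helper: 32-bit counter delta with rollback guard. */
static uint64_t check_and_compute_delta(uint64_t prev, uint64_t val)
{
	uint64_t delta = (val - prev) & 0xfffffffful;

	/* A small step backwards (< 256) is a hardware rollback, not a wrap. */
	if (prev > val && (prev - val) < 256)
		delta = 0;

	return delta;
}

int main(void)
{
	assert(check_and_compute_delta(100, 150) == 50);            /* normal count */
	assert(check_and_compute_delta(100, 90) == 0);              /* rollback */
	assert(check_and_compute_delta(0xfffffff0, 0x10) == 0x20);  /* 32-bit wrap */
	printf("all deltas behave as expected\n");
	return 0;
}

Treating a rollback as zero can under-count very slightly, which the patch comment accepts in exchange for never adding a bogus increment of nearly 2^32 events.
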
*/ +extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */ + int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long unused, struct task_struct *p, struct pt_regs *regs) @@ -755,11 +757,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, _ALIGN_UP(sizeof(struct thread_info), 16); #ifdef CONFIG_PPC_STD_MMU_64 - if (cpu_has_feature(CPU_FTR_SLB)) { + if (mmu_has_feature(MMU_FTR_SLB)) { unsigned long sp_vsid; unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp; - if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) + if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T) << SLB_VSID_SHIFT_1T; else @@ -769,6 +771,20 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, p->thread.ksp_vsid = sp_vsid; } #endif /* CONFIG_PPC_STD_MMU_64 */ +#ifdef CONFIG_PPC64 + if (cpu_has_feature(CPU_FTR_DSCR)) { + if (current->thread.dscr_inherit) { + p->thread.dscr_inherit = 1; + p->thread.dscr = current->thread.dscr; + } else if (0 != dscr_default) { + p->thread.dscr_inherit = 1; + p->thread.dscr = dscr_default; + } else { + p->thread.dscr_inherit = 0; + p->thread.dscr = 0; + } + } +#endif /* * The PPC64 ABI makes use of a TOC to contain function diff --git a/trunk/arch/powerpc/kernel/prom.c b/trunk/arch/powerpc/kernel/prom.c index e74fa12afc82..48aeb55faae9 100644 --- a/trunk/arch/powerpc/kernel/prom.c +++ b/trunk/arch/powerpc/kernel/prom.c @@ -68,6 +68,7 @@ int __initdata iommu_force_on; unsigned long tce_alloc_start, tce_alloc_end; u64 ppc64_rma_size; #endif +static phys_addr_t first_memblock_size; static int __init early_parse_mem(char *p) { @@ -123,18 +124,19 @@ static void __init move_device_tree(void) */ static struct ibm_pa_feature { unsigned long cpu_features; /* CPU_FTR_xxx bit */ + unsigned long mmu_features; /* MMU_FTR_xxx bit */ unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */ unsigned char pabyte; /* byte number in ibm,pa-features */ unsigned char pabit; /* bit number (big-endian) */ unsigned char invert; /* if 1, pa bit set => clear feature */ } ibm_pa_features[] __initdata = { - {0, PPC_FEATURE_HAS_MMU, 0, 0, 0}, - {0, PPC_FEATURE_HAS_FPU, 0, 1, 0}, - {CPU_FTR_SLB, 0, 0, 2, 0}, - {CPU_FTR_CTRL, 0, 0, 3, 0}, - {CPU_FTR_NOEXECUTE, 0, 0, 6, 0}, - {CPU_FTR_NODSISRALIGN, 0, 1, 1, 1}, - {CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0}, + {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0}, + {0, 0, PPC_FEATURE_HAS_FPU, 0, 1, 0}, + {0, MMU_FTR_SLB, 0, 0, 2, 0}, + {CPU_FTR_CTRL, 0, 0, 0, 3, 0}, + {CPU_FTR_NOEXECUTE, 0, 0, 0, 6, 0}, + {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1}, + {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0}, {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0}, }; @@ -166,9 +168,11 @@ static void __init scan_features(unsigned long node, unsigned char *ftrs, if (bit ^ fp->invert) { cur_cpu_spec->cpu_features |= fp->cpu_features; cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs; + cur_cpu_spec->mmu_features |= fp->mmu_features; } else { cur_cpu_spec->cpu_features &= ~fp->cpu_features; cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs; + cur_cpu_spec->mmu_features &= ~fp->mmu_features; } } } @@ -268,13 +272,13 @@ static int __init early_init_dt_scan_cpus(unsigned long node, const char *uname, int depth, void *data) { - static int logical_cpuid = 0; char *type = of_get_flat_dt_prop(node, "device_type", NULL); const u32 *prop; const u32 *intserv; int i, nthreads; unsigned long len; - int found = 0; + int found = -1; + int found_thread = 0; /* We are scanning "cpu" nodes only */ if (type == NULL || strcmp(type, "cpu") != 0) @@ 
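
Because the prom.c hunk above inserts mmu_features as the second field, every row of ibm_pa_features now carries six values in the order {cpu_features, mmu_features, cpu_user_ftrs, pabyte, pabit, invert}, and scan_features() sets or clears all three masks from the same pa-features bit. A small stand-alone reading of one of the new rows; the MMU_FTR_SLB value here is a placeholder for illustration, not the kernel's real bit definition.

    #include <stdio.h>

    /* Miniature model of the reordered table entry from the hunk above. */
    struct pa_feature {
        unsigned long cpu_features;
        unsigned long mmu_features;
        unsigned int  cpu_user_ftrs;
        unsigned char pabyte;    /* byte number in ibm,pa-features */
        unsigned char pabit;     /* bit number (big-endian) */
        unsigned char invert;    /* if 1, pa bit set => clear feature */
    };

    #define MMU_FTR_SLB 0x0001   /* placeholder value, illustration only */

    int main(void)
    {
        struct pa_feature slb = { 0, MMU_FTR_SLB, 0, 0, 2, 0 };

        printf("ibm,pa-features byte %d bit %d (big-endian)%s -> mmu_features |= 0x%lx\n",
               slb.pabyte, slb.pabit, slb.invert ? ", inverted" : "",
               slb.mmu_features);
        return 0;
    }
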
-298,11 +302,10 @@ static int __init early_init_dt_scan_cpus(unsigned long node, * version 2 of the kexec param format adds the phys cpuid of * booted proc. */ - if (initial_boot_params && initial_boot_params->version >= 2) { - if (intserv[i] == - initial_boot_params->boot_cpuid_phys) { - found = 1; - break; + if (initial_boot_params->version >= 2) { + if (intserv[i] == initial_boot_params->boot_cpuid_phys) { + found = boot_cpu_count; + found_thread = i; } } else { /* @@ -311,23 +314,20 @@ static int __init early_init_dt_scan_cpus(unsigned long node, * off secondary threads. */ if (of_get_flat_dt_prop(node, - "linux,boot-cpu", NULL) != NULL) { - found = 1; - break; - } + "linux,boot-cpu", NULL) != NULL) + found = boot_cpu_count; } - #ifdef CONFIG_SMP /* logical cpu id is always 0 on UP kernels */ - logical_cpuid++; + boot_cpu_count++; #endif } - if (found) { - DBG("boot cpu: logical %d physical %d\n", logical_cpuid, - intserv[i]); - boot_cpuid = logical_cpuid; - set_hard_smp_processor_id(boot_cpuid, intserv[i]); + if (found >= 0) { + DBG("boot cpu: logical %d physical %d\n", found, + intserv[found_thread]); + boot_cpuid = found; + set_hard_smp_processor_id(found, intserv[found_thread]); /* * PAPR defines "logical" PVR values for cpus that @@ -509,11 +509,14 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size) size = 0x80000000ul - base; } #endif - - /* First MEMBLOCK added, do some special initializations */ - if (memstart_addr == ~(phys_addr_t)0) - setup_initial_memory_limit(base, size); - memstart_addr = min((u64)memstart_addr, base); + /* Keep track of the beginning of memory -and- the size of + * the very first block in the device-tree as it represents + * the RMA on ppc64 server + */ + if (base < memstart_addr) { + memstart_addr = base; + first_memblock_size = size; + } /* Add the chunk to the MEMBLOCK list */ memblock_add(base, size); @@ -698,6 +701,7 @@ void __init early_init_devtree(void *params) of_scan_flat_dt(early_init_dt_scan_root, NULL); of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL); + setup_initial_memory_limit(memstart_addr, first_memblock_size); /* Save command line for /proc/cmdline and then parse parameters */ strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); diff --git a/trunk/arch/powerpc/kernel/prom_init.c b/trunk/arch/powerpc/kernel/prom_init.c index 941ff4dbc567..c016033ba78d 100644 --- a/trunk/arch/powerpc/kernel/prom_init.c +++ b/trunk/arch/powerpc/kernel/prom_init.c @@ -335,6 +335,7 @@ static void __init prom_printf(const char *format, ...) const char *p, *q, *s; va_list args; unsigned long v; + long vs; struct prom_t *_prom = &RELOC(prom); va_start(args, format); @@ -368,12 +369,35 @@ static void __init prom_printf(const char *format, ...) 
v = va_arg(args, unsigned long); prom_print_hex(v); break; + case 'd': + ++q; + vs = va_arg(args, int); + if (vs < 0) { + prom_print(RELOC("-")); + vs = -vs; + } + prom_print_dec(vs); + break; case 'l': ++q; - if (*q == 'u') { /* '%lu' */ + if (*q == 0) + break; + else if (*q == 'x') { + ++q; + v = va_arg(args, unsigned long); + prom_print_hex(v); + } else if (*q == 'u') { /* '%lu' */ ++q; v = va_arg(args, unsigned long); prom_print_dec(v); + } else if (*q == 'd') { /* %ld */ + ++q; + vs = va_arg(args, long); + if (vs < 0) { + prom_print(RELOC("-")); + vs = -vs; + } + prom_print_dec(vs); } break; } @@ -676,8 +700,10 @@ static void __init early_cmdline_parse(void) #endif /* CONFIG_PCI_MSI */ #ifdef CONFIG_PPC_SMLPAR #define OV5_CMO 0x80 /* Cooperative Memory Overcommitment */ +#define OV5_XCMO 0x40 /* Page Coalescing */ #else #define OV5_CMO 0x00 +#define OV5_XCMO 0x00 #endif #define OV5_TYPE1_AFFINITY 0x80 /* Type 1 NUMA affinity */ @@ -732,7 +758,7 @@ static unsigned char ibm_architecture_vec[] = { OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY | OV5_DONATE_DEDICATE_CPU | OV5_MSI, 0, - OV5_CMO, + OV5_CMO | OV5_XCMO, OV5_TYPE1_AFFINITY, 0, 0, diff --git a/trunk/arch/powerpc/kernel/ptrace.c b/trunk/arch/powerpc/kernel/ptrace.c index 55613e33e263..a6ae1cfad86c 100644 --- a/trunk/arch/powerpc/kernel/ptrace.c +++ b/trunk/arch/powerpc/kernel/ptrace.c @@ -933,12 +933,16 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, if (data && !(data & DABR_TRANSLATION)) return -EIO; #ifdef CONFIG_HAVE_HW_BREAKPOINT + if (ptrace_get_breakpoints(task) < 0) + return -ESRCH; + bp = thread->ptrace_bps[0]; if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) { if (bp) { unregister_hw_breakpoint(bp); thread->ptrace_bps[0] = NULL; } + ptrace_put_breakpoints(task); return 0; } if (bp) { @@ -948,9 +952,12 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, (DABR_DATA_WRITE | DABR_DATA_READ), &attr.bp_type); ret = modify_user_hw_breakpoint(bp, &attr); - if (ret) + if (ret) { + ptrace_put_breakpoints(task); return ret; + } thread->ptrace_bps[0] = bp; + ptrace_put_breakpoints(task); thread->dabr = data; return 0; } @@ -965,9 +972,12 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, ptrace_triggered, task); if (IS_ERR(bp)) { thread->ptrace_bps[0] = NULL; + ptrace_put_breakpoints(task); return PTR_ERR(bp); } + ptrace_put_breakpoints(task); + #endif /* CONFIG_HAVE_HW_BREAKPOINT */ /* Move contents to the DABR register */ diff --git a/trunk/arch/powerpc/kernel/rtas.c b/trunk/arch/powerpc/kernel/rtas.c index 2097f2b3cba8..271ff6318eda 100644 --- a/trunk/arch/powerpc/kernel/rtas.c +++ b/trunk/arch/powerpc/kernel/rtas.c @@ -42,6 +42,7 @@ #include #include #include +#include struct rtas_t rtas = { .lock = __ARCH_SPIN_LOCK_UNLOCKED @@ -494,7 +495,7 @@ unsigned int rtas_busy_delay(int status) might_sleep(); ms = rtas_busy_delay_time(status); - if (ms) + if (ms && need_resched()) msleep(ms); return ms; @@ -731,6 +732,7 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w atomic_set(&data->error, rc); start_topology_update(); + pSeries_coalesce_init(); if (wake_when_done) { atomic_set(&data->done, 1); diff --git a/trunk/arch/powerpc/kernel/setup-common.c b/trunk/arch/powerpc/kernel/setup-common.c index 21f30cb68077..79fca2651b65 100644 --- a/trunk/arch/powerpc/kernel/setup-common.c +++ b/trunk/arch/powerpc/kernel/setup-common.c @@ -381,7 +381,7 @@ static void __init cpu_init_thread_core_maps(int tpc) int i; 
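
With the additions above, prom_printf() now understands %d, %lx and %ld (with a minus sign for negative values) alongside the existing %lu and hex handling. The sketch below is a user-space stand-in that mirrors the new specifier parsing so it can be compiled and run on its own; plain printf() replaces prom_print_dec()/prom_print_hex(), and the example call in main() is illustrative, not taken from this patch.

    #include <stdarg.h>
    #include <stdio.h>

    /* Stand-alone sketch of the specifier handling added above. */
    static void mini_prom_printf(const char *format, ...)
    {
        va_list args;
        const char *q;
        unsigned long v;
        long vs;

        va_start(args, format);
        for (q = format; *q; q++) {
            if (*q != '%' || q[1] == '\0') {
                putchar(*q);
                continue;
            }
            switch (*++q) {
            case 'd':                         /* new: signed int */
                vs = va_arg(args, int);
                if (vs < 0) {
                    putchar('-');
                    vs = -vs;
                }
                printf("%lu", (unsigned long)vs);
                break;
            case 'l':
                if (q[1] == 'x') {            /* new: %lx */
                    q++;
                    v = va_arg(args, unsigned long);
                    printf("%lx", v);
                } else if (q[1] == 'd') {     /* new: %ld */
                    q++;
                    vs = va_arg(args, long);
                    if (vs < 0) {
                        putchar('-');
                        vs = -vs;
                    }
                    printf("%lu", (unsigned long)vs);
                }
                break;
            }
        }
        va_end(args);
    }

    int main(void)
    {
        mini_prom_printf("rc %d, addr 0x%lx, delta %ld\n", -3, 0xdeadbeefUL, -42L);
        return 0;
    }
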
threads_per_core = tpc; - threads_core_mask = CPU_MASK_NONE; + cpumask_clear(&threads_core_mask); /* This implementation only supports power of 2 number of threads * for simplicity and performance @@ -390,7 +390,7 @@ static void __init cpu_init_thread_core_maps(int tpc) BUG_ON(tpc != (1 << threads_shift)); for (i = 0; i < tpc; i++) - cpu_set(i, threads_core_mask); + cpumask_set_cpu(i, &threads_core_mask); printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n", tpc, tpc > 1 ? "s" : ""); @@ -404,7 +404,7 @@ static void __init cpu_init_thread_core_maps(int tpc) * cpu_present_mask * * Having the possible map set up early allows us to restrict allocations - * of things like irqstacks to num_possible_cpus() rather than NR_CPUS. + * of things like irqstacks to nr_cpu_ids rather than NR_CPUS. * * We do not initialize the online map here; cpus set their own bits in * cpu_online_mask as they come up. @@ -424,7 +424,7 @@ void __init smp_setup_cpu_maps(void) DBG("smp_setup_cpu_maps()\n"); - while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) { + while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < nr_cpu_ids) { const int *intserv; int j, len; @@ -443,7 +443,7 @@ void __init smp_setup_cpu_maps(void) intserv = &cpu; /* assume logical == phys */ } - for (j = 0; j < nthreads && cpu < NR_CPUS; j++) { + for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) { DBG(" thread %d -> cpu %d (hard id %d)\n", j, cpu, intserv[j]); set_cpu_present(cpu, true); @@ -483,12 +483,12 @@ void __init smp_setup_cpu_maps(void) if (cpu_has_feature(CPU_FTR_SMT)) maxcpus *= nthreads; - if (maxcpus > NR_CPUS) { + if (maxcpus > nr_cpu_ids) { printk(KERN_WARNING "Partition configured for %d cpus, " "operating system maximum is %d.\n", - maxcpus, NR_CPUS); - maxcpus = NR_CPUS; + maxcpus, nr_cpu_ids); + maxcpus = nr_cpu_ids; } else printk(KERN_INFO "Partition configured for %d cpus.\n", maxcpus); @@ -510,7 +510,7 @@ void __init smp_setup_cpu_maps(void) cpu_init_thread_core_maps(nthreads); /* Now that possible cpus are set, set nr_cpu_ids for later use */ - nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1; + setup_nr_cpu_ids(); free_unused_pacas(); } @@ -602,6 +602,10 @@ int check_legacy_ioport(unsigned long base_port) * name instead */ if (!np) np = of_find_node_by_name(NULL, "8042"); + if (np) { + of_i8042_kbd_irq = 1; + of_i8042_aux_irq = 12; + } break; case FDC_BASE: /* FDC1 */ np = of_find_node_by_type(NULL, "fdc"); diff --git a/trunk/arch/powerpc/kernel/setup_32.c b/trunk/arch/powerpc/kernel/setup_32.c index 1d2fbc905303..620d792b52e4 100644 --- a/trunk/arch/powerpc/kernel/setup_32.c +++ b/trunk/arch/powerpc/kernel/setup_32.c @@ -48,6 +48,7 @@ extern void bootx_init(unsigned long r4, unsigned long phys); int boot_cpuid = -1; EXPORT_SYMBOL_GPL(boot_cpuid); +int __initdata boot_cpu_count; int boot_cpuid_phys; int smp_hw_index[NR_CPUS]; diff --git a/trunk/arch/powerpc/kernel/setup_64.c b/trunk/arch/powerpc/kernel/setup_64.c index 5a0401fcaebd..a88bf2713d41 100644 --- a/trunk/arch/powerpc/kernel/setup_64.c +++ b/trunk/arch/powerpc/kernel/setup_64.c @@ -62,6 +62,7 @@ #include #include #include +#include #include "setup.h" @@ -72,6 +73,7 @@ #endif int boot_cpuid = 0; +int __initdata boot_cpu_count; u64 ppc64_pft_size; /* Pick defaults since we might want to patch instructions @@ -233,6 +235,7 @@ void early_setup_secondary(void) void smp_release_cpus(void) { unsigned long *ptr; + int i; DBG(" -> smp_release_cpus()\n"); @@ -245,7 +248,16 @@ void smp_release_cpus(void) ptr = (unsigned long 
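
The open-coded computation of nr_cpu_ids at the end of smp_setup_cpu_maps() above is replaced by the generic setup_nr_cpu_ids() helper; the intent is that the helper produces exactly what the removed line did. A sketch of that expected behaviour only, not the real kernel/smp.c definition:

    #include <linux/bitops.h>
    #include <linux/cpumask.h>

    /* Expected effect of setup_nr_cpu_ids(): cap nr_cpu_ids at the highest
     * possible CPU number plus one, like the removed open-coded line. */
    static void setup_nr_cpu_ids_sketch(void)
    {
        nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
    }
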
*)((unsigned long)&__secondary_hold_spinloop - PHYSICAL_START); *ptr = __pa(generic_secondary_smp_init); - mb(); + + /* And wait a bit for them to catch up */ + for (i = 0; i < 100000; i++) { + mb(); + HMT_low(); + if (boot_cpu_count == 0) + break; + udelay(1); + } + DBG("boot_cpu_count = %d\n", boot_cpu_count); DBG(" <- smp_release_cpus()\n"); } @@ -423,17 +435,30 @@ void __init setup_system(void) DBG(" <- setup_system()\n"); } -static u64 slb0_limit(void) +/* This returns the limit below which memory accesses to the linear + * mapping are guarnateed not to cause a TLB or SLB miss. This is + * used to allocate interrupt or emergency stacks for which our + * exception entry path doesn't deal with being interrupted. + */ +static u64 safe_stack_limit(void) { - if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) { +#ifdef CONFIG_PPC_BOOK3E + /* Freescale BookE bolts the entire linear mapping */ + if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) + return linear_map_top; + /* Other BookE, we assume the first GB is bolted */ + return 1ul << 30; +#else + /* BookS, the first segment is bolted */ + if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) return 1UL << SID_SHIFT_1T; - } return 1UL << SID_SHIFT; +#endif } static void __init irqstack_early_init(void) { - u64 limit = slb0_limit(); + u64 limit = safe_stack_limit(); unsigned int i; /* @@ -453,6 +478,9 @@ static void __init irqstack_early_init(void) #ifdef CONFIG_PPC_BOOK3E static void __init exc_lvl_early_init(void) { + extern unsigned int interrupt_base_book3e; + extern unsigned int exc_debug_debug_book3e; + unsigned int i; for_each_possible_cpu(i) { @@ -463,6 +491,10 @@ static void __init exc_lvl_early_init(void) mcheckirq_ctx[i] = (struct thread_info *) __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); } + + if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) + patch_branch(&interrupt_base_book3e + (0x040 / 4) + 1, + (unsigned long)&exc_debug_debug_book3e, 0); } #else #define exc_lvl_early_init() @@ -486,7 +518,7 @@ static void __init emergency_stack_init(void) * bringup, we need to get at them in real mode. This means they * must also be within the RMO region. */ - limit = min(slb0_limit(), ppc64_rma_size); + limit = min(safe_stack_limit(), ppc64_rma_size); for_each_possible_cpu(i) { unsigned long sp; diff --git a/trunk/arch/powerpc/kernel/signal_64.c b/trunk/arch/powerpc/kernel/signal_64.c index 27c4a4584f80..da989fff19cc 100644 --- a/trunk/arch/powerpc/kernel/signal_64.c +++ b/trunk/arch/powerpc/kernel/signal_64.c @@ -381,7 +381,7 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5, regs, uc, &uc->uc_mcontext); #endif if (show_unhandled_signals && printk_ratelimit()) - printk(regs->msr & MSR_SF ? fmt64 : fmt32, + printk(regs->msr & MSR_64BIT ? fmt64 : fmt32, current->comm, current->pid, "rt_sigreturn", (long)uc, regs->nip, regs->link); @@ -469,7 +469,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, regs, frame, newsp); #endif if (show_unhandled_signals && printk_ratelimit()) - printk(regs->msr & MSR_SF ? fmt64 : fmt32, + printk(regs->msr & MSR_64BIT ? 
fmt64 : fmt32, current->comm, current->pid, "setup_rt_frame", (long)frame, regs->nip, regs->link); diff --git a/trunk/arch/powerpc/kernel/smp.c b/trunk/arch/powerpc/kernel/smp.c index cbdbb14be4b0..4a6f2ec7e761 100644 --- a/trunk/arch/powerpc/kernel/smp.c +++ b/trunk/arch/powerpc/kernel/smp.c @@ -95,7 +95,7 @@ int smt_enabled_at_boot = 1; static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL; #ifdef CONFIG_PPC64 -void __devinit smp_generic_kick_cpu(int nr) +int __devinit smp_generic_kick_cpu(int nr) { BUG_ON(nr < 0 || nr >= NR_CPUS); @@ -106,37 +106,10 @@ void __devinit smp_generic_kick_cpu(int nr) */ paca[nr].cpu_start = 1; smp_mb(); -} -#endif -void smp_message_recv(int msg) -{ - switch(msg) { - case PPC_MSG_CALL_FUNCTION: - generic_smp_call_function_interrupt(); - break; - case PPC_MSG_RESCHEDULE: - /* we notice need_resched on exit */ - break; - case PPC_MSG_CALL_FUNC_SINGLE: - generic_smp_call_function_single_interrupt(); - break; - case PPC_MSG_DEBUGGER_BREAK: - if (crash_ipi_function_ptr) { - crash_ipi_function_ptr(get_irq_regs()); - break; - } -#ifdef CONFIG_DEBUGGER - debugger_ipi(get_irq_regs()); - break; -#endif /* CONFIG_DEBUGGER */ - /* FALLTHROUGH */ - default: - printk("SMP %d: smp_message_recv(): unknown msg %d\n", - smp_processor_id(), msg); - break; - } + return 0; } +#endif static irqreturn_t call_function_action(int irq, void *data) { @@ -146,7 +119,7 @@ static irqreturn_t call_function_action(int irq, void *data) static irqreturn_t reschedule_action(int irq, void *data) { - /* we just need the return path side effect of checking need_resched */ + scheduler_ipi(); return IRQ_HANDLED; } @@ -156,9 +129,17 @@ static irqreturn_t call_function_single_action(int irq, void *data) return IRQ_HANDLED; } -static irqreturn_t debug_ipi_action(int irq, void *data) +irqreturn_t debug_ipi_action(int irq, void *data) { - smp_message_recv(PPC_MSG_DEBUGGER_BREAK); + if (crash_ipi_function_ptr) { + crash_ipi_function_ptr(get_irq_regs()); + return IRQ_HANDLED; + } + +#ifdef CONFIG_DEBUGGER + debugger_ipi(get_irq_regs()); +#endif /* CONFIG_DEBUGGER */ + return IRQ_HANDLED; } @@ -197,6 +178,66 @@ int smp_request_message_ipi(int virq, int msg) return err; } +#ifdef CONFIG_PPC_SMP_MUXED_IPI +struct cpu_messages { + int messages; /* current messages */ + unsigned long data; /* data for cause ipi */ +}; +static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message); + +void smp_muxed_ipi_set_data(int cpu, unsigned long data) +{ + struct cpu_messages *info = &per_cpu(ipi_message, cpu); + + info->data = data; +} + +void smp_muxed_ipi_message_pass(int cpu, int msg) +{ + struct cpu_messages *info = &per_cpu(ipi_message, cpu); + char *message = (char *)&info->messages; + + message[msg] = 1; + mb(); + smp_ops->cause_ipi(cpu, info->data); +} + +void smp_muxed_ipi_resend(void) +{ + struct cpu_messages *info = &__get_cpu_var(ipi_message); + + if (info->messages) + smp_ops->cause_ipi(smp_processor_id(), info->data); +} + +irqreturn_t smp_ipi_demux(void) +{ + struct cpu_messages *info = &__get_cpu_var(ipi_message); + unsigned int all; + + mb(); /* order any irq clear */ + + do { + all = xchg_local(&info->messages, 0); + +#ifdef __BIG_ENDIAN + if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION))) + generic_smp_call_function_interrupt(); + if (all & (1 << (24 - 8 * PPC_MSG_RESCHEDULE))) + scheduler_ipi(); + if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNC_SINGLE))) + generic_smp_call_function_single_interrupt(); + if (all & (1 << (24 - 8 * PPC_MSG_DEBUGGER_BREAK))) + debug_ipi_action(0, NULL); 
+#else +#error Unsupported ENDIAN +#endif + } while (info->messages); + + return IRQ_HANDLED; +} +#endif /* CONFIG_PPC_SMP_MUXED_IPI */ + void smp_send_reschedule(int cpu) { if (likely(smp_ops)) @@ -216,11 +257,18 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask) smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION); } -#ifdef CONFIG_DEBUGGER -void smp_send_debugger_break(int cpu) +#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) +void smp_send_debugger_break(void) { - if (likely(smp_ops)) - smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK); + int cpu; + int me = raw_smp_processor_id(); + + if (unlikely(!smp_ops)) + return; + + for_each_online_cpu(cpu) + if (cpu != me) + smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK); } #endif @@ -228,9 +276,9 @@ void smp_send_debugger_break(int cpu) void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) { crash_ipi_function_ptr = crash_ipi_callback; - if (crash_ipi_callback && smp_ops) { + if (crash_ipi_callback) { mb(); - smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK); + smp_send_debugger_break(); } } #endif @@ -410,8 +458,6 @@ int __cpuinit __cpu_up(unsigned int cpu) { int rc, c; - secondary_ti = current_set[cpu]; - if (smp_ops == NULL || (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) return -EINVAL; @@ -421,6 +467,8 @@ int __cpuinit __cpu_up(unsigned int cpu) if (rc) return rc; + secondary_ti = current_set[cpu]; + /* Make sure callin-map entry is 0 (can be leftover a CPU * hotplug */ @@ -434,7 +482,11 @@ int __cpuinit __cpu_up(unsigned int cpu) /* wake up cpus */ DBG("smp: kicking cpu %d\n", cpu); - smp_ops->kick_cpu(cpu); + rc = smp_ops->kick_cpu(cpu); + if (rc) { + pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc); + return rc; + } /* * wait to see if the cpu made a callin (is actually up). @@ -507,7 +559,7 @@ int cpu_first_thread_of_core(int core) } EXPORT_SYMBOL_GPL(cpu_first_thread_of_core); -/* Must be called when no change can occur to cpu_present_map, +/* Must be called when no change can occur to cpu_present_mask, * i.e. during cpu online or offline. 
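
The muxed IPI pair above packs four message types into one 32-bit word: smp_muxed_ipi_message_pass() sets a single byte through the char alias, and smp_ipi_demux() tests the matching bit with 1 << (24 - 8 * msg), which is also why the big-endian-only #error exists. The stand-alone sketch below demonstrates that byte-to-bit correspondence by building the big-endian byte image explicitly, so it runs on any host; it assumes the four message numbers are 0..3, as the shifts in the hunk imply.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int msg;

        for (msg = 0; msg < 4; msg++) {
            unsigned char bytes[4] = { 0, 0, 0, 0 };
            uint32_t word;

            /* What "message[msg] = 1" does to the word on a big-endian CPU:
             * byte 0 is the most significant byte. */
            bytes[msg] = 1;
            word = ((uint32_t)bytes[0] << 24) | ((uint32_t)bytes[1] << 16) |
                   ((uint32_t)bytes[2] << 8)  |  (uint32_t)bytes[3];

            /* ...and the bit smp_ipi_demux() tests for that message. */
            printf("msg %d: word %#010x, test bit %#010x, match=%d\n",
                   msg, (unsigned)word, 1u << (24 - 8 * msg),
                   word == (1u << (24 - 8 * msg)));
        }
        return 0;
    }
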
*/ static struct device_node *cpu_to_l2cache(int cpu) @@ -608,7 +660,7 @@ void __init smp_cpus_done(unsigned int max_cpus) * se we pin us down to CPU 0 for a short while */ alloc_cpumask_var(&old_mask, GFP_NOWAIT); - cpumask_copy(old_mask, ¤t->cpus_allowed); + cpumask_copy(old_mask, tsk_cpus_allowed(current)); set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid)); if (smp_ops && smp_ops->setup_cpu) diff --git a/trunk/arch/powerpc/kernel/sysfs.c b/trunk/arch/powerpc/kernel/sysfs.c index c0d8c2006bf4..f0f2199e64e1 100644 --- a/trunk/arch/powerpc/kernel/sysfs.c +++ b/trunk/arch/powerpc/kernel/sysfs.c @@ -182,6 +182,41 @@ static SYSDEV_ATTR(mmcra, 0600, show_mmcra, store_mmcra); static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL); static SYSDEV_ATTR(dscr, 0600, show_dscr, store_dscr); static SYSDEV_ATTR(purr, 0600, show_purr, store_purr); + +unsigned long dscr_default = 0; +EXPORT_SYMBOL(dscr_default); + +static ssize_t show_dscr_default(struct sysdev_class *class, + struct sysdev_class_attribute *attr, char *buf) +{ + return sprintf(buf, "%lx\n", dscr_default); +} + +static ssize_t __used store_dscr_default(struct sysdev_class *class, + struct sysdev_class_attribute *attr, const char *buf, + size_t count) +{ + unsigned long val; + int ret = 0; + + ret = sscanf(buf, "%lx", &val); + if (ret != 1) + return -EINVAL; + dscr_default = val; + + return count; +} + +static SYSDEV_CLASS_ATTR(dscr_default, 0600, + show_dscr_default, store_dscr_default); + +static void sysfs_create_dscr_default(void) +{ + int err = 0; + if (cpu_has_feature(CPU_FTR_DSCR)) + err = sysfs_create_file(&cpu_sysdev_class.kset.kobj, + &attr_dscr_default.attr); +} #endif /* CONFIG_PPC64 */ #ifdef HAS_PPC_PMC_PA6T @@ -617,6 +652,9 @@ static int __init topology_init(void) if (cpu_online(cpu)) register_cpu_online(cpu); } +#ifdef CONFIG_PPC64 + sysfs_create_dscr_default(); +#endif /* CONFIG_PPC64 */ return 0; } diff --git a/trunk/arch/powerpc/kernel/time.c b/trunk/arch/powerpc/kernel/time.c index 375480c56eb9..f33acfd872ad 100644 --- a/trunk/arch/powerpc/kernel/time.c +++ b/trunk/arch/powerpc/kernel/time.c @@ -229,6 +229,9 @@ static u64 scan_dispatch_log(u64 stop_tb) u64 stolen = 0; u64 dtb; + if (!dtl) + return 0; + if (i == vpa->dtl_idx) return 0; while (i < vpa->dtl_idx) { diff --git a/trunk/arch/powerpc/kernel/traps.c b/trunk/arch/powerpc/kernel/traps.c index 5ddb801bc154..b13306b0d925 100644 --- a/trunk/arch/powerpc/kernel/traps.c +++ b/trunk/arch/powerpc/kernel/traps.c @@ -143,7 +143,6 @@ int die(const char *str, struct pt_regs *regs, long err) #endif printk("%s\n", ppc_md.name ? ppc_md.name : ""); - sysfs_printk_last_file(); if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP) return 1; @@ -199,7 +198,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) } else if (show_unhandled_signals && unhandled_signal(current, signr) && printk_ratelimit()) { - printk(regs->msr & MSR_SF ? fmt64 : fmt32, + printk(regs->msr & MSR_64BIT ? fmt64 : fmt32, current->comm, current->pid, signr, addr, regs->nip, regs->link, code); } @@ -221,7 +220,7 @@ void system_reset_exception(struct pt_regs *regs) } #ifdef CONFIG_KEXEC - cpu_set(smp_processor_id(), cpus_in_sr); + cpumask_set_cpu(smp_processor_id(), &cpus_in_sr); #endif die("System Reset", regs, SIGABRT); @@ -909,6 +908,26 @@ static int emulate_instruction(struct pt_regs *regs) return emulate_isel(regs, instword); } +#ifdef CONFIG_PPC64 + /* Emulate the mfspr rD, DSCR. 
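
The new dscr_default attribute above is registered against the cpu sysdev class, so it should appear as a single system-wide file; on these kernels that would be expected at /sys/devices/system/cpu/dscr_default, but that path is an assumption based on where cpu_sysdev_class attributes land, and the value below is arbitrary. The store routine parses hex (%lx). A minimal user-space sketch of setting the default:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Assumed location of the attribute added above; verify on a real system. */
    #define DSCR_DEFAULT_PATH "/sys/devices/system/cpu/dscr_default"

    int main(void)
    {
        const char *val = "10\n";    /* hex, as parsed by sscanf(buf, "%lx", ...) */
        int fd = open(DSCR_DEFAULT_PATH, O_WRONLY);

        if (fd < 0) {
            perror("open " DSCR_DEFAULT_PATH);
            return 1;
        }
        if (write(fd, val, strlen(val)) < 0)
            perror("write");
        close(fd);
        return 0;
    }

Per the copy_thread() hunk earlier in this patch, a non-zero default is inherited by newly created threads (dscr_inherit gets set), while a thread that has explicitly written the DSCR keeps its own value.
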
*/ + if (((instword & PPC_INST_MFSPR_DSCR_MASK) == PPC_INST_MFSPR_DSCR) && + cpu_has_feature(CPU_FTR_DSCR)) { + PPC_WARN_EMULATED(mfdscr, regs); + rd = (instword >> 21) & 0x1f; + regs->gpr[rd] = mfspr(SPRN_DSCR); + return 0; + } + /* Emulate the mtspr DSCR, rD. */ + if (((instword & PPC_INST_MTSPR_DSCR_MASK) == PPC_INST_MTSPR_DSCR) && + cpu_has_feature(CPU_FTR_DSCR)) { + PPC_WARN_EMULATED(mtdscr, regs); + rd = (instword >> 21) & 0x1f; + mtspr(SPRN_DSCR, regs->gpr[rd]); + current->thread.dscr_inherit = 1; + return 0; + } +#endif + return -EINVAL; } @@ -1506,6 +1525,10 @@ struct ppc_emulated ppc_emulated = { #ifdef CONFIG_VSX WARN_EMULATED_SETUP(vsx), #endif +#ifdef CONFIG_PPC64 + WARN_EMULATED_SETUP(mfdscr), + WARN_EMULATED_SETUP(mtdscr), +#endif }; u32 ppc_warn_emulated; diff --git a/trunk/arch/powerpc/kernel/udbg.c b/trunk/arch/powerpc/kernel/udbg.c index e39cad83c884..23d65abbedce 100644 --- a/trunk/arch/powerpc/kernel/udbg.c +++ b/trunk/arch/powerpc/kernel/udbg.c @@ -62,6 +62,8 @@ void __init udbg_early_init(void) udbg_init_cpm(); #elif defined(CONFIG_PPC_EARLY_DEBUG_USBGECKO) udbg_init_usbgecko(); +#elif defined(CONFIG_PPC_EARLY_DEBUG_WSP) + udbg_init_wsp(); #endif #ifdef CONFIG_PPC_EARLY_DEBUG diff --git a/trunk/arch/powerpc/kernel/udbg_16550.c b/trunk/arch/powerpc/kernel/udbg_16550.c index baa33a7517bc..6837f839ab78 100644 --- a/trunk/arch/powerpc/kernel/udbg_16550.c +++ b/trunk/arch/powerpc/kernel/udbg_16550.c @@ -11,6 +11,7 @@ #include #include #include +#include extern u8 real_readb(volatile u8 __iomem *addr); extern void real_writeb(u8 data, volatile u8 __iomem *addr); @@ -298,3 +299,53 @@ void __init udbg_init_40x_realmode(void) udbg_getc_poll = NULL; } #endif /* CONFIG_PPC_EARLY_DEBUG_40x */ + +#ifdef CONFIG_PPC_EARLY_DEBUG_WSP +static void udbg_wsp_flush(void) +{ + if (udbg_comport) { + while ((readb(&udbg_comport->lsr) & LSR_THRE) == 0) + /* wait for idle */; + } +} + +static void udbg_wsp_putc(char c) +{ + if (udbg_comport) { + if (c == '\n') + udbg_wsp_putc('\r'); + udbg_wsp_flush(); + writeb(c, &udbg_comport->thr); eieio(); + } +} + +static int udbg_wsp_getc(void) +{ + if (udbg_comport) { + while ((readb(&udbg_comport->lsr) & LSR_DR) == 0) + ; /* wait for char */ + return readb(&udbg_comport->rbr); + } + return -1; +} + +static int udbg_wsp_getc_poll(void) +{ + if (udbg_comport) + if (readb(&udbg_comport->lsr) & LSR_DR) + return readb(&udbg_comport->rbr); + return -1; +} + +void __init udbg_init_wsp(void) +{ + udbg_comport = (struct NS16550 __iomem *)WSP_UART_VIRT; + + udbg_init_uart(udbg_comport, 57600, 50000000); + + udbg_putc = udbg_wsp_putc; + udbg_flush = udbg_wsp_flush; + udbg_getc = udbg_wsp_getc; + udbg_getc_poll = udbg_wsp_getc_poll; +} +#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */ diff --git a/trunk/arch/powerpc/kernel/vector.S b/trunk/arch/powerpc/kernel/vector.S index 9de6f396cf85..4d5a3edff49e 100644 --- a/trunk/arch/powerpc/kernel/vector.S +++ b/trunk/arch/powerpc/kernel/vector.S @@ -102,7 +102,7 @@ _GLOBAL(giveup_altivec) MTMSRD(r5) /* enable use of VMX now */ isync PPC_LCMPI 0,r3,0 - beqlr- /* if no previous owner, done */ + beqlr /* if no previous owner, done */ addi r3,r3,THREAD /* want THREAD of task */ PPC_LL r5,PT_REGS(r3) PPC_LCMPI 0,r5,0 diff --git a/trunk/arch/powerpc/kvm/book3s.c b/trunk/arch/powerpc/kvm/book3s.c index c961de40c676..0f95b5cce033 100644 --- a/trunk/arch/powerpc/kvm/book3s.c +++ b/trunk/arch/powerpc/kvm/book3s.c @@ -236,7 +236,7 @@ void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu) int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu) { - 
return test_bit(BOOK3S_INTERRUPT_DECREMENTER >> 7, &vcpu->arch.pending_exceptions); + return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); } void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu) diff --git a/trunk/arch/powerpc/kvm/book3s_rmhandlers.S b/trunk/arch/powerpc/kvm/book3s_rmhandlers.S index 2b9c9088d00e..1a1b34487e71 100644 --- a/trunk/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/trunk/arch/powerpc/kvm/book3s_rmhandlers.S @@ -35,9 +35,7 @@ #if defined(CONFIG_PPC_BOOK3S_64) -#define LOAD_SHADOW_VCPU(reg) \ - mfspr reg, SPRN_SPRG_PACA - +#define LOAD_SHADOW_VCPU(reg) GET_PACA(reg) #define SHADOW_VCPU_OFF PACA_KVM_SVCPU #define MSR_NOIRQ MSR_KERNEL & ~(MSR_IR | MSR_DR) #define FUNC(name) GLUE(.,name) @@ -72,7 +70,7 @@ .global kvmppc_trampoline_\intno kvmppc_trampoline_\intno: - mtspr SPRN_SPRG_SCRATCH0, r13 /* Save r13 */ + SET_SCRATCH0(r13) /* Save r13 */ /* * First thing to do is to find out if we're coming @@ -91,7 +89,7 @@ kvmppc_trampoline_\intno: lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13) mtcr r12 PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13) - mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */ + GET_SCRATCH0(r13) /* r13 = original r13 */ b kvmppc_resume_\intno /* Get back original handler */ /* Now we know we're handling a KVM guest */ @@ -114,6 +112,9 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_MACHINE_CHECK INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL +#ifdef CONFIG_PPC_BOOK3S_64 +INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL_HV +#endif INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_FP_UNAVAIL @@ -158,7 +159,7 @@ kvmppc_handler_skip_ins: lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13) mtcr r12 PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13) - mfspr r13, SPRN_SPRG_SCRATCH0 + GET_SCRATCH0(r13) /* And get back into the code */ RFI diff --git a/trunk/arch/powerpc/kvm/book3s_segment.S b/trunk/arch/powerpc/kvm/book3s_segment.S index 7c52ed0b7051..451264274b8c 100644 --- a/trunk/arch/powerpc/kvm/book3s_segment.S +++ b/trunk/arch/powerpc/kvm/book3s_segment.S @@ -155,14 +155,20 @@ kvmppc_handler_trampoline_exit: PPC_LL r2, (SHADOW_VCPU_OFF + SVCPU_HOST_R2)(r13) /* Save guest PC and MSR */ - mfsrr0 r3 + andi. r0,r12,0x2 + beq 1f + mfspr r3,SPRN_HSRR0 + mfspr r4,SPRN_HSRR1 + andi. 
r12,r12,0x3ffd + b 2f +1: mfsrr0 r3 mfsrr1 r4 - +2: PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_PC)(r13) PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_SHADOW_SRR1)(r13) /* Get scratch'ed off registers */ - mfspr r9, SPRN_SPRG_SCRATCH0 + GET_SCRATCH0(r9) PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13) lwz r7, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13) diff --git a/trunk/arch/powerpc/lib/alloc.c b/trunk/arch/powerpc/lib/alloc.c index f53e09c7dac7..13b676c20d12 100644 --- a/trunk/arch/powerpc/lib/alloc.c +++ b/trunk/arch/powerpc/lib/alloc.c @@ -6,14 +6,6 @@ #include -void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask) -{ - if (mem_init_done) - return kmalloc(size, mask); - else - return alloc_bootmem(size); -} - void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask) { void *p; diff --git a/trunk/arch/powerpc/lib/copypage_64.S b/trunk/arch/powerpc/lib/copypage_64.S index 4d4eeb900486..53dcb6b1b708 100644 --- a/trunk/arch/powerpc/lib/copypage_64.S +++ b/trunk/arch/powerpc/lib/copypage_64.S @@ -6,6 +6,7 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ +#include #include #include #include @@ -15,9 +16,9 @@ PPC64_CACHES: .tc ppc64_caches[TC],ppc64_caches .section ".text" - -_GLOBAL(copy_4K_page) - li r5,4096 /* 4K page size */ +_GLOBAL(copy_page) + lis r5,PAGE_SIZE@h + ori r5,r5,PAGE_SIZE@l BEGIN_FTR_SECTION ld r10,PPC64_CACHES@toc(r2) lwz r11,DCACHEL1LOGLINESIZE(r10) /* log2 of cache line size */ diff --git a/trunk/arch/powerpc/lib/devres.c b/trunk/arch/powerpc/lib/devres.c index deac4d30daf4..e91615abae66 100644 --- a/trunk/arch/powerpc/lib/devres.c +++ b/trunk/arch/powerpc/lib/devres.c @@ -9,11 +9,11 @@ #include /* devres_*(), devm_ioremap_release() */ #include -#include /* ioremap_flags() */ +#include /* ioremap_prot() */ #include /* EXPORT_SYMBOL() */ /** - * devm_ioremap_prot - Managed ioremap_flags() + * devm_ioremap_prot - Managed ioremap_prot() * @dev: Generic device to remap IO address for * @offset: BUS offset to map * @size: Size of map @@ -31,7 +31,7 @@ void __iomem *devm_ioremap_prot(struct device *dev, resource_size_t offset, if (!ptr) return NULL; - addr = ioremap_flags(offset, size, flags); + addr = ioremap_prot(offset, size, flags); if (addr) { *ptr = addr; devres_add(dev, ptr); diff --git a/trunk/arch/powerpc/lib/sstep.c b/trunk/arch/powerpc/lib/sstep.c index ae5189ab0049..9a52349874ee 100644 --- a/trunk/arch/powerpc/lib/sstep.c +++ b/trunk/arch/powerpc/lib/sstep.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -44,6 +45,18 @@ extern int do_lxvd2x(int rn, unsigned long ea); extern int do_stxvd2x(int rn, unsigned long ea); #endif +/* + * Emulate the truncation of 64 bit values in 32-bit mode. + */ +static unsigned long truncate_if_32bit(unsigned long msr, unsigned long val) +{ +#ifdef __powerpc64__ + if ((msr & MSR_64BIT) == 0) + val &= 0xffffffffUL; +#endif + return val; +} + /* * Determine whether a conditional branch instruction would branch. 
*/ @@ -90,11 +103,8 @@ static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs if (instr & 0x04000000) /* update forms */ regs->gpr[ra] = ea; } -#ifdef __powerpc64__ - if (!(regs->msr & MSR_SF)) - ea &= 0xffffffffUL; -#endif - return ea; + + return truncate_if_32bit(regs->msr, ea); } #ifdef __powerpc64__ @@ -113,9 +123,8 @@ static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *reg if ((instr & 3) == 1) /* update forms */ regs->gpr[ra] = ea; } - if (!(regs->msr & MSR_SF)) - ea &= 0xffffffffUL; - return ea; + + return truncate_if_32bit(regs->msr, ea); } #endif /* __powerpc64 */ @@ -136,11 +145,8 @@ static unsigned long __kprobes xform_ea(unsigned int instr, struct pt_regs *regs if (do_update) /* update forms */ regs->gpr[ra] = ea; } -#ifdef __powerpc64__ - if (!(regs->msr & MSR_SF)) - ea &= 0xffffffffUL; -#endif - return ea; + + return truncate_if_32bit(regs->msr, ea); } /* @@ -466,7 +472,7 @@ static void __kprobes set_cr0(struct pt_regs *regs, int rd) regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000); #ifdef __powerpc64__ - if (!(regs->msr & MSR_SF)) + if (!(regs->msr & MSR_64BIT)) val = (int) val; #endif if (val < 0) @@ -487,7 +493,7 @@ static void __kprobes add_with_carry(struct pt_regs *regs, int rd, ++val; regs->gpr[rd] = val; #ifdef __powerpc64__ - if (!(regs->msr & MSR_SF)) { + if (!(regs->msr & MSR_64BIT)) { val = (unsigned int) val; val1 = (unsigned int) val1; } @@ -570,8 +576,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) if ((instr & 2) == 0) imm += regs->nip; regs->nip += 4; - if ((regs->msr & MSR_SF) == 0) - regs->nip &= 0xffffffffUL; + regs->nip = truncate_if_32bit(regs->msr, regs->nip); if (instr & 1) regs->link = regs->nip; if (branch_taken(instr, regs)) @@ -604,13 +609,9 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) imm -= 0x04000000; if ((instr & 2) == 0) imm += regs->nip; - if (instr & 1) { - regs->link = regs->nip + 4; - if ((regs->msr & MSR_SF) == 0) - regs->link &= 0xffffffffUL; - } - if ((regs->msr & MSR_SF) == 0) - imm &= 0xffffffffUL; + if (instr & 1) + regs->link = truncate_if_32bit(regs->msr, regs->nip + 4); + imm = truncate_if_32bit(regs->msr, imm); regs->nip = imm; return 1; case 19: @@ -618,11 +619,8 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) case 16: /* bclr */ case 528: /* bcctr */ imm = (instr & 0x400)? regs->ctr: regs->link; - regs->nip += 4; - if ((regs->msr & MSR_SF) == 0) { - regs->nip &= 0xffffffffUL; - imm &= 0xffffffffUL; - } + regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4); + imm = truncate_if_32bit(regs->msr, imm); if (instr & 1) regs->link = regs->nip; if (branch_taken(instr, regs)) @@ -1616,11 +1614,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) return 0; /* invoke DSI if -EFAULT? 
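
truncate_if_32bit() above centralises the "32-bit task on a 64-bit kernel" masking that was previously open-coded at every site: when MSR_64BIT (formerly tested as MSR_SF) is clear, effective addresses and the next-instruction pointer wrap at 4 GB. A stand-alone illustration of the two cases; the MSR bit is modelled here by a stand-in flag constant, not the kernel's definition.

    #include <stdint.h>
    #include <stdio.h>

    #define MSR_64BIT_FLAG (1ULL << 63)    /* stand-in for the real MSR_64BIT bit */

    /* Same shape as the kernel helper: only truncate in 32-bit mode. */
    static uint64_t truncate_if_32bit(uint64_t msr, uint64_t val)
    {
        if ((msr & MSR_64BIT_FLAG) == 0)
            val &= 0xffffffffULL;
        return val;
    }

    int main(void)
    {
        uint64_t nip = 0x00000001fffffffcULL;

        /* 64-bit mode: NIP advances past the 4 GB boundary. */
        printf("64-bit: %#llx\n",
               (unsigned long long)truncate_if_32bit(MSR_64BIT_FLAG, nip + 4));

        /* 32-bit mode: the same increment wraps back to 0. */
        printf("32-bit: %#llx\n",
               (unsigned long long)truncate_if_32bit(0, nip + 4));
        return 0;
    }
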
*/ } instr_done: - regs->nip += 4; -#ifdef __powerpc64__ - if ((regs->msr & MSR_SF) == 0) - regs->nip &= 0xffffffffUL; -#endif + regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4); return 1; logical_done: diff --git a/trunk/arch/powerpc/mm/hash_low_64.S b/trunk/arch/powerpc/mm/hash_low_64.S index 5b7dd4ea02b5..a242b5d7cbe4 100644 --- a/trunk/arch/powerpc/mm/hash_low_64.S +++ b/trunk/arch/powerpc/mm/hash_low_64.S @@ -118,7 +118,7 @@ _GLOBAL(__hash_page_4K) BEGIN_FTR_SECTION cmpdi r9,0 /* check segment size */ bne 3f -END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) +END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) /* Calc va and put it in r29 */ rldicr r29,r5,28,63-28 rldicl r3,r3,0,36 @@ -401,7 +401,7 @@ _GLOBAL(__hash_page_4K) BEGIN_FTR_SECTION cmpdi r9,0 /* check segment size */ bne 3f -END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) +END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) /* Calc va and put it in r29 */ rldicr r29,r5,28,63-28 /* r29 = (vsid << 28) */ rldicl r3,r3,0,36 /* r3 = (ea & 0x0fffffff) */ @@ -715,7 +715,7 @@ BEGIN_FTR_SECTION andi. r0,r31,_PAGE_NO_CACHE /* If so, bail out and refault as a 4k page */ bne- ht64_bail_ok -END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE) +END_MMU_FTR_SECTION_IFCLR(MMU_FTR_CI_LARGE_PAGE) /* Prepare new PTE value (turn access RW into DIRTY, then * add BUSY and ACCESSED) */ @@ -736,7 +736,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE) BEGIN_FTR_SECTION cmpdi r9,0 /* check segment size */ bne 3f -END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) +END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) /* Calc va and put it in r29 */ rldicr r29,r5,28,63-28 rldicl r3,r3,0,36 diff --git a/trunk/arch/powerpc/mm/hash_native_64.c b/trunk/arch/powerpc/mm/hash_native_64.c index 784a400e0781..dfd764896db0 100644 --- a/trunk/arch/powerpc/mm/hash_native_64.c +++ b/trunk/arch/powerpc/mm/hash_native_64.c @@ -50,9 +50,8 @@ static inline void __tlbie(unsigned long va, int psize, int ssize) case MMU_PAGE_4K: va &= ~0xffful; va |= ssize << 8; - asm volatile(ASM_MMU_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), - %2) - : : "r" (va), "r"(0), "i" (MMU_FTR_TLBIE_206) + asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2) + : : "r" (va), "r"(0), "i" (CPU_FTR_HVMODE_206) : "memory"); break; default: @@ -61,9 +60,8 @@ static inline void __tlbie(unsigned long va, int psize, int ssize) va |= penc << 12; va |= ssize << 8; va |= 1; /* L */ - asm volatile(ASM_MMU_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), - %2) - : : "r" (va), "r"(0), "i" (MMU_FTR_TLBIE_206) + asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2) + : : "r" (va), "r"(0), "i" (CPU_FTR_HVMODE_206) : "memory"); break; } @@ -98,8 +96,8 @@ static inline void __tlbiel(unsigned long va, int psize, int ssize) static inline void tlbie(unsigned long va, int psize, int ssize, int local) { - unsigned int use_local = local && cpu_has_feature(CPU_FTR_TLBIEL); - int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE); + unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL); + int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); if (use_local) use_local = mmu_psize_defs[psize].tlbiel; @@ -503,7 +501,7 @@ static void native_flush_hash_range(unsigned long number, int local) } pte_iterate_hashed_end(); } - if (cpu_has_feature(CPU_FTR_TLBIEL) && + if (mmu_has_feature(MMU_FTR_TLBIEL) && mmu_psize_defs[psize].tlbiel && local) { asm volatile("ptesync":::"memory"); for (i = 0; i < number; i++) { @@ -517,7 +515,7 @@ static void native_flush_hash_range(unsigned long number, int local) } asm volatile("ptesync":::"memory"); } else { 
- int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE); + int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); if (lock_tlbie) raw_spin_lock(&native_tlbie_lock); diff --git a/trunk/arch/powerpc/mm/hash_utils_64.c b/trunk/arch/powerpc/mm/hash_utils_64.c index 58a022d0f463..26b2872b3d00 100644 --- a/trunk/arch/powerpc/mm/hash_utils_64.c +++ b/trunk/arch/powerpc/mm/hash_utils_64.c @@ -53,6 +53,7 @@ #include #include #include +#include #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) @@ -258,11 +259,11 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node, for (; size >= 4; size -= 4, ++prop) { if (prop[0] == 40) { DBG("1T segment support detected\n"); - cur_cpu_spec->cpu_features |= CPU_FTR_1T_SEGMENT; + cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT; return 1; } } - cur_cpu_spec->cpu_features &= ~CPU_FTR_NO_SLBIE_B; + cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B; return 0; } @@ -288,7 +289,7 @@ static int __init htab_dt_scan_page_sizes(unsigned long node, if (prop != NULL) { DBG("Page sizes from device-tree:\n"); size /= 4; - cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE); + cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE); while(size > 0) { unsigned int shift = prop[0]; unsigned int slbenc = prop[1]; @@ -316,7 +317,7 @@ static int __init htab_dt_scan_page_sizes(unsigned long node, break; case 0x18: idx = MMU_PAGE_16M; - cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE; + cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE; break; case 0x22: idx = MMU_PAGE_16G; @@ -411,7 +412,7 @@ static void __init htab_init_page_sizes(void) * Not in the device-tree, let's fallback on known size * list for 16M capable GP & GR */ - if (cpu_has_feature(CPU_FTR_16M_PAGE)) + if (mmu_has_feature(MMU_FTR_16M_PAGE)) memcpy(mmu_psize_defs, mmu_psize_defaults_gp, sizeof(mmu_psize_defaults_gp)); found: @@ -441,7 +442,7 @@ static void __init htab_init_page_sizes(void) mmu_vmalloc_psize = MMU_PAGE_64K; if (mmu_linear_psize == MMU_PAGE_4K) mmu_linear_psize = MMU_PAGE_64K; - if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) { + if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) { /* * Don't use 64k pages for ioremap on pSeries, since * that would stop us accessing the HEA ethernet. 
@@ -547,15 +548,7 @@ int remove_section_mapping(unsigned long start, unsigned long end) } #endif /* CONFIG_MEMORY_HOTPLUG */ -static inline void make_bl(unsigned int *insn_addr, void *func) -{ - unsigned long funcp = *((unsigned long *)func); - int offset = funcp - (unsigned long)insn_addr; - - *insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc)); - flush_icache_range((unsigned long)insn_addr, 4+ - (unsigned long)insn_addr); -} +#define FUNCTION_TEXT(A) ((*(unsigned long *)(A))) static void __init htab_finish_init(void) { @@ -570,16 +563,33 @@ static void __init htab_finish_init(void) extern unsigned int *ht64_call_hpte_remove; extern unsigned int *ht64_call_hpte_updatepp; - make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert); - make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert); - make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove); - make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp); + patch_branch(ht64_call_hpte_insert1, + FUNCTION_TEXT(ppc_md.hpte_insert), + BRANCH_SET_LINK); + patch_branch(ht64_call_hpte_insert2, + FUNCTION_TEXT(ppc_md.hpte_insert), + BRANCH_SET_LINK); + patch_branch(ht64_call_hpte_remove, + FUNCTION_TEXT(ppc_md.hpte_remove), + BRANCH_SET_LINK); + patch_branch(ht64_call_hpte_updatepp, + FUNCTION_TEXT(ppc_md.hpte_updatepp), + BRANCH_SET_LINK); + #endif /* CONFIG_PPC_HAS_HASH_64K */ - make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert); - make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert); - make_bl(htab_call_hpte_remove, ppc_md.hpte_remove); - make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp); + patch_branch(htab_call_hpte_insert1, + FUNCTION_TEXT(ppc_md.hpte_insert), + BRANCH_SET_LINK); + patch_branch(htab_call_hpte_insert2, + FUNCTION_TEXT(ppc_md.hpte_insert), + BRANCH_SET_LINK); + patch_branch(htab_call_hpte_remove, + FUNCTION_TEXT(ppc_md.hpte_remove), + BRANCH_SET_LINK); + patch_branch(htab_call_hpte_updatepp, + FUNCTION_TEXT(ppc_md.hpte_updatepp), + BRANCH_SET_LINK); } static void __init htab_initialize(void) @@ -598,7 +608,7 @@ static void __init htab_initialize(void) /* Initialize page sizes */ htab_init_page_sizes(); - if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) { + if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) { mmu_kernel_ssize = MMU_SEGSIZE_1T; mmu_highuser_ssize = MMU_SEGSIZE_1T; printk(KERN_INFO "Using 1TB segments\n"); @@ -739,7 +749,7 @@ void __init early_init_mmu(void) /* Initialize stab / SLB management except on iSeries */ - if (cpu_has_feature(CPU_FTR_SLB)) + if (mmu_has_feature(MMU_FTR_SLB)) slb_initialize(); else if (!firmware_has_feature(FW_FEATURE_ISERIES)) stab_initialize(get_paca()->stab_real); @@ -756,7 +766,7 @@ void __cpuinit early_init_mmu_secondary(void) * in real mode on pSeries and we want a virtual address on * iSeries anyway */ - if (cpu_has_feature(CPU_FTR_SLB)) + if (mmu_has_feature(MMU_FTR_SLB)) slb_initialize(); else stab_initialize(get_paca()->stab_addr); diff --git a/trunk/arch/powerpc/mm/hugetlbpage.c b/trunk/arch/powerpc/mm/hugetlbpage.c index 9bb249c3046e..0b9a5c1901b9 100644 --- a/trunk/arch/powerpc/mm/hugetlbpage.c +++ b/trunk/arch/powerpc/mm/hugetlbpage.c @@ -529,7 +529,7 @@ static int __init hugetlbpage_init(void) { int psize; - if (!cpu_has_feature(CPU_FTR_16M_PAGE)) + if (!mmu_has_feature(MMU_FTR_16M_PAGE)) return -ENODEV; for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { diff --git a/trunk/arch/powerpc/mm/mmu_context_hash64.c b/trunk/arch/powerpc/mm/mmu_context_hash64.c index 2535828aa84b..3bafc3deca6d 100644 --- a/trunk/arch/powerpc/mm/mmu_context_hash64.c +++ b/trunk/arch/powerpc/mm/mmu_context_hash64.c 
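
The removed make_bl() helper above made the branch encoding explicit: a PowerPC bl is the I-form branch, primary opcode 18, with the word-aligned offset stored in bits 2..25 and the LK bit set, i.e. 0x48000001 | (offset & 0x03fffffc). patch_branch(..., BRANCH_SET_LINK) is expected to produce the same instruction word and handle the icache flush. A quick arithmetic check of that encoding, with illustrative addresses:

    #include <stdint.h>
    #include <stdio.h>

    /* Branch-and-link encoding as open-coded by the removed make_bl():
     * opcode 18, AA=0, LK=1, displacement is (target - instruction address). */
    static uint32_t encode_bl(uint64_t insn_addr, uint64_t target)
    {
        int64_t offset = (int64_t)(target - insn_addr);

        return 0x48000001u | ((uint32_t)offset & 0x03fffffc);
    }

    int main(void)
    {
        /* Forward branch by 0x1000 bytes ... */
        printf("bl +0x1000: %#010x\n", (unsigned)encode_bl(0x10000000, 0x10001000));
        /* ... and a backward branch by 0x20 bytes (sign bits fold into the mask). */
        printf("bl -0x20:   %#010x\n", (unsigned)encode_bl(0x10000000, 0x0fffffe0));
        return 0;
    }
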
@@ -20,9 +20,205 @@ #include #include #include +#include #include +#ifdef CONFIG_PPC_ICSWX +/* + * The processor and its L2 cache cause the icswx instruction to + * generate a COP_REQ transaction on PowerBus. The transaction has + * no address, and the processor does not perform an MMU access + * to authenticate the transaction. The command portion of the + * PowerBus COP_REQ transaction includes the LPAR_ID (LPID) and + * the coprocessor Process ID (PID), which the coprocessor compares + * to the authorized LPID and PID held in the coprocessor, to determine + * if the process is authorized to generate the transaction. + * The data of the COP_REQ transaction is 128-byte or less and is + * placed in cacheable memory on a 128-byte cache line boundary. + * + * The task to use a coprocessor should use use_cop() to allocate + * a coprocessor PID before executing icswx instruction. use_cop() + * also enables the coprocessor context switching. Drop_cop() is + * used to free the coprocessor PID. + * + * Example: + * Host Fabric Interface (HFI) is a PowerPC network coprocessor. + * Each HFI have multiple windows. Each HFI window serves as a + * network device sending to and receiving from HFI network. + * HFI immediate send function uses icswx instruction. The immediate + * send function allows small (single cache-line) packets be sent + * without using the regular HFI send FIFO and doorbell, which are + * much slower than immediate send. + * + * For each task intending to use HFI immediate send, the HFI driver + * calls use_cop() to obtain a coprocessor PID for the task. + * The HFI driver then allocate a free HFI window and save the + * coprocessor PID to the HFI window to allow the task to use the + * HFI window. + * + * The HFI driver repeatedly creates immediate send packets and + * issues icswx instruction to send data through the HFI window. + * The HFI compares the coprocessor PID in the CPU PID register + * to the PID held in the HFI window to determine if the transaction + * is allowed. + * + * When the task to release the HFI window, the HFI driver calls + * drop_cop() to release the coprocessor PID. + */ + +#define COP_PID_NONE 0 +#define COP_PID_MIN (COP_PID_NONE + 1) +#define COP_PID_MAX (0xFFFF) + +static DEFINE_SPINLOCK(mmu_context_acop_lock); +static DEFINE_IDA(cop_ida); + +void switch_cop(struct mm_struct *next) +{ + mtspr(SPRN_PID, next->context.cop_pid); + mtspr(SPRN_ACOP, next->context.acop); +} + +static int new_cop_pid(struct ida *ida, int min_id, int max_id, + spinlock_t *lock) +{ + int index; + int err; + +again: + if (!ida_pre_get(ida, GFP_KERNEL)) + return -ENOMEM; + + spin_lock(lock); + err = ida_get_new_above(ida, min_id, &index); + spin_unlock(lock); + + if (err == -EAGAIN) + goto again; + else if (err) + return err; + + if (index > max_id) { + spin_lock(lock); + ida_remove(ida, index); + spin_unlock(lock); + return -ENOMEM; + } + + return index; +} + +static void sync_cop(void *arg) +{ + struct mm_struct *mm = arg; + + if (mm == current->active_mm) + switch_cop(current->active_mm); +} + +/** + * Start using a coprocessor. + * @acop: mask of coprocessor to be used. + * @mm: The mm the coprocessor to associate with. Most likely current mm. + * + * Return a positive PID if successful. Negative errno otherwise. + * The returned PID will be fed to the coprocessor to determine if an + * icswx transaction is authenticated. 
+ */ +int use_cop(unsigned long acop, struct mm_struct *mm) +{ + int ret; + + if (!cpu_has_feature(CPU_FTR_ICSWX)) + return -ENODEV; + + if (!mm || !acop) + return -EINVAL; + + /* We need to make sure mm_users doesn't change */ + down_read(&mm->mmap_sem); + spin_lock(mm->context.cop_lockp); + + if (mm->context.cop_pid == COP_PID_NONE) { + ret = new_cop_pid(&cop_ida, COP_PID_MIN, COP_PID_MAX, + &mmu_context_acop_lock); + if (ret < 0) + goto out; + + mm->context.cop_pid = ret; + } + mm->context.acop |= acop; + + sync_cop(mm); + + /* + * If this is a threaded process then there might be other threads + * running. We need to send an IPI to force them to pick up any + * change in PID and ACOP. + */ + if (atomic_read(&mm->mm_users) > 1) + smp_call_function(sync_cop, mm, 1); + + ret = mm->context.cop_pid; + +out: + spin_unlock(mm->context.cop_lockp); + up_read(&mm->mmap_sem); + + return ret; +} +EXPORT_SYMBOL_GPL(use_cop); + +/** + * Stop using a coprocessor. + * @acop: mask of coprocessor to be stopped. + * @mm: The mm the coprocessor associated with. + */ +void drop_cop(unsigned long acop, struct mm_struct *mm) +{ + int free_pid = COP_PID_NONE; + + if (!cpu_has_feature(CPU_FTR_ICSWX)) + return; + + if (WARN_ON_ONCE(!mm)) + return; + + /* We need to make sure mm_users doesn't change */ + down_read(&mm->mmap_sem); + spin_lock(mm->context.cop_lockp); + + mm->context.acop &= ~acop; + + if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) { + free_pid = mm->context.cop_pid; + mm->context.cop_pid = COP_PID_NONE; + } + + sync_cop(mm); + + /* + * If this is a threaded process then there might be other threads + * running. We need to send an IPI to force them to pick up any + * change in PID and ACOP. + */ + if (atomic_read(&mm->mm_users) > 1) + smp_call_function(sync_cop, mm, 1); + + if (free_pid != COP_PID_NONE) { + spin_lock(&mmu_context_acop_lock); + ida_remove(&cop_ida, free_pid); + spin_unlock(&mmu_context_acop_lock); + } + + spin_unlock(mm->context.cop_lockp); + up_read(&mm->mmap_sem); +} +EXPORT_SYMBOL_GPL(drop_cop); + +#endif /* CONFIG_PPC_ICSWX */ + static DEFINE_SPINLOCK(mmu_context_lock); static DEFINE_IDA(mmu_context_ida); @@ -31,7 +227,6 @@ static DEFINE_IDA(mmu_context_ida); * Each segment contains 2^28 bytes. Each context maps 2^44 bytes, * so we can support 2^19-1 contexts (19 == 35 + 28 - 44). 
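
use_cop()/drop_cop() above are the whole driver-facing surface of the new icswx context handling: a coprocessor driver asks for a PID on behalf of an mm, programs that PID into its hardware window, and releases it when the window is torn down. A minimal sketch of that call pattern follows; the ACOP bit, the window structure and the register-programming step are hypothetical placeholders, and the asm/mmu_context.h include assumes that is where the declarations end up.

    #include <linux/mm.h>
    #include <linux/sched.h>
    #include <asm/mmu_context.h>    /* assumed home of use_cop()/drop_cop() */

    #define MY_COP_ACOP_BIT (1UL << 0)    /* hypothetical coprocessor type bit */

    /* Hypothetical per-window state for an icswx-capable device. */
    struct my_cop_window {
        int cop_pid;
        struct mm_struct *mm;
    };

    static int my_cop_open_window(struct my_cop_window *win)
    {
        int pid = use_cop(MY_COP_ACOP_BIT, current->mm);

        if (pid < 0)
            return pid;    /* e.g. -ENODEV without CPU_FTR_ICSWX */

        win->cop_pid = pid;
        win->mm = current->mm;
        /* ...program 'pid' into the device's window registers here... */
        return 0;
    }

    static void my_cop_close_window(struct my_cop_window *win)
    {
        drop_cop(MY_COP_ACOP_BIT, win->mm);
        win->mm = NULL;
    }
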
*/ -#define NO_CONTEXT 0 #define MAX_CONTEXT ((1UL << 19) - 1) int __init_new_context(void) @@ -79,6 +274,16 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) slice_set_user_psize(mm, mmu_virtual_psize); subpage_prot_init_new_context(mm); mm->context.id = index; +#ifdef CONFIG_PPC_ICSWX + mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL); + if (!mm->context.cop_lockp) { + __destroy_context(index); + subpage_prot_free(mm); + mm->context.id = MMU_NO_CONTEXT; + return -ENOMEM; + } + spin_lock_init(mm->context.cop_lockp); +#endif /* CONFIG_PPC_ICSWX */ return 0; } @@ -93,7 +298,12 @@ EXPORT_SYMBOL_GPL(__destroy_context); void destroy_context(struct mm_struct *mm) { +#ifdef CONFIG_PPC_ICSWX + drop_cop(mm->context.acop, mm); + kfree(mm->context.cop_lockp); + mm->context.cop_lockp = NULL; +#endif /* CONFIG_PPC_ICSWX */ __destroy_context(mm->context.id); subpage_prot_free(mm); - mm->context.id = NO_CONTEXT; + mm->context.id = MMU_NO_CONTEXT; } diff --git a/trunk/arch/powerpc/mm/mmu_context_nohash.c b/trunk/arch/powerpc/mm/mmu_context_nohash.c index c0aab52da3a5..336807de550e 100644 --- a/trunk/arch/powerpc/mm/mmu_context_nohash.c +++ b/trunk/arch/powerpc/mm/mmu_context_nohash.c @@ -338,12 +338,14 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self, return NOTIFY_OK; switch (action) { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu); stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL); break; #ifdef CONFIG_HOTPLUG_CPU + case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: case CPU_DEAD: case CPU_DEAD_FROZEN: pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu); @@ -407,7 +409,17 @@ void __init mmu_context_init(void) } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) { first_context = 1; last_context = 65535; - } else { + } else +#ifdef CONFIG_PPC_BOOK3E_MMU + if (mmu_has_feature(MMU_FTR_TYPE_3E)) { + u32 mmucfg = mfspr(SPRN_MMUCFG); + u32 pid_bits = (mmucfg & MMUCFG_PIDSIZE_MASK) + >> MMUCFG_PIDSIZE_SHIFT; + first_context = 1; + last_context = (1UL << (pid_bits + 1)) - 1; + } else +#endif + { first_context = 1; last_context = 255; } diff --git a/trunk/arch/powerpc/mm/numa.c b/trunk/arch/powerpc/mm/numa.c index 5ec1dad2a19d..2164006fe170 100644 --- a/trunk/arch/powerpc/mm/numa.c +++ b/trunk/arch/powerpc/mm/numa.c @@ -311,14 +311,13 @@ EXPORT_SYMBOL_GPL(of_node_to_nid); static int __init find_min_common_depth(void) { int depth; - struct device_node *rtas_root; struct device_node *chosen; + struct device_node *root; const char *vec5; - rtas_root = of_find_node_by_path("/rtas"); - - if (!rtas_root) - return -1; + root = of_find_node_by_path("/rtas"); + if (!root) + root = of_find_node_by_path("/"); /* * This property is a set of 32-bit integers, each representing @@ -332,7 +331,7 @@ static int __init find_min_common_depth(void) * NUMA boundary and the following are progressively less significant * boundaries. There can be more than one level of NUMA. 
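
For Book3E parts, the nohash context code above now sizes the context space from the MMU configuration register instead of assuming 255: MMUCFG's PIDSIZE field gives the PID width, and last_context becomes the largest representable PID. Worked numbers below, treating PIDSIZE as "number of PID bits minus one", which is what the +1 in the hunk implies (an assumption about the field's encoding, not something stated in the hunk itself):

    #include <stdio.h>

    /* last_context as derived in mmu_context_init() above;
     * usable contexts are 1..last_context. */
    static unsigned long last_context_for(unsigned int pid_bits)
    {
        return (1UL << (pid_bits + 1)) - 1;
    }

    int main(void)
    {
        /* PIDSIZE = 7  -> 8-bit PIDs  -> contexts 1..255 (the old fallback) */
        printf("pid_bits 7  -> last_context %lu\n", last_context_for(7));
        /* PIDSIZE = 13 -> 14-bit PIDs -> contexts 1..16383 */
        printf("pid_bits 13 -> last_context %lu\n", last_context_for(13));
        return 0;
    }
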
*/ - distance_ref_points = of_get_property(rtas_root, + distance_ref_points = of_get_property(root, "ibm,associativity-reference-points", &distance_ref_points_depth); @@ -376,11 +375,11 @@ static int __init find_min_common_depth(void) distance_ref_points_depth = MAX_DISTANCE_REF_POINTS; } - of_node_put(rtas_root); + of_node_put(root); return depth; err: - of_node_put(rtas_root); + of_node_put(root); return -1; } @@ -1453,7 +1452,7 @@ int arch_update_cpu_topology(void) unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0}; struct sys_device *sysdev; - for_each_cpu_mask(cpu, cpu_associativity_changes_mask) { + for_each_cpu(cpu,&cpu_associativity_changes_mask) { vphn_get_associativity(cpu, associativity); nid = associativity_to_nid(associativity); diff --git a/trunk/arch/powerpc/mm/pgtable_32.c b/trunk/arch/powerpc/mm/pgtable_32.c index 8dc41c0157fe..51f87956f8f8 100644 --- a/trunk/arch/powerpc/mm/pgtable_32.c +++ b/trunk/arch/powerpc/mm/pgtable_32.c @@ -133,7 +133,15 @@ ioremap(phys_addr_t addr, unsigned long size) EXPORT_SYMBOL(ioremap); void __iomem * -ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags) +ioremap_wc(phys_addr_t addr, unsigned long size) +{ + return __ioremap_caller(addr, size, _PAGE_NO_CACHE, + __builtin_return_address(0)); +} +EXPORT_SYMBOL(ioremap_wc); + +void __iomem * +ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags) { /* writeable implies dirty for kernel addresses */ if (flags & _PAGE_RW) @@ -152,7 +160,7 @@ ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags) return __ioremap_caller(addr, size, flags, __builtin_return_address(0)); } -EXPORT_SYMBOL(ioremap_flags); +EXPORT_SYMBOL(ioremap_prot); void __iomem * __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags) diff --git a/trunk/arch/powerpc/mm/pgtable_64.c b/trunk/arch/powerpc/mm/pgtable_64.c index 88927a05cdc2..6e595f6496d4 100644 --- a/trunk/arch/powerpc/mm/pgtable_64.c +++ b/trunk/arch/powerpc/mm/pgtable_64.c @@ -255,7 +255,17 @@ void __iomem * ioremap(phys_addr_t addr, unsigned long size) return __ioremap_caller(addr, size, flags, caller); } -void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size, +void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size) +{ + unsigned long flags = _PAGE_NO_CACHE; + void *caller = __builtin_return_address(0); + + if (ppc_md.ioremap) + return ppc_md.ioremap(addr, size, flags, caller); + return __ioremap_caller(addr, size, flags, caller); +} + +void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags) { void *caller = __builtin_return_address(0); @@ -311,7 +321,8 @@ void iounmap(volatile void __iomem *token) } EXPORT_SYMBOL(ioremap); -EXPORT_SYMBOL(ioremap_flags); +EXPORT_SYMBOL(ioremap_wc); +EXPORT_SYMBOL(ioremap_prot); EXPORT_SYMBOL(__ioremap); EXPORT_SYMBOL(__ioremap_at); EXPORT_SYMBOL(iounmap); diff --git a/trunk/arch/powerpc/mm/slb.c b/trunk/arch/powerpc/mm/slb.c index 1d98ecc8eecd..e22276cb67a4 100644 --- a/trunk/arch/powerpc/mm/slb.c +++ b/trunk/arch/powerpc/mm/slb.c @@ -24,6 +24,7 @@ #include #include #include +#include extern void slb_allocate_realmode(unsigned long ea); @@ -166,7 +167,7 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2) int esid_1t_count; /* System is not 1T segment size capable. 
*/ - if (!cpu_has_feature(CPU_FTR_1T_SEGMENT)) + if (!mmu_has_feature(MMU_FTR_1T_SEGMENT)) return (GET_ESID(addr1) == GET_ESID(addr2)); esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) + @@ -201,7 +202,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm) */ hard_irq_disable(); offset = get_paca()->slb_cache_ptr; - if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) && + if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) && offset <= SLB_CACHE_ENTRIES) { int i; asm volatile("isync" : : : "memory"); @@ -249,9 +250,8 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm) static inline void patch_slb_encoding(unsigned int *insn_addr, unsigned int immed) { - *insn_addr = (*insn_addr & 0xffff0000) | immed; - flush_icache_range((unsigned long)insn_addr, 4+ - (unsigned long)insn_addr); + int insn = (*insn_addr & 0xffff0000) | immed; + patch_instruction(insn_addr, insn); } void slb_set_size(u16 size) diff --git a/trunk/arch/powerpc/mm/slb_low.S b/trunk/arch/powerpc/mm/slb_low.S index 95ce35581696..ef653dc95b65 100644 --- a/trunk/arch/powerpc/mm/slb_low.S +++ b/trunk/arch/powerpc/mm/slb_low.S @@ -58,7 +58,7 @@ _GLOBAL(slb_miss_kernel_load_linear) li r11,0 BEGIN_FTR_SECTION b slb_finish_load -END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT) +END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) b slb_finish_load_1T 1: @@ -87,7 +87,7 @@ _GLOBAL(slb_miss_kernel_load_vmemmap) 6: BEGIN_FTR_SECTION b slb_finish_load -END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT) +END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) b slb_finish_load_1T 0: /* user address: proto-VSID = context << 15 | ESID. First check @@ -138,11 +138,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT) ld r9,PACACONTEXTID(r13) BEGIN_FTR_SECTION cmpldi r10,0x1000 -END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) +END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) rldimi r10,r9,USER_ESID_BITS,0 BEGIN_FTR_SECTION bge slb_finish_load_1T -END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) +END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) b slb_finish_load 8: /* invalid EA */ diff --git a/trunk/arch/powerpc/mm/stab.c b/trunk/arch/powerpc/mm/stab.c index 446a01842a73..41e31642a86a 100644 --- a/trunk/arch/powerpc/mm/stab.c +++ b/trunk/arch/powerpc/mm/stab.c @@ -243,7 +243,7 @@ void __init stabs_alloc(void) { int cpu; - if (cpu_has_feature(CPU_FTR_SLB)) + if (mmu_has_feature(MMU_FTR_SLB)) return; for_each_possible_cpu(cpu) { diff --git a/trunk/arch/powerpc/platforms/44x/iss4xx.c b/trunk/arch/powerpc/platforms/44x/iss4xx.c index aa46e9d1e771..19395f18b1db 100644 --- a/trunk/arch/powerpc/platforms/44x/iss4xx.c +++ b/trunk/arch/powerpc/platforms/44x/iss4xx.c @@ -87,7 +87,7 @@ static void __cpuinit smp_iss4xx_setup_cpu(int cpu) mpic_setup_this_cpu(); } -static void __cpuinit smp_iss4xx_kick_cpu(int cpu) +static int __cpuinit smp_iss4xx_kick_cpu(int cpu) { struct device_node *cpunode = of_get_cpu_node(cpu, NULL); const u64 *spin_table_addr_prop; @@ -104,7 +104,7 @@ static void __cpuinit smp_iss4xx_kick_cpu(int cpu) NULL); if (spin_table_addr_prop == NULL) { pr_err("CPU%d: Can't start, missing cpu-release-addr !\n", cpu); - return; + return -ENOENT; } /* Assume it's mapped as part of the linear mapping. 
This is a bit @@ -117,6 +117,8 @@ static void __cpuinit smp_iss4xx_kick_cpu(int cpu) smp_wmb(); spin_table[1] = __pa(start_secondary_47x); mb(); + + return 0; } static struct smp_ops_t iss_smp_ops = { diff --git a/trunk/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/trunk/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c index cfc4b2009982..9f09319352c0 100644 --- a/trunk/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c +++ b/trunk/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c @@ -61,7 +61,7 @@ irq_to_pic_bit(unsigned int irq) static void cpld_mask_irq(struct irq_data *d) { - unsigned int cpld_irq = (unsigned int)irq_map[d->irq].hwirq; + unsigned int cpld_irq = (unsigned int)irqd_to_hwirq(d); void __iomem *pic_mask = irq_to_pic_mask(cpld_irq); out_8(pic_mask, @@ -71,7 +71,7 @@ cpld_mask_irq(struct irq_data *d) static void cpld_unmask_irq(struct irq_data *d) { - unsigned int cpld_irq = (unsigned int)irq_map[d->irq].hwirq; + unsigned int cpld_irq = (unsigned int)irqd_to_hwirq(d); void __iomem *pic_mask = irq_to_pic_mask(cpld_irq); out_8(pic_mask, @@ -97,7 +97,7 @@ cpld_pic_get_irq(int offset, u8 ignore, u8 __iomem *statusp, status |= (ignore | mask); if (status == 0xff) - return NO_IRQ_IGNORE; + return NO_IRQ; cpld_irq = ffz(status) + offset; @@ -109,14 +109,14 @@ cpld_pic_cascade(unsigned int irq, struct irq_desc *desc) { irq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status, &cpld_regs->pci_mask); - if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) { + if (irq != NO_IRQ) { generic_handle_irq(irq); return; } irq = cpld_pic_get_irq(8, MISC_IGNORE, &cpld_regs->misc_status, &cpld_regs->misc_mask); - if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) { + if (irq != NO_IRQ) { generic_handle_irq(irq); return; } diff --git a/trunk/arch/powerpc/platforms/52xx/media5200.c b/trunk/arch/powerpc/platforms/52xx/media5200.c index 57a6a349e932..96f85e5e0cd3 100644 --- a/trunk/arch/powerpc/platforms/52xx/media5200.c +++ b/trunk/arch/powerpc/platforms/52xx/media5200.c @@ -56,7 +56,7 @@ static void media5200_irq_unmask(struct irq_data *d) spin_lock_irqsave(&media5200_irq.lock, flags); val = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE); - val |= 1 << (MEDIA5200_IRQ_SHIFT + irq_map[d->irq].hwirq); + val |= 1 << (MEDIA5200_IRQ_SHIFT + irqd_to_hwirq(d)); out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, val); spin_unlock_irqrestore(&media5200_irq.lock, flags); } @@ -68,7 +68,7 @@ static void media5200_irq_mask(struct irq_data *d) spin_lock_irqsave(&media5200_irq.lock, flags); val = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE); - val &= ~(1 << (MEDIA5200_IRQ_SHIFT + irq_map[d->irq].hwirq)); + val &= ~(1 << (MEDIA5200_IRQ_SHIFT + irqd_to_hwirq(d))); out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, val); spin_unlock_irqrestore(&media5200_irq.lock, flags); } diff --git a/trunk/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/trunk/arch/powerpc/platforms/52xx/mpc52xx_pic.c index 1dd15400f6f0..1a9a49570579 100644 --- a/trunk/arch/powerpc/platforms/52xx/mpc52xx_pic.c +++ b/trunk/arch/powerpc/platforms/52xx/mpc52xx_pic.c @@ -157,48 +157,30 @@ static inline void io_be_clrbit(u32 __iomem *addr, int bitno) */ static void mpc52xx_extirq_mask(struct irq_data *d) { - int irq; - int l2irq; - - irq = irq_map[d->irq].hwirq; - l2irq = irq & MPC52xx_IRQ_L2_MASK; - + int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; io_be_clrbit(&intr->ctrl, 11 - l2irq); } static void mpc52xx_extirq_unmask(struct irq_data *d) { - int irq; - int l2irq; - - irq = irq_map[d->irq].hwirq; - l2irq = irq & MPC52xx_IRQ_L2_MASK; - + int l2irq = 
irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; io_be_setbit(&intr->ctrl, 11 - l2irq); } static void mpc52xx_extirq_ack(struct irq_data *d) { - int irq; - int l2irq; - - irq = irq_map[d->irq].hwirq; - l2irq = irq & MPC52xx_IRQ_L2_MASK; - + int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; io_be_setbit(&intr->ctrl, 27-l2irq); } static int mpc52xx_extirq_set_type(struct irq_data *d, unsigned int flow_type) { u32 ctrl_reg, type; - int irq; - int l2irq; + int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; void *handler = handle_level_irq; - irq = irq_map[d->irq].hwirq; - l2irq = irq & MPC52xx_IRQ_L2_MASK; - - pr_debug("%s: irq=%x. l2=%d flow_type=%d\n", __func__, irq, l2irq, flow_type); + pr_debug("%s: irq=%x. l2=%d flow_type=%d\n", __func__, + (int) irqd_to_hwirq(d), l2irq, flow_type); switch (flow_type) { case IRQF_TRIGGER_HIGH: type = 0; break; @@ -237,23 +219,13 @@ static int mpc52xx_null_set_type(struct irq_data *d, unsigned int flow_type) static void mpc52xx_main_mask(struct irq_data *d) { - int irq; - int l2irq; - - irq = irq_map[d->irq].hwirq; - l2irq = irq & MPC52xx_IRQ_L2_MASK; - + int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; io_be_setbit(&intr->main_mask, 16 - l2irq); } static void mpc52xx_main_unmask(struct irq_data *d) { - int irq; - int l2irq; - - irq = irq_map[d->irq].hwirq; - l2irq = irq & MPC52xx_IRQ_L2_MASK; - + int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; io_be_clrbit(&intr->main_mask, 16 - l2irq); } @@ -270,23 +242,13 @@ static struct irq_chip mpc52xx_main_irqchip = { */ static void mpc52xx_periph_mask(struct irq_data *d) { - int irq; - int l2irq; - - irq = irq_map[d->irq].hwirq; - l2irq = irq & MPC52xx_IRQ_L2_MASK; - + int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; io_be_setbit(&intr->per_mask, 31 - l2irq); } static void mpc52xx_periph_unmask(struct irq_data *d) { - int irq; - int l2irq; - - irq = irq_map[d->irq].hwirq; - l2irq = irq & MPC52xx_IRQ_L2_MASK; - + int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; io_be_clrbit(&intr->per_mask, 31 - l2irq); } @@ -303,34 +265,19 @@ static struct irq_chip mpc52xx_periph_irqchip = { */ static void mpc52xx_sdma_mask(struct irq_data *d) { - int irq; - int l2irq; - - irq = irq_map[d->irq].hwirq; - l2irq = irq & MPC52xx_IRQ_L2_MASK; - + int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; io_be_setbit(&sdma->IntMask, l2irq); } static void mpc52xx_sdma_unmask(struct irq_data *d) { - int irq; - int l2irq; - - irq = irq_map[d->irq].hwirq; - l2irq = irq & MPC52xx_IRQ_L2_MASK; - + int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; io_be_clrbit(&sdma->IntMask, l2irq); } static void mpc52xx_sdma_ack(struct irq_data *d) { - int irq; - int l2irq; - - irq = irq_map[d->irq].hwirq; - l2irq = irq & MPC52xx_IRQ_L2_MASK; - + int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; out_be32(&sdma->IntPend, 1 << l2irq); } @@ -539,7 +486,7 @@ void __init mpc52xx_init_irq(void) unsigned int mpc52xx_get_irq(void) { u32 status; - int irq = NO_IRQ_IGNORE; + int irq; status = in_be32(&intr->enc_status); if (status & 0x00000400) { /* critical */ @@ -562,6 +509,8 @@ unsigned int mpc52xx_get_irq(void) } else { irq |= (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET); } + } else { + return NO_IRQ; } return irq_linear_revmap(mpc52xx_irqhost, irq); diff --git a/trunk/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/trunk/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c index 4a4eb6ffa12f..8ccf9ed62fe2 100644 --- a/trunk/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c +++ b/trunk/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c @@ -42,7 +42,7 @@ struct pq2ads_pci_pic { 
static void pq2ads_pci_mask_irq(struct irq_data *d) { struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d); - int irq = NUM_IRQS - virq_to_hw(d->irq) - 1; + int irq = NUM_IRQS - irqd_to_hwirq(d) - 1; if (irq != -1) { unsigned long flags; @@ -58,7 +58,7 @@ static void pq2ads_pci_mask_irq(struct irq_data *d) static void pq2ads_pci_unmask_irq(struct irq_data *d) { struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d); - int irq = NUM_IRQS - virq_to_hw(d->irq) - 1; + int irq = NUM_IRQS - irqd_to_hwirq(d) - 1; if (irq != -1) { unsigned long flags; @@ -112,16 +112,8 @@ static int pci_pic_host_map(struct irq_host *h, unsigned int virq, return 0; } -static void pci_host_unmap(struct irq_host *h, unsigned int virq) -{ - /* remove chip and handler */ - irq_set_chip_data(virq, NULL); - irq_set_chip(virq, NULL); -} - static struct irq_host_ops pci_pic_host_ops = { .map = pci_pic_host_map, - .unmap = pci_host_unmap, }; int __init pq2ads_pci_init_irq(void) diff --git a/trunk/arch/powerpc/platforms/83xx/suspend.c b/trunk/arch/powerpc/platforms/83xx/suspend.c index 188272934cfb..104faa8aa23c 100644 --- a/trunk/arch/powerpc/platforms/83xx/suspend.c +++ b/trunk/arch/powerpc/platforms/83xx/suspend.c @@ -318,17 +318,20 @@ static const struct platform_suspend_ops mpc83xx_suspend_ops = { .end = mpc83xx_suspend_end, }; +static struct of_device_id pmc_match[]; static int pmc_probe(struct platform_device *ofdev) { + const struct of_device_id *match; struct device_node *np = ofdev->dev.of_node; struct resource res; struct pmc_type *type; int ret = 0; - if (!ofdev->dev.of_match) + match = of_match_device(pmc_match, &ofdev->dev); + if (!match) return -EINVAL; - type = ofdev->dev.of_match->data; + type = match->data; if (!of_device_is_available(np)) return -ENODEV; diff --git a/trunk/arch/powerpc/platforms/85xx/smp.c b/trunk/arch/powerpc/platforms/85xx/smp.c index 0d00ff9d05a0..d6a93a10c0f5 100644 --- a/trunk/arch/powerpc/platforms/85xx/smp.c +++ b/trunk/arch/powerpc/platforms/85xx/smp.c @@ -41,7 +41,7 @@ extern void __early_start(void); #define NUM_BOOT_ENTRY 8 #define SIZE_BOOT_ENTRY (NUM_BOOT_ENTRY * sizeof(u32)) -static void __init +static int __init smp_85xx_kick_cpu(int nr) { unsigned long flags; @@ -60,7 +60,7 @@ smp_85xx_kick_cpu(int nr) if (cpu_rel_addr == NULL) { printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr); - return; + return -ENOENT; } /* @@ -107,6 +107,8 @@ smp_85xx_kick_cpu(int nr) iounmap(bptr_vaddr); pr_debug("waited %d msecs for CPU #%d.\n", n, nr); + + return 0; } static void __init @@ -233,8 +235,10 @@ void __init mpc85xx_smp_init(void) smp_85xx_ops.message_pass = smp_mpic_message_pass; } - if (cpu_has_feature(CPU_FTR_DBELL)) - smp_85xx_ops.message_pass = doorbell_message_pass; + if (cpu_has_feature(CPU_FTR_DBELL)) { + smp_85xx_ops.message_pass = smp_muxed_ipi_message_pass; + smp_85xx_ops.cause_ipi = doorbell_cause_ipi; + } BUG_ON(!smp_85xx_ops.message_pass); diff --git a/trunk/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/trunk/arch/powerpc/platforms/85xx/socrates_fpga_pic.c index db864623b4ae..12cb9bb2cc68 100644 --- a/trunk/arch/powerpc/platforms/85xx/socrates_fpga_pic.c +++ b/trunk/arch/powerpc/platforms/85xx/socrates_fpga_pic.c @@ -48,8 +48,6 @@ static struct socrates_fpga_irq_info fpga_irqs[SOCRATES_FPGA_NUM_IRQS] = { [8] = {0, IRQ_TYPE_LEVEL_HIGH}, }; -#define socrates_fpga_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) - static DEFINE_RAW_SPINLOCK(socrates_fpga_pic_lock); static void __iomem *socrates_fpga_pic_iobase; @@ -110,11 +108,9 @@ void 
socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc) static void socrates_fpga_pic_ack(struct irq_data *d) { unsigned long flags; - unsigned int hwirq, irq_line; + unsigned int irq_line, hwirq = irqd_to_hwirq(d); uint32_t mask; - hwirq = socrates_fpga_irq_to_hw(d->irq); - irq_line = fpga_irqs[hwirq].irq_line; raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) @@ -127,12 +123,10 @@ static void socrates_fpga_pic_ack(struct irq_data *d) static void socrates_fpga_pic_mask(struct irq_data *d) { unsigned long flags; - unsigned int hwirq; + unsigned int hwirq = irqd_to_hwirq(d); int irq_line; u32 mask; - hwirq = socrates_fpga_irq_to_hw(d->irq); - irq_line = fpga_irqs[hwirq].irq_line; raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) @@ -145,12 +139,10 @@ static void socrates_fpga_pic_mask(struct irq_data *d) static void socrates_fpga_pic_mask_ack(struct irq_data *d) { unsigned long flags; - unsigned int hwirq; + unsigned int hwirq = irqd_to_hwirq(d); int irq_line; u32 mask; - hwirq = socrates_fpga_irq_to_hw(d->irq); - irq_line = fpga_irqs[hwirq].irq_line; raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) @@ -164,12 +156,10 @@ static void socrates_fpga_pic_mask_ack(struct irq_data *d) static void socrates_fpga_pic_unmask(struct irq_data *d) { unsigned long flags; - unsigned int hwirq; + unsigned int hwirq = irqd_to_hwirq(d); int irq_line; u32 mask; - hwirq = socrates_fpga_irq_to_hw(d->irq); - irq_line = fpga_irqs[hwirq].irq_line; raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) @@ -182,12 +172,10 @@ static void socrates_fpga_pic_unmask(struct irq_data *d) static void socrates_fpga_pic_eoi(struct irq_data *d) { unsigned long flags; - unsigned int hwirq; + unsigned int hwirq = irqd_to_hwirq(d); int irq_line; u32 mask; - hwirq = socrates_fpga_irq_to_hw(d->irq); - irq_line = fpga_irqs[hwirq].irq_line; raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) @@ -201,12 +189,10 @@ static int socrates_fpga_pic_set_type(struct irq_data *d, unsigned int flow_type) { unsigned long flags; - unsigned int hwirq; + unsigned int hwirq = irqd_to_hwirq(d); int polarity; u32 mask; - hwirq = socrates_fpga_irq_to_hw(d->irq); - if (fpga_irqs[hwirq].type != IRQ_TYPE_NONE) return -EINVAL; diff --git a/trunk/arch/powerpc/platforms/86xx/gef_pic.c b/trunk/arch/powerpc/platforms/86xx/gef_pic.c index 0beec7d5566b..94594e58594c 100644 --- a/trunk/arch/powerpc/platforms/86xx/gef_pic.c +++ b/trunk/arch/powerpc/platforms/86xx/gef_pic.c @@ -46,8 +46,6 @@ #define GEF_PIC_CPU0_MCP_MASK GEF_PIC_MCP_MASK(0) #define GEF_PIC_CPU1_MCP_MASK GEF_PIC_MCP_MASK(1) -#define gef_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) - static DEFINE_RAW_SPINLOCK(gef_pic_lock); @@ -113,11 +111,9 @@ void gef_pic_cascade(unsigned int irq, struct irq_desc *desc) static void gef_pic_mask(struct irq_data *d) { unsigned long flags; - unsigned int hwirq; + unsigned int hwirq = irqd_to_hwirq(d); u32 mask; - hwirq = gef_irq_to_hw(d->irq); - raw_spin_lock_irqsave(&gef_pic_lock, flags); mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0)); mask &= ~(1 << hwirq); @@ -136,11 +132,9 @@ static void gef_pic_mask_ack(struct irq_data *d) static void gef_pic_unmask(struct irq_data *d) { unsigned long flags; - unsigned int hwirq; + unsigned int 
hwirq = irqd_to_hwirq(d); u32 mask; - hwirq = gef_irq_to_hw(d->irq); - raw_spin_lock_irqsave(&gef_pic_lock, flags); mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0)); mask |= (1 << hwirq); diff --git a/trunk/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/trunk/arch/powerpc/platforms/86xx/mpc8610_hpcd.c index 018cc67be426..a896511690c2 100644 --- a/trunk/arch/powerpc/platforms/86xx/mpc8610_hpcd.c +++ b/trunk/arch/powerpc/platforms/86xx/mpc8610_hpcd.c @@ -66,7 +66,7 @@ static void __init mpc8610_suspend_init(void) return; } - ret = request_irq(irq, mpc8610_sw9_irq, 0, "sw9/wakeup", NULL); + ret = request_irq(irq, mpc8610_sw9_irq, 0, "sw9:wakeup", NULL); if (ret) { pr_err("%s: can't request pixis event IRQ: %d\n", __func__, ret); @@ -105,45 +105,77 @@ machine_device_initcall(mpc86xx_hpcd, mpc8610_declare_of_platform_devices); #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) -static u32 get_busfreq(void) -{ - struct device_node *node; - - u32 fs_busfreq = 0; - node = of_find_node_by_type(NULL, "cpu"); - if (node) { - unsigned int size; - const unsigned int *prop = - of_get_property(node, "bus-frequency", &size); - if (prop) - fs_busfreq = *prop; - of_node_put(node); - }; - return fs_busfreq; -} +/* + * DIU Area Descriptor + * + * The MPC8610 reference manual shows the bits of the AD register in + * little-endian order, which causes the BLUE_C field to be split into two + * parts. To simplify the definition of the MAKE_AD() macro, we define the + * fields in big-endian order and byte-swap the result. + * + * So even though the registers don't look like they're in the + * same bit positions as they are on the P1022, the same value is written to + * the AD register on the MPC8610 and on the P1022. + */ +#define AD_BYTE_F 0x10000000 +#define AD_ALPHA_C_MASK 0x0E000000 +#define AD_ALPHA_C_SHIFT 25 +#define AD_BLUE_C_MASK 0x01800000 +#define AD_BLUE_C_SHIFT 23 +#define AD_GREEN_C_MASK 0x00600000 +#define AD_GREEN_C_SHIFT 21 +#define AD_RED_C_MASK 0x00180000 +#define AD_RED_C_SHIFT 19 +#define AD_PALETTE 0x00040000 +#define AD_PIXEL_S_MASK 0x00030000 +#define AD_PIXEL_S_SHIFT 16 +#define AD_COMP_3_MASK 0x0000F000 +#define AD_COMP_3_SHIFT 12 +#define AD_COMP_2_MASK 0x00000F00 +#define AD_COMP_2_SHIFT 8 +#define AD_COMP_1_MASK 0x000000F0 +#define AD_COMP_1_SHIFT 4 +#define AD_COMP_0_MASK 0x0000000F +#define AD_COMP_0_SHIFT 0 + +#define MAKE_AD(alpha, red, blue, green, size, c0, c1, c2, c3) \ + cpu_to_le32(AD_BYTE_F | (alpha << AD_ALPHA_C_SHIFT) | \ + (blue << AD_BLUE_C_SHIFT) | (green << AD_GREEN_C_SHIFT) | \ + (red << AD_RED_C_SHIFT) | (c3 << AD_COMP_3_SHIFT) | \ + (c2 << AD_COMP_2_SHIFT) | (c1 << AD_COMP_1_SHIFT) | \ + (c0 << AD_COMP_0_SHIFT) | (size << AD_PIXEL_S_SHIFT)) unsigned int mpc8610hpcd_get_pixel_format(unsigned int bits_per_pixel, int monitor_port) { static const unsigned long pixelformat[][3] = { - {0x88882317, 0x88083218, 0x65052119}, - {0x88883316, 0x88082219, 0x65053118}, + { + MAKE_AD(3, 0, 2, 1, 3, 8, 8, 8, 8), + MAKE_AD(4, 2, 0, 1, 2, 8, 8, 8, 0), + MAKE_AD(4, 0, 2, 1, 1, 5, 6, 5, 0) + }, + { + MAKE_AD(3, 2, 0, 1, 3, 8, 8, 8, 8), + MAKE_AD(4, 0, 2, 1, 2, 8, 8, 8, 0), + MAKE_AD(4, 2, 0, 1, 1, 5, 6, 5, 0) + }, }; - unsigned int pix_fmt, arch_monitor; + unsigned int arch_monitor; + /* The DVI port is mis-wired on revision 1 of this board. */ arch_monitor = ((*pixis_arch == 0x01) && (monitor_port == 0))? 
0 : 1; - /* DVI port for board version 0x01 */ - - if (bits_per_pixel == 32) - pix_fmt = pixelformat[arch_monitor][0]; - else if (bits_per_pixel == 24) - pix_fmt = pixelformat[arch_monitor][1]; - else if (bits_per_pixel == 16) - pix_fmt = pixelformat[arch_monitor][2]; - else - pix_fmt = pixelformat[1][0]; - - return pix_fmt; + + switch (bits_per_pixel) { + case 32: + return pixelformat[arch_monitor][0]; + case 24: + return pixelformat[arch_monitor][1]; + case 16: + return pixelformat[arch_monitor][2]; + default: + pr_err("fsl-diu: unsupported pixel depth %u\n", bits_per_pixel); + return 0; + } } void mpc8610hpcd_set_gamma_table(int monitor_port, char *gamma_table_base) @@ -190,8 +222,7 @@ void mpc8610hpcd_set_pixel_clock(unsigned int pixclock) } /* Pixel Clock configuration */ - pr_debug("DIU: Bus Frequency = %d\n", get_busfreq()); - speed_ccb = get_busfreq(); + speed_ccb = fsl_get_sys_freq(); /* Calculate the pixel clock with the smallest error */ /* calculate the following in steps to avoid overflow */ diff --git a/trunk/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/trunk/arch/powerpc/platforms/86xx/mpc86xx_smp.c index eacea0e3fcc8..af09baee22cb 100644 --- a/trunk/arch/powerpc/platforms/86xx/mpc86xx_smp.c +++ b/trunk/arch/powerpc/platforms/86xx/mpc86xx_smp.c @@ -56,7 +56,7 @@ smp_86xx_release_core(int nr) } -static void __init +static int __init smp_86xx_kick_cpu(int nr) { unsigned int save_vector; @@ -65,7 +65,7 @@ smp_86xx_kick_cpu(int nr) unsigned int *vector = (unsigned int *)(KERNELBASE + 0x100); if (nr < 0 || nr >= NR_CPUS) - return; + return -ENOENT; pr_debug("smp_86xx_kick_cpu: kick CPU #%d\n", nr); @@ -92,6 +92,8 @@ smp_86xx_kick_cpu(int nr) local_irq_restore(flags); pr_debug("wait CPU #%d for %d msecs.\n", nr, n); + + return 0; } diff --git a/trunk/arch/powerpc/platforms/8xx/m8xx_setup.c b/trunk/arch/powerpc/platforms/8xx/m8xx_setup.c index 9ecce995dd4b..1e121088826f 100644 --- a/trunk/arch/powerpc/platforms/8xx/m8xx_setup.c +++ b/trunk/arch/powerpc/platforms/8xx/m8xx_setup.c @@ -150,7 +150,7 @@ void __init mpc8xx_calibrate_decr(void) */ cpu = of_find_node_by_type(NULL, "cpu"); virq= irq_of_parse_and_map(cpu, 0); - irq = irq_map[virq].hwirq; + irq = virq_to_hw(virq); sys_tmr2 = immr_map(im_sit); out_be16(&sys_tmr2->sit_tbscr, ((1 << (7 - (irq/2))) << 8) | diff --git a/trunk/arch/powerpc/platforms/Kconfig b/trunk/arch/powerpc/platforms/Kconfig index f7b07720aa30..f970ca2b180c 100644 --- a/trunk/arch/powerpc/platforms/Kconfig +++ b/trunk/arch/powerpc/platforms/Kconfig @@ -20,6 +20,7 @@ source "arch/powerpc/platforms/embedded6xx/Kconfig" source "arch/powerpc/platforms/44x/Kconfig" source "arch/powerpc/platforms/40x/Kconfig" source "arch/powerpc/platforms/amigaone/Kconfig" +source "arch/powerpc/platforms/wsp/Kconfig" config KVM_GUEST bool "KVM Guest support" @@ -56,16 +57,19 @@ config UDBG_RTAS_CONSOLE depends on PPC_RTAS default n +config PPC_SMP_MUXED_IPI + bool + help + Select this option if your platform supports SMP and your + interrupt controller provides fewer than 4 interrupts to each + CPU. This will enable the generic code to multiplex the 4 + messages onto one IPI.
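The PPC_SMP_MUXED_IPI help text just above describes multiplexing the kernel's four SMP messages onto a single inter-processor interrupt, the same mechanism the smp_muxed_ipi_message_pass/smp_ipi_demux call sites elsewhere in this patch rely on. The following is only a rough, hypothetical userspace sketch of that idea in plain C11, not the kernel implementation; the names (ipi_message, muxed_ipi_message_pass, demux_ipi, cause_ipi) are illustrative stand-ins. The sender records its message in a per-CPU pending bitmask and rings the one available doorbell; the receiver drains every pending bit when that single interrupt arrives.

```c
/* Hypothetical sketch: multiplexing 4 IPI messages onto one doorbell.
 * Build with: cc -std=c11 mux_ipi.c */
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

enum ipi_msg {
	MSG_CALL_FUNCTION,
	MSG_RESCHEDULE,
	MSG_CALL_FUNC_SINGLE,
	MSG_DEBUGGER_BREAK,
};

/* One pending-message bitmask per CPU; set by senders, drained by the receiver. */
static _Atomic unsigned int ipi_message[NR_CPUS];

static void cause_ipi(int cpu)
{
	/* Stand-in for ringing the single hardware doorbell for 'cpu'. */
	printf("doorbell -> cpu %d\n", cpu);
}

/* Sender side: mark which message is pending, then raise the one IPI. */
static void muxed_ipi_message_pass(int cpu, enum ipi_msg msg)
{
	atomic_fetch_or(&ipi_message[cpu], 1u << msg);
	cause_ipi(cpu);
}

/* Receiver side: the single interrupt handler demultiplexes all pending bits. */
static void demux_ipi(int cpu)
{
	unsigned int bits = atomic_exchange(&ipi_message[cpu], 0u);

	while (bits) {
		int msg = __builtin_ctz(bits);

		bits &= bits - 1;
		printf("cpu %d handles message %d\n", cpu, msg);
	}
}

int main(void)
{
	muxed_ipi_message_pass(1, MSG_RESCHEDULE);
	muxed_ipi_message_pass(1, MSG_CALL_FUNCTION);
	demux_ipi(1);	/* one doorbell delivery drains both pending messages */
	return 0;
}
```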
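A quick numeric check of the MAKE_AD() macro introduced in the mpc8610_hpcd.c hunk above: assembling the fields in big-endian order and then byte-swapping should reproduce the magic constants the patch removes. The standalone host-side sketch below (not kernel code; the cpu_to_le32() swap is written out as __builtin_bswap32() so it runs anywhere) computes the 32bpp descriptor and prints 0x88882317, which matches the first entry of the old hard-coded pixelformat table.

```c
/* Host-side check of the DIU area-descriptor packing used by MAKE_AD(). */
#include <stdint.h>
#include <stdio.h>

#define AD_BYTE_F		0x10000000u
#define AD_ALPHA_C_SHIFT	25
#define AD_BLUE_C_SHIFT		23
#define AD_GREEN_C_SHIFT	21
#define AD_RED_C_SHIFT		19
#define AD_PIXEL_S_SHIFT	16
#define AD_COMP_3_SHIFT		12
#define AD_COMP_2_SHIFT		8
#define AD_COMP_1_SHIFT		4
#define AD_COMP_0_SHIFT		0

/* Same field packing as the kernel macro, with the cpu_to_le32() byte swap
 * made explicit so the check works on any host endianness. */
static uint32_t make_ad(uint32_t alpha, uint32_t red, uint32_t blue,
			uint32_t green, uint32_t size, uint32_t c0,
			uint32_t c1, uint32_t c2, uint32_t c3)
{
	uint32_t be = AD_BYTE_F | (alpha << AD_ALPHA_C_SHIFT) |
		      (blue << AD_BLUE_C_SHIFT) | (green << AD_GREEN_C_SHIFT) |
		      (red << AD_RED_C_SHIFT) | (c3 << AD_COMP_3_SHIFT) |
		      (c2 << AD_COMP_2_SHIFT) | (c1 << AD_COMP_1_SHIFT) |
		      (c0 << AD_COMP_0_SHIFT) | (size << AD_PIXEL_S_SHIFT);

	return __builtin_bswap32(be);	/* what cpu_to_le32() does on a BE CPU */
}

int main(void)
{
	/* 32bpp entry: 0x17238888 before the swap, 0x88882317 after it. */
	printf("0x%08x\n", make_ad(3, 0, 2, 1, 3, 8, 8, 8, 8));
	return 0;
}
```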
+ config PPC_UDBG_BEAT bool "BEAT based debug console" depends on PPC_CELLEB default n -config XICS - depends on PPC_PSERIES - bool - default y - config IPIC bool default n @@ -147,14 +151,27 @@ config PPC_970_NAP bool default n +config PPC_P7_NAP + bool + default n + config PPC_INDIRECT_IO bool select GENERIC_IOMAP - default n + +config PPC_INDIRECT_PIO + bool + select PPC_INDIRECT_IO + +config PPC_INDIRECT_MMIO + bool + select PPC_INDIRECT_IO + +config PPC_IO_WORKAROUNDS + bool config GENERIC_IOMAP bool - default n source "drivers/cpufreq/Kconfig" diff --git a/trunk/arch/powerpc/platforms/Kconfig.cputype b/trunk/arch/powerpc/platforms/Kconfig.cputype index 111138c55f9c..2165b65876f9 100644 --- a/trunk/arch/powerpc/platforms/Kconfig.cputype +++ b/trunk/arch/powerpc/platforms/Kconfig.cputype @@ -73,6 +73,7 @@ config PPC_BOOK3S_64 config PPC_BOOK3E_64 bool "Embedded processors" select PPC_FPU # Make it a choice ? + select PPC_SMP_MUXED_IPI endchoice @@ -107,6 +108,10 @@ config POWER4 depends on PPC64 && PPC_BOOK3S def_bool y +config PPC_A2 + bool + depends on PPC_BOOK3E_64 + config TUNE_CELL bool "Optimize for Cell Broadband Engine" depends on PPC64 && PPC_BOOK3S @@ -174,6 +179,7 @@ config FSL_BOOKE config PPC_FSL_BOOK3E bool select FSL_EMB_PERFMON + select PPC_SMP_MUXED_IPI default y if FSL_BOOKE config PTE_64BIT @@ -226,6 +232,24 @@ config VSX If in doubt, say Y here. +config PPC_ICSWX + bool "Support for PowerPC icswx coprocessor instruction" + depends on POWER4 + default n + ---help--- + + This option enables kernel support for the PowerPC Initiate + Coprocessor Store Word (icswx) coprocessor instruction on POWER7 + or newer processors. + + This option is only useful if you have a processor that supports + the icswx coprocessor instruction. It does not have any effect + on processors without the icswx coprocessor instruction. + + This option slightly increases kernel memory usage. + + If in doubt, say N here. 
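The "slightly increases kernel memory usage" note in the PPC_ICSWX help refers to the per-mm spinlock that the init_new_context() hunk earlier in this patch allocates when CONFIG_PPC_ICSWX is set, and frees again in destroy_context(). The following is only a rough standalone sketch of that allocate-last, unwind-on-failure pattern; the types and helper names are illustrative, not the kernel's mm_context fields.

```c
/* Hypothetical sketch of the init_new_context()/destroy_context() pattern:
 * allocate the optional per-context lock last and undo earlier steps on failure. */
#include <pthread.h>
#include <stdlib.h>
#include <errno.h>

struct context {
	int id;
	pthread_mutex_t *cop_lock;	/* stands in for mm->context.cop_lockp */
};

static int alloc_context_id(void) { static int next = 1; return next++; }
static void free_context_id(int id) { (void)id; }

static int init_new_context(struct context *ctx)
{
	ctx->id = alloc_context_id();

	ctx->cop_lock = malloc(sizeof(*ctx->cop_lock));
	if (!ctx->cop_lock) {
		/* Roll back what was set up above, as the patch does with
		 * __destroy_context() and subpage_prot_free(). */
		free_context_id(ctx->id);
		ctx->id = -1;
		return -ENOMEM;
	}
	pthread_mutex_init(ctx->cop_lock, NULL);
	return 0;
}

static void destroy_context(struct context *ctx)
{
	pthread_mutex_destroy(ctx->cop_lock);
	free(ctx->cop_lock);
	ctx->cop_lock = NULL;
	free_context_id(ctx->id);
	ctx->id = -1;
}

int main(void)
{
	struct context ctx;

	if (init_new_context(&ctx) == 0)
		destroy_context(&ctx);
	return 0;
}
```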
+ config SPE bool "SPE Support" depends on E200 || (E500 && !PPC_E500MC) diff --git a/trunk/arch/powerpc/platforms/Makefile b/trunk/arch/powerpc/platforms/Makefile index fdb9f0b0d7a8..73e2116cfeed 100644 --- a/trunk/arch/powerpc/platforms/Makefile +++ b/trunk/arch/powerpc/platforms/Makefile @@ -22,3 +22,4 @@ obj-$(CONFIG_PPC_CELL) += cell/ obj-$(CONFIG_PPC_PS3) += ps3/ obj-$(CONFIG_EMBEDDED6xx) += embedded6xx/ obj-$(CONFIG_AMIGAONE) += amigaone/ +obj-$(CONFIG_PPC_WSP) += wsp/ diff --git a/trunk/arch/powerpc/platforms/cell/Kconfig b/trunk/arch/powerpc/platforms/cell/Kconfig index 81239ebed83f..67d5009b4e86 100644 --- a/trunk/arch/powerpc/platforms/cell/Kconfig +++ b/trunk/arch/powerpc/platforms/cell/Kconfig @@ -6,7 +6,8 @@ config PPC_CELL_COMMON bool select PPC_CELL select PPC_DCR_MMIO - select PPC_INDIRECT_IO + select PPC_INDIRECT_PIO + select PPC_INDIRECT_MMIO select PPC_NATIVE select PPC_RTAS select IRQ_EDGE_EOI_HANDLER @@ -15,6 +16,7 @@ config PPC_CELL_NATIVE bool select PPC_CELL_COMMON select MPIC + select PPC_IO_WORKAROUNDS select IBM_NEW_EMAC_EMAC4 select IBM_NEW_EMAC_RGMII select IBM_NEW_EMAC_ZMII #test only diff --git a/trunk/arch/powerpc/platforms/cell/Makefile b/trunk/arch/powerpc/platforms/cell/Makefile index 83fafe922641..a4a89350bcfc 100644 --- a/trunk/arch/powerpc/platforms/cell/Makefile +++ b/trunk/arch/powerpc/platforms/cell/Makefile @@ -1,7 +1,7 @@ obj-$(CONFIG_PPC_CELL_COMMON) += cbe_regs.o interrupt.o pervasive.o obj-$(CONFIG_PPC_CELL_NATIVE) += iommu.o setup.o spider-pic.o \ - pmu.o io-workarounds.o spider-pci.o + pmu.o spider-pci.o obj-$(CONFIG_CBE_RAS) += ras.o obj-$(CONFIG_CBE_THERM) += cbe_thermal.o @@ -39,11 +39,10 @@ obj-y += celleb_setup.o \ celleb_pci.o celleb_scc_epci.o \ celleb_scc_pciex.o \ celleb_scc_uhc.o \ - io-workarounds.o spider-pci.o \ - beat.o beat_htab.o beat_hvCall.o \ - beat_interrupt.o beat_iommu.o + spider-pci.o beat.o beat_htab.o \ + beat_hvCall.o beat_interrupt.o \ + beat_iommu.o -obj-$(CONFIG_SMP) += beat_smp.o obj-$(CONFIG_PPC_UDBG_BEAT) += beat_udbg.o obj-$(CONFIG_SERIAL_TXX9) += celleb_scc_sio.o obj-$(CONFIG_SPU_BASE) += beat_spu_priv1.o diff --git a/trunk/arch/powerpc/platforms/cell/axon_msi.c b/trunk/arch/powerpc/platforms/cell/axon_msi.c index bb5ebf8fa80b..ac06903e136a 100644 --- a/trunk/arch/powerpc/platforms/cell/axon_msi.c +++ b/trunk/arch/powerpc/platforms/cell/axon_msi.c @@ -113,7 +113,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) pr_devel("axon_msi: woff %x roff %x msi %x\n", write_offset, msic->read_offset, msi); - if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host) { + if (msi < NR_IRQS && irq_get_chip_data(msi) == msic) { generic_handle_irq(msi); msic->fifo_virt[idx] = cpu_to_le32(0xffffffff); } else { @@ -320,6 +320,7 @@ static struct irq_chip msic_irq_chip = { static int msic_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { + irq_set_chip_data(virq, h->host_data); irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq); return 0; diff --git a/trunk/arch/powerpc/platforms/cell/beat_interrupt.c b/trunk/arch/powerpc/platforms/cell/beat_interrupt.c index 4cb9e147c307..55015e1f6939 100644 --- a/trunk/arch/powerpc/platforms/cell/beat_interrupt.c +++ b/trunk/arch/powerpc/platforms/cell/beat_interrupt.c @@ -147,16 +147,6 @@ static int beatic_pic_host_map(struct irq_host *h, unsigned int virq, return 0; } -/* - * Update binding hardware IRQ number (hw) and Virtuql - * IRQ number (virq). This is called only once for a given mapping. 
- */ -static void beatic_pic_host_remap(struct irq_host *h, unsigned int virq, - irq_hw_number_t hw) -{ - beat_construct_and_connect_irq_plug(virq, hw); -} - /* * Translate device-tree interrupt spec to irq_hw_number_t style (ulong), * to pass away to irq_create_mapping(). @@ -184,7 +174,6 @@ static int beatic_pic_host_match(struct irq_host *h, struct device_node *np) static struct irq_host_ops beatic_pic_host_ops = { .map = beatic_pic_host_map, - .remap = beatic_pic_host_remap, .unmap = beatic_pic_host_unmap, .xlate = beatic_pic_host_xlate, .match = beatic_pic_host_match, @@ -257,22 +246,6 @@ void __init beatic_init_IRQ(void) irq_set_default_host(beatic_host); } -#ifdef CONFIG_SMP - -/* Nullified to compile with SMP mode */ -void beatic_setup_cpu(int cpu) -{ -} - -void beatic_cause_IPI(int cpu, int mesg) -{ -} - -void beatic_request_IPIs(void) -{ -} -#endif /* CONFIG_SMP */ - void beatic_deinit_IRQ(void) { int i; diff --git a/trunk/arch/powerpc/platforms/cell/beat_interrupt.h b/trunk/arch/powerpc/platforms/cell/beat_interrupt.h index b470fd0051f1..a7e52f91a078 100644 --- a/trunk/arch/powerpc/platforms/cell/beat_interrupt.h +++ b/trunk/arch/powerpc/platforms/cell/beat_interrupt.h @@ -24,9 +24,6 @@ extern void beatic_init_IRQ(void); extern unsigned int beatic_get_irq(void); -extern void beatic_cause_IPI(int cpu, int mesg); -extern void beatic_request_IPIs(void); -extern void beatic_setup_cpu(int); extern void beatic_deinit_IRQ(void); #endif diff --git a/trunk/arch/powerpc/platforms/cell/beat_smp.c b/trunk/arch/powerpc/platforms/cell/beat_smp.c deleted file mode 100644 index 26efc204c47f..000000000000 --- a/trunk/arch/powerpc/platforms/cell/beat_smp.c +++ /dev/null @@ -1,124 +0,0 @@ -/* - * SMP support for Celleb platform. (Incomplete) - * - * (C) Copyright 2006 TOSHIBA CORPORATION - * - * This code is based on arch/powerpc/platforms/cell/smp.c: - * Dave Engebretsen, Peter Bergner, and - * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com - * Plus various changes from other IBM teams... - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - */ - -#undef DEBUG - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "beat_interrupt.h" - -#ifdef DEBUG -#define DBG(fmt...) udbg_printf(fmt) -#else -#define DBG(fmt...) -#endif - -/* - * The primary thread of each non-boot processor is recorded here before - * smp init. - */ -/* static cpumask_t of_spin_map; */ - -/** - * smp_startup_cpu() - start the given cpu - * - * At boot time, there is nothing to do for primary threads which were - * started from Open Firmware. For anything else, call RTAS with the - * appropriate start location. 
- * - * Returns: - * 0 - failure - * 1 - success - */ -static inline int __devinit smp_startup_cpu(unsigned int lcpu) -{ - return 0; -} - -static void smp_beatic_message_pass(int target, int msg) -{ - unsigned int i; - - if (target < NR_CPUS) { - beatic_cause_IPI(target, msg); - } else { - for_each_online_cpu(i) { - if (target == MSG_ALL_BUT_SELF - && i == smp_processor_id()) - continue; - beatic_cause_IPI(i, msg); - } - } -} - -static int __init smp_beatic_probe(void) -{ - return cpus_weight(cpu_possible_map); -} - -static void __devinit smp_beatic_setup_cpu(int cpu) -{ - beatic_setup_cpu(cpu); -} - -static void __devinit smp_celleb_kick_cpu(int nr) -{ - BUG_ON(nr < 0 || nr >= NR_CPUS); - - if (!smp_startup_cpu(nr)) - return; -} - -static int smp_celleb_cpu_bootable(unsigned int nr) -{ - return 1; -} -static struct smp_ops_t bpa_beatic_smp_ops = { - .message_pass = smp_beatic_message_pass, - .probe = smp_beatic_probe, - .kick_cpu = smp_celleb_kick_cpu, - .setup_cpu = smp_beatic_setup_cpu, - .cpu_bootable = smp_celleb_cpu_bootable, -}; - -/* This is called very early */ -void __init smp_init_celleb(void) -{ - DBG(" -> smp_init_celleb()\n"); - - smp_ops = &bpa_beatic_smp_ops; - - DBG(" <- smp_init_celleb()\n"); -} diff --git a/trunk/arch/powerpc/platforms/cell/cbe_regs.c b/trunk/arch/powerpc/platforms/cell/cbe_regs.c index dbc338f187a2..f3917e7a5b44 100644 --- a/trunk/arch/powerpc/platforms/cell/cbe_regs.c +++ b/trunk/arch/powerpc/platforms/cell/cbe_regs.c @@ -45,8 +45,8 @@ static struct cbe_thread_map unsigned int cbe_id; } cbe_thread_map[NR_CPUS]; -static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = CPU_MASK_NONE }; -static cpumask_t cbe_first_online_cpu = CPU_MASK_NONE; +static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = {CPU_BITS_NONE} }; +static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE }; static struct cbe_regs_map *cbe_find_map(struct device_node *np) { @@ -159,7 +159,8 @@ EXPORT_SYMBOL_GPL(cbe_cpu_to_node); u32 cbe_node_to_cpu(int node) { - return find_first_bit( (unsigned long *) &cbe_local_mask[node], sizeof(cpumask_t)); + return cpumask_first(&cbe_local_mask[node]); + } EXPORT_SYMBOL_GPL(cbe_node_to_cpu); @@ -268,9 +269,9 @@ void __init cbe_regs_init(void) thread->regs = map; thread->cbe_id = cbe_id; map->be_node = thread->be_node; - cpu_set(i, cbe_local_mask[cbe_id]); + cpumask_set_cpu(i, &cbe_local_mask[cbe_id]); if(thread->thread_id == 0) - cpu_set(i, cbe_first_online_cpu); + cpumask_set_cpu(i, &cbe_first_online_cpu); } } diff --git a/trunk/arch/powerpc/platforms/cell/celleb_pci.c b/trunk/arch/powerpc/platforms/cell/celleb_pci.c index 404d1fc04d59..5822141aa63f 100644 --- a/trunk/arch/powerpc/platforms/cell/celleb_pci.c +++ b/trunk/arch/powerpc/platforms/cell/celleb_pci.c @@ -41,7 +41,6 @@ #include #include -#include "io-workarounds.h" #include "celleb_pci.h" #define MAX_PCI_DEVICES 32 @@ -320,7 +319,7 @@ static int __init celleb_setup_fake_pci_device(struct device_node *node, size = 256; config = &private->fake_config[devno][fn]; - *config = alloc_maybe_bootmem(size, GFP_KERNEL); + *config = zalloc_maybe_bootmem(size, GFP_KERNEL); if (*config == NULL) { printk(KERN_ERR "PCI: " "not enough memory for fake configuration space\n"); @@ -331,7 +330,7 @@ static int __init celleb_setup_fake_pci_device(struct device_node *node, size = sizeof(struct celleb_pci_resource); res = &private->res[devno][fn]; - *res = alloc_maybe_bootmem(size, GFP_KERNEL); + *res = zalloc_maybe_bootmem(size, GFP_KERNEL); if (*res == NULL) { printk(KERN_ERR "PCI: not enough 
memory for resource data space\n"); @@ -432,7 +431,7 @@ static int __init phb_set_bus_ranges(struct device_node *dev, static void __init celleb_alloc_private_mem(struct pci_controller *hose) { hose->private_data = - alloc_maybe_bootmem(sizeof(struct celleb_pci_private), + zalloc_maybe_bootmem(sizeof(struct celleb_pci_private), GFP_KERNEL); } @@ -469,18 +468,6 @@ static struct of_device_id celleb_phb_match[] __initdata = { }, }; -static int __init celleb_io_workaround_init(struct pci_controller *phb, - struct celleb_phb_spec *phb_spec) -{ - if (phb_spec->ops) { - iowa_register_bus(phb, phb_spec->ops, phb_spec->iowa_init, - phb_spec->iowa_data); - io_workaround_init(); - } - - return 0; -} - int __init celleb_setup_phb(struct pci_controller *phb) { struct device_node *dev = phb->dn; @@ -500,7 +487,11 @@ int __init celleb_setup_phb(struct pci_controller *phb) if (rc) return 1; - return celleb_io_workaround_init(phb, phb_spec); + if (phb_spec->ops) + iowa_register_bus(phb, phb_spec->ops, + phb_spec->iowa_init, + phb_spec->iowa_data); + return 0; } int celleb_pci_probe_mode(struct pci_bus *bus) diff --git a/trunk/arch/powerpc/platforms/cell/celleb_pci.h b/trunk/arch/powerpc/platforms/cell/celleb_pci.h index 4cba1523ec50..a801fcc5f389 100644 --- a/trunk/arch/powerpc/platforms/cell/celleb_pci.h +++ b/trunk/arch/powerpc/platforms/cell/celleb_pci.h @@ -26,8 +26,9 @@ #include #include #include +#include -#include "io-workarounds.h" +struct iowa_bus; struct celleb_phb_spec { int (*setup)(struct device_node *, struct pci_controller *); diff --git a/trunk/arch/powerpc/platforms/cell/celleb_setup.c b/trunk/arch/powerpc/platforms/cell/celleb_setup.c index e53845579770..d58d9bae4b9b 100644 --- a/trunk/arch/powerpc/platforms/cell/celleb_setup.c +++ b/trunk/arch/powerpc/platforms/cell/celleb_setup.c @@ -128,10 +128,6 @@ static void __init celleb_setup_arch_beat(void) spu_management_ops = &spu_management_of_ops; #endif -#ifdef CONFIG_SMP - smp_init_celleb(); -#endif - celleb_setup_arch_common(); } diff --git a/trunk/arch/powerpc/platforms/cell/interrupt.c b/trunk/arch/powerpc/platforms/cell/interrupt.c index 44cfd1bef89b..449c08c15862 100644 --- a/trunk/arch/powerpc/platforms/cell/interrupt.c +++ b/trunk/arch/powerpc/platforms/cell/interrupt.c @@ -196,8 +196,20 @@ static irqreturn_t iic_ipi_action(int irq, void *dev_id) { int ipi = (int)(long)dev_id; - smp_message_recv(ipi); - + switch(ipi) { + case PPC_MSG_CALL_FUNCTION: + generic_smp_call_function_interrupt(); + break; + case PPC_MSG_RESCHEDULE: + scheduler_ipi(); + break; + case PPC_MSG_CALL_FUNC_SINGLE: + generic_smp_call_function_single_interrupt(); + break; + case PPC_MSG_DEBUGGER_BREAK: + debug_ipi_action(0, NULL); + break; + } return IRQ_HANDLED; } static void iic_request_ipi(int ipi, const char *name) diff --git a/trunk/arch/powerpc/platforms/cell/qpace_setup.c b/trunk/arch/powerpc/platforms/cell/qpace_setup.c index d31c594cfdf3..51e290126bc1 100644 --- a/trunk/arch/powerpc/platforms/cell/qpace_setup.c +++ b/trunk/arch/powerpc/platforms/cell/qpace_setup.c @@ -42,7 +42,6 @@ #include "interrupt.h" #include "pervasive.h" #include "ras.h" -#include "io-workarounds.h" static void qpace_show_cpuinfo(struct seq_file *m) { diff --git a/trunk/arch/powerpc/platforms/cell/setup.c b/trunk/arch/powerpc/platforms/cell/setup.c index fd57bfe00edf..c73cf4c43fc2 100644 --- a/trunk/arch/powerpc/platforms/cell/setup.c +++ b/trunk/arch/powerpc/platforms/cell/setup.c @@ -51,11 +51,11 @@ #include #include #include +#include #include "interrupt.h" #include "pervasive.h" 
#include "ras.h" -#include "io-workarounds.h" #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) @@ -136,8 +136,6 @@ static int __devinit cell_setup_phb(struct pci_controller *phb) iowa_register_bus(phb, &spiderpci_ops, &spiderpci_iowa_init, (void *)SPIDER_PCI_REG_BASE); - io_workaround_init(); - return 0; } diff --git a/trunk/arch/powerpc/platforms/cell/smp.c b/trunk/arch/powerpc/platforms/cell/smp.c index f774530075b7..d176e6148e3f 100644 --- a/trunk/arch/powerpc/platforms/cell/smp.c +++ b/trunk/arch/powerpc/platforms/cell/smp.c @@ -77,7 +77,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) unsigned int pcpu; int start_cpu; - if (cpu_isset(lcpu, of_spin_map)) + if (cpumask_test_cpu(lcpu, &of_spin_map)) /* Already started by OF and sitting in spin loop */ return 1; @@ -103,27 +103,11 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) return 1; } -static void smp_iic_message_pass(int target, int msg) -{ - unsigned int i; - - if (target < NR_CPUS) { - iic_cause_IPI(target, msg); - } else { - for_each_online_cpu(i) { - if (target == MSG_ALL_BUT_SELF - && i == smp_processor_id()) - continue; - iic_cause_IPI(i, msg); - } - } -} - static int __init smp_iic_probe(void) { iic_request_IPIs(); - return cpus_weight(cpu_possible_map); + return cpumask_weight(cpu_possible_mask); } static void __devinit smp_cell_setup_cpu(int cpu) @@ -137,12 +121,12 @@ static void __devinit smp_cell_setup_cpu(int cpu) mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER); } -static void __devinit smp_cell_kick_cpu(int nr) +static int __devinit smp_cell_kick_cpu(int nr) { BUG_ON(nr < 0 || nr >= NR_CPUS); if (!smp_startup_cpu(nr)) - return; + return -ENOENT; /* * The processor is currently spinning, waiting for the @@ -150,6 +134,8 @@ static void __devinit smp_cell_kick_cpu(int nr) * the processor will continue on to secondary_start */ paca[nr].cpu_start = 1; + + return 0; } static int smp_cell_cpu_bootable(unsigned int nr) @@ -166,7 +152,7 @@ static int smp_cell_cpu_bootable(unsigned int nr) return 1; } static struct smp_ops_t bpa_iic_smp_ops = { - .message_pass = smp_iic_message_pass, + .message_pass = iic_cause_IPI, .probe = smp_iic_probe, .kick_cpu = smp_cell_kick_cpu, .setup_cpu = smp_cell_setup_cpu, @@ -186,13 +172,12 @@ void __init smp_init_cell(void) if (cpu_has_feature(CPU_FTR_SMT)) { for_each_present_cpu(i) { if (cpu_thread_in_core(i) == 0) - cpu_set(i, of_spin_map); + cpumask_set_cpu(i, &of_spin_map); } - } else { - of_spin_map = cpu_present_map; - } + } else + cpumask_copy(&of_spin_map, cpu_present_mask); - cpu_clear(boot_cpuid, of_spin_map); + cpumask_clear_cpu(boot_cpuid, &of_spin_map); /* Non-lpar has additional take/give timebase */ if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) { diff --git a/trunk/arch/powerpc/platforms/cell/spider-pci.c b/trunk/arch/powerpc/platforms/cell/spider-pci.c index ca7731c0b595..f1f7878893f3 100644 --- a/trunk/arch/powerpc/platforms/cell/spider-pci.c +++ b/trunk/arch/powerpc/platforms/cell/spider-pci.c @@ -27,8 +27,7 @@ #include #include - -#include "io-workarounds.h" +#include #define SPIDER_PCI_DISABLE_PREFETCH diff --git a/trunk/arch/powerpc/platforms/cell/spider-pic.c b/trunk/arch/powerpc/platforms/cell/spider-pic.c index c5cf50e6b45a..442c28c00f88 100644 --- a/trunk/arch/powerpc/platforms/cell/spider-pic.c +++ b/trunk/arch/powerpc/platforms/cell/spider-pic.c @@ -68,9 +68,9 @@ struct spider_pic { }; static struct spider_pic spider_pics[SPIDER_CHIP_COUNT]; -static struct spider_pic *spider_virq_to_pic(unsigned int virq) +static struct 
spider_pic *spider_irq_data_to_pic(struct irq_data *d) { - return irq_map[virq].host->host_data; + return irq_data_get_irq_chip_data(d); } static void __iomem *spider_get_irq_config(struct spider_pic *pic, @@ -81,24 +81,24 @@ static void __iomem *spider_get_irq_config(struct spider_pic *pic, static void spider_unmask_irq(struct irq_data *d) { - struct spider_pic *pic = spider_virq_to_pic(d->irq); - void __iomem *cfg = spider_get_irq_config(pic, irq_map[d->irq].hwirq); + struct spider_pic *pic = spider_irq_data_to_pic(d); + void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d)); out_be32(cfg, in_be32(cfg) | 0x30000000u); } static void spider_mask_irq(struct irq_data *d) { - struct spider_pic *pic = spider_virq_to_pic(d->irq); - void __iomem *cfg = spider_get_irq_config(pic, irq_map[d->irq].hwirq); + struct spider_pic *pic = spider_irq_data_to_pic(d); + void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d)); out_be32(cfg, in_be32(cfg) & ~0x30000000u); } static void spider_ack_irq(struct irq_data *d) { - struct spider_pic *pic = spider_virq_to_pic(d->irq); - unsigned int src = irq_map[d->irq].hwirq; + struct spider_pic *pic = spider_irq_data_to_pic(d); + unsigned int src = irqd_to_hwirq(d); /* Reset edge detection logic if necessary */ @@ -116,8 +116,8 @@ static void spider_ack_irq(struct irq_data *d) static int spider_set_irq_type(struct irq_data *d, unsigned int type) { unsigned int sense = type & IRQ_TYPE_SENSE_MASK; - struct spider_pic *pic = spider_virq_to_pic(d->irq); - unsigned int hw = irq_map[d->irq].hwirq; + struct spider_pic *pic = spider_irq_data_to_pic(d); + unsigned int hw = irqd_to_hwirq(d); void __iomem *cfg = spider_get_irq_config(pic, hw); u32 old_mask; u32 ic; @@ -171,6 +171,7 @@ static struct irq_chip spider_pic = { static int spider_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { + irq_set_chip_data(virq, h->host_data); irq_set_chip_and_handler(virq, &spider_pic, handle_level_irq); /* Set default irq type */ diff --git a/trunk/arch/powerpc/platforms/cell/spu_base.c b/trunk/arch/powerpc/platforms/cell/spu_base.c index acfaccea5f4f..3675da73623f 100644 --- a/trunk/arch/powerpc/platforms/cell/spu_base.c +++ b/trunk/arch/powerpc/platforms/cell/spu_base.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -521,18 +522,8 @@ void spu_init_channels(struct spu *spu) } EXPORT_SYMBOL_GPL(spu_init_channels); -static int spu_shutdown(struct sys_device *sysdev) -{ - struct spu *spu = container_of(sysdev, struct spu, sysdev); - - spu_free_irqs(spu); - spu_destroy_spu(spu); - return 0; -} - static struct sysdev_class spu_sysdev_class = { .name = "spu", - .shutdown = spu_shutdown, }; int spu_add_sysdev_attr(struct sysdev_attribute *attr) @@ -797,6 +788,22 @@ static inline void crash_register_spus(struct list_head *list) } #endif +static void spu_shutdown(void) +{ + struct spu *spu; + + mutex_lock(&spu_full_list_mutex); + list_for_each_entry(spu, &spu_full_list, full_list) { + spu_free_irqs(spu); + spu_destroy_spu(spu); + } + mutex_unlock(&spu_full_list_mutex); +} + +static struct syscore_ops spu_syscore_ops = { + .shutdown = spu_shutdown, +}; + static int __init init_spu_base(void) { int i, ret = 0; @@ -830,6 +837,7 @@ static int __init init_spu_base(void) crash_register_spus(&spu_full_list); mutex_unlock(&spu_full_list_mutex); spu_add_sysdev_attr(&attr_stat); + register_syscore_ops(&spu_syscore_ops); spu_init_affinity(); diff --git a/trunk/arch/powerpc/platforms/cell/spufs/sched.c 
b/trunk/arch/powerpc/platforms/cell/spufs/sched.c index 65203857b0ce..32cb4e66d2cd 100644 --- a/trunk/arch/powerpc/platforms/cell/spufs/sched.c +++ b/trunk/arch/powerpc/platforms/cell/spufs/sched.c @@ -141,7 +141,7 @@ void __spu_update_sched_info(struct spu_context *ctx) * runqueue. The context will be rescheduled on the proper node * if it is timesliced or preempted. */ - ctx->cpus_allowed = current->cpus_allowed; + cpumask_copy(&ctx->cpus_allowed, tsk_cpus_allowed(current)); /* Save the current cpu id for spu interrupt routing. */ ctx->last_ran = raw_smp_processor_id(); diff --git a/trunk/arch/powerpc/platforms/chrp/smp.c b/trunk/arch/powerpc/platforms/chrp/smp.c index 02cafecc90e3..a800122e4dda 100644 --- a/trunk/arch/powerpc/platforms/chrp/smp.c +++ b/trunk/arch/powerpc/platforms/chrp/smp.c @@ -30,10 +30,12 @@ #include #include -static void __devinit smp_chrp_kick_cpu(int nr) +static int __devinit smp_chrp_kick_cpu(int nr) { *(unsigned long *)KERNELBASE = nr; asm volatile("dcbf 0,%0"::"r"(KERNELBASE):"memory"); + + return 0; } static void __devinit smp_chrp_setup_cpu(int cpu_nr) diff --git a/trunk/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/trunk/arch/powerpc/platforms/embedded6xx/flipper-pic.c index 12aa62b6f227..f61a2dd96b99 100644 --- a/trunk/arch/powerpc/platforms/embedded6xx/flipper-pic.c +++ b/trunk/arch/powerpc/platforms/embedded6xx/flipper-pic.c @@ -48,7 +48,7 @@ static void flipper_pic_mask_and_ack(struct irq_data *d) { - int irq = virq_to_hw(d->irq); + int irq = irqd_to_hwirq(d); void __iomem *io_base = irq_data_get_irq_chip_data(d); u32 mask = 1 << irq; @@ -59,7 +59,7 @@ static void flipper_pic_mask_and_ack(struct irq_data *d) static void flipper_pic_ack(struct irq_data *d) { - int irq = virq_to_hw(d->irq); + int irq = irqd_to_hwirq(d); void __iomem *io_base = irq_data_get_irq_chip_data(d); /* this is at least needed for RSW */ @@ -68,7 +68,7 @@ static void flipper_pic_ack(struct irq_data *d) static void flipper_pic_mask(struct irq_data *d) { - int irq = virq_to_hw(d->irq); + int irq = irqd_to_hwirq(d); void __iomem *io_base = irq_data_get_irq_chip_data(d); clrbits32(io_base + FLIPPER_IMR, 1 << irq); @@ -76,7 +76,7 @@ static void flipper_pic_mask(struct irq_data *d) static void flipper_pic_unmask(struct irq_data *d) { - int irq = virq_to_hw(d->irq); + int irq = irqd_to_hwirq(d); void __iomem *io_base = irq_data_get_irq_chip_data(d); setbits32(io_base + FLIPPER_IMR, 1 << irq); @@ -107,12 +107,6 @@ static int flipper_pic_map(struct irq_host *h, unsigned int virq, return 0; } -static void flipper_pic_unmap(struct irq_host *h, unsigned int irq) -{ - irq_set_chip_data(irq, NULL); - irq_set_chip(irq, NULL); -} - static int flipper_pic_match(struct irq_host *h, struct device_node *np) { return 1; @@ -121,7 +115,6 @@ static int flipper_pic_match(struct irq_host *h, struct device_node *np) static struct irq_host_ops flipper_irq_host_ops = { .map = flipper_pic_map, - .unmap = flipper_pic_unmap, .match = flipper_pic_match, }; diff --git a/trunk/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/trunk/arch/powerpc/platforms/embedded6xx/hlwd-pic.c index 2bdddfc9d520..e4919170c6bc 100644 --- a/trunk/arch/powerpc/platforms/embedded6xx/hlwd-pic.c +++ b/trunk/arch/powerpc/platforms/embedded6xx/hlwd-pic.c @@ -43,7 +43,7 @@ static void hlwd_pic_mask_and_ack(struct irq_data *d) { - int irq = virq_to_hw(d->irq); + int irq = irqd_to_hwirq(d); void __iomem *io_base = irq_data_get_irq_chip_data(d); u32 mask = 1 << irq; @@ -53,7 +53,7 @@ static void hlwd_pic_mask_and_ack(struct irq_data *d) 
static void hlwd_pic_ack(struct irq_data *d) { - int irq = virq_to_hw(d->irq); + int irq = irqd_to_hwirq(d); void __iomem *io_base = irq_data_get_irq_chip_data(d); out_be32(io_base + HW_BROADWAY_ICR, 1 << irq); @@ -61,7 +61,7 @@ static void hlwd_pic_ack(struct irq_data *d) static void hlwd_pic_mask(struct irq_data *d) { - int irq = virq_to_hw(d->irq); + int irq = irqd_to_hwirq(d); void __iomem *io_base = irq_data_get_irq_chip_data(d); clrbits32(io_base + HW_BROADWAY_IMR, 1 << irq); @@ -69,7 +69,7 @@ static void hlwd_pic_mask(struct irq_data *d) static void hlwd_pic_unmask(struct irq_data *d) { - int irq = virq_to_hw(d->irq); + int irq = irqd_to_hwirq(d); void __iomem *io_base = irq_data_get_irq_chip_data(d); setbits32(io_base + HW_BROADWAY_IMR, 1 << irq); @@ -100,15 +100,8 @@ static int hlwd_pic_map(struct irq_host *h, unsigned int virq, return 0; } -static void hlwd_pic_unmap(struct irq_host *h, unsigned int irq) -{ - irq_set_chip_data(irq, NULL); - irq_set_chip(irq, NULL); -} - static struct irq_host_ops hlwd_irq_host_ops = { .map = hlwd_pic_map, - .unmap = hlwd_pic_unmap, }; static unsigned int __hlwd_pic_get_irq(struct irq_host *h) diff --git a/trunk/arch/powerpc/platforms/iseries/Kconfig b/trunk/arch/powerpc/platforms/iseries/Kconfig index e5bc9f75d474..b57cda3a0817 100644 --- a/trunk/arch/powerpc/platforms/iseries/Kconfig +++ b/trunk/arch/powerpc/platforms/iseries/Kconfig @@ -1,7 +1,9 @@ config PPC_ISERIES bool "IBM Legacy iSeries" depends on PPC64 && PPC_BOOK3S - select PPC_INDIRECT_IO + select PPC_SMP_MUXED_IPI + select PPC_INDIRECT_PIO + select PPC_INDIRECT_MMIO select PPC_PCI_CHOICE if EXPERT menu "iSeries device drivers" diff --git a/trunk/arch/powerpc/platforms/iseries/exception.S b/trunk/arch/powerpc/platforms/iseries/exception.S index 32a56c6dfa72..29c02f36b32f 100644 --- a/trunk/arch/powerpc/platforms/iseries/exception.S +++ b/trunk/arch/powerpc/platforms/iseries/exception.S @@ -31,6 +31,7 @@ #include #include #include +#include #include "exception.h" @@ -60,29 +61,31 @@ system_reset_iSeries: /* Spin on __secondary_hold_spinloop until it is updated by the boot cpu. */ /* In the UP case we'll yield() later, and we will not access the paca anyway */ #ifdef CONFIG_SMP -1: +iSeries_secondary_wait_paca: HMT_LOW LOAD_REG_ADDR(r23, __secondary_hold_spinloop) ld r23,0(r23) - sync - LOAD_REG_ADDR(r3,current_set) - sldi r28,r24,3 /* get current_set[cpu#] */ - ldx r3,r3,r28 - addi r1,r3,THREAD_SIZE - subi r1,r1,STACK_FRAME_OVERHEAD - cmpwi 0,r23,0 /* Keep poking the Hypervisor until */ - bne 2f /* we're released */ - /* Let the Hypervisor know we are alive */ + cmpdi 0,r23,0 + bne 2f /* go on when the master is ready */ + + /* Keep poking the Hypervisor until we're released */ /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ lis r3,0x8002 rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */ li r0,-1 /* r0=-1 indicates a Hypervisor call */ sc /* Invoke the hypervisor via a system call */ - b 1b -#endif + b iSeries_secondary_wait_paca 2: + HMT_MEDIUM + sync + + LOAD_REG_ADDR(r3, nr_cpu_ids) /* get number of pacas allocated */ + lwz r3,0(r3) /* nr_cpus= or NR_CPUS can limit */ + cmpld 0,r24,r3 /* is our cpu number allocated? 
*/ + bge iSeries_secondary_yield /* no, yield forever */ + /* Load our paca now that it's been allocated */ LOAD_REG_ADDR(r13, paca) ld r13,0(r13) @@ -93,10 +96,24 @@ system_reset_iSeries: ori r23,r23,MSR_RI mtmsrd r23 /* RI on */ - HMT_LOW -#ifdef CONFIG_SMP +iSeries_secondary_smp_loop: lbz r23,PACAPROCSTART(r13) /* Test if this processor * should start */ + cmpwi 0,r23,0 + bne 3f /* go on when we are told */ + + HMT_LOW + /* Let the Hypervisor know we are alive */ + /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ + lis r3,0x8002 + rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */ + li r0,-1 /* r0=-1 indicates a Hypervisor call */ + sc /* Invoke the hypervisor via a system call */ + mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? */ + b iSeries_secondary_smp_loop /* wait for signal to start */ + +3: + HMT_MEDIUM sync LOAD_REG_ADDR(r3,current_set) sldi r28,r24,3 /* get current_set[cpu#] */ @@ -104,27 +121,22 @@ system_reset_iSeries: addi r1,r3,THREAD_SIZE subi r1,r1,STACK_FRAME_OVERHEAD - cmpwi 0,r23,0 - beq iSeries_secondary_smp_loop /* Loop until told to go */ b __secondary_start /* Loop until told to go */ -iSeries_secondary_smp_loop: - /* Let the Hypervisor know we are alive */ - /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ - lis r3,0x8002 - rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */ -#else /* CONFIG_SMP */ +#endif /* CONFIG_SMP */ + +iSeries_secondary_yield: /* Yield the processor. This is required for non-SMP kernels which are running on multi-threaded machines. */ + HMT_LOW lis r3,0x8000 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */ addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */ li r4,0 /* "yield timed" */ li r5,-1 /* "yield forever" */ -#endif /* CONFIG_SMP */ li r0,-1 /* r0=-1 indicates a Hypervisor call */ sc /* Invoke the hypervisor via a system call */ mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? 
*/ - b 2b /* If SMP not configured, secondaries + b iSeries_secondary_yield /* If SMP not configured, secondaries * loop forever */ /*** ISeries-LPAR interrupt handlers ***/ @@ -157,7 +169,7 @@ BEGIN_FTR_SECTION FTR_SECTION_ELSE EXCEPTION_PROLOG_1(PACA_EXGEN) EXCEPTION_PROLOG_ISERIES_1 -ALT_FTR_SECTION_END_IFCLR(CPU_FTR_SLB) +ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB) b data_access_common .do_stab_bolted_iSeries: diff --git a/trunk/arch/powerpc/platforms/iseries/irq.c b/trunk/arch/powerpc/platforms/iseries/irq.c index 52a6889832c7..b2103453eb01 100644 --- a/trunk/arch/powerpc/platforms/iseries/irq.c +++ b/trunk/arch/powerpc/platforms/iseries/irq.c @@ -42,7 +42,6 @@ #include "irq.h" #include "pci.h" #include "call_pci.h" -#include "smp.h" #ifdef CONFIG_PCI @@ -171,7 +170,7 @@ static void iseries_enable_IRQ(struct irq_data *d) { u32 bus, dev_id, function, mask; const u32 sub_bus = 0; - unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq; + unsigned int rirq = (unsigned int)irqd_to_hwirq(d); /* The IRQ has already been locked by the caller */ bus = REAL_IRQ_TO_BUS(rirq); @@ -188,7 +187,7 @@ static unsigned int iseries_startup_IRQ(struct irq_data *d) { u32 bus, dev_id, function, mask; const u32 sub_bus = 0; - unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq; + unsigned int rirq = (unsigned int)irqd_to_hwirq(d); bus = REAL_IRQ_TO_BUS(rirq); function = REAL_IRQ_TO_FUNC(rirq); @@ -234,7 +233,7 @@ static void iseries_shutdown_IRQ(struct irq_data *d) { u32 bus, dev_id, function, mask; const u32 sub_bus = 0; - unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq; + unsigned int rirq = (unsigned int)irqd_to_hwirq(d); /* irq should be locked by the caller */ bus = REAL_IRQ_TO_BUS(rirq); @@ -257,7 +256,7 @@ static void iseries_disable_IRQ(struct irq_data *d) { u32 bus, dev_id, function, mask; const u32 sub_bus = 0; - unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq; + unsigned int rirq = (unsigned int)irqd_to_hwirq(d); /* The IRQ has already been locked by the caller */ bus = REAL_IRQ_TO_BUS(rirq); @@ -271,7 +270,7 @@ static void iseries_disable_IRQ(struct irq_data *d) static void iseries_end_IRQ(struct irq_data *d) { - unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq; + unsigned int rirq = (unsigned int)irqd_to_hwirq(d); HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq), (REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq)); @@ -316,7 +315,7 @@ unsigned int iSeries_get_irq(void) #ifdef CONFIG_SMP if (get_lppaca()->int_dword.fields.ipi_cnt) { get_lppaca()->int_dword.fields.ipi_cnt = 0; - iSeries_smp_message_recv(); + smp_ipi_demux(); } #endif /* CONFIG_SMP */ if (hvlpevent_is_pending()) diff --git a/trunk/arch/powerpc/platforms/iseries/setup.c b/trunk/arch/powerpc/platforms/iseries/setup.c index 2946ae10fbfd..c25a0815c26b 100644 --- a/trunk/arch/powerpc/platforms/iseries/setup.c +++ b/trunk/arch/powerpc/platforms/iseries/setup.c @@ -249,7 +249,7 @@ static unsigned long iSeries_process_mainstore_vpd(struct MemoryBlock *mb_array, unsigned long i; unsigned long mem_blocks = 0; - if (cpu_has_feature(CPU_FTR_SLB)) + if (mmu_has_feature(MMU_FTR_SLB)) mem_blocks = iSeries_process_Regatta_mainstore_vpd(mb_array, max_entries); else @@ -634,7 +634,7 @@ static int __init iseries_probe(void) hpte_init_iSeries(); /* iSeries does not support 16M pages */ - cur_cpu_spec->cpu_features &= ~CPU_FTR_16M_PAGE; + cur_cpu_spec->mmu_features &= ~MMU_FTR_16M_PAGE; return 1; } @@ -685,6 +685,11 @@ void * __init iSeries_early_setup(void) powerpc_firmware_features |= FW_FEATURE_ISERIES; 
powerpc_firmware_features |= FW_FEATURE_LPAR; +#ifdef CONFIG_SMP + /* On iSeries we know we can never have more than 64 cpus */ + nr_cpu_ids = max(nr_cpu_ids, 64); +#endif + iSeries_fixup_klimit(); /* diff --git a/trunk/arch/powerpc/platforms/iseries/smp.c b/trunk/arch/powerpc/platforms/iseries/smp.c index 6c6029914dbc..e3265adde5d3 100644 --- a/trunk/arch/powerpc/platforms/iseries/smp.c +++ b/trunk/arch/powerpc/platforms/iseries/smp.c @@ -42,57 +42,23 @@ #include #include -#include "smp.h" - -static unsigned long iSeries_smp_message[NR_CPUS]; - -void iSeries_smp_message_recv(void) -{ - int cpu = smp_processor_id(); - int msg; - - if (num_online_cpus() < 2) - return; - - for (msg = 0; msg < 4; msg++) - if (test_and_clear_bit(msg, &iSeries_smp_message[cpu])) - smp_message_recv(msg); -} - -static inline void smp_iSeries_do_message(int cpu, int msg) +static void smp_iSeries_cause_ipi(int cpu, unsigned long data) { - set_bit(msg, &iSeries_smp_message[cpu]); HvCall_sendIPI(&(paca[cpu])); } -static void smp_iSeries_message_pass(int target, int msg) -{ - int i; - - if (target < NR_CPUS) - smp_iSeries_do_message(target, msg); - else { - for_each_online_cpu(i) { - if ((target == MSG_ALL_BUT_SELF) && - (i == smp_processor_id())) - continue; - smp_iSeries_do_message(i, msg); - } - } -} - static int smp_iSeries_probe(void) { return cpumask_weight(cpu_possible_mask); } -static void smp_iSeries_kick_cpu(int nr) +static int smp_iSeries_kick_cpu(int nr) { BUG_ON((nr < 0) || (nr >= NR_CPUS)); /* Verify that our partition has a processor nr */ if (lppaca_of(nr).dyn_proc_status >= 2) - return; + return -ENOENT; /* The processor is currently spinning, waiting * for the cpu_start field to become non-zero @@ -100,6 +66,8 @@ static void smp_iSeries_kick_cpu(int nr) * continue on to secondary_start in iSeries_head.S */ paca[nr].cpu_start = 1; + + return 0; } static void __devinit smp_iSeries_setup_cpu(int nr) @@ -107,7 +75,8 @@ static void __devinit smp_iSeries_setup_cpu(int nr) } static struct smp_ops_t iSeries_smp_ops = { - .message_pass = smp_iSeries_message_pass, + .message_pass = smp_muxed_ipi_message_pass, + .cause_ipi = smp_iSeries_cause_ipi, .probe = smp_iSeries_probe, .kick_cpu = smp_iSeries_kick_cpu, .setup_cpu = smp_iSeries_setup_cpu, diff --git a/trunk/arch/powerpc/platforms/iseries/smp.h b/trunk/arch/powerpc/platforms/iseries/smp.h deleted file mode 100644 index d501f7de01e7..000000000000 --- a/trunk/arch/powerpc/platforms/iseries/smp.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _PLATFORMS_ISERIES_SMP_H -#define _PLATFORMS_ISERIES_SMP_H - -extern void iSeries_smp_message_recv(void); - -#endif /* _PLATFORMS_ISERIES_SMP_H */ diff --git a/trunk/arch/powerpc/platforms/powermac/Kconfig b/trunk/arch/powerpc/platforms/powermac/Kconfig index 1e1a0873e1dd..1afd10f67858 100644 --- a/trunk/arch/powerpc/platforms/powermac/Kconfig +++ b/trunk/arch/powerpc/platforms/powermac/Kconfig @@ -18,4 +18,13 @@ config PPC_PMAC64 select PPC_970_NAP default y - +config PPC_PMAC32_PSURGE + bool "Support for powersurge upgrade cards" if EXPERT + depends on SMP && PPC32 && PPC_PMAC + select PPC_SMP_MUXED_IPI + default y + help + The powersurge cpu boards can be used in the generation + of powermacs that have a socket for an upgradeable cpu card, + including the 7500, 8500, 9500, 9600. Support exists for + both dual and quad socket upgrade cards. 
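The iSeries and powersurge changes above and below both move to the kernel's generic muxed-IPI scheme: the platform supplies only a cause_ipi() hook, message_pass becomes the common smp_muxed_ipi_message_pass(), and the platform's IPI handler calls smp_ipi_demux(). A minimal sketch of that wiring, with hypothetical example_* names:

static void example_cause_ipi(int cpu, unsigned long data)
{
	/* ring the target cpu's doorbell / IPI register here */
}

static irqreturn_t example_ipi_action(int irq, void *dev_id)
{
	smp_ipi_demux();	/* dispatch whatever messages are queued */
	return IRQ_HANDLED;
}

static struct smp_ops_t example_smp_ops = {
	.message_pass	= smp_muxed_ipi_message_pass,
	.cause_ipi	= example_cause_ipi,
	/* .probe, .kick_cpu, .setup_cpu filled in as for the platform */
};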
diff --git a/trunk/arch/powerpc/platforms/powermac/pic.c b/trunk/arch/powerpc/platforms/powermac/pic.c index 023f24086a0a..9089b0421191 100644 --- a/trunk/arch/powerpc/platforms/powermac/pic.c +++ b/trunk/arch/powerpc/platforms/powermac/pic.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -84,7 +84,7 @@ static void __pmac_retrigger(unsigned int irq_nr) static void pmac_mask_and_ack_irq(struct irq_data *d) { - unsigned int src = irq_map[d->irq].hwirq; + unsigned int src = irqd_to_hwirq(d); unsigned long bit = 1UL << (src & 0x1f); int i = src >> 5; unsigned long flags; @@ -106,7 +106,7 @@ static void pmac_mask_and_ack_irq(struct irq_data *d) static void pmac_ack_irq(struct irq_data *d) { - unsigned int src = irq_map[d->irq].hwirq; + unsigned int src = irqd_to_hwirq(d); unsigned long bit = 1UL << (src & 0x1f); int i = src >> 5; unsigned long flags; @@ -152,7 +152,7 @@ static void __pmac_set_irq_mask(unsigned int irq_nr, int nokicklost) static unsigned int pmac_startup_irq(struct irq_data *d) { unsigned long flags; - unsigned int src = irq_map[d->irq].hwirq; + unsigned int src = irqd_to_hwirq(d); unsigned long bit = 1UL << (src & 0x1f); int i = src >> 5; @@ -169,7 +169,7 @@ static unsigned int pmac_startup_irq(struct irq_data *d) static void pmac_mask_irq(struct irq_data *d) { unsigned long flags; - unsigned int src = irq_map[d->irq].hwirq; + unsigned int src = irqd_to_hwirq(d); raw_spin_lock_irqsave(&pmac_pic_lock, flags); __clear_bit(src, ppc_cached_irq_mask); @@ -180,7 +180,7 @@ static void pmac_mask_irq(struct irq_data *d) static void pmac_unmask_irq(struct irq_data *d) { unsigned long flags; - unsigned int src = irq_map[d->irq].hwirq; + unsigned int src = irqd_to_hwirq(d); raw_spin_lock_irqsave(&pmac_pic_lock, flags); __set_bit(src, ppc_cached_irq_mask); @@ -193,7 +193,7 @@ static int pmac_retrigger(struct irq_data *d) unsigned long flags; raw_spin_lock_irqsave(&pmac_pic_lock, flags); - __pmac_retrigger(irq_map[d->irq].hwirq); + __pmac_retrigger(irqd_to_hwirq(d)); raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); return 1; } @@ -239,15 +239,12 @@ static unsigned int pmac_pic_get_irq(void) unsigned long bits = 0; unsigned long flags; -#ifdef CONFIG_SMP - void psurge_smp_message_recv(void); - - /* IPI's are a hack on the powersurge -- Cort */ - if ( smp_processor_id() != 0 ) { - psurge_smp_message_recv(); - return NO_IRQ_IGNORE; /* ignore, already handled */ +#ifdef CONFIG_PPC_PMAC32_PSURGE + /* IPI's are a hack on the powersurge -- Cort */ + if (smp_processor_id() != 0) { + return psurge_secondary_virq; } -#endif /* CONFIG_SMP */ +#endif /* CONFIG_PPC_PMAC32_PSURGE */ raw_spin_lock_irqsave(&pmac_pic_lock, flags); for (irq = max_real_irqs; (irq -= 32) >= 0; ) { int i = irq >> 5; @@ -677,7 +674,7 @@ static int pmacpic_find_viaint(void) return viaint; } -static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state) +static int pmacpic_suspend(void) { int viaint = pmacpic_find_viaint(); @@ -698,7 +695,7 @@ static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state) return 0; } -static int pmacpic_resume(struct sys_device *sysdev) +static void pmacpic_resume(void) { int i; @@ -709,39 +706,19 @@ static int pmacpic_resume(struct sys_device *sysdev) for (i = 0; i < max_real_irqs; ++i) if (test_bit(i, sleep_save_mask)) pmac_unmask_irq(irq_get_irq_data(i)); - - return 0; } -#endif /* CONFIG_PM && CONFIG_PPC32 */ - -static struct sysdev_class pmacpic_sysclass = { - .name = "pmac_pic", -}; - -static struct sys_device 
device_pmacpic = { - .id = 0, - .cls = &pmacpic_sysclass, -}; - -static struct sysdev_driver driver_pmacpic = { -#if defined(CONFIG_PM) && defined(CONFIG_PPC32) - .suspend = &pmacpic_suspend, - .resume = &pmacpic_resume, -#endif /* CONFIG_PM && CONFIG_PPC32 */ +static struct syscore_ops pmacpic_syscore_ops = { + .suspend = pmacpic_suspend, + .resume = pmacpic_resume, }; -static int __init init_pmacpic_sysfs(void) +static int __init init_pmacpic_syscore(void) { -#ifdef CONFIG_PPC32 - if (max_irqs == 0) - return -ENODEV; -#endif - printk(KERN_DEBUG "Registering pmac pic with sysfs...\n"); - sysdev_class_register(&pmacpic_sysclass); - sysdev_register(&device_pmacpic); - sysdev_driver_register(&pmacpic_sysclass, &driver_pmacpic); + register_syscore_ops(&pmacpic_syscore_ops); return 0; } -machine_subsys_initcall(powermac, init_pmacpic_sysfs); +machine_subsys_initcall(powermac, init_pmacpic_syscore); + +#endif /* CONFIG_PM && CONFIG_PPC32 */ diff --git a/trunk/arch/powerpc/platforms/powermac/pic.h b/trunk/arch/powerpc/platforms/powermac/pic.h deleted file mode 100644 index d622a8345aaa..000000000000 --- a/trunk/arch/powerpc/platforms/powermac/pic.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef __PPC_PLATFORMS_PMAC_PIC_H -#define __PPC_PLATFORMS_PMAC_PIC_H - -#include - -extern struct irq_chip pmac_pic; - -extern void pmac_pic_init(void); -extern int pmac_get_irq(void); - -#endif /* __PPC_PLATFORMS_PMAC_PIC_H */ diff --git a/trunk/arch/powerpc/platforms/powermac/pmac.h b/trunk/arch/powerpc/platforms/powermac/pmac.h index 20468f49aec0..8327cce2bdb0 100644 --- a/trunk/arch/powerpc/platforms/powermac/pmac.h +++ b/trunk/arch/powerpc/platforms/powermac/pmac.h @@ -33,6 +33,7 @@ extern void pmac_setup_pci_dma(void); extern void pmac_check_ht_link(void); extern void pmac_setup_smp(void); +extern int psurge_secondary_virq; extern void low_cpu_die(void) __attribute__((noreturn)); extern int pmac_nvram_init(void); diff --git a/trunk/arch/powerpc/platforms/powermac/smp.c b/trunk/arch/powerpc/platforms/powermac/smp.c index a830c5e80657..db092d7c4c5b 100644 --- a/trunk/arch/powerpc/platforms/powermac/smp.c +++ b/trunk/arch/powerpc/platforms/powermac/smp.c @@ -70,7 +70,7 @@ static void (*pmac_tb_freeze)(int freeze); static u64 timebase; static int tb_req; -#ifdef CONFIG_PPC32 +#ifdef CONFIG_PPC_PMAC32_PSURGE /* * Powersurge (old powermac SMP) support. @@ -124,6 +124,10 @@ static volatile u32 __iomem *psurge_start; /* what sort of powersurge board we have */ static int psurge_type = PSURGE_NONE; +/* irq for secondary cpus to report */ +static struct irq_host *psurge_host; +int psurge_secondary_virq; + /* * Set and clear IPIs for powersurge. */ @@ -156,51 +160,52 @@ static inline void psurge_clr_ipi(int cpu) /* * On powersurge (old SMP powermac architecture) we don't have * separate IPIs for separate messages like openpic does. Instead - * we have a bitmap for each processor, where a 1 bit means that - * the corresponding message is pending for that processor. - * Ideally each cpu's entry would be in a different cache line. + * use the generic demux helpers * -- paulus. 
*/ -static unsigned long psurge_smp_message[NR_CPUS]; - -void psurge_smp_message_recv(void) +static irqreturn_t psurge_ipi_intr(int irq, void *d) { - int cpu = smp_processor_id(); - int msg; - - /* clear interrupt */ - psurge_clr_ipi(cpu); + psurge_clr_ipi(smp_processor_id()); + smp_ipi_demux(); - if (num_online_cpus() < 2) - return; + return IRQ_HANDLED; +} - /* make sure there is a message there */ - for (msg = 0; msg < 4; msg++) - if (test_and_clear_bit(msg, &psurge_smp_message[cpu])) - smp_message_recv(msg); +static void smp_psurge_cause_ipi(int cpu, unsigned long data) +{ + psurge_set_ipi(cpu); } -irqreturn_t psurge_primary_intr(int irq, void *d) +static int psurge_host_map(struct irq_host *h, unsigned int virq, + irq_hw_number_t hw) { - psurge_smp_message_recv(); - return IRQ_HANDLED; + irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_percpu_irq); + + return 0; } -static void smp_psurge_message_pass(int target, int msg) +struct irq_host_ops psurge_host_ops = { + .map = psurge_host_map, +}; + +static int psurge_secondary_ipi_init(void) { - int i; + int rc = -ENOMEM; - if (num_online_cpus() < 2) - return; + psurge_host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, + &psurge_host_ops, 0); - for_each_online_cpu(i) { - if (target == MSG_ALL - || (target == MSG_ALL_BUT_SELF && i != smp_processor_id()) - || target == i) { - set_bit(msg, &psurge_smp_message[i]); - psurge_set_ipi(i); - } - } + if (psurge_host) + psurge_secondary_virq = irq_create_direct_mapping(psurge_host); + + if (psurge_secondary_virq) + rc = request_irq(psurge_secondary_virq, psurge_ipi_intr, + IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL); + + if (rc) + pr_err("Failed to setup secondary cpu IPI\n"); + + return rc; } /* @@ -311,6 +316,9 @@ static int __init smp_psurge_probe(void) ncpus = 2; } + if (psurge_secondary_ipi_init()) + return 1; + psurge_start = ioremap(PSURGE_START, 4); psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4); @@ -329,7 +337,7 @@ static int __init smp_psurge_probe(void) return ncpus; } -static void __init smp_psurge_kick_cpu(int nr) +static int __init smp_psurge_kick_cpu(int nr) { unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8; unsigned long a, flags; @@ -394,11 +402,13 @@ static void __init smp_psurge_kick_cpu(int nr) psurge_set_ipi(1); if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354); + + return 0; } static struct irqaction psurge_irqaction = { - .handler = psurge_primary_intr, - .flags = IRQF_DISABLED, + .handler = psurge_ipi_intr, + .flags = IRQF_DISABLED|IRQF_PERCPU, .name = "primary IPI", }; @@ -437,14 +447,15 @@ void __init smp_psurge_give_timebase(void) /* PowerSurge-style Macs */ struct smp_ops_t psurge_smp_ops = { - .message_pass = smp_psurge_message_pass, + .message_pass = smp_muxed_ipi_message_pass, + .cause_ipi = smp_psurge_cause_ipi, .probe = smp_psurge_probe, .kick_cpu = smp_psurge_kick_cpu, .setup_cpu = smp_psurge_setup_cpu, .give_timebase = smp_psurge_give_timebase, .take_timebase = smp_psurge_take_timebase, }; -#endif /* CONFIG_PPC32 - actually powersurge support */ +#endif /* CONFIG_PPC_PMAC32_PSURGE */ /* * Core 99 and later support @@ -791,14 +802,14 @@ static int __init smp_core99_probe(void) return ncpus; } -static void __devinit smp_core99_kick_cpu(int nr) +static int __devinit smp_core99_kick_cpu(int nr) { unsigned int save_vector; unsigned long target, flags; unsigned int *vector = (unsigned int *)(PAGE_OFFSET+0x100); if (nr < 0 || nr > 3) - return; + return -ENOENT; if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346); @@ 
-830,6 +841,8 @@ static void __devinit smp_core99_kick_cpu(int nr) local_irq_restore(flags); if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347); + + return 0; } static void __devinit smp_core99_setup_cpu(int cpu_nr) @@ -842,6 +855,7 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr) mpic_setup_this_cpu(); } +#ifdef CONFIG_PPC64 #ifdef CONFIG_HOTPLUG_CPU static int smp_core99_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) @@ -879,7 +893,6 @@ static struct notifier_block __cpuinitdata smp_core99_cpu_nb = { static void __init smp_core99_bringup_done(void) { -#ifdef CONFIG_PPC64 extern void g5_phy_disable_cpu1(void); /* Close i2c bus if it was used for tb sync */ @@ -894,14 +907,14 @@ static void __init smp_core99_bringup_done(void) set_cpu_present(1, false); g5_phy_disable_cpu1(); } -#endif /* CONFIG_PPC64 */ - #ifdef CONFIG_HOTPLUG_CPU register_cpu_notifier(&smp_core99_cpu_nb); #endif + if (ppc_md.progress) ppc_md.progress("smp_core99_bringup_done", 0x349); } +#endif /* CONFIG_PPC64 */ #ifdef CONFIG_HOTPLUG_CPU @@ -975,7 +988,9 @@ static void pmac_cpu_die(void) struct smp_ops_t core99_smp_ops = { .message_pass = smp_mpic_message_pass, .probe = smp_core99_probe, +#ifdef CONFIG_PPC64 .bringup_done = smp_core99_bringup_done, +#endif .kick_cpu = smp_core99_kick_cpu, .setup_cpu = smp_core99_setup_cpu, .give_timebase = smp_core99_give_timebase, @@ -1000,7 +1015,7 @@ void __init pmac_setup_smp(void) of_node_put(np); smp_ops = &core99_smp_ops; } -#ifdef CONFIG_PPC32 +#ifdef CONFIG_PPC_PMAC32_PSURGE else { /* We have to set bits in cpu_possible_mask here since the * secondary CPU(s) aren't in the device tree. Various @@ -1013,7 +1028,7 @@ void __init pmac_setup_smp(void) set_cpu_possible(cpu, true); smp_ops = &psurge_smp_ops; } -#endif /* CONFIG_PPC32 */ +#endif /* CONFIG_PPC_PMAC32_PSURGE */ #ifdef CONFIG_HOTPLUG_CPU ppc_md.cpu_die = pmac_cpu_die; diff --git a/trunk/arch/powerpc/platforms/ps3/interrupt.c b/trunk/arch/powerpc/platforms/ps3/interrupt.c index f2f6413b81d3..600ed2c0ed59 100644 --- a/trunk/arch/powerpc/platforms/ps3/interrupt.c +++ b/trunk/arch/powerpc/platforms/ps3/interrupt.c @@ -197,7 +197,7 @@ static int ps3_virq_setup(enum ps3_cpu_binding cpu, unsigned long outlet, result = irq_set_chip_data(*virq, pd); if (result) { - pr_debug("%s:%d: set_irq_chip_data failed\n", + pr_debug("%s:%d: irq_set_chip_data failed\n", __func__, __LINE__); goto fail_set; } @@ -659,11 +659,6 @@ static void __maybe_unused _dump_mask(struct ps3_private *pd, static void dump_bmp(struct ps3_private* pd) {}; #endif /* defined(DEBUG) */ -static void ps3_host_unmap(struct irq_host *h, unsigned int virq) -{ - irq_set_chip_data(virq, NULL); -} - static int ps3_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hwirq) { @@ -683,7 +678,6 @@ static int ps3_host_match(struct irq_host *h, struct device_node *np) static struct irq_host_ops ps3_host_ops = { .map = ps3_host_map, - .unmap = ps3_host_unmap, .match = ps3_host_match, }; diff --git a/trunk/arch/powerpc/platforms/ps3/smp.c b/trunk/arch/powerpc/platforms/ps3/smp.c index 51ffde40af2b..4c44794faac0 100644 --- a/trunk/arch/powerpc/platforms/ps3/smp.c +++ b/trunk/arch/powerpc/platforms/ps3/smp.c @@ -39,7 +39,7 @@ #define MSG_COUNT 4 static DEFINE_PER_CPU(unsigned int [MSG_COUNT], ps3_ipi_virqs); -static void do_message_pass(int target, int msg) +static void ps3_smp_message_pass(int cpu, int msg) { int result; unsigned int virq; @@ -49,28 +49,12 @@ static void do_message_pass(int target, int msg) 
return; } - virq = per_cpu(ps3_ipi_virqs, target)[msg]; + virq = per_cpu(ps3_ipi_virqs, cpu)[msg]; result = ps3_send_event_locally(virq); if (result) DBG("%s:%d: ps3_send_event_locally(%d, %d) failed" - " (%d)\n", __func__, __LINE__, target, msg, result); -} - -static void ps3_smp_message_pass(int target, int msg) -{ - int cpu; - - if (target < NR_CPUS) - do_message_pass(target, msg); - else if (target == MSG_ALL_BUT_SELF) { - for_each_online_cpu(cpu) - if (cpu != smp_processor_id()) - do_message_pass(cpu, msg); - } else { - for_each_online_cpu(cpu) - do_message_pass(cpu, msg); - } + " (%d)\n", __func__, __LINE__, cpu, msg, result); } static int ps3_smp_probe(void) diff --git a/trunk/arch/powerpc/platforms/ps3/spu.c b/trunk/arch/powerpc/platforms/ps3/spu.c index 39a472e9e80f..375a9f92158d 100644 --- a/trunk/arch/powerpc/platforms/ps3/spu.c +++ b/trunk/arch/powerpc/platforms/ps3/spu.c @@ -197,7 +197,7 @@ static void spu_unmap(struct spu *spu) * The current HV requires the spu shadow regs to be mapped with the * PTE page protection bits set as read-only (PP=3). This implementation * uses the low level __ioremap() to bypass the page protection settings - * inforced by ioremap_flags() to get the needed PTE bits set for the + * inforced by ioremap_prot() to get the needed PTE bits set for the * shadow regs. */ @@ -214,7 +214,7 @@ static int __init setup_areas(struct spu *spu) goto fail_ioremap; } - spu->local_store = (__force void *)ioremap_flags(spu->local_store_phys, + spu->local_store = (__force void *)ioremap_prot(spu->local_store_phys, LS_SIZE, _PAGE_NO_CACHE); if (!spu->local_store) { diff --git a/trunk/arch/powerpc/platforms/pseries/Kconfig b/trunk/arch/powerpc/platforms/pseries/Kconfig index 5b3da4b4ea79..71af4c5d6c05 100644 --- a/trunk/arch/powerpc/platforms/pseries/Kconfig +++ b/trunk/arch/powerpc/platforms/pseries/Kconfig @@ -3,7 +3,10 @@ config PPC_PSERIES bool "IBM pSeries & new (POWER5-based) iSeries" select MPIC select PCI_MSI - select XICS + select PPC_XICS + select PPC_ICP_NATIVE + select PPC_ICP_HV + select PPC_ICS_RTAS select PPC_I8259 select PPC_RTAS select PPC_RTAS_DAEMON @@ -47,6 +50,24 @@ config SCANLOG tristate "Scanlog dump interface" depends on RTAS_PROC && PPC_PSERIES +config IO_EVENT_IRQ + bool "IO Event Interrupt support" + depends on PPC_PSERIES + default y + help + Select this option, if you want to enable support for IO Event + interrupts. IO event interrupt is a mechanism provided by RTAS + to return information about hardware error and non-error events + which may need OS attention. RTAS returns events for multiple + event types and scopes. Device drivers can register their handlers + to receive events. + + This option will only enable the IO event platform code. You + will still need to enable or compile the actual drivers + that use this infrastruture to handle IO event interrupts. + + Say Y if you are unsure. 
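A minimal sketch of a driver consuming these IO events through the notifier chain exported by io_event_irq.c (added later in this patch); the example_* names and the scope/type filter are hypothetical, following the usage described in that file's comment block:

static int example_io_event_handler(struct notifier_block *nb,
				    unsigned long action, void *data)
{
	struct pseries_io_event *event = data;

	/* hypothetical filter on the event's scope and type */
	if (!example_is_my_event(event->scope, event->event_type))
		return NOTIFY_DONE;	/* not ours, let the next handler look */

	/* handle the event here */
	return NOTIFY_OK;
}

static struct notifier_block example_io_event_nb = {
	.notifier_call = example_io_event_handler,
};

static int __init example_io_event_init(void)
{
	return atomic_notifier_chain_register(&pseries_ioei_notifier_list,
					      &example_io_event_nb);
}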
+ config LPARCFG bool "LPAR Configuration Data" depends on PPC_PSERIES || PPC_ISERIES diff --git a/trunk/arch/powerpc/platforms/pseries/Makefile b/trunk/arch/powerpc/platforms/pseries/Makefile index fc5237810ece..3556e402cbf5 100644 --- a/trunk/arch/powerpc/platforms/pseries/Makefile +++ b/trunk/arch/powerpc/platforms/pseries/Makefile @@ -5,7 +5,6 @@ obj-y := lpar.o hvCall.o nvram.o reconfig.o \ setup.o iommu.o event_sources.o ras.o \ firmware.o power.o dlpar.o mobility.o obj-$(CONFIG_SMP) += smp.o -obj-$(CONFIG_XICS) += xics.o obj-$(CONFIG_SCANLOG) += scanlog.o obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o eeh_sysfs.o obj-$(CONFIG_KEXEC) += kexec.o @@ -22,6 +21,7 @@ obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o obj-$(CONFIG_PHYP_DUMP) += phyp_dump.o obj-$(CONFIG_CMM) += cmm.o obj-$(CONFIG_DTL) += dtl.o +obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o ifeq ($(CONFIG_PPC_PSERIES),y) obj-$(CONFIG_SUSPEND) += suspend.o diff --git a/trunk/arch/powerpc/platforms/pseries/dtl.c b/trunk/arch/powerpc/platforms/pseries/dtl.c index c371bc06434b..e9190073bb97 100644 --- a/trunk/arch/powerpc/platforms/pseries/dtl.c +++ b/trunk/arch/powerpc/platforms/pseries/dtl.c @@ -52,10 +52,10 @@ static u8 dtl_event_mask = 0x7; /* - * Size of per-cpu log buffers. Default is just under 16 pages worth. + * Size of per-cpu log buffers. Firmware requires that the buffer does + * not cross a 4k boundary. */ -static int dtl_buf_entries = (16 * 85); - +static int dtl_buf_entries = N_DISPATCH_LOG; #ifdef CONFIG_VIRT_CPU_ACCOUNTING struct dtl_ring { @@ -151,7 +151,7 @@ static int dtl_start(struct dtl *dtl) /* Register our dtl buffer with the hypervisor. The HV expects the * buffer size to be passed in the second word of the buffer */ - ((u32 *)dtl->buf)[1] = dtl->buf_entries * sizeof(struct dtl_entry); + ((u32 *)dtl->buf)[1] = DISPATCH_LOG_BYTES; hwcpu = get_hard_smp_processor_id(dtl->cpu); addr = __pa(dtl->buf); @@ -196,13 +196,15 @@ static int dtl_enable(struct dtl *dtl) long int rc; struct dtl_entry *buf = NULL; + if (!dtl_cache) + return -ENOMEM; + /* only allow one reader */ if (dtl->buf) return -EBUSY; n_entries = dtl_buf_entries; - buf = kmalloc_node(n_entries * sizeof(struct dtl_entry), - GFP_KERNEL, cpu_to_node(dtl->cpu)); + buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu)); if (!buf) { printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n", __func__, dtl->cpu); @@ -223,7 +225,7 @@ static int dtl_enable(struct dtl *dtl) spin_unlock(&dtl->lock); if (rc) - kfree(buf); + kmem_cache_free(dtl_cache, buf); return rc; } @@ -231,7 +233,7 @@ static void dtl_disable(struct dtl *dtl) { spin_lock(&dtl->lock); dtl_stop(dtl); - kfree(dtl->buf); + kmem_cache_free(dtl_cache, dtl->buf); dtl->buf = NULL; dtl->buf_entries = 0; spin_unlock(&dtl->lock); @@ -365,7 +367,7 @@ static int dtl_init(void) event_mask_file = debugfs_create_x8("dtl_event_mask", 0600, dtl_dir, &dtl_event_mask); - buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0600, + buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0400, dtl_dir, &dtl_buf_entries); if (!event_mask_file || !buf_entries_file) { diff --git a/trunk/arch/powerpc/platforms/pseries/eeh.c b/trunk/arch/powerpc/platforms/pseries/eeh.c index 89649173d3a3..46b55cf563e3 100644 --- a/trunk/arch/powerpc/platforms/pseries/eeh.c +++ b/trunk/arch/powerpc/platforms/pseries/eeh.c @@ -93,6 +93,7 @@ static int ibm_slot_error_detail; static int ibm_get_config_addr_info; static int ibm_get_config_addr_info2; static int ibm_configure_bridge; +static int 
ibm_configure_pe; int eeh_subsystem_enabled; EXPORT_SYMBOL(eeh_subsystem_enabled); @@ -261,6 +262,8 @@ void eeh_slot_error_detail(struct pci_dn *pdn, int severity) pci_regs_buf[0] = 0; rtas_pci_enable(pdn, EEH_THAW_MMIO); + rtas_configure_bridge(pdn); + eeh_restore_bars(pdn); loglen = gather_pci_data(pdn, pci_regs_buf, EEH_PCI_REGS_LOG_LEN); rtas_slot_error_detail(pdn, severity, pci_regs_buf, loglen); @@ -448,6 +451,39 @@ void eeh_clear_slot (struct device_node *dn, int mode_flag) raw_spin_unlock_irqrestore(&confirm_error_lock, flags); } +void __eeh_set_pe_freset(struct device_node *parent, unsigned int *freset) +{ + struct device_node *dn; + + for_each_child_of_node(parent, dn) { + if (PCI_DN(dn)) { + + struct pci_dev *dev = PCI_DN(dn)->pcidev; + + if (dev && dev->driver) + *freset |= dev->needs_freset; + + __eeh_set_pe_freset(dn, freset); + } + } +} + +void eeh_set_pe_freset(struct device_node *dn, unsigned int *freset) +{ + struct pci_dev *dev; + dn = find_device_pe(dn); + + /* Back up one, since config addrs might be shared */ + if (!pcibios_find_pci_bus(dn) && PCI_DN(dn->parent)) + dn = dn->parent; + + dev = PCI_DN(dn)->pcidev; + if (dev) + *freset |= dev->needs_freset; + + __eeh_set_pe_freset(dn, freset); +} + /** * eeh_dn_check_failure - check if all 1's data is due to EEH slot freeze * @dn device node @@ -692,15 +728,24 @@ rtas_pci_slot_reset(struct pci_dn *pdn, int state) if (pdn->eeh_pe_config_addr) config_addr = pdn->eeh_pe_config_addr; - rc = rtas_call(ibm_set_slot_reset,4,1, NULL, + rc = rtas_call(ibm_set_slot_reset, 4, 1, NULL, config_addr, BUID_HI(pdn->phb->buid), BUID_LO(pdn->phb->buid), state); - if (rc) - printk (KERN_WARNING "EEH: Unable to reset the failed slot," - " (%d) #RST=%d dn=%s\n", - rc, state, pdn->node->full_name); + + /* Fundamental-reset not supported on this PE, try hot-reset */ + if (rc == -8 && state == 3) { + rc = rtas_call(ibm_set_slot_reset, 4, 1, NULL, + config_addr, + BUID_HI(pdn->phb->buid), + BUID_LO(pdn->phb->buid), 1); + if (rc) + printk(KERN_WARNING + "EEH: Unable to reset the failed slot," + " #RST=%d dn=%s\n", + rc, pdn->node->full_name); + } } /** @@ -736,18 +781,21 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat /** * rtas_set_slot_reset -- assert the pci #RST line for 1/4 second * @pdn: pci device node to be reset. - * - * Return 0 if success, else a non-zero value. */ static void __rtas_set_slot_reset(struct pci_dn *pdn) { - struct pci_dev *dev = pdn->pcidev; + unsigned int freset = 0; - /* Determine type of EEH reset required by device, - * default hot reset or fundamental reset - */ - if (dev && dev->needs_freset) + /* Determine type of EEH reset required for + * Partitionable Endpoint, a hot-reset (1) + * or a fundamental reset (3). + * A fundamental reset required by any device under + * Partitionable Endpoint trumps hot-reset. 
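The per-device input to this choice is the needs_freset bit on struct pci_dev, which a driver whose hardware only recovers through a fundamental reset can set; a minimal, hypothetical sketch (example_probe is not a real driver):

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* ask EEH recovery for a fundamental reset (#RST) rather than hot-reset */
	pdev->needs_freset = 1;

	/* ... normal probe work ... */
	return 0;
}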
+ */ + eeh_set_pe_freset(pdn->node, &freset); + + if (freset) rtas_pci_slot_reset(pdn, 3); else rtas_pci_slot_reset(pdn, 1); @@ -895,13 +943,20 @@ rtas_configure_bridge(struct pci_dn *pdn) { int config_addr; int rc; + int token; /* Use PE configuration address, if present */ config_addr = pdn->eeh_config_addr; if (pdn->eeh_pe_config_addr) config_addr = pdn->eeh_pe_config_addr; - rc = rtas_call(ibm_configure_bridge,3,1, NULL, + /* Use new configure-pe function, if supported */ + if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) + token = ibm_configure_pe; + else + token = ibm_configure_bridge; + + rc = rtas_call(token, 3, 1, NULL, config_addr, BUID_HI(pdn->phb->buid), BUID_LO(pdn->phb->buid)); @@ -1077,6 +1132,7 @@ void __init eeh_init(void) ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info"); ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2"); ibm_configure_bridge = rtas_token ("ibm,configure-bridge"); + ibm_configure_pe = rtas_token("ibm,configure-pe"); if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) return; diff --git a/trunk/arch/powerpc/platforms/pseries/eeh_driver.c b/trunk/arch/powerpc/platforms/pseries/eeh_driver.c index b8d70f5d9aa9..1b6cb10589e0 100644 --- a/trunk/arch/powerpc/platforms/pseries/eeh_driver.c +++ b/trunk/arch/powerpc/platforms/pseries/eeh_driver.c @@ -328,7 +328,7 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event) struct pci_bus *frozen_bus; int rc = 0; enum pci_ers_result result = PCI_ERS_RESULT_NONE; - const char *location, *pci_str, *drv_str; + const char *location, *pci_str, *drv_str, *bus_pci_str, *bus_drv_str; frozen_dn = find_device_pe(event->dn); if (!frozen_dn) { @@ -364,13 +364,8 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event) frozen_pdn = PCI_DN(frozen_dn); frozen_pdn->eeh_freeze_count++; - if (frozen_pdn->pcidev) { - pci_str = pci_name (frozen_pdn->pcidev); - drv_str = pcid_name (frozen_pdn->pcidev); - } else { - pci_str = eeh_pci_name(event->dev); - drv_str = pcid_name (event->dev); - } + pci_str = eeh_pci_name(event->dev); + drv_str = pcid_name(event->dev); if (frozen_pdn->eeh_freeze_count > EEH_MAX_ALLOWED_FREEZES) goto excess_failures; @@ -378,8 +373,17 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event) printk(KERN_WARNING "EEH: This PCI device has failed %d times in the last hour:\n", frozen_pdn->eeh_freeze_count); + + if (frozen_pdn->pcidev) { + bus_pci_str = pci_name(frozen_pdn->pcidev); + bus_drv_str = pcid_name(frozen_pdn->pcidev); + printk(KERN_WARNING + "EEH: Bus location=%s driver=%s pci addr=%s\n", + location, bus_drv_str, bus_pci_str); + } + printk(KERN_WARNING - "EEH: location=%s driver=%s pci addr=%s\n", + "EEH: Device location=%s driver=%s pci addr=%s\n", location, drv_str, pci_str); /* Walk the various device drivers attached to this slot through diff --git a/trunk/arch/powerpc/platforms/pseries/hotplug-cpu.c b/trunk/arch/powerpc/platforms/pseries/hotplug-cpu.c index ef8c45489e20..46f13a3c5d09 100644 --- a/trunk/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/trunk/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -19,6 +19,7 @@ */ #include +#include #include #include #include @@ -28,7 +29,7 @@ #include #include #include -#include "xics.h" +#include #include "plpar_wrappers.h" #include "offline_states.h" @@ -280,7 +281,7 @@ static int pseries_add_processor(struct device_node *np) } for_each_cpu(cpu, tmp) { - BUG_ON(cpumask_test_cpu(cpu, cpu_present_mask)); + BUG_ON(cpu_present(cpu)); set_cpu_present(cpu, true); set_hard_smp_processor_id(cpu, *intserv++); } diff --git 
a/trunk/arch/powerpc/platforms/pseries/io_event_irq.c b/trunk/arch/powerpc/platforms/pseries/io_event_irq.c new file mode 100644 index 000000000000..c829e6067d54 --- /dev/null +++ b/trunk/arch/powerpc/platforms/pseries/io_event_irq.c @@ -0,0 +1,231 @@ +/* + * Copyright 2010 2011 Mark Nelson and Tseng-Hui (Frank) Lin, IBM Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "pseries.h" + +/* + * IO event interrupt is a mechanism provided by RTAS to return + * information about hardware error and non-error events. Device + * drivers can register their event handlers to receive events. + * Device drivers are expected to use atomic_notifier_chain_register() + * and atomic_notifier_chain_unregister() to register and unregister + * their event handlers. Since multiple IO event types and scopes + * share an IO event interrupt, the event handlers are called one + * by one until the IO event is claimed by one of the handlers. + * The event handlers are expected to return NOTIFY_OK if the + * event is handled by the event handler or NOTIFY_DONE if the + * event does not belong to the handler. + * + * Usage: + * + * Notifier function: + * #include + * int event_handler(struct notifier_block *nb, unsigned long val, void *data) { + * p = (struct pseries_io_event_sect_data *) data; + * if (! is_my_event(p->scope, p->event_type)) return NOTIFY_DONE; + * : + * : + * return NOTIFY_OK; + * } + * struct notifier_block event_nb = { + * .notifier_call = event_handler, + * } + * + * Registration: + * atomic_notifier_chain_register(&pseries_ioei_notifier_list, &event_nb); + * + * Unregistration: + * atomic_notifier_chain_unregister(&pseries_ioei_notifier_list, &event_nb); + */ + +ATOMIC_NOTIFIER_HEAD(pseries_ioei_notifier_list); +EXPORT_SYMBOL_GPL(pseries_ioei_notifier_list); + +static int ioei_check_exception_token; + +/* pSeries event log format */ + +/* Two bytes ASCII section IDs */ +#define PSERIES_ELOG_SECT_ID_PRIV_HDR (('P' << 8) | 'H') +#define PSERIES_ELOG_SECT_ID_USER_HDR (('U' << 8) | 'H') +#define PSERIES_ELOG_SECT_ID_PRIMARY_SRC (('P' << 8) | 'S') +#define PSERIES_ELOG_SECT_ID_EXTENDED_UH (('E' << 8) | 'H') +#define PSERIES_ELOG_SECT_ID_FAILING_MTMS (('M' << 8) | 'T') +#define PSERIES_ELOG_SECT_ID_SECONDARY_SRC (('S' << 8) | 'S') +#define PSERIES_ELOG_SECT_ID_DUMP_LOCATOR (('D' << 8) | 'H') +#define PSERIES_ELOG_SECT_ID_FW_ERROR (('S' << 8) | 'W') +#define PSERIES_ELOG_SECT_ID_IMPACT_PART_ID (('L' << 8) | 'P') +#define PSERIES_ELOG_SECT_ID_LOGIC_RESOURCE_ID (('L' << 8) | 'R') +#define PSERIES_ELOG_SECT_ID_HMC_ID (('H' << 8) | 'M') +#define PSERIES_ELOG_SECT_ID_EPOW (('E' << 8) | 'P') +#define PSERIES_ELOG_SECT_ID_IO_EVENT (('I' << 8) | 'E') +#define PSERIES_ELOG_SECT_ID_MANUFACT_INFO (('M' << 8) | 'I') +#define PSERIES_ELOG_SECT_ID_CALL_HOME (('C' << 8) | 'H') +#define PSERIES_ELOG_SECT_ID_USER_DEF (('U' << 8) | 'D') + +/* Vendor specific Platform Event Log Format, Version 6, section header */ +struct pseries_elog_section { + uint16_t id; /* 0x00 2-byte ASCII section ID */ + uint16_t length; /* 0x02 Section length in bytes */ + uint8_t version; /* 0x04 Section version */ + uint8_t subtype; /* 0x05 Section subtype */ + uint16_t creator_component; 
/* 0x06 Creator component ID */ + uint8_t data[]; /* 0x08 Start of section data */ +}; + +static char ioei_rtas_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned; + +/** + * Find data portion of a specific section in RTAS extended event log. + * @elog: RTAS error/event log. + * @sect_id: secsion ID. + * + * Return: + * pointer to the section data of the specified section + * NULL if not found + */ +static struct pseries_elog_section *find_xelog_section(struct rtas_error_log *elog, + uint16_t sect_id) +{ + struct rtas_ext_event_log_v6 *xelog = + (struct rtas_ext_event_log_v6 *) elog->buffer; + struct pseries_elog_section *sect; + unsigned char *p, *log_end; + + /* Check that we understand the format */ + if (elog->extended_log_length < sizeof(struct rtas_ext_event_log_v6) || + xelog->log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG || + xelog->company_id != RTAS_V6EXT_COMPANY_ID_IBM) + return NULL; + + log_end = elog->buffer + elog->extended_log_length; + p = xelog->vendor_log; + while (p < log_end) { + sect = (struct pseries_elog_section *)p; + if (sect->id == sect_id) + return sect; + p += sect->length; + } + return NULL; +} + +/** + * Find the data portion of an IO Event section from event log. + * @elog: RTAS error/event log. + * + * Return: + * pointer to a valid IO event section data. NULL if not found. + */ +static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog) +{ + struct pseries_elog_section *sect; + + /* We should only ever get called for io-event interrupts, but if + * we do get called for another type then something went wrong so + * make some noise about it. + * RTAS_TYPE_IO only exists in extended event log version 6 or later. + * No need to check event log version. + */ + if (unlikely(elog->type != RTAS_TYPE_IO)) { + printk_once(KERN_WARNING "io_event_irq: Unexpected event type %d", + elog->type); + return NULL; + } + + sect = find_xelog_section(elog, PSERIES_ELOG_SECT_ID_IO_EVENT); + if (unlikely(!sect)) { + printk_once(KERN_WARNING "io_event_irq: RTAS extended event " + "log does not contain an IO Event section. " + "Could be a bug in system firmware!\n"); + return NULL; + } + return (struct pseries_io_event *) §->data; +} + +/* + * PAPR: + * - check-exception returns the first found error or event and clear that + * error or event so it is reported once. + * - Each interrupt returns one event. If a plateform chooses to report + * multiple events through a single interrupt, it must ensure that the + * interrupt remains asserted until check-exception has been used to + * process all out-standing events for that interrupt. + * + * Implementation notes: + * - Events must be processed in the order they are returned. Hence, + * sequential in nature. + * - The owner of an event is determined by combinations of scope, + * event type, and sub-type. There is no easy way to pre-sort clients + * by scope or event type alone. For example, Torrent ISR route change + * event is reported with scope 0x00 (Not Applicatable) rather than + * 0x3B (Torrent-hub). It is better to let the clients to identify + * who owns the the event. 
+ */ + +static irqreturn_t ioei_interrupt(int irq, void *dev_id) +{ + struct pseries_io_event *event; + int rtas_rc; + + for (;;) { + rtas_rc = rtas_call(ioei_check_exception_token, 6, 1, NULL, + RTAS_VECTOR_EXTERNAL_INTERRUPT, + virq_to_hw(irq), + RTAS_IO_EVENTS, 1 /* Time Critical */, + __pa(ioei_rtas_buf), + RTAS_DATA_BUF_SIZE); + if (rtas_rc != 0) + break; + + event = ioei_find_event((struct rtas_error_log *)ioei_rtas_buf); + if (!event) + continue; + + atomic_notifier_call_chain(&pseries_ioei_notifier_list, + 0, event); + } + return IRQ_HANDLED; +} + +static int __init ioei_init(void) +{ + struct device_node *np; + + ioei_check_exception_token = rtas_token("check-exception"); + if (ioei_check_exception_token == RTAS_UNKNOWN_SERVICE) { + pr_warning("IO Event IRQ not supported on this system !\n"); + return -ENODEV; + } + np = of_find_node_by_path("/event-sources/ibm,io-events"); + if (np) { + request_event_sources_irqs(np, ioei_interrupt, "IO_EVENT"); + of_node_put(np); + } else { + pr_err("io_event_irq: No ibm,io-events on system! " + "IO Event interrupt disabled.\n"); + return -ENODEV; + } + return 0; +} +machine_subsys_initcall(pseries, ioei_init); + diff --git a/trunk/arch/powerpc/platforms/pseries/iommu.c b/trunk/arch/powerpc/platforms/pseries/iommu.c index 6d5412a18b26..01faab9456ca 100644 --- a/trunk/arch/powerpc/platforms/pseries/iommu.c +++ b/trunk/arch/powerpc/platforms/pseries/iommu.c @@ -659,15 +659,18 @@ static void remove_ddw(struct device_node *np) { struct dynamic_dma_window_prop *dwp; struct property *win64; - const u32 *ddr_avail; + const u32 *ddw_avail; u64 liobn; int len, ret; - ddr_avail = of_get_property(np, "ibm,ddw-applicable", &len); + ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len); win64 = of_find_property(np, DIRECT64_PROPNAME, NULL); - if (!win64 || !ddr_avail || len < 3 * sizeof(u32)) + if (!win64) return; + if (!ddw_avail || len < 3 * sizeof(u32) || win64->length < sizeof(*dwp)) + goto delprop; + dwp = win64->value; liobn = (u64)be32_to_cpu(dwp->liobn); @@ -681,28 +684,29 @@ static void remove_ddw(struct device_node *np) pr_debug("%s successfully cleared tces in window.\n", np->full_name); - ret = rtas_call(ddr_avail[2], 1, 1, NULL, liobn); + ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn); if (ret) pr_warning("%s: failed to remove direct window: rtas returned " "%d to ibm,remove-pe-dma-window(%x) %llx\n", - np->full_name, ret, ddr_avail[2], liobn); + np->full_name, ret, ddw_avail[2], liobn); else pr_debug("%s: successfully removed direct window: rtas returned " "%d to ibm,remove-pe-dma-window(%x) %llx\n", - np->full_name, ret, ddr_avail[2], liobn); -} + np->full_name, ret, ddw_avail[2], liobn); +delprop: + ret = prom_remove_property(np, win64); + if (ret) + pr_warning("%s: failed to remove direct window property: %d\n", + np->full_name, ret); +} -static int dupe_ddw_if_already_created(struct pci_dev *dev, struct device_node *pdn) +static u64 find_existing_ddw(struct device_node *pdn) { - struct device_node *dn; - struct pci_dn *pcidn; struct direct_window *window; const struct dynamic_dma_window_prop *direct64; u64 dma_addr = 0; - dn = pci_device_to_OF_node(dev); - pcidn = PCI_DN(dn); spin_lock(&direct_window_list_lock); /* check if we already created a window and dupe that config if so */ list_for_each_entry(window, &direct_window_list, list) { @@ -717,36 +721,40 @@ static int dupe_ddw_if_already_created(struct pci_dev *dev, struct device_node * return dma_addr; } -static u64 dupe_ddw_if_kexec(struct pci_dev *dev, struct device_node *pdn) 
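For reference, the ibm,ddw-applicable property consulted by these dynamic-DMA-window hunks carries three RTAS tokens, in the order query / create / remove; a minimal sketch of fetching them (hypothetical example_* name, mirroring the checks used in remove_ddw() and enable_ddw()):

static int example_read_ddw_tokens(struct device_node *pdn,
				   u32 *query, u32 *create, u32 *remove)
{
	const u32 *ddw_avail;
	int len;

	/* ibm,query-pe-dma-window, ibm,create-pe-dma-window,
	 * ibm,remove-pe-dma-window, in that order */
	ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
	if (!ddw_avail || len < 3 * sizeof(u32))
		return -ENODEV;

	*query = ddw_avail[0];
	*create = ddw_avail[1];
	*remove = ddw_avail[2];
	return 0;
}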
+static int find_existing_ddw_windows(void) { - struct device_node *dn; - struct pci_dn *pcidn; int len; + struct device_node *pdn; struct direct_window *window; const struct dynamic_dma_window_prop *direct64; - u64 dma_addr = 0; - dn = pci_device_to_OF_node(dev); - pcidn = PCI_DN(dn); - direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len); - if (direct64) { + if (!firmware_has_feature(FW_FEATURE_LPAR)) + return 0; + + for_each_node_with_property(pdn, DIRECT64_PROPNAME) { + direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len); + if (!direct64) + continue; + window = kzalloc(sizeof(*window), GFP_KERNEL); - if (!window) { + if (!window || len < sizeof(struct dynamic_dma_window_prop)) { + kfree(window); remove_ddw(pdn); - } else { - window->device = pdn; - window->prop = direct64; - spin_lock(&direct_window_list_lock); - list_add(&window->list, &direct_window_list); - spin_unlock(&direct_window_list_lock); - dma_addr = direct64->dma_base; + continue; } + + window->device = pdn; + window->prop = direct64; + spin_lock(&direct_window_list_lock); + list_add(&window->list, &direct_window_list); + spin_unlock(&direct_window_list_lock); } - return dma_addr; + return 0; } +machine_arch_initcall(pseries, find_existing_ddw_windows); -static int query_ddw(struct pci_dev *dev, const u32 *ddr_avail, +static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail, struct ddw_query_response *query) { struct device_node *dn; @@ -767,15 +775,15 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddr_avail, if (pcidn->eeh_pe_config_addr) cfg_addr = pcidn->eeh_pe_config_addr; buid = pcidn->phb->buid; - ret = rtas_call(ddr_avail[0], 3, 5, (u32 *)query, + ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query, cfg_addr, BUID_HI(buid), BUID_LO(buid)); dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x" - " returned %d\n", ddr_avail[0], cfg_addr, BUID_HI(buid), + " returned %d\n", ddw_avail[0], cfg_addr, BUID_HI(buid), BUID_LO(buid), ret); return ret; } -static int create_ddw(struct pci_dev *dev, const u32 *ddr_avail, +static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail, struct ddw_create_response *create, int page_shift, int window_shift) { @@ -800,12 +808,12 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddr_avail, do { /* extra outputs are LIOBN and dma-addr (hi, lo) */ - ret = rtas_call(ddr_avail[1], 5, 4, (u32 *)create, cfg_addr, + ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create, cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift, window_shift); } while (rtas_busy_delay(ret)); dev_info(&dev->dev, "ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d " - "(liobn = 0x%x starting addr = %x %x)\n", ddr_avail[1], + "(liobn = 0x%x starting addr = %x %x)\n", ddw_avail[1], cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift, window_shift, ret, create->liobn, create->addr_hi, create->addr_lo); @@ -831,18 +839,14 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) int page_shift; u64 dma_addr, max_addr; struct device_node *dn; - const u32 *uninitialized_var(ddr_avail); + const u32 *uninitialized_var(ddw_avail); struct direct_window *window; - struct property *uninitialized_var(win64); + struct property *win64; struct dynamic_dma_window_prop *ddwprop; mutex_lock(&direct_window_init_mutex); - dma_addr = dupe_ddw_if_already_created(dev, pdn); - if (dma_addr != 0) - goto out_unlock; - - dma_addr = dupe_ddw_if_kexec(dev, pdn); + dma_addr = find_existing_ddw(pdn); if (dma_addr != 0) goto out_unlock; @@ -854,8 +858,8 @@ static u64 enable_ddw(struct pci_dev *dev, struct 
device_node *pdn) * for the given node in that order. * the property is actually in the parent, not the PE */ - ddr_avail = of_get_property(pdn, "ibm,ddw-applicable", &len); - if (!ddr_avail || len < 3 * sizeof(u32)) + ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len); + if (!ddw_avail || len < 3 * sizeof(u32)) goto out_unlock; /* @@ -865,7 +869,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) * of page sizes: supported and supported for migrate-dma. */ dn = pci_device_to_OF_node(dev); - ret = query_ddw(dev, ddr_avail, &query); + ret = query_ddw(dev, ddw_avail, &query); if (ret != 0) goto out_unlock; @@ -907,13 +911,14 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) } win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL); win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL); + win64->length = sizeof(*ddwprop); if (!win64->name || !win64->value) { dev_info(&dev->dev, "couldn't allocate property name and value\n"); goto out_free_prop; } - ret = create_ddw(dev, ddr_avail, &create, page_shift, len); + ret = create_ddw(dev, ddw_avail, &create, page_shift, len); if (ret != 0) goto out_free_prop; @@ -1021,13 +1026,16 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask) const void *dma_window = NULL; u64 dma_offset; - if (!dev->dma_mask || !dma_supported(dev, dma_mask)) + if (!dev->dma_mask) return -EIO; + if (!dev_is_pci(dev)) + goto check_mask; + + pdev = to_pci_dev(dev); + /* only attempt to use a new window if 64-bit DMA is requested */ if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) { - pdev = to_pci_dev(dev); - dn = pci_device_to_OF_node(pdev); dev_dbg(dev, "node is %s\n", dn->full_name); @@ -1054,12 +1062,17 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask) } } - /* fall-through to iommu ops */ - if (!ddw_enabled) { - dev_info(dev, "Using 32-bit DMA via iommu\n"); + /* fall back on iommu ops, restore table pointer with ops */ + if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) { + dev_info(dev, "Restoring 32-bit DMA via iommu\n"); set_dma_ops(dev, &dma_iommu_ops); + pci_dma_dev_setup_pSeriesLP(pdev); } +check_mask: + if (!dma_supported(dev, dma_mask)) + return -EIO; + *dev->dma_mask = dma_mask; return 0; } diff --git a/trunk/arch/powerpc/platforms/pseries/kexec.c b/trunk/arch/powerpc/platforms/pseries/kexec.c index 77d38a5e2ff9..54cf3a4aa16b 100644 --- a/trunk/arch/powerpc/platforms/pseries/kexec.c +++ b/trunk/arch/powerpc/platforms/pseries/kexec.c @@ -7,15 +7,18 @@ * 2 of the License, or (at your option) any later version. 
*/ +#include +#include + #include #include #include #include #include +#include #include #include "pseries.h" -#include "xics.h" #include "plpar_wrappers.h" static void pseries_kexec_cpu_down(int crash_shutdown, int secondary) diff --git a/trunk/arch/powerpc/platforms/pseries/lpar.c b/trunk/arch/powerpc/platforms/pseries/lpar.c index ca5d5898d320..39e6e0a7b2fa 100644 --- a/trunk/arch/powerpc/platforms/pseries/lpar.c +++ b/trunk/arch/powerpc/platforms/pseries/lpar.c @@ -329,6 +329,8 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group, /* Make pHyp happy */ if ((rflags & _PAGE_NO_CACHE) & !(rflags & _PAGE_WRITETHRU)) hpte_r &= ~_PAGE_COHERENT; + if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N)) + flags |= H_COALESCE_CAND; lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot); if (unlikely(lpar_rc == H_PTEG_FULL)) { @@ -573,7 +575,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local) unsigned long i, pix, rc; unsigned long flags = 0; struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); - int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE); + int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); unsigned long param[9]; unsigned long va; unsigned long hash, index, shift, hidx, slot; @@ -771,3 +773,47 @@ void __trace_hcall_exit(long opcode, unsigned long retval, local_irq_restore(flags); } #endif + +/** + * h_get_mpp + * H_GET_MPP hcall returns info in 7 parms + */ +int h_get_mpp(struct hvcall_mpp_data *mpp_data) +{ + int rc; + unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; + + rc = plpar_hcall9(H_GET_MPP, retbuf); + + mpp_data->entitled_mem = retbuf[0]; + mpp_data->mapped_mem = retbuf[1]; + + mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff; + mpp_data->pool_num = retbuf[2] & 0xffff; + + mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff; + mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff; + mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff; + + mpp_data->pool_size = retbuf[4]; + mpp_data->loan_request = retbuf[5]; + mpp_data->backing_mem = retbuf[6]; + + return rc; +} +EXPORT_SYMBOL(h_get_mpp); + +int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data) +{ + int rc; + unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 }; + + rc = plpar_hcall9(H_GET_MPP_X, retbuf); + + mpp_x_data->coalesced_bytes = retbuf[0]; + mpp_x_data->pool_coalesced_bytes = retbuf[1]; + mpp_x_data->pool_purr_cycles = retbuf[2]; + mpp_x_data->pool_spurr_cycles = retbuf[3]; + + return rc; +} diff --git a/trunk/arch/powerpc/platforms/pseries/plpar_wrappers.h b/trunk/arch/powerpc/platforms/pseries/plpar_wrappers.h index d9801117124b..4bf21207d7d3 100644 --- a/trunk/arch/powerpc/platforms/pseries/plpar_wrappers.h +++ b/trunk/arch/powerpc/platforms/pseries/plpar_wrappers.h @@ -270,31 +270,4 @@ static inline long plpar_put_term_char(unsigned long termno, unsigned long len, lbuf[1]); } -static inline long plpar_eoi(unsigned long xirr) -{ - return plpar_hcall_norets(H_EOI, xirr); -} - -static inline long plpar_cppr(unsigned long cppr) -{ - return plpar_hcall_norets(H_CPPR, cppr); -} - -static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr) -{ - return plpar_hcall_norets(H_IPI, servernum, mfrr); -} - -static inline long plpar_xirr(unsigned long *xirr_ret, unsigned char cppr) -{ - long rc; - unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; - - rc = plpar_hcall(H_XIRR, retbuf, cppr); - - *xirr_ret = retbuf[0]; - - return rc; -} - #endif /* _PSERIES_PLPAR_WRAPPERS_H */ diff --git 
a/trunk/arch/powerpc/platforms/pseries/ras.c b/trunk/arch/powerpc/platforms/pseries/ras.c index c55d7ad9c648..086d2ae4e06a 100644 --- a/trunk/arch/powerpc/platforms/pseries/ras.c +++ b/trunk/arch/powerpc/platforms/pseries/ras.c @@ -122,7 +122,7 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id) status = rtas_call(ras_check_exception_token, 6, 1, NULL, RTAS_VECTOR_EXTERNAL_INTERRUPT, - irq_map[irq].hwirq, + virq_to_hw(irq), RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS, critical, __pa(&ras_log_buf), rtas_get_error_log_max()); @@ -157,7 +157,7 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id) status = rtas_call(ras_check_exception_token, 6, 1, NULL, RTAS_VECTOR_EXTERNAL_INTERRUPT, - irq_map[irq].hwirq, + virq_to_hw(irq), RTAS_INTERNAL_ERROR, 1 /*Time Critical */, __pa(&ras_log_buf), rtas_get_error_log_max()); @@ -227,7 +227,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs) struct rtas_error_log *h, *errhdr = NULL; if (!VALID_FWNMI_BUFFER(regs->gpr[3])) { - printk(KERN_ERR "FWNMI: corrupt r3\n"); + printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]); return NULL; } diff --git a/trunk/arch/powerpc/platforms/pseries/setup.c b/trunk/arch/powerpc/platforms/pseries/setup.c index 000724149089..593acceeff96 100644 --- a/trunk/arch/powerpc/platforms/pseries/setup.c +++ b/trunk/arch/powerpc/platforms/pseries/setup.c @@ -53,9 +53,9 @@ #include #include #include -#include "xics.h" #include #include +#include #include #include #include @@ -205,6 +205,9 @@ static void __init pseries_mpic_init_IRQ(void) mpic_assign_isu(mpic, n, isuaddr); } + /* Setup top-level get_irq */ + ppc_md.get_irq = mpic_get_irq; + /* All ISUs are setup, complete initialization */ mpic_init(mpic); @@ -214,7 +217,7 @@ static void __init pseries_mpic_init_IRQ(void) static void __init pseries_xics_init_IRQ(void) { - xics_init_IRQ(); + xics_init(); pseries_setup_i8259_cascade(); } @@ -238,7 +241,6 @@ static void __init pseries_discover_pic(void) if (strstr(typep, "open-pic")) { pSeries_mpic_node = of_node_get(np); ppc_md.init_IRQ = pseries_mpic_init_IRQ; - ppc_md.get_irq = mpic_get_irq; setup_kexec_cpu_down_mpic(); smp_init_pseries_mpic(); return; @@ -276,6 +278,8 @@ static struct notifier_block pci_dn_reconfig_nb = { .notifier_call = pci_dn_reconfig_notifier, }; +struct kmem_cache *dtl_cache; + #ifdef CONFIG_VIRT_CPU_ACCOUNTING /* * Allocate space for the dispatch trace log for all possible cpus @@ -291,10 +295,12 @@ static int alloc_dispatch_logs(void) if (!firmware_has_feature(FW_FEATURE_SPLPAR)) return 0; + if (!dtl_cache) + return 0; + for_each_possible_cpu(cpu) { pp = &paca[cpu]; - dtl = kmalloc_node(DISPATCH_LOG_BYTES, GFP_KERNEL, - cpu_to_node(cpu)); + dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL); if (!dtl) { pr_warn("Failed to allocate dispatch trace log for cpu %d\n", cpu); @@ -324,10 +330,27 @@ static int alloc_dispatch_logs(void) return 0; } - -early_initcall(alloc_dispatch_logs); +#else /* !CONFIG_VIRT_CPU_ACCOUNTING */ +static inline int alloc_dispatch_logs(void) +{ + return 0; +} #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ +static int alloc_dispatch_log_kmem_cache(void) +{ + dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES, + DISPATCH_LOG_BYTES, 0, NULL); + if (!dtl_cache) { + pr_warn("Failed to create dispatch trace log buffer cache\n"); + pr_warn("Stolen time statistics will be unreliable\n"); + return 0; + } + + return alloc_dispatch_logs(); +} +early_initcall(alloc_dispatch_log_kmem_cache); + static void __init pSeries_setup_arch(void) { /* Discover PIC 
type and setup ppc_md accordingly */ @@ -395,6 +418,16 @@ static int pseries_set_xdabr(unsigned long dabr) #define CMO_CHARACTERISTICS_TOKEN 44 #define CMO_MAXLENGTH 1026 +void pSeries_coalesce_init(void) +{ + struct hvcall_mpp_x_data mpp_x_data; + + if (firmware_has_feature(FW_FEATURE_CMO) && !h_get_mpp_x(&mpp_x_data)) + powerpc_firmware_features |= FW_FEATURE_XCMO; + else + powerpc_firmware_features &= ~FW_FEATURE_XCMO; +} + /** * fw_cmo_feature_init - FW_FEATURE_CMO is not stored in ibm,hypertas-functions, * handle that here. (Stolen from parse_system_parameter_string) @@ -464,6 +497,7 @@ void pSeries_cmo_feature_init(void) pr_debug("CMO enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP, CMO_SecPSP); powerpc_firmware_features |= FW_FEATURE_CMO; + pSeries_coalesce_init(); } else pr_debug("CMO not enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP, CMO_SecPSP); diff --git a/trunk/arch/powerpc/platforms/pseries/smp.c b/trunk/arch/powerpc/platforms/pseries/smp.c index a509c5292a67..fbffd7e47ab8 100644 --- a/trunk/arch/powerpc/platforms/pseries/smp.c +++ b/trunk/arch/powerpc/platforms/pseries/smp.c @@ -44,10 +44,11 @@ #include #include #include +#include +#include #include "plpar_wrappers.h" #include "pseries.h" -#include "xics.h" #include "offline_states.h" @@ -136,7 +137,6 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) return 1; } -#ifdef CONFIG_XICS static void __devinit smp_xics_setup_cpu(int cpu) { if (cpu != boot_cpuid) @@ -151,14 +151,13 @@ static void __devinit smp_xics_setup_cpu(int cpu) set_default_offline_state(cpu); #endif } -#endif /* CONFIG_XICS */ -static void __devinit smp_pSeries_kick_cpu(int nr) +static int __devinit smp_pSeries_kick_cpu(int nr) { BUG_ON(nr < 0 || nr >= NR_CPUS); if (!smp_startup_cpu(nr)) - return; + return -ENOENT; /* * The processor is currently spinning, waiting for the @@ -180,6 +179,8 @@ static void __devinit smp_pSeries_kick_cpu(int nr) "Ret= %ld\n", nr, rc); } #endif + + return 0; } static int smp_pSeries_cpu_bootable(unsigned int nr) @@ -197,23 +198,22 @@ static int smp_pSeries_cpu_bootable(unsigned int nr) return 1; } -#ifdef CONFIG_MPIC + static struct smp_ops_t pSeries_mpic_smp_ops = { .message_pass = smp_mpic_message_pass, .probe = smp_mpic_probe, .kick_cpu = smp_pSeries_kick_cpu, .setup_cpu = smp_mpic_setup_cpu, }; -#endif -#ifdef CONFIG_XICS + static struct smp_ops_t pSeries_xics_smp_ops = { - .message_pass = smp_xics_message_pass, - .probe = smp_xics_probe, + .message_pass = smp_muxed_ipi_message_pass, + .cause_ipi = NULL, /* Filled at runtime by xics_smp_probe() */ + .probe = xics_smp_probe, .kick_cpu = smp_pSeries_kick_cpu, .setup_cpu = smp_xics_setup_cpu, .cpu_bootable = smp_pSeries_cpu_bootable, }; -#endif /* This is called very early */ static void __init smp_init_pseries(void) @@ -245,14 +245,12 @@ static void __init smp_init_pseries(void) pr_debug(" <- smp_init_pSeries()\n"); } -#ifdef CONFIG_MPIC void __init smp_init_pseries_mpic(void) { smp_ops = &pSeries_mpic_smp_ops; smp_init_pseries(); } -#endif void __init smp_init_pseries_xics(void) { diff --git a/trunk/arch/powerpc/platforms/pseries/xics.c b/trunk/arch/powerpc/platforms/pseries/xics.c deleted file mode 100644 index d6901334d66e..000000000000 --- a/trunk/arch/powerpc/platforms/pseries/xics.c +++ /dev/null @@ -1,949 +0,0 @@ -/* - * arch/powerpc/platforms/pseries/xics.c - * - * Copyright 2000 IBM Corporation. 
- * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "xics.h" -#include "plpar_wrappers.h" - -static struct irq_host *xics_host; - -#define XICS_IPI 2 -#define XICS_IRQ_SPURIOUS 0 - -/* Want a priority other than 0. Various HW issues require this. */ -#define DEFAULT_PRIORITY 5 - -/* - * Mark IPIs as higher priority so we can take them inside interrupts that - * arent marked IRQF_DISABLED - */ -#define IPI_PRIORITY 4 - -/* The least favored priority */ -#define LOWEST_PRIORITY 0xFF - -/* The number of priorities defined above */ -#define MAX_NUM_PRIORITIES 3 - -static unsigned int default_server = 0xFF; -static unsigned int default_distrib_server = 0; -static unsigned int interrupt_server_size = 8; - -/* RTAS service tokens */ -static int ibm_get_xive; -static int ibm_set_xive; -static int ibm_int_on; -static int ibm_int_off; - -struct xics_cppr { - unsigned char stack[MAX_NUM_PRIORITIES]; - int index; -}; - -static DEFINE_PER_CPU(struct xics_cppr, xics_cppr); - -/* Direct hardware low level accessors */ - -/* The part of the interrupt presentation layer that we care about */ -struct xics_ipl { - union { - u32 word; - u8 bytes[4]; - } xirr_poll; - union { - u32 word; - u8 bytes[4]; - } xirr; - u32 dummy; - union { - u32 word; - u8 bytes[4]; - } qirr; -}; - -static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS]; - -static inline unsigned int direct_xirr_info_get(void) -{ - int cpu = smp_processor_id(); - - return in_be32(&xics_per_cpu[cpu]->xirr.word); -} - -static inline void direct_xirr_info_set(unsigned int value) -{ - int cpu = smp_processor_id(); - - out_be32(&xics_per_cpu[cpu]->xirr.word, value); -} - -static inline void direct_cppr_info(u8 value) -{ - int cpu = smp_processor_id(); - - out_8(&xics_per_cpu[cpu]->xirr.bytes[0], value); -} - -static inline void direct_qirr_info(int n_cpu, u8 value) -{ - out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value); -} - - -/* LPAR low level accessors */ - -static inline unsigned int lpar_xirr_info_get(unsigned char cppr) -{ - unsigned long lpar_rc; - unsigned long return_value; - - lpar_rc = plpar_xirr(&return_value, cppr); - if (lpar_rc != H_SUCCESS) - panic(" bad return code xirr - rc = %lx\n", lpar_rc); - return (unsigned int)return_value; -} - -static inline void lpar_xirr_info_set(unsigned int value) -{ - unsigned long lpar_rc; - - lpar_rc = plpar_eoi(value); - if (lpar_rc != H_SUCCESS) - panic("bad return code EOI - rc = %ld, value=%x\n", lpar_rc, - value); -} - -static inline void lpar_cppr_info(u8 value) -{ - unsigned long lpar_rc; - - lpar_rc = plpar_cppr(value); - if (lpar_rc != H_SUCCESS) - panic("bad return code cppr - rc = %lx\n", lpar_rc); -} - -static inline void lpar_qirr_info(int n_cpu , u8 value) -{ - unsigned long lpar_rc; - - lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value); - if (lpar_rc != H_SUCCESS) - panic("bad return code qirr - rc = %lx\n", lpar_rc); -} - - -/* Interface to generic irq subsystem */ - -#ifdef CONFIG_SMP -/* - * For the moment we only implement delivery to all cpus or one cpu. - * - * If the requested affinity is cpu_all_mask, we set global affinity. 
- * If not we set it to the first cpu in the mask, even if multiple cpus - * are set. This is so things like irqbalance (which set core and package - * wide affinities) do the right thing. - */ -static int get_irq_server(unsigned int virq, const struct cpumask *cpumask, - unsigned int strict_check) -{ - - if (!distribute_irqs) - return default_server; - - if (!cpumask_subset(cpu_possible_mask, cpumask)) { - int server = cpumask_first_and(cpu_online_mask, cpumask); - - if (server < nr_cpu_ids) - return get_hard_smp_processor_id(server); - - if (strict_check) - return -1; - } - - /* - * Workaround issue with some versions of JS20 firmware that - * deliver interrupts to cpus which haven't been started. This - * happens when using the maxcpus= boot option. - */ - if (cpumask_equal(cpu_online_mask, cpu_present_mask)) - return default_distrib_server; - - return default_server; -} -#else -#define get_irq_server(virq, cpumask, strict_check) (default_server) -#endif - -static void xics_unmask_irq(struct irq_data *d) -{ - unsigned int hwirq; - int call_status; - int server; - - pr_devel("xics: unmask virq %d\n", d->irq); - - hwirq = (unsigned int)irq_map[d->irq].hwirq; - pr_devel(" -> map to hwirq 0x%x\n", hwirq); - if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) - return; - - server = get_irq_server(d->irq, d->affinity, 0); - - call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hwirq, server, - DEFAULT_PRIORITY); - if (call_status != 0) { - printk(KERN_ERR - "%s: ibm_set_xive irq %u server %x returned %d\n", - __func__, hwirq, server, call_status); - return; - } - - /* Now unmask the interrupt (often a no-op) */ - call_status = rtas_call(ibm_int_on, 1, 1, NULL, hwirq); - if (call_status != 0) { - printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n", - __func__, hwirq, call_status); - return; - } -} - -static unsigned int xics_startup(struct irq_data *d) -{ - /* - * The generic MSI code returns with the interrupt disabled on the - * card, using the MSI mask bits. Firmware doesn't appear to unmask - * at that level, so we do it here by hand. - */ - if (d->msi_desc) - unmask_msi_irq(d); - - /* unmask it */ - xics_unmask_irq(d); - return 0; -} - -static void xics_mask_real_irq(unsigned int hwirq) -{ - int call_status; - - if (hwirq == XICS_IPI) - return; - - call_status = rtas_call(ibm_int_off, 1, 1, NULL, hwirq); - if (call_status != 0) { - printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n", - __func__, hwirq, call_status); - return; - } - - /* Have to set XIVE to 0xff to be able to remove a slot */ - call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hwirq, - default_server, 0xff); - if (call_status != 0) { - printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n", - __func__, hwirq, call_status); - return; - } -} - -static void xics_mask_irq(struct irq_data *d) -{ - unsigned int hwirq; - - pr_devel("xics: mask virq %d\n", d->irq); - - hwirq = (unsigned int)irq_map[d->irq].hwirq; - if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) - return; - xics_mask_real_irq(hwirq); -} - -static void xics_mask_unknown_vec(unsigned int vec) -{ - printk(KERN_ERR "Interrupt %u (real) is invalid, disabling it.\n", vec); - xics_mask_real_irq(vec); -} - -static inline unsigned int xics_xirr_vector(unsigned int xirr) -{ - /* - * The top byte is the old cppr, to be restored on EOI. - * The remaining 24 bits are the vector. 
- */ - return xirr & 0x00ffffff; -} - -static void push_cppr(unsigned int vec) -{ - struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); - - if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1)) - return; - - if (vec == XICS_IPI) - os_cppr->stack[++os_cppr->index] = IPI_PRIORITY; - else - os_cppr->stack[++os_cppr->index] = DEFAULT_PRIORITY; -} - -static unsigned int xics_get_irq_direct(void) -{ - unsigned int xirr = direct_xirr_info_get(); - unsigned int vec = xics_xirr_vector(xirr); - unsigned int irq; - - if (vec == XICS_IRQ_SPURIOUS) - return NO_IRQ; - - irq = irq_radix_revmap_lookup(xics_host, vec); - if (likely(irq != NO_IRQ)) { - push_cppr(vec); - return irq; - } - - /* We don't have a linux mapping, so have rtas mask it. */ - xics_mask_unknown_vec(vec); - - /* We might learn about it later, so EOI it */ - direct_xirr_info_set(xirr); - return NO_IRQ; -} - -static unsigned int xics_get_irq_lpar(void) -{ - struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); - unsigned int xirr = lpar_xirr_info_get(os_cppr->stack[os_cppr->index]); - unsigned int vec = xics_xirr_vector(xirr); - unsigned int irq; - - if (vec == XICS_IRQ_SPURIOUS) - return NO_IRQ; - - irq = irq_radix_revmap_lookup(xics_host, vec); - if (likely(irq != NO_IRQ)) { - push_cppr(vec); - return irq; - } - - /* We don't have a linux mapping, so have RTAS mask it. */ - xics_mask_unknown_vec(vec); - - /* We might learn about it later, so EOI it */ - lpar_xirr_info_set(xirr); - return NO_IRQ; -} - -static unsigned char pop_cppr(void) -{ - struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); - - if (WARN_ON(os_cppr->index < 1)) - return LOWEST_PRIORITY; - - return os_cppr->stack[--os_cppr->index]; -} - -static void xics_eoi_direct(struct irq_data *d) -{ - unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq; - - iosync(); - direct_xirr_info_set((pop_cppr() << 24) | hwirq); -} - -static void xics_eoi_lpar(struct irq_data *d) -{ - unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq; - - iosync(); - lpar_xirr_info_set((pop_cppr() << 24) | hwirq); -} - -static int -xics_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) -{ - unsigned int hwirq; - int status; - int xics_status[2]; - int irq_server; - - hwirq = (unsigned int)irq_map[d->irq].hwirq; - if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) - return -1; - - status = rtas_call(ibm_get_xive, 1, 3, xics_status, hwirq); - - if (status) { - printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", - __func__, hwirq, status); - return -1; - } - - irq_server = get_irq_server(d->irq, cpumask, 1); - if (irq_server == -1) { - char cpulist[128]; - cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); - printk(KERN_WARNING - "%s: No online cpus in the mask %s for irq %d\n", - __func__, cpulist, d->irq); - return -1; - } - - status = rtas_call(ibm_set_xive, 3, 1, NULL, - hwirq, irq_server, xics_status[1]); - - if (status) { - printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n", - __func__, hwirq, status); - return -1; - } - - return 0; -} - -static struct irq_chip xics_pic_direct = { - .name = "XICS", - .irq_startup = xics_startup, - .irq_mask = xics_mask_irq, - .irq_unmask = xics_unmask_irq, - .irq_eoi = xics_eoi_direct, - .irq_set_affinity = xics_set_affinity -}; - -static struct irq_chip xics_pic_lpar = { - .name = "XICS", - .irq_startup = xics_startup, - .irq_mask = xics_mask_irq, - .irq_unmask = xics_unmask_irq, - .irq_eoi = xics_eoi_lpar, - .irq_set_affinity = xics_set_affinity -}; - - -/* Interface to arch irq controller subsystem layer */ - 
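/*
 * Illustrative sketch only (not part of the patch above): the 32-bit XIRR
 * layout used by xics_xirr_vector() and the xics_eoi_*() helpers in the
 * removed xics.c.  The top byte carries the CPPR to be restored on EOI, the
 * low 24 bits carry the interrupt vector.  The helper names below are
 * hypothetical, purely for illustration.
 */
#include <stdint.h>

/* Pack a CPPR and a 24-bit vector into one XIRR word, as written on EOI. */
static inline uint32_t xirr_pack(uint8_t cppr, uint32_t vector)
{
	return ((uint32_t)cppr << 24) | (vector & 0x00ffffff);
}

/* Recover the vector, as xics_xirr_vector() does. */
static inline uint32_t xirr_vector(uint32_t xirr)
{
	return xirr & 0x00ffffff;
}

/* Recover the saved CPPR from the top byte. */
static inline uint8_t xirr_cppr(uint32_t xirr)
{
	return (uint8_t)(xirr >> 24);
}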
-/* Points to the irq_chip we're actually using */ -static struct irq_chip *xics_irq_chip; - -static int xics_host_match(struct irq_host *h, struct device_node *node) -{ - /* IBM machines have interrupt parents of various funky types for things - * like vdevices, events, etc... The trick we use here is to match - * everything here except the legacy 8259 which is compatible "chrp,iic" - */ - return !of_device_is_compatible(node, "chrp,iic"); -} - -static int xics_host_map(struct irq_host *h, unsigned int virq, - irq_hw_number_t hw) -{ - pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw); - - /* Insert the interrupt mapping into the radix tree for fast lookup */ - irq_radix_revmap_insert(xics_host, virq, hw); - - irq_set_status_flags(virq, IRQ_LEVEL); - irq_set_chip_and_handler(virq, xics_irq_chip, handle_fasteoi_irq); - return 0; -} - -static int xics_host_xlate(struct irq_host *h, struct device_node *ct, - const u32 *intspec, unsigned int intsize, - irq_hw_number_t *out_hwirq, unsigned int *out_flags) - -{ - /* Current xics implementation translates everything - * to level. It is not technically right for MSIs but this - * is irrelevant at this point. We might get smarter in the future - */ - *out_hwirq = intspec[0]; - *out_flags = IRQ_TYPE_LEVEL_LOW; - - return 0; -} - -static struct irq_host_ops xics_host_ops = { - .match = xics_host_match, - .map = xics_host_map, - .xlate = xics_host_xlate, -}; - -static void __init xics_init_host(void) -{ - if (firmware_has_feature(FW_FEATURE_LPAR)) - xics_irq_chip = &xics_pic_lpar; - else - xics_irq_chip = &xics_pic_direct; - - xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops, - XICS_IRQ_SPURIOUS); - BUG_ON(xics_host == NULL); - irq_set_default_host(xics_host); -} - - -/* Inter-processor interrupt support */ - -#ifdef CONFIG_SMP -/* - * XICS only has a single IPI, so encode the messages per CPU - */ -static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message); - -static inline void smp_xics_do_message(int cpu, int msg) -{ - unsigned long *tgt = &per_cpu(xics_ipi_message, cpu); - - set_bit(msg, tgt); - mb(); - if (firmware_has_feature(FW_FEATURE_LPAR)) - lpar_qirr_info(cpu, IPI_PRIORITY); - else - direct_qirr_info(cpu, IPI_PRIORITY); -} - -void smp_xics_message_pass(int target, int msg) -{ - unsigned int i; - - if (target < NR_CPUS) { - smp_xics_do_message(target, msg); - } else { - for_each_online_cpu(i) { - if (target == MSG_ALL_BUT_SELF - && i == smp_processor_id()) - continue; - smp_xics_do_message(i, msg); - } - } -} - -static irqreturn_t xics_ipi_dispatch(int cpu) -{ - unsigned long *tgt = &per_cpu(xics_ipi_message, cpu); - - mb(); /* order mmio clearing qirr */ - while (*tgt) { - if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, tgt)) { - smp_message_recv(PPC_MSG_CALL_FUNCTION); - } - if (test_and_clear_bit(PPC_MSG_RESCHEDULE, tgt)) { - smp_message_recv(PPC_MSG_RESCHEDULE); - } - if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, tgt)) { - smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE); - } -#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) - if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, tgt)) { - smp_message_recv(PPC_MSG_DEBUGGER_BREAK); - } -#endif - } - return IRQ_HANDLED; -} - -static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id) -{ - int cpu = smp_processor_id(); - - direct_qirr_info(cpu, 0xff); - - return xics_ipi_dispatch(cpu); -} - -static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id) -{ - int cpu = smp_processor_id(); - - lpar_qirr_info(cpu, 0xff); - - return xics_ipi_dispatch(cpu); -} 
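/*
 * Illustrative sketch only: the multiplexed-IPI scheme implemented by
 * smp_xics_message_pass()/xics_ipi_dispatch() in the removed xics.c.  XICS
 * has a single IPI per CPU, so each CPU owns a word of pending message bits;
 * a sender sets a bit and rings the hardware IPI, the receiver acks the qirr
 * and drains the word.  This is a minimal standalone sketch using C11
 * atomics; the names and MSG_* values are hypothetical, not the kernel API.
 */
#include <stdatomic.h>

enum { MAX_CPUS = 4, MSG_CALL_FUNCTION = 0, MSG_RESCHEDULE = 1, MSG_COUNT = 2 };

static _Atomic unsigned long ipi_message[MAX_CPUS];

static void ring_hw_ipi(int cpu)	/* stand-in for the qirr/IPI trigger */
{
	(void)cpu;
}

static void handle_message(int msg)	/* stand-in for smp_message_recv() */
{
	(void)msg;
}

static void ipi_send(int cpu, int msg)
{
	/* like set_bit() + mb() before poking the presentation controller */
	atomic_fetch_or(&ipi_message[cpu], 1UL << msg);
	ring_hw_ipi(cpu);
}

static void ipi_receive(int cpu)
{
	unsigned long pending = atomic_exchange(&ipi_message[cpu], 0);
	int msg;

	for (msg = 0; msg < MSG_COUNT; msg++)
		if (pending & (1UL << msg))
			handle_message(msg);
}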
- -static void xics_request_ipi(void) -{ - unsigned int ipi; - int rc; - - ipi = irq_create_mapping(xics_host, XICS_IPI); - BUG_ON(ipi == NO_IRQ); - - /* - * IPIs are marked IRQF_DISABLED as they must run with irqs - * disabled - */ - irq_set_handler(ipi, handle_percpu_irq); - if (firmware_has_feature(FW_FEATURE_LPAR)) - rc = request_irq(ipi, xics_ipi_action_lpar, - IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL); - else - rc = request_irq(ipi, xics_ipi_action_direct, - IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL); - BUG_ON(rc); -} - -int __init smp_xics_probe(void) -{ - xics_request_ipi(); - - return cpumask_weight(cpu_possible_mask); -} - -#endif /* CONFIG_SMP */ - - -/* Initialization */ - -static void xics_update_irq_servers(void) -{ - int i, j; - struct device_node *np; - u32 ilen; - const u32 *ireg; - u32 hcpuid; - - /* Find the server numbers for the boot cpu. */ - np = of_get_cpu_node(boot_cpuid, NULL); - BUG_ON(!np); - - ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen); - if (!ireg) { - of_node_put(np); - return; - } - - i = ilen / sizeof(int); - hcpuid = get_hard_smp_processor_id(boot_cpuid); - - /* Global interrupt distribution server is specified in the last - * entry of "ibm,ppc-interrupt-gserver#s" property. Get the last - * entry fom this property for current boot cpu id and use it as - * default distribution server - */ - for (j = 0; j < i; j += 2) { - if (ireg[j] == hcpuid) { - default_server = hcpuid; - default_distrib_server = ireg[j+1]; - } - } - - of_node_put(np); -} - -static void __init xics_map_one_cpu(int hw_id, unsigned long addr, - unsigned long size) -{ - int i; - - /* This may look gross but it's good enough for now, we don't quite - * have a hard -> linux processor id matching. - */ - for_each_possible_cpu(i) { - if (!cpu_present(i)) - continue; - if (hw_id == get_hard_smp_processor_id(i)) { - xics_per_cpu[i] = ioremap(addr, size); - return; - } - } -} - -static void __init xics_init_one_node(struct device_node *np, - unsigned int *indx) -{ - unsigned int ilen; - const u32 *ireg; - - /* This code does the theorically broken assumption that the interrupt - * server numbers are the same as the hard CPU numbers. - * This happens to be the case so far but we are playing with fire... - * should be fixed one of these days. -BenH. - */ - ireg = of_get_property(np, "ibm,interrupt-server-ranges", NULL); - - /* Do that ever happen ? we'll know soon enough... but even good'old - * f80 does have that property .. - */ - WARN_ON(ireg == NULL); - if (ireg) { - /* - * set node starting index for this node - */ - *indx = *ireg; - } - ireg = of_get_property(np, "reg", &ilen); - if (!ireg) - panic("xics_init_IRQ: can't find interrupt reg property"); - - while (ilen >= (4 * sizeof(u32))) { - unsigned long addr, size; - - /* XXX Use proper OF parsing code here !!! 
*/ - addr = (unsigned long)*ireg++ << 32; - ilen -= sizeof(u32); - addr |= *ireg++; - ilen -= sizeof(u32); - size = (unsigned long)*ireg++ << 32; - ilen -= sizeof(u32); - size |= *ireg++; - ilen -= sizeof(u32); - xics_map_one_cpu(*indx, addr, size); - (*indx)++; - } -} - -void __init xics_init_IRQ(void) -{ - struct device_node *np; - u32 indx = 0; - int found = 0; - const u32 *isize; - - ppc64_boot_msg(0x20, "XICS Init"); - - ibm_get_xive = rtas_token("ibm,get-xive"); - ibm_set_xive = rtas_token("ibm,set-xive"); - ibm_int_on = rtas_token("ibm,int-on"); - ibm_int_off = rtas_token("ibm,int-off"); - - for_each_node_by_type(np, "PowerPC-External-Interrupt-Presentation") { - found = 1; - if (firmware_has_feature(FW_FEATURE_LPAR)) { - of_node_put(np); - break; - } - xics_init_one_node(np, &indx); - } - if (found == 0) - return; - - /* get the bit size of server numbers */ - found = 0; - - for_each_compatible_node(np, NULL, "ibm,ppc-xics") { - isize = of_get_property(np, "ibm,interrupt-server#-size", NULL); - - if (!isize) - continue; - - if (!found) { - interrupt_server_size = *isize; - found = 1; - } else if (*isize != interrupt_server_size) { - printk(KERN_WARNING "XICS: " - "mismatched ibm,interrupt-server#-size\n"); - interrupt_server_size = max(*isize, - interrupt_server_size); - } - } - - xics_update_irq_servers(); - xics_init_host(); - - if (firmware_has_feature(FW_FEATURE_LPAR)) - ppc_md.get_irq = xics_get_irq_lpar; - else - ppc_md.get_irq = xics_get_irq_direct; - - xics_setup_cpu(); - - ppc64_boot_msg(0x21, "XICS Done"); -} - -/* Cpu startup, shutdown, and hotplug */ - -static void xics_set_cpu_priority(unsigned char cppr) -{ - struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); - - /* - * we only really want to set the priority when there's - * just one cppr value on the stack - */ - WARN_ON(os_cppr->index != 0); - - os_cppr->stack[0] = cppr; - - if (firmware_has_feature(FW_FEATURE_LPAR)) - lpar_cppr_info(cppr); - else - direct_cppr_info(cppr); - iosync(); -} - -/* Have the calling processor join or leave the specified global queue */ -static void xics_set_cpu_giq(unsigned int gserver, unsigned int join) -{ - int index; - int status; - - if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL)) - return; - - index = (1UL << interrupt_server_size) - 1 - gserver; - - status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join); - - WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n", - GLOBAL_INTERRUPT_QUEUE, index, join, status); -} - -void xics_setup_cpu(void) -{ - xics_set_cpu_priority(LOWEST_PRIORITY); - - xics_set_cpu_giq(default_distrib_server, 1); -} - -void xics_teardown_cpu(void) -{ - struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); - int cpu = smp_processor_id(); - - /* - * we have to reset the cppr index to 0 because we're - * not going to return from the IPI - */ - os_cppr->index = 0; - xics_set_cpu_priority(0); - - /* Clear any pending IPI request */ - if (firmware_has_feature(FW_FEATURE_LPAR)) - lpar_qirr_info(cpu, 0xff); - else - direct_qirr_info(cpu, 0xff); -} - -void xics_kexec_teardown_cpu(int secondary) -{ - xics_teardown_cpu(); - - /* - * we take the ipi irq but and never return so we - * need to EOI the IPI, but want to leave our priority 0 - * - * should we check all the other interrupts too? - * should we be flagging idle loop instead? - * or creating some task to be scheduled? 
- */ - - if (firmware_has_feature(FW_FEATURE_LPAR)) - lpar_xirr_info_set((0x00 << 24) | XICS_IPI); - else - direct_xirr_info_set((0x00 << 24) | XICS_IPI); - - /* - * Some machines need to have at least one cpu in the GIQ, - * so leave the master cpu in the group. - */ - if (secondary) - xics_set_cpu_giq(default_distrib_server, 0); -} - -#ifdef CONFIG_HOTPLUG_CPU - -/* Interrupts are disabled. */ -void xics_migrate_irqs_away(void) -{ - int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id(); - int virq; - - /* If we used to be the default server, move to the new "boot_cpuid" */ - if (hw_cpu == default_server) - xics_update_irq_servers(); - - /* Reject any interrupt that was queued to us... */ - xics_set_cpu_priority(0); - - /* Remove ourselves from the global interrupt queue */ - xics_set_cpu_giq(default_distrib_server, 0); - - /* Allow IPIs again... */ - xics_set_cpu_priority(DEFAULT_PRIORITY); - - for_each_irq(virq) { - struct irq_desc *desc; - struct irq_chip *chip; - unsigned int hwirq; - int xics_status[2]; - int status; - unsigned long flags; - - /* We can't set affinity on ISA interrupts */ - if (virq < NUM_ISA_INTERRUPTS) - continue; - if (irq_map[virq].host != xics_host) - continue; - hwirq = (unsigned int)irq_map[virq].hwirq; - /* We need to get IPIs still. */ - if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) - continue; - - desc = irq_to_desc(virq); - - /* We only need to migrate enabled IRQS */ - if (desc == NULL || desc->action == NULL) - continue; - - chip = irq_desc_get_chip(desc); - if (chip == NULL || chip->irq_set_affinity == NULL) - continue; - - raw_spin_lock_irqsave(&desc->lock, flags); - - status = rtas_call(ibm_get_xive, 1, 3, xics_status, hwirq); - if (status) { - printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", - __func__, hwirq, status); - goto unlock; - } - - /* - * We only support delivery to all cpus or to one cpu. - * The irq has to be migrated only in the single cpu - * case. - */ - if (xics_status[0] != hw_cpu) - goto unlock; - - /* This is expected during cpu offline. */ - if (cpu_online(cpu)) - printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n", - virq, cpu); - - /* Reset affinity to all cpus */ - cpumask_setall(desc->irq_data.affinity); - chip->irq_set_affinity(&desc->irq_data, cpu_all_mask, true); -unlock: - raw_spin_unlock_irqrestore(&desc->lock, flags); - } -} -#endif diff --git a/trunk/arch/powerpc/platforms/pseries/xics.h b/trunk/arch/powerpc/platforms/pseries/xics.h deleted file mode 100644 index d1d5a83039ae..000000000000 --- a/trunk/arch/powerpc/platforms/pseries/xics.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * arch/powerpc/platforms/pseries/xics.h - * - * Copyright 2000 IBM Corporation. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#ifndef _POWERPC_KERNEL_XICS_H -#define _POWERPC_KERNEL_XICS_H - -extern void xics_init_IRQ(void); -extern void xics_setup_cpu(void); -extern void xics_teardown_cpu(void); -extern void xics_kexec_teardown_cpu(int secondary); -extern void xics_migrate_irqs_away(void); -extern int smp_xics_probe(void); -extern void smp_xics_message_pass(int target, int msg); - -#endif /* _POWERPC_KERNEL_XICS_H */ diff --git a/trunk/arch/powerpc/platforms/wsp/Kconfig b/trunk/arch/powerpc/platforms/wsp/Kconfig new file mode 100644 index 000000000000..c3c48eb62cc1 --- /dev/null +++ b/trunk/arch/powerpc/platforms/wsp/Kconfig @@ -0,0 +1,28 @@ +config PPC_WSP + bool + default n + +menu "WSP platform selection" + depends on PPC_BOOK3E_64 + +config PPC_PSR2 + bool "PSR-2 platform" + select PPC_A2 + select GENERIC_TBSYNC + select PPC_SCOM + select EPAPR_BOOT + select PPC_WSP + select PPC_XICS + select PPC_ICP_NATIVE + default y + +endmenu + +config PPC_A2_DD2 + bool "Support for DD2 based A2/WSP systems" + depends on PPC_A2 + +config WORKAROUND_ERRATUM_463 + depends on PPC_A2_DD2 + bool "Workaround erratum 463" + default y diff --git a/trunk/arch/powerpc/platforms/wsp/Makefile b/trunk/arch/powerpc/platforms/wsp/Makefile new file mode 100644 index 000000000000..095be73d6cd4 --- /dev/null +++ b/trunk/arch/powerpc/platforms/wsp/Makefile @@ -0,0 +1,6 @@ +ccflags-y += -mno-minimal-toc + +obj-y += setup.o ics.o +obj-$(CONFIG_PPC_PSR2) += psr2.o opb_pic.o +obj-$(CONFIG_PPC_WSP) += scom_wsp.o +obj-$(CONFIG_SMP) += smp.o scom_smp.o diff --git a/trunk/arch/powerpc/platforms/wsp/ics.c b/trunk/arch/powerpc/platforms/wsp/ics.c new file mode 100644 index 000000000000..e53bd9e7b125 --- /dev/null +++ b/trunk/arch/powerpc/platforms/wsp/ics.c @@ -0,0 +1,712 @@ +/* + * Copyright 2008-2011 IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "wsp.h" +#include "ics.h" + + +/* WSP ICS */ + +struct wsp_ics { + struct ics ics; + struct device_node *dn; + void __iomem *regs; + spinlock_t lock; + unsigned long *bitmap; + u32 chip_id; + u32 lsi_base; + u32 lsi_count; + u64 hwirq_start; + u64 count; +#ifdef CONFIG_SMP + int *hwirq_cpu_map; +#endif +}; + +#define to_wsp_ics(ics) container_of(ics, struct wsp_ics, ics) + +#define INT_SRC_LAYER_BUID_REG(base) ((base) + 0x00) +#define IODA_TBL_ADDR_REG(base) ((base) + 0x18) +#define IODA_TBL_DATA_REG(base) ((base) + 0x20) +#define XIVE_UPDATE_REG(base) ((base) + 0x28) +#define ICS_INT_CAPS_REG(base) ((base) + 0x30) + +#define TBL_AUTO_INCREMENT ((1UL << 63) | (1UL << 15)) +#define TBL_SELECT_XIST (1UL << 48) +#define TBL_SELECT_XIVT (1UL << 49) + +#define IODA_IRQ(irq) ((irq) & (0x7FFULL)) /* HRM 5.1.3.4 */ + +#define XIST_REQUIRED 0x8 +#define XIST_REJECTED 0x4 +#define XIST_PRESENTED 0x2 +#define XIST_PENDING 0x1 + +#define XIVE_SERVER_SHIFT 42 +#define XIVE_SERVER_MASK 0xFFFFULL +#define XIVE_PRIORITY_MASK 0xFFULL +#define XIVE_PRIORITY_SHIFT 32 +#define XIVE_WRITE_ENABLE (1ULL << 63) + +/* + * The docs refer to a 6 bit field called ChipID, which consists of a + * 3 bit NodeID and a 3 bit ChipID. 
On WSP the ChipID is always zero + * so we ignore it, and every where we use "chip id" in this code we + * mean the NodeID. + */ +#define WSP_ICS_CHIP_SHIFT 17 + + +static struct wsp_ics *ics_list; +static int num_ics; + +/* ICS Source controller accessors */ + +static u64 wsp_ics_get_xive(struct wsp_ics *ics, unsigned int irq) +{ + unsigned long flags; + u64 xive; + + spin_lock_irqsave(&ics->lock, flags); + out_be64(IODA_TBL_ADDR_REG(ics->regs), TBL_SELECT_XIVT | IODA_IRQ(irq)); + xive = in_be64(IODA_TBL_DATA_REG(ics->regs)); + spin_unlock_irqrestore(&ics->lock, flags); + + return xive; +} + +static void wsp_ics_set_xive(struct wsp_ics *ics, unsigned int irq, u64 xive) +{ + xive &= ~XIVE_ADDR_MASK; + xive |= (irq & XIVE_ADDR_MASK); + xive |= XIVE_WRITE_ENABLE; + + out_be64(XIVE_UPDATE_REG(ics->regs), xive); +} + +static u64 xive_set_server(u64 xive, unsigned int server) +{ + u64 mask = ~(XIVE_SERVER_MASK << XIVE_SERVER_SHIFT); + + xive &= mask; + xive |= (server & XIVE_SERVER_MASK) << XIVE_SERVER_SHIFT; + + return xive; +} + +static u64 xive_set_priority(u64 xive, unsigned int priority) +{ + u64 mask = ~(XIVE_PRIORITY_MASK << XIVE_PRIORITY_SHIFT); + + xive &= mask; + xive |= (priority & XIVE_PRIORITY_MASK) << XIVE_PRIORITY_SHIFT; + + return xive; +} + + +#ifdef CONFIG_SMP +/* Find logical CPUs within mask on a given chip and store result in ret */ +void cpus_on_chip(int chip_id, cpumask_t *mask, cpumask_t *ret) +{ + int cpu, chip; + struct device_node *cpu_dn, *dn; + const u32 *prop; + + cpumask_clear(ret); + for_each_cpu(cpu, mask) { + cpu_dn = of_get_cpu_node(cpu, NULL); + if (!cpu_dn) + continue; + + prop = of_get_property(cpu_dn, "at-node", NULL); + if (!prop) { + of_node_put(cpu_dn); + continue; + } + + dn = of_find_node_by_phandle(*prop); + of_node_put(cpu_dn); + + chip = wsp_get_chip_id(dn); + if (chip == chip_id) + cpumask_set_cpu(cpu, ret); + + of_node_put(dn); + } +} + +/* Store a suitable CPU to handle a hwirq in the ics->hwirq_cpu_map cache */ +static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq, + const cpumask_t *affinity) +{ + cpumask_var_t avail, newmask; + int ret = -ENOMEM, cpu, cpu_rover = 0, target; + int index = hwirq - ics->hwirq_start; + unsigned int nodeid; + + BUG_ON(index < 0 || index >= ics->count); + + if (!ics->hwirq_cpu_map) + return -ENOMEM; + + if (!distribute_irqs) { + ics->hwirq_cpu_map[hwirq - ics->hwirq_start] = xics_default_server; + return 0; + } + + /* Allocate needed CPU masks */ + if (!alloc_cpumask_var(&avail, GFP_KERNEL)) + goto ret; + if (!alloc_cpumask_var(&newmask, GFP_KERNEL)) + goto freeavail; + + /* Find PBus attached to the source of this IRQ */ + nodeid = (hwirq >> WSP_ICS_CHIP_SHIFT) & 0x3; /* 12:14 */ + + /* Find CPUs that could handle this IRQ */ + if (affinity) + cpumask_and(avail, cpu_online_mask, affinity); + else + cpumask_copy(avail, cpu_online_mask); + + /* Narrow selection down to logical CPUs on the same chip */ + cpus_on_chip(nodeid, avail, newmask); + + /* Ensure we haven't narrowed it down to 0 */ + if (unlikely(cpumask_empty(newmask))) { + if (unlikely(cpumask_empty(avail))) { + ret = -1; + goto out; + } + cpumask_copy(newmask, avail); + } + + /* Choose a CPU out of those we narrowed it down to in round robin */ + target = hwirq % cpumask_weight(newmask); + for_each_cpu(cpu, newmask) { + if (cpu_rover++ >= target) { + ics->hwirq_cpu_map[index] = get_hard_smp_processor_id(cpu); + ret = 0; + goto out; + } + } + + /* Shouldn't happen */ + WARN_ON(1); + +out: + free_cpumask_var(newmask); +freeavail: + 
free_cpumask_var(avail); +ret: + if (ret < 0) { + ics->hwirq_cpu_map[index] = cpumask_first(cpu_online_mask); + pr_warning("Error, falling hwirq 0x%x routing back to CPU %i\n", + hwirq, ics->hwirq_cpu_map[index]); + } + return ret; +} + +static void alloc_irq_map(struct wsp_ics *ics) +{ + int i; + + ics->hwirq_cpu_map = kmalloc(sizeof(int) * ics->count, GFP_KERNEL); + if (!ics->hwirq_cpu_map) { + pr_warning("Allocate hwirq_cpu_map failed, " + "IRQ balancing disabled\n"); + return; + } + + for (i=0; i < ics->count; i++) + ics->hwirq_cpu_map[i] = xics_default_server; +} + +static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq) +{ + int index = hwirq - ics->hwirq_start; + + BUG_ON(index < 0 || index >= ics->count); + + if (!ics->hwirq_cpu_map) + return xics_default_server; + + return ics->hwirq_cpu_map[index]; +} +#else /* !CONFIG_SMP */ +static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq, + const cpumask_t *affinity) +{ + return 0; +} + +static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq) +{ + return xics_default_server; +} + +static void alloc_irq_map(struct wsp_ics *ics) { } +#endif + +static void wsp_chip_unmask_irq(struct irq_data *d) +{ + unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); + struct wsp_ics *ics; + int server; + u64 xive; + + if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) + return; + + ics = d->chip_data; + if (WARN_ON(!ics)) + return; + + server = get_irq_server(ics, hw_irq); + + xive = wsp_ics_get_xive(ics, hw_irq); + xive = xive_set_server(xive, server); + xive = xive_set_priority(xive, DEFAULT_PRIORITY); + wsp_ics_set_xive(ics, hw_irq, xive); +} + +static unsigned int wsp_chip_startup(struct irq_data *d) +{ + /* unmask it */ + wsp_chip_unmask_irq(d); + return 0; +} + +static void wsp_mask_real_irq(unsigned int hw_irq, struct wsp_ics *ics) +{ + u64 xive; + + if (hw_irq == XICS_IPI) + return; + + if (WARN_ON(!ics)) + return; + xive = wsp_ics_get_xive(ics, hw_irq); + xive = xive_set_server(xive, xics_default_server); + xive = xive_set_priority(xive, LOWEST_PRIORITY); + wsp_ics_set_xive(ics, hw_irq, xive); +} + +static void wsp_chip_mask_irq(struct irq_data *d) +{ + unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); + struct wsp_ics *ics = d->chip_data; + + if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) + return; + + wsp_mask_real_irq(hw_irq, ics); +} + +static int wsp_chip_set_affinity(struct irq_data *d, + const struct cpumask *cpumask, bool force) +{ + unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); + struct wsp_ics *ics; + int ret; + u64 xive; + + if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) + return -1; + + ics = d->chip_data; + if (WARN_ON(!ics)) + return -1; + xive = wsp_ics_get_xive(ics, hw_irq); + + /* + * For the moment only implement delivery to all cpus or one cpu. 
+ * Get current irq_server for the given irq + */ + ret = cache_hwirq_map(ics, d->irq, cpumask); + if (ret == -1) { + char cpulist[128]; + cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); + pr_warning("%s: No online cpus in the mask %s for irq %d\n", + __func__, cpulist, d->irq); + return -1; + } else if (ret == -ENOMEM) { + pr_warning("%s: Out of memory\n", __func__); + return -1; + } + + xive = xive_set_server(xive, get_irq_server(ics, hw_irq)); + wsp_ics_set_xive(ics, hw_irq, xive); + + return 0; +} + +static struct irq_chip wsp_irq_chip = { + .name = "WSP ICS", + .irq_startup = wsp_chip_startup, + .irq_mask = wsp_chip_mask_irq, + .irq_unmask = wsp_chip_unmask_irq, + .irq_set_affinity = wsp_chip_set_affinity +}; + +static int wsp_ics_host_match(struct ics *ics, struct device_node *dn) +{ + /* All ICSs in the system implement a global irq number space, + * so match against them all. */ + return of_device_is_compatible(dn, "ibm,ppc-xics"); +} + +static int wsp_ics_match_hwirq(struct wsp_ics *wsp_ics, unsigned int hwirq) +{ + if (hwirq >= wsp_ics->hwirq_start && + hwirq < wsp_ics->hwirq_start + wsp_ics->count) + return 1; + + return 0; +} + +static int wsp_ics_map(struct ics *ics, unsigned int virq) +{ + struct wsp_ics *wsp_ics = to_wsp_ics(ics); + unsigned int hw_irq = virq_to_hw(virq); + unsigned long flags; + + if (!wsp_ics_match_hwirq(wsp_ics, hw_irq)) + return -ENOENT; + + irq_set_chip_and_handler(virq, &wsp_irq_chip, handle_fasteoi_irq); + + irq_set_chip_data(virq, wsp_ics); + + spin_lock_irqsave(&wsp_ics->lock, flags); + bitmap_allocate_region(wsp_ics->bitmap, hw_irq - wsp_ics->hwirq_start, 0); + spin_unlock_irqrestore(&wsp_ics->lock, flags); + + return 0; +} + +static void wsp_ics_mask_unknown(struct ics *ics, unsigned long hw_irq) +{ + struct wsp_ics *wsp_ics = to_wsp_ics(ics); + + if (!wsp_ics_match_hwirq(wsp_ics, hw_irq)) + return; + + pr_err("%s: IRQ %lu (real) is invalid, disabling it.\n", __func__, hw_irq); + wsp_mask_real_irq(hw_irq, wsp_ics); +} + +static long wsp_ics_get_server(struct ics *ics, unsigned long hw_irq) +{ + struct wsp_ics *wsp_ics = to_wsp_ics(ics); + + if (!wsp_ics_match_hwirq(wsp_ics, hw_irq)) + return -ENOENT; + + return get_irq_server(wsp_ics, hw_irq); +} + +/* HW Number allocation API */ + +static struct wsp_ics *wsp_ics_find_dn_ics(struct device_node *dn) +{ + struct device_node *iparent; + int i; + + iparent = of_irq_find_parent(dn); + if (!iparent) { + pr_err("wsp_ics: Failed to find interrupt parent!\n"); + return NULL; + } + + for(i = 0; i < num_ics; i++) { + if(ics_list[i].dn == iparent) + break; + } + + if (i >= num_ics) { + pr_err("wsp_ics: Unable to find parent bitmap!\n"); + return NULL; + } + + return &ics_list[i]; +} + +int wsp_ics_alloc_irq(struct device_node *dn, int num) +{ + struct wsp_ics *ics; + int order, offset; + + ics = wsp_ics_find_dn_ics(dn); + if (!ics) + return -ENODEV; + + /* Fast, but overly strict if num isn't a power of two */ + order = get_count_order(num); + + spin_lock_irq(&ics->lock); + offset = bitmap_find_free_region(ics->bitmap, ics->count, order); + spin_unlock_irq(&ics->lock); + + if (offset < 0) + return offset; + + return offset + ics->hwirq_start; +} + +void wsp_ics_free_irq(struct device_node *dn, unsigned int irq) +{ + struct wsp_ics *ics; + + ics = wsp_ics_find_dn_ics(dn); + if (WARN_ON(!ics)) + return; + + spin_lock_irq(&ics->lock); + bitmap_release_region(ics->bitmap, irq, 0); + spin_unlock_irq(&ics->lock); +} + +/* Initialisation */ + +static int __init wsp_ics_bitmap_setup(struct wsp_ics *ics, + 
struct device_node *dn) +{ + int len, i, j, size; + u32 start, count; + const u32 *p; + + size = BITS_TO_LONGS(ics->count) * sizeof(long); + ics->bitmap = kzalloc(size, GFP_KERNEL); + if (!ics->bitmap) { + pr_err("wsp_ics: ENOMEM allocating IRQ bitmap!\n"); + return -ENOMEM; + } + + spin_lock_init(&ics->lock); + + p = of_get_property(dn, "available-ranges", &len); + if (!p || !len) { + /* FIXME this should be a WARN() once mambo is updated */ + pr_err("wsp_ics: No available-ranges defined for %s\n", + dn->full_name); + return 0; + } + + if (len % (2 * sizeof(u32)) != 0) { + /* FIXME this should be a WARN() once mambo is updated */ + pr_err("wsp_ics: Invalid available-ranges for %s\n", + dn->full_name); + return 0; + } + + bitmap_fill(ics->bitmap, ics->count); + + for (i = 0; i < len / sizeof(u32); i += 2) { + start = of_read_number(p + i, 1); + count = of_read_number(p + i + 1, 1); + + pr_devel("%s: start: %d count: %d\n", __func__, start, count); + + if ((start + count) > (ics->hwirq_start + ics->count) || + start < ics->hwirq_start) { + pr_err("wsp_ics: Invalid range! -> %d to %d\n", + start, start + count); + break; + } + + for (j = 0; j < count; j++) + bitmap_release_region(ics->bitmap, + (start + j) - ics->hwirq_start, 0); + } + + /* Ensure LSIs are not available for allocation */ + bitmap_allocate_region(ics->bitmap, ics->lsi_base, + get_count_order(ics->lsi_count)); + + return 0; +} + +static int __init wsp_ics_setup(struct wsp_ics *ics, struct device_node *dn) +{ + u32 lsi_buid, msi_buid, msi_base, msi_count; + void __iomem *regs; + const u32 *p; + int rc, len, i; + u64 caps, buid; + + p = of_get_property(dn, "interrupt-ranges", &len); + if (!p || len < (2 * sizeof(u32))) { + pr_err("wsp_ics: No/bad interrupt-ranges found on %s\n", + dn->full_name); + return -ENOENT; + } + + if (len > (2 * sizeof(u32))) { + pr_err("wsp_ics: Multiple ics ranges not supported.\n"); + return -EINVAL; + } + + regs = of_iomap(dn, 0); + if (!regs) { + pr_err("wsp_ics: of_iomap(%s) failed\n", dn->full_name); + return -ENXIO; + } + + ics->hwirq_start = of_read_number(p, 1); + ics->count = of_read_number(p + 1, 1); + ics->regs = regs; + + ics->chip_id = wsp_get_chip_id(dn); + if (WARN_ON(ics->chip_id < 0)) + ics->chip_id = 0; + + /* Get some informations about the critter */ + caps = in_be64(ICS_INT_CAPS_REG(ics->regs)); + buid = in_be64(INT_SRC_LAYER_BUID_REG(ics->regs)); + ics->lsi_count = caps >> 56; + msi_count = (caps >> 44) & 0x7ff; + + /* Note: LSI BUID is 9 bits, but really only 3 are BUID and the + * rest is mixed in the interrupt number. We store the whole + * thing though + */ + lsi_buid = (buid >> 48) & 0x1ff; + ics->lsi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | lsi_buid << 5; + msi_buid = (buid >> 37) & 0x7; + msi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | msi_buid << 11; + + pr_info("wsp_ics: Found %s\n", dn->full_name); + pr_info("wsp_ics: irq range : 0x%06llx..0x%06llx\n", + ics->hwirq_start, ics->hwirq_start + ics->count - 1); + pr_info("wsp_ics: %4d LSIs : 0x%06x..0x%06x\n", + ics->lsi_count, ics->lsi_base, + ics->lsi_base + ics->lsi_count - 1); + pr_info("wsp_ics: %4d MSIs : 0x%06x..0x%06x\n", + msi_count, msi_base, + msi_base + msi_count - 1); + + /* Let's check the HW config is sane */ + if (ics->lsi_base < ics->hwirq_start || + (ics->lsi_base + ics->lsi_count) > (ics->hwirq_start + ics->count)) + pr_warning("wsp_ics: WARNING ! 
LSIs out of interrupt-ranges !\n"); + if (msi_base < ics->hwirq_start || + (msi_base + msi_count) > (ics->hwirq_start + ics->count)) + pr_warning("wsp_ics: WARNING ! MSIs out of interrupt-ranges !\n"); + + /* We don't check for overlap between LSI and MSI, which will happen + * if we use the same BUID, I'm not sure yet how legit that is. + */ + + rc = wsp_ics_bitmap_setup(ics, dn); + if (rc) { + iounmap(regs); + return rc; + } + + ics->dn = of_node_get(dn); + alloc_irq_map(ics); + + for(i = 0; i < ics->count; i++) + wsp_mask_real_irq(ics->hwirq_start + i, ics); + + ics->ics.map = wsp_ics_map; + ics->ics.mask_unknown = wsp_ics_mask_unknown; + ics->ics.get_server = wsp_ics_get_server; + ics->ics.host_match = wsp_ics_host_match; + + xics_register_ics(&ics->ics); + + return 0; +} + +static void __init wsp_ics_set_default_server(void) +{ + struct device_node *np; + u32 hwid; + + /* Find the server number for the boot cpu. */ + np = of_get_cpu_node(boot_cpuid, NULL); + BUG_ON(!np); + + hwid = get_hard_smp_processor_id(boot_cpuid); + + pr_info("wsp_ics: default server is %#x, CPU %s\n", hwid, np->full_name); + xics_default_server = hwid; + + of_node_put(np); +} + +static int __init wsp_ics_init(void) +{ + struct device_node *dn; + struct wsp_ics *ics; + int rc, found; + + wsp_ics_set_default_server(); + + found = 0; + for_each_compatible_node(dn, NULL, "ibm,ppc-xics") + found++; + + if (found == 0) { + pr_err("wsp_ics: No ICS's found!\n"); + return -ENODEV; + } + + ics_list = kmalloc(sizeof(*ics) * found, GFP_KERNEL); + if (!ics_list) { + pr_err("wsp_ics: No memory for structs.\n"); + return -ENOMEM; + } + + num_ics = 0; + ics = ics_list; + for_each_compatible_node(dn, NULL, "ibm,wsp-xics") { + rc = wsp_ics_setup(ics, dn); + if (rc == 0) { + ics++; + num_ics++; + } + } + + if (found != num_ics) { + pr_err("wsp_ics: Failed setting up %d ICS's\n", + found - num_ics); + return -1; + } + + return 0; +} + +void __init wsp_init_irq(void) +{ + wsp_ics_init(); + xics_init(); + + /* We need to patch our irq chip's EOI to point to the right ICP */ + wsp_irq_chip.irq_eoi = icp_ops->eoi; +} diff --git a/trunk/arch/powerpc/platforms/wsp/ics.h b/trunk/arch/powerpc/platforms/wsp/ics.h new file mode 100644 index 000000000000..e34d53102640 --- /dev/null +++ b/trunk/arch/powerpc/platforms/wsp/ics.h @@ -0,0 +1,20 @@ +/* + * Copyright 2009 IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef __ICS_H +#define __ICS_H + +#define XIVE_ADDR_MASK 0x7FFULL + +extern void wsp_init_irq(void); + +extern int wsp_ics_alloc_irq(struct device_node *dn, int num); +extern void wsp_ics_free_irq(struct device_node *dn, unsigned int irq); + +#endif /* __ICS_H */ diff --git a/trunk/arch/powerpc/platforms/wsp/opb_pic.c b/trunk/arch/powerpc/platforms/wsp/opb_pic.c new file mode 100644 index 000000000000..be05631a3c1c --- /dev/null +++ b/trunk/arch/powerpc/platforms/wsp/opb_pic.c @@ -0,0 +1,332 @@ +/* + * IBM Onboard Peripheral Bus Interrupt Controller + * + * Copyright 2010 Jack Miller, IBM Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#define OPB_NR_IRQS 32 + +#define OPB_MLSASIER 0x04 /* MLS Accumulated Status IER */ +#define OPB_MLSIR 0x50 /* MLS Interrupt Register */ +#define OPB_MLSIER 0x54 /* MLS Interrupt Enable Register */ +#define OPB_MLSIPR 0x58 /* MLS Interrupt Polarity Register */ +#define OPB_MLSIIR 0x5c /* MLS Interrupt Inputs Register */ + +static int opb_index = 0; + +struct opb_pic { + struct irq_host *host; + void *regs; + int index; + spinlock_t lock; +}; + +static u32 opb_in(struct opb_pic *opb, int offset) +{ + return in_be32(opb->regs + offset); +} + +static void opb_out(struct opb_pic *opb, int offset, u32 val) +{ + out_be32(opb->regs + offset, val); +} + +static void opb_unmask_irq(struct irq_data *d) +{ + struct opb_pic *opb; + unsigned long flags; + u32 ier, bitset; + + opb = d->chip_data; + bitset = (1 << (31 - irqd_to_hwirq(d))); + + spin_lock_irqsave(&opb->lock, flags); + + ier = opb_in(opb, OPB_MLSIER); + opb_out(opb, OPB_MLSIER, ier | bitset); + ier = opb_in(opb, OPB_MLSIER); + + spin_unlock_irqrestore(&opb->lock, flags); +} + +static void opb_mask_irq(struct irq_data *d) +{ + struct opb_pic *opb; + unsigned long flags; + u32 ier, mask; + + opb = d->chip_data; + mask = ~(1 << (31 - irqd_to_hwirq(d))); + + spin_lock_irqsave(&opb->lock, flags); + + ier = opb_in(opb, OPB_MLSIER); + opb_out(opb, OPB_MLSIER, ier & mask); + ier = opb_in(opb, OPB_MLSIER); // Flush posted writes + + spin_unlock_irqrestore(&opb->lock, flags); +} + +static void opb_ack_irq(struct irq_data *d) +{ + struct opb_pic *opb; + unsigned long flags; + u32 bitset; + + opb = d->chip_data; + bitset = (1 << (31 - irqd_to_hwirq(d))); + + spin_lock_irqsave(&opb->lock, flags); + + opb_out(opb, OPB_MLSIR, bitset); + opb_in(opb, OPB_MLSIR); // Flush posted writes + + spin_unlock_irqrestore(&opb->lock, flags); +} + +static void opb_mask_ack_irq(struct irq_data *d) +{ + struct opb_pic *opb; + unsigned long flags; + u32 bitset; + u32 ier, ir; + + opb = d->chip_data; + bitset = (1 << (31 - irqd_to_hwirq(d))); + + spin_lock_irqsave(&opb->lock, flags); + + ier = opb_in(opb, OPB_MLSIER); + opb_out(opb, OPB_MLSIER, ier & ~bitset); + ier = opb_in(opb, OPB_MLSIER); // Flush posted writes + + opb_out(opb, OPB_MLSIR, bitset); + ir = opb_in(opb, OPB_MLSIR); // Flush posted writes + + spin_unlock_irqrestore(&opb->lock, flags); +} + +static int opb_set_irq_type(struct irq_data *d, unsigned int flow) +{ + struct opb_pic *opb; + unsigned long flags; + int invert, ipr, mask, bit; + + opb = d->chip_data; + + /* The only information we're interested in in the type is whether it's + * a high or low trigger. For high triggered interrupts, the polarity + * set for it in the MLS Interrupt Polarity Register is 0, for low + * interrupts it's 1 so that the proper input in the MLS Interrupt Input + * Register is interrupted as asserting the interrupt. */ + + switch (flow) { + case IRQ_TYPE_NONE: + opb_mask_irq(d); + return 0; + + case IRQ_TYPE_LEVEL_HIGH: + invert = 0; + break; + + case IRQ_TYPE_LEVEL_LOW: + invert = 1; + break; + + default: + return -EINVAL; + } + + bit = (1 << (31 - irqd_to_hwirq(d))); + mask = ~bit; + + spin_lock_irqsave(&opb->lock, flags); + + ipr = opb_in(opb, OPB_MLSIPR); + ipr = (ipr & mask) | (invert ? 
bit : 0); + opb_out(opb, OPB_MLSIPR, ipr); + ipr = opb_in(opb, OPB_MLSIPR); // Flush posted writes + + spin_unlock_irqrestore(&opb->lock, flags); + + /* Record the type in the interrupt descriptor */ + irqd_set_trigger_type(d, flow); + + return 0; +} + +static struct irq_chip opb_irq_chip = { + .name = "OPB", + .irq_mask = opb_mask_irq, + .irq_unmask = opb_unmask_irq, + .irq_mask_ack = opb_mask_ack_irq, + .irq_ack = opb_ack_irq, + .irq_set_type = opb_set_irq_type +}; + +static int opb_host_map(struct irq_host *host, unsigned int virq, + irq_hw_number_t hwirq) +{ + struct opb_pic *opb; + + opb = host->host_data; + + /* Most of the important stuff is handled by the generic host code, like + * the lookup, so just attach some info to the virtual irq */ + + irq_set_chip_data(virq, opb); + irq_set_chip_and_handler(virq, &opb_irq_chip, handle_level_irq); + irq_set_irq_type(virq, IRQ_TYPE_NONE); + + return 0; +} + +static int opb_host_xlate(struct irq_host *host, struct device_node *dn, + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_type) +{ + /* Interrupt size must == 2 */ + BUG_ON(intsize != 2); + *out_hwirq = intspec[0]; + *out_type = intspec[1]; + return 0; +} + +static struct irq_host_ops opb_host_ops = { + .map = opb_host_map, + .xlate = opb_host_xlate, +}; + +irqreturn_t opb_irq_handler(int irq, void *private) +{ + struct opb_pic *opb; + u32 ir, src, subvirq; + + opb = (struct opb_pic *) private; + + /* Read the OPB MLS Interrupt Register for + * asserted interrupts */ + ir = opb_in(opb, OPB_MLSIR); + if (!ir) + return IRQ_NONE; + + do { + /* Get 1 - 32 source, *NOT* bit */ + src = 32 - ffs(ir); + + /* Translate from the OPB's conception of interrupt number to + * Linux's virtual IRQ */ + + subvirq = irq_linear_revmap(opb->host, src); + + generic_handle_irq(subvirq); + } while ((ir = opb_in(opb, OPB_MLSIR))); + + return IRQ_HANDLED; +} + +struct opb_pic *opb_pic_init_one(struct device_node *dn) +{ + struct opb_pic *opb; + struct resource res; + + if (of_address_to_resource(dn, 0, &res)) { + printk(KERN_ERR "opb: Couldn't translate resource\n"); + return NULL; + } + + opb = kzalloc(sizeof(struct opb_pic), GFP_KERNEL); + if (!opb) { + printk(KERN_ERR "opb: Failed to allocate opb struct!\n"); + return NULL; + } + + /* Get access to the OPB MMIO registers */ + opb->regs = ioremap(res.start + 0x10000, 0x1000); + if (!opb->regs) { + printk(KERN_ERR "opb: Failed to allocate register space!\n"); + goto free_opb; + } + + /* Allocate an irq host so that Linux knows that despite only + * having one interrupt to issue, we're the controller for multiple + * hardware IRQs, so later we can lookup their virtual IRQs. 
*/ + + opb->host = irq_alloc_host(dn, IRQ_HOST_MAP_LINEAR, + OPB_NR_IRQS, &opb_host_ops, -1); + + if (!opb->host) { + printk(KERN_ERR "opb: Failed to allocate IRQ host!\n"); + goto free_regs; + } + + opb->index = opb_index++; + spin_lock_init(&opb->lock); + opb->host->host_data = opb; + + /* Disable all interrupts by default */ + opb_out(opb, OPB_MLSASIER, 0); + opb_out(opb, OPB_MLSIER, 0); + + /* ACK any interrupts left by FW */ + opb_out(opb, OPB_MLSIR, 0xFFFFFFFF); + + return opb; + +free_regs: + iounmap(opb->regs); +free_opb: + kfree(opb); + return NULL; +} + +void __init opb_pic_init(void) +{ + struct device_node *dn; + struct opb_pic *opb; + int virq; + int rc; + + /* Call init_one for each OPB device */ + for_each_compatible_node(dn, NULL, "ibm,opb") { + + /* Fill in an OPB struct */ + opb = opb_pic_init_one(dn); + if (!opb) { + printk(KERN_WARNING "opb: Failed to init node, skipped!\n"); + continue; + } + + /* Map / get opb's hardware virtual irq */ + virq = irq_of_parse_and_map(dn, 0); + if (virq <= 0) { + printk("opb: irq_op_parse_and_map failed!\n"); + continue; + } + + /* Attach opb interrupt handler to new virtual IRQ */ + rc = request_irq(virq, opb_irq_handler, 0, "OPB LS Cascade", opb); + if (rc) { + printk("opb: request_irq failed: %d\n", rc); + continue; + } + + printk("OPB%d init with %d IRQs at %p\n", opb->index, + OPB_NR_IRQS, opb->regs); + } +} diff --git a/trunk/arch/powerpc/platforms/wsp/psr2.c b/trunk/arch/powerpc/platforms/wsp/psr2.c new file mode 100644 index 000000000000..40f28916ff6c --- /dev/null +++ b/trunk/arch/powerpc/platforms/wsp/psr2.c @@ -0,0 +1,95 @@ +/* + * Copyright 2008-2011, IBM Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "ics.h" +#include "wsp.h" + + +static void psr2_spin(void) +{ + hard_irq_disable(); + for (;;) ; +} + +static void psr2_restart(char *cmd) +{ + psr2_spin(); +} + +static int psr2_probe_devices(void) +{ + struct device_node *np; + + /* Our RTC is a ds1500. 
It seems to be programmatically compatible + * with the ds1511 for which we have a driver so let's use that + */ + np = of_find_compatible_node(NULL, NULL, "dallas,ds1500"); + if (np != NULL) { + struct resource res; + if (of_address_to_resource(np, 0, &res) == 0) + platform_device_register_simple("ds1511", 0, &res, 1); + } + return 0; +} +machine_arch_initcall(psr2_md, psr2_probe_devices); + +static void __init psr2_setup_arch(void) +{ + /* init to some ~sane value until calibrate_delay() runs */ + loops_per_jiffy = 50000000; + + scom_init_wsp(); + + /* Setup SMP callback */ +#ifdef CONFIG_SMP + a2_setup_smp(); +#endif +} + +static int __init psr2_probe(void) +{ + unsigned long root = of_get_flat_dt_root(); + + if (!of_flat_dt_is_compatible(root, "ibm,psr2")) + return 0; + + return 1; +} + +static void __init psr2_init_irq(void) +{ + wsp_init_irq(); + opb_pic_init(); +} + +define_machine(psr2_md) { + .name = "PSR2 A2", + .probe = psr2_probe, + .setup_arch = psr2_setup_arch, + .restart = psr2_restart, + .power_off = psr2_spin, + .halt = psr2_spin, + .calibrate_decr = generic_calibrate_decr, + .init_IRQ = psr2_init_irq, + .progress = udbg_progress, + .power_save = book3e_idle, +}; diff --git a/trunk/arch/powerpc/platforms/wsp/scom_smp.c b/trunk/arch/powerpc/platforms/wsp/scom_smp.c new file mode 100644 index 000000000000..141e78032097 --- /dev/null +++ b/trunk/arch/powerpc/platforms/wsp/scom_smp.c @@ -0,0 +1,427 @@ +/* + * SCOM support for A2 platforms + * + * Copyright 2007-2011 Benjamin Herrenschmidt, David Gibson, + * Michael Ellerman, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version.
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "wsp.h" + +#define SCOM_RAMC 0x2a /* Ram Command */ +#define SCOM_RAMC_TGT1_EXT 0x80000000 +#define SCOM_RAMC_SRC1_EXT 0x40000000 +#define SCOM_RAMC_SRC2_EXT 0x20000000 +#define SCOM_RAMC_SRC3_EXT 0x10000000 +#define SCOM_RAMC_ENABLE 0x00080000 +#define SCOM_RAMC_THREADSEL 0x00060000 +#define SCOM_RAMC_EXECUTE 0x00010000 +#define SCOM_RAMC_MSR_OVERRIDE 0x00008000 +#define SCOM_RAMC_MSR_PR 0x00004000 +#define SCOM_RAMC_MSR_GS 0x00002000 +#define SCOM_RAMC_FORCE 0x00001000 +#define SCOM_RAMC_FLUSH 0x00000800 +#define SCOM_RAMC_INTERRUPT 0x00000004 +#define SCOM_RAMC_ERROR 0x00000002 +#define SCOM_RAMC_DONE 0x00000001 +#define SCOM_RAMI 0x29 /* Ram Instruction */ +#define SCOM_RAMIC 0x28 /* Ram Instruction and Command */ +#define SCOM_RAMIC_INSN 0xffffffff00000000 +#define SCOM_RAMD 0x2d /* Ram Data */ +#define SCOM_RAMDH 0x2e /* Ram Data High */ +#define SCOM_RAMDL 0x2f /* Ram Data Low */ +#define SCOM_PCCR0 0x33 /* PC Configuration Register 0 */ +#define SCOM_PCCR0_ENABLE_DEBUG 0x80000000 +#define SCOM_PCCR0_ENABLE_RAM 0x40000000 +#define SCOM_THRCTL 0x30 /* Thread Control and Status */ +#define SCOM_THRCTL_T0_STOP 0x80000000 +#define SCOM_THRCTL_T1_STOP 0x40000000 +#define SCOM_THRCTL_T2_STOP 0x20000000 +#define SCOM_THRCTL_T3_STOP 0x10000000 +#define SCOM_THRCTL_T0_STEP 0x08000000 +#define SCOM_THRCTL_T1_STEP 0x04000000 +#define SCOM_THRCTL_T2_STEP 0x02000000 +#define SCOM_THRCTL_T3_STEP 0x01000000 +#define SCOM_THRCTL_T0_RUN 0x00800000 +#define SCOM_THRCTL_T1_RUN 0x00400000 +#define SCOM_THRCTL_T2_RUN 0x00200000 +#define SCOM_THRCTL_T3_RUN 0x00100000 +#define SCOM_THRCTL_T0_PM 0x00080000 +#define SCOM_THRCTL_T1_PM 0x00040000 +#define SCOM_THRCTL_T2_PM 0x00020000 +#define SCOM_THRCTL_T3_PM 0x00010000 +#define SCOM_THRCTL_T0_UDE 0x00008000 +#define SCOM_THRCTL_T1_UDE 0x00004000 +#define SCOM_THRCTL_T2_UDE 0x00002000 +#define SCOM_THRCTL_T3_UDE 0x00001000 +#define SCOM_THRCTL_ASYNC_DIS 0x00000800 +#define SCOM_THRCTL_TB_DIS 0x00000400 +#define SCOM_THRCTL_DEC_DIS 0x00000200 +#define SCOM_THRCTL_AND 0x31 /* Thread Control and Status */ +#define SCOM_THRCTL_OR 0x32 /* Thread Control and Status */ + + +static DEFINE_PER_CPU(scom_map_t, scom_ptrs); + +static scom_map_t get_scom(int cpu, struct device_node *np, int *first_thread) +{ + scom_map_t scom = per_cpu(scom_ptrs, cpu); + int tcpu; + + if (scom_map_ok(scom)) { + *first_thread = 0; + return scom; + } + + *first_thread = 1; + + scom = scom_map_device(np, 0); + + for (tcpu = cpu_first_thread_sibling(cpu); + tcpu <= cpu_last_thread_sibling(cpu); tcpu++) + per_cpu(scom_ptrs, tcpu) = scom; + + /* Hack: for the boot core, this will actually get called on + * the second thread up, not the first so our test above will + * set first_thread incorrectly. 
*/ + if (cpu_first_thread_sibling(cpu) == 0) + *first_thread = 0; + + return scom; +} + +static int a2_scom_ram(scom_map_t scom, int thread, u32 insn, int extmask) +{ + u64 cmd, mask, val; + int n = 0; + + cmd = ((u64)insn << 32) | (((u64)extmask & 0xf) << 28) + | ((u64)thread << 17) | SCOM_RAMC_ENABLE | SCOM_RAMC_EXECUTE; + mask = SCOM_RAMC_DONE | SCOM_RAMC_INTERRUPT | SCOM_RAMC_ERROR; + + scom_write(scom, SCOM_RAMIC, cmd); + + while (!((val = scom_read(scom, SCOM_RAMC)) & mask)) { + pr_devel("Waiting on RAMC = 0x%llx\n", val); + if (++n == 3) { + pr_err("RAMC timeout on instruction 0x%08x, thread %d\n", + insn, thread); + return -1; + } + } + + if (val & SCOM_RAMC_INTERRUPT) { + pr_err("RAMC interrupt on instruction 0x%08x, thread %d\n", + insn, thread); + return -SCOM_RAMC_INTERRUPT; + } + + if (val & SCOM_RAMC_ERROR) { + pr_err("RAMC error on instruction 0x%08x, thread %d\n", + insn, thread); + return -SCOM_RAMC_ERROR; + } + + return 0; +} + +static int a2_scom_getgpr(scom_map_t scom, int thread, int gpr, int alt, + u64 *out_gpr) +{ + int rc; + + /* or rN, rN, rN */ + u32 insn = 0x7c000378 | (gpr << 21) | (gpr << 16) | (gpr << 11); + rc = a2_scom_ram(scom, thread, insn, alt ? 0xf : 0x0); + if (rc) + return rc; + + *out_gpr = scom_read(scom, SCOM_RAMD); + + return 0; +} + +static int a2_scom_getspr(scom_map_t scom, int thread, int spr, u64 *out_spr) +{ + int rc, sprhi, sprlo; + u32 insn; + + sprhi = spr >> 5; + sprlo = spr & 0x1f; + insn = 0x7c2002a6 | (sprlo << 16) | (sprhi << 11); /* mfspr r1,spr */ + + if (spr == 0x0ff0) + insn = 0x7c2000a6; /* mfmsr r1 */ + + rc = a2_scom_ram(scom, thread, insn, 0xf); + if (rc) + return rc; + return a2_scom_getgpr(scom, thread, 1, 1, out_spr); +} + +static int a2_scom_setgpr(scom_map_t scom, int thread, int gpr, + int alt, u64 val) +{ + u32 lis = 0x3c000000 | (gpr << 21); + u32 li = 0x38000000 | (gpr << 21); + u32 oris = 0x64000000 | (gpr << 21) | (gpr << 16); + u32 ori = 0x60000000 | (gpr << 21) | (gpr << 16); + u32 rldicr32 = 0x780007c6 | (gpr << 21) | (gpr << 16); + u32 highest = val >> 48; + u32 higher = (val >> 32) & 0xffff; + u32 high = (val >> 16) & 0xffff; + u32 low = val & 0xffff; + int lext = alt ? 0x8 : 0x0; + int oext = alt ? 
0xf : 0x0; + int rc = 0; + + if (highest) + rc |= a2_scom_ram(scom, thread, lis | highest, lext); + + if (higher) { + if (highest) + rc |= a2_scom_ram(scom, thread, oris | higher, oext); + else + rc |= a2_scom_ram(scom, thread, li | higher, lext); + } + + if (highest || higher) + rc |= a2_scom_ram(scom, thread, rldicr32, oext); + + if (high) { + if (highest || higher) + rc |= a2_scom_ram(scom, thread, oris | high, oext); + else + rc |= a2_scom_ram(scom, thread, lis | high, lext); + } + + if (highest || higher || high) + rc |= a2_scom_ram(scom, thread, ori | low, oext); + else + rc |= a2_scom_ram(scom, thread, li | low, lext); + + return rc; +} + +static int a2_scom_setspr(scom_map_t scom, int thread, int spr, u64 val) +{ + int sprhi = spr >> 5; + int sprlo = spr & 0x1f; + /* mtspr spr, r1 */ + u32 insn = 0x7c2003a6 | (sprlo << 16) | (sprhi << 11); + + if (spr == 0x0ff0) + insn = 0x7c200124; /* mtmsr r1 */ + + if (a2_scom_setgpr(scom, thread, 1, 1, val)) + return -1; + + return a2_scom_ram(scom, thread, insn, 0xf); +} + +static int a2_scom_initial_tlb(scom_map_t scom, int thread) +{ + extern u32 a2_tlbinit_code_start[], a2_tlbinit_code_end[]; + extern u32 a2_tlbinit_after_iprot_flush[]; + extern u32 a2_tlbinit_after_linear_map[]; + u32 assoc, entries, i; + u64 epn, tlbcfg; + u32 *p; + int rc; + + /* Invalidate all entries (including iprot) */ + + rc = a2_scom_getspr(scom, thread, SPRN_TLB0CFG, &tlbcfg); + if (rc) + goto scom_fail; + entries = tlbcfg & TLBnCFG_N_ENTRY; + assoc = (tlbcfg & TLBnCFG_ASSOC) >> 24; + epn = 0; + + /* Set MMUCR2 to enable 4K, 64K, 1M, 16M and 1G pages */ + a2_scom_setspr(scom, thread, SPRN_MMUCR2, 0x000a7531); + /* Set MMUCR3 to write all thids bit to the TLB */ + a2_scom_setspr(scom, thread, SPRN_MMUCR3, 0x0000000f); + + /* Set MAS1 for 1G page size, and MAS2 to our initial EPN */ + a2_scom_setspr(scom, thread, SPRN_MAS1, MAS1_TSIZE(BOOK3E_PAGESZ_1GB)); + a2_scom_setspr(scom, thread, SPRN_MAS2, epn); + for (i = 0; i < entries; i++) { + + a2_scom_setspr(scom, thread, SPRN_MAS0, MAS0_ESEL(i % assoc)); + + /* tlbwe */ + rc = a2_scom_ram(scom, thread, 0x7c0007a4, 0); + if (rc) + goto scom_fail; + + /* Next entry is new address? */ + if((i + 1) % assoc == 0) { + epn += (1 << 30); + a2_scom_setspr(scom, thread, SPRN_MAS2, epn); + } + } + + /* Setup args for linear mapping */ + rc = a2_scom_setgpr(scom, thread, 3, 0, MAS0_TLBSEL(0)); + if (rc) + goto scom_fail; + + /* Linear mapping */ + for (p = a2_tlbinit_code_start; p < a2_tlbinit_after_linear_map; p++) { + rc = a2_scom_ram(scom, thread, *p, 0); + if (rc) + goto scom_fail; + } + + /* + * For the boot thread, between the linear mapping and the debug + * mappings there is a loop to flush iprot mappings. Ramming doesn't do + * branches, but the secondary threads don't need to be nearly as smart + * (i.e. we don't need to worry about invalidating the mapping we're + * standing on). + */ + + /* Debug mappings. 
Expects r11 = MAS0 from linear map (set above) */
+ for (p = a2_tlbinit_after_iprot_flush; p < a2_tlbinit_code_end; p++) {
+ rc = a2_scom_ram(scom, thread, *p, 0);
+ if (rc)
+ goto scom_fail;
+ }
+
+scom_fail:
+ if (rc)
+ pr_err("Setting up initial TLB failed, err %d\n", rc);
+
+ if (rc == -SCOM_RAMC_INTERRUPT) {
+ /* Interrupt, dump some status */
+ int rc[10];
+ u64 iar, srr0, srr1, esr, mas0, mas1, mas2, mas7_3, mas8, ccr2;
+ rc[0] = a2_scom_getspr(scom, thread, SPRN_IAR, &iar);
+ rc[1] = a2_scom_getspr(scom, thread, SPRN_SRR0, &srr0);
+ rc[2] = a2_scom_getspr(scom, thread, SPRN_SRR1, &srr1);
+ rc[3] = a2_scom_getspr(scom, thread, SPRN_ESR, &esr);
+ rc[4] = a2_scom_getspr(scom, thread, SPRN_MAS0, &mas0);
+ rc[5] = a2_scom_getspr(scom, thread, SPRN_MAS1, &mas1);
+ rc[6] = a2_scom_getspr(scom, thread, SPRN_MAS2, &mas2);
+ rc[7] = a2_scom_getspr(scom, thread, SPRN_MAS7_MAS3, &mas7_3);
+ rc[8] = a2_scom_getspr(scom, thread, SPRN_MAS8, &mas8);
+ rc[9] = a2_scom_getspr(scom, thread, SPRN_A2_CCR2, &ccr2);
+ pr_err(" -> retrieved IAR =0x%llx (err %d)\n", iar, rc[0]);
+ pr_err(" retrieved SRR0=0x%llx (err %d)\n", srr0, rc[1]);
+ pr_err(" retrieved SRR1=0x%llx (err %d)\n", srr1, rc[2]);
+ pr_err(" retrieved ESR =0x%llx (err %d)\n", esr, rc[3]);
+ pr_err(" retrieved MAS0=0x%llx (err %d)\n", mas0, rc[4]);
+ pr_err(" retrieved MAS1=0x%llx (err %d)\n", mas1, rc[5]);
+ pr_err(" retrieved MAS2=0x%llx (err %d)\n", mas2, rc[6]);
+ pr_err(" retrieved MS73=0x%llx (err %d)\n", mas7_3, rc[7]);
+ pr_err(" retrieved MAS8=0x%llx (err %d)\n", mas8, rc[8]);
+ pr_err(" retrieved CCR2=0x%llx (err %d)\n", ccr2, rc[9]);
+ }
+
+ return rc;
+}
+
+int __devinit a2_scom_startup_cpu(unsigned int lcpu, int thr_idx,
+ struct device_node *np)
+{
+ u64 init_iar, init_msr, init_ccr2;
+ unsigned long start_here;
+ int rc, core_setup;
+ scom_map_t scom;
+ u64 pccr0;
+
+ scom = get_scom(lcpu, np, &core_setup);
+ if (!scom) {
+ printk(KERN_ERR "Couldn't map SCOM for CPU%d\n", lcpu);
+ return -1;
+ }
+
+ pr_devel("Bringing up CPU%d using SCOM...\n", lcpu);
+
+ pccr0 = scom_read(scom, SCOM_PCCR0);
+ scom_write(scom, SCOM_PCCR0, pccr0 | SCOM_PCCR0_ENABLE_DEBUG |
+ SCOM_PCCR0_ENABLE_RAM);
+
+ /* Stop the thread with THRCTL. If we are setting up the TLB we stop all
+ * threads. We also disable asynchronous interrupts while RAMing.
+ */
+ if (core_setup)
+ scom_write(scom, SCOM_THRCTL_OR,
+ SCOM_THRCTL_T0_STOP |
+ SCOM_THRCTL_T1_STOP |
+ SCOM_THRCTL_T2_STOP |
+ SCOM_THRCTL_T3_STOP |
+ SCOM_THRCTL_ASYNC_DIS);
+ else
+ scom_write(scom, SCOM_THRCTL_OR, SCOM_THRCTL_T0_STOP >> thr_idx);
+
+ /* Flush its pipeline just in case */
+ scom_write(scom, SCOM_RAMC, ((u64)thr_idx << 17) |
+ SCOM_RAMC_FLUSH | SCOM_RAMC_ENABLE);
+
+ a2_scom_getspr(scom, thr_idx, SPRN_IAR, &init_iar);
+ a2_scom_getspr(scom, thr_idx, 0x0ff0, &init_msr);
+ a2_scom_getspr(scom, thr_idx, SPRN_A2_CCR2, &init_ccr2);
+
+ /* Set MSR to MSR_CM (0x0ff0 is the magic SPR number used for the MSR) */
+ rc = a2_scom_setspr(scom, thr_idx, 0x0ff0, MSR_CM);
+ if (rc) {
+ pr_err("Failed to set MSR! err %d\n", rc);
+ return rc;
+ }
+
+ /* RAM in a sync/isync for the sake of it */
+ a2_scom_ram(scom, thr_idx, 0x7c0004ac, 0);
+ a2_scom_ram(scom, thr_idx, 0x4c00012c, 0);
+
+ if (core_setup) {
+ pr_devel("CPU%d is first thread in core, initializing TLB...\n",
+ lcpu);
+ rc = a2_scom_initial_tlb(scom, thr_idx);
+ if (rc)
+ goto fail;
+ }
+
+ start_here = *(unsigned long *)(core_setup ?
generic_secondary_smp_init + : generic_secondary_thread_init); + pr_devel("CPU%d entry point at 0x%lx...\n", lcpu, start_here); + + rc |= a2_scom_setspr(scom, thr_idx, SPRN_IAR, start_here); + rc |= a2_scom_setgpr(scom, thr_idx, 3, 0, + get_hard_smp_processor_id(lcpu)); + /* + * Tell book3e_secondary_core_init not to set up the TLB, we've + * already done that. + */ + rc |= a2_scom_setgpr(scom, thr_idx, 4, 0, 1); + + rc |= a2_scom_setspr(scom, thr_idx, SPRN_TENS, 0x1 << thr_idx); + + scom_write(scom, SCOM_RAMC, 0); + scom_write(scom, SCOM_THRCTL_AND, ~(SCOM_THRCTL_T0_STOP >> thr_idx)); + scom_write(scom, SCOM_PCCR0, pccr0); +fail: + pr_devel(" SCOM initialization %s\n", rc ? "failed" : "succeeded"); + if (rc) { + pr_err("Old IAR=0x%08llx MSR=0x%08llx CCR2=0x%08llx\n", + init_iar, init_msr, init_ccr2); + } + + return rc; +} diff --git a/trunk/arch/powerpc/platforms/wsp/scom_wsp.c b/trunk/arch/powerpc/platforms/wsp/scom_wsp.c new file mode 100644 index 000000000000..4052e2259f30 --- /dev/null +++ b/trunk/arch/powerpc/platforms/wsp/scom_wsp.c @@ -0,0 +1,77 @@ +/* + * SCOM backend for WSP + * + * Copyright 2010 Benjamin Herrenschmidt, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "wsp.h" + + +static scom_map_t wsp_scom_map(struct device_node *dev, u64 reg, u64 count) +{ + struct resource r; + u64 xscom_addr; + + if (!of_get_property(dev, "scom-controller", NULL)) { + pr_err("%s: device %s is not a SCOM controller\n", + __func__, dev->full_name); + return SCOM_MAP_INVALID; + } + + if (of_address_to_resource(dev, 0, &r)) { + pr_debug("Failed to find SCOM controller address\n"); + return 0; + } + + /* Transform the SCOM address into an XSCOM offset */ + xscom_addr = ((reg & 0x7f000000) >> 1) | ((reg & 0xfffff) << 3); + + return (scom_map_t)ioremap(r.start + xscom_addr, count << 3); +} + +static void wsp_scom_unmap(scom_map_t map) +{ + iounmap((void *)map); +} + +static u64 wsp_scom_read(scom_map_t map, u32 reg) +{ + u64 __iomem *addr = (u64 __iomem *)map; + + return in_be64(addr + reg); +} + +static void wsp_scom_write(scom_map_t map, u32 reg, u64 value) +{ + u64 __iomem *addr = (u64 __iomem *)map; + + return out_be64(addr + reg, value); +} + +static const struct scom_controller wsp_scom_controller = { + .map = wsp_scom_map, + .unmap = wsp_scom_unmap, + .read = wsp_scom_read, + .write = wsp_scom_write +}; + +void scom_init_wsp(void) +{ + scom_init(&wsp_scom_controller); +} diff --git a/trunk/arch/powerpc/platforms/wsp/setup.c b/trunk/arch/powerpc/platforms/wsp/setup.c new file mode 100644 index 000000000000..11ac2f05e01c --- /dev/null +++ b/trunk/arch/powerpc/platforms/wsp/setup.c @@ -0,0 +1,36 @@ +/* + * Copyright 2010 Michael Ellerman, IBM Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include + +#include "wsp.h" + +/* + * Find chip-id by walking up device tree looking for ibm,wsp-chip-id property. + * Won't work for nodes that are not a descendant of a wsp node. 
+ */ +int wsp_get_chip_id(struct device_node *dn) +{ + const u32 *p; + int rc; + + /* Start looking at the specified node, not its parent */ + dn = of_node_get(dn); + while (dn && !(p = of_get_property(dn, "ibm,wsp-chip-id", NULL))) + dn = of_get_next_parent(dn); + + if (!dn) + return -1; + + rc = *p; + of_node_put(dn); + + return rc; +} diff --git a/trunk/arch/powerpc/platforms/wsp/smp.c b/trunk/arch/powerpc/platforms/wsp/smp.c new file mode 100644 index 000000000000..9d20fa9d3710 --- /dev/null +++ b/trunk/arch/powerpc/platforms/wsp/smp.c @@ -0,0 +1,88 @@ +/* + * SMP Support for A2 platforms + * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "ics.h" +#include "wsp.h" + +static void __devinit smp_a2_setup_cpu(int cpu) +{ + doorbell_setup_this_cpu(); + + if (cpu != boot_cpuid) + xics_setup_cpu(); +} + +int __devinit smp_a2_kick_cpu(int nr) +{ + const char *enable_method; + struct device_node *np; + int thr_idx; + + if (nr < 0 || nr >= NR_CPUS) + return -ENOENT; + + np = of_get_cpu_node(nr, &thr_idx); + if (!np) + return -ENODEV; + + enable_method = of_get_property(np, "enable-method", NULL); + pr_devel("CPU%d has enable-method: \"%s\"\n", nr, enable_method); + + if (!enable_method) { + printk(KERN_ERR "CPU%d has no enable-method\n", nr); + return -ENOENT; + } else if (strcmp(enable_method, "ibm,a2-scom") == 0) { + if (a2_scom_startup_cpu(nr, thr_idx, np)) + return -1; + } else { + printk(KERN_ERR "CPU%d: Don't understand enable-method \"%s\"\n", + nr, enable_method); + return -EINVAL; + } + + /* + * The processor is currently spinning, waiting for the + * cpu_start field to become non-zero After we set cpu_start, + * the processor will continue on to secondary_start + */ + paca[nr].cpu_start = 1; + + return 0; +} + +static int __init smp_a2_probe(void) +{ + return cpus_weight(cpu_possible_map); +} + +static struct smp_ops_t a2_smp_ops = { + .message_pass = smp_muxed_ipi_message_pass, + .cause_ipi = doorbell_cause_ipi, + .probe = smp_a2_probe, + .kick_cpu = smp_a2_kick_cpu, + .setup_cpu = smp_a2_setup_cpu, +}; + +void __init a2_setup_smp(void) +{ + smp_ops = &a2_smp_ops; +} diff --git a/trunk/arch/powerpc/platforms/wsp/wsp.h b/trunk/arch/powerpc/platforms/wsp/wsp.h new file mode 100644 index 000000000000..7c3e087fd2f2 --- /dev/null +++ b/trunk/arch/powerpc/platforms/wsp/wsp.h @@ -0,0 +1,17 @@ +#ifndef __WSP_H +#define __WSP_H + +#include + +extern void wsp_setup_pci(void); +extern void scom_init_wsp(void); + +extern void a2_setup_smp(void); +extern int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx, + struct device_node *np); +int smp_a2_cpu_bootable(unsigned int nr); +int __devinit smp_a2_kick_cpu(int nr); + +void opb_pic_init(void); + +#endif /* __WSP_H */ diff --git a/trunk/arch/powerpc/sysdev/Kconfig b/trunk/arch/powerpc/sysdev/Kconfig index 396582835cb5..d775fd148d13 100644 --- a/trunk/arch/powerpc/sysdev/Kconfig +++ b/trunk/arch/powerpc/sysdev/Kconfig @@ -12,3 +12,13 @@ config PPC_MSI_BITMAP depends on PCI_MSI default y if MPIC default y if FSL_PCI + +source "arch/powerpc/sysdev/xics/Kconfig" + +config PPC_SCOM + bool + +config SCOM_DEBUGFS + bool "Expose SCOM controllers via debugfs" + depends on PPC_SCOM + default n 
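Editor's note: the SCOM_DEBUGFS option added above enables scom_debug_init() in arch/powerpc/sysdev/scom.c (added later in this patch), which creates one scomN directory per scom-controller device-tree node under the powerpc debugfs root, each containing addr, value and path files. The following is a rough, untested user-space sketch of how one SCOM register might be read through that interface; the /sys/kernel/debug/powerpc mount path and the scom0 instance name are assumptions for illustration, not part of the patch.

/*
 * Rough sketch only: read one SCOM register via the debugfs files
 * created by scom_debug_init().  Assumes debugfs is mounted at
 * /sys/kernel/debug and that the first controller appears as scom0.
 */
#include <stdio.h>
#include <stdlib.h>

#define SCOM_DIR "/sys/kernel/debug/powerpc/scom/scom0"

int main(int argc, char **argv)
{
	unsigned long long addr, val;
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <scom-address-in-hex>\n", argv[0]);
		return 1;
	}
	addr = strtoull(argv[1], NULL, 16);

	/* Select which SCOM register the "value" file operates on. */
	f = fopen(SCOM_DIR "/addr", "w");
	if (!f) {
		perror("addr");
		return 1;
	}
	fprintf(f, "0x%llx\n", addr);
	fclose(f);

	/* Read back the 64-bit register contents. */
	f = fopen(SCOM_DIR "/value", "r");
	if (!f || fscanf(f, "%llx", &val) != 1) {
		perror("value");
		return 1;
	}
	fclose(f);

	printf("SCOM 0x%llx = 0x%llx\n", addr, val);
	return 0;
}

The 0x%llx formats above mirror the DEFINE_SIMPLE_ATTRIBUTE format strings used for the addr and value attributes in scom.c.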
diff --git a/trunk/arch/powerpc/sysdev/Makefile b/trunk/arch/powerpc/sysdev/Makefile index 1e0c933ef772..6076e0074a87 100644 --- a/trunk/arch/powerpc/sysdev/Makefile +++ b/trunk/arch/powerpc/sysdev/Makefile @@ -57,3 +57,9 @@ obj-$(CONFIG_PPC_MPC52xx) += mpc5xxx_clocks.o ifeq ($(CONFIG_SUSPEND),y) obj-$(CONFIG_6xx) += 6xx-suspend.o endif + +obj-$(CONFIG_PPC_SCOM) += scom.o + +subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror + +obj-$(CONFIG_PPC_XICS) += xics/ diff --git a/trunk/arch/powerpc/sysdev/axonram.c b/trunk/arch/powerpc/sysdev/axonram.c index 1636dd896707..bd0d54060b94 100644 --- a/trunk/arch/powerpc/sysdev/axonram.c +++ b/trunk/arch/powerpc/sysdev/axonram.c @@ -216,7 +216,7 @@ static int axon_ram_probe(struct platform_device *device) AXON_RAM_DEVICE_NAME, axon_ram_bank_id, bank->size >> 20); bank->ph_addr = resource.start; - bank->io_addr = (unsigned long) ioremap_flags( + bank->io_addr = (unsigned long) ioremap_prot( bank->ph_addr, bank->size, _PAGE_NO_CACHE); if (bank->io_addr == 0) { dev_err(&device->dev, "ioremap() failed\n"); diff --git a/trunk/arch/powerpc/sysdev/cpm1.c b/trunk/arch/powerpc/sysdev/cpm1.c index e0bc944eb23f..350787c83e22 100644 --- a/trunk/arch/powerpc/sysdev/cpm1.c +++ b/trunk/arch/powerpc/sysdev/cpm1.c @@ -58,21 +58,21 @@ static struct irq_host *cpm_pic_host; static void cpm_mask_irq(struct irq_data *d) { - unsigned int cpm_vec = (unsigned int)irq_map[d->irq].hwirq; + unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d); clrbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec)); } static void cpm_unmask_irq(struct irq_data *d) { - unsigned int cpm_vec = (unsigned int)irq_map[d->irq].hwirq; + unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d); setbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec)); } static void cpm_end_irq(struct irq_data *d) { - unsigned int cpm_vec = (unsigned int)irq_map[d->irq].hwirq; + unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d); out_be32(&cpic_reg->cpic_cisr, (1 << cpm_vec)); } @@ -157,7 +157,7 @@ unsigned int cpm_pic_init(void) goto end; /* Initialize the CPM interrupt controller. 
*/ - hwirq = (unsigned int)irq_map[sirq].hwirq; + hwirq = (unsigned int)virq_to_hw(sirq); out_be32(&cpic_reg->cpic_cicr, (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) | ((hwirq/2) << 13) | CICR_HP_MASK); diff --git a/trunk/arch/powerpc/sysdev/cpm2_pic.c b/trunk/arch/powerpc/sysdev/cpm2_pic.c index 5495c1be472b..bcab50e2a9eb 100644 --- a/trunk/arch/powerpc/sysdev/cpm2_pic.c +++ b/trunk/arch/powerpc/sysdev/cpm2_pic.c @@ -81,7 +81,7 @@ static const u_char irq_to_siubit[] = { static void cpm2_mask_irq(struct irq_data *d) { int bit, word; - unsigned int irq_nr = virq_to_hw(d->irq); + unsigned int irq_nr = irqd_to_hwirq(d); bit = irq_to_siubit[irq_nr]; word = irq_to_siureg[irq_nr]; @@ -93,7 +93,7 @@ static void cpm2_mask_irq(struct irq_data *d) static void cpm2_unmask_irq(struct irq_data *d) { int bit, word; - unsigned int irq_nr = virq_to_hw(d->irq); + unsigned int irq_nr = irqd_to_hwirq(d); bit = irq_to_siubit[irq_nr]; word = irq_to_siureg[irq_nr]; @@ -105,7 +105,7 @@ static void cpm2_unmask_irq(struct irq_data *d) static void cpm2_ack(struct irq_data *d) { int bit, word; - unsigned int irq_nr = virq_to_hw(d->irq); + unsigned int irq_nr = irqd_to_hwirq(d); bit = irq_to_siubit[irq_nr]; word = irq_to_siureg[irq_nr]; @@ -116,7 +116,7 @@ static void cpm2_ack(struct irq_data *d) static void cpm2_end_irq(struct irq_data *d) { int bit, word; - unsigned int irq_nr = virq_to_hw(d->irq); + unsigned int irq_nr = irqd_to_hwirq(d); bit = irq_to_siubit[irq_nr]; word = irq_to_siureg[irq_nr]; @@ -133,7 +133,7 @@ static void cpm2_end_irq(struct irq_data *d) static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type) { - unsigned int src = virq_to_hw(d->irq); + unsigned int src = irqd_to_hwirq(d); unsigned int vold, vnew, edibit; /* Port C interrupts are either IRQ_TYPE_EDGE_FALLING or diff --git a/trunk/arch/powerpc/sysdev/fsl_85xx_cache_sram.c b/trunk/arch/powerpc/sysdev/fsl_85xx_cache_sram.c index 54fb1922fe30..116415899176 100644 --- a/trunk/arch/powerpc/sysdev/fsl_85xx_cache_sram.c +++ b/trunk/arch/powerpc/sysdev/fsl_85xx_cache_sram.c @@ -106,10 +106,10 @@ int __init instantiate_cache_sram(struct platform_device *dev, goto out_free; } - cache_sram->base_virt = ioremap_flags(cache_sram->base_phys, + cache_sram->base_virt = ioremap_prot(cache_sram->base_phys, cache_sram->size, _PAGE_COHERENT | PAGE_KERNEL); if (!cache_sram->base_virt) { - dev_err(&dev->dev, "%s: ioremap_flags failed\n", + dev_err(&dev->dev, "%s: ioremap_prot failed\n", dev->dev.of_node->full_name); ret = -ENOMEM; goto out_release; diff --git a/trunk/arch/powerpc/sysdev/fsl_msi.c b/trunk/arch/powerpc/sysdev/fsl_msi.c index d5679dc1e20f..92e78333c47c 100644 --- a/trunk/arch/powerpc/sysdev/fsl_msi.c +++ b/trunk/arch/powerpc/sysdev/fsl_msi.c @@ -110,7 +110,7 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev) list_for_each_entry(entry, &pdev->msi_list, list) { if (entry->irq == NO_IRQ) continue; - msi_data = irq_get_handler_data(entry->irq); + msi_data = irq_get_chip_data(entry->irq); irq_set_msi_desc(entry->irq, NULL); msi_bitmap_free_hwirqs(&msi_data->bitmap, virq_to_hw(entry->irq), 1); @@ -168,7 +168,7 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) rc = -ENOSPC; goto out_free; } - irq_set_handler_data(virq, msi_data); + /* chip_data is msi_data via host->hostdata in host->map() */ irq_set_msi_desc(virq, entry); fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data); @@ -193,7 +193,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) u32 have_shift = 0; struct 
fsl_msi_cascade_data *cascade_data; - cascade_data = (struct fsl_msi_cascade_data *)irq_get_handler_data(irq); + cascade_data = irq_get_handler_data(irq); msi_data = cascade_data->msi_data; raw_spin_lock(&desc->lock); @@ -253,7 +253,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) static int fsl_of_msi_remove(struct platform_device *ofdev) { - struct fsl_msi *msi = ofdev->dev.platform_data; + struct fsl_msi *msi = platform_get_drvdata(ofdev); int virq, i; struct fsl_msi_cascade_data *cascade_data; @@ -304,8 +304,10 @@ static int __devinit fsl_msi_setup_hwirq(struct fsl_msi *msi, return 0; } +static const struct of_device_id fsl_of_msi_ids[]; static int __devinit fsl_of_msi_probe(struct platform_device *dev) { + const struct of_device_id *match; struct fsl_msi *msi; struct resource res; int err, i, j, irq_index, count; @@ -316,9 +318,10 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev) u32 offset; static const u32 all_avail[] = { 0, NR_MSI_IRQS }; - if (!dev->dev.of_match) + match = of_match_device(fsl_of_msi_ids, &dev->dev); + if (!match) return -EINVAL; - features = dev->dev.of_match->data; + features = match->data; printk(KERN_DEBUG "Setting up Freescale MSI support\n"); @@ -327,7 +330,7 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev) dev_err(&dev->dev, "No memory for MSI structure\n"); return -ENOMEM; } - dev->dev.platform_data = msi; + platform_set_drvdata(dev, msi); msi->irqhost = irq_alloc_host(dev->dev.of_node, IRQ_HOST_MAP_LINEAR, NR_MSI_IRQS, &fsl_msi_host_ops, 0); diff --git a/trunk/arch/powerpc/sysdev/fsl_pci.c b/trunk/arch/powerpc/sysdev/fsl_pci.c index f8f7f28c6343..68ca9290df94 100644 --- a/trunk/arch/powerpc/sysdev/fsl_pci.c +++ b/trunk/arch/powerpc/sysdev/fsl_pci.c @@ -324,6 +324,11 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary) struct resource rsrc; const int *bus_range; + if (!of_device_is_available(dev)) { + pr_warning("%s: disabled\n", dev->full_name); + return -ENODEV; + } + pr_debug("Adding PCI host bridge %s\n", dev->full_name); /* Fetch host bridge registers address */ diff --git a/trunk/arch/powerpc/sysdev/fsl_rio.c b/trunk/arch/powerpc/sysdev/fsl_rio.c index 14232d57369c..49798532b477 100644 --- a/trunk/arch/powerpc/sysdev/fsl_rio.c +++ b/trunk/arch/powerpc/sysdev/fsl_rio.c @@ -1457,7 +1457,6 @@ int fsl_rio_setup(struct platform_device *dev) port->ops = ops; port->priv = priv; port->phys_efptr = 0x100; - rio_register_mport(port); priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1); rio_regs_win = priv->regs_win; @@ -1504,6 +1503,9 @@ int fsl_rio_setup(struct platform_device *dev) dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n", port->sys_size ? 
65536 : 256); + if (rio_register_mport(port)) + goto err; + if (port->host_deviceid >= 0) out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED); diff --git a/trunk/arch/powerpc/sysdev/i8259.c b/trunk/arch/powerpc/sysdev/i8259.c index 142770cb84b6..d18bb27e4df9 100644 --- a/trunk/arch/powerpc/sysdev/i8259.c +++ b/trunk/arch/powerpc/sysdev/i8259.c @@ -185,18 +185,6 @@ static int i8259_host_map(struct irq_host *h, unsigned int virq, return 0; } -static void i8259_host_unmap(struct irq_host *h, unsigned int virq) -{ - /* Make sure irq is masked in hardware */ - i8259_mask_irq(irq_get_irq_data(virq)); - - /* remove chip and handler */ - irq_set_chip_and_handler(virq, NULL, NULL); - - /* Make sure it's completed */ - synchronize_irq(virq); -} - static int i8259_host_xlate(struct irq_host *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) @@ -220,7 +208,6 @@ static int i8259_host_xlate(struct irq_host *h, struct device_node *ct, static struct irq_host_ops i8259_host_ops = { .match = i8259_host_match, .map = i8259_host_map, - .unmap = i8259_host_unmap, .xlate = i8259_host_xlate, }; diff --git a/trunk/arch/powerpc/sysdev/ipic.c b/trunk/arch/powerpc/sysdev/ipic.c index fa438be962b7..7367d17364cb 100644 --- a/trunk/arch/powerpc/sysdev/ipic.c +++ b/trunk/arch/powerpc/sysdev/ipic.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include #include @@ -521,12 +521,10 @@ static inline struct ipic * ipic_from_irq(unsigned int virq) return primary_ipic; } -#define ipic_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) - static void ipic_unmask_irq(struct irq_data *d) { struct ipic *ipic = ipic_from_irq(d->irq); - unsigned int src = ipic_irq_to_hw(d->irq); + unsigned int src = irqd_to_hwirq(d); unsigned long flags; u32 temp; @@ -542,7 +540,7 @@ static void ipic_unmask_irq(struct irq_data *d) static void ipic_mask_irq(struct irq_data *d) { struct ipic *ipic = ipic_from_irq(d->irq); - unsigned int src = ipic_irq_to_hw(d->irq); + unsigned int src = irqd_to_hwirq(d); unsigned long flags; u32 temp; @@ -562,7 +560,7 @@ static void ipic_mask_irq(struct irq_data *d) static void ipic_ack_irq(struct irq_data *d) { struct ipic *ipic = ipic_from_irq(d->irq); - unsigned int src = ipic_irq_to_hw(d->irq); + unsigned int src = irqd_to_hwirq(d); unsigned long flags; u32 temp; @@ -581,7 +579,7 @@ static void ipic_ack_irq(struct irq_data *d) static void ipic_mask_irq_and_ack(struct irq_data *d) { struct ipic *ipic = ipic_from_irq(d->irq); - unsigned int src = ipic_irq_to_hw(d->irq); + unsigned int src = irqd_to_hwirq(d); unsigned long flags; u32 temp; @@ -604,7 +602,7 @@ static void ipic_mask_irq_and_ack(struct irq_data *d) static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type) { struct ipic *ipic = ipic_from_irq(d->irq); - unsigned int src = ipic_irq_to_hw(d->irq); + unsigned int src = irqd_to_hwirq(d); unsigned int vold, vnew, edibit; if (flow_type == IRQ_TYPE_NONE) @@ -793,7 +791,7 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags) int ipic_set_priority(unsigned int virq, unsigned int priority) { struct ipic *ipic = ipic_from_irq(virq); - unsigned int src = ipic_irq_to_hw(virq); + unsigned int src = virq_to_hw(virq); u32 temp; if (priority > 7) @@ -821,7 +819,7 @@ int ipic_set_priority(unsigned int virq, unsigned int priority) void ipic_set_highest_priority(unsigned int virq) { struct ipic *ipic = ipic_from_irq(virq); - unsigned 
int src = ipic_irq_to_hw(virq); + unsigned int src = virq_to_hw(virq); u32 temp; temp = ipic_read(ipic->regs, IPIC_SICFR); @@ -902,7 +900,7 @@ static struct { u32 sercr; } ipic_saved_state; -static int ipic_suspend(struct sys_device *sdev, pm_message_t state) +static int ipic_suspend(void) { struct ipic *ipic = primary_ipic; @@ -933,7 +931,7 @@ static int ipic_suspend(struct sys_device *sdev, pm_message_t state) return 0; } -static int ipic_resume(struct sys_device *sdev) +static void ipic_resume(void) { struct ipic *ipic = primary_ipic; @@ -949,44 +947,26 @@ static int ipic_resume(struct sys_device *sdev) ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr); ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr); ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr); - - return 0; } #else #define ipic_suspend NULL #define ipic_resume NULL #endif -static struct sysdev_class ipic_sysclass = { - .name = "ipic", +static struct syscore_ops ipic_syscore_ops = { .suspend = ipic_suspend, .resume = ipic_resume, }; -static struct sys_device device_ipic = { - .id = 0, - .cls = &ipic_sysclass, -}; - -static int __init init_ipic_sysfs(void) +static int __init init_ipic_syscore(void) { - int rc; - if (!primary_ipic || !primary_ipic->regs) return -ENODEV; - printk(KERN_DEBUG "Registering ipic with sysfs...\n"); - rc = sysdev_class_register(&ipic_sysclass); - if (rc) { - printk(KERN_ERR "Failed registering ipic sys class\n"); - return -ENODEV; - } - rc = sysdev_register(&device_ipic); - if (rc) { - printk(KERN_ERR "Failed registering ipic sys device\n"); - return -ENODEV; - } + printk(KERN_DEBUG "Registering ipic system core operations\n"); + register_syscore_ops(&ipic_syscore_ops); + return 0; } -subsys_initcall(init_ipic_sysfs); +subsys_initcall(init_ipic_syscore); diff --git a/trunk/arch/powerpc/sysdev/mmio_nvram.c b/trunk/arch/powerpc/sysdev/mmio_nvram.c index 207324209065..ddc877a3a23a 100644 --- a/trunk/arch/powerpc/sysdev/mmio_nvram.c +++ b/trunk/arch/powerpc/sysdev/mmio_nvram.c @@ -115,6 +115,8 @@ int __init mmio_nvram_init(void) int ret; nvram_node = of_find_node_by_type(NULL, "nvram"); + if (!nvram_node) + nvram_node = of_find_compatible_node(NULL, NULL, "nvram"); if (!nvram_node) { printk(KERN_WARNING "nvram: no node found in device-tree\n"); return -ENODEV; diff --git a/trunk/arch/powerpc/sysdev/mpc8xx_pic.c b/trunk/arch/powerpc/sysdev/mpc8xx_pic.c index a88800ff4d01..20924f2246f0 100644 --- a/trunk/arch/powerpc/sysdev/mpc8xx_pic.c +++ b/trunk/arch/powerpc/sysdev/mpc8xx_pic.c @@ -28,7 +28,7 @@ int cpm_get_irq(struct pt_regs *regs); static void mpc8xx_unmask_irq(struct irq_data *d) { int bit, word; - unsigned int irq_nr = (unsigned int)irq_map[d->irq].hwirq; + unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d); bit = irq_nr & 0x1f; word = irq_nr >> 5; @@ -40,7 +40,7 @@ static void mpc8xx_unmask_irq(struct irq_data *d) static void mpc8xx_mask_irq(struct irq_data *d) { int bit, word; - unsigned int irq_nr = (unsigned int)irq_map[d->irq].hwirq; + unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d); bit = irq_nr & 0x1f; word = irq_nr >> 5; @@ -52,7 +52,7 @@ static void mpc8xx_mask_irq(struct irq_data *d) static void mpc8xx_ack(struct irq_data *d) { int bit; - unsigned int irq_nr = (unsigned int)irq_map[d->irq].hwirq; + unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d); bit = irq_nr & 0x1f; out_be32(&siu_reg->sc_sipend, 1 << (31-bit)); @@ -61,7 +61,7 @@ static void mpc8xx_ack(struct irq_data *d) static void mpc8xx_end_irq(struct irq_data *d) { int bit, word; - unsigned int 
irq_nr = (unsigned int)irq_map[d->irq].hwirq; + unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d); bit = irq_nr & 0x1f; word = irq_nr >> 5; @@ -73,7 +73,7 @@ static void mpc8xx_end_irq(struct irq_data *d) static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type) { if (flow_type & IRQ_TYPE_EDGE_FALLING) { - irq_hw_number_t hw = (unsigned int)irq_map[d->irq].hwirq; + irq_hw_number_t hw = (unsigned int)irqd_to_hwirq(d); unsigned int siel = in_be32(&siu_reg->sc_siel); /* only external IRQ senses are programmable */ diff --git a/trunk/arch/powerpc/sysdev/mpc8xxx_gpio.c b/trunk/arch/powerpc/sysdev/mpc8xxx_gpio.c index 0892a2841c2b..fb4963abdf55 100644 --- a/trunk/arch/powerpc/sysdev/mpc8xxx_gpio.c +++ b/trunk/arch/powerpc/sysdev/mpc8xxx_gpio.c @@ -163,7 +163,7 @@ static void mpc8xxx_irq_unmask(struct irq_data *d) spin_lock_irqsave(&mpc8xxx_gc->lock, flags); - setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(virq_to_hw(d->irq))); + setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); } @@ -176,7 +176,7 @@ static void mpc8xxx_irq_mask(struct irq_data *d) spin_lock_irqsave(&mpc8xxx_gc->lock, flags); - clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(virq_to_hw(d->irq))); + clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); } @@ -186,7 +186,7 @@ static void mpc8xxx_irq_ack(struct irq_data *d) struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; - out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(virq_to_hw(d->irq))); + out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); } static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type) @@ -199,14 +199,14 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type) case IRQ_TYPE_EDGE_FALLING: spin_lock_irqsave(&mpc8xxx_gc->lock, flags); setbits32(mm->regs + GPIO_ICR, - mpc8xxx_gpio2mask(virq_to_hw(d->irq))); + mpc8xxx_gpio2mask(irqd_to_hwirq(d))); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); break; case IRQ_TYPE_EDGE_BOTH: spin_lock_irqsave(&mpc8xxx_gc->lock, flags); clrbits32(mm->regs + GPIO_ICR, - mpc8xxx_gpio2mask(virq_to_hw(d->irq))); + mpc8xxx_gpio2mask(irqd_to_hwirq(d))); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); break; @@ -221,7 +221,7 @@ static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type) { struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; - unsigned long gpio = virq_to_hw(d->irq); + unsigned long gpio = irqd_to_hwirq(d); void __iomem *reg; unsigned int shift; unsigned long flags; diff --git a/trunk/arch/powerpc/sysdev/mpic.c b/trunk/arch/powerpc/sysdev/mpic.c index f91c065bed5a..3a8de5bb628a 100644 --- a/trunk/arch/powerpc/sysdev/mpic.c +++ b/trunk/arch/powerpc/sysdev/mpic.c @@ -6,6 +6,7 @@ * with various broken implementations of this HW. * * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp. + * Copyright 2010-2011 Freescale Semiconductor, Inc. * * This file is subject to the terms and conditions of the GNU General Public * License. 
See the file COPYING in the main directory of this archive @@ -27,6 +28,7 @@ #include #include #include +#include #include #include @@ -218,6 +220,28 @@ static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 valu _mpic_write(mpic->reg_type, &mpic->gregs, offset, value); } +static inline u32 _mpic_tm_read(struct mpic *mpic, unsigned int tm) +{ + unsigned int offset = MPIC_INFO(TIMER_VECTOR_PRI) + + ((tm & 3) * MPIC_INFO(TIMER_STRIDE)); + + if (tm >= 4) + offset += 0x1000 / 4; + + return _mpic_read(mpic->reg_type, &mpic->tmregs, offset); +} + +static inline void _mpic_tm_write(struct mpic *mpic, unsigned int tm, u32 value) +{ + unsigned int offset = MPIC_INFO(TIMER_VECTOR_PRI) + + ((tm & 3) * MPIC_INFO(TIMER_STRIDE)); + + if (tm >= 4) + offset += 0x1000 / 4; + + _mpic_write(mpic->reg_type, &mpic->tmregs, offset, value); +} + static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg) { unsigned int cpu = mpic_processor_id(mpic); @@ -268,6 +292,8 @@ static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no, #define mpic_write(b,r,v) _mpic_write(mpic->reg_type,&(b),(r),(v)) #define mpic_ipi_read(i) _mpic_ipi_read(mpic,(i)) #define mpic_ipi_write(i,v) _mpic_ipi_write(mpic,(i),(v)) +#define mpic_tm_read(i) _mpic_tm_read(mpic,(i)) +#define mpic_tm_write(i,v) _mpic_tm_write(mpic,(i),(v)) #define mpic_cpu_read(i) _mpic_cpu_read(mpic,(i)) #define mpic_cpu_write(i,v) _mpic_cpu_write(mpic,(i),(v)) #define mpic_irq_read(s,r) _mpic_irq_read(mpic,(s),(r)) @@ -607,8 +633,6 @@ static int irq_choose_cpu(const struct cpumask *mask) } #endif -#define mpic_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) - /* Find an mpic associated with a given linux interrupt */ static struct mpic *mpic_find(unsigned int irq) { @@ -621,11 +645,18 @@ static struct mpic *mpic_find(unsigned int irq) /* Determine if the linux irq is an IPI */ static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int irq) { - unsigned int src = mpic_irq_to_hw(irq); + unsigned int src = virq_to_hw(irq); return (src >= mpic->ipi_vecs[0] && src <= mpic->ipi_vecs[3]); } +/* Determine if the linux irq is a timer */ +static unsigned int mpic_is_tm(struct mpic *mpic, unsigned int irq) +{ + unsigned int src = virq_to_hw(irq); + + return (src >= mpic->timer_vecs[0] && src <= mpic->timer_vecs[7]); +} /* Convert a cpu mask from logical to physical cpu numbers. 
*/ static inline u32 mpic_physmask(u32 cpumask) @@ -633,7 +664,7 @@ static inline u32 mpic_physmask(u32 cpumask) int i; u32 mask = 0; - for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1) + for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1) mask |= (cpumask & 1) << get_hard_smp_processor_id(i); return mask; } @@ -674,7 +705,7 @@ void mpic_unmask_irq(struct irq_data *d) { unsigned int loops = 100000; struct mpic *mpic = mpic_from_irq_data(d); - unsigned int src = mpic_irq_to_hw(d->irq); + unsigned int src = irqd_to_hwirq(d); DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, d->irq, src); @@ -695,7 +726,7 @@ void mpic_mask_irq(struct irq_data *d) { unsigned int loops = 100000; struct mpic *mpic = mpic_from_irq_data(d); - unsigned int src = mpic_irq_to_hw(d->irq); + unsigned int src = irqd_to_hwirq(d); DBG("%s: disable_irq: %d (src %d)\n", mpic->name, d->irq, src); @@ -733,7 +764,7 @@ void mpic_end_irq(struct irq_data *d) static void mpic_unmask_ht_irq(struct irq_data *d) { struct mpic *mpic = mpic_from_irq_data(d); - unsigned int src = mpic_irq_to_hw(d->irq); + unsigned int src = irqd_to_hwirq(d); mpic_unmask_irq(d); @@ -744,7 +775,7 @@ static void mpic_unmask_ht_irq(struct irq_data *d) static unsigned int mpic_startup_ht_irq(struct irq_data *d) { struct mpic *mpic = mpic_from_irq_data(d); - unsigned int src = mpic_irq_to_hw(d->irq); + unsigned int src = irqd_to_hwirq(d); mpic_unmask_irq(d); mpic_startup_ht_interrupt(mpic, src, irqd_is_level_type(d)); @@ -755,7 +786,7 @@ static unsigned int mpic_startup_ht_irq(struct irq_data *d) static void mpic_shutdown_ht_irq(struct irq_data *d) { struct mpic *mpic = mpic_from_irq_data(d); - unsigned int src = mpic_irq_to_hw(d->irq); + unsigned int src = irqd_to_hwirq(d); mpic_shutdown_ht_interrupt(mpic, src); mpic_mask_irq(d); @@ -764,7 +795,7 @@ static void mpic_shutdown_ht_irq(struct irq_data *d) static void mpic_end_ht_irq(struct irq_data *d) { struct mpic *mpic = mpic_from_irq_data(d); - unsigned int src = mpic_irq_to_hw(d->irq); + unsigned int src = irqd_to_hwirq(d); #ifdef DEBUG_IRQ DBG("%s: end_irq: %d\n", mpic->name, d->irq); @@ -785,7 +816,7 @@ static void mpic_end_ht_irq(struct irq_data *d) static void mpic_unmask_ipi(struct irq_data *d) { struct mpic *mpic = mpic_from_ipi(d); - unsigned int src = mpic_irq_to_hw(d->irq) - mpic->ipi_vecs[0]; + unsigned int src = virq_to_hw(d->irq) - mpic->ipi_vecs[0]; DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, d->irq, src); mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK); @@ -812,27 +843,42 @@ static void mpic_end_ipi(struct irq_data *d) #endif /* CONFIG_SMP */ +static void mpic_unmask_tm(struct irq_data *d) +{ + struct mpic *mpic = mpic_from_irq_data(d); + unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0]; + + DBG("%s: enable_tm: %d (tm %d)\n", mpic->name, irq, src); + mpic_tm_write(src, mpic_tm_read(src) & ~MPIC_VECPRI_MASK); + mpic_tm_read(src); +} + +static void mpic_mask_tm(struct irq_data *d) +{ + struct mpic *mpic = mpic_from_irq_data(d); + unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0]; + + mpic_tm_write(src, mpic_tm_read(src) | MPIC_VECPRI_MASK); + mpic_tm_read(src); +} + int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) { struct mpic *mpic = mpic_from_irq_data(d); - unsigned int src = mpic_irq_to_hw(d->irq); + unsigned int src = irqd_to_hwirq(d); if (mpic->flags & MPIC_SINGLE_DEST_CPU) { int cpuid = irq_choose_cpu(cpumask); mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid); } else { - cpumask_var_t tmp; - - 
alloc_cpumask_var(&tmp, GFP_KERNEL); + u32 mask = cpumask_bits(cpumask)[0]; - cpumask_and(tmp, cpumask, cpu_online_mask); + mask &= cpumask_bits(cpu_online_mask)[0]; mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), - mpic_physmask(cpumask_bits(tmp)[0])); - - free_cpumask_var(tmp); + mpic_physmask(mask)); } return 0; @@ -862,7 +908,7 @@ static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type) int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type) { struct mpic *mpic = mpic_from_irq_data(d); - unsigned int src = mpic_irq_to_hw(d->irq); + unsigned int src = irqd_to_hwirq(d); unsigned int vecpri, vold, vnew; DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n", @@ -898,7 +944,7 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type) void mpic_set_vector(unsigned int virq, unsigned int vector) { struct mpic *mpic = mpic_from_irq(virq); - unsigned int src = mpic_irq_to_hw(virq); + unsigned int src = virq_to_hw(virq); unsigned int vecpri; DBG("mpic: set_vector(mpic:@%p,virq:%d,src:%d,vector:0x%x)\n", @@ -916,7 +962,7 @@ void mpic_set_vector(unsigned int virq, unsigned int vector) void mpic_set_destination(unsigned int virq, unsigned int cpuid) { struct mpic *mpic = mpic_from_irq(virq); - unsigned int src = mpic_irq_to_hw(virq); + unsigned int src = virq_to_hw(virq); DBG("mpic: set_destination(mpic:@%p,virq:%d,src:%d,cpuid:0x%x)\n", mpic, virq, src, cpuid); @@ -942,6 +988,12 @@ static struct irq_chip mpic_ipi_chip = { }; #endif /* CONFIG_SMP */ +static struct irq_chip mpic_tm_chip = { + .irq_mask = mpic_mask_tm, + .irq_unmask = mpic_unmask_tm, + .irq_eoi = mpic_end_irq, +}; + #ifdef CONFIG_MPIC_U3_HT_IRQS static struct irq_chip mpic_irq_ht_chip = { .irq_startup = mpic_startup_ht_irq, @@ -985,6 +1037,16 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq, } #endif /* CONFIG_SMP */ + if (hw >= mpic->timer_vecs[0] && hw <= mpic->timer_vecs[7]) { + WARN_ON(!(mpic->flags & MPIC_PRIMARY)); + + DBG("mpic: mapping as timer\n"); + irq_set_chip_data(virq, mpic); + irq_set_chip_and_handler(virq, &mpic->hc_tm, + handle_fasteoi_irq); + return 0; + } + if (hw >= mpic->irq_count) return -EINVAL; @@ -1025,6 +1087,7 @@ static int mpic_host_xlate(struct irq_host *h, struct device_node *ct, irq_hw_number_t *out_hwirq, unsigned int *out_flags) { + struct mpic *mpic = h->host_data; static unsigned char map_mpic_senses[4] = { IRQ_TYPE_EDGE_RISING, IRQ_TYPE_LEVEL_LOW, @@ -1033,7 +1096,38 @@ static int mpic_host_xlate(struct irq_host *h, struct device_node *ct, }; *out_hwirq = intspec[0]; - if (intsize > 1) { + if (intsize >= 4 && (mpic->flags & MPIC_FSL)) { + /* + * Freescale MPIC with extended intspec: + * First two cells are as usual. Third specifies + * an "interrupt type". Fourth is type-specific data. 
+ * + * See Documentation/devicetree/bindings/powerpc/fsl/mpic.txt + */ + switch (intspec[2]) { + case 0: + case 1: /* no EISR/EIMR support for now, treat as shared IRQ */ + break; + case 2: + if (intspec[0] >= ARRAY_SIZE(mpic->ipi_vecs)) + return -EINVAL; + + *out_hwirq = mpic->ipi_vecs[intspec[0]]; + break; + case 3: + if (intspec[0] >= ARRAY_SIZE(mpic->timer_vecs)) + return -EINVAL; + + *out_hwirq = mpic->timer_vecs[intspec[0]]; + break; + default: + pr_debug("%s: unknown irq type %u\n", + __func__, intspec[2]); + return -EINVAL; + } + + *out_flags = map_mpic_senses[intspec[1] & 3]; + } else if (intsize > 1) { u32 mask = 0x3; /* Apple invented a new race of encoding on machines with @@ -1109,6 +1203,9 @@ struct mpic * __init mpic_alloc(struct device_node *node, mpic->hc_ipi.name = name; #endif /* CONFIG_SMP */ + mpic->hc_tm = mpic_tm_chip; + mpic->hc_tm.name = name; + mpic->flags = flags; mpic->isu_size = isu_size; mpic->irq_count = irq_count; @@ -1119,10 +1216,14 @@ struct mpic * __init mpic_alloc(struct device_node *node, else intvec_top = 255; - mpic->timer_vecs[0] = intvec_top - 8; - mpic->timer_vecs[1] = intvec_top - 7; - mpic->timer_vecs[2] = intvec_top - 6; - mpic->timer_vecs[3] = intvec_top - 5; + mpic->timer_vecs[0] = intvec_top - 12; + mpic->timer_vecs[1] = intvec_top - 11; + mpic->timer_vecs[2] = intvec_top - 10; + mpic->timer_vecs[3] = intvec_top - 9; + mpic->timer_vecs[4] = intvec_top - 8; + mpic->timer_vecs[5] = intvec_top - 7; + mpic->timer_vecs[6] = intvec_top - 6; + mpic->timer_vecs[7] = intvec_top - 5; mpic->ipi_vecs[0] = intvec_top - 4; mpic->ipi_vecs[1] = intvec_top - 3; mpic->ipi_vecs[2] = intvec_top - 2; @@ -1132,6 +1233,8 @@ struct mpic * __init mpic_alloc(struct device_node *node, /* Check for "big-endian" in device-tree */ if (node && of_get_property(node, "big-endian", NULL) != NULL) mpic->flags |= MPIC_BIG_ENDIAN; + if (node && of_device_is_compatible(node, "fsl,mpic")) + mpic->flags |= MPIC_FSL; /* Look for protected sources */ if (node) { @@ -1323,15 +1426,17 @@ void __init mpic_init(struct mpic *mpic) /* Set current processor priority to max */ mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf); - /* Initialize timers: just disable them all */ + /* Initialize timers to our reserved vectors and mask them for now */ for (i = 0; i < 4; i++) { mpic_write(mpic->tmregs, i * MPIC_INFO(TIMER_STRIDE) + - MPIC_INFO(TIMER_DESTINATION), 0); + MPIC_INFO(TIMER_DESTINATION), + 1 << hard_smp_processor_id()); mpic_write(mpic->tmregs, i * MPIC_INFO(TIMER_STRIDE) + MPIC_INFO(TIMER_VECTOR_PRI), MPIC_VECPRI_MASK | + (9 << MPIC_VECPRI_PRIORITY_SHIFT) | (mpic->timer_vecs[0] + i)); } @@ -1427,7 +1532,7 @@ void __init mpic_set_serial_int(struct mpic *mpic, int enable) void mpic_irq_set_priority(unsigned int irq, unsigned int pri) { struct mpic *mpic = mpic_find(irq); - unsigned int src = mpic_irq_to_hw(irq); + unsigned int src = virq_to_hw(irq); unsigned long flags; u32 reg; @@ -1440,6 +1545,11 @@ void mpic_irq_set_priority(unsigned int irq, unsigned int pri) ~MPIC_VECPRI_PRIORITY_MASK; mpic_ipi_write(src - mpic->ipi_vecs[0], reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); + } else if (mpic_is_tm(mpic, irq)) { + reg = mpic_tm_read(src - mpic->timer_vecs[0]) & + ~MPIC_VECPRI_PRIORITY_MASK; + mpic_tm_write(src - mpic->timer_vecs[0], + reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); } else { reg = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & ~MPIC_VECPRI_PRIORITY_MASK; @@ -1619,46 +1729,28 @@ void mpic_request_ipis(void) } } -static void mpic_send_ipi(unsigned int ipi_no, const struct 
cpumask *cpu_mask) +void smp_mpic_message_pass(int cpu, int msg) { struct mpic *mpic = mpic_primary; + u32 physmask; BUG_ON(mpic == NULL); -#ifdef DEBUG_IPI - DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no); -#endif - - mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) + - ipi_no * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), - mpic_physmask(cpumask_bits(cpu_mask)[0])); -} - -void smp_mpic_message_pass(int target, int msg) -{ - cpumask_var_t tmp; - /* make sure we're sending something that translates to an IPI */ if ((unsigned int)msg > 3) { printk("SMP %d: smp_message_pass: unknown msg %d\n", smp_processor_id(), msg); return; } - switch (target) { - case MSG_ALL: - mpic_send_ipi(msg, cpu_online_mask); - break; - case MSG_ALL_BUT_SELF: - alloc_cpumask_var(&tmp, GFP_NOWAIT); - cpumask_andnot(tmp, cpu_online_mask, - cpumask_of(smp_processor_id())); - mpic_send_ipi(msg, tmp); - free_cpumask_var(tmp); - break; - default: - mpic_send_ipi(msg, cpumask_of(target)); - break; - } + +#ifdef DEBUG_IPI + DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, msg); +#endif + + physmask = 1 << get_hard_smp_processor_id(cpu); + + mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) + + msg * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), physmask); } int __init smp_mpic_probe(void) @@ -1702,9 +1794,8 @@ void mpic_reset_core(int cpu) #endif /* CONFIG_SMP */ #ifdef CONFIG_PM -static int mpic_suspend(struct sys_device *dev, pm_message_t state) +static void mpic_suspend_one(struct mpic *mpic) { - struct mpic *mpic = container_of(dev, struct mpic, sysdev); int i; for (i = 0; i < mpic->num_sources; i++) { @@ -1713,13 +1804,22 @@ static int mpic_suspend(struct sys_device *dev, pm_message_t state) mpic->save_data[i].dest = mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)); } +} + +static int mpic_suspend(void) +{ + struct mpic *mpic = mpics; + + while (mpic) { + mpic_suspend_one(mpic); + mpic = mpic->next; + } return 0; } -static int mpic_resume(struct sys_device *dev) +static void mpic_resume_one(struct mpic *mpic) { - struct mpic *mpic = container_of(dev, struct mpic, sysdev); int i; for (i = 0; i < mpic->num_sources; i++) { @@ -1746,33 +1846,28 @@ static int mpic_resume(struct sys_device *dev) } #endif } /* end for loop */ +} - return 0; +static void mpic_resume(void) +{ + struct mpic *mpic = mpics; + + while (mpic) { + mpic_resume_one(mpic); + mpic = mpic->next; + } } -#endif -static struct sysdev_class mpic_sysclass = { -#ifdef CONFIG_PM +static struct syscore_ops mpic_syscore_ops = { .resume = mpic_resume, .suspend = mpic_suspend, -#endif - .name = "mpic", }; static int mpic_init_sys(void) { - struct mpic *mpic = mpics; - int error, id = 0; - - error = sysdev_class_register(&mpic_sysclass); - - while (mpic && !error) { - mpic->sysdev.cls = &mpic_sysclass; - mpic->sysdev.id = id++; - error = sysdev_register(&mpic->sysdev); - mpic = mpic->next; - } - return error; + register_syscore_ops(&mpic_syscore_ops); + return 0; } device_initcall(mpic_init_sys); +#endif diff --git a/trunk/arch/powerpc/sysdev/mv64x60_pic.c b/trunk/arch/powerpc/sysdev/mv64x60_pic.c index e9c633c7c083..14d130268e7a 100644 --- a/trunk/arch/powerpc/sysdev/mv64x60_pic.c +++ b/trunk/arch/powerpc/sysdev/mv64x60_pic.c @@ -78,7 +78,7 @@ static struct irq_host *mv64x60_irq_host; static void mv64x60_mask_low(struct irq_data *d) { - int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK; + int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; unsigned long flags; spin_lock_irqsave(&mv64x60_lock, flags); @@ -91,7 +91,7 @@ static void mv64x60_mask_low(struct irq_data *d) static void 
mv64x60_unmask_low(struct irq_data *d) { - int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK; + int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; unsigned long flags; spin_lock_irqsave(&mv64x60_lock, flags); @@ -115,7 +115,7 @@ static struct irq_chip mv64x60_chip_low = { static void mv64x60_mask_high(struct irq_data *d) { - int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK; + int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; unsigned long flags; spin_lock_irqsave(&mv64x60_lock, flags); @@ -128,7 +128,7 @@ static void mv64x60_mask_high(struct irq_data *d) static void mv64x60_unmask_high(struct irq_data *d) { - int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK; + int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; unsigned long flags; spin_lock_irqsave(&mv64x60_lock, flags); @@ -152,7 +152,7 @@ static struct irq_chip mv64x60_chip_high = { static void mv64x60_mask_gpp(struct irq_data *d) { - int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK; + int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; unsigned long flags; spin_lock_irqsave(&mv64x60_lock, flags); @@ -165,7 +165,7 @@ static void mv64x60_mask_gpp(struct irq_data *d) static void mv64x60_mask_ack_gpp(struct irq_data *d) { - int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK; + int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; unsigned long flags; spin_lock_irqsave(&mv64x60_lock, flags); @@ -180,7 +180,7 @@ static void mv64x60_mask_ack_gpp(struct irq_data *d) static void mv64x60_unmask_gpp(struct irq_data *d) { - int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK; + int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; unsigned long flags; spin_lock_irqsave(&mv64x60_lock, flags); diff --git a/trunk/arch/powerpc/sysdev/qe_lib/qe_ic.c b/trunk/arch/powerpc/sysdev/qe_lib/qe_ic.c index 832d6924ad1c..b2acda07220d 100644 --- a/trunk/arch/powerpc/sysdev/qe_lib/qe_ic.c +++ b/trunk/arch/powerpc/sysdev/qe_lib/qe_ic.c @@ -197,12 +197,10 @@ static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d) return irq_data_get_irq_chip_data(d); } -#define virq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) - static void qe_ic_unmask_irq(struct irq_data *d) { struct qe_ic *qe_ic = qe_ic_from_irq_data(d); - unsigned int src = virq_to_hw(d->irq); + unsigned int src = irqd_to_hwirq(d); unsigned long flags; u32 temp; @@ -218,7 +216,7 @@ static void qe_ic_unmask_irq(struct irq_data *d) static void qe_ic_mask_irq(struct irq_data *d) { struct qe_ic *qe_ic = qe_ic_from_irq_data(d); - unsigned int src = virq_to_hw(d->irq); + unsigned int src = irqd_to_hwirq(d); unsigned long flags; u32 temp; diff --git a/trunk/arch/powerpc/sysdev/scom.c b/trunk/arch/powerpc/sysdev/scom.c new file mode 100644 index 000000000000..b2593ce30c9b --- /dev/null +++ b/trunk/arch/powerpc/sysdev/scom.c @@ -0,0 +1,192 @@ +/* + * Copyright 2010 Benjamin Herrenschmidt, IBM Corp + * + * and David Gibson, IBM Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include + +const struct scom_controller *scom_controller; +EXPORT_SYMBOL_GPL(scom_controller); + +struct device_node *scom_find_parent(struct device_node *node) +{ + struct device_node *par, *tmp; + const u32 *p; + + for (par = of_node_get(node); par;) { + if (of_get_property(par, "scom-controller", NULL)) + break; + p = of_get_property(par, "scom-parent", NULL); + tmp = par; + if (p == NULL) + par = of_get_parent(par); + else + par = of_find_node_by_phandle(*p); + of_node_put(tmp); + } + return par; +} +EXPORT_SYMBOL_GPL(scom_find_parent); + +scom_map_t scom_map_device(struct device_node *dev, int index) +{ + struct device_node *parent; + unsigned int cells, size; + const u32 *prop; + u64 reg, cnt; + scom_map_t ret; + + parent = scom_find_parent(dev); + + if (parent == NULL) + return 0; + + prop = of_get_property(parent, "#scom-cells", NULL); + cells = prop ? *prop : 1; + + prop = of_get_property(dev, "scom-reg", &size); + if (!prop) + return 0; + size >>= 2; + + if (index >= (size / (2*cells))) + return 0; + + reg = of_read_number(&prop[index * cells * 2], cells); + cnt = of_read_number(&prop[index * cells * 2 + cells], cells); + + ret = scom_map(parent, reg, cnt); + of_node_put(parent); + + return ret; +} +EXPORT_SYMBOL_GPL(scom_map_device); + +#ifdef CONFIG_SCOM_DEBUGFS +struct scom_debug_entry { + struct device_node *dn; + unsigned long addr; + scom_map_t map; + spinlock_t lock; + char name[8]; + struct debugfs_blob_wrapper blob; +}; + +static int scom_addr_set(void *data, u64 val) +{ + struct scom_debug_entry *ent = data; + + ent->addr = 0; + scom_unmap(ent->map); + + ent->map = scom_map(ent->dn, val, 1); + if (scom_map_ok(ent->map)) + ent->addr = val; + else + return -EFAULT; + + return 0; +} + +static int scom_addr_get(void *data, u64 *val) +{ + struct scom_debug_entry *ent = data; + *val = ent->addr; + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(scom_addr_fops, scom_addr_get, scom_addr_set, + "0x%llx\n"); + +static int scom_val_set(void *data, u64 val) +{ + struct scom_debug_entry *ent = data; + + if (!scom_map_ok(ent->map)) + return -EFAULT; + + scom_write(ent->map, 0, val); + + return 0; +} + +static int scom_val_get(void *data, u64 *val) +{ + struct scom_debug_entry *ent = data; + + if (!scom_map_ok(ent->map)) + return -EFAULT; + + *val = scom_read(ent->map, 0); + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(scom_val_fops, scom_val_get, scom_val_set, + "0x%llx\n"); + +static int scom_debug_init_one(struct dentry *root, struct device_node *dn, + int i) +{ + struct scom_debug_entry *ent; + struct dentry *dir; + + ent = kzalloc(sizeof(*ent), GFP_KERNEL); + if (!ent) + return -ENOMEM; + + ent->dn = of_node_get(dn); + ent->map = SCOM_MAP_INVALID; + spin_lock_init(&ent->lock); + snprintf(ent->name, 8, "scom%d", i); + ent->blob.data = dn->full_name; + ent->blob.size = strlen(dn->full_name); + + dir = debugfs_create_dir(ent->name, root); + if (!dir) { + of_node_put(dn); + kfree(ent); + return -1; + } + + debugfs_create_file("addr", 0600, dir, ent, &scom_addr_fops); + debugfs_create_file("value", 0600, dir, ent, &scom_val_fops); + debugfs_create_blob("path", 0400, dir, &ent->blob); + + return 0; +} + +static int scom_debug_init(void) +{ + struct device_node *dn; + struct dentry *root; + int i, rc; + + root = debugfs_create_dir("scom", 
powerpc_debugfs_root); + if (!root) + return -1; + + i = rc = 0; + for_each_node_with_property(dn, "scom-controller") + rc |= scom_debug_init_one(root, dn, i++); + + return rc; +} +device_initcall(scom_debug_init); +#endif /* CONFIG_SCOM_DEBUGFS */ diff --git a/trunk/arch/powerpc/sysdev/uic.c b/trunk/arch/powerpc/sysdev/uic.c index 5d9138516628..984cd2029158 100644 --- a/trunk/arch/powerpc/sysdev/uic.c +++ b/trunk/arch/powerpc/sysdev/uic.c @@ -41,8 +41,6 @@ #define UIC_VR 0x7 #define UIC_VCR 0x8 -#define uic_irq_to_hw(virq) (irq_map[virq].hwirq) - struct uic *primary_uic; struct uic { @@ -58,7 +56,7 @@ struct uic { static void uic_unmask_irq(struct irq_data *d) { struct uic *uic = irq_data_get_irq_chip_data(d); - unsigned int src = uic_irq_to_hw(d->irq); + unsigned int src = irqd_to_hwirq(d); unsigned long flags; u32 er, sr; @@ -76,7 +74,7 @@ static void uic_unmask_irq(struct irq_data *d) static void uic_mask_irq(struct irq_data *d) { struct uic *uic = irq_data_get_irq_chip_data(d); - unsigned int src = uic_irq_to_hw(d->irq); + unsigned int src = irqd_to_hwirq(d); unsigned long flags; u32 er; @@ -90,7 +88,7 @@ static void uic_mask_irq(struct irq_data *d) static void uic_ack_irq(struct irq_data *d) { struct uic *uic = irq_data_get_irq_chip_data(d); - unsigned int src = uic_irq_to_hw(d->irq); + unsigned int src = irqd_to_hwirq(d); unsigned long flags; spin_lock_irqsave(&uic->lock, flags); @@ -101,7 +99,7 @@ static void uic_ack_irq(struct irq_data *d) static void uic_mask_ack_irq(struct irq_data *d) { struct uic *uic = irq_data_get_irq_chip_data(d); - unsigned int src = uic_irq_to_hw(d->irq); + unsigned int src = irqd_to_hwirq(d); unsigned long flags; u32 er, sr; @@ -126,7 +124,7 @@ static void uic_mask_ack_irq(struct irq_data *d) static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type) { struct uic *uic = irq_data_get_irq_chip_data(d); - unsigned int src = uic_irq_to_hw(d->irq); + unsigned int src = irqd_to_hwirq(d); unsigned long flags; int trigger, polarity; u32 tr, pr, mask; diff --git a/trunk/arch/powerpc/sysdev/xics/Kconfig b/trunk/arch/powerpc/sysdev/xics/Kconfig new file mode 100644 index 000000000000..0031eda320c3 --- /dev/null +++ b/trunk/arch/powerpc/sysdev/xics/Kconfig @@ -0,0 +1,13 @@ +config PPC_XICS + def_bool n + select PPC_SMP_MUXED_IPI + +config PPC_ICP_NATIVE + def_bool n + +config PPC_ICP_HV + def_bool n + +config PPC_ICS_RTAS + def_bool n + diff --git a/trunk/arch/powerpc/sysdev/xics/Makefile b/trunk/arch/powerpc/sysdev/xics/Makefile new file mode 100644 index 000000000000..b75a6059337f --- /dev/null +++ b/trunk/arch/powerpc/sysdev/xics/Makefile @@ -0,0 +1,6 @@ +subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror + +obj-y += xics-common.o +obj-$(CONFIG_PPC_ICP_NATIVE) += icp-native.o +obj-$(CONFIG_PPC_ICP_HV) += icp-hv.o +obj-$(CONFIG_PPC_ICS_RTAS) += ics-rtas.o diff --git a/trunk/arch/powerpc/sysdev/xics/icp-hv.c b/trunk/arch/powerpc/sysdev/xics/icp-hv.c new file mode 100644 index 000000000000..9518d367a64f --- /dev/null +++ b/trunk/arch/powerpc/sysdev/xics/icp-hv.c @@ -0,0 +1,164 @@ +/* + * Copyright 2011 IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +static inline unsigned int icp_hv_get_xirr(unsigned char cppr) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + + rc = plpar_hcall(H_XIRR, retbuf, cppr); + if (rc != H_SUCCESS) + panic(" bad return code xirr - rc = %lx\n", rc); + return (unsigned int)retbuf[0]; +} + +static inline void icp_hv_set_xirr(unsigned int value) +{ + long rc = plpar_hcall_norets(H_EOI, value); + if (rc != H_SUCCESS) + panic("bad return code EOI - rc = %ld, value=%x\n", rc, value); +} + +static inline void icp_hv_set_cppr(u8 value) +{ + long rc = plpar_hcall_norets(H_CPPR, value); + if (rc != H_SUCCESS) + panic("bad return code cppr - rc = %lx\n", rc); +} + +static inline void icp_hv_set_qirr(int n_cpu , u8 value) +{ + long rc = plpar_hcall_norets(H_IPI, get_hard_smp_processor_id(n_cpu), + value); + if (rc != H_SUCCESS) + panic("bad return code qirr - rc = %lx\n", rc); +} + +static void icp_hv_eoi(struct irq_data *d) +{ + unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); + + iosync(); + icp_hv_set_xirr((xics_pop_cppr() << 24) | hw_irq); +} + +static void icp_hv_teardown_cpu(void) +{ + int cpu = smp_processor_id(); + + /* Clear any pending IPI */ + icp_hv_set_qirr(cpu, 0xff); +} + +static void icp_hv_flush_ipi(void) +{ + /* We take the ipi irq but and never return so we + * need to EOI the IPI, but want to leave our priority 0 + * + * should we check all the other interrupts too? + * should we be flagging idle loop instead? + * or creating some task to be scheduled? + */ + + icp_hv_set_xirr((0x00 << 24) | XICS_IPI); +} + +static unsigned int icp_hv_get_irq(void) +{ + unsigned int xirr = icp_hv_get_xirr(xics_cppr_top()); + unsigned int vec = xirr & 0x00ffffff; + unsigned int irq; + + if (vec == XICS_IRQ_SPURIOUS) + return NO_IRQ; + + irq = irq_radix_revmap_lookup(xics_host, vec); + if (likely(irq != NO_IRQ)) { + xics_push_cppr(vec); + return irq; + } + + /* We don't have a linux mapping, so have rtas mask it. */ + xics_mask_unknown_vec(vec); + + /* We might learn about it later, so EOI it */ + icp_hv_set_xirr(xirr); + + return NO_IRQ; +} + +static void icp_hv_set_cpu_priority(unsigned char cppr) +{ + xics_set_base_cppr(cppr); + icp_hv_set_cppr(cppr); + iosync(); +} + +#ifdef CONFIG_SMP + +static void icp_hv_cause_ipi(int cpu, unsigned long data) +{ + icp_hv_set_qirr(cpu, IPI_PRIORITY); +} + +static irqreturn_t icp_hv_ipi_action(int irq, void *dev_id) +{ + int cpu = smp_processor_id(); + + icp_hv_set_qirr(cpu, 0xff); + + return smp_ipi_demux(); +} + +#endif /* CONFIG_SMP */ + +static const struct icp_ops icp_hv_ops = { + .get_irq = icp_hv_get_irq, + .eoi = icp_hv_eoi, + .set_priority = icp_hv_set_cpu_priority, + .teardown_cpu = icp_hv_teardown_cpu, + .flush_ipi = icp_hv_flush_ipi, +#ifdef CONFIG_SMP + .ipi_action = icp_hv_ipi_action, + .cause_ipi = icp_hv_cause_ipi, +#endif +}; + +int icp_hv_init(void) +{ + struct device_node *np; + + np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xicp"); + if (!np) + np = of_find_node_by_type(NULL, + "PowerPC-External-Interrupt-Presentation"); + if (!np) + return -ENODEV; + + icp_ops = &icp_hv_ops; + + return 0; +} + diff --git a/trunk/arch/powerpc/sysdev/xics/icp-native.c b/trunk/arch/powerpc/sysdev/xics/icp-native.c new file mode 100644 index 000000000000..1f15ad436140 --- /dev/null +++ b/trunk/arch/powerpc/sysdev/xics/icp-native.c @@ -0,0 +1,293 @@ +/* + * Copyright 2011 IBM Corporation. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +struct icp_ipl { + union { + u32 word; + u8 bytes[4]; + } xirr_poll; + union { + u32 word; + u8 bytes[4]; + } xirr; + u32 dummy; + union { + u32 word; + u8 bytes[4]; + } qirr; + u32 link_a; + u32 link_b; + u32 link_c; +}; + +static struct icp_ipl __iomem *icp_native_regs[NR_CPUS]; + +static inline unsigned int icp_native_get_xirr(void) +{ + int cpu = smp_processor_id(); + + return in_be32(&icp_native_regs[cpu]->xirr.word); +} + +static inline void icp_native_set_xirr(unsigned int value) +{ + int cpu = smp_processor_id(); + + out_be32(&icp_native_regs[cpu]->xirr.word, value); +} + +static inline void icp_native_set_cppr(u8 value) +{ + int cpu = smp_processor_id(); + + out_8(&icp_native_regs[cpu]->xirr.bytes[0], value); +} + +static inline void icp_native_set_qirr(int n_cpu, u8 value) +{ + out_8(&icp_native_regs[n_cpu]->qirr.bytes[0], value); +} + +static void icp_native_set_cpu_priority(unsigned char cppr) +{ + xics_set_base_cppr(cppr); + icp_native_set_cppr(cppr); + iosync(); +} + +static void icp_native_eoi(struct irq_data *d) +{ + unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); + + iosync(); + icp_native_set_xirr((xics_pop_cppr() << 24) | hw_irq); +} + +static void icp_native_teardown_cpu(void) +{ + int cpu = smp_processor_id(); + + /* Clear any pending IPI */ + icp_native_set_qirr(cpu, 0xff); +} + +static void icp_native_flush_ipi(void) +{ + /* We take the ipi irq but and never return so we + * need to EOI the IPI, but want to leave our priority 0 + * + * should we check all the other interrupts too? + * should we be flagging idle loop instead? + * or creating some task to be scheduled? + */ + + icp_native_set_xirr((0x00 << 24) | XICS_IPI); +} + +static unsigned int icp_native_get_irq(void) +{ + unsigned int xirr = icp_native_get_xirr(); + unsigned int vec = xirr & 0x00ffffff; + unsigned int irq; + + if (vec == XICS_IRQ_SPURIOUS) + return NO_IRQ; + + irq = irq_radix_revmap_lookup(xics_host, vec); + if (likely(irq != NO_IRQ)) { + xics_push_cppr(vec); + return irq; + } + + /* We don't have a linux mapping, so have rtas mask it. */ + xics_mask_unknown_vec(vec); + + /* We might learn about it later, so EOI it */ + icp_native_set_xirr(xirr); + + return NO_IRQ; +} + +#ifdef CONFIG_SMP + +static void icp_native_cause_ipi(int cpu, unsigned long data) +{ + icp_native_set_qirr(cpu, IPI_PRIORITY); +} + +static irqreturn_t icp_native_ipi_action(int irq, void *dev_id) +{ + int cpu = smp_processor_id(); + + icp_native_set_qirr(cpu, 0xff); + + return smp_ipi_demux(); +} + +#endif /* CONFIG_SMP */ + +static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr, + unsigned long size) +{ + char *rname; + int i, cpu = -1; + + /* This may look gross but it's good enough for now, we don't quite + * have a hard -> linux processor id matching. + */ + for_each_possible_cpu(i) { + if (!cpu_present(i)) + continue; + if (hw_id == get_hard_smp_processor_id(i)) { + cpu = i; + break; + } + } + + /* Fail, skip that CPU. 
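[Reviewer note] Both ICP backends above slice the XIRR the same way: the top byte is the CPPR (priority) and the low 24 bits are the interrupt source, and an EOI writes those two fields back combined. A small sketch of just that bit layout, with illustrative helper names; only the masks and shifts are taken from the patch:

        #define XIRR_CPPR(xirr)    (((xirr) >> 24) & 0xff)   /* top byte: current priority */
        #define XIRR_VECTOR(xirr)  ((xirr) & 0x00ffffff)     /* low 24 bits: interrupt source */

        static inline unsigned int xirr_for_eoi(unsigned char cppr, unsigned int hw_irq)
        {
                /* the value icp_hv_eoi()/icp_native_eoi() write back above */
                return ((unsigned int)cppr << 24) | hw_irq;
        }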
Don't print, it's normal, some XICS come up + * with way more entries in there than you have CPUs + */ + if (cpu == -1) + return 0; + + rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation", + cpu, hw_id); + + if (!request_mem_region(addr, size, rname)) { + pr_warning("icp_native: Could not reserve ICP MMIO" + " for CPU %d, interrupt server #0x%x\n", + cpu, hw_id); + return -EBUSY; + } + + icp_native_regs[cpu] = ioremap(addr, size); + if (!icp_native_regs[cpu]) { + pr_warning("icp_native: Failed ioremap for CPU %d, " + "interrupt server #0x%x, addr %#lx\n", + cpu, hw_id, addr); + release_mem_region(addr, size); + return -ENOMEM; + } + return 0; +} + +static int __init icp_native_init_one_node(struct device_node *np, + unsigned int *indx) +{ + unsigned int ilen; + const u32 *ireg; + int i; + int reg_tuple_size; + int num_servers = 0; + + /* This code does the theorically broken assumption that the interrupt + * server numbers are the same as the hard CPU numbers. + * This happens to be the case so far but we are playing with fire... + * should be fixed one of these days. -BenH. + */ + ireg = of_get_property(np, "ibm,interrupt-server-ranges", &ilen); + + /* Do that ever happen ? we'll know soon enough... but even good'old + * f80 does have that property .. + */ + WARN_ON((ireg == NULL) || (ilen != 2*sizeof(u32))); + + if (ireg) { + *indx = of_read_number(ireg, 1); + if (ilen >= 2*sizeof(u32)) + num_servers = of_read_number(ireg + 1, 1); + } + + ireg = of_get_property(np, "reg", &ilen); + if (!ireg) { + pr_err("icp_native: Can't find interrupt reg property"); + return -1; + } + + reg_tuple_size = (of_n_addr_cells(np) + of_n_size_cells(np)) * 4; + if (((ilen % reg_tuple_size) != 0) + || (num_servers && (num_servers != (ilen / reg_tuple_size)))) { + pr_err("icp_native: ICP reg len (%d) != num servers (%d)", + ilen / reg_tuple_size, num_servers); + return -1; + } + + for (i = 0; i < (ilen / reg_tuple_size); i++) { + struct resource r; + int err; + + err = of_address_to_resource(np, i, &r); + if (err) { + pr_err("icp_native: Could not translate ICP MMIO" + " for interrupt server 0x%x (%d)\n", *indx, err); + return -1; + } + + if (icp_native_map_one_cpu(*indx, r.start, r.end - r.start)) + return -1; + + (*indx)++; + } + return 0; +} + +static const struct icp_ops icp_native_ops = { + .get_irq = icp_native_get_irq, + .eoi = icp_native_eoi, + .set_priority = icp_native_set_cpu_priority, + .teardown_cpu = icp_native_teardown_cpu, + .flush_ipi = icp_native_flush_ipi, +#ifdef CONFIG_SMP + .ipi_action = icp_native_ipi_action, + .cause_ipi = icp_native_cause_ipi, +#endif +}; + +int icp_native_init(void) +{ + struct device_node *np; + u32 indx = 0; + int found = 0; + + for_each_compatible_node(np, NULL, "ibm,ppc-xicp") + if (icp_native_init_one_node(np, &indx) == 0) + found = 1; + if (!found) { + for_each_node_by_type(np, + "PowerPC-External-Interrupt-Presentation") { + if (icp_native_init_one_node(np, &indx) == 0) + found = 1; + } + } + + if (found == 0) + return -ENODEV; + + icp_ops = &icp_native_ops; + + return 0; +} diff --git a/trunk/arch/powerpc/sysdev/xics/ics-rtas.c b/trunk/arch/powerpc/sysdev/xics/ics-rtas.c new file mode 100644 index 000000000000..c782f85cf7e4 --- /dev/null +++ b/trunk/arch/powerpc/sysdev/xics/ics-rtas.c @@ -0,0 +1,240 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/* RTAS service tokens */ +static int ibm_get_xive; +static int 
ibm_set_xive; +static int ibm_int_on; +static int ibm_int_off; + +static int ics_rtas_map(struct ics *ics, unsigned int virq); +static void ics_rtas_mask_unknown(struct ics *ics, unsigned long vec); +static long ics_rtas_get_server(struct ics *ics, unsigned long vec); +static int ics_rtas_host_match(struct ics *ics, struct device_node *node); + +/* Only one global & state struct ics */ +static struct ics ics_rtas = { + .map = ics_rtas_map, + .mask_unknown = ics_rtas_mask_unknown, + .get_server = ics_rtas_get_server, + .host_match = ics_rtas_host_match, +}; + +static void ics_rtas_unmask_irq(struct irq_data *d) +{ + unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); + int call_status; + int server; + + pr_devel("xics: unmask virq %d [hw 0x%x]\n", d->irq, hw_irq); + + if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) + return; + + server = xics_get_irq_server(d->irq, d->affinity, 0); + + call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server, + DEFAULT_PRIORITY); + if (call_status != 0) { + printk(KERN_ERR + "%s: ibm_set_xive irq %u server %x returned %d\n", + __func__, hw_irq, server, call_status); + return; + } + + /* Now unmask the interrupt (often a no-op) */ + call_status = rtas_call(ibm_int_on, 1, 1, NULL, hw_irq); + if (call_status != 0) { + printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n", + __func__, hw_irq, call_status); + return; + } +} + +static unsigned int ics_rtas_startup(struct irq_data *d) +{ +#ifdef CONFIG_PCI_MSI + /* + * The generic MSI code returns with the interrupt disabled on the + * card, using the MSI mask bits. Firmware doesn't appear to unmask + * at that level, so we do it here by hand. + */ + if (d->msi_desc) + unmask_msi_irq(d); +#endif + /* unmask it */ + ics_rtas_unmask_irq(d); + return 0; +} + +static void ics_rtas_mask_real_irq(unsigned int hw_irq) +{ + int call_status; + + if (hw_irq == XICS_IPI) + return; + + call_status = rtas_call(ibm_int_off, 1, 1, NULL, hw_irq); + if (call_status != 0) { + printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n", + __func__, hw_irq, call_status); + return; + } + + /* Have to set XIVE to 0xff to be able to remove a slot */ + call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, + xics_default_server, 0xff); + if (call_status != 0) { + printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n", + __func__, hw_irq, call_status); + return; + } +} + +static void ics_rtas_mask_irq(struct irq_data *d) +{ + unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); + + pr_devel("xics: mask virq %d [hw 0x%x]\n", d->irq, hw_irq); + + if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) + return; + ics_rtas_mask_real_irq(hw_irq); +} + +static int ics_rtas_set_affinity(struct irq_data *d, + const struct cpumask *cpumask, + bool force) +{ + unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); + int status; + int xics_status[2]; + int irq_server; + + if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) + return -1; + + status = rtas_call(ibm_get_xive, 1, 3, xics_status, hw_irq); + + if (status) { + printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", + __func__, hw_irq, status); + return -1; + } + + irq_server = xics_get_irq_server(d->irq, cpumask, 1); + if (irq_server == -1) { + char cpulist[128]; + cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); + printk(KERN_WARNING + "%s: No online cpus in the mask %s for irq %d\n", + __func__, cpulist, d->irq); + return -1; + } + + status = rtas_call(ibm_set_xive, 3, 1, NULL, + hw_irq, irq_server, xics_status[1]); + + if (status) { + printk(KERN_ERR "%s: 
ibm,set-xive irq=%u returns %d\n", + __func__, hw_irq, status); + return -1; + } + + return IRQ_SET_MASK_OK; +} + +static struct irq_chip ics_rtas_irq_chip = { + .name = "XICS", + .irq_startup = ics_rtas_startup, + .irq_mask = ics_rtas_mask_irq, + .irq_unmask = ics_rtas_unmask_irq, + .irq_eoi = NULL, /* Patched at init time */ + .irq_set_affinity = ics_rtas_set_affinity +}; + +static int ics_rtas_map(struct ics *ics, unsigned int virq) +{ + unsigned int hw_irq = (unsigned int)virq_to_hw(virq); + int status[2]; + int rc; + + if (WARN_ON(hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)) + return -EINVAL; + + /* Check if RTAS knows about this interrupt */ + rc = rtas_call(ibm_get_xive, 1, 3, status, hw_irq); + if (rc) + return -ENXIO; + + irq_set_chip_and_handler(virq, &ics_rtas_irq_chip, handle_fasteoi_irq); + irq_set_chip_data(virq, &ics_rtas); + + return 0; +} + +static void ics_rtas_mask_unknown(struct ics *ics, unsigned long vec) +{ + ics_rtas_mask_real_irq(vec); +} + +static long ics_rtas_get_server(struct ics *ics, unsigned long vec) +{ + int rc, status[2]; + + rc = rtas_call(ibm_get_xive, 1, 3, status, vec); + if (rc) + return -1; + return status[0]; +} + +static int ics_rtas_host_match(struct ics *ics, struct device_node *node) +{ + /* IBM machines have interrupt parents of various funky types for things + * like vdevices, events, etc... The trick we use here is to match + * everything here except the legacy 8259 which is compatible "chrp,iic" + */ + return !of_device_is_compatible(node, "chrp,iic"); +} + +int ics_rtas_init(void) +{ + ibm_get_xive = rtas_token("ibm,get-xive"); + ibm_set_xive = rtas_token("ibm,set-xive"); + ibm_int_on = rtas_token("ibm,int-on"); + ibm_int_off = rtas_token("ibm,int-off"); + + /* We enable the RTAS "ICS" if RTAS is present with the + * appropriate tokens + */ + if (ibm_get_xive == RTAS_UNKNOWN_SERVICE || + ibm_set_xive == RTAS_UNKNOWN_SERVICE) + return -ENODEV; + + /* We need to patch our irq chip's EOI to point to the + * right ICP + */ + ics_rtas_irq_chip.irq_eoi = icp_ops->eoi; + + /* Register ourselves */ + xics_register_ics(&ics_rtas); + + return 0; +} + diff --git a/trunk/arch/powerpc/sysdev/xics/xics-common.c b/trunk/arch/powerpc/sysdev/xics/xics-common.c new file mode 100644 index 000000000000..445c5a01b766 --- /dev/null +++ b/trunk/arch/powerpc/sysdev/xics/xics-common.c @@ -0,0 +1,443 @@ +/* + * Copyright 2011 IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Globals common to all ICP/ICS implementations */ +const struct icp_ops *icp_ops; + +unsigned int xics_default_server = 0xff; +unsigned int xics_default_distrib_server = 0; +unsigned int xics_interrupt_server_size = 8; + +DEFINE_PER_CPU(struct xics_cppr, xics_cppr); + +struct irq_host *xics_host; + +static LIST_HEAD(ics_list); + +void xics_update_irq_servers(void) +{ + int i, j; + struct device_node *np; + u32 ilen; + const u32 *ireg; + u32 hcpuid; + + /* Find the server numbers for the boot cpu. 
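[Reviewer note] ics-rtas.c above is the first user of the new ICS abstraction: a backend fills in a struct ics (map, mask_unknown, get_server, host_match) and hands it to xics_register_ics(), which adds it to ics_list for the host match/map paths in xics-common.c. A sketch of the shape a second backend would take; all example_* names are hypothetical, only the ops struct and xics_register_ics() come from this patch:

        static int example_ics_map(struct ics *ics, unsigned int virq)
        {
                /* attach an irq_chip + handler for this source, as ics_rtas_map() does */
                return 0;
        }
        static void example_ics_mask_unknown(struct ics *ics, unsigned long vec) { }
        static long example_ics_get_server(struct ics *ics, unsigned long vec) { return -1; }
        static int example_ics_host_match(struct ics *ics, struct device_node *node) { return 1; }

        static struct ics example_ics = {
                .map          = example_ics_map,
                .mask_unknown = example_ics_mask_unknown,
                .get_server   = example_ics_get_server,
                .host_match   = example_ics_host_match,
        };

        static int __init example_ics_init(void)
        {
                xics_register_ics(&example_ics);  /* joins ics_list for match/map */
                return 0;
        }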
*/ + np = of_get_cpu_node(boot_cpuid, NULL); + BUG_ON(!np); + + hcpuid = get_hard_smp_processor_id(boot_cpuid); + xics_default_server = xics_default_distrib_server = hcpuid; + + pr_devel("xics: xics_default_server = 0x%x\n", xics_default_server); + + ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen); + if (!ireg) { + of_node_put(np); + return; + } + + i = ilen / sizeof(int); + + /* Global interrupt distribution server is specified in the last + * entry of "ibm,ppc-interrupt-gserver#s" property. Get the last + * entry fom this property for current boot cpu id and use it as + * default distribution server + */ + for (j = 0; j < i; j += 2) { + if (ireg[j] == hcpuid) { + xics_default_distrib_server = ireg[j+1]; + break; + } + } + pr_devel("xics: xics_default_distrib_server = 0x%x\n", + xics_default_distrib_server); + of_node_put(np); +} + +/* GIQ stuff, currently only supported on RTAS setups, will have + * to be sorted properly for bare metal + */ +void xics_set_cpu_giq(unsigned int gserver, unsigned int join) +{ +#ifdef CONFIG_PPC_RTAS + int index; + int status; + + if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL)) + return; + + index = (1UL << xics_interrupt_server_size) - 1 - gserver; + + status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join); + + WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n", + GLOBAL_INTERRUPT_QUEUE, index, join, status); +#endif +} + +void xics_setup_cpu(void) +{ + icp_ops->set_priority(LOWEST_PRIORITY); + + xics_set_cpu_giq(xics_default_distrib_server, 1); +} + +void xics_mask_unknown_vec(unsigned int vec) +{ + struct ics *ics; + + pr_err("Interrupt 0x%x (real) is invalid, disabling it.\n", vec); + + list_for_each_entry(ics, &ics_list, link) + ics->mask_unknown(ics, vec); +} + + +#ifdef CONFIG_SMP + +static void xics_request_ipi(void) +{ + unsigned int ipi; + + ipi = irq_create_mapping(xics_host, XICS_IPI); + BUG_ON(ipi == NO_IRQ); + + /* + * IPIs are marked IRQF_DISABLED as they must run with irqs + * disabled, and PERCPU. The handler was set in map. + */ + BUG_ON(request_irq(ipi, icp_ops->ipi_action, + IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL)); +} + +int __init xics_smp_probe(void) +{ + /* Setup cause_ipi callback based on which ICP is used */ + smp_ops->cause_ipi = icp_ops->cause_ipi; + + /* Register all the IPIs */ + xics_request_ipi(); + + return cpumask_weight(cpu_possible_mask); +} + +#endif /* CONFIG_SMP */ + +void xics_teardown_cpu(void) +{ + struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); + + /* + * we have to reset the cppr index to 0 because we're + * not going to return from the IPI + */ + os_cppr->index = 0; + icp_ops->set_priority(0); + icp_ops->teardown_cpu(); +} + +void xics_kexec_teardown_cpu(int secondary) +{ + xics_teardown_cpu(); + + icp_ops->flush_ipi(); + + /* + * Some machines need to have at least one cpu in the GIQ, + * so leave the master cpu in the group. + */ + if (secondary) + xics_set_cpu_giq(xics_default_distrib_server, 0); +} + + +#ifdef CONFIG_HOTPLUG_CPU + +/* Interrupts are disabled. */ +void xics_migrate_irqs_away(void) +{ + int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id(); + unsigned int irq, virq; + + /* If we used to be the default server, move to the new "boot_cpuid" */ + if (hw_cpu == xics_default_server) + xics_update_irq_servers(); + + /* Reject any interrupt that was queued to us... */ + icp_ops->set_priority(0); + + /* Remove ourselves from the global interrupt queue */ + xics_set_cpu_giq(xics_default_distrib_server, 0); + + /* Allow IPIs again... 
*/ + icp_ops->set_priority(DEFAULT_PRIORITY); + + for_each_irq(virq) { + struct irq_desc *desc; + struct irq_chip *chip; + long server; + unsigned long flags; + struct ics *ics; + + /* We can't set affinity on ISA interrupts */ + if (virq < NUM_ISA_INTERRUPTS) + continue; + if (!virq_is_host(virq, xics_host)) + continue; + irq = (unsigned int)virq_to_hw(virq); + /* We need to get IPIs still. */ + if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) + continue; + desc = irq_to_desc(virq); + /* We only need to migrate enabled IRQS */ + if (!desc || !desc->action) + continue; + chip = irq_desc_get_chip(desc); + if (!chip || !chip->irq_set_affinity) + continue; + + raw_spin_lock_irqsave(&desc->lock, flags); + + /* Locate interrupt server */ + server = -1; + ics = irq_get_chip_data(virq); + if (ics) + server = ics->get_server(ics, irq); + if (server < 0) { + printk(KERN_ERR "%s: Can't find server for irq %d\n", + __func__, irq); + goto unlock; + } + + /* We only support delivery to all cpus or to one cpu. + * The irq has to be migrated only in the single cpu + * case. + */ + if (server != hw_cpu) + goto unlock; + + /* This is expected during cpu offline. */ + if (cpu_online(cpu)) + pr_warning("IRQ %u affinity broken off cpu %u\n", + virq, cpu); + + /* Reset affinity to all cpus */ + raw_spin_unlock_irqrestore(&desc->lock, flags); + irq_set_affinity(virq, cpu_all_mask); + continue; +unlock: + raw_spin_unlock_irqrestore(&desc->lock, flags); + } +} +#endif /* CONFIG_HOTPLUG_CPU */ + +#ifdef CONFIG_SMP +/* + * For the moment we only implement delivery to all cpus or one cpu. + * + * If the requested affinity is cpu_all_mask, we set global affinity. + * If not we set it to the first cpu in the mask, even if multiple cpus + * are set. This is so things like irqbalance (which set core and package + * wide affinities) do the right thing. + * + * We need to fix this to implement support for the links + */ +int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask, + unsigned int strict_check) +{ + + if (!distribute_irqs) + return xics_default_server; + + if (!cpumask_subset(cpu_possible_mask, cpumask)) { + int server = cpumask_first_and(cpu_online_mask, cpumask); + + if (server < nr_cpu_ids) + return get_hard_smp_processor_id(server); + + if (strict_check) + return -1; + } + + /* + * Workaround issue with some versions of JS20 firmware that + * deliver interrupts to cpus which haven't been started. This + * happens when using the maxcpus= boot option. 
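[Reviewer note] The server-selection policy described in the comment above boils down to "all cpus means global delivery, otherwise first online cpu in the mask". A compressed sketch of that decision, omitting the strict_check path and the JS20/maxcpus workaround handled in the real function; the helper name is illustrative, the called symbols are the ones defined in this file:

        static int example_pick_server(const struct cpumask *mask)
        {
                int cpu;

                /* "every possible cpu" falls through to the global distribution server */
                if (cpumask_subset(cpu_possible_mask, mask))
                        return xics_default_distrib_server;

                /* otherwise deliver to the first online cpu present in the mask */
                cpu = cpumask_first_and(cpu_online_mask, mask);
                if (cpu < nr_cpu_ids)
                        return get_hard_smp_processor_id(cpu);

                return xics_default_server;  /* nothing online in the mask */
        }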
+ */ + if (cpumask_equal(cpu_online_mask, cpu_present_mask)) + return xics_default_distrib_server; + + return xics_default_server; +} +#endif /* CONFIG_SMP */ + +static int xics_host_match(struct irq_host *h, struct device_node *node) +{ + struct ics *ics; + + list_for_each_entry(ics, &ics_list, link) + if (ics->host_match(ics, node)) + return 1; + + return 0; +} + +/* Dummies */ +static void xics_ipi_unmask(struct irq_data *d) { } +static void xics_ipi_mask(struct irq_data *d) { } + +static struct irq_chip xics_ipi_chip = { + .name = "XICS", + .irq_eoi = NULL, /* Patched at init time */ + .irq_mask = xics_ipi_mask, + .irq_unmask = xics_ipi_unmask, +}; + +static int xics_host_map(struct irq_host *h, unsigned int virq, + irq_hw_number_t hw) +{ + struct ics *ics; + + pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw); + + /* Insert the interrupt mapping into the radix tree for fast lookup */ + irq_radix_revmap_insert(xics_host, virq, hw); + + /* They aren't all level sensitive but we just don't really know */ + irq_set_status_flags(virq, IRQ_LEVEL); + + /* Don't call into ICS for IPIs */ + if (hw == XICS_IPI) { + irq_set_chip_and_handler(virq, &xics_ipi_chip, + handle_percpu_irq); + return 0; + } + + /* Let the ICS setup the chip data */ + list_for_each_entry(ics, &ics_list, link) + if (ics->map(ics, virq) == 0) + return 0; + + return -EINVAL; +} + +static int xics_host_xlate(struct irq_host *h, struct device_node *ct, + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_flags) + +{ + /* Current xics implementation translates everything + * to level. It is not technically right for MSIs but this + * is irrelevant at this point. We might get smarter in the future + */ + *out_hwirq = intspec[0]; + *out_flags = IRQ_TYPE_LEVEL_LOW; + + return 0; +} + +static struct irq_host_ops xics_host_ops = { + .match = xics_host_match, + .map = xics_host_map, + .xlate = xics_host_xlate, +}; + +static void __init xics_init_host(void) +{ + xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops, + XICS_IRQ_SPURIOUS); + BUG_ON(xics_host == NULL); + irq_set_default_host(xics_host); +} + +void __init xics_register_ics(struct ics *ics) +{ + list_add(&ics->link, &ics_list); +} + +static void __init xics_get_server_size(void) +{ + struct device_node *np; + const u32 *isize; + + /* We fetch the interrupt server size from the first ICS node + * we find if any + */ + np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xics"); + if (!np) + return; + isize = of_get_property(np, "ibm,interrupt-server#-size", NULL); + if (!isize) + return; + xics_interrupt_server_size = *isize; + of_node_put(np); +} + +void __init xics_init(void) +{ + int rc = -1; + + /* Fist locate ICP */ +#ifdef CONFIG_PPC_ICP_HV + if (firmware_has_feature(FW_FEATURE_LPAR)) + rc = icp_hv_init(); +#endif +#ifdef CONFIG_PPC_ICP_NATIVE + if (rc < 0) + rc = icp_native_init(); +#endif + if (rc < 0) { + pr_warning("XICS: Cannot find a Presentation Controller !\n"); + return; + } + + /* Copy get_irq callback over to ppc_md */ + ppc_md.get_irq = icp_ops->get_irq; + + /* Patch up IPI chip EOI */ + xics_ipi_chip.irq_eoi = icp_ops->eoi; + + /* Now locate ICS */ +#ifdef CONFIG_PPC_ICS_RTAS + rc = ics_rtas_init(); +#endif + if (rc < 0) + pr_warning("XICS: Cannot find a Source Controller !\n"); + + /* Initialize common bits */ + xics_get_server_size(); + xics_update_irq_servers(); + xics_init_host(); + xics_setup_cpu(); +} diff --git a/trunk/arch/powerpc/sysdev/xilinx_intc.c 
b/trunk/arch/powerpc/sysdev/xilinx_intc.c index 0a13fc19e287..6183799754af 100644 --- a/trunk/arch/powerpc/sysdev/xilinx_intc.c +++ b/trunk/arch/powerpc/sysdev/xilinx_intc.c @@ -71,7 +71,7 @@ static unsigned char xilinx_intc_map_senses[] = { */ static void xilinx_intc_mask(struct irq_data *d) { - int irq = virq_to_hw(d->irq); + int irq = irqd_to_hwirq(d); void * regs = irq_data_get_irq_chip_data(d); pr_debug("mask: %d\n", irq); out_be32(regs + XINTC_CIE, 1 << irq); @@ -87,7 +87,7 @@ static int xilinx_intc_set_type(struct irq_data *d, unsigned int flow_type) */ static void xilinx_intc_level_unmask(struct irq_data *d) { - int irq = virq_to_hw(d->irq); + int irq = irqd_to_hwirq(d); void * regs = irq_data_get_irq_chip_data(d); pr_debug("unmask: %d\n", irq); out_be32(regs + XINTC_SIE, 1 << irq); @@ -112,7 +112,7 @@ static struct irq_chip xilinx_intc_level_irqchip = { */ static void xilinx_intc_edge_unmask(struct irq_data *d) { - int irq = virq_to_hw(d->irq); + int irq = irqd_to_hwirq(d); void *regs = irq_data_get_irq_chip_data(d); pr_debug("unmask: %d\n", irq); out_be32(regs + XINTC_SIE, 1 << irq); @@ -120,7 +120,7 @@ static void xilinx_intc_edge_unmask(struct irq_data *d) static void xilinx_intc_edge_ack(struct irq_data *d) { - int irq = virq_to_hw(d->irq); + int irq = irqd_to_hwirq(d); void * regs = irq_data_get_irq_chip_data(d); pr_debug("ack: %d\n", irq); out_be32(regs + XINTC_IAR, 1 << irq); diff --git a/trunk/arch/powerpc/xmon/xmon.c b/trunk/arch/powerpc/xmon/xmon.c index 33794c1d92c3..42541bbcc7fa 100644 --- a/trunk/arch/powerpc/xmon/xmon.c +++ b/trunk/arch/powerpc/xmon/xmon.c @@ -334,7 +334,7 @@ static void release_output_lock(void) int cpus_are_in_xmon(void) { - return !cpus_empty(cpus_in_xmon); + return !cpumask_empty(&cpus_in_xmon); } #endif @@ -373,7 +373,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi) #ifdef CONFIG_SMP cpu = smp_processor_id(); - if (cpu_isset(cpu, cpus_in_xmon)) { + if (cpumask_test_cpu(cpu, &cpus_in_xmon)) { get_output_lock(); excprint(regs); printf("cpu 0x%x: Exception %lx %s in xmon, " @@ -396,10 +396,10 @@ static int xmon_core(struct pt_regs *regs, int fromipi) } xmon_fault_jmp[cpu] = recurse_jmp; - cpu_set(cpu, cpus_in_xmon); + cpumask_set_cpu(cpu, &cpus_in_xmon); bp = NULL; - if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) == (MSR_IR|MSR_SF)) + if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) bp = at_breakpoint(regs->nip); if (bp || unrecoverable_excp(regs)) fromipi = 0; @@ -437,10 +437,10 @@ static int xmon_core(struct pt_regs *regs, int fromipi) xmon_owner = cpu; mb(); if (ncpus > 1) { - smp_send_debugger_break(MSG_ALL_BUT_SELF); + smp_send_debugger_break(); /* wait for other cpus to come in */ for (timeout = 100000000; timeout != 0; --timeout) { - if (cpus_weight(cpus_in_xmon) >= ncpus) + if (cpumask_weight(&cpus_in_xmon) >= ncpus) break; barrier(); } @@ -484,7 +484,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi) } } leave: - cpu_clear(cpu, cpus_in_xmon); + cpumask_clear_cpu(cpu, &cpus_in_xmon); xmon_fault_jmp[cpu] = NULL; #else /* UP is simple... 
*/ @@ -529,7 +529,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi) } } #else - if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) == (MSR_IR|MSR_SF)) { + if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) { bp = at_breakpoint(regs->nip); if (bp != NULL) { int stepped = emulate_step(regs, bp->instr[0]); @@ -578,7 +578,7 @@ static int xmon_bpt(struct pt_regs *regs) struct bpt *bp; unsigned long offset; - if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) != (MSR_IR|MSR_SF)) + if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT)) return 0; /* Are we at the trap at bp->instr[1] for some bp? */ @@ -609,7 +609,7 @@ static int xmon_sstep(struct pt_regs *regs) static int xmon_dabr_match(struct pt_regs *regs) { - if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) != (MSR_IR|MSR_SF)) + if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT)) return 0; if (dabr.enabled == 0) return 0; @@ -619,7 +619,7 @@ static int xmon_dabr_match(struct pt_regs *regs) static int xmon_iabr_match(struct pt_regs *regs) { - if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) != (MSR_IR|MSR_SF)) + if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT)) return 0; if (iabr == NULL) return 0; @@ -630,7 +630,7 @@ static int xmon_iabr_match(struct pt_regs *regs) static int xmon_ipi(struct pt_regs *regs) { #ifdef CONFIG_SMP - if (in_xmon && !cpu_isset(smp_processor_id(), cpus_in_xmon)) + if (in_xmon && !cpumask_test_cpu(smp_processor_id(), &cpus_in_xmon)) xmon_core(regs, 1); #endif return 0; @@ -644,7 +644,7 @@ static int xmon_fault_handler(struct pt_regs *regs) if (in_xmon && catch_memory_errors) handle_fault(regs); /* doesn't return */ - if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) == (MSR_IR|MSR_SF)) { + if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) { bp = in_breakpoint_table(regs->nip, &offset); if (bp != NULL) { regs->nip = bp->address + offset; @@ -929,7 +929,7 @@ static int do_step(struct pt_regs *regs) int stepped; /* check we are in 64-bit kernel mode, translation enabled */ - if ((regs->msr & (MSR_SF|MSR_PR|MSR_IR)) == (MSR_SF|MSR_IR)) { + if ((regs->msr & (MSR_64BIT|MSR_PR|MSR_IR)) == (MSR_64BIT|MSR_IR)) { if (mread(regs->nip, &instr, 4) == 4) { stepped = emulate_step(regs, instr); if (stepped < 0) { @@ -976,7 +976,7 @@ static int cpu_cmd(void) printf("cpus stopped:"); count = 0; for (cpu = 0; cpu < NR_CPUS; ++cpu) { - if (cpu_isset(cpu, cpus_in_xmon)) { + if (cpumask_test_cpu(cpu, &cpus_in_xmon)) { if (count == 0) printf(" %x", cpu); ++count; @@ -992,7 +992,7 @@ static int cpu_cmd(void) return 0; } /* try to switch to cpu specified */ - if (!cpu_isset(cpu, cpus_in_xmon)) { + if (!cpumask_test_cpu(cpu, &cpus_in_xmon)) { printf("cpu 0x%x isn't in xmon\n", cpu); return 0; } @@ -1497,6 +1497,10 @@ static void prregs(struct pt_regs *fp) #endif printf("pc = "); xmon_print_symbol(fp->nip, " ", "\n"); + if (TRAP(fp) != 0xc00 && cpu_has_feature(CPU_FTR_CFAR)) { + printf("cfar= "); + xmon_print_symbol(fp->orig_gpr3, " ", "\n"); + } printf("lr = "); xmon_print_symbol(fp->link, " ", "\n"); printf("msr = "REG" cr = %.8lx\n", fp->msr, fp->ccr); @@ -2663,7 +2667,7 @@ static void dump_stab(void) void dump_segments(void) { - if (cpu_has_feature(CPU_FTR_SLB)) + if (mmu_has_feature(MMU_FTR_SLB)) dump_slb(); else dump_stab(); diff --git a/trunk/arch/s390/Kconfig b/trunk/arch/s390/Kconfig index 2508a6f31588..4a7f14079e03 100644 --- a/trunk/arch/s390/Kconfig +++ b/trunk/arch/s390/Kconfig @@ -88,6 +88,7 @@ config S390 select HAVE_KERNEL_XZ select HAVE_GET_USER_PAGES_FAST select 
HAVE_ARCH_MUTEX_CPU_RELAX + select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 select ARCH_INLINE_SPIN_TRYLOCK select ARCH_INLINE_SPIN_TRYLOCK_BH select ARCH_INLINE_SPIN_LOCK diff --git a/trunk/arch/s390/crypto/prng.c b/trunk/arch/s390/crypto/prng.c index 975e3ab13cb5..8b16c479585b 100644 --- a/trunk/arch/s390/crypto/prng.c +++ b/trunk/arch/s390/crypto/prng.c @@ -76,7 +76,7 @@ static void prng_seed(int nbytes) /* Add the entropy */ while (nbytes >= 8) { - *((__u64 *)parm_block) ^= *((__u64 *)buf+i*8); + *((__u64 *)parm_block) ^= *((__u64 *)(buf+i)); prng_add_entropy(); i += 8; nbytes -= 8; diff --git a/trunk/arch/s390/include/asm/cacheflush.h b/trunk/arch/s390/include/asm/cacheflush.h index 43a5c78046db..3e20383d0921 100644 --- a/trunk/arch/s390/include/asm/cacheflush.h +++ b/trunk/arch/s390/include/asm/cacheflush.h @@ -11,5 +11,6 @@ void kernel_map_pages(struct page *page, int numpages, int enable); int set_memory_ro(unsigned long addr, int numpages); int set_memory_rw(unsigned long addr, int numpages); int set_memory_nx(unsigned long addr, int numpages); +int set_memory_x(unsigned long addr, int numpages); #endif /* _S390_CACHEFLUSH_H */ diff --git a/trunk/arch/s390/include/asm/diag.h b/trunk/arch/s390/include/asm/diag.h index 72b2e2f2d32d..7e91c58072e2 100644 --- a/trunk/arch/s390/include/asm/diag.h +++ b/trunk/arch/s390/include/asm/diag.h @@ -9,9 +9,22 @@ #define _ASM_S390_DIAG_H /* - * Diagnose 10: Release pages + * Diagnose 10: Release page range */ -extern void diag10(unsigned long addr); +static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn) +{ + unsigned long start_addr, end_addr; + + start_addr = start_pfn << PAGE_SHIFT; + end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT; + + asm volatile( + "0: diag %0,%1,0x10\n" + "1:\n" + EX_TABLE(0b, 1b) + EX_TABLE(1b, 1b) + : : "a" (start_addr), "a" (end_addr)); +} /* * Diagnose 14: Input spool file manipulation diff --git a/trunk/arch/s390/include/asm/ftrace.h b/trunk/arch/s390/include/asm/ftrace.h index 3c29be4836ed..b7931faaef6d 100644 --- a/trunk/arch/s390/include/asm/ftrace.h +++ b/trunk/arch/s390/include/asm/ftrace.h @@ -11,15 +11,13 @@ struct dyn_arch_ftrace { }; #ifdef CONFIG_64BIT #define MCOUNT_INSN_SIZE 12 -#define MCOUNT_OFFSET 8 #else #define MCOUNT_INSN_SIZE 20 -#define MCOUNT_OFFSET 4 #endif static inline unsigned long ftrace_call_adjust(unsigned long addr) { - return addr - MCOUNT_OFFSET; + return addr; } #endif /* __ASSEMBLY__ */ diff --git a/trunk/arch/s390/include/asm/jump_label.h b/trunk/arch/s390/include/asm/jump_label.h new file mode 100644 index 000000000000..95a6cf2b5b67 --- /dev/null +++ b/trunk/arch/s390/include/asm/jump_label.h @@ -0,0 +1,37 @@ +#ifndef _ASM_S390_JUMP_LABEL_H +#define _ASM_S390_JUMP_LABEL_H + +#include + +#define JUMP_LABEL_NOP_SIZE 6 + +#ifdef CONFIG_64BIT +#define ASM_PTR ".quad" +#define ASM_ALIGN ".balign 8" +#else +#define ASM_PTR ".long" +#define ASM_ALIGN ".balign 4" +#endif + +static __always_inline bool arch_static_branch(struct jump_label_key *key) +{ + asm goto("0: brcl 0,0\n" + ".pushsection __jump_table, \"aw\"\n" + ASM_ALIGN "\n" + ASM_PTR " 0b, %l[label], %0\n" + ".popsection\n" + : : "X" (key) : : label); + return false; +label: + return true; +} + +typedef unsigned long jump_label_t; + +struct jump_entry { + jump_label_t code; + jump_label_t target; + jump_label_t key; +}; + +#endif diff --git a/trunk/arch/s390/include/asm/mmu_context.h b/trunk/arch/s390/include/asm/mmu_context.h index a6f0e7cc9cde..8c277caa8d3a 100644 --- 
a/trunk/arch/s390/include/asm/mmu_context.h +++ b/trunk/arch/s390/include/asm/mmu_context.h @@ -23,7 +23,7 @@ static inline int init_new_context(struct task_struct *tsk, #ifdef CONFIG_64BIT mm->context.asce_bits |= _ASCE_TYPE_REGION3; #endif - if (current->mm->context.alloc_pgste) { + if (current->mm && current->mm->context.alloc_pgste) { /* * alloc_pgste indicates, that any NEW context will be created * with extended page tables. The old context is unchanged. The diff --git a/trunk/arch/s390/kernel/Makefile b/trunk/arch/s390/kernel/Makefile index 64230bc392fa..5ff15dacb571 100644 --- a/trunk/arch/s390/kernel/Makefile +++ b/trunk/arch/s390/kernel/Makefile @@ -23,7 +23,7 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \ processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ - vdso.o vtime.o sysinfo.o nmi.o sclp.o + vdso.o vtime.o sysinfo.o nmi.o sclp.o jump_label.o obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) diff --git a/trunk/arch/s390/kernel/diag.c b/trunk/arch/s390/kernel/diag.c index c032d11da8a1..8237fc07ac79 100644 --- a/trunk/arch/s390/kernel/diag.c +++ b/trunk/arch/s390/kernel/diag.c @@ -8,27 +8,6 @@ #include #include -/* - * Diagnose 10: Release pages - */ -void diag10(unsigned long addr) -{ - if (addr >= 0x7ff00000) - return; - asm volatile( -#ifdef CONFIG_64BIT - " sam31\n" - " diag %0,%0,0x10\n" - "0: sam64\n" -#else - " diag %0,%0,0x10\n" - "0:\n" -#endif - EX_TABLE(0b, 0b) - : : "a" (addr)); -} -EXPORT_SYMBOL(diag10); - /* * Diagnose 14: Input spool file manipulation */ diff --git a/trunk/arch/s390/kernel/dis.c b/trunk/arch/s390/kernel/dis.c index c83726c9fe03..3d4a78fc1adc 100644 --- a/trunk/arch/s390/kernel/dis.c +++ b/trunk/arch/s390/kernel/dis.c @@ -672,6 +672,7 @@ static struct insn opcode_b2[] = { { "rp", 0x77, INSTR_S_RD }, { "stcke", 0x78, INSTR_S_RD }, { "sacf", 0x79, INSTR_S_RD }, + { "spp", 0x80, INSTR_S_RD }, { "stsi", 0x7d, INSTR_S_RD }, { "srnm", 0x99, INSTR_S_RD }, { "stfpc", 0x9c, INSTR_S_RD }, diff --git a/trunk/arch/s390/kernel/entry.S b/trunk/arch/s390/kernel/entry.S index 648f64239a9d..1b67fc6ebdc2 100644 --- a/trunk/arch/s390/kernel/entry.S +++ b/trunk/arch/s390/kernel/entry.S @@ -836,7 +836,7 @@ restart_base: stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on basr %r14,0 l %r14,restart_addr-.(%r14) - br %r14 # branch to start_secondary + basr %r14,%r14 # branch to start_secondary restart_addr: .long start_secondary .align 8 diff --git a/trunk/arch/s390/kernel/entry64.S b/trunk/arch/s390/kernel/entry64.S index 9d3603d6c511..9fd864563499 100644 --- a/trunk/arch/s390/kernel/entry64.S +++ b/trunk/arch/s390/kernel/entry64.S @@ -841,7 +841,7 @@ restart_base: mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on - jg start_secondary + brasl %r14,start_secondary .align 8 restart_vtime: .long 0x7fffffff,0xffffffff diff --git a/trunk/arch/s390/kernel/jump_label.c b/trunk/arch/s390/kernel/jump_label.c new file mode 100644 index 000000000000..44cc06bedf77 --- /dev/null +++ b/trunk/arch/s390/kernel/jump_label.c @@ -0,0 +1,59 @@ +/* + * Jump label s390 support + * + * Copyright IBM Corp. 
2011 + * Author(s): Jan Glauber + */ +#include +#include +#include +#include +#include + +#ifdef HAVE_JUMP_LABEL + +struct insn { + u16 opcode; + s32 offset; +} __packed; + +struct insn_args { + unsigned long *target; + struct insn *insn; + ssize_t size; +}; + +static int __arch_jump_label_transform(void *data) +{ + struct insn_args *args = data; + int rc; + + rc = probe_kernel_write(args->target, args->insn, args->size); + WARN_ON_ONCE(rc < 0); + return 0; +} + +void arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type) +{ + struct insn_args args; + struct insn insn; + + if (type == JUMP_LABEL_ENABLE) { + /* brcl 15,offset */ + insn.opcode = 0xc0f4; + insn.offset = (entry->target - entry->code) >> 1; + } else { + /* brcl 0,0 */ + insn.opcode = 0xc004; + insn.offset = 0; + } + + args.target = (void *) entry->code; + args.insn = &insn; + args.size = JUMP_LABEL_NOP_SIZE; + + stop_machine(__arch_jump_label_transform, &args, NULL); +} + +#endif diff --git a/trunk/arch/s390/kernel/smp.c b/trunk/arch/s390/kernel/smp.c index 63a97db83f96..63c7d9ff220d 100644 --- a/trunk/arch/s390/kernel/smp.c +++ b/trunk/arch/s390/kernel/smp.c @@ -165,12 +165,12 @@ static void do_ext_call_interrupt(unsigned int ext_int_code, kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++; /* * handle bit signal external calls - * - * For the ec_schedule signal we have to do nothing. All the work - * is done automatically when we return from the interrupt. */ bits = xchg(&S390_lowcore.ext_call_fast, 0); + if (test_bit(ec_schedule, &bits)) + scheduler_ipi(); + if (test_bit(ec_call_function, &bits)) generic_smp_call_function_interrupt(); diff --git a/trunk/arch/s390/kvm/sie64a.S b/trunk/arch/s390/kvm/sie64a.S index 7e9d30d567b0..ab0e041ac54c 100644 --- a/trunk/arch/s390/kvm/sie64a.S +++ b/trunk/arch/s390/kvm/sie64a.S @@ -48,10 +48,10 @@ sie_irq_handler: tm __TI_flags+7(%r2),_TIF_EXIT_SIE jz 0f larl %r2,sie_exit # work pending, leave sie - stg %r2,__LC_RETURN_PSW+8 + stg %r2,SPI_PSW+8(0,%r15) br %r14 0: larl %r2,sie_reenter # re-enter with guest id - stg %r2,__LC_RETURN_PSW+8 + stg %r2,SPI_PSW+8(0,%r15) 1: br %r14 /* diff --git a/trunk/arch/s390/mm/cmm.c b/trunk/arch/s390/mm/cmm.c index c66ffd8dbbb7..1f1dba9dcf58 100644 --- a/trunk/arch/s390/mm/cmm.c +++ b/trunk/arch/s390/mm/cmm.c @@ -91,7 +91,7 @@ static long cmm_alloc_pages(long nr, long *counter, } else free_page((unsigned long) npa); } - diag10(addr); + diag10_range(addr >> PAGE_SHIFT, 1); pa->pages[pa->index++] = addr; (*counter)++; spin_unlock(&cmm_lock); diff --git a/trunk/arch/s390/mm/fault.c b/trunk/arch/s390/mm/fault.c index 9217e332b118..ab988135e5c6 100644 --- a/trunk/arch/s390/mm/fault.c +++ b/trunk/arch/s390/mm/fault.c @@ -543,7 +543,6 @@ static void pfault_interrupt(unsigned int ext_int_code, struct task_struct *tsk; __u16 subcode; - kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++; /* * Get the external interruption subcode & pfault * initial/completion signal bit. VM stores this @@ -553,14 +552,15 @@ static void pfault_interrupt(unsigned int ext_int_code, subcode = ext_int_code >> 16; if ((subcode & 0xff00) != __SUBCODE_MASK) return; + kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++; /* * Get the token (= address of the task structure of the affected task). 
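[Reviewer note] With the s390 jump-label support above, the disabled state is the 6-byte "brcl 0,0" nop and enabling a key rewrites it to "brcl 15,offset" (halfword-scaled) under stop_machine(). A sketch of what a consumer looks like, assuming the generic static_branch()/jump_label_inc() interface of this kernel generation; example_key is hypothetical:

        #include <linux/kernel.h>
        #include <linux/jump_label.h>

        static struct jump_label_key example_key;   /* hypothetical user of the new support */

        static void example_fast_path(void)
        {
                /* compiles to the 6-byte nop; jump_label_inc(&example_key)
                 * patches it into a taken "brcl 15,<offset>" branch */
                if (static_branch(&example_key))
                        printk(KERN_DEBUG "slow path enabled\n");
        }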
*/ #ifdef CONFIG_64BIT - tsk = *(struct task_struct **) param64; + tsk = (struct task_struct *) param64; #else - tsk = *(struct task_struct **) param32; + tsk = (struct task_struct *) param32; #endif if (subcode & 0x0080) { diff --git a/trunk/arch/s390/mm/pageattr.c b/trunk/arch/s390/mm/pageattr.c index 122ffbd08ce0..f05edcc3beff 100644 --- a/trunk/arch/s390/mm/pageattr.c +++ b/trunk/arch/s390/mm/pageattr.c @@ -24,12 +24,13 @@ static void change_page_attr(unsigned long addr, int numpages, WARN_ON_ONCE(1); continue; } - ptep = pte_offset_kernel(pmdp, addr + i * PAGE_SIZE); + ptep = pte_offset_kernel(pmdp, addr); pte = *ptep; pte = set(pte); - ptep_invalidate(&init_mm, addr + i * PAGE_SIZE, ptep); + ptep_invalidate(&init_mm, addr, ptep); *ptep = pte; + addr += PAGE_SIZE; } } @@ -53,3 +54,8 @@ int set_memory_nx(unsigned long addr, int numpages) return 0; } EXPORT_SYMBOL_GPL(set_memory_nx); + +int set_memory_x(unsigned long addr, int numpages) +{ + return 0; +} diff --git a/trunk/arch/s390/oprofile/hwsampler.c b/trunk/arch/s390/oprofile/hwsampler.c index 4952872d6f0a..33cbd373cce4 100644 --- a/trunk/arch/s390/oprofile/hwsampler.c +++ b/trunk/arch/s390/oprofile/hwsampler.c @@ -1021,20 +1021,14 @@ int hwsampler_deallocate() return rc; } -long hwsampler_query_min_interval(void) +unsigned long hwsampler_query_min_interval(void) { - if (min_sampler_rate) - return min_sampler_rate; - else - return -EINVAL; + return min_sampler_rate; } -long hwsampler_query_max_interval(void) +unsigned long hwsampler_query_max_interval(void) { - if (max_sampler_rate) - return max_sampler_rate; - else - return -EINVAL; + return max_sampler_rate; } unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu) diff --git a/trunk/arch/s390/oprofile/hwsampler.h b/trunk/arch/s390/oprofile/hwsampler.h index 8c72b59316b5..1912f3bb190c 100644 --- a/trunk/arch/s390/oprofile/hwsampler.h +++ b/trunk/arch/s390/oprofile/hwsampler.h @@ -102,8 +102,8 @@ int hwsampler_setup(void); int hwsampler_shutdown(void); int hwsampler_allocate(unsigned long sdbt, unsigned long sdb); int hwsampler_deallocate(void); -long hwsampler_query_min_interval(void); -long hwsampler_query_max_interval(void); +unsigned long hwsampler_query_min_interval(void); +unsigned long hwsampler_query_max_interval(void); int hwsampler_start_all(unsigned long interval); int hwsampler_stop_all(void); int hwsampler_deactivate(unsigned int cpu); diff --git a/trunk/arch/s390/oprofile/init.c b/trunk/arch/s390/oprofile/init.c index c63d7e58352b..5995e9bc72d9 100644 --- a/trunk/arch/s390/oprofile/init.c +++ b/trunk/arch/s390/oprofile/init.c @@ -145,15 +145,11 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops) * create hwsampler files only if hwsampler_setup() succeeds. 
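[Reviewer note] The pageattr.c hunk above makes the pmd/pte lookup, the invalidation, and the rewrite all use one address that advances each iteration, instead of mixing addr with addr + i * PAGE_SIZE. A compressed sketch of the corrected loop shape; lookup_pte() stands in for the pgd/pud/pmd walk elided here and is hypothetical:

        static void example_change_attr(unsigned long addr, int numpages,
                                        pte_t (*set)(pte_t))
        {
                int i;

                for (i = 0; i < numpages; i++) {
                        pte_t *ptep = lookup_pte(addr);   /* hypothetical page-table walk */
                        pte_t pte = set(*ptep);

                        ptep_invalidate(&init_mm, addr, ptep);  /* flush old translation */
                        *ptep = pte;
                        addr += PAGE_SIZE;   /* the fix: advance for the next lookup too */
                }
        }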
*/ oprofile_min_interval = hwsampler_query_min_interval(); - if (oprofile_min_interval < 0) { - oprofile_min_interval = 0; + if (oprofile_min_interval == 0) return -ENODEV; - } oprofile_max_interval = hwsampler_query_max_interval(); - if (oprofile_max_interval < 0) { - oprofile_max_interval = 0; + if (oprofile_max_interval == 0) return -ENODEV; - } if (oprofile_timer_init(ops)) return -ENODEV; diff --git a/trunk/arch/sh/Kconfig b/trunk/arch/sh/Kconfig index 4b89da248d17..bc439de48cd1 100644 --- a/trunk/arch/sh/Kconfig +++ b/trunk/arch/sh/Kconfig @@ -24,7 +24,6 @@ config SUPERH select RTC_LIB select GENERIC_ATOMIC64 select GENERIC_IRQ_SHOW - select ARCH_NO_SYSDEV_OPS help The SuperH is a RISC processor targeted for use in embedded systems and consumer electronics; it was also used in the Sega Dreamcast diff --git a/trunk/arch/sh/configs/apsh4ad0a_defconfig b/trunk/arch/sh/configs/apsh4ad0a_defconfig index e71a531f1e31..77ec0e7b8ddf 100644 --- a/trunk/arch/sh/configs/apsh4ad0a_defconfig +++ b/trunk/arch/sh/configs/apsh4ad0a_defconfig @@ -48,7 +48,6 @@ CONFIG_PREEMPT=y CONFIG_BINFMT_MISC=y CONFIG_PM=y CONFIG_PM_DEBUG=y -CONFIG_PM_VERBOSE=y CONFIG_PM_RUNTIME=y CONFIG_CPU_IDLE=y CONFIG_NET=y diff --git a/trunk/arch/sh/configs/sdk7786_defconfig b/trunk/arch/sh/configs/sdk7786_defconfig index dc4a2eb6a616..c41650572d79 100644 --- a/trunk/arch/sh/configs/sdk7786_defconfig +++ b/trunk/arch/sh/configs/sdk7786_defconfig @@ -83,7 +83,6 @@ CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y CONFIG_BINFMT_MISC=y CONFIG_PM=y CONFIG_PM_DEBUG=y -CONFIG_PM_VERBOSE=y CONFIG_PM_RUNTIME=y CONFIG_CPU_IDLE=y CONFIG_NET=y diff --git a/trunk/arch/sh/kernel/cpu/sh4/sq.c b/trunk/arch/sh/kernel/cpu/sh4/sq.c index 14726eef1ce0..f0907995b4c9 100644 --- a/trunk/arch/sh/kernel/cpu/sh4/sq.c +++ b/trunk/arch/sh/kernel/cpu/sh4/sq.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include diff --git a/trunk/arch/sh/kernel/cpu/shmobile/pm_runtime.c b/trunk/arch/sh/kernel/cpu/shmobile/pm_runtime.c index 6dcb8166a64d..22db127afa7b 100644 --- a/trunk/arch/sh/kernel/cpu/shmobile/pm_runtime.c +++ b/trunk/arch/sh/kernel/cpu/shmobile/pm_runtime.c @@ -139,7 +139,7 @@ void platform_pm_runtime_suspend_idle(void) queue_work(pm_wq, &hwblk_work); } -int platform_pm_runtime_suspend(struct device *dev) +static int default_platform_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct pdev_archdata *ad = &pdev->archdata; @@ -147,7 +147,7 @@ int platform_pm_runtime_suspend(struct device *dev) int hwblk = ad->hwblk_id; int ret = 0; - dev_dbg(dev, "platform_pm_runtime_suspend() [%d]\n", hwblk); + dev_dbg(dev, "%s() [%d]\n", __func__, hwblk); /* ignore off-chip platform devices */ if (!hwblk) @@ -183,20 +183,20 @@ int platform_pm_runtime_suspend(struct device *dev) mutex_unlock(&ad->mutex); out: - dev_dbg(dev, "platform_pm_runtime_suspend() [%d] returns %d\n", - hwblk, ret); + dev_dbg(dev, "%s() [%d] returns %d\n", + __func__, hwblk, ret); return ret; } -int platform_pm_runtime_resume(struct device *dev) +static int default_platform_runtime_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct pdev_archdata *ad = &pdev->archdata; int hwblk = ad->hwblk_id; int ret = 0; - dev_dbg(dev, "platform_pm_runtime_resume() [%d]\n", hwblk); + dev_dbg(dev, "%s() [%d]\n", __func__, hwblk); /* ignore off-chip platform devices */ if (!hwblk) @@ -228,19 +228,19 @@ int platform_pm_runtime_resume(struct device *dev) */ mutex_unlock(&ad->mutex); out: - dev_dbg(dev, 
"platform_pm_runtime_resume() [%d] returns %d\n", - hwblk, ret); + dev_dbg(dev, "%s() [%d] returns %d\n", + __func__, hwblk, ret); return ret; } -int platform_pm_runtime_idle(struct device *dev) +static int default_platform_runtime_idle(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); int hwblk = pdev->archdata.hwblk_id; int ret = 0; - dev_dbg(dev, "platform_pm_runtime_idle() [%d]\n", hwblk); + dev_dbg(dev, "%s() [%d]\n", __func__, hwblk); /* ignore off-chip platform devices */ if (!hwblk) @@ -252,10 +252,19 @@ int platform_pm_runtime_idle(struct device *dev) /* suspend synchronously to disable clocks immediately */ ret = pm_runtime_suspend(dev); out: - dev_dbg(dev, "platform_pm_runtime_idle() [%d] done!\n", hwblk); + dev_dbg(dev, "%s() [%d] done!\n", __func__, hwblk); return ret; } +static struct dev_power_domain default_power_domain = { + .ops = { + .runtime_suspend = default_platform_runtime_suspend, + .runtime_resume = default_platform_runtime_resume, + .runtime_idle = default_platform_runtime_idle, + USE_PLATFORM_PM_SLEEP_OPS + }, +}; + static int platform_bus_notify(struct notifier_block *nb, unsigned long action, void *data) { @@ -276,6 +285,7 @@ static int platform_bus_notify(struct notifier_block *nb, hwblk_disable(hwblk_info, hwblk); /* make sure driver re-inits itself once */ __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); + dev->pwr_domain = &default_power_domain; break; /* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */ case BUS_NOTIFY_BOUND_DRIVER: @@ -289,6 +299,7 @@ static int platform_bus_notify(struct notifier_block *nb, __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); break; case BUS_NOTIFY_DEL_DEVICE: + dev->pwr_domain = NULL; break; } return 0; diff --git a/trunk/arch/sh/kernel/ptrace_32.c b/trunk/arch/sh/kernel/ptrace_32.c index 2130ca674e9b..3d7b209b2178 100644 --- a/trunk/arch/sh/kernel/ptrace_32.c +++ b/trunk/arch/sh/kernel/ptrace_32.c @@ -117,7 +117,11 @@ void user_enable_single_step(struct task_struct *child) set_tsk_thread_flag(child, TIF_SINGLESTEP); + if (ptrace_get_breakpoints(child) < 0) + return; + set_single_step(child, pc); + ptrace_put_breakpoints(child); } void user_disable_single_step(struct task_struct *child) diff --git a/trunk/arch/sh/kernel/smp.c b/trunk/arch/sh/kernel/smp.c index 509b36b45115..6207561ea34a 100644 --- a/trunk/arch/sh/kernel/smp.c +++ b/trunk/arch/sh/kernel/smp.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -323,6 +324,7 @@ void smp_message_recv(unsigned int msg) generic_smp_call_function_interrupt(); break; case SMP_MSG_RESCHEDULE: + scheduler_ipi(); break; case SMP_MSG_FUNCTION_SINGLE: generic_smp_call_function_single_interrupt(); diff --git a/trunk/arch/sh/kernel/traps_32.c b/trunk/arch/sh/kernel/traps_32.c index 3484c2f65aba..b51a17104b5f 100644 --- a/trunk/arch/sh/kernel/traps_32.c +++ b/trunk/arch/sh/kernel/traps_32.c @@ -87,7 +87,6 @@ void die(const char * str, struct pt_regs * regs, long err) bust_spinlocks(1); printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); - sysfs_printk_last_file(); print_modules(); show_regs(regs); diff --git a/trunk/arch/sparc/include/asm/jump_label.h b/trunk/arch/sparc/include/asm/jump_label.h index 427d4684e0d2..fc73a82366f8 100644 --- a/trunk/arch/sparc/include/asm/jump_label.h +++ b/trunk/arch/sparc/include/asm/jump_label.h @@ -7,17 +7,20 @@ #define JUMP_LABEL_NOP_SIZE 4 -#define JUMP_LABEL(key, label) \ - do { \ - asm goto("1:\n\t" \ - "nop\n\t" \ - "nop\n\t" \ - ".pushsection 
__jump_table, \"a\"\n\t"\ - ".align 4\n\t" \ - ".word 1b, %l[" #label "], %c0\n\t" \ - ".popsection \n\t" \ - : : "i" (key) : : label);\ - } while (0) +static __always_inline bool arch_static_branch(struct jump_label_key *key) +{ + asm goto("1:\n\t" + "nop\n\t" + "nop\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".align 4\n\t" + ".word 1b, %l[l_yes], %c0\n\t" + ".popsection \n\t" + : : "i" (key) : : l_yes); + return false; +l_yes: + return true; +} #endif /* __KERNEL__ */ diff --git a/trunk/arch/sparc/include/asm/topology_64.h b/trunk/arch/sparc/include/asm/topology_64.h index 1c79f32734a0..8b9c556d630b 100644 --- a/trunk/arch/sparc/include/asm/topology_64.h +++ b/trunk/arch/sparc/include/asm/topology_64.h @@ -65,6 +65,10 @@ static inline int pcibus_to_node(struct pci_bus *pbus) #define smt_capable() (sparc64_multi_core) #endif /* CONFIG_SMP */ -#define cpu_coregroup_mask(cpu) (&cpu_core_map[cpu]) +extern cpumask_t cpu_core_map[NR_CPUS]; +static inline const struct cpumask *cpu_coregroup_mask(int cpu) +{ + return &cpu_core_map[cpu]; +} #endif /* _ASM_SPARC64_TOPOLOGY_H */ diff --git a/trunk/arch/sparc/kernel/apc.c b/trunk/arch/sparc/kernel/apc.c index f679c57644d5..1e34f29e58bb 100644 --- a/trunk/arch/sparc/kernel/apc.c +++ b/trunk/arch/sparc/kernel/apc.c @@ -165,7 +165,7 @@ static int __devinit apc_probe(struct platform_device *op) return 0; } -static struct of_device_id __initdata apc_match[] = { +static struct of_device_id apc_match[] = { { .name = APC_OBPNAME, }, diff --git a/trunk/arch/sparc/kernel/pci_sabre.c b/trunk/arch/sparc/kernel/pci_sabre.c index 948068a083fc..d1840dbdaa2f 100644 --- a/trunk/arch/sparc/kernel/pci_sabre.c +++ b/trunk/arch/sparc/kernel/pci_sabre.c @@ -452,8 +452,10 @@ static void __devinit sabre_pbm_init(struct pci_pbm_info *pbm, sabre_scan_bus(pbm, &op->dev); } +static const struct of_device_id sabre_match[]; static int __devinit sabre_probe(struct platform_device *op) { + const struct of_device_id *match; const struct linux_prom64_registers *pr_regs; struct device_node *dp = op->dev.of_node; struct pci_pbm_info *pbm; @@ -463,7 +465,8 @@ static int __devinit sabre_probe(struct platform_device *op) const u32 *vdma; u64 clear_irq; - hummingbird_p = op->dev.of_match && (op->dev.of_match->data != NULL); + match = of_match_device(sabre_match, &op->dev); + hummingbird_p = match && (match->data != NULL); if (!hummingbird_p) { struct device_node *cpu_dp; diff --git a/trunk/arch/sparc/kernel/pci_schizo.c b/trunk/arch/sparc/kernel/pci_schizo.c index fecfcb2063c8..283fbc329a43 100644 --- a/trunk/arch/sparc/kernel/pci_schizo.c +++ b/trunk/arch/sparc/kernel/pci_schizo.c @@ -1458,11 +1458,15 @@ static int __devinit __schizo_init(struct platform_device *op, unsigned long chi return err; } +static const struct of_device_id schizo_match[]; static int __devinit schizo_probe(struct platform_device *op) { - if (!op->dev.of_match) + const struct of_device_id *match; + + match = of_match_device(schizo_match, &op->dev); + if (!match) return -EINVAL; - return __schizo_init(op, (unsigned long) op->dev.of_match->data); + return __schizo_init(op, (unsigned long)match->data); } /* The ordering of this table is very important. 
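[Reviewer note] The sabre and schizo probe hunks above stop trusting op->dev.of_match and instead look the entry up explicitly with of_match_device() against a forward-declared match table. A sketch of that probe pattern with hypothetical names (example_match, example_init); only of_match_device() and the match->data usage are taken from the patch:

        static const struct of_device_id example_match[];

        static int __devinit example_probe(struct platform_device *op)
        {
                const struct of_device_id *match;

                match = of_match_device(example_match, &op->dev);
                if (!match)
                        return -EINVAL;

                /* match->data carries the per-variant data, as in schizo_probe() */
                return example_init(op, (unsigned long)match->data);
        }

        static const struct of_device_id example_match[] = {
                { .compatible = "example,device-a", .data = (void *)0 },
                { .compatible = "example,device-b", .data = (void *)1 },
                {},
        };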
Some Tomatillo diff --git a/trunk/arch/sparc/kernel/pmc.c b/trunk/arch/sparc/kernel/pmc.c index 93d7b4465f8d..6a585d393580 100644 --- a/trunk/arch/sparc/kernel/pmc.c +++ b/trunk/arch/sparc/kernel/pmc.c @@ -69,7 +69,7 @@ static int __devinit pmc_probe(struct platform_device *op) return 0; } -static struct of_device_id __initdata pmc_match[] = { +static struct of_device_id pmc_match[] = { { .name = PMC_OBPNAME, }, diff --git a/trunk/arch/sparc/kernel/smp_32.c b/trunk/arch/sparc/kernel/smp_32.c index 91c10fb70858..442286d83435 100644 --- a/trunk/arch/sparc/kernel/smp_32.c +++ b/trunk/arch/sparc/kernel/smp_32.c @@ -53,6 +53,7 @@ cpumask_t smp_commenced_mask = CPU_MASK_NONE; void __cpuinit smp_store_cpu_info(int id) { int cpu_node; + int mid; cpu_data(id).udelay_val = loops_per_jiffy; @@ -60,10 +61,13 @@ void __cpuinit smp_store_cpu_info(int id) cpu_data(id).clock_tick = prom_getintdefault(cpu_node, "clock-frequency", 0); cpu_data(id).prom_node = cpu_node; - cpu_data(id).mid = cpu_get_hwmid(cpu_node); + mid = cpu_get_hwmid(cpu_node); - if (cpu_data(id).mid < 0) - panic("No MID found for CPU%d at node 0x%08d", id, cpu_node); + if (mid < 0) { + printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08d", id, cpu_node); + mid = 0; + } + cpu_data(id).mid = mid; } void __init smp_cpus_done(unsigned int max_cpus) @@ -125,7 +129,9 @@ struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 }; void smp_send_reschedule(int cpu) { - /* See sparc64 */ + /* + * XXX missing reschedule IPI, see scheduler_ipi() + */ } void smp_send_stop(void) diff --git a/trunk/arch/sparc/kernel/smp_64.c b/trunk/arch/sparc/kernel/smp_64.c index 3e94a8c23238..9478da7fdb3e 100644 --- a/trunk/arch/sparc/kernel/smp_64.c +++ b/trunk/arch/sparc/kernel/smp_64.c @@ -1368,6 +1368,7 @@ void smp_send_reschedule(int cpu) void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs) { clear_softint(1 << irq); + scheduler_ipi(); } /* This is a nop because we capture all other cpus diff --git a/trunk/arch/sparc/kernel/time_32.c b/trunk/arch/sparc/kernel/time_32.c index 4e236391b635..96046a4024c2 100644 --- a/trunk/arch/sparc/kernel/time_32.c +++ b/trunk/arch/sparc/kernel/time_32.c @@ -168,7 +168,7 @@ static int __devinit clock_probe(struct platform_device *op) return 0; } -static struct of_device_id __initdata clock_match[] = { +static struct of_device_id clock_match[] = { { .name = "eeprom", }, diff --git a/trunk/arch/sparc/lib/checksum_32.S b/trunk/arch/sparc/lib/checksum_32.S index 3632cb34e914..0084c3361e15 100644 --- a/trunk/arch/sparc/lib/checksum_32.S +++ b/trunk/arch/sparc/lib/checksum_32.S @@ -289,10 +289,16 @@ cc_end_cruft: /* Also, handle the alignment code out of band. */ cc_dword_align: - cmp %g1, 6 - bl,a ccte + cmp %g1, 16 + bge 1f + srl %g1, 1, %o3 +2: cmp %o3, 0 + be,a ccte andcc %g1, 0xf, %o3 - andcc %o0, 0x1, %g0 + andcc %o3, %o0, %g0 ! Check %o0 only (%o1 has the same last 2 bits) + be,a 2b + srl %o3, 1, %o3 +1: andcc %o0, 0x1, %g0 bne ccslow andcc %o0, 0x2, %g0 be 1f diff --git a/trunk/arch/tile/kernel/smp.c b/trunk/arch/tile/kernel/smp.c index a4293102ef81..c52224d5ed45 100644 --- a/trunk/arch/tile/kernel/smp.c +++ b/trunk/arch/tile/kernel/smp.c @@ -189,12 +189,8 @@ void flush_icache_range(unsigned long start, unsigned long end) /* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */ static irqreturn_t handle_reschedule_ipi(int irq, void *token) { - /* - * Nothing to do here; when we return from interrupt, the - * rescheduling will occur there. 
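[Reviewer note] The tile, sparc, sh, s390 and um hunks in this series all converge on the same change: the reschedule IPI handler must now call scheduler_ipi() itself rather than relying on return-from-interrupt to do the work. The common shape, sketched with the tile-style per-cpu stats bump (irq_stat is per-arch and shown here only as an example):

        #include <linux/sched.h>
        #include <linux/interrupt.h>

        static irqreturn_t example_resched_ipi(int irq, void *token)
        {
                __get_cpu_var(irq_stat).irq_resched_count++;  /* keep the stats bump */
                scheduler_ipi();                              /* new explicit entry point */
                return IRQ_HANDLED;
        }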
But do bump the interrupt - * profiler count in the meantime. - */ __get_cpu_var(irq_stat).irq_resched_count++; + scheduler_ipi(); return IRQ_HANDLED; } diff --git a/trunk/arch/um/Kconfig.um b/trunk/arch/um/Kconfig.um index 90a438acbfaf..b5e675e370c6 100644 --- a/trunk/arch/um/Kconfig.um +++ b/trunk/arch/um/Kconfig.um @@ -47,7 +47,7 @@ config HOSTFS config HPPFS tristate "HoneyPot ProcFS (EXPERIMENTAL)" - depends on EXPERIMENTAL + depends on EXPERIMENTAL && PROC_FS help hppfs (HoneyPot ProcFS) is a filesystem which allows UML /proc entries to be overridden, removed, or fabricated from the host. diff --git a/trunk/arch/um/Kconfig.x86 b/trunk/arch/um/Kconfig.x86 index 02fb017fed47..a9da516a5274 100644 --- a/trunk/arch/um/Kconfig.x86 +++ b/trunk/arch/um/Kconfig.x86 @@ -4,6 +4,10 @@ menu "UML-specific options" menu "Host processor type and features" +config CMPXCHG_LOCAL + bool + default n + source "arch/x86/Kconfig.cpu" endmenu diff --git a/trunk/arch/um/include/asm/bug.h b/trunk/arch/um/include/asm/bug.h new file mode 100644 index 000000000000..9e33b864c359 --- /dev/null +++ b/trunk/arch/um/include/asm/bug.h @@ -0,0 +1,6 @@ +#ifndef __UM_BUG_H +#define __UM_BUG_H + +#include + +#endif diff --git a/trunk/arch/um/include/asm/thread_info.h b/trunk/arch/um/include/asm/thread_info.h index e2cf786bda0a..5bd1bad33fab 100644 --- a/trunk/arch/um/include/asm/thread_info.h +++ b/trunk/arch/um/include/asm/thread_info.h @@ -49,7 +49,10 @@ static inline struct thread_info *current_thread_info(void) { struct thread_info *ti; unsigned long mask = THREAD_SIZE - 1; - ti = (struct thread_info *) (((unsigned long) &ti) & ~mask); + void *p; + + asm volatile ("" : "=r" (p) : "0" (&ti)); + ti = (struct thread_info *) (((unsigned long)p) & ~mask); return ti; } diff --git a/trunk/arch/um/kernel/smp.c b/trunk/arch/um/kernel/smp.c index 106bf27e2a9a..eefb107d2d73 100644 --- a/trunk/arch/um/kernel/smp.c +++ b/trunk/arch/um/kernel/smp.c @@ -173,7 +173,7 @@ void IPI_handler(int cpu) break; case 'R': - set_tsk_need_resched(current); + scheduler_ipi(); break; case 'S': diff --git a/trunk/arch/um/os-Linux/util.c b/trunk/arch/um/os-Linux/util.c index 6ea77979531c..42827cafa6af 100644 --- a/trunk/arch/um/os-Linux/util.c +++ b/trunk/arch/um/os-Linux/util.c @@ -5,6 +5,7 @@ #include #include +#include #include #include #include @@ -75,6 +76,26 @@ void setup_hostinfo(char *buf, int len) host.release, host.version, host.machine); } +/* + * We cannot use glibc's abort(). It makes use of tgkill() which + * has no effect within UML's kernel threads. + * After that glibc would execute an invalid instruction to kill + * the calling process and UML crashes with SIGSEGV. 
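[Reviewer note] On the um thread_info hunk above: the empty asm exists, as far as I can tell, to launder the address of a local through an opaque operand so the compiler cannot reason about (or warn on) computing a value from the address of the variable being assigned; the masked result is then the base of the THREAD_SIZE-aligned stack. The idiom in isolation, with hypothetical names and hedged intent:

        /* Treat 'addr' as opaque so &local survives as "some address on this stack". */
        static inline unsigned long launder_stack_address(void)
        {
                int local;
                void *addr;

                asm volatile ("" : "=r" (addr) : "0" (&local));
                return (unsigned long)addr & ~(THREAD_SIZE - 1);  /* stack base */
        }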
+ */ +static inline void __attribute__ ((noreturn)) uml_abort(void) +{ + sigset_t sig; + + fflush(NULL); + + if (!sigemptyset(&sig) && !sigaddset(&sig, SIGABRT)) + sigprocmask(SIG_UNBLOCK, &sig, 0); + + for (;;) + if (kill(getpid(), SIGABRT) < 0) + exit(127); +} + void os_dump_core(void) { int pid; @@ -116,5 +137,5 @@ void os_dump_core(void) while ((pid = waitpid(-1, NULL, WNOHANG | __WALL)) > 0) os_kill_ptraced_process(pid, 0); - abort(); + uml_abort(); } diff --git a/trunk/arch/um/sys-i386/Makefile b/trunk/arch/um/sys-i386/Makefile index 804b28dd0328..b1da91c1b200 100644 --- a/trunk/arch/um/sys-i386/Makefile +++ b/trunk/arch/um/sys-i386/Makefile @@ -4,7 +4,7 @@ obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \ ptrace_user.o setjmp.o signal.o stub.o stub_segv.o syscalls.o sysrq.o \ - sys_call_table.o tls.o + sys_call_table.o tls.o atomic64_cx8_32.o obj-$(CONFIG_BINFMT_ELF) += elfcore.o diff --git a/trunk/arch/um/sys-i386/atomic64_cx8_32.S b/trunk/arch/um/sys-i386/atomic64_cx8_32.S new file mode 100644 index 000000000000..1e901d3d4a95 --- /dev/null +++ b/trunk/arch/um/sys-i386/atomic64_cx8_32.S @@ -0,0 +1,225 @@ +/* + * atomic64_t for 586+ + * + * Copied from arch/x86/lib/atomic64_cx8_32.S + * + * Copyright © 2010 Luca Barbieri + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#include +#include +#include + +.macro SAVE reg + pushl_cfi %\reg + CFI_REL_OFFSET \reg, 0 +.endm + +.macro RESTORE reg + popl_cfi %\reg + CFI_RESTORE \reg +.endm + +.macro read64 reg + movl %ebx, %eax + movl %ecx, %edx +/* we need LOCK_PREFIX since otherwise cmpxchg8b always does the write */ + LOCK_PREFIX + cmpxchg8b (\reg) +.endm + +ENTRY(atomic64_read_cx8) + CFI_STARTPROC + + read64 %ecx + ret + CFI_ENDPROC +ENDPROC(atomic64_read_cx8) + +ENTRY(atomic64_set_cx8) + CFI_STARTPROC + +1: +/* we don't need LOCK_PREFIX since aligned 64-bit writes + * are atomic on 586 and newer */ + cmpxchg8b (%esi) + jne 1b + + ret + CFI_ENDPROC +ENDPROC(atomic64_set_cx8) + +ENTRY(atomic64_xchg_cx8) + CFI_STARTPROC + + movl %ebx, %eax + movl %ecx, %edx +1: + LOCK_PREFIX + cmpxchg8b (%esi) + jne 1b + + ret + CFI_ENDPROC +ENDPROC(atomic64_xchg_cx8) + +.macro addsub_return func ins insc +ENTRY(atomic64_\func\()_return_cx8) + CFI_STARTPROC + SAVE ebp + SAVE ebx + SAVE esi + SAVE edi + + movl %eax, %esi + movl %edx, %edi + movl %ecx, %ebp + + read64 %ebp +1: + movl %eax, %ebx + movl %edx, %ecx + \ins\()l %esi, %ebx + \insc\()l %edi, %ecx + LOCK_PREFIX + cmpxchg8b (%ebp) + jne 1b + +10: + movl %ebx, %eax + movl %ecx, %edx + RESTORE edi + RESTORE esi + RESTORE ebx + RESTORE ebp + ret + CFI_ENDPROC +ENDPROC(atomic64_\func\()_return_cx8) +.endm + +addsub_return add add adc +addsub_return sub sub sbb + +.macro incdec_return func ins insc +ENTRY(atomic64_\func\()_return_cx8) + CFI_STARTPROC + SAVE ebx + + read64 %esi +1: + movl %eax, %ebx + movl %edx, %ecx + \ins\()l $1, %ebx + \insc\()l $0, %ecx + LOCK_PREFIX + cmpxchg8b (%esi) + jne 1b + +10: + movl %ebx, %eax + movl %ecx, %edx + RESTORE ebx + ret + CFI_ENDPROC +ENDPROC(atomic64_\func\()_return_cx8) +.endm + +incdec_return inc add adc +incdec_return dec sub sbb + +ENTRY(atomic64_dec_if_positive_cx8) + CFI_STARTPROC + SAVE ebx + + read64 %esi +1: + movl %eax, %ebx + movl %edx, %ecx + subl $1, %ebx + sbb $0, %ecx + js 2f + LOCK_PREFIX + cmpxchg8b (%esi) + 
jne 1b + +2: + movl %ebx, %eax + movl %ecx, %edx + RESTORE ebx + ret + CFI_ENDPROC +ENDPROC(atomic64_dec_if_positive_cx8) + +ENTRY(atomic64_add_unless_cx8) + CFI_STARTPROC + SAVE ebp + SAVE ebx +/* these just push these two parameters on the stack */ + SAVE edi + SAVE esi + + movl %ecx, %ebp + movl %eax, %esi + movl %edx, %edi + + read64 %ebp +1: + cmpl %eax, 0(%esp) + je 4f +2: + movl %eax, %ebx + movl %edx, %ecx + addl %esi, %ebx + adcl %edi, %ecx + LOCK_PREFIX + cmpxchg8b (%ebp) + jne 1b + + movl $1, %eax +3: + addl $8, %esp + CFI_ADJUST_CFA_OFFSET -8 + RESTORE ebx + RESTORE ebp + ret +4: + cmpl %edx, 4(%esp) + jne 2b + xorl %eax, %eax + jmp 3b + CFI_ENDPROC +ENDPROC(atomic64_add_unless_cx8) + +ENTRY(atomic64_inc_not_zero_cx8) + CFI_STARTPROC + SAVE ebx + + read64 %esi +1: + testl %eax, %eax + je 4f +2: + movl %eax, %ebx + movl %edx, %ecx + addl $1, %ebx + adcl $0, %ecx + LOCK_PREFIX + cmpxchg8b (%esi) + jne 1b + + movl $1, %eax +3: + RESTORE ebx + ret +4: + testl %edx, %edx + jne 2b + jmp 3b + CFI_ENDPROC +ENDPROC(atomic64_inc_not_zero_cx8) diff --git a/trunk/arch/unicore32/kernel/irq.c b/trunk/arch/unicore32/kernel/irq.c index 2aa30a364bbe..d4efa7d679ff 100644 --- a/trunk/arch/unicore32/kernel/irq.c +++ b/trunk/arch/unicore32/kernel/irq.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include @@ -237,7 +237,7 @@ static struct puv3_irq_state { unsigned int iccr; } puv3_irq_state; -static int puv3_irq_suspend(struct sys_device *dev, pm_message_t state) +static int puv3_irq_suspend(void) { struct puv3_irq_state *st = &puv3_irq_state; @@ -265,7 +265,7 @@ static int puv3_irq_suspend(struct sys_device *dev, pm_message_t state) return 0; } -static int puv3_irq_resume(struct sys_device *dev) +static void puv3_irq_resume(void) { struct puv3_irq_state *st = &puv3_irq_state; @@ -278,27 +278,20 @@ static int puv3_irq_resume(struct sys_device *dev) writel(st->icmr, INTC_ICMR); } - return 0; } -static struct sysdev_class puv3_irq_sysclass = { - .name = "pkunity-irq", +static struct syscore_ops puv3_irq_syscore_ops = { .suspend = puv3_irq_suspend, .resume = puv3_irq_resume, }; -static struct sys_device puv3_irq_device = { - .id = 0, - .cls = &puv3_irq_sysclass, -}; - -static int __init puv3_irq_init_devicefs(void) +static int __init puv3_irq_init_syscore(void) { - sysdev_class_register(&puv3_irq_sysclass); - return sysdev_register(&puv3_irq_device); + register_syscore_ops(&puv3_irq_syscore_ops); + return 0; } -device_initcall(puv3_irq_init_devicefs); +device_initcall(puv3_irq_init_syscore); void __init init_IRQ(void) { diff --git a/trunk/arch/unicore32/kernel/traps.c b/trunk/arch/unicore32/kernel/traps.c index 254e36fa9513..b9a26465e728 100644 --- a/trunk/arch/unicore32/kernel/traps.c +++ b/trunk/arch/unicore32/kernel/traps.c @@ -192,7 +192,6 @@ static int __die(const char *str, int err, struct thread_info *thread, printk(KERN_EMERG "Internal error: %s: %x [#%d]\n", str, err, ++die_counter); - sysfs_printk_last_file(); /* trap and error numbers are mostly meaningless on UniCore */ ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, \ diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig index 2096cf180648..880fcb6c86f4 100644 --- a/trunk/arch/x86/Kconfig +++ b/trunk/arch/x86/Kconfig @@ -8,6 +8,7 @@ config 64BIT config X86_32 def_bool !64BIT + select CLKSRC_I8253 config X86_64 def_bool 64BIT @@ -71,7 +72,6 @@ config X86 select GENERIC_IRQ_SHOW select IRQ_FORCED_THREADING select USE_GENERIC_SMP_HELPERS if SMP - select ARCH_NO_SYSDEV_OPS select 
HAVE_BPF_JIT if (X86_64 && NET) config INSTRUCTION_DECODER @@ -113,7 +113,14 @@ config MMU def_bool y config ZONE_DMA - def_bool y + bool "DMA memory allocation support" if EXPERT + default y + help + DMA memory allocation support allows devices with less than 32-bit + addressing to allocate within the first 16MB of address space. + Disable if no such devices will be used. + + If unsure, say Y. config SBUS bool @@ -366,17 +373,6 @@ config X86_UV # Following is an alphabetically sorted list of 32 bit extended platforms # Please maintain the alphabetic order if and when there are additions -config X86_ELAN - bool "AMD Elan" - depends on X86_32 - depends on X86_EXTENDED_PLATFORM - ---help--- - Select this for an AMD Elan processor. - - Do not use this option for K6/Athlon/Opteron processors! - - If unsure, choose "PC-compatible" instead. - config X86_INTEL_CE bool "CE4100 TV platform" depends on PCI @@ -691,6 +687,7 @@ config AMD_IOMMU bool "AMD IOMMU support" select SWIOTLB select PCI_MSI + select PCI_IOV depends on X86_64 && PCI && ACPI ---help--- With this option you can enable support for AMD IOMMU hardware in @@ -1175,7 +1172,7 @@ comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI" config AMD_NUMA def_bool y prompt "Old style AMD Opteron NUMA detection" - depends on X86_64 && NUMA && PCI + depends on NUMA && PCI ---help--- Enable AMD NUMA node topology detection. You should say Y here if you have a multi processor AMD system. This uses an old method to @@ -1202,7 +1199,7 @@ config NODES_SPAN_OTHER_NODES config NUMA_EMU bool "NUMA emulation" - depends on X86_64 && NUMA + depends on NUMA ---help--- Enable NUMA emulation. A flat machine will be split into virtual nodes when booted with "numa=fake=N", where N is the @@ -1224,6 +1221,10 @@ config HAVE_ARCH_BOOTMEM def_bool y depends on X86_32 && NUMA +config HAVE_ARCH_ALLOC_REMAP + def_bool y + depends on X86_32 && NUMA + config ARCH_HAVE_MEMORY_PRESENT def_bool y depends on X86_32 && DISCONTIGMEM @@ -1232,13 +1233,9 @@ config NEED_NODE_MEMMAP_SIZE def_bool y depends on X86_32 && (DISCONTIGMEM || SPARSEMEM) -config HAVE_ARCH_ALLOC_REMAP - def_bool y - depends on X86_32 && NUMA - config ARCH_FLATMEM_ENABLE def_bool y - depends on X86_32 && ARCH_SELECT_MEMORY_MODEL && !NUMA + depends on X86_32 && !NUMA config ARCH_DISCONTIGMEM_ENABLE def_bool y @@ -1248,20 +1245,16 @@ config ARCH_DISCONTIGMEM_DEFAULT def_bool y depends on NUMA && X86_32 -config ARCH_PROC_KCORE_TEXT - def_bool y - depends on X86_64 && PROC_KCORE - -config ARCH_SPARSEMEM_DEFAULT - def_bool y - depends on X86_64 - config ARCH_SPARSEMEM_ENABLE def_bool y depends on X86_64 || NUMA || (EXPERIMENTAL && X86_32) || X86_32_NON_STANDARD select SPARSEMEM_STATIC if X86_32 select SPARSEMEM_VMEMMAP_ENABLE if X86_64 +config ARCH_SPARSEMEM_DEFAULT + def_bool y + depends on X86_64 + config ARCH_SELECT_MEMORY_MODEL def_bool y depends on ARCH_SPARSEMEM_ENABLE @@ -1270,6 +1263,10 @@ config ARCH_MEMORY_PROBE def_bool X86_64 depends on MEMORY_HOTPLUG +config ARCH_PROC_KCORE_TEXT + def_bool y + depends on X86_64 && PROC_KCORE + config ILLEGAL_POINTER_VALUE hex default 0 if X86_32 @@ -1704,10 +1701,6 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE def_bool y depends on MEMORY_HOTPLUG -config HAVE_ARCH_EARLY_PFN_TO_NID - def_bool X86_64 - depends on NUMA - config USE_PERCPU_NUMA_NODE_ID def_bool y depends on NUMA @@ -1849,7 +1842,7 @@ config APM_ALLOW_INTS endif # APM -source "arch/x86/kernel/cpu/cpufreq/Kconfig" +source "drivers/cpufreq/Kconfig" source "drivers/cpuidle/Kconfig" @@ -2077,7 +2070,7 
@@ config OLPC depends on !X86_PAE select GPIOLIB select OF - select OF_PROMTREE if PROC_DEVICETREE + select OF_PROMTREE ---help--- Add support for detecting the unique features of the OLPC XO hardware. diff --git a/trunk/arch/x86/Kconfig.cpu b/trunk/arch/x86/Kconfig.cpu index d161e939df62..6a7cfdf8ff69 100644 --- a/trunk/arch/x86/Kconfig.cpu +++ b/trunk/arch/x86/Kconfig.cpu @@ -1,6 +1,4 @@ # Put here option for CPU selection and depending optimization -if !X86_ELAN - choice prompt "Processor family" default M686 if X86_32 @@ -203,6 +201,14 @@ config MWINCHIP3D stores for this CPU, which can increase performance of some operations. +config MELAN + bool "AMD Elan" + depends on X86_32 + ---help--- + Select this for an AMD Elan processor. + + Do not use this option for K6/Athlon/Opteron processors! + config MGEODEGX1 bool "GeodeGX1" depends on X86_32 @@ -292,8 +298,6 @@ config X86_GENERIC This is really intended for distributors who need more generic optimizations. -endif - # # Define implied options from the CPU selection here config X86_INTERNODE_CACHE_SHIFT @@ -312,7 +316,7 @@ config X86_L1_CACHE_SHIFT int default "7" if MPENTIUM4 || MPSC default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU - default "4" if X86_ELAN || M486 || M386 || MGEODEGX1 + default "4" if MELAN || M486 || M386 || MGEODEGX1 default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX config X86_XADD @@ -358,7 +362,7 @@ config X86_POPAD_OK config X86_ALIGNMENT_16 def_bool y - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 config X86_INTEL_USERCOPY def_bool y diff --git a/trunk/arch/x86/Makefile_32.cpu b/trunk/arch/x86/Makefile_32.cpu index f2ee1abb1df9..86cee7b749e1 100644 --- a/trunk/arch/x86/Makefile_32.cpu +++ b/trunk/arch/x86/Makefile_32.cpu @@ -37,7 +37,7 @@ cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom,$(call cc-option,-march= $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic)) # AMD Elan support -cflags-$(CONFIG_X86_ELAN) += -march=i486 +cflags-$(CONFIG_MELAN) += -march=i486 # Geode GX1 support cflags-$(CONFIG_MGEODEGX1) += -march=pentium-mmx diff --git a/trunk/arch/x86/boot/memory.c b/trunk/arch/x86/boot/memory.c index cae3feb1035e..db75d07c3645 100644 --- a/trunk/arch/x86/boot/memory.c +++ b/trunk/arch/x86/boot/memory.c @@ -91,7 +91,7 @@ static int detect_memory_e801(void) if (oreg.ax > 15*1024) { return -1; /* Bogus! 
*/ } else if (oreg.ax == 15*1024) { - boot_params.alt_mem_k = (oreg.dx << 6) + oreg.ax; + boot_params.alt_mem_k = (oreg.bx << 6) + oreg.ax; } else { /* * This ignores memory above 16MB if we have a memory diff --git a/trunk/arch/x86/include/asm/acpi.h b/trunk/arch/x86/include/asm/acpi.h index 12e0e7dd869c..416d865eae39 100644 --- a/trunk/arch/x86/include/asm/acpi.h +++ b/trunk/arch/x86/include/asm/acpi.h @@ -183,8 +183,6 @@ static inline void disable_acpi(void) { } #define ARCH_HAS_POWER_INIT 1 -struct bootnode; - #ifdef CONFIG_ACPI_NUMA extern int acpi_numa; extern int x86_acpi_numa_init(void); diff --git a/trunk/arch/x86/include/asm/alternative-asm.h b/trunk/arch/x86/include/asm/alternative-asm.h index a63a68be1cce..94d420b360d1 100644 --- a/trunk/arch/x86/include/asm/alternative-asm.h +++ b/trunk/arch/x86/include/asm/alternative-asm.h @@ -15,4 +15,13 @@ .endm #endif +.macro altinstruction_entry orig alt feature orig_len alt_len + .align 8 + .quad \orig + .quad \alt + .word \feature + .byte \orig_len + .byte \alt_len +.endm + #endif /* __ASSEMBLY__ */ diff --git a/trunk/arch/x86/include/asm/alternative.h b/trunk/arch/x86/include/asm/alternative.h index 13009d1af99a..bf535f947e8c 100644 --- a/trunk/arch/x86/include/asm/alternative.h +++ b/trunk/arch/x86/include/asm/alternative.h @@ -4,7 +4,6 @@ #include #include #include -#include #include /* @@ -191,12 +190,4 @@ extern void *text_poke(void *addr, const void *opcode, size_t len); extern void *text_poke_smp(void *addr, const void *opcode, size_t len); extern void text_poke_smp_batch(struct text_poke_param *params, int n); -#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) -#define IDEAL_NOP_SIZE_5 5 -extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5]; -extern void arch_init_ideal_nop5(void); -#else -static inline void arch_init_ideal_nop5(void) {} -#endif - #endif /* _ASM_X86_ALTERNATIVE_H */ diff --git a/trunk/arch/x86/include/asm/amd_iommu_proto.h b/trunk/arch/x86/include/asm/amd_iommu_proto.h index 916bc8111a01..55d95eb789b3 100644 --- a/trunk/arch/x86/include/asm/amd_iommu_proto.h +++ b/trunk/arch/x86/include/asm/amd_iommu_proto.h @@ -19,13 +19,12 @@ #ifndef _ASM_X86_AMD_IOMMU_PROTO_H #define _ASM_X86_AMD_IOMMU_PROTO_H -struct amd_iommu; +#include extern int amd_iommu_init_dma_ops(void); extern int amd_iommu_init_passthrough(void); +extern irqreturn_t amd_iommu_int_thread(int irq, void *data); extern irqreturn_t amd_iommu_int_handler(int irq, void *data); -extern void amd_iommu_flush_all_domains(void); -extern void amd_iommu_flush_all_devices(void); extern void amd_iommu_apply_erratum_63(u16 devid); extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); extern int amd_iommu_init_devices(void); @@ -44,4 +43,12 @@ static inline bool is_rd890_iommu(struct pci_dev *pdev) (pdev->device == PCI_DEVICE_ID_RD890_IOMMU); } +static inline bool iommu_feature(struct amd_iommu *iommu, u64 f) +{ + if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) + return false; + + return !!(iommu->features & f); +} + #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */ diff --git a/trunk/arch/x86/include/asm/amd_iommu_types.h b/trunk/arch/x86/include/asm/amd_iommu_types.h index e3509fc303bf..4c9982995414 100644 --- a/trunk/arch/x86/include/asm/amd_iommu_types.h +++ b/trunk/arch/x86/include/asm/amd_iommu_types.h @@ -68,12 +68,25 @@ #define MMIO_CONTROL_OFFSET 0x0018 #define MMIO_EXCL_BASE_OFFSET 0x0020 #define MMIO_EXCL_LIMIT_OFFSET 0x0028 +#define MMIO_EXT_FEATURES 0x0030 #define MMIO_CMD_HEAD_OFFSET 0x2000 #define MMIO_CMD_TAIL_OFFSET 0x2008 #define 
MMIO_EVT_HEAD_OFFSET 0x2010 #define MMIO_EVT_TAIL_OFFSET 0x2018 #define MMIO_STATUS_OFFSET 0x2020 + +/* Extended Feature Bits */ +#define FEATURE_PREFETCH (1ULL<<0) +#define FEATURE_PPR (1ULL<<1) +#define FEATURE_X2APIC (1ULL<<2) +#define FEATURE_NX (1ULL<<3) +#define FEATURE_GT (1ULL<<4) +#define FEATURE_IA (1ULL<<6) +#define FEATURE_GA (1ULL<<7) +#define FEATURE_HE (1ULL<<8) +#define FEATURE_PC (1ULL<<9) + /* MMIO status bits */ #define MMIO_STATUS_COM_WAIT_INT_MASK 0x04 @@ -113,7 +126,9 @@ /* command specific defines */ #define CMD_COMPL_WAIT 0x01 #define CMD_INV_DEV_ENTRY 0x02 -#define CMD_INV_IOMMU_PAGES 0x03 +#define CMD_INV_IOMMU_PAGES 0x03 +#define CMD_INV_IOTLB_PAGES 0x04 +#define CMD_INV_ALL 0x08 #define CMD_COMPL_WAIT_STORE_MASK 0x01 #define CMD_COMPL_WAIT_INT_MASK 0x02 @@ -215,6 +230,8 @@ #define IOMMU_PTE_IR (1ULL << 61) #define IOMMU_PTE_IW (1ULL << 62) +#define DTE_FLAG_IOTLB 0x01 + #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P) #define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK)) @@ -227,6 +244,7 @@ /* IOMMU capabilities */ #define IOMMU_CAP_IOTLB 24 #define IOMMU_CAP_NPCACHE 26 +#define IOMMU_CAP_EFR 27 #define MAX_DOMAIN_ID 65536 @@ -249,6 +267,8 @@ extern bool amd_iommu_dump; /* global flag if IOMMUs cache non-present entries */ extern bool amd_iommu_np_cache; +/* Only true if all IOMMUs support device IOTLBs */ +extern bool amd_iommu_iotlb_sup; /* * Make iterating over all IOMMUs easier @@ -371,6 +391,9 @@ struct amd_iommu { /* flags read from acpi table */ u8 acpi_flags; + /* Extended features */ + u64 features; + /* * Capability pointer. There could be more than one IOMMU per PCI * device function if there are more than one AMD IOMMU capability @@ -409,9 +432,6 @@ struct amd_iommu { /* if one, we need to send a completion wait command */ bool need_sync; - /* becomes true if a command buffer reset is running */ - bool reset_in_progress; - /* default dma_ops domain for that IOMMU */ struct dma_ops_domain *default_dom; diff --git a/trunk/arch/x86/include/asm/amd_nb.h b/trunk/arch/x86/include/asm/amd_nb.h index 331682231bb4..67f87f257611 100644 --- a/trunk/arch/x86/include/asm/amd_nb.h +++ b/trunk/arch/x86/include/asm/amd_nb.h @@ -11,7 +11,6 @@ struct amd_nb_bus_dev_range { extern const struct pci_device_id amd_nb_misc_ids[]; extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[]; -struct bootnode; extern bool early_is_amd_nb(u32 value); extern int amd_cache_northbridges(void); diff --git a/trunk/arch/x86/include/asm/apic.h b/trunk/arch/x86/include/asm/apic.h index 2b7d573be549..a0c46f061210 100644 --- a/trunk/arch/x86/include/asm/apic.h +++ b/trunk/arch/x86/include/asm/apic.h @@ -363,7 +363,12 @@ struct apic { */ int (*x86_32_early_logical_apicid)(int cpu); - /* determine CPU -> NUMA node mapping */ + /* + * Optional method called from setup_local_APIC() after logical + * apicid is guaranteed to be known to initialize apicid -> node + * mapping if NUMA initialization hasn't done so already. Don't + * add new users. 
+ */ int (*x86_32_numa_cpu_node)(int cpu); #endif }; @@ -537,8 +542,6 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb) return cpuid_apic >> index_msb; } -extern int default_x86_32_numa_cpu_node(int cpu); - #endif static inline unsigned int diff --git a/trunk/arch/x86/include/asm/apicdef.h b/trunk/arch/x86/include/asm/apicdef.h index d87988bacf3e..34595d5e1038 100644 --- a/trunk/arch/x86/include/asm/apicdef.h +++ b/trunk/arch/x86/include/asm/apicdef.h @@ -78,6 +78,7 @@ #define APIC_DEST_LOGICAL 0x00800 #define APIC_DEST_PHYSICAL 0x00000 #define APIC_DM_FIXED 0x00000 +#define APIC_DM_FIXED_MASK 0x00700 #define APIC_DM_LOWEST 0x00100 #define APIC_DM_SMI 0x00200 #define APIC_DM_REMRD 0x00300 diff --git a/trunk/arch/x86/include/asm/bios_ebda.h b/trunk/arch/x86/include/asm/bios_ebda.h index 3c7521063d3f..aa6a3170ab5a 100644 --- a/trunk/arch/x86/include/asm/bios_ebda.h +++ b/trunk/arch/x86/include/asm/bios_ebda.h @@ -4,16 +4,40 @@ #include /* - * there is a real-mode segmented pointer pointing to the - * 4K EBDA area at 0x40E. + * Returns physical address of EBDA. Returns 0 if there is no EBDA. */ static inline unsigned int get_bios_ebda(void) { + /* + * There is a real-mode segmented pointer pointing to the + * 4K EBDA area at 0x40E. + */ unsigned int address = *(unsigned short *)phys_to_virt(0x40E); address <<= 4; return address; /* 0 means none */ } +/* + * Return the sanitized length of the EBDA in bytes, if it exists. + */ +static inline unsigned int get_bios_ebda_length(void) +{ + unsigned int address; + unsigned int length; + + address = get_bios_ebda(); + if (!address) + return 0; + + /* EBDA length is byte 0 of the EBDA (stored in KiB) */ + length = *(unsigned char *)phys_to_virt(address); + length <<= 10; + + /* Trim the length if it extends beyond 640KiB */ + length = min_t(unsigned int, (640 * 1024) - address, length); + return length; +} + void reserve_ebda_region(void); #ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION diff --git a/trunk/arch/x86/include/asm/cpufeature.h b/trunk/arch/x86/include/asm/cpufeature.h index 91f3e087cf21..5dc6acc98dbd 100644 --- a/trunk/arch/x86/include/asm/cpufeature.h +++ b/trunk/arch/x86/include/asm/cpufeature.h @@ -195,6 +195,8 @@ /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ +#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */ +#define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */ #if defined(__KERNEL__) && !defined(__ASSEMBLY__) @@ -207,8 +209,7 @@ extern const char * const x86_power_flags[32]; #define test_cpu_cap(c, bit) \ test_bit(bit, (unsigned long *)((c)->x86_capability)) -#define cpu_has(c, bit) \ - (__builtin_constant_p(bit) && \ +#define REQUIRED_MASK_BIT_SET(bit) \ ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \ (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \ (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \ @@ -218,10 +219,16 @@ extern const char * const x86_power_flags[32]; (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \ (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) || \ (((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) || \ - (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) ) \ - ? 1 : \ + (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) ) + +#define cpu_has(c, bit) \ + (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 
1 : \ test_cpu_cap(c, bit)) +#define this_cpu_has(bit) \ + (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \ + x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability)) + #define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit) #define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability)) diff --git a/trunk/arch/x86/include/asm/dma.h b/trunk/arch/x86/include/asm/dma.h index 057099e5faba..0bdb0c54d9a1 100644 --- a/trunk/arch/x86/include/asm/dma.h +++ b/trunk/arch/x86/include/asm/dma.h @@ -69,22 +69,18 @@ #define MAX_DMA_CHANNELS 8 -#ifdef CONFIG_X86_32 - -/* The maximum address that we can perform a DMA transfer to on this platform */ -#define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x1000000) - -#else - /* 16MB ISA DMA zone */ #define MAX_DMA_PFN ((16 * 1024 * 1024) >> PAGE_SHIFT) /* 4GB broken PCI/AGP hardware bus master zone */ #define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT) +#ifdef CONFIG_X86_32 +/* The maximum address that we can perform a DMA transfer to on this platform */ +#define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x1000000) +#else /* Compat define for old dma zone */ #define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT)) - #endif /* 8237 DMA controllers */ diff --git a/trunk/arch/x86/include/asm/efi.h b/trunk/arch/x86/include/asm/efi.h index 8e4a16508d4e..7093e4a6a0bc 100644 --- a/trunk/arch/x86/include/asm/efi.h +++ b/trunk/arch/x86/include/asm/efi.h @@ -90,6 +90,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, #endif /* CONFIG_X86_32 */ extern int add_efi_memmap; +extern void efi_set_executable(efi_memory_desc_t *md, bool executable); extern void efi_memblock_x86_reserve_range(void); extern void efi_call_phys_prelog(void); extern void efi_call_phys_epilog(void); diff --git a/trunk/arch/x86/include/asm/ftrace.h b/trunk/arch/x86/include/asm/ftrace.h index db24c2278be0..268c783ab1c0 100644 --- a/trunk/arch/x86/include/asm/ftrace.h +++ b/trunk/arch/x86/include/asm/ftrace.h @@ -38,11 +38,10 @@ extern void mcount(void); static inline unsigned long ftrace_call_adjust(unsigned long addr) { /* - * call mcount is "e8 <4 byte offset>" - * The addr points to the 4 byte offset and the caller of this - * function wants the pointer to e8. Simply subtract one. + * addr is the address of the mcount call instruction. + * recordmcount does the necessary offset calculation. */ - return addr - 1; + return addr; } #ifdef CONFIG_DYNAMIC_FTRACE diff --git a/trunk/arch/x86/include/asm/gart.h b/trunk/arch/x86/include/asm/gart.h index 43085bfc99c3..156cd5d18d2a 100644 --- a/trunk/arch/x86/include/asm/gart.h +++ b/trunk/arch/x86/include/asm/gart.h @@ -66,7 +66,7 @@ static inline void gart_set_size_and_enable(struct pci_dev *dev, u32 order) * Don't enable translation but enable GART IO and CPU accesses. * Also, set DISTLBWALKPRB since GART tables memory is UC. */ - ctl = DISTLBWALKPRB | order << 1; + ctl = order << 1; pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl); } @@ -75,17 +75,17 @@ static inline void enable_gart_translation(struct pci_dev *dev, u64 addr) { u32 tmp, ctl; - /* address of the mappings table */ - addr >>= 12; - tmp = (u32) addr<<4; - tmp &= ~0xf; - pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp); - - /* Enable GART translation for this hammer. 
*/ - pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl); - ctl |= GARTEN; - ctl &= ~(DISGARTCPU | DISGARTIO); - pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl); + /* address of the mappings table */ + addr >>= 12; + tmp = (u32) addr<<4; + tmp &= ~0xf; + pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp); + + /* Enable GART translation for this hammer. */ + pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl); + ctl |= GARTEN | DISTLBWALKPRB; + ctl &= ~(DISGARTCPU | DISGARTIO); + pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl); } static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size) diff --git a/trunk/arch/x86/include/asm/i8253.h b/trunk/arch/x86/include/asm/i8253.h index fc1f579fb965..65aaa91d5850 100644 --- a/trunk/arch/x86/include/asm/i8253.h +++ b/trunk/arch/x86/include/asm/i8253.h @@ -6,6 +6,8 @@ #define PIT_CH0 0x40 #define PIT_CH2 0x42 +#define PIT_LATCH LATCH + extern raw_spinlock_t i8253_lock; extern struct clock_event_device *global_clock_event; diff --git a/trunk/arch/x86/include/asm/io_apic.h b/trunk/arch/x86/include/asm/io_apic.h index c4bd267dfc50..a97a240f67f3 100644 --- a/trunk/arch/x86/include/asm/io_apic.h +++ b/trunk/arch/x86/include/asm/io_apic.h @@ -150,7 +150,7 @@ void setup_IO_APIC_irq_extra(u32 gsi); extern void ioapic_and_gsi_init(void); extern void ioapic_insert_resources(void); -int io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr); +int io_apic_setup_irq_pin_once(unsigned int irq, int node, struct io_apic_irq_attr *attr); extern struct IO_APIC_route_entry **alloc_ioapic_entries(void); extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries); diff --git a/trunk/arch/x86/include/asm/jump_label.h b/trunk/arch/x86/include/asm/jump_label.h index 574dbc22893a..a32b18ce6ead 100644 --- a/trunk/arch/x86/include/asm/jump_label.h +++ b/trunk/arch/x86/include/asm/jump_label.h @@ -5,20 +5,25 @@ #include #include +#include #define JUMP_LABEL_NOP_SIZE 5 -# define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t" - -# define JUMP_LABEL(key, label) \ - do { \ - asm goto("1:" \ - JUMP_LABEL_INITIAL_NOP \ - ".pushsection __jump_table, \"aw\" \n\t"\ - _ASM_PTR "1b, %l[" #label "], %c0 \n\t" \ - ".popsection \n\t" \ - : : "i" (key) : : label); \ - } while (0) +#define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t" + +static __always_inline bool arch_static_branch(struct jump_label_key *key) +{ + asm goto("1:" + JUMP_LABEL_INITIAL_NOP + ".pushsection __jump_table, \"aw\" \n\t" + _ASM_ALIGN "\n\t" + _ASM_PTR "1b, %l[l_yes], %c0 \n\t" + ".popsection \n\t" + : : "i" (key) : : l_yes); + return false; +l_yes: + return true; +} #endif /* __KERNEL__ */ diff --git a/trunk/arch/x86/include/asm/mce.h b/trunk/arch/x86/include/asm/mce.h index eb16e94ae04f..021979a6e23f 100644 --- a/trunk/arch/x86/include/asm/mce.h +++ b/trunk/arch/x86/include/asm/mce.h @@ -142,8 +142,6 @@ static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {} static inline void enable_p5_mce(void) {} #endif -extern void (*x86_mce_decode_callback)(struct mce *m); - void mce_setup(struct mce *m); void mce_log(struct mce *m); DECLARE_PER_CPU(struct sys_device, mce_dev); diff --git a/trunk/arch/x86/include/asm/mmzone_32.h b/trunk/arch/x86/include/asm/mmzone_32.h index 91df7c51806c..5e83a416eca8 100644 --- a/trunk/arch/x86/include/asm/mmzone_32.h +++ b/trunk/arch/x86/include/asm/mmzone_32.h @@ -13,31 +13,11 @@ extern struct pglist_data *node_data[]; #define NODE_DATA(nid) (node_data[nid]) #include -/* summit or 
generic arch */ -#include - -extern int get_memcfg_numa_flat(void); -/* - * This allows any one NUMA architecture to be compiled - * for, and still fall back to the flat function if it - * fails. - */ -static inline void get_memcfg_numa(void) -{ - - if (get_memcfg_numaq()) - return; - if (get_memcfg_from_srat()) - return; - get_memcfg_numa_flat(); -} extern void resume_map_numa_kva(pgd_t *pgd); #else /* !CONFIG_NUMA */ -#define get_memcfg_numa get_memcfg_numa_flat - static inline void resume_map_numa_kva(pgd_t *pgd) {} #endif /* CONFIG_NUMA */ diff --git a/trunk/arch/x86/include/asm/mmzone_64.h b/trunk/arch/x86/include/asm/mmzone_64.h index 288b96f815a6..b3f88d7867c7 100644 --- a/trunk/arch/x86/include/asm/mmzone_64.h +++ b/trunk/arch/x86/include/asm/mmzone_64.h @@ -4,36 +4,13 @@ #ifndef _ASM_X86_MMZONE_64_H #define _ASM_X86_MMZONE_64_H - #ifdef CONFIG_NUMA #include - #include -/* Simple perfect hash to map physical addresses to node numbers */ -struct memnode { - int shift; - unsigned int mapsize; - s16 *map; - s16 embedded_map[64 - 8]; -} ____cacheline_aligned; /* total size = 128 bytes */ -extern struct memnode memnode; -#define memnode_shift memnode.shift -#define memnodemap memnode.map -#define memnodemapsize memnode.mapsize - extern struct pglist_data *node_data[]; -static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) -{ - unsigned nid; - VIRTUAL_BUG_ON(!memnodemap); - nid = memnodemap[addr >> memnode_shift]; - VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]); - return nid; -} - #define NODE_DATA(nid) (node_data[nid]) #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) diff --git a/trunk/arch/x86/include/asm/module.h b/trunk/arch/x86/include/asm/module.h index 67763c5d8b4e..9eae7752ae9b 100644 --- a/trunk/arch/x86/include/asm/module.h +++ b/trunk/arch/x86/include/asm/module.h @@ -35,7 +35,7 @@ #define MODULE_PROC_FAMILY "K7 " #elif defined CONFIG_MK8 #define MODULE_PROC_FAMILY "K8 " -#elif defined CONFIG_X86_ELAN +#elif defined CONFIG_MELAN #define MODULE_PROC_FAMILY "ELAN " #elif defined CONFIG_MCRUSOE #define MODULE_PROC_FAMILY "CRUSOE " diff --git a/trunk/arch/x86/include/asm/msr-index.h b/trunk/arch/x86/include/asm/msr-index.h index fd5a1f365c95..3cce71413d0b 100644 --- a/trunk/arch/x86/include/asm/msr-index.h +++ b/trunk/arch/x86/include/asm/msr-index.h @@ -96,11 +96,15 @@ #define MSR_IA32_MC0_ADDR 0x00000402 #define MSR_IA32_MC0_MISC 0x00000403 +#define MSR_AMD64_MC0_MASK 0xc0010044 + #define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) #define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x)) #define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x)) #define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x)) +#define MSR_AMD64_MCx_MASK(x) (MSR_AMD64_MC0_MASK + (x)) + /* These are consecutive and not in the normal 4er MCE bank block */ #define MSR_IA32_MC0_CTL2 0x00000280 #define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x)) diff --git a/trunk/arch/x86/include/asm/nops.h b/trunk/arch/x86/include/asm/nops.h index af788496020b..405b4032a60b 100644 --- a/trunk/arch/x86/include/asm/nops.h +++ b/trunk/arch/x86/include/asm/nops.h @@ -1,7 +1,13 @@ #ifndef _ASM_X86_NOPS_H #define _ASM_X86_NOPS_H -/* Define nops for use with alternative() */ +/* + * Define nops for use with alternative() and for tracing. + * + * *_NOP5_ATOMIC must be a single instruction. 
+ */ + +#define NOP_DS_PREFIX 0x3e /* generic versions from gas 1: nop @@ -13,14 +19,15 @@ 6: leal 0x00000000(%esi),%esi 7: leal 0x00000000(,%esi,1),%esi */ -#define GENERIC_NOP1 ".byte 0x90\n" -#define GENERIC_NOP2 ".byte 0x89,0xf6\n" -#define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n" -#define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n" -#define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4 -#define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n" -#define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n" -#define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7 +#define GENERIC_NOP1 0x90 +#define GENERIC_NOP2 0x89,0xf6 +#define GENERIC_NOP3 0x8d,0x76,0x00 +#define GENERIC_NOP4 0x8d,0x74,0x26,0x00 +#define GENERIC_NOP5 GENERIC_NOP1,GENERIC_NOP4 +#define GENERIC_NOP6 0x8d,0xb6,0x00,0x00,0x00,0x00 +#define GENERIC_NOP7 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00 +#define GENERIC_NOP8 GENERIC_NOP1,GENERIC_NOP7 +#define GENERIC_NOP5_ATOMIC NOP_DS_PREFIX,GENERIC_NOP4 /* Opteron 64bit nops 1: nop @@ -29,13 +36,14 @@ 4: osp osp osp nop */ #define K8_NOP1 GENERIC_NOP1 -#define K8_NOP2 ".byte 0x66,0x90\n" -#define K8_NOP3 ".byte 0x66,0x66,0x90\n" -#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n" -#define K8_NOP5 K8_NOP3 K8_NOP2 -#define K8_NOP6 K8_NOP3 K8_NOP3 -#define K8_NOP7 K8_NOP4 K8_NOP3 -#define K8_NOP8 K8_NOP4 K8_NOP4 +#define K8_NOP2 0x66,K8_NOP1 +#define K8_NOP3 0x66,K8_NOP2 +#define K8_NOP4 0x66,K8_NOP3 +#define K8_NOP5 K8_NOP3,K8_NOP2 +#define K8_NOP6 K8_NOP3,K8_NOP3 +#define K8_NOP7 K8_NOP4,K8_NOP3 +#define K8_NOP8 K8_NOP4,K8_NOP4 +#define K8_NOP5_ATOMIC 0x66,K8_NOP4 /* K7 nops uses eax dependencies (arbitrary choice) @@ -47,13 +55,14 @@ 7: leal 0x00000000(,%eax,1),%eax */ #define K7_NOP1 GENERIC_NOP1 -#define K7_NOP2 ".byte 0x8b,0xc0\n" -#define K7_NOP3 ".byte 0x8d,0x04,0x20\n" -#define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n" -#define K7_NOP5 K7_NOP4 ASM_NOP1 -#define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n" -#define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n" -#define K7_NOP8 K7_NOP7 ASM_NOP1 +#define K7_NOP2 0x8b,0xc0 +#define K7_NOP3 0x8d,0x04,0x20 +#define K7_NOP4 0x8d,0x44,0x20,0x00 +#define K7_NOP5 K7_NOP4,K7_NOP1 +#define K7_NOP6 0x8d,0x80,0,0,0,0 +#define K7_NOP7 0x8D,0x04,0x05,0,0,0,0 +#define K7_NOP8 K7_NOP7,K7_NOP1 +#define K7_NOP5_ATOMIC NOP_DS_PREFIX,K7_NOP4 /* P6 nops uses eax dependencies (Intel-recommended choice) @@ -69,52 +78,65 @@ There is kernel code that depends on this. 
*/ #define P6_NOP1 GENERIC_NOP1 -#define P6_NOP2 ".byte 0x66,0x90\n" -#define P6_NOP3 ".byte 0x0f,0x1f,0x00\n" -#define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n" -#define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n" -#define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n" -#define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n" -#define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n" +#define P6_NOP2 0x66,0x90 +#define P6_NOP3 0x0f,0x1f,0x00 +#define P6_NOP4 0x0f,0x1f,0x40,0 +#define P6_NOP5 0x0f,0x1f,0x44,0x00,0 +#define P6_NOP6 0x66,0x0f,0x1f,0x44,0x00,0 +#define P6_NOP7 0x0f,0x1f,0x80,0,0,0,0 +#define P6_NOP8 0x0f,0x1f,0x84,0x00,0,0,0,0 +#define P6_NOP5_ATOMIC P6_NOP5 + +#define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n" #if defined(CONFIG_MK7) -#define ASM_NOP1 K7_NOP1 -#define ASM_NOP2 K7_NOP2 -#define ASM_NOP3 K7_NOP3 -#define ASM_NOP4 K7_NOP4 -#define ASM_NOP5 K7_NOP5 -#define ASM_NOP6 K7_NOP6 -#define ASM_NOP7 K7_NOP7 -#define ASM_NOP8 K7_NOP8 +#define ASM_NOP1 _ASM_MK_NOP(K7_NOP1) +#define ASM_NOP2 _ASM_MK_NOP(K7_NOP2) +#define ASM_NOP3 _ASM_MK_NOP(K7_NOP3) +#define ASM_NOP4 _ASM_MK_NOP(K7_NOP4) +#define ASM_NOP5 _ASM_MK_NOP(K7_NOP5) +#define ASM_NOP6 _ASM_MK_NOP(K7_NOP6) +#define ASM_NOP7 _ASM_MK_NOP(K7_NOP7) +#define ASM_NOP8 _ASM_MK_NOP(K7_NOP8) +#define ASM_NOP5_ATOMIC _ASM_MK_NOP(K7_NOP5_ATOMIC) #elif defined(CONFIG_X86_P6_NOP) -#define ASM_NOP1 P6_NOP1 -#define ASM_NOP2 P6_NOP2 -#define ASM_NOP3 P6_NOP3 -#define ASM_NOP4 P6_NOP4 -#define ASM_NOP5 P6_NOP5 -#define ASM_NOP6 P6_NOP6 -#define ASM_NOP7 P6_NOP7 -#define ASM_NOP8 P6_NOP8 +#define ASM_NOP1 _ASM_MK_NOP(P6_NOP1) +#define ASM_NOP2 _ASM_MK_NOP(P6_NOP2) +#define ASM_NOP3 _ASM_MK_NOP(P6_NOP3) +#define ASM_NOP4 _ASM_MK_NOP(P6_NOP4) +#define ASM_NOP5 _ASM_MK_NOP(P6_NOP5) +#define ASM_NOP6 _ASM_MK_NOP(P6_NOP6) +#define ASM_NOP7 _ASM_MK_NOP(P6_NOP7) +#define ASM_NOP8 _ASM_MK_NOP(P6_NOP8) +#define ASM_NOP5_ATOMIC _ASM_MK_NOP(P6_NOP5_ATOMIC) #elif defined(CONFIG_X86_64) -#define ASM_NOP1 K8_NOP1 -#define ASM_NOP2 K8_NOP2 -#define ASM_NOP3 K8_NOP3 -#define ASM_NOP4 K8_NOP4 -#define ASM_NOP5 K8_NOP5 -#define ASM_NOP6 K8_NOP6 -#define ASM_NOP7 K8_NOP7 -#define ASM_NOP8 K8_NOP8 +#define ASM_NOP1 _ASM_MK_NOP(K8_NOP1) +#define ASM_NOP2 _ASM_MK_NOP(K8_NOP2) +#define ASM_NOP3 _ASM_MK_NOP(K8_NOP3) +#define ASM_NOP4 _ASM_MK_NOP(K8_NOP4) +#define ASM_NOP5 _ASM_MK_NOP(K8_NOP5) +#define ASM_NOP6 _ASM_MK_NOP(K8_NOP6) +#define ASM_NOP7 _ASM_MK_NOP(K8_NOP7) +#define ASM_NOP8 _ASM_MK_NOP(K8_NOP8) +#define ASM_NOP5_ATOMIC _ASM_MK_NOP(K8_NOP5_ATOMIC) #else -#define ASM_NOP1 GENERIC_NOP1 -#define ASM_NOP2 GENERIC_NOP2 -#define ASM_NOP3 GENERIC_NOP3 -#define ASM_NOP4 GENERIC_NOP4 -#define ASM_NOP5 GENERIC_NOP5 -#define ASM_NOP6 GENERIC_NOP6 -#define ASM_NOP7 GENERIC_NOP7 -#define ASM_NOP8 GENERIC_NOP8 +#define ASM_NOP1 _ASM_MK_NOP(GENERIC_NOP1) +#define ASM_NOP2 _ASM_MK_NOP(GENERIC_NOP2) +#define ASM_NOP3 _ASM_MK_NOP(GENERIC_NOP3) +#define ASM_NOP4 _ASM_MK_NOP(GENERIC_NOP4) +#define ASM_NOP5 _ASM_MK_NOP(GENERIC_NOP5) +#define ASM_NOP6 _ASM_MK_NOP(GENERIC_NOP6) +#define ASM_NOP7 _ASM_MK_NOP(GENERIC_NOP7) +#define ASM_NOP8 _ASM_MK_NOP(GENERIC_NOP8) +#define ASM_NOP5_ATOMIC _ASM_MK_NOP(GENERIC_NOP5_ATOMIC) #endif #define ASM_NOP_MAX 8 +#define NOP_ATOMIC5 (ASM_NOP_MAX+1) /* Entry for the 5-byte atomic NOP */ + +#ifndef __ASSEMBLY__ +extern const unsigned char * const *ideal_nops; +extern void arch_init_ideal_nops(void); +#endif #endif /* _ASM_X86_NOPS_H */ diff --git a/trunk/arch/x86/include/asm/numa.h b/trunk/arch/x86/include/asm/numa.h index 3d4dab43c994..bfacd2ccf651 
100644 --- a/trunk/arch/x86/include/asm/numa.h +++ b/trunk/arch/x86/include/asm/numa.h @@ -1,12 +1,24 @@ #ifndef _ASM_X86_NUMA_H #define _ASM_X86_NUMA_H +#include + #include #include #ifdef CONFIG_NUMA #define NR_NODE_MEMBLKS (MAX_NUMNODES*2) +#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT)) + +/* + * Too small node sizes may confuse the VM badly. Usually they + * result from BIOS bugs. So dont recognize nodes as standalone + * NUMA entities that have less than this amount of RAM listed: + */ +#define NODE_MIN_SIZE (4*1024*1024) + +extern int numa_off; /* * __apicid_to_node[] stores the raw mapping between physical apicid and @@ -17,15 +29,27 @@ * numa_cpu_node(). */ extern s16 __apicid_to_node[MAX_LOCAL_APIC]; +extern nodemask_t numa_nodes_parsed __initdata; + +extern int __init numa_add_memblk(int nodeid, u64 start, u64 end); +extern void __init numa_set_distance(int from, int to, int distance); static inline void set_apicid_to_node(int apicid, s16 node) { __apicid_to_node[apicid] = node; } + +extern int __cpuinit numa_cpu_node(int cpu); + #else /* CONFIG_NUMA */ static inline void set_apicid_to_node(int apicid, s16 node) { } + +static inline int numa_cpu_node(int cpu) +{ + return NUMA_NO_NODE; +} #endif /* CONFIG_NUMA */ #ifdef CONFIG_X86_32 @@ -37,21 +61,25 @@ static inline void set_apicid_to_node(int apicid, s16 node) #ifdef CONFIG_NUMA extern void __cpuinit numa_set_node(int cpu, int node); extern void __cpuinit numa_clear_node(int cpu); -extern void __init numa_init_array(void); extern void __init init_cpu_to_node(void); extern void __cpuinit numa_add_cpu(int cpu); extern void __cpuinit numa_remove_cpu(int cpu); #else /* CONFIG_NUMA */ static inline void numa_set_node(int cpu, int node) { } static inline void numa_clear_node(int cpu) { } -static inline void numa_init_array(void) { } static inline void init_cpu_to_node(void) { } static inline void numa_add_cpu(int cpu) { } static inline void numa_remove_cpu(int cpu) { } #endif /* CONFIG_NUMA */ #ifdef CONFIG_DEBUG_PER_CPU_MAPS -struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable); +void debug_cpumask_set_cpu(int cpu, int node, bool enable); #endif +#ifdef CONFIG_NUMA_EMU +#define FAKE_NODE_MIN_SIZE ((u64)32 << 20) +#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL)) +void numa_emu_cmdline(char *); +#endif /* CONFIG_NUMA_EMU */ + #endif /* _ASM_X86_NUMA_H */ diff --git a/trunk/arch/x86/include/asm/numa_32.h b/trunk/arch/x86/include/asm/numa_32.h index c6beed1ef103..e7d6b8254742 100644 --- a/trunk/arch/x86/include/asm/numa_32.h +++ b/trunk/arch/x86/include/asm/numa_32.h @@ -1,16 +1,6 @@ #ifndef _ASM_X86_NUMA_32_H #define _ASM_X86_NUMA_32_H -extern int numa_off; - -extern int pxm_to_nid(int pxm); - -#ifdef CONFIG_NUMA -extern int __cpuinit numa_cpu_node(int cpu); -#else /* CONFIG_NUMA */ -static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; } -#endif /* CONFIG_NUMA */ - #ifdef CONFIG_HIGHMEM extern void set_highmem_pages_init(void); #else diff --git a/trunk/arch/x86/include/asm/numa_64.h b/trunk/arch/x86/include/asm/numa_64.h index 344eb1790b46..0c05f7ae46e8 100644 --- a/trunk/arch/x86/include/asm/numa_64.h +++ b/trunk/arch/x86/include/asm/numa_64.h @@ -1,42 +1,6 @@ #ifndef _ASM_X86_NUMA_64_H #define _ASM_X86_NUMA_64_H -#include - -struct bootnode { - u64 start; - u64 end; -}; - -#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT)) - -extern int numa_off; - extern unsigned long numa_free_all_bootmem(void); -extern void setup_node_bootmem(int nodeid, unsigned long start, - unsigned long end); - 
-#ifdef CONFIG_NUMA -/* - * Too small node sizes may confuse the VM badly. Usually they - * result from BIOS bugs. So dont recognize nodes as standalone - * NUMA entities that have less than this amount of RAM listed: - */ -#define NODE_MIN_SIZE (4*1024*1024) - -extern nodemask_t numa_nodes_parsed __initdata; - -extern int __cpuinit numa_cpu_node(int cpu); -extern int __init numa_add_memblk(int nodeid, u64 start, u64 end); -extern void __init numa_set_distance(int from, int to, int distance); - -#ifdef CONFIG_NUMA_EMU -#define FAKE_NODE_MIN_SIZE ((u64)32 << 20) -#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL)) -void numa_emu_cmdline(char *); -#endif /* CONFIG_NUMA_EMU */ -#else -static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; } -#endif #endif /* _ASM_X86_NUMA_64_H */ diff --git a/trunk/arch/x86/include/asm/numaq.h b/trunk/arch/x86/include/asm/numaq.h index 37c516545ec8..c3b3c322fd87 100644 --- a/trunk/arch/x86/include/asm/numaq.h +++ b/trunk/arch/x86/include/asm/numaq.h @@ -29,7 +29,7 @@ #ifdef CONFIG_X86_NUMAQ extern int found_numaq; -extern int get_memcfg_numaq(void); +extern int numaq_numa_init(void); extern int pci_numaq_init(void); extern void *xquad_portio; @@ -166,11 +166,6 @@ struct sys_cfg_data { void numaq_tsc_disable(void); -#else -static inline int get_memcfg_numaq(void) -{ - return 0; -} #endif /* CONFIG_X86_NUMAQ */ #endif /* _ASM_X86_NUMAQ_H */ diff --git a/trunk/arch/x86/include/asm/olpc_ofw.h b/trunk/arch/x86/include/asm/olpc_ofw.h index c5d3a5abbb9f..24487712e0b1 100644 --- a/trunk/arch/x86/include/asm/olpc_ofw.h +++ b/trunk/arch/x86/include/asm/olpc_ofw.h @@ -26,15 +26,12 @@ extern void setup_olpc_ofw_pgd(void); /* check if OFW was detected during boot */ extern bool olpc_ofw_present(void); +extern void olpc_dt_build_devicetree(void); + #else /* !CONFIG_OLPC */ static inline void olpc_ofw_detect(void) { } static inline void setup_olpc_ofw_pgd(void) { } -#endif /* !CONFIG_OLPC */ - -#ifdef CONFIG_OF_PROMTREE -extern void olpc_dt_build_devicetree(void); -#else static inline void olpc_dt_build_devicetree(void) { } -#endif +#endif /* !CONFIG_OLPC */ #endif /* _ASM_X86_OLPC_OFW_H */ diff --git a/trunk/arch/x86/include/asm/percpu.h b/trunk/arch/x86/include/asm/percpu.h index d475b4398d8b..53278b0dfdf6 100644 --- a/trunk/arch/x86/include/asm/percpu.h +++ b/trunk/arch/x86/include/asm/percpu.h @@ -517,7 +517,7 @@ do { \ typeof(o2) __o2 = o2; \ typeof(o2) __n2 = n2; \ typeof(o2) __dummy; \ - alternative_io("call this_cpu_cmpxchg16b_emu\n\t" P6_NOP4, \ + alternative_io("call this_cpu_cmpxchg16b_emu\n\t" ASM_NOP4, \ "cmpxchg16b " __percpu_prefix "(%%rsi)\n\tsetz %0\n\t", \ X86_FEATURE_CX16, \ ASM_OUTPUT2("=a"(__ret), "=d"(__dummy)), \ @@ -542,6 +542,33 @@ do { \ old__; \ }) +static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr, + const unsigned long __percpu *addr) +{ + unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG; + + return ((1UL << (nr % BITS_PER_LONG)) & percpu_read(*a)) != 0; +} + +static inline int x86_this_cpu_variable_test_bit(int nr, + const unsigned long __percpu *addr) +{ + int oldbit; + + asm volatile("bt "__percpu_arg(2)",%1\n\t" + "sbb %0,%0" + : "=r" (oldbit) + : "m" (*(unsigned long *)addr), "Ir" (nr)); + + return oldbit; +} + +#define x86_this_cpu_test_bit(nr, addr) \ + (__builtin_constant_p((nr)) \ + ? x86_this_cpu_constant_test_bit((nr), (addr)) \ + : x86_this_cpu_variable_test_bit((nr), (addr))) + + #include /* We can use this directly for local CPU (faster). 
*/ diff --git a/trunk/arch/x86/include/asm/pgtable_types.h b/trunk/arch/x86/include/asm/pgtable_types.h index 7db7723d1f32..d56187c6b838 100644 --- a/trunk/arch/x86/include/asm/pgtable_types.h +++ b/trunk/arch/x86/include/asm/pgtable_types.h @@ -299,6 +299,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, /* Install a pte for a particular vaddr in kernel space. */ void set_pte_vaddr(unsigned long vaddr, pte_t pte); +extern void native_pagetable_reserve(u64 start, u64 end); #ifdef CONFIG_X86_32 extern void native_pagetable_setup_start(pgd_t *base); extern void native_pagetable_setup_done(pgd_t *base); diff --git a/trunk/arch/x86/include/asm/probe_roms.h b/trunk/arch/x86/include/asm/probe_roms.h new file mode 100644 index 000000000000..4950a0b1d09c --- /dev/null +++ b/trunk/arch/x86/include/asm/probe_roms.h @@ -0,0 +1,8 @@ +#ifndef _PROBE_ROMS_H_ +#define _PROBE_ROMS_H_ +struct pci_dev; + +extern void __iomem *pci_map_biosrom(struct pci_dev *pdev); +extern void pci_unmap_biosrom(void __iomem *rom); +extern size_t pci_biosrom_size(struct pci_dev *pdev); +#endif diff --git a/trunk/arch/x86/include/asm/processor-flags.h b/trunk/arch/x86/include/asm/processor-flags.h index a898a2b6e10c..59ab4dffa377 100644 --- a/trunk/arch/x86/include/asm/processor-flags.h +++ b/trunk/arch/x86/include/asm/processor-flags.h @@ -60,6 +60,7 @@ #define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */ #define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */ #define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */ +#define X86_CR4_SMEP 0x00100000 /* enable SMEP support */ /* * x86-64 Task Priority Register, CR8 diff --git a/trunk/arch/x86/include/asm/setup.h b/trunk/arch/x86/include/asm/setup.h index db8aa19a08a2..9756551ec760 100644 --- a/trunk/arch/x86/include/asm/setup.h +++ b/trunk/arch/x86/include/asm/setup.h @@ -88,7 +88,7 @@ void *extend_brk(size_t size, size_t align); * executable.) */ #define RESERVE_BRK(name,sz) \ - static void __section(.discard.text) __used \ + static void __section(.discard.text) __used notrace \ __brk_reservation_fn_##name##__(void) { \ asm volatile ( \ ".pushsection .brk_reservation,\"aw\",@nobits;" \ @@ -104,10 +104,10 @@ void *extend_brk(size_t size, size_t align); type *name; \ RESERVE_BRK(name, sizeof(type) * entries) +extern void probe_roms(void); #ifdef __i386__ void __init i386_start_kernel(void); -extern void probe_roms(void); #else void __init x86_64_start_kernel(char *real_mode); diff --git a/trunk/arch/x86/include/asm/srat.h b/trunk/arch/x86/include/asm/srat.h deleted file mode 100644 index b508d639d1a7..000000000000 --- a/trunk/arch/x86/include/asm/srat.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Some of the code in this file has been gleaned from the 64 bit - * discontigmem support code base. - * - * Copyright (C) 2002, IBM Corp. - * - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - * Send feedback to Pat Gaughen - */ - -#ifndef _ASM_X86_SRAT_H -#define _ASM_X86_SRAT_H - -#ifdef CONFIG_ACPI_NUMA -extern int get_memcfg_from_srat(void); -#else -static inline int get_memcfg_from_srat(void) -{ - return 0; -} -#endif - -#endif /* _ASM_X86_SRAT_H */ diff --git a/trunk/arch/x86/include/asm/stacktrace.h b/trunk/arch/x86/include/asm/stacktrace.h index d7e89c83645d..70bbe39043a9 100644 --- a/trunk/arch/x86/include/asm/stacktrace.h +++ b/trunk/arch/x86/include/asm/stacktrace.h @@ -37,9 +37,6 @@ print_context_stack_bp(struct thread_info *tinfo, /* Generic stack tracer with callbacks */ struct stacktrace_ops { - void (*warning)(void *data, char *msg); - /* msg must contain %s for the symbol */ - void (*warning_symbol)(void *data, char *msg, unsigned long symbol); void (*address)(void *data, unsigned long address, int reliable); /* On negative return stop dumping */ int (*stack)(void *data, char *name); diff --git a/trunk/arch/x86/include/asm/system.h b/trunk/arch/x86/include/asm/system.h index 12569e691ce3..c2ff2a1d845e 100644 --- a/trunk/arch/x86/include/asm/system.h +++ b/trunk/arch/x86/include/asm/system.h @@ -303,24 +303,81 @@ static inline void native_wbinvd(void) #ifdef CONFIG_PARAVIRT #include #else -#define read_cr0() (native_read_cr0()) -#define write_cr0(x) (native_write_cr0(x)) -#define read_cr2() (native_read_cr2()) -#define write_cr2(x) (native_write_cr2(x)) -#define read_cr3() (native_read_cr3()) -#define write_cr3(x) (native_write_cr3(x)) -#define read_cr4() (native_read_cr4()) -#define read_cr4_safe() (native_read_cr4_safe()) -#define write_cr4(x) (native_write_cr4(x)) -#define wbinvd() (native_wbinvd()) + +static inline unsigned long read_cr0(void) +{ + return native_read_cr0(); +} + +static inline void write_cr0(unsigned long x) +{ + native_write_cr0(x); +} + +static inline unsigned long read_cr2(void) +{ + return native_read_cr2(); +} + +static inline void write_cr2(unsigned long x) +{ + native_write_cr2(x); +} + +static inline unsigned long read_cr3(void) +{ + return native_read_cr3(); +} + +static inline void write_cr3(unsigned long x) +{ + native_write_cr3(x); +} + +static inline unsigned long read_cr4(void) +{ + return native_read_cr4(); +} + +static inline unsigned long read_cr4_safe(void) +{ + return native_read_cr4_safe(); +} + +static inline void write_cr4(unsigned long x) +{ + native_write_cr4(x); +} + +static inline void wbinvd(void) +{ + native_wbinvd(); +} + #ifdef CONFIG_X86_64 -#define read_cr8() (native_read_cr8()) -#define write_cr8(x) (native_write_cr8(x)) -#define load_gs_index native_load_gs_index + +static inline unsigned long read_cr8(void) +{ + return native_read_cr8(); +} + +static inline void write_cr8(unsigned long x) +{ + native_write_cr8(x); +} + +static inline void load_gs_index(unsigned selector) +{ + native_load_gs_index(selector); +} + #endif /* Clear the 'TS' bit */ -#define clts() (native_clts()) +static inline void clts(void) +{ + native_clts(); +} #endif/* CONFIG_PARAVIRT */ diff --git a/trunk/arch/x86/include/asm/topology.h b/trunk/arch/x86/include/asm/topology.h index 910a7084f7f2..c00692476e9f 100644 --- a/trunk/arch/x86/include/asm/topology.h +++ b/trunk/arch/x86/include/asm/topology.h @@ -93,19 +93,11 @@ extern void setup_node_to_cpumask_map(void); #define pcibus_to_node(bus) __pcibus_to_node(bus) #ifdef CONFIG_X86_32 
-extern unsigned long node_start_pfn[]; -extern unsigned long node_end_pfn[]; -extern unsigned long node_remap_size[]; -#define node_has_online_mem(nid) (node_start_pfn[nid] != node_end_pfn[nid]) - # define SD_CACHE_NICE_TRIES 1 # define SD_IDLE_IDX 1 - #else - # define SD_CACHE_NICE_TRIES 2 # define SD_IDLE_IDX 2 - #endif /* sched_domains SD_NODE_INIT for NUMA machines */ diff --git a/trunk/arch/x86/include/asm/uaccess.h b/trunk/arch/x86/include/asm/uaccess.h index abd3e0ea762a..99ddd148a760 100644 --- a/trunk/arch/x86/include/asm/uaccess.h +++ b/trunk/arch/x86/include/asm/uaccess.h @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include @@ -42,7 +41,7 @@ * Returns 0 if the range is valid, nonzero otherwise. * * This is equivalent to the following test: - * (u33)addr + (u33)size >= (u33)current->addr_limit.seg (u65 for x86_64) + * (u33)addr + (u33)size > (u33)current->addr_limit.seg (u65 for x86_64) * * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry... */ diff --git a/trunk/arch/x86/include/asm/uaccess_32.h b/trunk/arch/x86/include/asm/uaccess_32.h index 088d09fb1615..566e803cc602 100644 --- a/trunk/arch/x86/include/asm/uaccess_32.h +++ b/trunk/arch/x86/include/asm/uaccess_32.h @@ -6,7 +6,6 @@ */ #include #include -#include #include #include #include diff --git a/trunk/arch/x86/include/asm/uaccess_64.h b/trunk/arch/x86/include/asm/uaccess_64.h index 316708d5af92..1c66d30971ad 100644 --- a/trunk/arch/x86/include/asm/uaccess_64.h +++ b/trunk/arch/x86/include/asm/uaccess_64.h @@ -6,7 +6,6 @@ */ #include #include -#include #include #include #include diff --git a/trunk/arch/x86/include/asm/uv/uv_bau.h b/trunk/arch/x86/include/asm/uv/uv_bau.h index 3e094af443c3..130f1eeee5fe 100644 --- a/trunk/arch/x86/include/asm/uv/uv_bau.h +++ b/trunk/arch/x86/include/asm/uv/uv_bau.h @@ -94,6 +94,8 @@ /* after this # consecutive successes, bump up the throttle if it was lowered */ #define COMPLETE_THRESHOLD 5 +#define UV_LB_SUBNODEID 0x10 + /* * number of entries in the destination side payload queue */ @@ -124,7 +126,7 @@ * The distribution specification (32 bytes) is interpreted as a 256-bit * distribution vector. Adjacent bits correspond to consecutive even numbered * nodeIDs. The result of adding the index of a given bit to the 15-bit - * 'base_dest_nodeid' field of the header corresponds to the + * 'base_dest_nasid' field of the header corresponds to the * destination nodeID associated with that specified bit. 
*/ struct bau_target_uvhubmask { @@ -176,7 +178,7 @@ struct bau_msg_payload { struct bau_msg_header { unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ /* bits 5:0 */ - unsigned int base_dest_nodeid:15; /* nasid of the */ + unsigned int base_dest_nasid:15; /* nasid of the */ /* bits 20:6 */ /* first bit in uvhub map */ unsigned int command:8; /* message type */ /* bits 28:21 */ @@ -378,6 +380,10 @@ struct ptc_stats { unsigned long d_rcanceled; /* number of messages canceled by resets */ }; +struct hub_and_pnode { + short uvhub; + short pnode; +}; /* * one per-cpu; to locate the software tables */ @@ -399,10 +405,12 @@ struct bau_control { int baudisabled; int set_bau_off; short cpu; + short osnode; short uvhub_cpu; short uvhub; short cpus_in_socket; short cpus_in_uvhub; + short partition_base_pnode; unsigned short message_number; unsigned short uvhub_quiesce; short socket_acknowledge_count[DEST_Q_SIZE]; @@ -422,15 +430,16 @@ struct bau_control { int congested_period; cycles_t period_time; long period_requests; + struct hub_and_pnode *target_hub_and_pnode; }; static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp) { return constant_test_bit(uvhub, &dstp->bits[0]); } -static inline void bau_uvhub_set(int uvhub, struct bau_target_uvhubmask *dstp) +static inline void bau_uvhub_set(int pnode, struct bau_target_uvhubmask *dstp) { - __set_bit(uvhub, &dstp->bits[0]); + __set_bit(pnode, &dstp->bits[0]); } static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp, int nbits) diff --git a/trunk/arch/x86/include/asm/uv/uv_hub.h b/trunk/arch/x86/include/asm/uv/uv_hub.h index a501741c2335..4298002d0c83 100644 --- a/trunk/arch/x86/include/asm/uv/uv_hub.h +++ b/trunk/arch/x86/include/asm/uv/uv_hub.h @@ -398,6 +398,8 @@ struct uv_blade_info { unsigned short nr_online_cpus; unsigned short pnode; short memory_nid; + spinlock_t nmi_lock; + unsigned long nmi_count; }; extern struct uv_blade_info *uv_blade_info; extern short *uv_node_to_blade; diff --git a/trunk/arch/x86/include/asm/uv/uv_mmrs.h b/trunk/arch/x86/include/asm/uv/uv_mmrs.h index 20cafeac7455..f5bb64a823d7 100644 --- a/trunk/arch/x86/include/asm/uv/uv_mmrs.h +++ b/trunk/arch/x86/include/asm/uv/uv_mmrs.h @@ -5,7 +5,7 @@ * * SGI UV MMR definitions * - * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 2007-2011 Silicon Graphics, Inc. All rights reserved. 
*/ #ifndef _ASM_X86_UV_UV_MMRS_H @@ -1099,5 +1099,19 @@ union uvh_rtc1_int_config_u { } s; }; +/* ========================================================================= */ +/* UVH_SCRATCH5 */ +/* ========================================================================= */ +#define UVH_SCRATCH5 0x2d0200UL +#define UVH_SCRATCH5_32 0x00778 + +#define UVH_SCRATCH5_SCRATCH5_SHFT 0 +#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL +union uvh_scratch5_u { + unsigned long v; + struct uvh_scratch5_s { + unsigned long scratch5 : 64; /* RW, W1CS */ + } s; +}; #endif /* __ASM_UV_MMRS_X86_H__ */ diff --git a/trunk/arch/x86/include/asm/x86_init.h b/trunk/arch/x86/include/asm/x86_init.h index 643ebf2e2ad8..d3d859035af9 100644 --- a/trunk/arch/x86/include/asm/x86_init.h +++ b/trunk/arch/x86/include/asm/x86_init.h @@ -67,6 +67,17 @@ struct x86_init_oem { void (*banner)(void); }; +/** + * struct x86_init_mapping - platform specific initial kernel pagetable setup + * @pagetable_reserve: reserve a range of addresses for kernel pagetable usage + * + * For more details on the purpose of this hook, look in + * init_memory_mapping and the commit that added it. + */ +struct x86_init_mapping { + void (*pagetable_reserve)(u64 start, u64 end); +}; + /** * struct x86_init_paging - platform specific paging functions * @pagetable_setup_start: platform specific pre paging_init() call @@ -123,6 +134,7 @@ struct x86_init_ops { struct x86_init_mpparse mpparse; struct x86_init_irqs irqs; struct x86_init_oem oem; + struct x86_init_mapping mapping; struct x86_init_paging paging; struct x86_init_timers timers; struct x86_init_iommu iommu; diff --git a/trunk/arch/x86/include/asm/xen/page.h b/trunk/arch/x86/include/asm/xen/page.h index c61934fbf22a..64a619d47d34 100644 --- a/trunk/arch/x86/include/asm/xen/page.h +++ b/trunk/arch/x86/include/asm/xen/page.h @@ -47,8 +47,9 @@ extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); extern unsigned long set_phys_range_identity(unsigned long pfn_s, unsigned long pfn_e); -extern int m2p_add_override(unsigned long mfn, struct page *page); -extern int m2p_remove_override(struct page *page); +extern int m2p_add_override(unsigned long mfn, struct page *page, + bool clear_pte); +extern int m2p_remove_override(struct page *page, bool clear_pte); extern struct page *m2p_find_override(unsigned long mfn); extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); diff --git a/trunk/arch/x86/include/asm/xen/pci.h b/trunk/arch/x86/include/asm/xen/pci.h index aa8620989162..4fbda9a3f339 100644 --- a/trunk/arch/x86/include/asm/xen/pci.h +++ b/trunk/arch/x86/include/asm/xen/pci.h @@ -15,10 +15,26 @@ static inline int pci_xen_hvm_init(void) #endif #if defined(CONFIG_XEN_DOM0) void __init xen_setup_pirqs(void); +int xen_find_device_domain_owner(struct pci_dev *dev); +int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain); +int xen_unregister_device_domain_owner(struct pci_dev *dev); #else static inline void __init xen_setup_pirqs(void) { } +static inline int xen_find_device_domain_owner(struct pci_dev *dev) +{ + return -1; +} +static inline int xen_register_device_domain_owner(struct pci_dev *dev, + uint16_t domain) +{ + return -1; +} +static inline int xen_unregister_device_domain_owner(struct pci_dev *dev) +{ + return -1; +} #endif #if defined(CONFIG_PCI_MSI) diff --git a/trunk/arch/x86/kernel/Makefile b/trunk/arch/x86/kernel/Makefile index 7338ef2218bc..250806472a7e 100644 --- a/trunk/arch/x86/kernel/Makefile +++ 
b/trunk/arch/x86/kernel/Makefile @@ -36,7 +36,7 @@ obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o obj-y += time.o ioport.o ldt.o dumpstack.o obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o obj-$(CONFIG_IRQ_WORK) += irq_work.o -obj-$(CONFIG_X86_32) += probe_roms_32.o +obj-y += probe_roms.o obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o @@ -117,7 +117,7 @@ obj-$(CONFIG_OF) += devicetree.o ifeq ($(CONFIG_X86_64),y) obj-$(CONFIG_AUDIT) += audit_64.o - obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o + obj-$(CONFIG_GART_IOMMU) += amd_gart_64.o aperture_64.o obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o diff --git a/trunk/arch/x86/kernel/acpi/sleep.c b/trunk/arch/x86/kernel/acpi/sleep.c index ff93bc1b09c3..18a857ba7a25 100644 --- a/trunk/arch/x86/kernel/acpi/sleep.c +++ b/trunk/arch/x86/kernel/acpi/sleep.c @@ -112,11 +112,6 @@ static int __init acpi_sleep_setup(char *str) #ifdef CONFIG_HIBERNATION if (strncmp(str, "s4_nohwsig", 10) == 0) acpi_no_s4_hw_signature(); - if (strncmp(str, "s4_nonvs", 8) == 0) { - pr_warning("ACPI: acpi_sleep=s4_nonvs is deprecated, " - "please use acpi_sleep=nonvs instead"); - acpi_nvs_nosave(); - } #endif if (strncmp(str, "nonvs", 5) == 0) acpi_nvs_nosave(); diff --git a/trunk/arch/x86/kernel/alternative.c b/trunk/arch/x86/kernel/alternative.c index 4a234677e213..a81f2d52f869 100644 --- a/trunk/arch/x86/kernel/alternative.c +++ b/trunk/arch/x86/kernel/alternative.c @@ -67,17 +67,30 @@ __setup("noreplace-paravirt", setup_noreplace_paravirt); #define DPRINTK(fmt, args...) if (debug_alternative) \ printk(KERN_DEBUG fmt, args) +/* + * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes + * that correspond to that nop. Getting from one nop to the next, we + * add to the array the offset that is equal to the sum of all sizes of + * nops preceding the one we are after. + * + * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the + * nice symmetry of sizes of the previous nops. + */ #if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64) -/* Use inline assembly to define this because the nops are defined - as inline assembly strings in the include files and we cannot - get them easily into strings. 
*/ -asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nintelnops: " - GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 - GENERIC_NOP7 GENERIC_NOP8 - "\t.previous"); -extern const unsigned char intelnops[]; -static const unsigned char *const __initconst_or_module -intel_nops[ASM_NOP_MAX+1] = { +static const unsigned char intelnops[] = +{ + GENERIC_NOP1, + GENERIC_NOP2, + GENERIC_NOP3, + GENERIC_NOP4, + GENERIC_NOP5, + GENERIC_NOP6, + GENERIC_NOP7, + GENERIC_NOP8, + GENERIC_NOP5_ATOMIC +}; +static const unsigned char * const intel_nops[ASM_NOP_MAX+2] = +{ NULL, intelnops, intelnops + 1, @@ -87,17 +100,25 @@ intel_nops[ASM_NOP_MAX+1] = { intelnops + 1 + 2 + 3 + 4 + 5, intelnops + 1 + 2 + 3 + 4 + 5 + 6, intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7, + intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, }; #endif #ifdef K8_NOP1 -asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk8nops: " - K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 - K8_NOP7 K8_NOP8 - "\t.previous"); -extern const unsigned char k8nops[]; -static const unsigned char *const __initconst_or_module -k8_nops[ASM_NOP_MAX+1] = { +static const unsigned char k8nops[] = +{ + K8_NOP1, + K8_NOP2, + K8_NOP3, + K8_NOP4, + K8_NOP5, + K8_NOP6, + K8_NOP7, + K8_NOP8, + K8_NOP5_ATOMIC +}; +static const unsigned char * const k8_nops[ASM_NOP_MAX+2] = +{ NULL, k8nops, k8nops + 1, @@ -107,17 +128,25 @@ k8_nops[ASM_NOP_MAX+1] = { k8nops + 1 + 2 + 3 + 4 + 5, k8nops + 1 + 2 + 3 + 4 + 5 + 6, k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, + k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, }; #endif #if defined(K7_NOP1) && !defined(CONFIG_X86_64) -asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk7nops: " - K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6 - K7_NOP7 K7_NOP8 - "\t.previous"); -extern const unsigned char k7nops[]; -static const unsigned char *const __initconst_or_module -k7_nops[ASM_NOP_MAX+1] = { +static const unsigned char k7nops[] = +{ + K7_NOP1, + K7_NOP2, + K7_NOP3, + K7_NOP4, + K7_NOP5, + K7_NOP6, + K7_NOP7, + K7_NOP8, + K7_NOP5_ATOMIC +}; +static const unsigned char * const k7_nops[ASM_NOP_MAX+2] = +{ NULL, k7nops, k7nops + 1, @@ -127,17 +156,25 @@ k7_nops[ASM_NOP_MAX+1] = { k7nops + 1 + 2 + 3 + 4 + 5, k7nops + 1 + 2 + 3 + 4 + 5 + 6, k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, + k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, }; #endif #ifdef P6_NOP1 -asm("\t" __stringify(__INITRODATA_OR_MODULE) "\np6nops: " - P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6 - P6_NOP7 P6_NOP8 - "\t.previous"); -extern const unsigned char p6nops[]; -static const unsigned char *const __initconst_or_module -p6_nops[ASM_NOP_MAX+1] = { +static const unsigned char __initconst_or_module p6nops[] = +{ + P6_NOP1, + P6_NOP2, + P6_NOP3, + P6_NOP4, + P6_NOP5, + P6_NOP6, + P6_NOP7, + P6_NOP8, + P6_NOP5_ATOMIC +}; +static const unsigned char * const p6_nops[ASM_NOP_MAX+2] = +{ NULL, p6nops, p6nops + 1, @@ -147,47 +184,65 @@ p6_nops[ASM_NOP_MAX+1] = { p6nops + 1 + 2 + 3 + 4 + 5, p6nops + 1 + 2 + 3 + 4 + 5 + 6, p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, + p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, }; #endif +/* Initialize these to a safe default */ #ifdef CONFIG_X86_64 +const unsigned char * const *ideal_nops = p6_nops; +#else +const unsigned char * const *ideal_nops = intel_nops; +#endif -extern char __vsyscall_0; -static const unsigned char *const *__init_or_module find_nop_table(void) +void __init arch_init_ideal_nops(void) { - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && - boot_cpu_has(X86_FEATURE_NOPL)) - return p6_nops; - else - return k8_nops; -} - -#else /* CONFIG_X86_64 */ + switch 
(boot_cpu_data.x86_vendor) { + case X86_VENDOR_INTEL: + /* + * Due to a decoder implementation quirk, some + * specific Intel CPUs actually perform better with + * the "k8_nops" than with the SDM-recommended NOPs. + */ + if (boot_cpu_data.x86 == 6 && + boot_cpu_data.x86_model >= 0x0f && + boot_cpu_data.x86_model != 0x1c && + boot_cpu_data.x86_model != 0x26 && + boot_cpu_data.x86_model != 0x27 && + boot_cpu_data.x86_model < 0x30) { + ideal_nops = k8_nops; + } else if (boot_cpu_has(X86_FEATURE_NOPL)) { + ideal_nops = p6_nops; + } else { +#ifdef CONFIG_X86_64 + ideal_nops = k8_nops; +#else + ideal_nops = intel_nops; +#endif + } -static const unsigned char *const *__init_or_module find_nop_table(void) -{ - if (boot_cpu_has(X86_FEATURE_K8)) - return k8_nops; - else if (boot_cpu_has(X86_FEATURE_K7)) - return k7_nops; - else if (boot_cpu_has(X86_FEATURE_NOPL)) - return p6_nops; - else - return intel_nops; + default: +#ifdef CONFIG_X86_64 + ideal_nops = k8_nops; +#else + if (boot_cpu_has(X86_FEATURE_K8)) + ideal_nops = k8_nops; + else if (boot_cpu_has(X86_FEATURE_K7)) + ideal_nops = k7_nops; + else + ideal_nops = intel_nops; +#endif + } } -#endif /* CONFIG_X86_64 */ - /* Use this to add nops to a buffer, then text_poke the whole buffer. */ static void __init_or_module add_nops(void *insns, unsigned int len) { - const unsigned char *const *noptable = find_nop_table(); - while (len > 0) { unsigned int noplen = len; if (noplen > ASM_NOP_MAX) noplen = ASM_NOP_MAX; - memcpy(insns, noptable[noplen], noplen); + memcpy(insns, ideal_nops[noplen], noplen); insns += noplen; len -= noplen; } @@ -195,6 +250,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len) extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; extern s32 __smp_locks[], __smp_locks_end[]; +extern char __vsyscall_0; void *text_poke_early(void *addr, const void *opcode, size_t len); /* Replace instructions with better alternatives for this CPU type. @@ -210,6 +266,15 @@ void __init_or_module apply_alternatives(struct alt_instr *start, u8 insnbuf[MAX_PATCH_LEN]; DPRINTK("%s: alt table %p -> %p\n", __func__, start, end); + /* + * The scan order should be from start to end. A later scanned + * alternative code can overwrite a previous scanned alternative code. + * Some kernel functions (e.g. memcpy, memset, etc) use this order to + * patch code. + * + * So be careful if you want to change the scan order to any other + * order. + */ for (a = start; a < end; a++) { u8 *instr = a->instr; BUG_ON(a->replacementlen > a->instrlen); @@ -678,29 +743,3 @@ void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n) wrote_text = 0; __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); } - -#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) - -#ifdef CONFIG_X86_64 -unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 }; -#else -unsigned char ideal_nop5[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 }; -#endif - -void __init arch_init_ideal_nop5(void) -{ - /* - * There is no good nop for all x86 archs. This selection - * algorithm should be unified with the one in find_nop_table(), - * but this should be good enough for now. - * - * For cases other than the ones below, use the safe (as in - * always functional) defaults above. 
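
Editor's note: the replacement tables above drop the old inline-assembly definitions and build each vendor's NOP lookup table directly in C. All byte sequences sit back to back in one flat array, and entry N of the pointer table is reached by adding the sizes of every shorter sequence, exactly as the new comment describes. A toy, stand-alone illustration of that layout (placeholder byte values, not real instruction encodings) could look like this:

    #include <stddef.h>

    /*
     * Toy model of the NOP lookup tables above: every sequence is stored
     * back-to-back in one array, and entry N of the pointer table starts
     * at offset 1 + 2 + ... + (N - 1), i.e. after all shorter sequences.
     */
    static const unsigned char demo_nop_bytes[] = {
            0x01,                   /* 1-byte sequence (placeholder) */
            0x02, 0x02,             /* 2-byte sequence (placeholder) */
            0x03, 0x03, 0x03,       /* 3-byte sequence (placeholder) */
    };

    static const unsigned char * const demo_nops[] = {
            NULL,                   /* there is no 0-byte NOP        */
            demo_nop_bytes,         /* offset 0     : 1-byte NOP     */
            demo_nop_bytes + 1,     /* offset 1     : 2-byte NOP     */
            demo_nop_bytes + 1 + 2, /* offset 1 + 2 : 3-byte NOP     */
    };
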
- */ -#ifdef CONFIG_X86_64 - /* Don't use these on 32 bits due to broken virtualizers */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) - memcpy(ideal_nop5, p6_nops[5], 5); -#endif -} -#endif diff --git a/trunk/arch/x86/kernel/pci-gart_64.c b/trunk/arch/x86/kernel/amd_gart_64.c similarity index 98% rename from trunk/arch/x86/kernel/pci-gart_64.c rename to trunk/arch/x86/kernel/amd_gart_64.c index 82ada01625b9..b117efd24f71 100644 --- a/trunk/arch/x86/kernel/pci-gart_64.c +++ b/trunk/arch/x86/kernel/amd_gart_64.c @@ -81,6 +81,9 @@ static u32 gart_unmapped_entry; #define AGPEXTERN #endif +/* GART can only remap to physical addresses < 1TB */ +#define GART_MAX_PHYS_ADDR (1ULL << 40) + /* backdoor interface to AGP driver */ AGPEXTERN int agp_memory_reserved; AGPEXTERN __u32 *agp_gatt_table; @@ -212,9 +215,13 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, size_t size, int dir, unsigned long align_mask) { unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE); - unsigned long iommu_page = alloc_iommu(dev, npages, align_mask); + unsigned long iommu_page; int i; + if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR)) + return bad_dma_addr; + + iommu_page = alloc_iommu(dev, npages, align_mask); if (iommu_page == -1) { if (!nonforced_iommu(dev, phys_mem, size)) return phys_mem; diff --git a/trunk/arch/x86/kernel/amd_iommu.c b/trunk/arch/x86/kernel/amd_iommu.c index 57ca77787220..873e7e1ead7b 100644 --- a/trunk/arch/x86/kernel/amd_iommu.c +++ b/trunk/arch/x86/kernel/amd_iommu.c @@ -18,6 +18,7 @@ */ #include +#include #include #include #include @@ -25,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -34,7 +36,7 @@ #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) -#define EXIT_LOOP_COUNT 10000000 +#define LOOP_TIMEOUT 100000 static DEFINE_RWLOCK(amd_iommu_devtable_lock); @@ -57,7 +59,6 @@ struct iommu_cmd { u32 data[4]; }; -static void reset_iommu_command_buffer(struct amd_iommu *iommu); static void update_domain(struct protection_domain *domain); /**************************************************************************** @@ -322,8 +323,6 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt) break; case EVENT_TYPE_ILL_CMD: printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); - iommu->reset_in_progress = true; - reset_iommu_command_buffer(iommu); dump_command(address); break; case EVENT_TYPE_CMD_HARD_ERR: @@ -367,7 +366,7 @@ static void iommu_poll_events(struct amd_iommu *iommu) spin_unlock_irqrestore(&iommu->lock, flags); } -irqreturn_t amd_iommu_int_handler(int irq, void *data) +irqreturn_t amd_iommu_int_thread(int irq, void *data) { struct amd_iommu *iommu; @@ -377,192 +376,300 @@ irqreturn_t amd_iommu_int_handler(int irq, void *data) return IRQ_HANDLED; } +irqreturn_t amd_iommu_int_handler(int irq, void *data) +{ + return IRQ_WAKE_THREAD; +} + /**************************************************************************** * * IOMMU command queuing functions * ****************************************************************************/ -/* - * Writes the command to the IOMMUs command buffer and informs the - * hardware about the new command. Must be called with iommu->lock held. 
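
Editor's note: the interrupt path above is split into a hard handler that only returns IRQ_WAKE_THREAD and a threaded handler, amd_iommu_int_thread(), that does the actual event polling; the matching registration change to request_threaded_irq() appears further down in amd_iommu_init.c. A minimal sketch of that pattern for a hypothetical driver (my_dev, my_poll_events and "my-dev" are placeholders, not names from this patch) might look like:

    #include <linux/interrupt.h>

    struct my_dev { int dummy; };           /* placeholder device structure */

    static void my_poll_events(struct my_dev *dev)
    {
            /* placeholder for the real event-queue processing */
            (void)dev;
    }

    /* Hard IRQ context: do nothing except ask for the thread to run. */
    static irqreturn_t my_irq_handler(int irq, void *data)
    {
            return IRQ_WAKE_THREAD;
    }

    /* Thread context: may sleep, so heavier work is safe here. */
    static irqreturn_t my_irq_thread(int irq, void *data)
    {
            struct my_dev *dev = data;

            my_poll_events(dev);
            return IRQ_HANDLED;
    }

    /* Registration, mirroring the request_threaded_irq() call in this patch. */
    static int my_setup_irq(struct my_dev *dev, unsigned int irq)
    {
            return request_threaded_irq(irq, my_irq_handler, my_irq_thread,
                                        0, "my-dev", dev);
    }
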
- */ -static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) +static int wait_on_sem(volatile u64 *sem) +{ + int i = 0; + + while (*sem == 0 && i < LOOP_TIMEOUT) { + udelay(1); + i += 1; + } + + if (i == LOOP_TIMEOUT) { + pr_alert("AMD-Vi: Completion-Wait loop timed out\n"); + return -EIO; + } + + return 0; +} + +static void copy_cmd_to_buffer(struct amd_iommu *iommu, + struct iommu_cmd *cmd, + u32 tail) { - u32 tail, head; u8 *target; - WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED); - tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); target = iommu->cmd_buf + tail; - memcpy_toio(target, cmd, sizeof(*cmd)); - tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; - head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); - if (tail == head) - return -ENOMEM; + tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; + + /* Copy command to buffer */ + memcpy(target, cmd, sizeof(*cmd)); + + /* Tell the IOMMU about it */ writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); +} - return 0; +static void build_completion_wait(struct iommu_cmd *cmd, u64 address) +{ + WARN_ON(address & 0x7ULL); + + memset(cmd, 0, sizeof(*cmd)); + cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK; + cmd->data[1] = upper_32_bits(__pa(address)); + cmd->data[2] = 1; + CMD_SET_TYPE(cmd, CMD_COMPL_WAIT); +} + +static void build_inv_dte(struct iommu_cmd *cmd, u16 devid) +{ + memset(cmd, 0, sizeof(*cmd)); + cmd->data[0] = devid; + CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY); +} + +static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address, + size_t size, u16 domid, int pde) +{ + u64 pages; + int s; + + pages = iommu_num_pages(address, size, PAGE_SIZE); + s = 0; + + if (pages > 1) { + /* + * If we have to flush more than one page, flush all + * TLB entries for this domain + */ + address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; + s = 1; + } + + address &= PAGE_MASK; + + memset(cmd, 0, sizeof(*cmd)); + cmd->data[1] |= domid; + cmd->data[2] = lower_32_bits(address); + cmd->data[3] = upper_32_bits(address); + CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES); + if (s) /* size bit - we flush more than one 4kb page */ + cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; + if (pde) /* PDE bit - we wan't flush everything not only the PTEs */ + cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; +} + +static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep, + u64 address, size_t size) +{ + u64 pages; + int s; + + pages = iommu_num_pages(address, size, PAGE_SIZE); + s = 0; + + if (pages > 1) { + /* + * If we have to flush more than one page, flush all + * TLB entries for this domain + */ + address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; + s = 1; + } + + address &= PAGE_MASK; + + memset(cmd, 0, sizeof(*cmd)); + cmd->data[0] = devid; + cmd->data[0] |= (qdep & 0xff) << 24; + cmd->data[1] = devid; + cmd->data[2] = lower_32_bits(address); + cmd->data[3] = upper_32_bits(address); + CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES); + if (s) + cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; +} + +static void build_inv_all(struct iommu_cmd *cmd) +{ + memset(cmd, 0, sizeof(*cmd)); + CMD_SET_TYPE(cmd, CMD_INV_ALL); } /* - * General queuing function for commands. Takes iommu->lock and calls - * __iommu_queue_command(). + * Writes the command to the IOMMUs command buffer and informs the + * hardware about the new command. 
*/ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) { + u32 left, tail, head, next_tail; unsigned long flags; - int ret; + WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED); + +again: spin_lock_irqsave(&iommu->lock, flags); - ret = __iommu_queue_command(iommu, cmd); - if (!ret) - iommu->need_sync = true; - spin_unlock_irqrestore(&iommu->lock, flags); - return ret; -} + head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); + tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); + next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; + left = (head - next_tail) % iommu->cmd_buf_size; -/* - * This function waits until an IOMMU has completed a completion - * wait command - */ -static void __iommu_wait_for_completion(struct amd_iommu *iommu) -{ - int ready = 0; - unsigned status = 0; - unsigned long i = 0; + if (left <= 2) { + struct iommu_cmd sync_cmd; + volatile u64 sem = 0; + int ret; - INC_STATS_COUNTER(compl_wait); + build_completion_wait(&sync_cmd, (u64)&sem); + copy_cmd_to_buffer(iommu, &sync_cmd, tail); - while (!ready && (i < EXIT_LOOP_COUNT)) { - ++i; - /* wait for the bit to become one */ - status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); - ready = status & MMIO_STATUS_COM_WAIT_INT_MASK; + spin_unlock_irqrestore(&iommu->lock, flags); + + if ((ret = wait_on_sem(&sem)) != 0) + return ret; + + goto again; } - /* set bit back to zero */ - status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; - writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); + copy_cmd_to_buffer(iommu, cmd, tail); + + /* We need to sync now to make sure all commands are processed */ + iommu->need_sync = true; + + spin_unlock_irqrestore(&iommu->lock, flags); - if (unlikely(i == EXIT_LOOP_COUNT)) - iommu->reset_in_progress = true; + return 0; } /* * This function queues a completion wait command into the command * buffer of an IOMMU */ -static int __iommu_completion_wait(struct amd_iommu *iommu) +static int iommu_completion_wait(struct amd_iommu *iommu) { struct iommu_cmd cmd; + volatile u64 sem = 0; + int ret; - memset(&cmd, 0, sizeof(cmd)); - cmd.data[0] = CMD_COMPL_WAIT_INT_MASK; - CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT); + if (!iommu->need_sync) + return 0; - return __iommu_queue_command(iommu, &cmd); + build_completion_wait(&cmd, (u64)&sem); + + ret = iommu_queue_command(iommu, &cmd); + if (ret) + return ret; + + return wait_on_sem(&sem); } -/* - * This function is called whenever we need to ensure that the IOMMU has - * completed execution of all commands we sent. It sends a - * COMPLETION_WAIT command and waits for it to finish. The IOMMU informs - * us about that by writing a value to a physical address we pass with - * the command. - */ -static int iommu_completion_wait(struct amd_iommu *iommu) +static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) { - int ret = 0; - unsigned long flags; + struct iommu_cmd cmd; - spin_lock_irqsave(&iommu->lock, flags); + build_inv_dte(&cmd, devid); - if (!iommu->need_sync) - goto out; + return iommu_queue_command(iommu, &cmd); +} - ret = __iommu_completion_wait(iommu); +static void iommu_flush_dte_all(struct amd_iommu *iommu) +{ + u32 devid; - iommu->need_sync = false; + for (devid = 0; devid <= 0xffff; ++devid) + iommu_flush_dte(iommu, devid); - if (ret) - goto out; - - __iommu_wait_for_completion(iommu); + iommu_completion_wait(iommu); +} -out: - spin_unlock_irqrestore(&iommu->lock, flags); +/* + * This function uses heavy locking and may disable irqs for some time. But + * this is no issue because it is only called during resume. 
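
Editor's note: iommu_queue_command() above checks for free space in the hardware command ring before copying a command in; when the ring is effectively full it queues a completion-wait, drops the lock, spins on the semaphore and retries. A small stand-alone sketch of just the free-space arithmetic, assuming a power-of-two buffer size as the driver uses, is shown below (names and sizes are illustrative only):

    #include <stdbool.h>
    #include <stdint.h>

    #define DEMO_CMD_BUF_SIZE 512u  /* toy ring size in bytes, power of two */
    #define DEMO_CMD_SIZE      16u  /* size of one command entry            */

    /*
     * Free space in a byte ring where head is the hardware read offset and
     * tail is the driver write offset.  With a power-of-two buffer size the
     * unsigned wrap-around in (head - next_tail) still reduces to the right
     * value modulo the size.  The driver treats "left <= 2" as full and
     * drains the ring with a completion-wait before retrying.
     */
    static bool demo_ring_full(uint32_t head, uint32_t tail)
    {
            uint32_t next_tail = (tail + DEMO_CMD_SIZE) % DEMO_CMD_BUF_SIZE;
            uint32_t left = (head - next_tail) % DEMO_CMD_BUF_SIZE;

            return left <= 2;
    }
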
+ */ +static void iommu_flush_tlb_all(struct amd_iommu *iommu) +{ + u32 dom_id; - if (iommu->reset_in_progress) - reset_iommu_command_buffer(iommu); + for (dom_id = 0; dom_id <= 0xffff; ++dom_id) { + struct iommu_cmd cmd; + build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, + dom_id, 1); + iommu_queue_command(iommu, &cmd); + } - return 0; + iommu_completion_wait(iommu); } -static void iommu_flush_complete(struct protection_domain *domain) +static void iommu_flush_all(struct amd_iommu *iommu) { - int i; + struct iommu_cmd cmd; - for (i = 0; i < amd_iommus_present; ++i) { - if (!domain->dev_iommu[i]) - continue; + build_inv_all(&cmd); - /* - * Devices of this domain are behind this IOMMU - * We need to wait for completion of all commands. - */ - iommu_completion_wait(amd_iommus[i]); + iommu_queue_command(iommu, &cmd); + iommu_completion_wait(iommu); +} + +void iommu_flush_all_caches(struct amd_iommu *iommu) +{ + if (iommu_feature(iommu, FEATURE_IA)) { + iommu_flush_all(iommu); + } else { + iommu_flush_dte_all(iommu); + iommu_flush_tlb_all(iommu); } } /* - * Command send function for invalidating a device table entry + * Command send function for flushing on-device TLB */ -static int iommu_flush_device(struct device *dev) +static int device_flush_iotlb(struct device *dev, u64 address, size_t size) { + struct pci_dev *pdev = to_pci_dev(dev); struct amd_iommu *iommu; struct iommu_cmd cmd; u16 devid; + int qdep; + qdep = pci_ats_queue_depth(pdev); devid = get_device_id(dev); iommu = amd_iommu_rlookup_table[devid]; - /* Build command */ - memset(&cmd, 0, sizeof(cmd)); - CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY); - cmd.data[0] = devid; + build_inv_iotlb_pages(&cmd, devid, qdep, address, size); return iommu_queue_command(iommu, &cmd); } -static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address, - u16 domid, int pde, int s) -{ - memset(cmd, 0, sizeof(*cmd)); - address &= PAGE_MASK; - CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES); - cmd->data[1] |= domid; - cmd->data[2] = lower_32_bits(address); - cmd->data[3] = upper_32_bits(address); - if (s) /* size bit - we flush more than one 4kb page */ - cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; - if (pde) /* PDE bit - we wan't flush everything not only the PTEs */ - cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; -} - /* - * Generic command send function for invalidaing TLB entries + * Command send function for invalidating a device table entry */ -static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu, - u64 address, u16 domid, int pde, int s) +static int device_flush_dte(struct device *dev) { - struct iommu_cmd cmd; + struct amd_iommu *iommu; + struct pci_dev *pdev; + u16 devid; int ret; - __iommu_build_inv_iommu_pages(&cmd, address, domid, pde, s); + pdev = to_pci_dev(dev); + devid = get_device_id(dev); + iommu = amd_iommu_rlookup_table[devid]; - ret = iommu_queue_command(iommu, &cmd); + ret = iommu_flush_dte(iommu, devid); + if (ret) + return ret; + + if (pci_ats_enabled(pdev)) + ret = device_flush_iotlb(dev, 0, ~0UL); return ret; } @@ -572,23 +679,14 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu, * It invalidates a single PTE if the range to flush is within a single * page. Otherwise it flushes the whole TLB of the IOMMU. 
*/ -static void __iommu_flush_pages(struct protection_domain *domain, - u64 address, size_t size, int pde) +static void __domain_flush_pages(struct protection_domain *domain, + u64 address, size_t size, int pde) { - int s = 0, i; - unsigned long pages = iommu_num_pages(address, size, PAGE_SIZE); - - address &= PAGE_MASK; - - if (pages > 1) { - /* - * If we have to flush more than one page, flush all - * TLB entries for this domain - */ - address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; - s = 1; - } + struct iommu_dev_data *dev_data; + struct iommu_cmd cmd; + int ret = 0, i; + build_inv_iommu_pages(&cmd, address, size, domain->id, pde); for (i = 0; i < amd_iommus_present; ++i) { if (!domain->dev_iommu[i]) @@ -598,101 +696,70 @@ static void __iommu_flush_pages(struct protection_domain *domain, * Devices of this domain are behind this IOMMU * We need a TLB flush */ - iommu_queue_inv_iommu_pages(amd_iommus[i], address, - domain->id, pde, s); + ret |= iommu_queue_command(amd_iommus[i], &cmd); + } + + list_for_each_entry(dev_data, &domain->dev_list, list) { + struct pci_dev *pdev = to_pci_dev(dev_data->dev); + + if (!pci_ats_enabled(pdev)) + continue; + + ret |= device_flush_iotlb(dev_data->dev, address, size); } - return; + WARN_ON(ret); } -static void iommu_flush_pages(struct protection_domain *domain, - u64 address, size_t size) +static void domain_flush_pages(struct protection_domain *domain, + u64 address, size_t size) { - __iommu_flush_pages(domain, address, size, 0); + __domain_flush_pages(domain, address, size, 0); } /* Flush the whole IO/TLB for a given protection domain */ -static void iommu_flush_tlb(struct protection_domain *domain) +static void domain_flush_tlb(struct protection_domain *domain) { - __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0); + __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0); } /* Flush the whole IO/TLB for a given protection domain - including PDE */ -static void iommu_flush_tlb_pde(struct protection_domain *domain) +static void domain_flush_tlb_pde(struct protection_domain *domain) { - __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1); -} - - -/* - * This function flushes the DTEs for all devices in domain - */ -static void iommu_flush_domain_devices(struct protection_domain *domain) -{ - struct iommu_dev_data *dev_data; - unsigned long flags; - - spin_lock_irqsave(&domain->lock, flags); - - list_for_each_entry(dev_data, &domain->dev_list, list) - iommu_flush_device(dev_data->dev); - - spin_unlock_irqrestore(&domain->lock, flags); + __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1); } -static void iommu_flush_all_domain_devices(void) +static void domain_flush_complete(struct protection_domain *domain) { - struct protection_domain *domain; - unsigned long flags; + int i; - spin_lock_irqsave(&amd_iommu_pd_lock, flags); + for (i = 0; i < amd_iommus_present; ++i) { + if (!domain->dev_iommu[i]) + continue; - list_for_each_entry(domain, &amd_iommu_pd_list, list) { - iommu_flush_domain_devices(domain); - iommu_flush_complete(domain); + /* + * Devices of this domain are behind this IOMMU + * We need to wait for completion of all commands. + */ + iommu_completion_wait(amd_iommus[i]); } - - spin_unlock_irqrestore(&amd_iommu_pd_lock, flags); } -void amd_iommu_flush_all_devices(void) -{ - iommu_flush_all_domain_devices(); -} /* - * This function uses heavy locking and may disable irqs for some time. But - * this is no issue because it is only called during resume. 
+ * This function flushes the DTEs for all devices in domain */ -void amd_iommu_flush_all_domains(void) +static void domain_flush_devices(struct protection_domain *domain) { - struct protection_domain *domain; + struct iommu_dev_data *dev_data; unsigned long flags; - spin_lock_irqsave(&amd_iommu_pd_lock, flags); - - list_for_each_entry(domain, &amd_iommu_pd_list, list) { - spin_lock(&domain->lock); - iommu_flush_tlb_pde(domain); - iommu_flush_complete(domain); - spin_unlock(&domain->lock); - } - - spin_unlock_irqrestore(&amd_iommu_pd_lock, flags); -} - -static void reset_iommu_command_buffer(struct amd_iommu *iommu) -{ - pr_err("AMD-Vi: Resetting IOMMU command buffer\n"); - - if (iommu->reset_in_progress) - panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n"); + spin_lock_irqsave(&domain->lock, flags); - amd_iommu_reset_cmd_buffer(iommu); - amd_iommu_flush_all_devices(); - amd_iommu_flush_all_domains(); + list_for_each_entry(dev_data, &domain->dev_list, list) + device_flush_dte(dev_data->dev); - iommu->reset_in_progress = false; + spin_unlock_irqrestore(&domain->lock, flags); } /**************************************************************************** @@ -1410,17 +1477,22 @@ static bool dma_ops_domain(struct protection_domain *domain) return domain->flags & PD_DMA_OPS_MASK; } -static void set_dte_entry(u16 devid, struct protection_domain *domain) +static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats) { u64 pte_root = virt_to_phys(domain->pt_root); + u32 flags = 0; pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) << DEV_ENTRY_MODE_SHIFT; pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV; - amd_iommu_dev_table[devid].data[2] = domain->id; - amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); - amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); + if (ats) + flags |= DTE_FLAG_IOTLB; + + amd_iommu_dev_table[devid].data[3] |= flags; + amd_iommu_dev_table[devid].data[2] = domain->id; + amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); + amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); } static void clear_dte_entry(u16 devid) @@ -1437,34 +1509,42 @@ static void do_attach(struct device *dev, struct protection_domain *domain) { struct iommu_dev_data *dev_data; struct amd_iommu *iommu; + struct pci_dev *pdev; + bool ats = false; u16 devid; devid = get_device_id(dev); iommu = amd_iommu_rlookup_table[devid]; dev_data = get_dev_data(dev); + pdev = to_pci_dev(dev); + + if (amd_iommu_iotlb_sup) + ats = pci_ats_enabled(pdev); /* Update data structures */ dev_data->domain = domain; list_add(&dev_data->list, &domain->dev_list); - set_dte_entry(devid, domain); + set_dte_entry(devid, domain, ats); /* Do reference counting */ domain->dev_iommu[iommu->index] += 1; domain->dev_cnt += 1; /* Flush the DTE entry */ - iommu_flush_device(dev); + device_flush_dte(dev); } static void do_detach(struct device *dev) { struct iommu_dev_data *dev_data; struct amd_iommu *iommu; + struct pci_dev *pdev; u16 devid; devid = get_device_id(dev); iommu = amd_iommu_rlookup_table[devid]; dev_data = get_dev_data(dev); + pdev = to_pci_dev(dev); /* decrease reference counters */ dev_data->domain->dev_iommu[iommu->index] -= 1; @@ -1476,7 +1556,7 @@ static void do_detach(struct device *dev) clear_dte_entry(devid); /* Flush the DTE entry */ - iommu_flush_device(dev); + device_flush_dte(dev); } /* @@ -1539,9 +1619,13 @@ static int __attach_device(struct device *dev, static int attach_device(struct device *dev, struct 
protection_domain *domain) { + struct pci_dev *pdev = to_pci_dev(dev); unsigned long flags; int ret; + if (amd_iommu_iotlb_sup) + pci_enable_ats(pdev, PAGE_SHIFT); + write_lock_irqsave(&amd_iommu_devtable_lock, flags); ret = __attach_device(dev, domain); write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); @@ -1551,7 +1635,7 @@ static int attach_device(struct device *dev, * left the caches in the IOMMU dirty. So we have to flush * here to evict all dirty stuff. */ - iommu_flush_tlb_pde(domain); + domain_flush_tlb_pde(domain); return ret; } @@ -1598,12 +1682,16 @@ static void __detach_device(struct device *dev) */ static void detach_device(struct device *dev) { + struct pci_dev *pdev = to_pci_dev(dev); unsigned long flags; /* lock device table */ write_lock_irqsave(&amd_iommu_devtable_lock, flags); __detach_device(dev); write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); + + if (amd_iommu_iotlb_sup && pci_ats_enabled(pdev)) + pci_disable_ats(pdev); } /* @@ -1692,7 +1780,7 @@ static int device_change_notifier(struct notifier_block *nb, goto out; } - iommu_flush_device(dev); + device_flush_dte(dev); iommu_completion_wait(iommu); out: @@ -1753,8 +1841,9 @@ static void update_device_table(struct protection_domain *domain) struct iommu_dev_data *dev_data; list_for_each_entry(dev_data, &domain->dev_list, list) { + struct pci_dev *pdev = to_pci_dev(dev_data->dev); u16 devid = get_device_id(dev_data->dev); - set_dte_entry(devid, domain); + set_dte_entry(devid, domain, pci_ats_enabled(pdev)); } } @@ -1764,8 +1853,9 @@ static void update_domain(struct protection_domain *domain) return; update_device_table(domain); - iommu_flush_domain_devices(domain); - iommu_flush_tlb_pde(domain); + + domain_flush_devices(domain); + domain_flush_tlb_pde(domain); domain->updated = false; } @@ -1924,10 +2014,10 @@ static dma_addr_t __map_single(struct device *dev, ADD_STATS_COUNTER(alloced_io_mem, size); if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) { - iommu_flush_tlb(&dma_dom->domain); + domain_flush_tlb(&dma_dom->domain); dma_dom->need_flush = false; } else if (unlikely(amd_iommu_np_cache)) - iommu_flush_pages(&dma_dom->domain, address, size); + domain_flush_pages(&dma_dom->domain, address, size); out: return address; @@ -1976,7 +2066,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, dma_ops_free_addresses(dma_dom, dma_addr, pages); if (amd_iommu_unmap_flush || dma_dom->need_flush) { - iommu_flush_pages(&dma_dom->domain, flush_addr, size); + domain_flush_pages(&dma_dom->domain, flush_addr, size); dma_dom->need_flush = false; } } @@ -2012,7 +2102,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page, if (addr == DMA_ERROR_CODE) goto out; - iommu_flush_complete(domain); + domain_flush_complete(domain); out: spin_unlock_irqrestore(&domain->lock, flags); @@ -2039,7 +2129,7 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, __unmap_single(domain->priv, dma_addr, size, dir); - iommu_flush_complete(domain); + domain_flush_complete(domain); spin_unlock_irqrestore(&domain->lock, flags); } @@ -2104,7 +2194,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, goto unmap; } - iommu_flush_complete(domain); + domain_flush_complete(domain); out: spin_unlock_irqrestore(&domain->lock, flags); @@ -2150,7 +2240,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist, s->dma_address = s->dma_length = 0; } - iommu_flush_complete(domain); + domain_flush_complete(domain); spin_unlock_irqrestore(&domain->lock, flags); } 
@@ -2200,7 +2290,7 @@ static void *alloc_coherent(struct device *dev, size_t size, goto out_free; } - iommu_flush_complete(domain); + domain_flush_complete(domain); spin_unlock_irqrestore(&domain->lock, flags); @@ -2232,7 +2322,7 @@ static void free_coherent(struct device *dev, size_t size, __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); - iommu_flush_complete(domain); + domain_flush_complete(domain); spin_unlock_irqrestore(&domain->lock, flags); @@ -2476,7 +2566,7 @@ static void amd_iommu_detach_device(struct iommu_domain *dom, if (!iommu) return; - iommu_flush_device(dev); + device_flush_dte(dev); iommu_completion_wait(iommu); } @@ -2542,7 +2632,7 @@ static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, unmap_size = iommu_unmap_page(domain, iova, page_size); mutex_unlock(&domain->api_lock); - iommu_flush_tlb_pde(domain); + domain_flush_tlb_pde(domain); return get_order(unmap_size); } diff --git a/trunk/arch/x86/kernel/amd_iommu_init.c b/trunk/arch/x86/kernel/amd_iommu_init.c index 246d727b65b7..9179c21120a8 100644 --- a/trunk/arch/x86/kernel/amd_iommu_init.c +++ b/trunk/arch/x86/kernel/amd_iommu_init.c @@ -137,6 +137,7 @@ int amd_iommus_present; /* IOMMUs have a non-present cache? */ bool amd_iommu_np_cache __read_mostly; +bool amd_iommu_iotlb_sup __read_mostly = true; /* * The ACPI table parsing functions set this variable on an error @@ -180,6 +181,12 @@ static u32 dev_table_size; /* size of the device table */ static u32 alias_table_size; /* size of the alias table */ static u32 rlookup_table_size; /* size if the rlookup table */ +/* + * This function flushes all internal caches of + * the IOMMU used by this driver. + */ +extern void iommu_flush_all_caches(struct amd_iommu *iommu); + static inline void update_last_devid(u16 devid) { if (devid > amd_iommu_last_bdf) @@ -293,9 +300,23 @@ static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) /* Function to enable the hardware */ static void iommu_enable(struct amd_iommu *iommu) { - printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx\n", + static const char * const feat_str[] = { + "PreF", "PPR", "X2APIC", "NX", "GT", "[5]", + "IA", "GA", "HE", "PC", NULL + }; + int i; + + printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx", dev_name(&iommu->dev->dev), iommu->cap_ptr); + if (iommu->cap & (1 << IOMMU_CAP_EFR)) { + printk(KERN_CONT " extended features: "); + for (i = 0; feat_str[i]; ++i) + if (iommu_feature(iommu, (1ULL << i))) + printk(KERN_CONT " %s", feat_str[i]); + } + printk(KERN_CONT "\n"); + iommu_feature_enable(iommu, CONTROL_IOMMU_EN); } @@ -651,7 +672,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m) static void __init init_iommu_from_pci(struct amd_iommu *iommu) { int cap_ptr = iommu->cap_ptr; - u32 range, misc; + u32 range, misc, low, high; int i, j; pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, @@ -667,6 +688,15 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu) MMIO_GET_LD(range)); iommu->evt_msi_num = MMIO_MSI_NUM(misc); + if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) + amd_iommu_iotlb_sup = false; + + /* read extended feature bits */ + low = readl(iommu->mmio_base + MMIO_EXT_FEATURES); + high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4); + + iommu->features = ((u64)high << 32) | low; + if (!is_rd890_iommu(iommu->dev)) return; @@ -1004,10 +1034,11 @@ static int iommu_setup_msi(struct amd_iommu *iommu) if (pci_enable_msi(iommu->dev)) return 1; - r = request_irq(iommu->dev->irq, 
amd_iommu_int_handler, - IRQF_SAMPLE_RANDOM, - "AMD-Vi", - NULL); + r = request_threaded_irq(iommu->dev->irq, + amd_iommu_int_handler, + amd_iommu_int_thread, + 0, "AMD-Vi", + iommu->dev); if (r) { pci_disable_msi(iommu->dev); @@ -1244,6 +1275,7 @@ static void enable_iommus(void) iommu_set_exclusion_range(iommu); iommu_init_msi(iommu); iommu_enable(iommu); + iommu_flush_all_caches(iommu); } } @@ -1274,8 +1306,8 @@ static void amd_iommu_resume(void) * we have to flush after the IOMMUs are enabled because a * disabled IOMMU will never execute the commands we send */ - amd_iommu_flush_all_devices(); - amd_iommu_flush_all_domains(); + for_each_iommu(iommu) + iommu_flush_all_caches(iommu); } static int amd_iommu_suspend(void) diff --git a/trunk/arch/x86/kernel/apb_timer.c b/trunk/arch/x86/kernel/apb_timer.c index cd1ffed4ee22..289e92862fd9 100644 --- a/trunk/arch/x86/kernel/apb_timer.c +++ b/trunk/arch/x86/kernel/apb_timer.c @@ -177,7 +177,6 @@ static struct clocksource clocksource_apbt = { .rating = APBT_CLOCKSOURCE_RATING, .read = apbt_read_clocksource, .mask = APBT_MASK, - .shift = APBT_SHIFT, .flags = CLOCK_SOURCE_IS_CONTINUOUS, .resume = apbt_restart_clocksource, }; @@ -543,14 +542,7 @@ static int apbt_clocksource_register(void) if (t1 == apbt_read_clocksource(&clocksource_apbt)) panic("APBT counter not counting. APBT disabled\n"); - /* - * initialize and register APBT clocksource - * convert that to ns/clock cycle - * mult = (ns/c) * 2^APBT_SHIFT - */ - clocksource_apbt.mult = div_sc(MSEC_PER_SEC, - (unsigned long) apbt_freq, APBT_SHIFT); - clocksource_register(&clocksource_apbt); + clocksource_register_khz(&clocksource_apbt, (u32)apbt_freq*1000); return 0; } diff --git a/trunk/arch/x86/kernel/aperture_64.c b/trunk/arch/x86/kernel/aperture_64.c index 86d1ad4962a7..3d2661ca6542 100644 --- a/trunk/arch/x86/kernel/aperture_64.c +++ b/trunk/arch/x86/kernel/aperture_64.c @@ -30,6 +30,22 @@ #include #include +/* + * Using 512M as goal, in case kexec will load kernel_big + * that will do the on-position decompress, and could overlap with + * with the gart aperture that is used. + * Sequence: + * kernel_small + * ==> kexec (with kdump trigger path or gart still enabled) + * ==> kernel_small (gart area become e820_reserved) + * ==> kexec (with kdump trigger path or gart still enabled) + * ==> kerne_big (uncompressed size will be big than 64M or 128M) + * So don't use 512M below as gart iommu, leave the space for kernel + * code for safe. + */ +#define GART_MIN_ADDR (512ULL << 20) +#define GART_MAX_ADDR (1ULL << 32) + int gart_iommu_aperture; int gart_iommu_aperture_disabled __initdata; int gart_iommu_aperture_allowed __initdata; @@ -70,21 +86,9 @@ static u32 __init allocate_aperture(void) * memory. Unfortunately we cannot move it up because that would * make the IOMMU useless. */ - /* - * using 512M as goal, in case kexec will load kernel_big - * that will do the on position decompress, and could overlap with - * that position with gart that is used. 
- * sequende: - * kernel_small - * ==> kexec (with kdump trigger path or previous doesn't shutdown gart) - * ==> kernel_small(gart area become e820_reserved) - * ==> kexec (with kdump trigger path or previous doesn't shutdown gart) - * ==> kerne_big (uncompressed size will be big than 64M or 128M) - * so don't use 512M below as gart iommu, leave the space for kernel - * code for safe - */ - addr = memblock_find_in_range(0, 1ULL<<32, aper_size, 512ULL<<20); - if (addr == MEMBLOCK_ERROR || addr + aper_size > 0xffffffff) { + addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR, + aper_size, aper_size); + if (addr == MEMBLOCK_ERROR || addr + aper_size > GART_MAX_ADDR) { printk(KERN_ERR "Cannot allocate aperture memory hole (%lx,%uK)\n", addr, aper_size>>10); @@ -499,7 +503,7 @@ int __init gart_iommu_hole_init(void) * Don't enable translation yet but enable GART IO and CPU * accesses and set DISTLBWALKPRB since GART table memory is UC. */ - u32 ctl = DISTLBWALKPRB | aper_order << 1; + u32 ctl = aper_order << 1; bus = amd_nb_bus_dev_ranges[i].bus; dev_base = amd_nb_bus_dev_ranges[i].dev_base; diff --git a/trunk/arch/x86/kernel/apic/apic.c b/trunk/arch/x86/kernel/apic/apic.c index fabf01eff771..f92a8e5d1e21 100644 --- a/trunk/arch/x86/kernel/apic/apic.c +++ b/trunk/arch/x86/kernel/apic/apic.c @@ -505,7 +505,7 @@ static void __cpuinit setup_APIC_timer(void) { struct clock_event_device *levt = &__get_cpu_var(lapic_events); - if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_ARAT)) { + if (this_cpu_has(X86_FEATURE_ARAT)) { lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP; /* Make LAPIC timer preferrable over percpu HPET */ lapic_clockevent.rating = 150; @@ -1237,6 +1237,17 @@ void __cpuinit setup_local_APIC(void) /* always use the value from LDR */ early_per_cpu(x86_cpu_to_logical_apicid, cpu) = logical_smp_processor_id(); + + /* + * Some NUMA implementations (NUMAQ) don't initialize apicid to + * node mapping during NUMA init. Now that logical apicid is + * guaranteed to be known, give it another chance. This is already + * a bit too late - percpu allocation has already happened without + * proper NUMA affinity. + */ + if (apic->x86_32_numa_cpu_node) + set_apicid_to_node(early_per_cpu(x86_cpu_to_apicid, cpu), + apic->x86_32_numa_cpu_node(cpu)); #endif /* @@ -1812,30 +1823,41 @@ void smp_spurious_interrupt(struct pt_regs *regs) */ void smp_error_interrupt(struct pt_regs *regs) { - u32 v, v1; + u32 v0, v1; + u32 i = 0; + static const char * const error_interrupt_reason[] = { + "Send CS error", /* APIC Error Bit 0 */ + "Receive CS error", /* APIC Error Bit 1 */ + "Send accept error", /* APIC Error Bit 2 */ + "Receive accept error", /* APIC Error Bit 3 */ + "Redirectable IPI", /* APIC Error Bit 4 */ + "Send illegal vector", /* APIC Error Bit 5 */ + "Received illegal vector", /* APIC Error Bit 6 */ + "Illegal register address", /* APIC Error Bit 7 */ + }; exit_idle(); irq_enter(); /* First tickle the hardware, only then report what went on. 
-- REW */ - v = apic_read(APIC_ESR); + v0 = apic_read(APIC_ESR); apic_write(APIC_ESR, 0); v1 = apic_read(APIC_ESR); ack_APIC_irq(); atomic_inc(&irq_err_count); - /* - * Here is what the APIC error bits mean: - * 0: Send CS error - * 1: Receive CS error - * 2: Send accept error - * 3: Receive accept error - * 4: Reserved - * 5: Send illegal vector - * 6: Received illegal vector - * 7: Illegal register address - */ - pr_debug("APIC error on CPU%d: %02x(%02x)\n", - smp_processor_id(), v , v1); + apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)", + smp_processor_id(), v0 , v1); + + v1 = v1 & 0xff; + while (v1) { + if (v1 & 0x1) + apic_printk(APIC_DEBUG, KERN_CONT " : %s", error_interrupt_reason[i]); + i++; + v1 >>= 1; + }; + + apic_printk(APIC_DEBUG, KERN_CONT "\n"); + irq_exit(); } @@ -2003,21 +2025,6 @@ void default_init_apic_ldr(void) apic_write(APIC_LDR, val); } -#ifdef CONFIG_X86_32 -int default_x86_32_numa_cpu_node(int cpu) -{ -#ifdef CONFIG_NUMA - int apicid = early_per_cpu(x86_cpu_to_apicid, cpu); - - if (apicid != BAD_APICID) - return __apicid_to_node[apicid]; - return NUMA_NO_NODE; -#else - return 0; -#endif -} -#endif - /* * Power management */ diff --git a/trunk/arch/x86/kernel/apic/apic_noop.c b/trunk/arch/x86/kernel/apic/apic_noop.c index f1baa2dc087a..775b82bc655c 100644 --- a/trunk/arch/x86/kernel/apic/apic_noop.c +++ b/trunk/arch/x86/kernel/apic/apic_noop.c @@ -119,14 +119,6 @@ static void noop_apic_write(u32 reg, u32 v) WARN_ON_ONCE(cpu_has_apic && !disable_apic); } -#ifdef CONFIG_X86_32 -static int noop_x86_32_numa_cpu_node(int cpu) -{ - /* we're always on node 0 */ - return 0; -} -#endif - struct apic apic_noop = { .name = "noop", .probe = noop_probe, @@ -195,6 +187,5 @@ struct apic apic_noop = { #ifdef CONFIG_X86_32 .x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid, - .x86_32_numa_cpu_node = noop_x86_32_numa_cpu_node, #endif }; diff --git a/trunk/arch/x86/kernel/apic/bigsmp_32.c b/trunk/arch/x86/kernel/apic/bigsmp_32.c index 541a2e431659..d84ac5a584b5 100644 --- a/trunk/arch/x86/kernel/apic/bigsmp_32.c +++ b/trunk/arch/x86/kernel/apic/bigsmp_32.c @@ -253,5 +253,4 @@ struct apic apic_bigsmp = { .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, .x86_32_early_logical_apicid = bigsmp_early_logical_apicid, - .x86_32_numa_cpu_node = default_x86_32_numa_cpu_node, }; diff --git a/trunk/arch/x86/kernel/apic/es7000_32.c b/trunk/arch/x86/kernel/apic/es7000_32.c index 3e9de4854c5b..70533de5bd29 100644 --- a/trunk/arch/x86/kernel/apic/es7000_32.c +++ b/trunk/arch/x86/kernel/apic/es7000_32.c @@ -510,11 +510,6 @@ static void es7000_setup_apic_routing(void) nr_ioapics, cpumask_bits(es7000_target_cpus())[0]); } -static int es7000_numa_cpu_node(int cpu) -{ - return 0; -} - static int es7000_cpu_present_to_apicid(int mps_cpu) { if (!mps_cpu) @@ -688,7 +683,6 @@ struct apic __refdata apic_es7000_cluster = { .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, .x86_32_early_logical_apicid = es7000_early_logical_apicid, - .x86_32_numa_cpu_node = es7000_numa_cpu_node, }; struct apic __refdata apic_es7000 = { @@ -752,5 +746,4 @@ struct apic __refdata apic_es7000 = { .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, .x86_32_early_logical_apicid = es7000_early_logical_apicid, - .x86_32_numa_cpu_node = es7000_numa_cpu_node, }; diff --git a/trunk/arch/x86/kernel/apic/io_apic.c b/trunk/arch/x86/kernel/apic/io_apic.c index 68df09bba92e..45fd33d1fd3a 100644 --- a/trunk/arch/x86/kernel/apic/io_apic.c +++ b/trunk/arch/x86/kernel/apic/io_apic.c @@ -128,8 +128,8 @@ 
static int __init parse_noapic(char *str) } early_param("noapic", parse_noapic); -static int io_apic_setup_irq_pin_once(unsigned int irq, int node, - struct io_apic_irq_attr *attr); +static int io_apic_setup_irq_pin(unsigned int irq, int node, + struct io_apic_irq_attr *attr); /* Will be called in mpparse/acpi/sfi codes for saving IRQ info */ void mp_save_irq(struct mpc_intsrc *m) @@ -3570,7 +3570,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) } #endif /* CONFIG_HT_IRQ */ -int +static int io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr) { struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node); @@ -3585,8 +3585,8 @@ io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr) return ret; } -static int io_apic_setup_irq_pin_once(unsigned int irq, int node, - struct io_apic_irq_attr *attr) +int io_apic_setup_irq_pin_once(unsigned int irq, int node, + struct io_apic_irq_attr *attr) { unsigned int id = attr->ioapic, pin = attr->ioapic_pin; int ret; diff --git a/trunk/arch/x86/kernel/apic/numaq_32.c b/trunk/arch/x86/kernel/apic/numaq_32.c index 6273eee5134b..30f13319e24b 100644 --- a/trunk/arch/x86/kernel/apic/numaq_32.c +++ b/trunk/arch/x86/kernel/apic/numaq_32.c @@ -48,8 +48,6 @@ #include #include -#define MB_TO_PAGES(addr) ((addr) << (20 - PAGE_SHIFT)) - int found_numaq; /* @@ -79,31 +77,20 @@ int quad_local_to_mp_bus_id[NR_CPUS/4][4]; static inline void numaq_register_node(int node, struct sys_cfg_data *scd) { struct eachquadmem *eq = scd->eq + node; + u64 start = (u64)(eq->hi_shrd_mem_start - eq->priv_mem_size) << 20; + u64 end = (u64)(eq->hi_shrd_mem_start + eq->hi_shrd_mem_size) << 20; + int ret; - node_set_online(node); - - /* Convert to pages */ - node_start_pfn[node] = - MB_TO_PAGES(eq->hi_shrd_mem_start - eq->priv_mem_size); - - node_end_pfn[node] = - MB_TO_PAGES(eq->hi_shrd_mem_start + eq->hi_shrd_mem_size); - - memblock_x86_register_active_regions(node, node_start_pfn[node], - node_end_pfn[node]); - - memory_present(node, node_start_pfn[node], node_end_pfn[node]); - - node_remap_size[node] = node_memmap_size_bytes(node, - node_start_pfn[node], - node_end_pfn[node]); + node_set(node, numa_nodes_parsed); + ret = numa_add_memblk(node, start, end); + BUG_ON(ret < 0); } /* * Function: smp_dump_qct() * * Description: gets memory layout from the quad config table. This - * function also updates node_online_map with the nodes (quads) present. + * function also updates numa_nodes_parsed with the nodes (quads) present. 
*/ static void __init smp_dump_qct(void) { @@ -112,7 +99,6 @@ static void __init smp_dump_qct(void) scd = (void *)__va(SYS_CFG_DATA_PRIV_ADDR); - nodes_clear(node_online_map); for_each_node(node) { if (scd->quads_present31_0 & (1 << node)) numaq_register_node(node, scd); @@ -282,14 +268,14 @@ static __init void early_check_numaq(void) } } -int __init get_memcfg_numaq(void) +int __init numaq_numa_init(void) { early_check_numaq(); if (!found_numaq) - return 0; + return -ENOENT; smp_dump_qct(); - return 1; + return 0; } #define NUMAQ_APIC_DFR_VALUE (APIC_DFR_CLUSTER) diff --git a/trunk/arch/x86/kernel/apic/probe_32.c b/trunk/arch/x86/kernel/apic/probe_32.c index fc84c7b61108..6541e471fd91 100644 --- a/trunk/arch/x86/kernel/apic/probe_32.c +++ b/trunk/arch/x86/kernel/apic/probe_32.c @@ -172,7 +172,6 @@ struct apic apic_default = { .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, .x86_32_early_logical_apicid = default_x86_32_early_logical_apicid, - .x86_32_numa_cpu_node = default_x86_32_numa_cpu_node, }; extern struct apic apic_numaq; diff --git a/trunk/arch/x86/kernel/apic/summit_32.c b/trunk/arch/x86/kernel/apic/summit_32.c index e4b8059b414a..35bcd7d995a1 100644 --- a/trunk/arch/x86/kernel/apic/summit_32.c +++ b/trunk/arch/x86/kernel/apic/summit_32.c @@ -551,5 +551,4 @@ struct apic apic_summit = { .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, .x86_32_early_logical_apicid = summit_early_logical_apicid, - .x86_32_numa_cpu_node = default_x86_32_numa_cpu_node, }; diff --git a/trunk/arch/x86/kernel/apic/x2apic_uv_x.c b/trunk/arch/x86/kernel/apic/x2apic_uv_x.c index 33b10a0fc095..7acd2d2ac965 100644 --- a/trunk/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/trunk/arch/x86/kernel/apic/x2apic_uv_x.c @@ -37,6 +37,13 @@ #include #include #include +#include + +/* BMC sets a bit this MMR non-zero before sending an NMI */ +#define UVH_NMI_MMR UVH_SCRATCH5 +#define UVH_NMI_MMR_CLEAR (UVH_NMI_MMR + 8) +#define UV_NMI_PENDING_MASK (1UL << 63) +DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count); DEFINE_PER_CPU(int, x2apic_extra_bits); @@ -642,18 +649,46 @@ void __cpuinit uv_cpu_init(void) */ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data) { + unsigned long real_uv_nmi; + int bid; + if (reason != DIE_NMIUNKNOWN) return NOTIFY_OK; if (in_crash_kexec) /* do nothing if entering the crash kernel */ return NOTIFY_OK; + /* - * Use a lock so only one cpu prints at a time - * to prevent intermixed output. + * Each blade has an MMR that indicates when an NMI has been sent + * to cpus on the blade. If an NMI is detected, atomically + * clear the MMR and update a per-blade NMI count used to + * cause each cpu on the blade to notice a new NMI. + */ + bid = uv_numa_blade_id(); + real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK); + + if (unlikely(real_uv_nmi)) { + spin_lock(&uv_blade_info[bid].nmi_lock); + real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK); + if (real_uv_nmi) { + uv_blade_info[bid].nmi_count++; + uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK); + } + spin_unlock(&uv_blade_info[bid].nmi_lock); + } + + if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count)) + return NOTIFY_DONE; + + __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count; + + /* + * Use a lock so only one cpu prints at a time. + * This prevents intermixed output. 
*/ spin_lock(&uv_nmi_lock); - pr_info("NMI stack dump cpu %u:\n", smp_processor_id()); + pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id()); dump_stack(); spin_unlock(&uv_nmi_lock); @@ -661,7 +696,8 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data) } static struct notifier_block uv_dump_stack_nmi_nb = { - .notifier_call = uv_handle_nmi + .notifier_call = uv_handle_nmi, + .priority = NMI_LOCAL_LOW_PRIOR - 1, }; void uv_register_nmi_notifier(void) @@ -720,8 +756,9 @@ void __init uv_system_init(void) printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades()); bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); - uv_blade_info = kmalloc(bytes, GFP_KERNEL); + uv_blade_info = kzalloc(bytes, GFP_KERNEL); BUG_ON(!uv_blade_info); + for (blade = 0; blade < uv_num_possible_blades(); blade++) uv_blade_info[blade].memory_nid = -1; @@ -747,6 +784,7 @@ void __init uv_system_init(void) uv_blade_info[blade].pnode = pnode; uv_blade_info[blade].nr_possible_cpus = 0; uv_blade_info[blade].nr_online_cpus = 0; + spin_lock_init(&uv_blade_info[blade].nmi_lock); max_pnode = max(pnode, max_pnode); blade++; } diff --git a/trunk/arch/x86/kernel/apm_32.c b/trunk/arch/x86/kernel/apm_32.c index 0b4be431c620..3bfa02235965 100644 --- a/trunk/arch/x86/kernel/apm_32.c +++ b/trunk/arch/x86/kernel/apm_32.c @@ -228,6 +228,7 @@ #include #include #include +#include #include #include @@ -1237,7 +1238,7 @@ static int suspend(int vetoable) dpm_suspend_noirq(PMSG_SUSPEND); local_irq_disable(); - sysdev_suspend(PMSG_SUSPEND); + syscore_suspend(); local_irq_enable(); @@ -1255,7 +1256,7 @@ static int suspend(int vetoable) apm_error("suspend", err); err = (err == APM_SUCCESS) ? 0 : -EIO; - sysdev_resume(); + syscore_resume(); local_irq_enable(); dpm_resume_noirq(PMSG_RESUME); @@ -1279,7 +1280,7 @@ static void standby(void) dpm_suspend_noirq(PMSG_SUSPEND); local_irq_disable(); - sysdev_suspend(PMSG_SUSPEND); + syscore_suspend(); local_irq_enable(); err = set_system_power_state(APM_STATE_STANDBY); @@ -1287,7 +1288,7 @@ static void standby(void) apm_error("standby", err); local_irq_disable(); - sysdev_resume(); + syscore_resume(); local_irq_enable(); dpm_resume_noirq(PMSG_RESUME); diff --git a/trunk/arch/x86/kernel/cpu/Makefile b/trunk/arch/x86/kernel/cpu/Makefile index 3f0ebe429a01..6042981d0309 100644 --- a/trunk/arch/x86/kernel/cpu/Makefile +++ b/trunk/arch/x86/kernel/cpu/Makefile @@ -30,7 +30,6 @@ obj-$(CONFIG_PERF_EVENTS) += perf_event.o obj-$(CONFIG_X86_MCE) += mcheck/ obj-$(CONFIG_MTRR) += mtrr/ -obj-$(CONFIG_CPU_FREQ) += cpufreq/ obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o diff --git a/trunk/arch/x86/kernel/cpu/amd.c b/trunk/arch/x86/kernel/cpu/amd.c index 3ecece0217ef..6f9d1f6063e9 100644 --- a/trunk/arch/x86/kernel/cpu/amd.c +++ b/trunk/arch/x86/kernel/cpu/amd.c @@ -613,8 +613,27 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) #endif /* As a rule processors have APIC timer running in deep C states */ - if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400)) + if (c->x86 > 0xf && !cpu_has_amd_erratum(amd_erratum_400)) set_cpu_cap(c, X86_FEATURE_ARAT); + + /* + * Disable GART TLB Walk Errors on Fam10h. We do this here + * because this is always needed when GART is enabled, even in a + * kernel which has no MCE support built in. + */ + if (c->x86 == 0x10) { + /* + * BIOS should disable GartTlbWlk Errors themself. If + * it doesn't do it here as suggested by the BKDG. 
+ * + * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012 + */ + u64 mask; + + rdmsrl(MSR_AMD64_MCx_MASK(4), mask); + mask |= (1 << 10); + wrmsrl(MSR_AMD64_MCx_MASK(4), mask); + } } #ifdef CONFIG_X86_32 diff --git a/trunk/arch/x86/kernel/cpu/common.c b/trunk/arch/x86/kernel/cpu/common.c index e2ced0074a45..cbc70a27430c 100644 --- a/trunk/arch/x86/kernel/cpu/common.c +++ b/trunk/arch/x86/kernel/cpu/common.c @@ -254,6 +254,25 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c) } #endif +static int disable_smep __initdata; +static __init int setup_disable_smep(char *arg) +{ + disable_smep = 1; + return 1; +} +__setup("nosmep", setup_disable_smep); + +static __init void setup_smep(struct cpuinfo_x86 *c) +{ + if (cpu_has(c, X86_FEATURE_SMEP)) { + if (unlikely(disable_smep)) { + setup_clear_cpu_cap(X86_FEATURE_SMEP); + clear_in_cr4(X86_CR4_SMEP); + } else + set_in_cr4(X86_CR4_SMEP); + } +} + /* * Some CPU features depend on higher CPUID levels, which may not always * be available due to CPUID level capping or broken virtualization @@ -565,8 +584,7 @@ void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); - if (eax > 0) - c->x86_capability[9] = ebx; + c->x86_capability[9] = ebx; } /* AMD-defined flags: level 0x80000001 */ @@ -668,6 +686,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) c->cpu_index = 0; #endif filter_cpuid_features(c, false); + + setup_smep(c); } void __init early_cpu_init(void) @@ -753,6 +773,8 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c) #endif } + setup_smep(c); + get_model_name(c); /* Default name */ detect_nopl(c); diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/Makefile b/trunk/arch/x86/kernel/cpu/cpufreq/Makefile deleted file mode 100644 index bd54bf67e6fb..000000000000 --- a/trunk/arch/x86/kernel/cpu/cpufreq/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -# Link order matters. K8 is preferred to ACPI because of firmware bugs in early -# K8 systems. ACPI is preferred to all other hardware-specific drivers. -# speedstep-* is preferred over p4-clockmod. 
- -obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o mperf.o -obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o -obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o -obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o -obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o -obj-$(CONFIG_X86_LONGHAUL) += longhaul.o -obj-$(CONFIG_X86_E_POWERSAVER) += e_powersaver.o -obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o -obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o -obj-$(CONFIG_X86_LONGRUN) += longrun.o -obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o -obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o -obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o -obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o -obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o -obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o -obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o diff --git a/trunk/arch/x86/kernel/cpu/intel.c b/trunk/arch/x86/kernel/cpu/intel.c index df86bc8c859d..1edf5ba4fb2b 100644 --- a/trunk/arch/x86/kernel/cpu/intel.c +++ b/trunk/arch/x86/kernel/cpu/intel.c @@ -29,10 +29,10 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) { + u64 misc_enable; + /* Unmask CPUID levels if masked: */ if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { - u64 misc_enable; - rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) { @@ -118,8 +118,6 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) * (model 2) with the same problem. */ if (c->x86 == 15) { - u64 misc_enable; - rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) { @@ -130,6 +128,19 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) } } #endif + + /* + * If fast string is not enabled in IA32_MISC_ENABLE for any reason, + * clear the fast string and enhanced fast string CPU capabilities. 
+ */ + if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { + rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); + if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) { + printk(KERN_INFO "Disabled fast string operations\n"); + setup_clear_cpu_cap(X86_FEATURE_REP_GOOD); + setup_clear_cpu_cap(X86_FEATURE_ERMS); + } + } } #ifdef CONFIG_X86_32 @@ -400,12 +411,10 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) switch (c->x86_model) { case 5: - if (c->x86_mask == 0) { - if (l2 == 0) - p = "Celeron (Covington)"; - else if (l2 == 256) - p = "Mobile Pentium II (Dixon)"; - } + if (l2 == 0) + p = "Celeron (Covington)"; + else if (l2 == 256) + p = "Mobile Pentium II (Dixon)"; break; case 6: diff --git a/trunk/arch/x86/kernel/cpu/intel_cacheinfo.c b/trunk/arch/x86/kernel/cpu/intel_cacheinfo.c index 1ce1af2899df..c105c533ed94 100644 --- a/trunk/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/trunk/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -327,7 +327,6 @@ static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3) l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9)); l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13)); - l3->indices = (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1; l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1; } @@ -454,27 +453,16 @@ int amd_set_l3_disable_slot(struct amd_l3_cache *l3, int cpu, unsigned slot, { int ret = 0; -#define SUBCACHE_MASK (3UL << 20) -#define SUBCACHE_INDEX 0xfff - - /* - * check whether this slot is already used or - * the index is already disabled - */ + /* check if @slot is already used or the index is already disabled */ ret = amd_get_l3_disable_slot(l3, slot); if (ret >= 0) return -EINVAL; - /* - * check whether the other slot has disabled the - * same index already - */ - if (index == amd_get_l3_disable_slot(l3, !slot)) + if (index > l3->indices) return -EINVAL; - /* do not allow writes outside of allowed bits */ - if ((index & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) || - ((index & SUBCACHE_INDEX) > l3->indices)) + /* check whether the other slot has disabled the same index already */ + if (index == amd_get_l3_disable_slot(l3, !slot)) return -EINVAL; amd_l3_disable_index(l3, cpu, slot, index); diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce.c b/trunk/arch/x86/kernel/cpu/mcheck/mce.c index 3385ea26f684..ff1ae9b6464d 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/mce.c +++ b/trunk/arch/x86/kernel/cpu/mcheck/mce.c @@ -105,20 +105,6 @@ static int cpu_missing; ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain); EXPORT_SYMBOL_GPL(x86_mce_decoder_chain); -static int default_decode_mce(struct notifier_block *nb, unsigned long val, - void *data) -{ - pr_emerg(HW_ERR "No human readable MCE decoding support on this CPU type.\n"); - pr_emerg(HW_ERR "Run the message through 'mcelog --ascii' to decode.\n"); - - return NOTIFY_STOP; -} - -static struct notifier_block mce_dec_nb = { - .notifier_call = default_decode_mce, - .priority = -1, -}; - /* MCA banks polled by the period polling timer for corrected events */ DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { [0 ... 
BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL @@ -212,6 +198,8 @@ void mce_log(struct mce *mce) static void print_mce(struct mce *m) { + int ret = 0; + pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n", m->extcpu, m->mcgstatus, m->bank, m->status); @@ -239,7 +227,11 @@ static void print_mce(struct mce *m) * Print out human-readable details about the MCE error, * (if the CPU has an implementation for that) */ - atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m); + ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m); + if (ret == NOTIFY_STOP) + return; + + pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n"); } #define PANIC_TIMEOUT 5 /* 5 seconds */ @@ -590,7 +582,6 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce) { mce_log(&m); atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, &m); - add_taint(TAINT_MACHINE_CHECK); } /* @@ -1722,8 +1713,6 @@ __setup("mce", mcheck_enable); int __init mcheck_init(void) { - atomic_notifier_chain_register(&x86_mce_decoder_chain, &mce_dec_nb); - mcheck_intel_therm_init(); return 0; diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c b/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c index 167f97b5596e..bb0adad35143 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -509,6 +509,7 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu, out_free: if (b) { kobject_put(&b->kobj); + list_del(&b->miscj); kfree(b); } return err; diff --git a/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c b/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c index 6f8c5e9da97f..27c625178bf1 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -187,8 +187,6 @@ static int therm_throt_process(bool new_event, int event, int level) this_cpu, level == CORE_LEVEL ? 
"Core" : "Package", state->count); - - add_taint(TAINT_MACHINE_CHECK); return 1; } if (old_event) { @@ -355,7 +353,6 @@ static void notify_thresholds(__u64 msr_val) static void intel_thermal_interrupt(void) { __u64 msr_val; - struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); rdmsrl(MSR_IA32_THERM_STATUS, msr_val); @@ -367,19 +364,19 @@ static void intel_thermal_interrupt(void) CORE_LEVEL) != 0) mce_log_therm_throt_event(CORE_THROTTLED | msr_val); - if (cpu_has(c, X86_FEATURE_PLN)) + if (this_cpu_has(X86_FEATURE_PLN)) if (therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT, POWER_LIMIT_EVENT, CORE_LEVEL) != 0) mce_log_therm_throt_event(CORE_POWER_LIMIT | msr_val); - if (cpu_has(c, X86_FEATURE_PTS)) { + if (this_cpu_has(X86_FEATURE_PTS)) { rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); if (therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT, THERMAL_THROTTLING_EVENT, PACKAGE_LEVEL) != 0) mce_log_therm_throt_event(PACKAGE_THROTTLED | msr_val); - if (cpu_has(c, X86_FEATURE_PLN)) + if (this_cpu_has(X86_FEATURE_PLN)) if (therm_throt_process(msr_val & PACKAGE_THERM_STATUS_POWER_LIMIT, POWER_LIMIT_EVENT, @@ -393,7 +390,6 @@ static void unexpected_thermal_interrupt(void) { printk(KERN_ERR "CPU%d: Unexpected LVT thermal interrupt!\n", smp_processor_id()); - add_taint(TAINT_MACHINE_CHECK); } static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt; @@ -446,18 +442,20 @@ void intel_init_thermal(struct cpuinfo_x86 *c) */ rdmsr(MSR_IA32_MISC_ENABLE, l, h); + h = lvtthmr_init; /* * The initial value of thermal LVT entries on all APs always reads * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI * sequence to them and LVT registers are reset to 0s except for * the mask bits which are set to 1s when APs receive INIT IPI. - * Always restore the value that BIOS has programmed on AP based on - * BSP's info we saved since BIOS is always setting the same value - * for all threads/cores + * If BIOS takes over the thermal interrupt and sets its interrupt + * delivery mode to SMI (not fixed), it restores the value that the + * BIOS has programmed on AP based on BSP's info we saved since BIOS + * is always setting the same value for all threads/cores. */ - apic_write(APIC_LVTTHMR, lvtthmr_init); + if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED) + apic_write(APIC_LVTTHMR, lvtthmr_init); - h = lvtthmr_init; if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { printk(KERN_DEBUG diff --git a/trunk/arch/x86/kernel/cpu/perf_event.c b/trunk/arch/x86/kernel/cpu/perf_event.c index eed3673a8656..3a0338b4b179 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event.c +++ b/trunk/arch/x86/kernel/cpu/perf_event.c @@ -31,6 +31,7 @@ #include #include #include +#include #if 0 #undef wrmsrl @@ -363,12 +364,18 @@ x86_perf_event_update(struct perf_event *event) return new_raw_count; } -/* using X86_FEATURE_PERFCTR_CORE to later implement ALTERNATIVE() here */ static inline int x86_pmu_addr_offset(int index) { - if (boot_cpu_has(X86_FEATURE_PERFCTR_CORE)) - return index << 1; - return index; + int offset; + + /* offset = X86_FEATURE_PERFCTR_CORE ? 
index << 1 : index */ + alternative_io(ASM_NOP2, + "shll $1, %%eax", + X86_FEATURE_PERFCTR_CORE, + "=a" (offset), + "a" (index)); + + return offset; } static inline unsigned int x86_pmu_config_addr(int index) @@ -586,8 +593,12 @@ static int x86_setup_perfctr(struct perf_event *event) return -EOPNOTSUPP; } + /* + * Do not allow config1 (extended registers) to propagate, + * there's no sane user-space generalization yet: + */ if (attr->type == PERF_TYPE_RAW) - return x86_pmu_extra_regs(event->attr.config, event); + return 0; if (attr->type == PERF_TYPE_HW_CACHE) return set_ext_hw_attr(hwc, event); @@ -609,8 +620,8 @@ static int x86_setup_perfctr(struct perf_event *event) /* * Branch tracing: */ - if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) && - (hwc->sample_period == 1)) { + if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS && + !attr->freq && hwc->sample_period == 1) { /* BTS is not supported by this architecture. */ if (!x86_pmu.bts_active) return -EOPNOTSUPP; @@ -1284,6 +1295,16 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) cpuc = &__get_cpu_var(cpu_hw_events); + /* + * Some chipsets need to unmask the LVTPC in a particular spot + * inside the nmi handler. As a result, the unmasking was pushed + * into all the nmi handlers. + * + * This generic handler doesn't seem to have any issues where the + * unmasking occurs so it was left at the top. + */ + apic_write(APIC_LVTPC, APIC_DM_NMI); + for (idx = 0; idx < x86_pmu.num_counters; idx++) { if (!test_bit(idx, cpuc->active_mask)) { /* @@ -1370,8 +1391,6 @@ perf_event_nmi_handler(struct notifier_block *self, return NOTIFY_DONE; } - apic_write(APIC_LVTPC, APIC_DM_NMI); - handled = x86_pmu.handle_irq(args->regs); if (!handled) return NOTIFY_DONE; @@ -1754,17 +1773,6 @@ static struct pmu pmu = { * callchain support */ -static void -backtrace_warning_symbol(void *data, char *msg, unsigned long symbol) -{ - /* Ignore warnings */ -} - -static void backtrace_warning(void *data, char *msg) -{ - /* Ignore warnings */ -} - static int backtrace_stack(void *data, char *name) { return 0; @@ -1778,8 +1786,6 @@ static void backtrace_address(void *data, unsigned long addr, int reliable) } static const struct stacktrace_ops backtrace_ops = { - .warning = backtrace_warning, - .warning_symbol = backtrace_warning_symbol, .stack = backtrace_stack, .address = backtrace_address, .walk_stack = print_context_stack_bp, diff --git a/trunk/arch/x86/kernel/cpu/perf_event_amd.c b/trunk/arch/x86/kernel/cpu/perf_event_amd.c index 461f62bbd774..fe29c1d2219e 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_amd.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_amd.c @@ -8,7 +8,7 @@ static __initconst const u64 amd_hw_cache_event_ids [ C(L1D) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */ - [ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */ + [ C(RESULT_MISS) ] = 0x0141, /* Data Cache Misses */ }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */ @@ -96,12 +96,14 @@ static __initconst const u64 amd_hw_cache_event_ids */ static const u64 amd_perfmon_event_map[] = { - [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, - [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, - [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080, - [PERF_COUNT_HW_CACHE_MISSES] = 0x0081, - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, - [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, + [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080, + [PERF_COUNT_HW_CACHE_MISSES] = 0x0081, + 
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, + [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */ + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */ }; static u64 amd_pmu_event_map(int hw_event) @@ -427,7 +429,9 @@ static __initconst const struct x86_pmu amd_pmu = { * * Exceptions: * + * 0x000 FP PERF_CTL[3], PERF_CTL[5:3] (*) * 0x003 FP PERF_CTL[3] + * 0x004 FP PERF_CTL[3], PERF_CTL[5:3] (*) * 0x00B FP PERF_CTL[3] * 0x00D FP PERF_CTL[3] * 0x023 DE PERF_CTL[2:0] @@ -448,6 +452,8 @@ static __initconst const struct x86_pmu amd_pmu = { * 0x0DF LS PERF_CTL[5:0] * 0x1D6 EX PERF_CTL[5:0] * 0x1D8 EX PERF_CTL[5:0] + * + * (*) depending on the umask all FPU counters may be used */ static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0); @@ -460,18 +466,28 @@ static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0); static struct event_constraint * amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event) { - unsigned int event_code = amd_get_event_code(&event->hw); + struct hw_perf_event *hwc = &event->hw; + unsigned int event_code = amd_get_event_code(hwc); switch (event_code & AMD_EVENT_TYPE_MASK) { case AMD_EVENT_FP: switch (event_code) { + case 0x000: + if (!(hwc->config & 0x0000F000ULL)) + break; + if (!(hwc->config & 0x00000F00ULL)) + break; + return &amd_f15_PMC3; + case 0x004: + if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1) + break; + return &amd_f15_PMC3; case 0x003: case 0x00B: case 0x00D: return &amd_f15_PMC3; - default: - return &amd_f15_PMC53; } + return &amd_f15_PMC53; case AMD_EVENT_LS: case AMD_EVENT_DC: case AMD_EVENT_EX_LS: diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel.c b/trunk/arch/x86/kernel/cpu/perf_event_intel.c index 8fc2b2cee1da..41178c826c48 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel.c @@ -25,7 +25,7 @@ struct intel_percore { /* * Intel PerfMon, used on Core and later. 
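 *
 * Note: the event map below is no longer const because the
 * Nehalem/Westmere/SandyBridge setup code later in this file patches
 * the stalled-cycles encodings into it at boot, e.g.
 *
 *	intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
 *
 * and it is marked __read_mostly since it is only written during init.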
*/ -static const u64 intel_perfmon_event_map[] = +static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly = { [PERF_COUNT_HW_CPU_CYCLES] = 0x003c, [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, @@ -36,7 +36,7 @@ static const u64 intel_perfmon_event_map[] = [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, }; -static struct event_constraint intel_core_event_constraints[] = +static struct event_constraint intel_core_event_constraints[] __read_mostly = { INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ @@ -47,7 +47,7 @@ static struct event_constraint intel_core_event_constraints[] = EVENT_CONSTRAINT_END }; -static struct event_constraint intel_core2_event_constraints[] = +static struct event_constraint intel_core2_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ @@ -70,7 +70,7 @@ static struct event_constraint intel_core2_event_constraints[] = EVENT_CONSTRAINT_END }; -static struct event_constraint intel_nehalem_event_constraints[] = +static struct event_constraint intel_nehalem_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ @@ -86,19 +86,19 @@ static struct event_constraint intel_nehalem_event_constraints[] = EVENT_CONSTRAINT_END }; -static struct extra_reg intel_nehalem_extra_regs[] = +static struct extra_reg intel_nehalem_extra_regs[] __read_mostly = { INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), EVENT_EXTRA_END }; -static struct event_constraint intel_nehalem_percore_constraints[] = +static struct event_constraint intel_nehalem_percore_constraints[] __read_mostly = { INTEL_EVENT_CONSTRAINT(0xb7, 0), EVENT_CONSTRAINT_END }; -static struct event_constraint intel_westmere_event_constraints[] = +static struct event_constraint intel_westmere_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ @@ -110,7 +110,7 @@ static struct event_constraint intel_westmere_event_constraints[] = EVENT_CONSTRAINT_END }; -static struct event_constraint intel_snb_event_constraints[] = +static struct event_constraint intel_snb_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ @@ -123,21 +123,21 @@ static struct event_constraint intel_snb_event_constraints[] = EVENT_CONSTRAINT_END }; -static struct extra_reg intel_westmere_extra_regs[] = +static struct extra_reg intel_westmere_extra_regs[] __read_mostly = { INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff), EVENT_EXTRA_END }; -static struct event_constraint intel_westmere_percore_constraints[] = +static struct event_constraint intel_westmere_percore_constraints[] __read_mostly = { INTEL_EVENT_CONSTRAINT(0xb7, 0), INTEL_EVENT_CONSTRAINT(0xbb, 0), EVENT_CONSTRAINT_END }; -static struct event_constraint intel_gen_event_constraints[] = +static struct event_constraint intel_gen_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ @@ -184,26 +184,23 @@ static __initconst const u64 snb_hw_cache_event_ids }, }, [ C(LL ) ] = { - /* - * TBD: Need Off-core Response Performance Monitoring support - */ [ C(OP_READ) ] = { - /* 
OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */ + /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ [ C(RESULT_ACCESS) ] = 0x01b7, - /* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */ - [ C(RESULT_MISS) ] = 0x01bb, + /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01b7, }, [ C(OP_WRITE) ] = { - /* OFFCORE_RESPONSE_0.ANY_RFO.LOCAL_CACHE */ + /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ [ C(RESULT_ACCESS) ] = 0x01b7, - /* OFFCORE_RESPONSE_1.ANY_RFO.ANY_LLC_MISS */ - [ C(RESULT_MISS) ] = 0x01bb, + /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01b7, }, [ C(OP_PREFETCH) ] = { - /* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */ + /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ [ C(RESULT_ACCESS) ] = 0x01b7, - /* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */ - [ C(RESULT_MISS) ] = 0x01bb, + /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01b7, }, }, [ C(DTLB) ] = { @@ -285,26 +282,26 @@ static __initconst const u64 westmere_hw_cache_event_ids }, [ C(LL ) ] = { [ C(OP_READ) ] = { - /* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */ + /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ [ C(RESULT_ACCESS) ] = 0x01b7, - /* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */ - [ C(RESULT_MISS) ] = 0x01bb, + /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01b7, }, /* * Use RFO, not WRITEBACK, because a write miss would typically occur * on RFO. */ [ C(OP_WRITE) ] = { - /* OFFCORE_RESPONSE_1.ANY_RFO.LOCAL_CACHE */ - [ C(RESULT_ACCESS) ] = 0x01bb, - /* OFFCORE_RESPONSE_0.ANY_RFO.ANY_LLC_MISS */ + /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ + [ C(RESULT_ACCESS) ] = 0x01b7, + /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ [ C(RESULT_MISS) ] = 0x01b7, }, [ C(OP_PREFETCH) ] = { - /* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */ + /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ [ C(RESULT_ACCESS) ] = 0x01b7, - /* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */ - [ C(RESULT_MISS) ] = 0x01bb, + /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ + [ C(RESULT_MISS) ] = 0x01b7, }, }, [ C(DTLB) ] = { @@ -352,16 +349,36 @@ static __initconst const u64 westmere_hw_cache_event_ids }; /* - * OFFCORE_RESPONSE MSR bits (subset), See IA32 SDM Vol 3 30.6.1.3 + * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits; + * See IA32 SDM Vol 3B 30.6.1.3 */ -#define DMND_DATA_RD (1 << 0) -#define DMND_RFO (1 << 1) -#define DMND_WB (1 << 3) -#define PF_DATA_RD (1 << 4) -#define PF_DATA_RFO (1 << 5) -#define RESP_UNCORE_HIT (1 << 8) -#define RESP_MISS (0xf600) /* non uncore hit */ +#define NHM_DMND_DATA_RD (1 << 0) +#define NHM_DMND_RFO (1 << 1) +#define NHM_DMND_IFETCH (1 << 2) +#define NHM_DMND_WB (1 << 3) +#define NHM_PF_DATA_RD (1 << 4) +#define NHM_PF_DATA_RFO (1 << 5) +#define NHM_PF_IFETCH (1 << 6) +#define NHM_OFFCORE_OTHER (1 << 7) +#define NHM_UNCORE_HIT (1 << 8) +#define NHM_OTHER_CORE_HIT_SNP (1 << 9) +#define NHM_OTHER_CORE_HITM (1 << 10) + /* reserved */ +#define NHM_REMOTE_CACHE_FWD (1 << 12) +#define NHM_REMOTE_DRAM (1 << 13) +#define NHM_LOCAL_DRAM (1 << 14) +#define NHM_NON_DRAM (1 << 15) + +#define NHM_ALL_DRAM (NHM_REMOTE_DRAM|NHM_LOCAL_DRAM) + +#define NHM_DMND_READ (NHM_DMND_DATA_RD) +#define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB) +#define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO) + +#define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM) +#define NHM_L3_MISS (NHM_NON_DRAM|NHM_ALL_DRAM|NHM_REMOTE_CACHE_FWD) +#define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS) static __initconst const u64 nehalem_hw_cache_extra_regs [PERF_COUNT_HW_CACHE_MAX] @@ -370,16 +387,16 @@ static 
__initconst const u64 nehalem_hw_cache_extra_regs { [ C(LL ) ] = { [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = DMND_DATA_RD|RESP_UNCORE_HIT, - [ C(RESULT_MISS) ] = DMND_DATA_RD|RESP_MISS, + [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS, + [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS, }, [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = DMND_RFO|DMND_WB|RESP_UNCORE_HIT, - [ C(RESULT_MISS) ] = DMND_RFO|DMND_WB|RESP_MISS, + [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS, + [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS, }, [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_UNCORE_HIT, - [ C(RESULT_MISS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_MISS, + [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS, + [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS, }, } }; @@ -391,12 +408,12 @@ static __initconst const u64 nehalem_hw_cache_event_ids { [ C(L1D) ] = { [ C(OP_READ) ] = { - [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */ - [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */ + [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */ + [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */ }, [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */ - [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */ + [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */ + [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */ }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */ @@ -933,6 +950,16 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) cpuc = &__get_cpu_var(cpu_hw_events); + /* + * Some chipsets need to unmask the LVTPC in a particular spot + * inside the nmi handler. As a result, the unmasking was pushed + * into all the nmi handlers. + * + * This handler doesn't seem to have any issues with the unmasking + * so it was left at the top. + */ + apic_write(APIC_LVTPC, APIC_DM_NMI); + intel_pmu_disable_all(); handled = intel_pmu_drain_bts_buffer(); status = intel_pmu_get_status(); @@ -998,6 +1025,9 @@ intel_bts_constraints(struct perf_event *event) struct hw_perf_event *hwc = &event->hw; unsigned int hw_event, bts_event; + if (event->attr.freq) + return NULL; + hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); @@ -1305,7 +1335,7 @@ static void intel_clovertown_quirks(void) * AJ106 could possibly be worked around by not allowing LBR * usage from PEBS, including the fixup. * AJ68 could possibly be worked around by always programming - * a pebs_event_reset[0] value and coping with the lost events. + * a pebs_event_reset[0] value and coping with the lost events. * * But taken together it might just make sense to not enable PEBS on * these chips. @@ -1409,6 +1439,23 @@ static __init int intel_pmu_init(void) x86_pmu.percore_constraints = intel_nehalem_percore_constraints; x86_pmu.enable_all = intel_pmu_nhm_enable_all; x86_pmu.extra_regs = intel_nehalem_extra_regs; + + /* UOPS_ISSUED.STALLED_CYCLES */ + intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; + /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ + intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1; + + if (ebx & 0x40) { + /* + * Erratum AAJ80 detected, we work it around by using + * the BR_MISP_EXEC.ANY event. 
This will over-count + * branch-misses, but it's still much better than the + * architectural event which is often completely bogus: + */ + intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89; + + pr_cont("erratum AAJ80 worked around, "); + } pr_cont("Nehalem events, "); break; @@ -1425,6 +1472,7 @@ static __init int intel_pmu_init(void) case 37: /* 32 nm nehalem, "Clarkdale" */ case 44: /* 32 nm nehalem, "Gulftown" */ + case 47: /* 32 nm Xeon E7 */ memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, @@ -1437,6 +1485,12 @@ static __init int intel_pmu_init(void) x86_pmu.enable_all = intel_pmu_nhm_enable_all; x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints; x86_pmu.extra_regs = intel_westmere_extra_regs; + + /* UOPS_ISSUED.STALLED_CYCLES */ + intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; + /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ + intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1; + pr_cont("Westmere events, "); break; @@ -1448,6 +1502,12 @@ static __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_snb_event_constraints; x86_pmu.pebs_constraints = intel_snb_pebs_events; + + /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ + intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; + /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/ + intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x18001b1; + pr_cont("SandyBridge events, "); break; diff --git a/trunk/arch/x86/kernel/cpu/perf_event_p4.c b/trunk/arch/x86/kernel/cpu/perf_event_p4.c index c2520e178d32..ead584fb6a7d 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_p4.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_p4.c @@ -468,7 +468,7 @@ static struct p4_event_bind p4_event_bind_map[] = { .opcode = P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED), .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, .escr_emask = - P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS), + P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS), .cntr = { {12, 13, 16}, {14, 15, 17} }, }, [P4_EVENT_X87_ASSIST] = { @@ -912,8 +912,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) int idx, handled = 0; u64 val; - data.addr = 0; - data.raw = NULL; + perf_sample_data_init(&data, 0); cpuc = &__get_cpu_var(cpu_hw_events); @@ -947,14 +946,23 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) if (!x86_perf_event_set_period(event)) continue; if (perf_event_overflow(event, 1, &data, regs)) - p4_pmu_disable_event(event); + x86_pmu_stop(event, 0); } - if (handled) { - /* p4 quirk: unmask it again */ - apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED); + if (handled) inc_irq_stat(apic_perf_irqs); - } + + /* + * When dealing with the unmasking of the LVTPC on P4 perf hw, it has + * been observed that the OVF bit flag has to be cleared first _before_ + * the LVTPC can be unmasked. + * + * The reason is the NMI line will continue to be asserted while the OVF + * bit is set. This causes a second NMI to generate if the LVTPC is + * unmasked before the OVF bit is cleared, leading to unknown NMI + * messages. 
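+ *
+ * In short, the required ordering is:
+ *
+ *	1) clear the OVF bit in each overflowed counter's CCCR;
+ *	2) apic_write(APIC_LVTPC, APIC_DM_NMI) to unmask the LVTPC;
+ *
+ * and never the reverse.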
+ */ + apic_write(APIC_LVTPC, APIC_DM_NMI); return handled; } @@ -1188,7 +1196,7 @@ static __init int p4_pmu_init(void) { unsigned int low, high; - /* If we get stripped -- indexig fails */ + /* If we get stripped -- indexing fails */ BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC); rdmsr(MSR_IA32_MISC_ENABLE, low, high); diff --git a/trunk/arch/x86/kernel/devicetree.c b/trunk/arch/x86/kernel/devicetree.c index 706a9fb46a58..e90f08458e6b 100644 --- a/trunk/arch/x86/kernel/devicetree.c +++ b/trunk/arch/x86/kernel/devicetree.c @@ -391,7 +391,7 @@ static int ioapic_xlate(struct irq_domain *id, const u32 *intspec, u32 intsize, set_io_apic_irq_attr(&attr, idx, line, it->trigger, it->polarity); - return io_apic_setup_irq_pin(*out_hwirq, cpu_to_node(0), &attr); + return io_apic_setup_irq_pin_once(*out_hwirq, cpu_to_node(0), &attr); } static void __init ioapic_add_ofnode(struct device_node *np) diff --git a/trunk/arch/x86/kernel/dumpstack.c b/trunk/arch/x86/kernel/dumpstack.c index e2a3f0606da4..1aae78f775fc 100644 --- a/trunk/arch/x86/kernel/dumpstack.c +++ b/trunk/arch/x86/kernel/dumpstack.c @@ -135,20 +135,6 @@ print_context_stack_bp(struct thread_info *tinfo, } EXPORT_SYMBOL_GPL(print_context_stack_bp); - -static void -print_trace_warning_symbol(void *data, char *msg, unsigned long symbol) -{ - printk(data); - print_symbol(msg, symbol); - printk("\n"); -} - -static void print_trace_warning(void *data, char *msg) -{ - printk("%s%s\n", (char *)data, msg); -} - static int print_trace_stack(void *data, char *name) { printk("%s <%s> ", (char *)data, name); @@ -166,8 +152,6 @@ static void print_trace_address(void *data, unsigned long addr, int reliable) } static const struct stacktrace_ops print_trace_ops = { - .warning = print_trace_warning, - .warning_symbol = print_trace_warning_symbol, .stack = print_trace_stack, .address = print_trace_address, .walk_stack = print_context_stack, @@ -279,7 +263,6 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err) printk("DEBUG_PAGEALLOC"); #endif printk("\n"); - sysfs_printk_last_file(); if (notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV) == NOTIFY_STOP) return 1; diff --git a/trunk/arch/x86/kernel/ftrace.c b/trunk/arch/x86/kernel/ftrace.c index a93742a57468..0ba15a6cc57e 100644 --- a/trunk/arch/x86/kernel/ftrace.c +++ b/trunk/arch/x86/kernel/ftrace.c @@ -260,9 +260,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code) return mod_code_status; } -static unsigned char *ftrace_nop_replace(void) +static const unsigned char *ftrace_nop_replace(void) { - return ideal_nop5; + return ideal_nops[NOP_ATOMIC5]; } static int diff --git a/trunk/arch/x86/kernel/head32.c b/trunk/arch/x86/kernel/head32.c index d6d6bb361931..3bb08509a7a1 100644 --- a/trunk/arch/x86/kernel/head32.c +++ b/trunk/arch/x86/kernel/head32.c @@ -23,7 +23,6 @@ static void __init i386_default_early_setup(void) { /* Initialize 32bit specific setup functions */ - x86_init.resources.probe_roms = probe_roms; x86_init.resources.reserve_resources = i386_reserve_resources; x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc; diff --git a/trunk/arch/x86/kernel/hpet.c b/trunk/arch/x86/kernel/hpet.c index bfe8f729e086..6781765b3a0d 100644 --- a/trunk/arch/x86/kernel/hpet.c +++ b/trunk/arch/x86/kernel/hpet.c @@ -217,7 +217,7 @@ static void hpet_reserve_platform_timers(unsigned int id) { } /* * Common hpet info */ -static unsigned long hpet_period; +static unsigned long hpet_freq; static void hpet_legacy_set_mode(enum clock_event_mode mode, struct 
clock_event_device *evt); @@ -232,7 +232,6 @@ static struct clock_event_device hpet_clockevent = { .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .set_mode = hpet_legacy_set_mode, .set_next_event = hpet_legacy_next_event, - .shift = 32, .irq = 0, .rating = 50, }; @@ -289,29 +288,13 @@ static void hpet_legacy_clockevent_register(void) /* Start HPET legacy interrupts */ hpet_enable_legacy_int(); - /* - * The mult factor is defined as (include/linux/clockchips.h) - * mult/2^shift = cyc/ns (in contrast to ns/cyc in clocksource.h) - * hpet_period is in units of femtoseconds (per cycle), so - * mult/2^shift = cyc/ns = 10^6/hpet_period - * mult = (10^6 * 2^shift)/hpet_period - * mult = (FSEC_PER_NSEC << hpet_clockevent.shift)/hpet_period - */ - hpet_clockevent.mult = div_sc((unsigned long) FSEC_PER_NSEC, - hpet_period, hpet_clockevent.shift); - /* Calculate the min / max delta */ - hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, - &hpet_clockevent); - /* Setup minimum reprogramming delta. */ - hpet_clockevent.min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA, - &hpet_clockevent); - /* * Start hpet with the boot cpu mask and make it * global after the IO_APIC has been initialized. */ hpet_clockevent.cpumask = cpumask_of(smp_processor_id()); - clockevents_register_device(&hpet_clockevent); + clockevents_config_and_register(&hpet_clockevent, hpet_freq, + HPET_MIN_PROG_DELTA, 0x7FFFFFFF); global_clock_event = &hpet_clockevent; printk(KERN_DEBUG "hpet clockevent registered\n"); } @@ -549,7 +532,6 @@ static int hpet_setup_irq(struct hpet_dev *dev) static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu) { struct clock_event_device *evt = &hdev->evt; - uint64_t hpet_freq; WARN_ON(cpu != smp_processor_id()); if (!(hdev->flags & HPET_DEV_VALID)) @@ -571,24 +553,10 @@ static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu) evt->set_mode = hpet_msi_set_mode; evt->set_next_event = hpet_msi_next_event; - evt->shift = 32; - - /* - * The period is a femto seconds value. We need to calculate the - * scaled math multiplication factor for nanosecond to hpet tick - * conversion. - */ - hpet_freq = FSEC_PER_SEC; - do_div(hpet_freq, hpet_period); - evt->mult = div_sc((unsigned long) hpet_freq, - NSEC_PER_SEC, evt->shift); - /* Calculate the max delta */ - evt->max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, evt); - /* 5 usec minimum reprogramming delta. 
*/ - evt->min_delta_ns = 5000; - evt->cpumask = cpumask_of(hdev->cpu); - clockevents_register_device(evt); + + clockevents_config_and_register(evt, hpet_freq, HPET_MIN_PROG_DELTA, + 0x7FFFFFFF); } #ifdef CONFIG_HPET @@ -792,7 +760,6 @@ static struct clocksource clocksource_hpet = { static int hpet_clocksource_register(void) { u64 start, now; - u64 hpet_freq; cycle_t t1; /* Start the counter */ @@ -819,24 +786,7 @@ static int hpet_clocksource_register(void) return -ENODEV; } - /* - * The definition of mult is (include/linux/clocksource.h) - * mult/2^shift = ns/cyc and hpet_period is in units of fsec/cyc - * so we first need to convert hpet_period to ns/cyc units: - * mult/2^shift = ns/cyc = hpet_period/10^6 - * mult = (hpet_period * 2^shift)/10^6 - * mult = (hpet_period << shift)/FSEC_PER_NSEC - */ - - /* Need to convert hpet_period (fsec/cyc) to cyc/sec: - * - * cyc/sec = FSEC_PER_SEC/hpet_period(fsec/cyc) - * cyc/sec = (FSEC_PER_NSEC * NSEC_PER_SEC)/hpet_period - */ - hpet_freq = FSEC_PER_SEC; - do_div(hpet_freq, hpet_period); clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq); - return 0; } @@ -845,7 +795,9 @@ static int hpet_clocksource_register(void) */ int __init hpet_enable(void) { + unsigned long hpet_period; unsigned int id; + u64 freq; int i; if (!is_hpet_capable()) @@ -883,6 +835,14 @@ int __init hpet_enable(void) if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD) goto out_nohpet; + /* + * The period is a femto seconds value. Convert it to a + * frequency. + */ + freq = FSEC_PER_SEC; + do_div(freq, hpet_period); + hpet_freq = freq; + /* * Read the HPET ID register to retrieve the IRQ routing * information and the number of channels diff --git a/trunk/arch/x86/kernel/i8253.c b/trunk/arch/x86/kernel/i8253.c index 2dfd31597443..fb66dc9e36cb 100644 --- a/trunk/arch/x86/kernel/i8253.c +++ b/trunk/arch/x86/kernel/i8253.c @@ -93,7 +93,6 @@ static struct clock_event_device pit_ce = { .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .set_mode = init_pit_timer, .set_next_event = pit_next_event, - .shift = 32, .irq = 0, }; @@ -108,90 +107,12 @@ void __init setup_pit_timer(void) * IO_APIC has been initialized. */ pit_ce.cpumask = cpumask_of(smp_processor_id()); - pit_ce.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, pit_ce.shift); - pit_ce.max_delta_ns = clockevent_delta2ns(0x7FFF, &pit_ce); - pit_ce.min_delta_ns = clockevent_delta2ns(0xF, &pit_ce); - clockevents_register_device(&pit_ce); + clockevents_config_and_register(&pit_ce, CLOCK_TICK_RATE, 0xF, 0x7FFF); global_clock_event = &pit_ce; } #ifndef CONFIG_X86_64 -/* - * Since the PIT overflows every tick, its not very useful - * to just read by itself. So use jiffies to emulate a free - * running counter: - */ -static cycle_t pit_read(struct clocksource *cs) -{ - static int old_count; - static u32 old_jifs; - unsigned long flags; - int count; - u32 jifs; - - raw_spin_lock_irqsave(&i8253_lock, flags); - /* - * Although our caller may have the read side of xtime_lock, - * this is now a seqlock, and we are cheating in this routine - * by having side effects on state that we cannot undo if - * there is a collision on the seqlock and our caller has to - * retry. (Namely, old_jifs and old_count.) So we must treat - * jiffies as volatile despite the lock. 
We read jiffies - * before latching the timer count to guarantee that although - * the jiffies value might be older than the count (that is, - * the counter may underflow between the last point where - * jiffies was incremented and the point where we latch the - * count), it cannot be newer. - */ - jifs = jiffies; - outb_pit(0x00, PIT_MODE); /* latch the count ASAP */ - count = inb_pit(PIT_CH0); /* read the latched count */ - count |= inb_pit(PIT_CH0) << 8; - - /* VIA686a test code... reset the latch if count > max + 1 */ - if (count > LATCH) { - outb_pit(0x34, PIT_MODE); - outb_pit(LATCH & 0xff, PIT_CH0); - outb_pit(LATCH >> 8, PIT_CH0); - count = LATCH - 1; - } - - /* - * It's possible for count to appear to go the wrong way for a - * couple of reasons: - * - * 1. The timer counter underflows, but we haven't handled the - * resulting interrupt and incremented jiffies yet. - * 2. Hardware problem with the timer, not giving us continuous time, - * the counter does small "jumps" upwards on some Pentium systems, - * (see c't 95/10 page 335 for Neptun bug.) - * - * Previous attempts to handle these cases intelligently were - * buggy, so we just do the simple thing now. - */ - if (count > old_count && jifs == old_jifs) - count = old_count; - - old_count = count; - old_jifs = jifs; - - raw_spin_unlock_irqrestore(&i8253_lock, flags); - - count = (LATCH - 1) - count; - - return (cycle_t)(jifs * LATCH) + count; -} - -static struct clocksource pit_cs = { - .name = "pit", - .rating = 110, - .read = pit_read, - .mask = CLOCKSOURCE_MASK(32), - .mult = 0, - .shift = 20, -}; - static int __init init_pit_clocksource(void) { /* @@ -205,10 +126,7 @@ static int __init init_pit_clocksource(void) pit_ce.mode != CLOCK_EVT_MODE_PERIODIC) return 0; - pit_cs.mult = clocksource_hz2mult(CLOCK_TICK_RATE, pit_cs.shift); - - return clocksource_register(&pit_cs); + return clocksource_i8253_init(); } arch_initcall(init_pit_clocksource); - #endif /* !CONFIG_X86_64 */ diff --git a/trunk/arch/x86/kernel/irq.c b/trunk/arch/x86/kernel/irq.c index 1cb0b9fc78dc..6c0802eb2f7f 100644 --- a/trunk/arch/x86/kernel/irq.c +++ b/trunk/arch/x86/kernel/irq.c @@ -249,7 +249,7 @@ void fixup_irqs(void) data = irq_desc_get_irq_data(desc); affinity = data->affinity; - if (!irq_has_action(irq) || + if (!irq_has_action(irq) || irqd_is_per_cpu(data) || cpumask_subset(affinity, cpu_online_mask)) { raw_spin_unlock(&desc->lock); continue; @@ -276,7 +276,8 @@ void fixup_irqs(void) else if (!(warned++)) set_affinity = 0; - if (!irqd_can_move_in_process_context(data) && chip->irq_unmask) + if (!irqd_can_move_in_process_context(data) && + !irqd_irq_disabled(data) && chip->irq_unmask) chip->irq_unmask(data); raw_spin_unlock(&desc->lock); diff --git a/trunk/arch/x86/kernel/jump_label.c b/trunk/arch/x86/kernel/jump_label.c index 961b6b30ba90..3fee346ef545 100644 --- a/trunk/arch/x86/kernel/jump_label.c +++ b/trunk/arch/x86/kernel/jump_label.c @@ -34,7 +34,7 @@ void arch_jump_label_transform(struct jump_entry *entry, code.offset = entry->target - (entry->code + JUMP_LABEL_NOP_SIZE); } else - memcpy(&code, ideal_nop5, JUMP_LABEL_NOP_SIZE); + memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE); get_online_cpus(); mutex_lock(&text_mutex); text_poke_smp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE); @@ -44,7 +44,8 @@ void arch_jump_label_transform(struct jump_entry *entry, void arch_jump_label_text_poke_early(jump_label_t addr) { - text_poke_early((void *)addr, ideal_nop5, JUMP_LABEL_NOP_SIZE); + text_poke_early((void *)addr, 
ideal_nops[NOP_ATOMIC5], + JUMP_LABEL_NOP_SIZE); } #endif diff --git a/trunk/arch/x86/kernel/kprobes.c b/trunk/arch/x86/kernel/kprobes.c index c969fd9d1566..f1a6244d7d93 100644 --- a/trunk/arch/x86/kernel/kprobes.c +++ b/trunk/arch/x86/kernel/kprobes.c @@ -1183,12 +1183,13 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + unsigned long flags; /* This is possible if op is under delayed unoptimizing */ if (kprobe_disabled(&op->kp)) return; - preempt_disable(); + local_irq_save(flags); if (kprobe_running()) { kprobes_inc_nmissed_count(&op->kp); } else { @@ -1207,7 +1208,7 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op, opt_pre_handler(&op->kp, regs); __this_cpu_write(current_kprobe, NULL); } - preempt_enable_no_resched(); + local_irq_restore(flags); } static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) diff --git a/trunk/arch/x86/kernel/kvmclock.c b/trunk/arch/x86/kernel/kvmclock.c index f98d3eafe07a..6389a6bca11b 100644 --- a/trunk/arch/x86/kernel/kvmclock.c +++ b/trunk/arch/x86/kernel/kvmclock.c @@ -26,8 +26,6 @@ #include #include -#define KVM_SCALE 22 - static int kvmclock = 1; static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME; static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK; @@ -120,8 +118,6 @@ static struct clocksource kvm_clock = { .read = kvm_clock_get_cycles, .rating = 400, .mask = CLOCKSOURCE_MASK(64), - .mult = 1 << KVM_SCALE, - .shift = KVM_SCALE, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -203,7 +199,7 @@ void __init kvmclock_init(void) machine_ops.crash_shutdown = kvm_crash_shutdown; #endif kvm_get_preset_lpj(); - clocksource_register(&kvm_clock); + clocksource_register_hz(&kvm_clock, NSEC_PER_SEC); pv_info.paravirt_enabled = 1; pv_info.name = "KVM"; diff --git a/trunk/arch/x86/kernel/module.c b/trunk/arch/x86/kernel/module.c index ab23f1ad4bf1..52f256f2cc81 100644 --- a/trunk/arch/x86/kernel/module.c +++ b/trunk/arch/x86/kernel/module.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include diff --git a/trunk/arch/x86/kernel/mpparse.c b/trunk/arch/x86/kernel/mpparse.c index 5a532ce646bf..6f9bfffb2720 100644 --- a/trunk/arch/x86/kernel/mpparse.c +++ b/trunk/arch/x86/kernel/mpparse.c @@ -715,17 +715,15 @@ static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) } } -static int +static int __init check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count) { - int ret = 0; - if (!mpc_new_phys || count <= mpc_new_length) { WARN(1, "update_mptable: No spare slots (length: %x)\n", count); return -1; } - return ret; + return 0; } #else /* CONFIG_X86_IO_APIC */ static diff --git a/trunk/arch/x86/kernel/pci-iommu_table.c b/trunk/arch/x86/kernel/pci-iommu_table.c index 55d745ec1181..35ccf75696eb 100644 --- a/trunk/arch/x86/kernel/pci-iommu_table.c +++ b/trunk/arch/x86/kernel/pci-iommu_table.c @@ -50,20 +50,14 @@ void __init check_iommu_entries(struct iommu_table_entry *start, struct iommu_table_entry *finish) { struct iommu_table_entry *p, *q, *x; - char sym_p[KSYM_SYMBOL_LEN]; - char sym_q[KSYM_SYMBOL_LEN]; /* Simple cyclic dependency checker. */ for (p = start; p < finish; p++) { q = find_dependents_of(start, finish, p); x = find_dependents_of(start, finish, q); if (p == x) { - sprint_symbol(sym_p, (unsigned long)p->detect); - sprint_symbol(sym_q, (unsigned long)q->detect); - - printk(KERN_ERR "CYCLIC DEPENDENCY FOUND! %s depends" \ - " on %s and vice-versa. 
BREAKING IT.\n", - sym_p, sym_q); + printk(KERN_ERR "CYCLIC DEPENDENCY FOUND! %pS depends on %pS and vice-versa. BREAKING IT.\n", + p->detect, q->detect); /* Heavy handed way..*/ x->depend = 0; } @@ -72,12 +66,8 @@ void __init check_iommu_entries(struct iommu_table_entry *start, for (p = start; p < finish; p++) { q = find_dependents_of(p, finish, p); if (q && q > p) { - sprint_symbol(sym_p, (unsigned long)p->detect); - sprint_symbol(sym_q, (unsigned long)q->detect); - - printk(KERN_ERR "EXECUTION ORDER INVALID! %s "\ - "should be called before %s!\n", - sym_p, sym_q); + printk(KERN_ERR "EXECUTION ORDER INVALID! %pS should be called before %pS!\n", + p->detect, q->detect); } } } diff --git a/trunk/arch/x86/kernel/probe_roms_32.c b/trunk/arch/x86/kernel/probe_roms.c similarity index 65% rename from trunk/arch/x86/kernel/probe_roms_32.c rename to trunk/arch/x86/kernel/probe_roms.c index 071e7fea42e5..ba0a4cce53be 100644 --- a/trunk/arch/x86/kernel/probe_roms_32.c +++ b/trunk/arch/x86/kernel/probe_roms.c @@ -73,6 +73,107 @@ static struct resource video_rom_resource = { .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM }; +/* does this oprom support the given pci device, or any of the devices + * that the driver supports? + */ +static bool match_id(struct pci_dev *pdev, unsigned short vendor, unsigned short device) +{ + struct pci_driver *drv = pdev->driver; + const struct pci_device_id *id; + + if (pdev->vendor == vendor && pdev->device == device) + return true; + + for (id = drv ? drv->id_table : NULL; id && id->vendor; id++) + if (id->vendor == vendor && id->device == device) + break; + + return id && id->vendor; +} + +static bool probe_list(struct pci_dev *pdev, unsigned short vendor, + const unsigned char *rom_list) +{ + unsigned short device; + + do { + if (probe_kernel_address(rom_list, device) != 0) + device = 0; + + if (device && match_id(pdev, vendor, device)) + break; + + rom_list += 2; + } while (device); + + return !!device; +} + +static struct resource *find_oprom(struct pci_dev *pdev) +{ + struct resource *oprom = NULL; + int i; + + for (i = 0; i < ARRAY_SIZE(adapter_rom_resources); i++) { + struct resource *res = &adapter_rom_resources[i]; + unsigned short offset, vendor, device, list, rev; + const unsigned char *rom; + + if (res->end == 0) + break; + + rom = isa_bus_to_virt(res->start); + if (probe_kernel_address(rom + 0x18, offset) != 0) + continue; + + if (probe_kernel_address(rom + offset + 0x4, vendor) != 0) + continue; + + if (probe_kernel_address(rom + offset + 0x6, device) != 0) + continue; + + if (match_id(pdev, vendor, device)) { + oprom = res; + break; + } + + if (probe_kernel_address(rom + offset + 0x8, list) == 0 && + probe_kernel_address(rom + offset + 0xc, rev) == 0 && + rev >= 3 && list && + probe_list(pdev, vendor, rom + offset + list)) { + oprom = res; + break; + } + } + + return oprom; +} + +void *pci_map_biosrom(struct pci_dev *pdev) +{ + struct resource *oprom = find_oprom(pdev); + + if (!oprom) + return NULL; + + return ioremap(oprom->start, resource_size(oprom)); +} +EXPORT_SYMBOL(pci_map_biosrom); + +void pci_unmap_biosrom(void __iomem *image) +{ + iounmap(image); +} +EXPORT_SYMBOL(pci_unmap_biosrom); + +size_t pci_biosrom_size(struct pci_dev *pdev) +{ + struct resource *oprom = find_oprom(pdev); + + return oprom ? 
resource_size(oprom) : 0; +} +EXPORT_SYMBOL(pci_biosrom_size); + #define ROMSIGNATURE 0xaa55 static int __init romsignature(const unsigned char *rom) diff --git a/trunk/arch/x86/kernel/process.c b/trunk/arch/x86/kernel/process.c index d46cbe46b7ab..88a90a977f8e 100644 --- a/trunk/arch/x86/kernel/process.c +++ b/trunk/arch/x86/kernel/process.c @@ -449,7 +449,7 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait); void mwait_idle_with_hints(unsigned long ax, unsigned long cx) { if (!need_resched()) { - if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR)) + if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) clflush((void *)¤t_thread_info()->flags); __monitor((void *)¤t_thread_info()->flags, 0, 0); @@ -465,7 +465,7 @@ static void mwait_idle(void) if (!need_resched()) { trace_power_start(POWER_CSTATE, 1, smp_processor_id()); trace_cpu_idle(1, smp_processor_id()); - if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR)) + if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) clflush((void *)¤t_thread_info()->flags); __monitor((void *)¤t_thread_info()->flags, 0, 0); diff --git a/trunk/arch/x86/kernel/ptrace.c b/trunk/arch/x86/kernel/ptrace.c index 45892dc4b72a..f65e5b521dbd 100644 --- a/trunk/arch/x86/kernel/ptrace.c +++ b/trunk/arch/x86/kernel/ptrace.c @@ -608,6 +608,9 @@ static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data) unsigned len, type; struct perf_event *bp; + if (ptrace_get_breakpoints(tsk) < 0) + return -ESRCH; + data &= ~DR_CONTROL_RESERVED; old_dr7 = ptrace_get_dr7(thread->ptrace_bps); restore: @@ -655,6 +658,9 @@ static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data) } goto restore; } + + ptrace_put_breakpoints(tsk); + return ((orig_ret < 0) ? orig_ret : rc); } @@ -668,10 +674,17 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) if (n < HBP_NUM) { struct perf_event *bp; + + if (ptrace_get_breakpoints(tsk) < 0) + return -ESRCH; + bp = thread->ptrace_bps[n]; if (!bp) - return 0; - val = bp->hw.info.address; + val = 0; + else + val = bp->hw.info.address; + + ptrace_put_breakpoints(tsk); } else if (n == 6) { val = thread->debugreg6; } else if (n == 7) { @@ -686,6 +699,10 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, struct perf_event *bp; struct thread_struct *t = &tsk->thread; struct perf_event_attr attr; + int err = 0; + + if (ptrace_get_breakpoints(tsk) < 0) + return -ESRCH; if (!t->ptrace_bps[nr]) { ptrace_breakpoint_init(&attr); @@ -709,24 +726,23 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, * writing for the user. And anyway this is the previous * behaviour. 
*/ - if (IS_ERR(bp)) - return PTR_ERR(bp); + if (IS_ERR(bp)) { + err = PTR_ERR(bp); + goto put; + } t->ptrace_bps[nr] = bp; } else { - int err; - bp = t->ptrace_bps[nr]; attr = bp->attr; attr.bp_addr = addr; err = modify_user_hw_breakpoint(bp, &attr); - if (err) - return err; } - - return 0; +put: + ptrace_put_breakpoints(tsk); + return err; } /* diff --git a/trunk/arch/x86/kernel/reboot.c b/trunk/arch/x86/kernel/reboot.c index 08c44b08bf5b..0c016f727695 100644 --- a/trunk/arch/x86/kernel/reboot.c +++ b/trunk/arch/x86/kernel/reboot.c @@ -36,7 +36,7 @@ EXPORT_SYMBOL(pm_power_off); static const struct desc_ptr no_idt = {}; static int reboot_mode; -enum reboot_type reboot_type = BOOT_KBD; +enum reboot_type reboot_type = BOOT_ACPI; int reboot_force; #if defined(CONFIG_X86_32) && defined(CONFIG_SMP) @@ -478,9 +478,24 @@ void __attribute__((weak)) mach_reboot_fixups(void) { } +/* + * Windows compatible x86 hardware expects the following on reboot: + * + * 1) If the FADT has the ACPI reboot register flag set, try it + * 2) If still alive, write to the keyboard controller + * 3) If still alive, write to the ACPI reboot register again + * 4) If still alive, write to the keyboard controller again + * + * If the machine is still alive at this stage, it gives up. We default to + * following the same pattern, except that if we're still alive after (4) we'll + * try to force a triple fault and then cycle between hitting the keyboard + * controller and doing that + */ static void native_machine_emergency_restart(void) { int i; + int attempt = 0; + int orig_reboot_type = reboot_type; if (reboot_emergency) emergency_vmx_disable_all(); @@ -502,6 +517,13 @@ static void native_machine_emergency_restart(void) outb(0xfe, 0x64); /* pulse reset low */ udelay(50); } + if (attempt == 0 && orig_reboot_type == BOOT_ACPI) { + attempt = 1; + reboot_type = BOOT_ACPI; + } else { + reboot_type = BOOT_TRIPLE; + } + break; case BOOT_TRIPLE: load_idt(&no_idt); diff --git a/trunk/arch/x86/kernel/reboot_32.S b/trunk/arch/x86/kernel/reboot_32.S index 29092b38d816..1d5c46df0d78 100644 --- a/trunk/arch/x86/kernel/reboot_32.S +++ b/trunk/arch/x86/kernel/reboot_32.S @@ -21,26 +21,26 @@ r_base = . /* Get our own relocated address */ call 1f 1: popl %ebx - subl $1b, %ebx + subl $(1b - r_base), %ebx /* Compute the equivalent real-mode segment */ movl %ebx, %ecx shrl $4, %ecx /* Patch post-real-mode segment jump */ - movw dispatch_table(%ebx,%eax,2),%ax - movw %ax, 101f(%ebx) - movw %cx, 102f(%ebx) + movw (dispatch_table - r_base)(%ebx,%eax,2),%ax + movw %ax, (101f - r_base)(%ebx) + movw %cx, (102f - r_base)(%ebx) /* Set up the IDT for real mode. */ - lidtl machine_real_restart_idt(%ebx) + lidtl (machine_real_restart_idt - r_base)(%ebx) /* * Set up a GDT from which we can load segment descriptors for real * mode. The GDT is not used in real mode; it is just needed here to * prepare the descriptors. 
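 *
 * Since this stub runs relocated, every symbol reference is computed
 * relative to r_base, with %ebx holding r_base's runtime address;
 * that is why the operand below becomes
 * (machine_real_restart_gdt - r_base)(%ebx) instead of the bare
 * symbol.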
*/ - lgdtl machine_real_restart_gdt(%ebx) + lgdtl (machine_real_restart_gdt - r_base)(%ebx) /* * Load the data segment registers with 16-bit compatible values diff --git a/trunk/arch/x86/kernel/setup.c b/trunk/arch/x86/kernel/setup.c index 4be9b398470e..c3050af9306d 100644 --- a/trunk/arch/x86/kernel/setup.c +++ b/trunk/arch/x86/kernel/setup.c @@ -691,8 +691,6 @@ early_param("reservelow", parse_reservelow); void __init setup_arch(char **cmdline_p) { - unsigned long flags; - #ifdef CONFIG_X86_32 memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); visws_early_detect(); @@ -1041,9 +1039,7 @@ void __init setup_arch(char **cmdline_p) mcheck_init(); - local_irq_save(flags); - arch_init_ideal_nop5(); - local_irq_restore(flags); + arch_init_ideal_nops(); } #ifdef CONFIG_X86_32 diff --git a/trunk/arch/x86/kernel/signal.c b/trunk/arch/x86/kernel/signal.c index 4fd173cd8e57..40a24932a8a1 100644 --- a/trunk/arch/x86/kernel/signal.c +++ b/trunk/arch/x86/kernel/signal.c @@ -601,10 +601,7 @@ long sys_rt_sigreturn(struct pt_regs *regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + set_current_blocked(&set); if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) goto badframe; @@ -682,6 +679,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) { + sigset_t blocked; int ret; /* Are we from a system call? */ @@ -741,12 +739,10 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, */ regs->flags &= ~X86_EFLAGS_TF; - spin_lock_irq(¤t->sighand->siglock); - sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask); + sigorsets(&blocked, ¤t->blocked, &ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NODEFER)) - sigaddset(¤t->blocked, sig); - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + sigaddset(&blocked, sig); + set_current_blocked(&blocked); tracehook_signal_handler(sig, info, ka, regs, test_thread_flag(TIF_SINGLESTEP)); diff --git a/trunk/arch/x86/kernel/smp.c b/trunk/arch/x86/kernel/smp.c index 513deac7228d..013e7eba83bb 100644 --- a/trunk/arch/x86/kernel/smp.c +++ b/trunk/arch/x86/kernel/smp.c @@ -194,14 +194,13 @@ static void native_stop_other_cpus(int wait) } /* - * Reschedule call back. Nothing to do, - * all the work is done automatically when - * we return from the interrupt. + * Reschedule call back. 
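+ * The rescheduling work is no longer left to the interrupt return
+ * path; it is done explicitly by the scheduler_ipi() call added
+ * below.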
*/ void smp_reschedule_interrupt(struct pt_regs *regs) { ack_APIC_irq(); inc_irq_stat(irq_resched_count); + scheduler_ipi(); /* * KVM uses this interrupt to force a cpu out of guest mode */ diff --git a/trunk/arch/x86/kernel/smpboot.c b/trunk/arch/x86/kernel/smpboot.c index c2871d3c71b6..a3c430bdfb60 100644 --- a/trunk/arch/x86/kernel/smpboot.c +++ b/trunk/arch/x86/kernel/smpboot.c @@ -1332,9 +1332,9 @@ static inline void mwait_play_dead(void) void *mwait_ptr; struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info); - if (!(cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c))) + if (!this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c)) return; - if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLSH)) + if (!this_cpu_has(X86_FEATURE_CLFLSH)) return; if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF) return; diff --git a/trunk/arch/x86/kernel/stacktrace.c b/trunk/arch/x86/kernel/stacktrace.c index 6515733a289d..55d9bc03f696 100644 --- a/trunk/arch/x86/kernel/stacktrace.c +++ b/trunk/arch/x86/kernel/stacktrace.c @@ -9,15 +9,6 @@ #include #include -static void save_stack_warning(void *data, char *msg) -{ -} - -static void -save_stack_warning_symbol(void *data, char *msg, unsigned long symbol) -{ -} - static int save_stack_stack(void *data, char *name) { return 0; @@ -53,16 +44,12 @@ save_stack_address_nosched(void *data, unsigned long addr, int reliable) } static const struct stacktrace_ops save_stack_ops = { - .warning = save_stack_warning, - .warning_symbol = save_stack_warning_symbol, .stack = save_stack_stack, .address = save_stack_address, .walk_stack = print_context_stack, }; static const struct stacktrace_ops save_stack_ops_nosched = { - .warning = save_stack_warning, - .warning_symbol = save_stack_warning_symbol, .stack = save_stack_stack, .address = save_stack_address_nosched, .walk_stack = print_context_stack, diff --git a/trunk/arch/x86/kernel/x86_init.c b/trunk/arch/x86/kernel/x86_init.c index c11514e9128b..6f164bd5e14d 100644 --- a/trunk/arch/x86/kernel/x86_init.c +++ b/trunk/arch/x86/kernel/x86_init.c @@ -35,7 +35,7 @@ void iommu_shutdown_noop(void) { } struct x86_init_ops x86_init __initdata = { .resources = { - .probe_roms = x86_init_noop, + .probe_roms = probe_roms, .reserve_resources = reserve_standard_io_resources, .memory_setup = default_machine_specific_memory_setup, }, @@ -61,6 +61,10 @@ struct x86_init_ops x86_init __initdata = { .banner = default_banner, }, + .mapping = { + .pagetable_reserve = native_pagetable_reserve, + }, + .paging = { .pagetable_setup_start = native_pagetable_setup_start, .pagetable_setup_done = native_pagetable_setup_done, diff --git a/trunk/arch/x86/lguest/boot.c b/trunk/arch/x86/lguest/boot.c index 1cd608973ce5..e191c096ab90 100644 --- a/trunk/arch/x86/lguest/boot.c +++ b/trunk/arch/x86/lguest/boot.c @@ -7,7 +7,7 @@ * kernel and insert a module (lg.ko) which allows us to run other Linux * kernels the same way we'd run processes. We call the first kernel the Host, * and the others the Guests. The program which sets up and configures Guests - * (such as the example in Documentation/lguest/lguest.c) is called the + * (such as the example in Documentation/virtual/lguest/lguest.c) is called the * Launcher. 
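/*
 * Editor's aside, not part of the patch: the x86_init.c hunk above adds a
 * .mapping.pagetable_reserve hook with a native default.  A minimal sketch of
 * that ops-table pattern (a struct of function pointers that a paravirtualized
 * platform can override at boot); all names below are illustrative only.
 */
#include <stdio.h>

struct mapping_ops {
        void (*pagetable_reserve)(unsigned long long start, unsigned long long end);
};

static void native_reserve(unsigned long long s, unsigned long long e)
{
        printf("reserve pagetable range %llx-%llx\n", s, e);
}

static struct mapping_ops mapping = { .pagetable_reserve = native_reserve };

int main(void)
{
        /* init_memory_mapping() ends up invoking the hook roughly like this: */
        mapping.pagetable_reserve(0x100000, 0x180000);
        return 0;
}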
* * Secondly, we only run specially modified Guests, not normal kernels: setting @@ -913,8 +913,6 @@ static struct clocksource lguest_clock = { .rating = 200, .read = lguest_clock_read, .mask = CLOCKSOURCE_MASK(64), - .mult = 1 << 22, - .shift = 22, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -997,7 +995,7 @@ static void lguest_time_init(void) /* Set up the timer interrupt (0) to go to our simple timer routine */ irq_set_handler(0, lguest_time_irq); - clocksource_register(&lguest_clock); + clocksource_register_hz(&lguest_clock, NSEC_PER_SEC); /* We can't set cpumask in the initializer: damn C limitations! Set it * here and register our timer device. */ diff --git a/trunk/arch/x86/lib/clear_page_64.S b/trunk/arch/x86/lib/clear_page_64.S index aa4326bfb24a..f2145cfa12a6 100644 --- a/trunk/arch/x86/lib/clear_page_64.S +++ b/trunk/arch/x86/lib/clear_page_64.S @@ -1,5 +1,6 @@ #include #include +#include /* * Zero a page. @@ -14,6 +15,15 @@ ENTRY(clear_page_c) CFI_ENDPROC ENDPROC(clear_page_c) +ENTRY(clear_page_c_e) + CFI_STARTPROC + movl $4096,%ecx + xorl %eax,%eax + rep stosb + ret + CFI_ENDPROC +ENDPROC(clear_page_c_e) + ENTRY(clear_page) CFI_STARTPROC xorl %eax,%eax @@ -38,21 +48,26 @@ ENTRY(clear_page) .Lclear_page_end: ENDPROC(clear_page) - /* Some CPUs run faster using the string instructions. - It is also a lot simpler. Use this when possible */ + /* + * Some CPUs support enhanced REP MOVSB/STOSB instructions. + * It is recommended to use this when possible. + * If enhanced REP MOVSB/STOSB is not available, try to use fast string. + * Otherwise, use original function. + * + */ #include .section .altinstr_replacement,"ax" 1: .byte 0xeb /* jmp */ .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */ -2: +2: .byte 0xeb /* jmp */ + .byte (clear_page_c_e - clear_page) - (3f - 2b) /* offset */ +3: .previous .section .altinstructions,"a" - .align 8 - .quad clear_page - .quad 1b - .word X86_FEATURE_REP_GOOD - .byte .Lclear_page_end - clear_page - .byte 2b - 1b + altinstruction_entry clear_page,1b,X86_FEATURE_REP_GOOD,\ + .Lclear_page_end-clear_page, 2b-1b + altinstruction_entry clear_page,2b,X86_FEATURE_ERMS, \ + .Lclear_page_end-clear_page,3b-2b .previous diff --git a/trunk/arch/x86/lib/copy_user_64.S b/trunk/arch/x86/lib/copy_user_64.S index 99e482615195..024840266ba0 100644 --- a/trunk/arch/x86/lib/copy_user_64.S +++ b/trunk/arch/x86/lib/copy_user_64.S @@ -15,23 +15,30 @@ #include #include #include +#include - .macro ALTERNATIVE_JUMP feature,orig,alt +/* + * By placing feature2 after feature1 in altinstructions section, we logically + * implement: + * If CPU has feature2, jmp to alt2 is used + * else if CPU has feature1, jmp to alt1 is used + * else jmp to orig is used. 
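/*
 * Editor's aside, not part of the patch: the ALTERNATIVE_JUMP comment above
 * describes a two-level selection.  Because the feature2 (ERMS) entry is
 * emitted after the feature1 (REP_GOOD) entry, it is patched later and wins
 * when both features are present.  A self-contained sketch of that priority:
 */
#include <stdbool.h>
#include <stdio.h>

enum variant { ORIG, ALT1_FAST_STRING, ALT2_ERMS };

static enum variant pick(bool has_rep_good, bool has_erms)
{
        enum variant v = ORIG;          /* default jump target */

        if (has_rep_good)               /* feature1 entry is applied first */
                v = ALT1_FAST_STRING;
        if (has_erms)                   /* feature2 entry is applied later and overrides it */
                v = ALT2_ERMS;
        return v;
}

int main(void)
{
        printf("%d\n", pick(true, true));       /* prints 2: ERMS wins */
        return 0;
}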
+ */ + .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2 0: .byte 0xe9 /* 32bit jump */ .long \orig-1f /* by default jump to orig */ 1: .section .altinstr_replacement,"ax" 2: .byte 0xe9 /* near jump with 32bit immediate */ - .long \alt-1b /* offset */ /* or alternatively to alt */ + .long \alt1-1b /* offset */ /* or alternatively to alt1 */ +3: .byte 0xe9 /* near jump with 32bit immediate */ + .long \alt2-1b /* offset */ /* or alternatively to alt2 */ .previous + .section .altinstructions,"a" - .align 8 - .quad 0b - .quad 2b - .word \feature /* when feature is set */ - .byte 5 - .byte 5 + altinstruction_entry 0b,2b,\feature1,5,5 + altinstruction_entry 0b,3b,\feature2,5,5 .previous .endm @@ -72,8 +79,10 @@ ENTRY(_copy_to_user) addq %rdx,%rcx jc bad_to_user cmpq TI_addr_limit(%rax),%rcx - jae bad_to_user - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string + ja bad_to_user + ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \ + copy_user_generic_unrolled,copy_user_generic_string, \ + copy_user_enhanced_fast_string CFI_ENDPROC ENDPROC(_copy_to_user) @@ -85,8 +94,10 @@ ENTRY(_copy_from_user) addq %rdx,%rcx jc bad_from_user cmpq TI_addr_limit(%rax),%rcx - jae bad_from_user - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string + ja bad_from_user + ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \ + copy_user_generic_unrolled,copy_user_generic_string, \ + copy_user_enhanced_fast_string CFI_ENDPROC ENDPROC(_copy_from_user) @@ -255,3 +266,37 @@ ENTRY(copy_user_generic_string) .previous CFI_ENDPROC ENDPROC(copy_user_generic_string) + +/* + * Some CPUs are adding enhanced REP MOVSB/STOSB instructions. + * It's recommended to use enhanced REP MOVSB/STOSB if it's enabled. + * + * Input: + * rdi destination + * rsi source + * rdx count + * + * Output: + * eax uncopied bytes or 0 if successful. + */ +ENTRY(copy_user_enhanced_fast_string) + CFI_STARTPROC + andl %edx,%edx + jz 2f + movl %edx,%ecx +1: rep + movsb +2: xorl %eax,%eax + ret + + .section .fixup,"ax" +12: movl %ecx,%edx /* ecx is zerorest also */ + jmp copy_user_handle_tail + .previous + + .section __ex_table,"a" + .align 8 + .quad 1b,12b + .previous + CFI_ENDPROC +ENDPROC(copy_user_enhanced_fast_string) diff --git a/trunk/arch/x86/lib/memcpy_64.S b/trunk/arch/x86/lib/memcpy_64.S index 75ef61e35e38..efbf2a0ecdea 100644 --- a/trunk/arch/x86/lib/memcpy_64.S +++ b/trunk/arch/x86/lib/memcpy_64.S @@ -4,6 +4,7 @@ #include #include +#include /* * memcpy - Copy a memory block. @@ -37,6 +38,23 @@ .Lmemcpy_e: .previous +/* + * memcpy_c_e() - enhanced fast string memcpy. This is faster and simpler than + * memcpy_c. Use memcpy_c_e when possible. + * + * This gets patched over the unrolled variant (below) via the + * alternative instructions framework: + */ + .section .altinstr_replacement, "ax", @progbits +.Lmemcpy_c_e: + movq %rdi, %rax + + movl %edx, %ecx + rep movsb + ret +.Lmemcpy_e_e: + .previous + ENTRY(__memcpy) ENTRY(memcpy) CFI_STARTPROC @@ -49,7 +67,7 @@ ENTRY(memcpy) jb .Lhandle_tail /* - * We check whether memory false dependece could occur, + * We check whether memory false dependence could occur, * then jump to corresponding copy mode. */ cmp %dil, %sil @@ -171,21 +189,22 @@ ENDPROC(memcpy) ENDPROC(__memcpy) /* - * Some CPUs run faster using the string copy instructions. - * It is also a lot simpler. 
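/*
 * Editor's aside, not part of the patch: copy_user_enhanced_fast_string and
 * .Lmemcpy_c_e above boil down to a single "rep movsb" with the byte count in
 * %rcx.  The same instruction can be issued from user space with GCC inline
 * asm (x86 only; a sketch for illustration, not a memcpy replacement):
 */
#include <stddef.h>
#include <stdio.h>

static void rep_movsb_copy(void *dst, const void *src, size_t n)
{
        asm volatile("rep movsb"
                     : "+D" (dst), "+S" (src), "+c" (n)
                     : : "memory");
}

int main(void)
{
        char out[16] = { 0 };

        rep_movsb_copy(out, "hello", 6);
        puts(out);                      /* prints "hello" */
        return 0;
}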
Use this when possible: - */ - - .section .altinstructions, "a" - .align 8 - .quad memcpy - .quad .Lmemcpy_c - .word X86_FEATURE_REP_GOOD - - /* + * Some CPUs are adding enhanced REP MOVSB/STOSB feature + * If the feature is supported, memcpy_c_e() is the first choice. + * If enhanced rep movsb copy is not available, use fast string copy + * memcpy_c() when possible. This is faster and code is simpler than + * original memcpy(). + * Otherwise, original memcpy() is used. + * In .altinstructions section, ERMS feature is placed after REG_GOOD + * feature to implement the right patch order. + * * Replace only beginning, memcpy is used to apply alternatives, * so it is silly to overwrite itself with nops - reboot is the * only outcome... */ - .byte .Lmemcpy_e - .Lmemcpy_c - .byte .Lmemcpy_e - .Lmemcpy_c + .section .altinstructions, "a" + altinstruction_entry memcpy,.Lmemcpy_c,X86_FEATURE_REP_GOOD,\ + .Lmemcpy_e-.Lmemcpy_c,.Lmemcpy_e-.Lmemcpy_c + altinstruction_entry memcpy,.Lmemcpy_c_e,X86_FEATURE_ERMS, \ + .Lmemcpy_e_e-.Lmemcpy_c_e,.Lmemcpy_e_e-.Lmemcpy_c_e .previous diff --git a/trunk/arch/x86/lib/memmove_64.S b/trunk/arch/x86/lib/memmove_64.S index 0ecb8433e5a8..d0ec9c2936d7 100644 --- a/trunk/arch/x86/lib/memmove_64.S +++ b/trunk/arch/x86/lib/memmove_64.S @@ -8,6 +8,7 @@ #define _STRING_C #include #include +#include #undef memmove @@ -24,6 +25,7 @@ */ ENTRY(memmove) CFI_STARTPROC + /* Handle more 32bytes in loop */ mov %rdi, %rax cmp $0x20, %rdx @@ -31,8 +33,13 @@ ENTRY(memmove) /* Decide forward/backward copy mode */ cmp %rdi, %rsi - jb 2f + jge .Lmemmove_begin_forward + mov %rsi, %r8 + add %rdx, %r8 + cmp %rdi, %r8 + jg 2f +.Lmemmove_begin_forward: /* * movsq instruction have many startup latency * so we handle small size by general register. @@ -78,6 +85,8 @@ ENTRY(memmove) rep movsq movq %r11, (%r10) jmp 13f +.Lmemmove_end_forward: + /* * Handle data backward by movsq. */ @@ -194,4 +203,22 @@ ENTRY(memmove) 13: retq CFI_ENDPROC + + .section .altinstr_replacement,"ax" +.Lmemmove_begin_forward_efs: + /* Forward moving data. */ + movq %rdx, %rcx + rep movsb + retq +.Lmemmove_end_forward_efs: + .previous + + .section .altinstructions,"a" + .align 8 + .quad .Lmemmove_begin_forward + .quad .Lmemmove_begin_forward_efs + .word X86_FEATURE_ERMS + .byte .Lmemmove_end_forward-.Lmemmove_begin_forward + .byte .Lmemmove_end_forward_efs-.Lmemmove_begin_forward_efs + .previous ENDPROC(memmove) diff --git a/trunk/arch/x86/lib/memset_64.S b/trunk/arch/x86/lib/memset_64.S index 09d344269652..79bd454b78a3 100644 --- a/trunk/arch/x86/lib/memset_64.S +++ b/trunk/arch/x86/lib/memset_64.S @@ -2,9 +2,13 @@ #include #include +#include +#include /* - * ISO C memset - set a memory block to a byte value. + * ISO C memset - set a memory block to a byte value. This function uses fast + * string to get better performance than the original function. The code is + * simpler and shorter than the orignal function as well. * * rdi destination * rsi value (char) @@ -31,6 +35,28 @@ .Lmemset_e: .previous +/* + * ISO C memset - set a memory block to a byte value. This function uses + * enhanced rep stosb to override the fast string function. + * The code is simpler and shorter than the fast string function as well. 
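/*
 * Editor's aside, not part of the patch: the memmove hunk above chooses
 * forward vs. backward copy at the top of the function.  A C sketch of that
 * decision: copy forward when src >= dst or when the ranges do not overlap,
 * otherwise copy backward so overlapping bytes are not clobbered.
 */
#include <stdio.h>
#include <string.h>

static void move_sketch(char *dst, const char *src, size_t n)
{
        if (src >= dst || src + n <= dst) {
                memcpy(dst, src, n);            /* forward copy is safe */
        } else {
                for (size_t i = n; i-- > 0; )   /* dst overlaps past src: copy backward */
                        dst[i] = src[i];
        }
}

int main(void)
{
        char buf[] = "abcdef";

        move_sketch(buf + 1, buf, 5);   /* overlapping move within buf */
        puts(buf);                      /* prints "aabcde" */
        return 0;
}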
+ * + * rdi destination + * rsi value (char) + * rdx count (bytes) + * + * rax original destination + */ + .section .altinstr_replacement, "ax", @progbits +.Lmemset_c_e: + movq %rdi,%r9 + movb %sil,%al + movl %edx,%ecx + rep stosb + movq %r9,%rax + ret +.Lmemset_e_e: + .previous + ENTRY(memset) ENTRY(__memset) CFI_STARTPROC @@ -112,16 +138,20 @@ ENTRY(__memset) ENDPROC(memset) ENDPROC(__memset) - /* Some CPUs run faster using the string instructions. - It is also a lot simpler. Use this when possible */ - -#include - + /* Some CPUs support enhanced REP MOVSB/STOSB feature. + * It is recommended to use this when possible. + * + * If enhanced REP MOVSB/STOSB feature is not available, use fast string + * instructions. + * + * Otherwise, use original memset function. + * + * In .altinstructions section, ERMS feature is placed after REG_GOOD + * feature to implement the right patch order. + */ .section .altinstructions,"a" - .align 8 - .quad memset - .quad .Lmemset_c - .word X86_FEATURE_REP_GOOD - .byte .Lfinal - memset - .byte .Lmemset_e - .Lmemset_c + altinstruction_entry memset,.Lmemset_c,X86_FEATURE_REP_GOOD,\ + .Lfinal-memset,.Lmemset_e-.Lmemset_c + altinstruction_entry memset,.Lmemset_c_e,X86_FEATURE_ERMS, \ + .Lfinal-memset,.Lmemset_e_e-.Lmemset_c_e .previous diff --git a/trunk/arch/x86/mm/Makefile b/trunk/arch/x86/mm/Makefile index 3e608edf9958..3d11327c9ab4 100644 --- a/trunk/arch/x86/mm/Makefile +++ b/trunk/arch/x86/mm/Makefile @@ -23,8 +23,8 @@ mmiotrace-y := kmmio.o pf_in.o mmio-mod.o obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o obj-$(CONFIG_NUMA) += numa.o numa_$(BITS).o -obj-$(CONFIG_AMD_NUMA) += amdtopology_64.o -obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o +obj-$(CONFIG_AMD_NUMA) += amdtopology.o +obj-$(CONFIG_ACPI_NUMA) += srat.o obj-$(CONFIG_NUMA_EMU) += numa_emulation.o obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o diff --git a/trunk/arch/x86/mm/amdtopology_64.c b/trunk/arch/x86/mm/amdtopology.c similarity index 90% rename from trunk/arch/x86/mm/amdtopology_64.c rename to trunk/arch/x86/mm/amdtopology.c index 0919c26820d4..5247d01329ca 100644 --- a/trunk/arch/x86/mm/amdtopology_64.c +++ b/trunk/arch/x86/mm/amdtopology.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -69,10 +70,10 @@ static __init void early_get_boot_cpu_id(void) int __init amd_numa_init(void) { - unsigned long start = PFN_PHYS(0); - unsigned long end = PFN_PHYS(max_pfn); + u64 start = PFN_PHYS(0); + u64 end = PFN_PHYS(max_pfn); unsigned numnodes; - unsigned long prevbase; + u64 prevbase; int i, j, nb; u32 nodeid, reg; unsigned int bits, cores, apicid_base; @@ -95,7 +96,7 @@ int __init amd_numa_init(void) prevbase = 0; for (i = 0; i < 8; i++) { - unsigned long base, limit; + u64 base, limit; base = read_pci_config(0, nb, 1, 0x40 + i*8); limit = read_pci_config(0, nb, 1, 0x44 + i*8); @@ -107,18 +108,18 @@ int __init amd_numa_init(void) continue; } if (nodeid >= numnodes) { - pr_info("Ignoring excess node %d (%lx:%lx)\n", nodeid, + pr_info("Ignoring excess node %d (%Lx:%Lx)\n", nodeid, base, limit); continue; } if (!limit) { - pr_info("Skipping node entry %d (base %lx)\n", + pr_info("Skipping node entry %d (base %Lx)\n", i, base); continue; } if ((base >> 8) & 3 || (limit >> 8) & 3) { - pr_err("Node %d using interleaving mode %lx/%lx\n", + pr_err("Node %d using interleaving mode %Lx/%Lx\n", nodeid, (base >> 8) & 3, (limit >> 8) & 3); return -EINVAL; } @@ -150,19 +151,19 @@ int __init amd_numa_init(void) continue; } if (limit < base) { - pr_err("Node %d bogus settings %lx-%lx.\n", + 
pr_err("Node %d bogus settings %Lx-%Lx.\n", nodeid, base, limit); continue; } /* Could sort here, but pun for now. Should not happen anyroads. */ if (prevbase > base) { - pr_err("Node map not sorted %lx,%lx\n", + pr_err("Node map not sorted %Lx,%Lx\n", prevbase, base); return -EINVAL; } - pr_info("Node %d MemBase %016lx Limit %016lx\n", + pr_info("Node %d MemBase %016Lx Limit %016Lx\n", nodeid, base, limit); prevbase = base; diff --git a/trunk/arch/x86/mm/fault.c b/trunk/arch/x86/mm/fault.c index 20e3f8702d1e..bcb394dfbb35 100644 --- a/trunk/arch/x86/mm/fault.c +++ b/trunk/arch/x86/mm/fault.c @@ -12,6 +12,7 @@ #include /* kmmio_handler, ... */ #include /* perf_sw_event */ #include /* hstate_index_to_shift */ +#include /* prefetchw */ #include /* dotraplinkage, ... */ #include /* pgd_*(), ... */ diff --git a/trunk/arch/x86/mm/init.c b/trunk/arch/x86/mm/init.c index 286d289b039b..37b8b0fe8320 100644 --- a/trunk/arch/x86/mm/init.c +++ b/trunk/arch/x86/mm/init.c @@ -81,6 +81,11 @@ static void __init find_early_table_space(unsigned long end, int use_pse, end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT); } +void __init native_pagetable_reserve(u64 start, u64 end) +{ + memblock_x86_reserve_range(start, end, "PGTABLE"); +} + struct map_range { unsigned long start; unsigned long end; @@ -272,9 +277,24 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, __flush_tlb_all(); + /* + * Reserve the kernel pagetable pages we used (pgt_buf_start - + * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top) + * so that they can be reused for other purposes. + * + * On native it just means calling memblock_x86_reserve_range, on Xen it + * also means marking RW the pagetable pages that we allocated before + * but that haven't been used. + * + * In fact on xen we mark RO the whole range pgt_buf_start - + * pgt_buf_top, because we have to make sure that when + * init_memory_mapping reaches the pagetable pages area, it maps + * RO all the pagetable pages, including the ones that are beyond + * pgt_buf_end at that time. + */ if (!after_bootmem && pgt_buf_end > pgt_buf_start) - memblock_x86_reserve_range(pgt_buf_start << PAGE_SHIFT, - pgt_buf_end << PAGE_SHIFT, "PGTABLE"); + x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start), + PFN_PHYS(pgt_buf_end)); if (!after_bootmem) early_memtest(start, end); diff --git a/trunk/arch/x86/mm/init_32.c b/trunk/arch/x86/mm/init_32.c index 80088f994193..29f7c6d98179 100644 --- a/trunk/arch/x86/mm/init_32.c +++ b/trunk/arch/x86/mm/init_32.c @@ -678,8 +678,10 @@ static void __init zone_sizes_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES]; memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); +#ifdef CONFIG_ZONE_DMA max_zone_pfns[ZONE_DMA] = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; +#endif max_zone_pfns[ZONE_NORMAL] = max_low_pfn; #ifdef CONFIG_HIGHMEM max_zone_pfns[ZONE_HIGHMEM] = highend_pfn; @@ -716,6 +718,7 @@ void __init paging_init(void) * NOTE: at this point the bootmem allocator is fully available. 
*/ olpc_dt_build_devicetree(); + sparse_memory_present_with_active_regions(MAX_NUMNODES); sparse_init(); zone_sizes_init(); } diff --git a/trunk/arch/x86/mm/init_64.c b/trunk/arch/x86/mm/init_64.c index 794233587287..d865c4aeec55 100644 --- a/trunk/arch/x86/mm/init_64.c +++ b/trunk/arch/x86/mm/init_64.c @@ -616,7 +616,9 @@ void __init paging_init(void) unsigned long max_zone_pfns[MAX_NR_ZONES]; memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); +#ifdef CONFIG_ZONE_DMA max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; +#endif max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN; max_zone_pfns[ZONE_NORMAL] = max_pfn; @@ -679,14 +681,6 @@ int arch_add_memory(int nid, u64 start, u64 size) } EXPORT_SYMBOL_GPL(arch_add_memory); -#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA) -int memory_add_physaddr_to_nid(u64 start) -{ - return 0; -} -EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); -#endif - #endif /* CONFIG_MEMORY_HOTPLUG */ static struct kcore_list kcore_vsyscall; diff --git a/trunk/arch/x86/mm/ioremap.c b/trunk/arch/x86/mm/ioremap.c index 0369843511dc..be1ef574ce9a 100644 --- a/trunk/arch/x86/mm/ioremap.c +++ b/trunk/arch/x86/mm/ioremap.c @@ -90,13 +90,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, if (is_ISA_range(phys_addr, last_addr)) return (__force void __iomem *)phys_to_virt(phys_addr); - /* - * Check if the request spans more than any BAR in the iomem resource - * tree. - */ - WARN_ONCE(iomem_map_sanity_check(phys_addr, size), - KERN_INFO "Info: mapping multiple BARs. Your kernel is fine."); - /* * Don't allow anybody to remap normal RAM that we're using.. */ @@ -170,6 +163,13 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, ret_addr = (void __iomem *) (vaddr + offset); mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr); + /* + * Check if the request spans more than any BAR in the iomem resource + * tree. + */ + WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size), + KERN_INFO "Info: mapping multiple BARs. Your kernel is fine."); + return ret_addr; err_free_area: free_vm_area(area); diff --git a/trunk/arch/x86/mm/numa.c b/trunk/arch/x86/mm/numa.c index 9559d360fde7..f5510d889a22 100644 --- a/trunk/arch/x86/mm/numa.c +++ b/trunk/arch/x86/mm/numa.c @@ -1,11 +1,39 @@ /* Common code for 32 and 64-bit NUMA */ -#include -#include +#include +#include +#include +#include #include -#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include #include +#include + +#include "numa_internal.h" int __initdata numa_off; +nodemask_t numa_nodes_parsed __initdata; + +struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; +EXPORT_SYMBOL(node_data); + +static struct numa_meminfo numa_meminfo +#ifndef CONFIG_MEMORY_HOTPLUG +__initdata +#endif +; + +static int numa_distance_cnt; +static u8 *numa_distance; static __init int numa_setup(char *opt) { @@ -32,6 +60,15 @@ s16 __apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = { [0 ... 
MAX_LOCAL_APIC-1] = NUMA_NO_NODE }; +int __cpuinit numa_cpu_node(int cpu) +{ + int apicid = early_per_cpu(x86_cpu_to_apicid, cpu); + + if (apicid != BAD_APICID) + return __apicid_to_node[apicid]; + return NUMA_NO_NODE; +} + cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; EXPORT_SYMBOL(node_to_cpumask_map); @@ -95,6 +132,407 @@ void __init setup_node_to_cpumask_map(void) pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids); } +static int __init numa_add_memblk_to(int nid, u64 start, u64 end, + struct numa_meminfo *mi) +{ + /* ignore zero length blks */ + if (start == end) + return 0; + + /* whine about and ignore invalid blks */ + if (start > end || nid < 0 || nid >= MAX_NUMNODES) { + pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n", + nid, start, end); + return 0; + } + + if (mi->nr_blks >= NR_NODE_MEMBLKS) { + pr_err("NUMA: too many memblk ranges\n"); + return -EINVAL; + } + + mi->blk[mi->nr_blks].start = start; + mi->blk[mi->nr_blks].end = end; + mi->blk[mi->nr_blks].nid = nid; + mi->nr_blks++; + return 0; +} + +/** + * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo + * @idx: Index of memblk to remove + * @mi: numa_meminfo to remove memblk from + * + * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and + * decrementing @mi->nr_blks. + */ +void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi) +{ + mi->nr_blks--; + memmove(&mi->blk[idx], &mi->blk[idx + 1], + (mi->nr_blks - idx) * sizeof(mi->blk[0])); +} + +/** + * numa_add_memblk - Add one numa_memblk to numa_meminfo + * @nid: NUMA node ID of the new memblk + * @start: Start address of the new memblk + * @end: End address of the new memblk + * + * Add a new memblk to the default numa_meminfo. + * + * RETURNS: + * 0 on success, -errno on failure. + */ +int __init numa_add_memblk(int nid, u64 start, u64 end) +{ + return numa_add_memblk_to(nid, start, end, &numa_meminfo); +} + +/* Initialize NODE_DATA for a node on the local memory */ +static void __init setup_node_data(int nid, u64 start, u64 end) +{ + const u64 nd_low = PFN_PHYS(MAX_DMA_PFN); + const u64 nd_high = PFN_PHYS(max_pfn_mapped); + const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE); + bool remapped = false; + u64 nd_pa; + void *nd; + int tnid; + + /* + * Don't confuse VM with a node that doesn't have the + * minimum amount of memory: + */ + if (end && (end - start) < NODE_MIN_SIZE) + return; + + /* initialize remap allocator before aligning to ZONE_ALIGN */ + init_alloc_remap(nid, start, end); + + start = roundup(start, ZONE_ALIGN); + + printk(KERN_INFO "Initmem setup node %d %016Lx-%016Lx\n", + nid, start, end); + + /* + * Allocate node data. Try remap allocator first, node-local + * memory and then any node. Never allocate in DMA zone. + */ + nd = alloc_remap(nid, nd_size); + if (nd) { + nd_pa = __pa(nd); + remapped = true; + } else { + nd_pa = memblock_x86_find_in_range_node(nid, nd_low, nd_high, + nd_size, SMP_CACHE_BYTES); + if (nd_pa == MEMBLOCK_ERROR) + nd_pa = memblock_find_in_range(nd_low, nd_high, + nd_size, SMP_CACHE_BYTES); + if (nd_pa == MEMBLOCK_ERROR) { + pr_err("Cannot find %zu bytes in node %d\n", + nd_size, nid); + return; + } + memblock_x86_reserve_range(nd_pa, nd_pa + nd_size, "NODE_DATA"); + nd = __va(nd_pa); + } + + /* report and initialize */ + printk(KERN_INFO " NODE_DATA [%016Lx - %016Lx]%s\n", + nd_pa, nd_pa + nd_size - 1, remapped ? 
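/*
 * Editor's aside, not part of the patch: numa_add_memblk_to() above records
 * memory ranges in a flat (start, end, nid) table.  A self-contained sketch of
 * that bookkeeping; field names mirror struct numa_memblk / numa_meminfo, and
 * NR_BLKS is a stand-in for NR_NODE_MEMBLKS.
 */
#include <stdio.h>

#define NR_BLKS 16

struct memblk  { unsigned long long start, end; int nid; };
struct meminfo { int nr_blks; struct memblk blk[NR_BLKS]; };

static int add_memblk(struct meminfo *mi, int nid,
                      unsigned long long start, unsigned long long end)
{
        if (start == end)
                return 0;               /* ignore zero-length ranges */
        if (mi->nr_blks >= NR_BLKS)
                return -1;              /* table full */
        mi->blk[mi->nr_blks++] = (struct memblk){ start, end, nid };
        return 0;
}

int main(void)
{
        struct meminfo mi = { 0 };

        add_memblk(&mi, 0, 0x0ULL,        0x80000000ULL);      /* node 0: first 2G */
        add_memblk(&mi, 1, 0x80000000ULL, 0x100000000ULL);     /* node 1: next 2G */
        printf("%d ranges recorded\n", mi.nr_blks);
        return 0;
}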
" (remapped)" : ""); + tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); + if (!remapped && tnid != nid) + printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nid, tnid); + + node_data[nid] = nd; + memset(NODE_DATA(nid), 0, sizeof(pg_data_t)); + NODE_DATA(nid)->node_id = nid; + NODE_DATA(nid)->node_start_pfn = start >> PAGE_SHIFT; + NODE_DATA(nid)->node_spanned_pages = (end - start) >> PAGE_SHIFT; + + node_set_online(nid); +} + +/** + * numa_cleanup_meminfo - Cleanup a numa_meminfo + * @mi: numa_meminfo to clean up + * + * Sanitize @mi by merging and removing unncessary memblks. Also check for + * conflicts and clear unused memblks. + * + * RETURNS: + * 0 on success, -errno on failure. + */ +int __init numa_cleanup_meminfo(struct numa_meminfo *mi) +{ + const u64 low = 0; + const u64 high = PFN_PHYS(max_pfn); + int i, j, k; + + /* first, trim all entries */ + for (i = 0; i < mi->nr_blks; i++) { + struct numa_memblk *bi = &mi->blk[i]; + + /* make sure all blocks are inside the limits */ + bi->start = max(bi->start, low); + bi->end = min(bi->end, high); + + /* and there's no empty block */ + if (bi->start >= bi->end) + numa_remove_memblk_from(i--, mi); + } + + /* merge neighboring / overlapping entries */ + for (i = 0; i < mi->nr_blks; i++) { + struct numa_memblk *bi = &mi->blk[i]; + + for (j = i + 1; j < mi->nr_blks; j++) { + struct numa_memblk *bj = &mi->blk[j]; + u64 start, end; + + /* + * See whether there are overlapping blocks. Whine + * about but allow overlaps of the same nid. They + * will be merged below. + */ + if (bi->end > bj->start && bi->start < bj->end) { + if (bi->nid != bj->nid) { + pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n", + bi->nid, bi->start, bi->end, + bj->nid, bj->start, bj->end); + return -EINVAL; + } + pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n", + bi->nid, bi->start, bi->end, + bj->start, bj->end); + } + + /* + * Join together blocks on the same node, holes + * between which don't overlap with memory on other + * nodes. + */ + if (bi->nid != bj->nid) + continue; + start = min(bi->start, bj->start); + end = max(bi->end, bj->end); + for (k = 0; k < mi->nr_blks; k++) { + struct numa_memblk *bk = &mi->blk[k]; + + if (bi->nid == bk->nid) + continue; + if (start < bk->end && end > bk->start) + break; + } + if (k < mi->nr_blks) + continue; + printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%Lx,%Lx)\n", + bi->nid, bi->start, bi->end, bj->start, bj->end, + start, end); + bi->start = start; + bi->end = end; + numa_remove_memblk_from(j--, mi); + } + } + + /* clear unused ones */ + for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) { + mi->blk[i].start = mi->blk[i].end = 0; + mi->blk[i].nid = NUMA_NO_NODE; + } + + return 0; +} + +/* + * Set nodes, which have memory in @mi, in *@nodemask. + */ +static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask, + const struct numa_meminfo *mi) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mi->blk); i++) + if (mi->blk[i].start != mi->blk[i].end && + mi->blk[i].nid != NUMA_NO_NODE) + node_set(mi->blk[i].nid, *nodemask); +} + +/** + * numa_reset_distance - Reset NUMA distance table + * + * The current table is freed. The next numa_set_distance() call will + * create a new one. 
+ */ +void __init numa_reset_distance(void) +{ + size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]); + + /* numa_distance could be 1LU marking allocation failure, test cnt */ + if (numa_distance_cnt) + memblock_x86_free_range(__pa(numa_distance), + __pa(numa_distance) + size); + numa_distance_cnt = 0; + numa_distance = NULL; /* enable table creation */ +} + +static int __init numa_alloc_distance(void) +{ + nodemask_t nodes_parsed; + size_t size; + int i, j, cnt = 0; + u64 phys; + + /* size the new table and allocate it */ + nodes_parsed = numa_nodes_parsed; + numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo); + + for_each_node_mask(i, nodes_parsed) + cnt = i; + cnt++; + size = cnt * cnt * sizeof(numa_distance[0]); + + phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped), + size, PAGE_SIZE); + if (phys == MEMBLOCK_ERROR) { + pr_warning("NUMA: Warning: can't allocate distance table!\n"); + /* don't retry until explicitly reset */ + numa_distance = (void *)1LU; + return -ENOMEM; + } + memblock_x86_reserve_range(phys, phys + size, "NUMA DIST"); + + numa_distance = __va(phys); + numa_distance_cnt = cnt; + + /* fill with the default distances */ + for (i = 0; i < cnt; i++) + for (j = 0; j < cnt; j++) + numa_distance[i * cnt + j] = i == j ? + LOCAL_DISTANCE : REMOTE_DISTANCE; + printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt); + + return 0; +} + +/** + * numa_set_distance - Set NUMA distance from one NUMA to another + * @from: the 'from' node to set distance + * @to: the 'to' node to set distance + * @distance: NUMA distance + * + * Set the distance from node @from to @to to @distance. If distance table + * doesn't exist, one which is large enough to accommodate all the currently + * known nodes will be created. + * + * If such table cannot be allocated, a warning is printed and further + * calls are ignored until the distance table is reset with + * numa_reset_distance(). + * + * If @from or @to is higher than the highest known node at the time of + * table creation or @distance doesn't make sense, the call is ignored. + * This is to allow simplification of specific NUMA config implementations. + */ +void __init numa_set_distance(int from, int to, int distance) +{ + if (!numa_distance && numa_alloc_distance() < 0) + return; + + if (from >= numa_distance_cnt || to >= numa_distance_cnt) { + printk_once(KERN_DEBUG "NUMA: Debug: distance out of bound, from=%d to=%d distance=%d\n", + from, to, distance); + return; + } + + if ((u8)distance != distance || + (from == to && distance != LOCAL_DISTANCE)) { + pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n", + from, to, distance); + return; + } + + numa_distance[from * numa_distance_cnt + to] = distance; +} + +int __node_distance(int from, int to) +{ + if (from >= numa_distance_cnt || to >= numa_distance_cnt) + return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE; + return numa_distance[from * numa_distance_cnt + to]; +} +EXPORT_SYMBOL(__node_distance); + +/* + * Sanity check to catch more bad NUMA configurations (they are amazingly + * common). Make sure the nodes cover all memory. 
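/*
 * Editor's aside, not part of the patch: numa_set_distance() above ignores
 * bogus input instead of failing, so quirky firmware distance tables do not
 * abort NUMA setup.  A self-contained sketch of those acceptance rules:
 */
#include <stdio.h>

#define LOCAL_DISTANCE 10

static int distance_ok(int from, int to, int distance, int cnt)
{
        if (from >= cnt || to >= cnt)
                return 0;                       /* outside the allocated table */
        if ((unsigned char)distance != distance)
                return 0;                       /* must fit in a u8 */
        if (from == to && distance != LOCAL_DISTANCE)
                return 0;                       /* self-distance must be LOCAL_DISTANCE */
        return 1;
}

int main(void)
{
        printf("%d %d %d\n",
               distance_ok(0, 1, 21, 2),        /* 1: accepted */
               distance_ok(0, 3, 21, 2),        /* 0: node out of range */
               distance_ok(1, 1, 42, 2));       /* 0: bad self-distance */
        return 0;
}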
+ */ +static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi) +{ + u64 numaram, e820ram; + int i; + + numaram = 0; + for (i = 0; i < mi->nr_blks; i++) { + u64 s = mi->blk[i].start >> PAGE_SHIFT; + u64 e = mi->blk[i].end >> PAGE_SHIFT; + numaram += e - s; + numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e); + if ((s64)numaram < 0) + numaram = 0; + } + + e820ram = max_pfn - (memblock_x86_hole_size(0, + PFN_PHYS(max_pfn)) >> PAGE_SHIFT); + /* We seem to lose 3 pages somewhere. Allow 1M of slack. */ + if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) { + printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n", + (numaram << PAGE_SHIFT) >> 20, + (e820ram << PAGE_SHIFT) >> 20); + return false; + } + return true; +} + +static int __init numa_register_memblks(struct numa_meminfo *mi) +{ + int i, nid; + + /* Account for nodes with cpus and no memory */ + node_possible_map = numa_nodes_parsed; + numa_nodemask_from_meminfo(&node_possible_map, mi); + if (WARN_ON(nodes_empty(node_possible_map))) + return -EINVAL; + + for (i = 0; i < mi->nr_blks; i++) + memblock_x86_register_active_regions(mi->blk[i].nid, + mi->blk[i].start >> PAGE_SHIFT, + mi->blk[i].end >> PAGE_SHIFT); + + /* for out of order entries */ + sort_node_map(); + if (!numa_meminfo_cover_memory(mi)) + return -EINVAL; + + /* Finally register nodes. */ + for_each_node_mask(nid, node_possible_map) { + u64 start = PFN_PHYS(max_pfn); + u64 end = 0; + + for (i = 0; i < mi->nr_blks; i++) { + if (nid != mi->blk[i].nid) + continue; + start = min(mi->blk[i].start, start); + end = max(mi->blk[i].end, end); + } + + if (start < end) + setup_node_data(nid, start, end); + } + + return 0; +} + /* * There are unfortunately some poorly designed mainboards around that * only connect memory to a single CPU. This breaks the 1:1 cpu->node @@ -102,7 +540,7 @@ void __init setup_node_to_cpumask_map(void) * as the number of CPUs is not known yet. We round robin the existing * nodes. */ -void __init numa_init_array(void) +static void __init numa_init_array(void) { int rr, i; @@ -117,6 +555,95 @@ void __init numa_init_array(void) } } +static int __init numa_init(int (*init_func)(void)) +{ + int i; + int ret; + + for (i = 0; i < MAX_LOCAL_APIC; i++) + set_apicid_to_node(i, NUMA_NO_NODE); + + nodes_clear(numa_nodes_parsed); + nodes_clear(node_possible_map); + nodes_clear(node_online_map); + memset(&numa_meminfo, 0, sizeof(numa_meminfo)); + remove_all_active_ranges(); + numa_reset_distance(); + + ret = init_func(); + if (ret < 0) + return ret; + ret = numa_cleanup_meminfo(&numa_meminfo); + if (ret < 0) + return ret; + + numa_emulation(&numa_meminfo, numa_distance_cnt); + + ret = numa_register_memblks(&numa_meminfo); + if (ret < 0) + return ret; + + for (i = 0; i < nr_cpu_ids; i++) { + int nid = early_cpu_to_node(i); + + if (nid == NUMA_NO_NODE) + continue; + if (!node_online(nid)) + numa_clear_node(i); + } + numa_init_array(); + return 0; +} + +/** + * dummy_numa_init - Fallback dummy NUMA init + * + * Used if there's no underlying NUMA architecture, NUMA initialization + * fails, or NUMA is disabled on the command line. + * + * Must online at least one node and add memory blocks that cover all + * allowed memory. This function must not fail. + */ +static int __init dummy_numa_init(void) +{ + printk(KERN_INFO "%s\n", + numa_off ? 
"NUMA turned off" : "No NUMA configuration found"); + printk(KERN_INFO "Faking a node at %016Lx-%016Lx\n", + 0LLU, PFN_PHYS(max_pfn)); + + node_set(0, numa_nodes_parsed); + numa_add_memblk(0, 0, PFN_PHYS(max_pfn)); + + return 0; +} + +/** + * x86_numa_init - Initialize NUMA + * + * Try each configured NUMA initialization method until one succeeds. The + * last fallback is dummy single node config encomapssing whole memory and + * never fails. + */ +void __init x86_numa_init(void) +{ + if (!numa_off) { +#ifdef CONFIG_X86_NUMAQ + if (!numa_init(numaq_numa_init)) + return; +#endif +#ifdef CONFIG_ACPI_NUMA + if (!numa_init(x86_acpi_numa_init)) + return; +#endif +#ifdef CONFIG_AMD_NUMA + if (!numa_init(amd_numa_init)) + return; +#endif + } + + numa_init(dummy_numa_init); +} + static __init int find_near_online_node(int node) { int n, val; @@ -213,53 +740,48 @@ int early_cpu_to_node(int cpu) return per_cpu(x86_cpu_to_node_map, cpu); } -struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable) +void debug_cpumask_set_cpu(int cpu, int node, bool enable) { - int node = early_cpu_to_node(cpu); struct cpumask *mask; char buf[64]; if (node == NUMA_NO_NODE) { /* early_cpu_to_node() already emits a warning and trace */ - return NULL; + return; } mask = node_to_cpumask_map[node]; if (!mask) { pr_err("node_to_cpumask_map[%i] NULL\n", node); dump_stack(); - return NULL; + return; } + if (enable) + cpumask_set_cpu(cpu, mask); + else + cpumask_clear_cpu(cpu, mask); + cpulist_scnprintf(buf, sizeof(buf), mask); printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf); - return mask; + return; } # ifndef CONFIG_NUMA_EMU -static void __cpuinit numa_set_cpumask(int cpu, int enable) +static void __cpuinit numa_set_cpumask(int cpu, bool enable) { - struct cpumask *mask; - - mask = debug_cpumask_set_cpu(cpu, enable); - if (!mask) - return; - - if (enable) - cpumask_set_cpu(cpu, mask); - else - cpumask_clear_cpu(cpu, mask); + debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable); } void __cpuinit numa_add_cpu(int cpu) { - numa_set_cpumask(cpu, 1); + numa_set_cpumask(cpu, true); } void __cpuinit numa_remove_cpu(int cpu) { - numa_set_cpumask(cpu, 0); + numa_set_cpumask(cpu, false); } # endif /* !CONFIG_NUMA_EMU */ @@ -287,3 +809,18 @@ const struct cpumask *cpumask_of_node(int node) EXPORT_SYMBOL(cpumask_of_node); #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ + +#ifdef CONFIG_MEMORY_HOTPLUG +int memory_add_physaddr_to_nid(u64 start) +{ + struct numa_meminfo *mi = &numa_meminfo; + int nid = mi->blk[0].nid; + int i; + + for (i = 0; i < mi->nr_blks; i++) + if (mi->blk[i].start <= start && mi->blk[i].end > start) + nid = mi->blk[i].nid; + return nid; +} +EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); +#endif diff --git a/trunk/arch/x86/mm/numa_32.c b/trunk/arch/x86/mm/numa_32.c index bde3906420df..849a975d3fa0 100644 --- a/trunk/arch/x86/mm/numa_32.c +++ b/trunk/arch/x86/mm/numa_32.c @@ -22,39 +22,11 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ -#include #include #include -#include -#include -#include -#include #include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; -EXPORT_SYMBOL(node_data); - -/* - * numa interface - we expect the numa architecture specific code to have - * populated the following initialisation. 
- * - * 1) node_online_map - the map of all nodes configured (online) in the system - * 2) node_start_pfn - the starting page frame number for a node - * 3) node_end_pfn - the ending page fram number for a node - */ -unsigned long node_start_pfn[MAX_NUMNODES] __read_mostly; -unsigned long node_end_pfn[MAX_NUMNODES] __read_mostly; +#include "numa_internal.h" #ifdef CONFIG_DISCONTIGMEM /* @@ -99,108 +71,46 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn, } #endif -extern unsigned long find_max_low_pfn(void); extern unsigned long highend_pfn, highstart_pfn; #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE) -unsigned long node_remap_size[MAX_NUMNODES]; static void *node_remap_start_vaddr[MAX_NUMNODES]; void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); -static unsigned long kva_start_pfn; -static unsigned long kva_pages; - -int __cpuinit numa_cpu_node(int cpu) -{ - return apic->x86_32_numa_cpu_node(cpu); -} - -/* - * FLAT - support for basic PC memory model with discontig enabled, essentially - * a single node with all available processors in it with a flat - * memory map. - */ -int __init get_memcfg_numa_flat(void) -{ - printk(KERN_DEBUG "NUMA - single node, flat memory mode\n"); - - node_start_pfn[0] = 0; - node_end_pfn[0] = max_pfn; - memblock_x86_register_active_regions(0, 0, max_pfn); - memory_present(0, 0, max_pfn); - node_remap_size[0] = node_memmap_size_bytes(0, 0, max_pfn); - - /* Indicate there is one node available. */ - nodes_clear(node_online_map); - node_set_online(0); - return 1; -} - -/* - * Find the highest page frame number we have available for the node - */ -static void __init propagate_e820_map_node(int nid) -{ - if (node_end_pfn[nid] > max_pfn) - node_end_pfn[nid] = max_pfn; - /* - * if a user has given mem=XXXX, then we need to make sure - * that the node _starts_ before that, too, not just ends - */ - if (node_start_pfn[nid] > max_pfn) - node_start_pfn[nid] = max_pfn; - BUG_ON(node_start_pfn[nid] > node_end_pfn[nid]); -} - -/* - * Allocate memory for the pg_data_t for this node via a crude pre-bootmem - * method. For node zero take this from the bottom of memory, for - * subsequent nodes place them at node_remap_start_vaddr which contains - * node local data in physically node local memory. See setup_memory() - * for details. - */ -static void __init allocate_pgdat(int nid) -{ - char buf[16]; - - if (node_has_online_mem(nid) && node_remap_start_vaddr[nid]) - NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid]; - else { - unsigned long pgdat_phys; - pgdat_phys = memblock_find_in_range(min_low_pfn<>PAGE_SHIFT)); - memset(buf, 0, sizeof(buf)); - sprintf(buf, "NODE_DATA %d", nid); - memblock_x86_reserve_range(pgdat_phys, pgdat_phys + sizeof(pg_data_t), buf); - } - printk(KERN_DEBUG "allocate_pgdat: node %d NODE_DATA %08lx\n", - nid, (unsigned long)NODE_DATA(nid)); -} - /* - * In the DISCONTIGMEM and SPARSEMEM memory model, a portion of the kernel - * virtual address space (KVA) is reserved and portions of nodes are mapped - * using it. This is to allow node-local memory to be allocated for - * structures that would normally require ZONE_NORMAL. The memory is - * allocated with alloc_remap() and callers should be prepared to allocate - * from the bootmem allocator instead. 
+ * Remap memory allocator */ static unsigned long node_remap_start_pfn[MAX_NUMNODES]; static void *node_remap_end_vaddr[MAX_NUMNODES]; static void *node_remap_alloc_vaddr[MAX_NUMNODES]; -static unsigned long node_remap_offset[MAX_NUMNODES]; +/** + * alloc_remap - Allocate remapped memory + * @nid: NUMA node to allocate memory from + * @size: The size of allocation + * + * Allocate @size bytes from the remap area of NUMA node @nid. The + * size of the remap area is predetermined by init_alloc_remap() and + * only the callers considered there should call this function. For + * more info, please read the comment on top of init_alloc_remap(). + * + * The caller must be ready to handle allocation failure from this + * function and fall back to regular memory allocator in such cases. + * + * CONTEXT: + * Single CPU early boot context. + * + * RETURNS: + * Pointer to the allocated memory on success, %NULL on failure. + */ void *alloc_remap(int nid, unsigned long size) { void *allocation = node_remap_alloc_vaddr[nid]; size = ALIGN(size, L1_CACHE_BYTES); - if (!allocation || (allocation + size) >= node_remap_end_vaddr[nid]) + if (!allocation || (allocation + size) > node_remap_end_vaddr[nid]) return NULL; node_remap_alloc_vaddr[nid] += size; @@ -209,26 +119,6 @@ void *alloc_remap(int nid, unsigned long size) return allocation; } -static void __init remap_numa_kva(void) -{ - void *vaddr; - unsigned long pfn; - int node; - - for_each_online_node(node) { - printk(KERN_DEBUG "remap_numa_kva: node %d\n", node); - for (pfn=0; pfn < node_remap_size[node]; pfn += PTRS_PER_PTE) { - vaddr = node_remap_start_vaddr[node]+(pfn<> PAGE_SHIFT; printk(KERN_DEBUG "%s: node %d\n", __func__, node); - for (pfn = 0; pfn < size; pfn += PTRS_PER_PTE) { + for (pfn = 0; pfn < nr_pages; pfn += PTRS_PER_PTE) { unsigned long vaddr = start_va + (pfn << PAGE_SHIFT); pgd_t *pgd = pgd_base + pgd_index(vaddr); pud_t *pud = pud_offset(pgd, vaddr); @@ -264,132 +155,89 @@ void resume_map_numa_kva(pgd_t *pgd_base) } #endif -static __init unsigned long calculate_numa_remap_pages(void) +/** + * init_alloc_remap - Initialize remap allocator for a NUMA node + * @nid: NUMA node to initizlie remap allocator for + * + * NUMA nodes may end up without any lowmem. As allocating pgdat and + * memmap on a different node with lowmem is inefficient, a special + * remap allocator is implemented which can be used by alloc_remap(). + * + * For each node, the amount of memory which will be necessary for + * pgdat and memmap is calculated and two memory areas of the size are + * allocated - one in the node and the other in lowmem; then, the area + * in the node is remapped to the lowmem area. + * + * As pgdat and memmap must be allocated in lowmem anyway, this + * doesn't waste lowmem address space; however, the actual lowmem + * which gets remapped over is wasted. The amount shouldn't be + * problematic on machines this feature will be used. + * + * Initialization failure isn't fatal. alloc_remap() is used + * opportunistically and the callers will fall back to other memory + * allocation mechanisms on failure. + */ +void __init init_alloc_remap(int nid, u64 start, u64 end) { - int nid; - unsigned long size, reserve_pages = 0; - - for_each_online_node(nid) { - u64 node_kva_target; - u64 node_kva_final; - - /* - * The acpi/srat node info can show hot-add memroy zones - * where memory could be added but not currently present. 
- */ - printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n", - nid, node_start_pfn[nid], node_end_pfn[nid]); - if (node_start_pfn[nid] > max_pfn) - continue; - if (!node_end_pfn[nid]) - continue; - if (node_end_pfn[nid] > max_pfn) - node_end_pfn[nid] = max_pfn; - - /* ensure the remap includes space for the pgdat. */ - size = node_remap_size[nid] + sizeof(pg_data_t); - - /* convert size to large (pmd size) pages, rounding up */ - size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES; - /* now the roundup is correct, convert to PAGE_SIZE pages */ - size = size * PTRS_PER_PTE; - - node_kva_target = round_down(node_end_pfn[nid] - size, - PTRS_PER_PTE); - node_kva_target <<= PAGE_SHIFT; - do { - node_kva_final = memblock_find_in_range(node_kva_target, - ((u64)node_end_pfn[nid])<>PAGE_SHIFT) > (node_start_pfn[nid])); - - if (node_kva_final == MEMBLOCK_ERROR) - panic("Can not get kva ram\n"); - - node_remap_size[nid] = size; - node_remap_offset[nid] = reserve_pages; - reserve_pages += size; - printk(KERN_DEBUG "Reserving %ld pages of KVA for lmem_map of" - " node %d at %llx\n", - size, nid, node_kva_final>>PAGE_SHIFT); - - /* - * prevent kva address below max_low_pfn want it on system - * with less memory later. - * layout will be: KVA address , KVA RAM - * - * we are supposed to only record the one less then max_low_pfn - * but we could have some hole in high memory, and it will only - * check page_is_ram(pfn) && !page_is_reserved_early(pfn) to decide - * to use it as free. - * So memblock_x86_reserve_range here, hope we don't run out of that array - */ - memblock_x86_reserve_range(node_kva_final, - node_kva_final+(((u64)size)<>PAGE_SHIFT; - } - printk(KERN_INFO "Reserving total of %lx pages for numa KVA remap\n", - reserve_pages); - return reserve_pages; -} + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long end_pfn = end >> PAGE_SHIFT; + unsigned long size, pfn; + u64 node_pa, remap_pa; + void *remap_va; -static void init_remap_allocator(int nid) -{ - node_remap_start_vaddr[nid] = pfn_to_kaddr( - kva_start_pfn + node_remap_offset[nid]); - node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] + - (node_remap_size[nid] * PAGE_SIZE); - node_remap_alloc_vaddr[nid] = node_remap_start_vaddr[nid] + - ALIGN(sizeof(pg_data_t), PAGE_SIZE); - - printk(KERN_DEBUG "node %d will remap to vaddr %08lx - %08lx\n", nid, - (ulong) node_remap_start_vaddr[nid], - (ulong) node_remap_end_vaddr[nid]); + /* + * The acpi/srat node info can show hot-add memroy zones where + * memory could be added but not currently present. 
+ */ + printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n", + nid, start_pfn, end_pfn); + + /* calculate the necessary space aligned to large page size */ + size = node_memmap_size_bytes(nid, start_pfn, end_pfn); + size += ALIGN(sizeof(pg_data_t), PAGE_SIZE); + size = ALIGN(size, LARGE_PAGE_BYTES); + + /* allocate node memory and the lowmem remap area */ + node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES); + if (node_pa == MEMBLOCK_ERROR) { + pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n", + size, nid); + return; + } + memblock_x86_reserve_range(node_pa, node_pa + size, "KVA RAM"); + + remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT, + max_low_pfn << PAGE_SHIFT, + size, LARGE_PAGE_BYTES); + if (remap_pa == MEMBLOCK_ERROR) { + pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n", + size, nid); + memblock_x86_free_range(node_pa, node_pa + size); + return; + } + memblock_x86_reserve_range(remap_pa, remap_pa + size, "KVA PG"); + remap_va = phys_to_virt(remap_pa); + + /* perform actual remap */ + for (pfn = 0; pfn < size >> PAGE_SHIFT; pfn += PTRS_PER_PTE) + set_pmd_pfn((unsigned long)remap_va + (pfn << PAGE_SHIFT), + (node_pa >> PAGE_SHIFT) + pfn, + PAGE_KERNEL_LARGE); + + /* initialize remap allocator parameters */ + node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT; + node_remap_start_vaddr[nid] = remap_va; + node_remap_end_vaddr[nid] = remap_va + size; + node_remap_alloc_vaddr[nid] = remap_va; + + printk(KERN_DEBUG "remap_alloc: node %d [%08llx-%08llx) -> [%p-%p)\n", + nid, node_pa, node_pa + size, remap_va, remap_va + size); } void __init initmem_init(void) { - int nid; - long kva_target_pfn; - - /* - * When mapping a NUMA machine we allocate the node_mem_map arrays - * from node local memory. They are then mapped directly into KVA - * between zone normal and vmalloc space. Calculate the size of - * this space and use it to adjust the boundary between ZONE_NORMAL - * and ZONE_HIGHMEM. 
- */ - - get_memcfg_numa(); - numa_init_array(); - - kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE); + x86_numa_init(); - kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE); - do { - kva_start_pfn = memblock_find_in_range(kva_target_pfn<> PAGE_SHIFT; - kva_target_pfn -= PTRS_PER_PTE; - } while (kva_start_pfn == MEMBLOCK_ERROR && kva_target_pfn > min_low_pfn); - - if (kva_start_pfn == MEMBLOCK_ERROR) - panic("Can not get kva space\n"); - - printk(KERN_INFO "kva_start_pfn ~ %lx max_low_pfn ~ %lx\n", - kva_start_pfn, max_low_pfn); - printk(KERN_INFO "max_pfn = %lx\n", max_pfn); - - /* avoid clash with initrd */ - memblock_x86_reserve_range(kva_start_pfn< max_low_pfn) @@ -409,51 +257,9 @@ void __init initmem_init(void) printk(KERN_DEBUG "Low memory ends at vaddr %08lx\n", (ulong) pfn_to_kaddr(max_low_pfn)); - for_each_online_node(nid) { - init_remap_allocator(nid); - - allocate_pgdat(nid); - } - remap_numa_kva(); printk(KERN_DEBUG "High memory starts at vaddr %08lx\n", (ulong) pfn_to_kaddr(highstart_pfn)); - for_each_online_node(nid) - propagate_e820_map_node(nid); - - for_each_online_node(nid) { - memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); - NODE_DATA(nid)->node_id = nid; - } setup_bootmem_allocator(); } - -#ifdef CONFIG_MEMORY_HOTPLUG -static int paddr_to_nid(u64 addr) -{ - int nid; - unsigned long pfn = PFN_DOWN(addr); - - for_each_node(nid) - if (node_start_pfn[nid] <= pfn && - pfn < node_end_pfn[nid]) - return nid; - - return -1; -} - -/* - * This function is used to ask node id BEFORE memmap and mem_section's - * initialization (pfn_to_nid() can't be used yet). - * If _PXM is not defined on ACPI's DSDT, node id must be found by this. - */ -int memory_add_physaddr_to_nid(u64 addr) -{ - int nid = paddr_to_nid(addr); - return (nid >= 0) ? nid : 0; -} - -EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); -#endif - diff --git a/trunk/arch/x86/mm/numa_64.c b/trunk/arch/x86/mm/numa_64.c index e8c00cc72033..dd27f401f0a0 100644 --- a/trunk/arch/x86/mm/numa_64.c +++ b/trunk/arch/x86/mm/numa_64.c @@ -2,646 +2,13 @@ * Generic VM initialization for x86-64 NUMA setups. * Copyright 2002,2003 Andi Kleen, SuSE Labs. 
*/ -#include -#include -#include -#include #include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include #include "numa_internal.h" -struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; -EXPORT_SYMBOL(node_data); - -nodemask_t numa_nodes_parsed __initdata; - -struct memnode memnode; - -static unsigned long __initdata nodemap_addr; -static unsigned long __initdata nodemap_size; - -static struct numa_meminfo numa_meminfo __initdata; - -static int numa_distance_cnt; -static u8 *numa_distance; - -/* - * Given a shift value, try to populate memnodemap[] - * Returns : - * 1 if OK - * 0 if memnodmap[] too small (of shift too small) - * -1 if node overlap or lost ram (shift too big) - */ -static int __init populate_memnodemap(const struct numa_meminfo *mi, int shift) -{ - unsigned long addr, end; - int i, res = -1; - - memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize); - for (i = 0; i < mi->nr_blks; i++) { - addr = mi->blk[i].start; - end = mi->blk[i].end; - if (addr >= end) - continue; - if ((end >> shift) >= memnodemapsize) - return 0; - do { - if (memnodemap[addr >> shift] != NUMA_NO_NODE) - return -1; - memnodemap[addr >> shift] = mi->blk[i].nid; - addr += (1UL << shift); - } while (addr < end); - res = 1; - } - return res; -} - -static int __init allocate_cachealigned_memnodemap(void) -{ - unsigned long addr; - - memnodemap = memnode.embedded_map; - if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map)) - return 0; - - addr = 0x8000; - nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES); - nodemap_addr = memblock_find_in_range(addr, get_max_mapped(), - nodemap_size, L1_CACHE_BYTES); - if (nodemap_addr == MEMBLOCK_ERROR) { - printk(KERN_ERR - "NUMA: Unable to allocate Memory to Node hash map\n"); - nodemap_addr = nodemap_size = 0; - return -1; - } - memnodemap = phys_to_virt(nodemap_addr); - memblock_x86_reserve_range(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP"); - - printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n", - nodemap_addr, nodemap_addr + nodemap_size); - return 0; -} - -/* - * The LSB of all start and end addresses in the node map is the value of the - * maximum possible shift. 
- */ -static int __init extract_lsb_from_nodes(const struct numa_meminfo *mi) -{ - int i, nodes_used = 0; - unsigned long start, end; - unsigned long bitfield = 0, memtop = 0; - - for (i = 0; i < mi->nr_blks; i++) { - start = mi->blk[i].start; - end = mi->blk[i].end; - if (start >= end) - continue; - bitfield |= start; - nodes_used++; - if (end > memtop) - memtop = end; - } - if (nodes_used <= 1) - i = 63; - else - i = find_first_bit(&bitfield, sizeof(unsigned long)*8); - memnodemapsize = (memtop >> i)+1; - return i; -} - -static int __init compute_hash_shift(const struct numa_meminfo *mi) -{ - int shift; - - shift = extract_lsb_from_nodes(mi); - if (allocate_cachealigned_memnodemap()) - return -1; - printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", - shift); - - if (populate_memnodemap(mi, shift) != 1) { - printk(KERN_INFO "Your memory is not aligned you need to " - "rebuild your kernel with a bigger NODEMAPSIZE " - "shift=%d\n", shift); - return -1; - } - return shift; -} - -int __meminit __early_pfn_to_nid(unsigned long pfn) -{ - return phys_to_nid(pfn << PAGE_SHIFT); -} - -static void * __init early_node_mem(int nodeid, unsigned long start, - unsigned long end, unsigned long size, - unsigned long align) -{ - unsigned long mem; - - /* - * put it on high as possible - * something will go with NODE_DATA - */ - if (start < (MAX_DMA_PFN< (MAX_DMA32_PFN< end || nid < 0 || nid >= MAX_NUMNODES) { - pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n", - nid, start, end); - return 0; - } - - if (mi->nr_blks >= NR_NODE_MEMBLKS) { - pr_err("NUMA: too many memblk ranges\n"); - return -EINVAL; - } - - mi->blk[mi->nr_blks].start = start; - mi->blk[mi->nr_blks].end = end; - mi->blk[mi->nr_blks].nid = nid; - mi->nr_blks++; - return 0; -} - -/** - * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo - * @idx: Index of memblk to remove - * @mi: numa_meminfo to remove memblk from - * - * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and - * decrementing @mi->nr_blks. - */ -void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi) -{ - mi->nr_blks--; - memmove(&mi->blk[idx], &mi->blk[idx + 1], - (mi->nr_blks - idx) * sizeof(mi->blk[0])); -} - -/** - * numa_add_memblk - Add one numa_memblk to numa_meminfo - * @nid: NUMA node ID of the new memblk - * @start: Start address of the new memblk - * @end: End address of the new memblk - * - * Add a new memblk to the default numa_meminfo. - * - * RETURNS: - * 0 on success, -errno on failure. 
- */ -int __init numa_add_memblk(int nid, u64 start, u64 end) -{ - return numa_add_memblk_to(nid, start, end, &numa_meminfo); -} - -/* Initialize bootmem allocator for a node */ -void __init -setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) -{ - unsigned long start_pfn, last_pfn, nodedata_phys; - const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE); - int nid; - - if (!end) - return; - - /* - * Don't confuse VM with a node that doesn't have the - * minimum amount of memory: - */ - if (end && (end - start) < NODE_MIN_SIZE) - return; - - start = roundup(start, ZONE_ALIGN); - - printk(KERN_INFO "Initmem setup node %d %016lx-%016lx\n", nodeid, - start, end); - - start_pfn = start >> PAGE_SHIFT; - last_pfn = end >> PAGE_SHIFT; - - node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size, - SMP_CACHE_BYTES); - if (node_data[nodeid] == NULL) - return; - nodedata_phys = __pa(node_data[nodeid]); - memblock_x86_reserve_range(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA"); - printk(KERN_INFO " NODE_DATA [%016lx - %016lx]\n", nodedata_phys, - nodedata_phys + pgdat_size - 1); - nid = phys_to_nid(nodedata_phys); - if (nid != nodeid) - printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nodeid, nid); - - memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t)); - NODE_DATA(nodeid)->node_id = nodeid; - NODE_DATA(nodeid)->node_start_pfn = start_pfn; - NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn; - - node_set_online(nodeid); -} - -/** - * numa_cleanup_meminfo - Cleanup a numa_meminfo - * @mi: numa_meminfo to clean up - * - * Sanitize @mi by merging and removing unncessary memblks. Also check for - * conflicts and clear unused memblks. - * - * RETURNS: - * 0 on success, -errno on failure. - */ -int __init numa_cleanup_meminfo(struct numa_meminfo *mi) -{ - const u64 low = 0; - const u64 high = (u64)max_pfn << PAGE_SHIFT; - int i, j, k; - - for (i = 0; i < mi->nr_blks; i++) { - struct numa_memblk *bi = &mi->blk[i]; - - /* make sure all blocks are inside the limits */ - bi->start = max(bi->start, low); - bi->end = min(bi->end, high); - - /* and there's no empty block */ - if (bi->start == bi->end) { - numa_remove_memblk_from(i--, mi); - continue; - } - - for (j = i + 1; j < mi->nr_blks; j++) { - struct numa_memblk *bj = &mi->blk[j]; - unsigned long start, end; - - /* - * See whether there are overlapping blocks. Whine - * about but allow overlaps of the same nid. They - * will be merged below. - */ - if (bi->end > bj->start && bi->start < bj->end) { - if (bi->nid != bj->nid) { - pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n", - bi->nid, bi->start, bi->end, - bj->nid, bj->start, bj->end); - return -EINVAL; - } - pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n", - bi->nid, bi->start, bi->end, - bj->start, bj->end); - } - - /* - * Join together blocks on the same node, holes - * between which don't overlap with memory on other - * nodes. 
- */ - if (bi->nid != bj->nid) - continue; - start = max(min(bi->start, bj->start), low); - end = min(max(bi->end, bj->end), high); - for (k = 0; k < mi->nr_blks; k++) { - struct numa_memblk *bk = &mi->blk[k]; - - if (bi->nid == bk->nid) - continue; - if (start < bk->end && end > bk->start) - break; - } - if (k < mi->nr_blks) - continue; - printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n", - bi->nid, bi->start, bi->end, bj->start, bj->end, - start, end); - bi->start = start; - bi->end = end; - numa_remove_memblk_from(j--, mi); - } - } - - for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) { - mi->blk[i].start = mi->blk[i].end = 0; - mi->blk[i].nid = NUMA_NO_NODE; - } - - return 0; -} - -/* - * Set nodes, which have memory in @mi, in *@nodemask. - */ -static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask, - const struct numa_meminfo *mi) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(mi->blk); i++) - if (mi->blk[i].start != mi->blk[i].end && - mi->blk[i].nid != NUMA_NO_NODE) - node_set(mi->blk[i].nid, *nodemask); -} - -/** - * numa_reset_distance - Reset NUMA distance table - * - * The current table is freed. The next numa_set_distance() call will - * create a new one. - */ -void __init numa_reset_distance(void) -{ - size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]); - - /* numa_distance could be 1LU marking allocation failure, test cnt */ - if (numa_distance_cnt) - memblock_x86_free_range(__pa(numa_distance), - __pa(numa_distance) + size); - numa_distance_cnt = 0; - numa_distance = NULL; /* enable table creation */ -} - -static int __init numa_alloc_distance(void) -{ - nodemask_t nodes_parsed; - size_t size; - int i, j, cnt = 0; - u64 phys; - - /* size the new table and allocate it */ - nodes_parsed = numa_nodes_parsed; - numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo); - - for_each_node_mask(i, nodes_parsed) - cnt = i; - cnt++; - size = cnt * cnt * sizeof(numa_distance[0]); - - phys = memblock_find_in_range(0, (u64)max_pfn_mapped << PAGE_SHIFT, - size, PAGE_SIZE); - if (phys == MEMBLOCK_ERROR) { - pr_warning("NUMA: Warning: can't allocate distance table!\n"); - /* don't retry until explicitly reset */ - numa_distance = (void *)1LU; - return -ENOMEM; - } - memblock_x86_reserve_range(phys, phys + size, "NUMA DIST"); - - numa_distance = __va(phys); - numa_distance_cnt = cnt; - - /* fill with the default distances */ - for (i = 0; i < cnt; i++) - for (j = 0; j < cnt; j++) - numa_distance[i * cnt + j] = i == j ? - LOCAL_DISTANCE : REMOTE_DISTANCE; - printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt); - - return 0; -} - -/** - * numa_set_distance - Set NUMA distance from one NUMA to another - * @from: the 'from' node to set distance - * @to: the 'to' node to set distance - * @distance: NUMA distance - * - * Set the distance from node @from to @to to @distance. If distance table - * doesn't exist, one which is large enough to accommodate all the currently - * known nodes will be created. - * - * If such table cannot be allocated, a warning is printed and further - * calls are ignored until the distance table is reset with - * numa_reset_distance(). - * - * If @from or @to is higher than the highest known node at the time of - * table creation or @distance doesn't make sense, the call is ignored. - * This is to allow simplification of specific NUMA config implementations. 
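The distance-table code in this hunk stores node-to-node distances in a flat cnt x cnt byte matrix, defaulting to LOCAL_DISTANCE on the diagonal and REMOTE_DISTANCE elsewhere, with out-of-range lookups falling back to those defaults. A small stand-alone sketch of the same layout, with invented helper names and a made-up SLIT-style value:

#include <stdio.h>
#include <stdlib.h>

#define LOCAL_DISTANCE  10
#define REMOTE_DISTANCE 20

static unsigned char *dist;	/* flat cnt x cnt matrix, like numa_distance[] */
static int dist_cnt;

static void dist_init(int cnt)
{
	int i, j;

	dist = malloc(cnt * cnt);
	dist_cnt = cnt;
	for (i = 0; i < cnt; i++)
		for (j = 0; j < cnt; j++)
			dist[i * cnt + j] = (i == j) ? LOCAL_DISTANCE
						     : REMOTE_DISTANCE;
}

/* Out-of-range nodes fall back to the defaults, as __node_distance() does. */
static int node_distance(int from, int to)
{
	if (from >= dist_cnt || to >= dist_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return dist[from * dist_cnt + to];
}

int main(void)
{
	dist_init(2);
	dist[0 * dist_cnt + 1] = 21;	/* e.g. a firmware-provided value */
	printf("0->0:%d 0->1:%d 5->7:%d\n",
	       node_distance(0, 0), node_distance(0, 1), node_distance(5, 7));
	free(dist);
	return 0;
}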
- */ -void __init numa_set_distance(int from, int to, int distance) -{ - if (!numa_distance && numa_alloc_distance() < 0) - return; - - if (from >= numa_distance_cnt || to >= numa_distance_cnt) { - printk_once(KERN_DEBUG "NUMA: Debug: distance out of bound, from=%d to=%d distance=%d\n", - from, to, distance); - return; - } - - if ((u8)distance != distance || - (from == to && distance != LOCAL_DISTANCE)) { - pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n", - from, to, distance); - return; - } - - numa_distance[from * numa_distance_cnt + to] = distance; -} - -int __node_distance(int from, int to) -{ - if (from >= numa_distance_cnt || to >= numa_distance_cnt) - return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE; - return numa_distance[from * numa_distance_cnt + to]; -} -EXPORT_SYMBOL(__node_distance); - -/* - * Sanity check to catch more bad NUMA configurations (they are amazingly - * common). Make sure the nodes cover all memory. - */ -static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi) -{ - unsigned long numaram, e820ram; - int i; - - numaram = 0; - for (i = 0; i < mi->nr_blks; i++) { - unsigned long s = mi->blk[i].start >> PAGE_SHIFT; - unsigned long e = mi->blk[i].end >> PAGE_SHIFT; - numaram += e - s; - numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e); - if ((long)numaram < 0) - numaram = 0; - } - - e820ram = max_pfn - (memblock_x86_hole_size(0, - max_pfn << PAGE_SHIFT) >> PAGE_SHIFT); - /* We seem to lose 3 pages somewhere. Allow 1M of slack. */ - if ((long)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) { - printk(KERN_ERR "NUMA: nodes only cover %luMB of your %luMB e820 RAM. Not used.\n", - (numaram << PAGE_SHIFT) >> 20, - (e820ram << PAGE_SHIFT) >> 20); - return false; - } - return true; -} - -static int __init numa_register_memblks(struct numa_meminfo *mi) -{ - int i, nid; - - /* Account for nodes with cpus and no memory */ - node_possible_map = numa_nodes_parsed; - numa_nodemask_from_meminfo(&node_possible_map, mi); - if (WARN_ON(nodes_empty(node_possible_map))) - return -EINVAL; - - memnode_shift = compute_hash_shift(mi); - if (memnode_shift < 0) { - printk(KERN_ERR "NUMA: No NUMA node hash function found. Contact maintainer\n"); - return -EINVAL; - } - - for (i = 0; i < mi->nr_blks; i++) - memblock_x86_register_active_regions(mi->blk[i].nid, - mi->blk[i].start >> PAGE_SHIFT, - mi->blk[i].end >> PAGE_SHIFT); - - /* for out of order entries */ - sort_node_map(); - if (!numa_meminfo_cover_memory(mi)) - return -EINVAL; - - /* Finally register nodes. */ - for_each_node_mask(nid, node_possible_map) { - u64 start = (u64)max_pfn << PAGE_SHIFT; - u64 end = 0; - - for (i = 0; i < mi->nr_blks; i++) { - if (nid != mi->blk[i].nid) - continue; - start = min(mi->blk[i].start, start); - end = max(mi->blk[i].end, end); - } - - if (start < end) - setup_node_bootmem(nid, start, end); - } - - return 0; -} - -/** - * dummy_numma_init - Fallback dummy NUMA init - * - * Used if there's no underlying NUMA architecture, NUMA initialization - * fails, or NUMA is disabled on the command line. - * - * Must online at least one node and add memory blocks that cover all - * allowed memory. This function must not fail. - */ -static int __init dummy_numa_init(void) -{ - printk(KERN_INFO "%s\n", - numa_off ? 
"NUMA turned off" : "No NUMA configuration found"); - printk(KERN_INFO "Faking a node at %016lx-%016lx\n", - 0LU, max_pfn << PAGE_SHIFT); - - node_set(0, numa_nodes_parsed); - numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT); - - return 0; -} - -static int __init numa_init(int (*init_func)(void)) -{ - int i; - int ret; - - for (i = 0; i < MAX_LOCAL_APIC; i++) - set_apicid_to_node(i, NUMA_NO_NODE); - - nodes_clear(numa_nodes_parsed); - nodes_clear(node_possible_map); - nodes_clear(node_online_map); - memset(&numa_meminfo, 0, sizeof(numa_meminfo)); - remove_all_active_ranges(); - numa_reset_distance(); - - ret = init_func(); - if (ret < 0) - return ret; - ret = numa_cleanup_meminfo(&numa_meminfo); - if (ret < 0) - return ret; - - numa_emulation(&numa_meminfo, numa_distance_cnt); - - ret = numa_register_memblks(&numa_meminfo); - if (ret < 0) - return ret; - - for (i = 0; i < nr_cpu_ids; i++) { - int nid = early_cpu_to_node(i); - - if (nid == NUMA_NO_NODE) - continue; - if (!node_online(nid)) - numa_clear_node(i); - } - numa_init_array(); - return 0; -} - void __init initmem_init(void) { - int ret; - - if (!numa_off) { -#ifdef CONFIG_ACPI_NUMA - ret = numa_init(x86_acpi_numa_init); - if (!ret) - return; -#endif -#ifdef CONFIG_AMD_NUMA - ret = numa_init(amd_numa_init); - if (!ret) - return; -#endif - } - - numa_init(dummy_numa_init); + x86_numa_init(); } unsigned long __init numa_free_all_bootmem(void) @@ -656,12 +23,3 @@ unsigned long __init numa_free_all_bootmem(void) return pages; } - -int __cpuinit numa_cpu_node(int cpu) -{ - int apicid = early_per_cpu(x86_cpu_to_apicid, cpu); - - if (apicid != BAD_APICID) - return __apicid_to_node[apicid]; - return NUMA_NO_NODE; -} diff --git a/trunk/arch/x86/mm/numa_emulation.c b/trunk/arch/x86/mm/numa_emulation.c index ad091e4cff17..d0ed086b6247 100644 --- a/trunk/arch/x86/mm/numa_emulation.c +++ b/trunk/arch/x86/mm/numa_emulation.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include "numa_internal.h" @@ -84,7 +85,13 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei, nr_nodes = MAX_NUMNODES; } - size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes; + /* + * Calculate target node size. x86_32 freaks on __udivdi3() so do + * the division in ulong number of pages and convert back. + */ + size = max_addr - addr - memblock_x86_hole_size(addr, max_addr); + size = PFN_PHYS((unsigned long)(size >> PAGE_SHIFT) / nr_nodes); + /* * Calculate the number of big nodes that can be allocated as a result * of consolidating the remainder. 
@@ -226,7 +233,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei, */ while (nodes_weight(physnode_mask)) { for_each_node_mask(i, physnode_mask) { - u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT; + u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN); u64 start, limit, end; int phys_blk; @@ -298,7 +305,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) { static struct numa_meminfo ei __initdata; static struct numa_meminfo pi __initdata; - const u64 max_addr = max_pfn << PAGE_SHIFT; + const u64 max_addr = PFN_PHYS(max_pfn); u8 *phys_dist = NULL; size_t phys_size = numa_dist_cnt * numa_dist_cnt * sizeof(phys_dist[0]); int max_emu_nid, dfl_phys_nid; @@ -342,8 +349,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) if (numa_dist_cnt) { u64 phys; - phys = memblock_find_in_range(0, - (u64)max_pfn_mapped << PAGE_SHIFT, + phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped), phys_size, PAGE_SIZE); if (phys == MEMBLOCK_ERROR) { pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n"); @@ -454,10 +460,9 @@ void __cpuinit numa_remove_cpu(int cpu) cpumask_clear_cpu(cpu, node_to_cpumask_map[i]); } #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ -static void __cpuinit numa_set_cpumask(int cpu, int enable) +static void __cpuinit numa_set_cpumask(int cpu, bool enable) { - struct cpumask *mask; - int nid, physnid, i; + int nid, physnid; nid = early_cpu_to_node(cpu); if (nid == NUMA_NO_NODE) { @@ -467,28 +472,21 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable) physnid = emu_nid_to_phys[nid]; - for_each_online_node(i) { + for_each_online_node(nid) { if (emu_nid_to_phys[nid] != physnid) continue; - mask = debug_cpumask_set_cpu(cpu, enable); - if (!mask) - return; - - if (enable) - cpumask_set_cpu(cpu, mask); - else - cpumask_clear_cpu(cpu, mask); + debug_cpumask_set_cpu(cpu, nid, enable); } } void __cpuinit numa_add_cpu(int cpu) { - numa_set_cpumask(cpu, 1); + numa_set_cpumask(cpu, true); } void __cpuinit numa_remove_cpu(int cpu) { - numa_set_cpumask(cpu, 0); + numa_set_cpumask(cpu, false); } #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ diff --git a/trunk/arch/x86/mm/numa_internal.h b/trunk/arch/x86/mm/numa_internal.h index ef2d97377d7c..7178c3afe05e 100644 --- a/trunk/arch/x86/mm/numa_internal.h +++ b/trunk/arch/x86/mm/numa_internal.h @@ -19,6 +19,14 @@ void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi); int __init numa_cleanup_meminfo(struct numa_meminfo *mi); void __init numa_reset_distance(void); +void __init x86_numa_init(void); + +#ifdef CONFIG_X86_64 +static inline void init_alloc_remap(int nid, u64 start, u64 end) { } +#else +void __init init_alloc_remap(int nid, u64 start, u64 end); +#endif + #ifdef CONFIG_NUMA_EMU void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt); diff --git a/trunk/arch/x86/mm/srat_64.c b/trunk/arch/x86/mm/srat.c similarity index 67% rename from trunk/arch/x86/mm/srat_64.c rename to trunk/arch/x86/mm/srat.c index 8e9d3394f6d4..81dbfdeb080d 100644 --- a/trunk/arch/x86/mm/srat_64.c +++ b/trunk/arch/x86/mm/srat.c @@ -26,8 +26,6 @@ int acpi_numa __initdata; -static struct bootnode nodes_add[MAX_NUMNODES]; - static __init int setup_node(int pxm) { return acpi_map_pxm_to_node(pxm); @@ -37,7 +35,6 @@ static __init void bad_srat(void) { printk(KERN_ERR "SRAT: SRAT not used.\n"); acpi_numa = -1; - memset(nodes_add, 0, sizeof(nodes_add)); } static __init inline int srat_disabled(void) @@ -131,73 +128,17 @@ 
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) pxm, apic_id, node); } -#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE +#ifdef CONFIG_MEMORY_HOTPLUG static inline int save_add_info(void) {return 1;} #else static inline int save_add_info(void) {return 0;} #endif -/* - * Update nodes_add[] - * This code supports one contiguous hot add area per node - */ -static void __init -update_nodes_add(int node, unsigned long start, unsigned long end) -{ - unsigned long s_pfn = start >> PAGE_SHIFT; - unsigned long e_pfn = end >> PAGE_SHIFT; - int changed = 0; - struct bootnode *nd = &nodes_add[node]; - - /* I had some trouble with strange memory hotadd regions breaking - the boot. Be very strict here and reject anything unexpected. - If you want working memory hotadd write correct SRATs. - - The node size check is a basic sanity check to guard against - mistakes */ - if ((signed long)(end - start) < NODE_MIN_SIZE) { - printk(KERN_ERR "SRAT: Hotplug area too small\n"); - return; - } - - /* This check might be a bit too strict, but I'm keeping it for now. */ - if (absent_pages_in_range(s_pfn, e_pfn) != e_pfn - s_pfn) { - printk(KERN_ERR - "SRAT: Hotplug area %lu -> %lu has existing memory\n", - s_pfn, e_pfn); - return; - } - - /* Looks good */ - - if (nd->start == nd->end) { - nd->start = start; - nd->end = end; - changed = 1; - } else { - if (nd->start == end) { - nd->start = start; - changed = 1; - } - if (nd->end == start) { - nd->end = end; - changed = 1; - } - if (!changed) - printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n"); - } - - if (changed) { - node_set(node, numa_nodes_parsed); - printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", - nd->start, nd->end); - } -} /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */ void __init acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) { - unsigned long start, end; + u64 start, end; int node, pxm; if (srat_disabled()) @@ -226,11 +167,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) return; } - printk(KERN_INFO "SRAT: Node %u PXM %u %lx-%lx\n", node, pxm, + printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm, start, end); - - if (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) - update_nodes_add(node, start, end); } void __init acpi_numa_arch_fixup(void) {} @@ -244,17 +182,3 @@ int __init x86_acpi_numa_init(void) return ret; return srat_disabled() ? -EINVAL : 0; } - -#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || defined(CONFIG_ACPI_HOTPLUG_MEMORY) -int memory_add_physaddr_to_nid(u64 start) -{ - int i, ret = 0; - - for_each_node(i) - if (nodes_add[i].start <= start && nodes_add[i].end > start) - ret = i; - - return ret; -} -EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); -#endif diff --git a/trunk/arch/x86/mm/srat_32.c b/trunk/arch/x86/mm/srat_32.c deleted file mode 100644 index 364f36bdfad8..000000000000 --- a/trunk/arch/x86/mm/srat_32.c +++ /dev/null @@ -1,288 +0,0 @@ -/* - * Some of the code in this file has been gleaned from the 64 bit - * discontigmem support code base. - * - * Copyright (C) 2002, IBM Corp. - * - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - * Send feedback to Pat Gaughen - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * proximity macros and definitions - */ -#define NODE_ARRAY_INDEX(x) ((x) / 8) /* 8 bits/char */ -#define NODE_ARRAY_OFFSET(x) ((x) % 8) /* 8 bits/char */ -#define BMAP_SET(bmap, bit) ((bmap)[NODE_ARRAY_INDEX(bit)] |= 1 << NODE_ARRAY_OFFSET(bit)) -#define BMAP_TEST(bmap, bit) ((bmap)[NODE_ARRAY_INDEX(bit)] & (1 << NODE_ARRAY_OFFSET(bit))) -/* bitmap length; _PXM is at most 255 */ -#define PXM_BITMAP_LEN (MAX_PXM_DOMAINS / 8) -static u8 __initdata pxm_bitmap[PXM_BITMAP_LEN]; /* bitmap of proximity domains */ - -#define MAX_CHUNKS_PER_NODE 3 -#define MAXCHUNKS (MAX_CHUNKS_PER_NODE * MAX_NUMNODES) -struct node_memory_chunk_s { - unsigned long start_pfn; - unsigned long end_pfn; - u8 pxm; // proximity domain of node - u8 nid; // which cnode contains this chunk? - u8 bank; // which mem bank on this node -}; -static struct node_memory_chunk_s __initdata node_memory_chunk[MAXCHUNKS]; - -static int __initdata num_memory_chunks; /* total number of memory chunks */ -static u8 __initdata apicid_to_pxm[MAX_LOCAL_APIC]; - -int acpi_numa __initdata; - -static __init void bad_srat(void) -{ - printk(KERN_ERR "SRAT: SRAT not used.\n"); - acpi_numa = -1; - num_memory_chunks = 0; -} - -static __init inline int srat_disabled(void) -{ - return numa_off || acpi_numa < 0; -} - -/* Identify CPU proximity domains */ -void __init -acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *cpu_affinity) -{ - if (srat_disabled()) - return; - if (cpu_affinity->header.length != - sizeof(struct acpi_srat_cpu_affinity)) { - bad_srat(); - return; - } - - if ((cpu_affinity->flags & ACPI_SRAT_CPU_ENABLED) == 0) - return; /* empty entry */ - - /* mark this node as "seen" in node bitmap */ - BMAP_SET(pxm_bitmap, cpu_affinity->proximity_domain_lo); - - /* don't need to check apic_id here, because it is always 8 bits */ - apicid_to_pxm[cpu_affinity->apic_id] = cpu_affinity->proximity_domain_lo; - - printk(KERN_DEBUG "CPU %02x in proximity domain %02x\n", - cpu_affinity->apic_id, cpu_affinity->proximity_domain_lo); -} - -/* - * Identify memory proximity domains and hot-remove capabilities. - * Fill node memory chunk list structure. 
- */ -void __init -acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *memory_affinity) -{ - unsigned long long paddr, size; - unsigned long start_pfn, end_pfn; - u8 pxm; - struct node_memory_chunk_s *p, *q, *pend; - - if (srat_disabled()) - return; - if (memory_affinity->header.length != - sizeof(struct acpi_srat_mem_affinity)) { - bad_srat(); - return; - } - - if ((memory_affinity->flags & ACPI_SRAT_MEM_ENABLED) == 0) - return; /* empty entry */ - - pxm = memory_affinity->proximity_domain & 0xff; - - /* mark this node as "seen" in node bitmap */ - BMAP_SET(pxm_bitmap, pxm); - - /* calculate info for memory chunk structure */ - paddr = memory_affinity->base_address; - size = memory_affinity->length; - - start_pfn = paddr >> PAGE_SHIFT; - end_pfn = (paddr + size) >> PAGE_SHIFT; - - - if (num_memory_chunks >= MAXCHUNKS) { - printk(KERN_WARNING "Too many mem chunks in SRAT." - " Ignoring %lld MBytes at %llx\n", - size/(1024*1024), paddr); - return; - } - - /* Insertion sort based on base address */ - pend = &node_memory_chunk[num_memory_chunks]; - for (p = &node_memory_chunk[0]; p < pend; p++) { - if (start_pfn < p->start_pfn) - break; - } - if (p < pend) { - for (q = pend; q >= p; q--) - *(q + 1) = *q; - } - p->start_pfn = start_pfn; - p->end_pfn = end_pfn; - p->pxm = pxm; - - num_memory_chunks++; - - printk(KERN_DEBUG "Memory range %08lx to %08lx" - " in proximity domain %02x %s\n", - start_pfn, end_pfn, - pxm, - ((memory_affinity->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ? - "enabled and removable" : "enabled" ) ); -} - -/* Callback for SLIT parsing */ -void __init acpi_numa_slit_init(struct acpi_table_slit *slit) -{ -} - -void acpi_numa_arch_fixup(void) -{ -} -/* - * The SRAT table always lists ascending addresses, so can always - * assume that the first "start" address that you see is the real - * start of the node, and that the current "end" address is after - * the previous one. - */ -static __init int node_read_chunk(int nid, struct node_memory_chunk_s *memory_chunk) -{ - /* - * Only add present memory as told by the e820. - * There is no guarantee from the SRAT that the memory it - * enumerates is present at boot time because it represents - * *possible* memory hotplug areas the same as normal RAM. - */ - if (memory_chunk->start_pfn >= max_pfn) { - printk(KERN_INFO "Ignoring SRAT pfns: %08lx - %08lx\n", - memory_chunk->start_pfn, memory_chunk->end_pfn); - return -1; - } - if (memory_chunk->nid != nid) - return -1; - - if (!node_has_online_mem(nid)) - node_start_pfn[nid] = memory_chunk->start_pfn; - - if (node_start_pfn[nid] > memory_chunk->start_pfn) - node_start_pfn[nid] = memory_chunk->start_pfn; - - if (node_end_pfn[nid] < memory_chunk->end_pfn) - node_end_pfn[nid] = memory_chunk->end_pfn; - - return 0; -} - -int __init get_memcfg_from_srat(void) -{ - int i, j, nid; - - if (srat_disabled()) - goto out_fail; - - if (acpi_numa_init() < 0) - goto out_fail; - - if (num_memory_chunks == 0) { - printk(KERN_DEBUG - "could not find any ACPI SRAT memory areas.\n"); - goto out_fail; - } - - /* Calculate total number of nodes in system from PXM bitmap and create - * a set of sequential node IDs starting at zero. (ACPI doesn't seem - * to specify the range of _PXM values.) - */ - /* - * MCD - we no longer HAVE to number nodes sequentially. PXM domain - * numbers could go as high as 256, and MAX_NUMNODES for i386 is typically - * 32, so we will continue numbering them in this manner until MAX_NUMNODES - * approaches MAX_PXM_DOMAINS for i386. 
- */ - nodes_clear(node_online_map); - for (i = 0; i < MAX_PXM_DOMAINS; i++) { - if (BMAP_TEST(pxm_bitmap, i)) { - int nid = acpi_map_pxm_to_node(i); - node_set_online(nid); - } - } - BUG_ON(num_online_nodes() == 0); - - /* set cnode id in memory chunk structure */ - for (i = 0; i < num_memory_chunks; i++) - node_memory_chunk[i].nid = pxm_to_node(node_memory_chunk[i].pxm); - - printk(KERN_DEBUG "pxm bitmap: "); - for (i = 0; i < sizeof(pxm_bitmap); i++) { - printk(KERN_CONT "%02x ", pxm_bitmap[i]); - } - printk(KERN_CONT "\n"); - printk(KERN_DEBUG "Number of logical nodes in system = %d\n", - num_online_nodes()); - printk(KERN_DEBUG "Number of memory chunks in system = %d\n", - num_memory_chunks); - - for (i = 0; i < MAX_LOCAL_APIC; i++) - set_apicid_to_node(i, pxm_to_node(apicid_to_pxm[i])); - - for (j = 0; j < num_memory_chunks; j++){ - struct node_memory_chunk_s * chunk = &node_memory_chunk[j]; - printk(KERN_DEBUG - "chunk %d nid %d start_pfn %08lx end_pfn %08lx\n", - j, chunk->nid, chunk->start_pfn, chunk->end_pfn); - if (node_read_chunk(chunk->nid, chunk)) - continue; - - memblock_x86_register_active_regions(chunk->nid, chunk->start_pfn, - min(chunk->end_pfn, max_pfn)); - } - /* for out of order entries in SRAT */ - sort_node_map(); - - for_each_online_node(nid) { - unsigned long start = node_start_pfn[nid]; - unsigned long end = min(node_end_pfn[nid], max_pfn); - - memory_present(nid, start, end); - node_remap_size[nid] = node_memmap_size_bytes(nid, start, end); - } - return 1; -out_fail: - printk(KERN_DEBUG "failed to get NUMA memory information from SRAT" - " table\n"); - return 0; -} diff --git a/trunk/arch/x86/oprofile/backtrace.c b/trunk/arch/x86/oprofile/backtrace.c index 2d49d4e19a36..a5b64ab4cd6e 100644 --- a/trunk/arch/x86/oprofile/backtrace.c +++ b/trunk/arch/x86/oprofile/backtrace.c @@ -16,17 +16,6 @@ #include #include -static void backtrace_warning_symbol(void *data, char *msg, - unsigned long symbol) -{ - /* Ignore warnings */ -} - -static void backtrace_warning(void *data, char *msg) -{ - /* Ignore warnings */ -} - static int backtrace_stack(void *data, char *name) { /* Yes, we want all stacks */ @@ -42,8 +31,6 @@ static void backtrace_address(void *data, unsigned long addr, int reliable) } static struct stacktrace_ops backtrace_ops = { - .warning = backtrace_warning, - .warning_symbol = backtrace_warning_symbol, .stack = backtrace_stack, .address = backtrace_address, .walk_stack = print_context_stack, diff --git a/trunk/arch/x86/pci/xen.c b/trunk/arch/x86/pci/xen.c index e37b407a0ee8..8214724ce54d 100644 --- a/trunk/arch/x86/pci/xen.c +++ b/trunk/arch/x86/pci/xen.c @@ -108,7 +108,8 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) } irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0, (type == PCI_CAP_ID_MSIX) ? - "msi-x" : "msi"); + "msi-x" : "msi", + DOMID_SELF); if (irq < 0) goto error; dev_dbg(&dev->dev, @@ -148,7 +149,8 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0, (type == PCI_CAP_ID_MSIX) ? "pcifront-msi-x" : - "pcifront-msi"); + "pcifront-msi", + DOMID_SELF); if (irq < 0) goto free; i++; @@ -190,9 +192,16 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) list_for_each_entry(msidesc, &dev->msi_list, list) { struct physdev_map_pirq map_irq; + domid_t domid; + + domid = ret = xen_find_device_domain_owner(dev); + /* N.B. Casting int's -ENODEV to uint16_t results in 0xFFED, + * hence check ret value for < 0. 
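The comment just above explains why the Xen MSI code checks the signed return value before using it as a domid: assigning a negative errno to a 16-bit domid_t silently wraps to a large positive value. A tiny demonstration of that wrap, using ENODEV's Linux value of 19 purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define ENODEV 19	/* Linux value, used here only for illustration */

int main(void)
{
	int ret = -ENODEV;		/* e.g. "no explicit owner found" */
	uint16_t domid = ret;		/* 16-bit wrap: 0x10000 - 19 = 0xFFED */

	/* Testing 'domid' for a negative value can never succeed, so the
	 * error must be caught on the signed 'ret' before the assignment. */
	printf("ret=%d domid=0x%04x\n", ret, domid);
	if (ret < 0)
		printf("error path taken, fall back to DOMID_SELF\n");
	return 0;
}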
*/ + if (ret < 0) + domid = DOMID_SELF; memset(&map_irq, 0, sizeof(map_irq)); - map_irq.domid = DOMID_SELF; + map_irq.domid = domid; map_irq.type = MAP_PIRQ_TYPE_MSI; map_irq.index = -1; map_irq.pirq = -1; @@ -215,14 +224,16 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); if (ret) { - dev_warn(&dev->dev, "xen map irq failed %d\n", ret); + dev_warn(&dev->dev, "xen map irq failed %d for %d domain\n", + ret, domid); goto out; } ret = xen_bind_pirq_msi_to_irq(dev, msidesc, map_irq.pirq, map_irq.index, (type == PCI_CAP_ID_MSIX) ? - "msi-x" : "msi"); + "msi-x" : "msi", + domid); if (ret < 0) goto out; } @@ -461,3 +472,78 @@ void __init xen_setup_pirqs(void) } } #endif + +#ifdef CONFIG_XEN_DOM0 +struct xen_device_domain_owner { + domid_t domain; + struct pci_dev *dev; + struct list_head list; +}; + +static DEFINE_SPINLOCK(dev_domain_list_spinlock); +static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list); + +static struct xen_device_domain_owner *find_device(struct pci_dev *dev) +{ + struct xen_device_domain_owner *owner; + + list_for_each_entry(owner, &dev_domain_list, list) { + if (owner->dev == dev) + return owner; + } + return NULL; +} + +int xen_find_device_domain_owner(struct pci_dev *dev) +{ + struct xen_device_domain_owner *owner; + int domain = -ENODEV; + + spin_lock(&dev_domain_list_spinlock); + owner = find_device(dev); + if (owner) + domain = owner->domain; + spin_unlock(&dev_domain_list_spinlock); + return domain; +} +EXPORT_SYMBOL_GPL(xen_find_device_domain_owner); + +int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain) +{ + struct xen_device_domain_owner *owner; + + owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL); + if (!owner) + return -ENODEV; + + spin_lock(&dev_domain_list_spinlock); + if (find_device(dev)) { + spin_unlock(&dev_domain_list_spinlock); + kfree(owner); + return -EEXIST; + } + owner->domain = domain; + owner->dev = dev; + list_add_tail(&owner->list, &dev_domain_list); + spin_unlock(&dev_domain_list_spinlock); + return 0; +} +EXPORT_SYMBOL_GPL(xen_register_device_domain_owner); + +int xen_unregister_device_domain_owner(struct pci_dev *dev) +{ + struct xen_device_domain_owner *owner; + + spin_lock(&dev_domain_list_spinlock); + owner = find_device(dev); + if (!owner) { + spin_unlock(&dev_domain_list_spinlock); + return -ENODEV; + } + list_del(&owner->list); + spin_unlock(&dev_domain_list_spinlock); + kfree(owner); + return 0; +} +EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner); +#endif diff --git a/trunk/arch/x86/platform/ce4100/falconfalls.dts b/trunk/arch/x86/platform/ce4100/falconfalls.dts index dc701ea58546..e70be38ce039 100644 --- a/trunk/arch/x86/platform/ce4100/falconfalls.dts +++ b/trunk/arch/x86/platform/ce4100/falconfalls.dts @@ -74,6 +74,7 @@ compatible = "intel,ce4100-pci", "pci"; device_type = "pci"; bus-range = <1 1>; + reg = <0x0800 0x0 0x0 0x0 0x0>; ranges = <0x2000000 0 0xdffe0000 0x2000000 0 0xdffe0000 0 0x1000>; interrupt-parent = <&ioapic2>; @@ -346,7 +347,7 @@ "pciclass0c03"; reg = <0x16800 0x0 0x0 0x0 0x0>; - interrupts = <22 3>; + interrupts = <22 1>; }; usb@d,1 { @@ -356,7 +357,7 @@ "pciclass0c03"; reg = <0x16900 0x0 0x0 0x0 0x0>; - interrupts = <22 3>; + interrupts = <22 1>; }; sata@e,0 { @@ -366,7 +367,7 @@ "pciclass0106"; reg = <0x17000 0x0 0x0 0x0 0x0>; - interrupts = <23 3>; + interrupts = <23 1>; }; flash@f,0 { @@ -412,6 +413,7 @@ #address-cells = <2>; #size-cells = <1>; compatible = 
"isa"; + reg = <0xf800 0x0 0x0 0x0 0x0>; ranges = <1 0 0 0 0 0x100>; rtc@70 { diff --git a/trunk/arch/x86/platform/efi/efi.c b/trunk/arch/x86/platform/efi/efi.c index 0fe27d7c6258..b30aa26a8df2 100644 --- a/trunk/arch/x86/platform/efi/efi.c +++ b/trunk/arch/x86/platform/efi/efi.c @@ -145,17 +145,6 @@ static void virt_efi_reset_system(int reset_type, data_size, data); } -static efi_status_t virt_efi_set_virtual_address_map( - unsigned long memory_map_size, - unsigned long descriptor_size, - u32 descriptor_version, - efi_memory_desc_t *virtual_map) -{ - return efi_call_virt4(set_virtual_address_map, - memory_map_size, descriptor_size, - descriptor_version, virtual_map); -} - static efi_status_t __init phys_efi_set_virtual_address_map( unsigned long memory_map_size, unsigned long descriptor_size, @@ -468,11 +457,25 @@ void __init efi_init(void) #endif } +void __init efi_set_executable(efi_memory_desc_t *md, bool executable) +{ + u64 addr, npages; + + addr = md->virt_addr; + npages = md->num_pages; + + memrange_efi_to_native(&addr, &npages); + + if (executable) + set_memory_x(addr, npages); + else + set_memory_nx(addr, npages); +} + static void __init runtime_code_page_mkexec(void) { efi_memory_desc_t *md; void *p; - u64 addr, npages; /* Make EFI runtime service code area executable */ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { @@ -481,10 +484,7 @@ static void __init runtime_code_page_mkexec(void) if (md->type != EFI_RUNTIME_SERVICES_CODE) continue; - addr = md->virt_addr; - npages = md->num_pages; - memrange_efi_to_native(&addr, &npages); - set_memory_x(addr, npages); + efi_set_executable(md, true); } } @@ -498,13 +498,42 @@ static void __init runtime_code_page_mkexec(void) */ void __init efi_enter_virtual_mode(void) { - efi_memory_desc_t *md; + efi_memory_desc_t *md, *prev_md = NULL; efi_status_t status; unsigned long size; u64 end, systab, addr, npages, end_pfn; - void *p, *va; + void *p, *va, *new_memmap = NULL; + int count = 0; efi.systab = NULL; + + /* Merge contiguous regions of the same type and attribute */ + for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { + u64 prev_size; + md = p; + + if (!prev_md) { + prev_md = md; + continue; + } + + if (prev_md->type != md->type || + prev_md->attribute != md->attribute) { + prev_md = md; + continue; + } + + prev_size = prev_md->num_pages << EFI_PAGE_SHIFT; + + if (md->phys_addr == (prev_md->phys_addr + prev_size)) { + prev_md->num_pages += md->num_pages; + md->type = EFI_RESERVED_TYPE; + md->attribute = 0; + continue; + } + prev_md = md; + } + for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { md = p; if (!(md->attribute & EFI_MEMORY_RUNTIME)) @@ -541,15 +570,21 @@ void __init efi_enter_virtual_mode(void) systab += md->virt_addr - md->phys_addr; efi.systab = (efi_system_table_t *) (unsigned long) systab; } + new_memmap = krealloc(new_memmap, + (count + 1) * memmap.desc_size, + GFP_KERNEL); + memcpy(new_memmap + (count * memmap.desc_size), md, + memmap.desc_size); + count++; } BUG_ON(!efi.systab); status = phys_efi_set_virtual_address_map( - memmap.desc_size * memmap.nr_map, + memmap.desc_size * count, memmap.desc_size, memmap.desc_version, - memmap.phys_map); + (efi_memory_desc_t *)__pa(new_memmap)); if (status != EFI_SUCCESS) { printk(KERN_ALERT "Unable to switch EFI into virtual mode " @@ -572,11 +607,12 @@ void __init efi_enter_virtual_mode(void) efi.set_variable = virt_efi_set_variable; efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count; efi.reset_system = 
virt_efi_reset_system; - efi.set_virtual_address_map = virt_efi_set_virtual_address_map; + efi.set_virtual_address_map = NULL; if (__supported_pte_mask & _PAGE_NX) runtime_code_page_mkexec(); early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size); memmap.map = NULL; + kfree(new_memmap); } /* diff --git a/trunk/arch/x86/platform/efi/efi_64.c b/trunk/arch/x86/platform/efi/efi_64.c index ac0621a7ac3d..2649426a7905 100644 --- a/trunk/arch/x86/platform/efi/efi_64.c +++ b/trunk/arch/x86/platform/efi/efi_64.c @@ -41,22 +41,7 @@ static pgd_t save_pgd __initdata; static unsigned long efi_flags __initdata; -static void __init early_mapping_set_exec(unsigned long start, - unsigned long end, - int executable) -{ - unsigned long num_pages; - - start &= PMD_MASK; - end = (end + PMD_SIZE - 1) & PMD_MASK; - num_pages = (end - start) >> PAGE_SHIFT; - if (executable) - set_memory_x((unsigned long)__va(start), num_pages); - else - set_memory_nx((unsigned long)__va(start), num_pages); -} - -static void __init early_runtime_code_mapping_set_exec(int executable) +static void __init early_code_mapping_set_exec(int executable) { efi_memory_desc_t *md; void *p; @@ -67,11 +52,8 @@ static void __init early_runtime_code_mapping_set_exec(int executable) /* Make EFI runtime service code area executable */ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { md = p; - if (md->type == EFI_RUNTIME_SERVICES_CODE) { - unsigned long end; - end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT); - early_mapping_set_exec(md->phys_addr, end, executable); - } + if (md->type == EFI_RUNTIME_SERVICES_CODE) + efi_set_executable(md, executable); } } @@ -79,7 +61,7 @@ void __init efi_call_phys_prelog(void) { unsigned long vaddress; - early_runtime_code_mapping_set_exec(1); + early_code_mapping_set_exec(1); local_irq_save(efi_flags); vaddress = (unsigned long)__va(0x0UL); save_pgd = *pgd_offset_k(0x0UL); @@ -95,7 +77,7 @@ void __init efi_call_phys_epilog(void) set_pgd(pgd_offset_k(0x0UL), save_pgd); __flush_tlb_all(); local_irq_restore(efi_flags); - early_runtime_code_mapping_set_exec(0); + early_code_mapping_set_exec(0); } void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size, @@ -107,8 +89,10 @@ void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size, return ioremap(phys_addr, size); last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size); - if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) - return NULL; + if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) { + unsigned long top = last_map_pfn << PAGE_SHIFT; + efi_ioremap(top, size - (top - phys_addr), type); + } return (void __iomem *)__va(phys_addr); } diff --git a/trunk/arch/x86/platform/mrst/mrst.c b/trunk/arch/x86/platform/mrst/mrst.c index 5c0207bf959b..7000e74b3087 100644 --- a/trunk/arch/x86/platform/mrst/mrst.c +++ b/trunk/arch/x86/platform/mrst/mrst.c @@ -97,11 +97,11 @@ static int __init sfi_parse_mtmr(struct sfi_table_header *table) pentry->freq_hz, pentry->irq); if (!pentry->irq) continue; - mp_irq.type = MP_IOAPIC; + mp_irq.type = MP_INTSRC; mp_irq.irqtype = mp_INT; /* triggering mode edge bit 2-3, active high polarity bit 0-1 */ mp_irq.irqflag = 5; - mp_irq.srcbus = 0; + mp_irq.srcbus = MP_BUS_ISA; mp_irq.srcbusirq = pentry->irq; /* IRQ */ mp_irq.dstapic = MP_APIC_ALL; mp_irq.dstirq = pentry->irq; @@ -168,10 +168,10 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table) for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) { pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n", 
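Returning briefly to the efi_enter_virtual_mode() change a little earlier: it coalesces physically contiguous EFI memory descriptors of the same type and attributes before handing the map to the firmware. The sketch below shows the merging idea on a simplified descriptor array; it compacts the array in place, whereas the kernel hunk instead marks absorbed entries as reserved, and the 4 KiB page size and sample entries are assumptions for illustration.

#include <stdio.h>

struct desc { unsigned long long phys, npages; int type; };

/* Coalesce physically contiguous entries of the same type. */
static int merge(struct desc *d, int n)
{
	int i, out = 0;

	for (i = 1; i < n; i++) {
		struct desc *prev = &d[out], *cur = &d[i];

		if (cur->type == prev->type &&
		    cur->phys == prev->phys + (prev->npages << 12)) {
			prev->npages += cur->npages;	/* extend previous */
			continue;
		}
		d[++out] = *cur;			/* keep as new entry */
	}
	return out + 1;
}

int main(void)
{
	struct desc map[] = {
		{ 0x1000, 4, 3 }, { 0x5000, 2, 3 }, { 0x9000, 1, 4 },
	};
	int i, n = merge(map, 3);

	for (i = 0; i < n; i++)
		printf("phys=0x%llx npages=%llu type=%d\n",
		       map[i].phys, map[i].npages, map[i].type);
	return 0;
}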
totallen, (u32)pentry->phys_addr, pentry->irq); - mp_irq.type = MP_IOAPIC; + mp_irq.type = MP_INTSRC; mp_irq.irqtype = mp_INT; mp_irq.irqflag = 0xf; /* level trigger and active low */ - mp_irq.srcbus = 0; + mp_irq.srcbus = MP_BUS_ISA; mp_irq.srcbusirq = pentry->irq; /* IRQ */ mp_irq.dstapic = MP_APIC_ALL; mp_irq.dstirq = pentry->irq; @@ -194,7 +194,7 @@ static unsigned long __init mrst_calibrate_tsc(void) return 0; } -void __init mrst_time_init(void) +static void __init mrst_time_init(void) { sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr); switch (mrst_timer_options) { @@ -216,7 +216,7 @@ void __init mrst_time_init(void) apbt_time_init(); } -void __cpuinit mrst_arch_setup(void) +static void __cpuinit mrst_arch_setup(void) { if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27) __mrst_cpu_chip = MRST_CPU_CHIP_PENWELL; @@ -282,7 +282,7 @@ void __init x86_mrst_early_setup(void) /* Avoid searching for BIOS MP tables */ x86_init.mpparse.find_smp_config = x86_init_noop; x86_init.mpparse.get_smp_config = x86_init_uint_noop; - + set_bit(MP_BUS_ISA, mp_bus_not_pci); } /* diff --git a/trunk/arch/x86/platform/olpc/Makefile b/trunk/arch/x86/platform/olpc/Makefile index c2a8cab65e5d..81c5e2165c24 100644 --- a/trunk/arch/x86/platform/olpc/Makefile +++ b/trunk/arch/x86/platform/olpc/Makefile @@ -1,4 +1,2 @@ -obj-$(CONFIG_OLPC) += olpc.o +obj-$(CONFIG_OLPC) += olpc.o olpc_ofw.o olpc_dt.o obj-$(CONFIG_OLPC_XO1) += olpc-xo1.o -obj-$(CONFIG_OLPC) += olpc_ofw.o -obj-$(CONFIG_OF_PROMTREE) += olpc_dt.o diff --git a/trunk/arch/x86/platform/olpc/olpc.c b/trunk/arch/x86/platform/olpc/olpc.c index edaf3fe8dc5e..0060fd59ea00 100644 --- a/trunk/arch/x86/platform/olpc/olpc.c +++ b/trunk/arch/x86/platform/olpc/olpc.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -187,41 +188,43 @@ int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen, } EXPORT_SYMBOL_GPL(olpc_ec_cmd); -static bool __init check_ofw_architecture(void) +static bool __init check_ofw_architecture(struct device_node *root) { - size_t propsize; - char olpc_arch[5]; - const void *args[] = { NULL, "architecture", olpc_arch, (void *)5 }; - void *res[] = { &propsize }; + const char *olpc_arch; + int propsize; - if (olpc_ofw("getprop", args, res)) { - printk(KERN_ERR "ofw: getprop call failed!\n"); - return false; - } + olpc_arch = of_get_property(root, "architecture", &propsize); return propsize == 5 && strncmp("OLPC", olpc_arch, 5) == 0; } -static u32 __init get_board_revision(void) +static u32 __init get_board_revision(struct device_node *root) { - size_t propsize; - __be32 rev; - const void *args[] = { NULL, "board-revision-int", &rev, (void *)4 }; - void *res[] = { &propsize }; - - if (olpc_ofw("getprop", args, res) || propsize != 4) { - printk(KERN_ERR "ofw: getprop call failed!\n"); - return cpu_to_be32(0); - } - return be32_to_cpu(rev); + int propsize; + const __be32 *rev; + + rev = of_get_property(root, "board-revision-int", &propsize); + if (propsize != 4) + return 0; + + return be32_to_cpu(*rev); } static bool __init platform_detect(void) { - if (!check_ofw_architecture()) + struct device_node *root = of_find_node_by_path("/"); + bool success; + + if (!root) return false; - olpc_platform_info.flags |= OLPC_F_PRESENT; - olpc_platform_info.boardrev = get_board_revision(); - return true; + + success = check_ofw_architecture(root); + if (success) { + olpc_platform_info.boardrev = get_board_revision(root); + olpc_platform_info.flags |= OLPC_F_PRESENT; + } + + of_node_put(root); + return 
success; } static int __init add_xo1_platform_devices(void) diff --git a/trunk/arch/x86/platform/olpc/olpc_dt.c b/trunk/arch/x86/platform/olpc/olpc_dt.c index 044bda5b3174..d39f63d017d2 100644 --- a/trunk/arch/x86/platform/olpc/olpc_dt.c +++ b/trunk/arch/x86/platform/olpc/olpc_dt.c @@ -19,7 +19,9 @@ #include #include #include +#include #include +#include #include static phandle __init olpc_dt_getsibling(phandle node) @@ -180,3 +182,20 @@ void __init olpc_dt_build_devicetree(void) pr_info("PROM DT: Built device tree with %u bytes of memory.\n", prom_early_allocated); } + +/* A list of DT node/bus matches that we want to expose as platform devices */ +static struct of_device_id __initdata of_ids[] = { + { .compatible = "olpc,xo1-battery" }, + { .compatible = "olpc,xo1-dcon" }, + { .compatible = "olpc,xo1-rtc" }, + {}, +}; + +static int __init olpc_create_platform_devices(void) +{ + if (machine_is_olpc()) + return of_platform_bus_probe(NULL, of_ids, NULL); + else + return 0; +} +device_initcall(olpc_create_platform_devices); diff --git a/trunk/arch/x86/platform/uv/tlb_uv.c b/trunk/arch/x86/platform/uv/tlb_uv.c index 7cb6424317f6..c58e0ea39ef5 100644 --- a/trunk/arch/x86/platform/uv/tlb_uv.c +++ b/trunk/arch/x86/platform/uv/tlb_uv.c @@ -699,16 +699,17 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long va, unsigned int cpu) { - int tcpu; - int uvhub; int locals = 0; int remotes = 0; int hubs = 0; + int tcpu; + int tpnode; struct bau_desc *bau_desc; struct cpumask *flush_mask; struct ptc_stats *stat; struct bau_control *bcp; struct bau_control *tbcp; + struct hub_and_pnode *hpp; /* kernel was booted 'nobau' */ if (nobau) @@ -750,11 +751,18 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu; bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); - /* cpu statistics */ for_each_cpu(tcpu, flush_mask) { - uvhub = uv_cpu_to_blade_id(tcpu); - bau_uvhub_set(uvhub, &bau_desc->distribution); - if (uvhub == bcp->uvhub) + /* + * The distribution vector is a bit map of pnodes, relative + * to the partition base pnode (and the partition base nasid + * in the header). + * Translate cpu to pnode and hub using an array stored + * in local memory. + */ + hpp = &bcp->socket_master->target_hub_and_pnode[tcpu]; + tpnode = hpp->pnode - bcp->partition_base_pnode; + bau_uvhub_set(tpnode, &bau_desc->distribution); + if (hpp->uvhub == bcp->uvhub) locals++; else remotes++; @@ -855,7 +863,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs) * an interrupt, but causes an error message to be returned to * the sender. */ -static void uv_enable_timeouts(void) +static void __init uv_enable_timeouts(void) { int uvhub; int nuvhubs; @@ -1326,10 +1334,10 @@ static int __init uv_ptc_init(void) } /* - * initialize the sending side's sending buffers + * Initialize the sending side's sending buffers. 
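The tlb_uv change above builds the BAU distribution vector by translating each target CPU to a pnode through a precomputed per-socket array and setting the bit relative to the partition base pnode. A small sketch of that translation, with an invented cpu-to-pnode table, base pnode, and target list:

#include <stdio.h>

#define NR_CPUS 8

/* Invented cpu -> pnode mapping; in the kernel this lookup comes from the
 * per-socket target_hub_and_pnode[] array built in uv_init_per_cpu(). */
static int cpu_pnode[NR_CPUS] = { 4, 4, 5, 5, 6, 6, 7, 7 };

int main(void)
{
	unsigned long distribution = 0;	/* bit i => pnode (base_pnode + i) */
	int base_pnode = 4;		/* lowest pnode in this partition */
	int targets[] = { 2, 5 };	/* CPUs to flush, chosen arbitrarily */
	int i;

	/* Translate each target cpu to a pnode relative to the partition
	 * base, mirroring the new uv_flush_tlb_others() loop. */
	for (i = 0; i < 2; i++)
		distribution |= 1UL << (cpu_pnode[targets[i]] - base_pnode);

	printf("distribution bitmap: 0x%lx\n", distribution);
	return 0;
}

Keeping the vector relative to the partition base is what lets the bitmap stay small even when system-wide pnode numbers are large or strided.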
*/ static void -uv_activation_descriptor_init(int node, int pnode) +uv_activation_descriptor_init(int node, int pnode, int base_pnode) { int i; int cpu; @@ -1352,11 +1360,11 @@ uv_activation_descriptor_init(int node, int pnode) n = pa >> uv_nshift; m = pa & uv_mmask; + /* the 14-bit pnode */ uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, (n << UV_DESC_BASE_PNODE_SHIFT | m)); - /* - * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each + * Initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each * cpu even though we only use the first one; one descriptor can * describe a broadcast to 256 uv hubs. */ @@ -1365,12 +1373,13 @@ uv_activation_descriptor_init(int node, int pnode) memset(bd2, 0, sizeof(struct bau_desc)); bd2->header.sw_ack_flag = 1; /* - * base_dest_nodeid is the nasid of the first uvhub - * in the partition. The bit map will indicate uvhub numbers, - * which are 0-N in a partition. Pnodes are unique system-wide. + * The base_dest_nasid set in the message header is the nasid + * of the first uvhub in the partition. The bit map will + * indicate destination pnode numbers relative to that base. + * They may not be consecutive if nasid striding is being used. */ - bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode); - bd2->header.dest_subnodeid = 0x10; /* the LB */ + bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode); + bd2->header.dest_subnodeid = UV_LB_SUBNODEID; bd2->header.command = UV_NET_ENDPOINT_INTD; bd2->header.int_both = 1; /* @@ -1442,7 +1451,7 @@ uv_payload_queue_init(int node, int pnode) /* * Initialization of each UV hub's structures */ -static void __init uv_init_uvhub(int uvhub, int vector) +static void __init uv_init_uvhub(int uvhub, int vector, int base_pnode) { int node; int pnode; @@ -1450,11 +1459,11 @@ static void __init uv_init_uvhub(int uvhub, int vector) node = uvhub_to_first_node(uvhub); pnode = uv_blade_to_pnode(uvhub); - uv_activation_descriptor_init(node, pnode); + uv_activation_descriptor_init(node, pnode, base_pnode); uv_payload_queue_init(node, pnode); /* - * the below initialization can't be in firmware because the - * messaging IRQ will be determined by the OS + * The below initialization can't be in firmware because the + * messaging IRQ will be determined by the OS. 
*/ apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits; uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, @@ -1491,10 +1500,11 @@ calculate_destination_timeout(void) /* * initialize the bau_control structure for each cpu */ -static int __init uv_init_per_cpu(int nuvhubs) +static int __init uv_init_per_cpu(int nuvhubs, int base_part_pnode) { int i; int cpu; + int tcpu; int pnode; int uvhub; int have_hmaster; @@ -1528,6 +1538,15 @@ static int __init uv_init_per_cpu(int nuvhubs) bcp = &per_cpu(bau_control, cpu); memset(bcp, 0, sizeof(struct bau_control)); pnode = uv_cpu_hub_info(cpu)->pnode; + if ((pnode - base_part_pnode) >= UV_DISTRIBUTION_SIZE) { + printk(KERN_EMERG + "cpu %d pnode %d-%d beyond %d; BAU disabled\n", + cpu, pnode, base_part_pnode, + UV_DISTRIBUTION_SIZE); + return 1; + } + bcp->osnode = cpu_to_node(cpu); + bcp->partition_base_pnode = uv_partition_base_pnode; uvhub = uv_cpu_hub_info(cpu)->numa_blade_id; *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8)); bdp = &uvhub_descs[uvhub]; @@ -1536,7 +1555,7 @@ static int __init uv_init_per_cpu(int nuvhubs) bdp->pnode = pnode; /* kludge: 'assuming' one node per socket, and assuming that disabling a socket just leaves a gap in node numbers */ - socket = (cpu_to_node(cpu) & 1); + socket = bcp->osnode & 1; bdp->socket_mask |= (1 << socket); sdp = &bdp->socket[socket]; sdp->cpu_number[sdp->num_cpus] = cpu; @@ -1585,6 +1604,20 @@ static int __init uv_init_per_cpu(int nuvhubs) nextsocket: socket++; socket_mask = (socket_mask >> 1); + /* each socket gets a local array of pnodes/hubs */ + bcp = smaster; + bcp->target_hub_and_pnode = kmalloc_node( + sizeof(struct hub_and_pnode) * + num_possible_cpus(), GFP_KERNEL, bcp->osnode); + memset(bcp->target_hub_and_pnode, 0, + sizeof(struct hub_and_pnode) * + num_possible_cpus()); + for_each_present_cpu(tcpu) { + bcp->target_hub_and_pnode[tcpu].pnode = + uv_cpu_hub_info(tcpu)->pnode; + bcp->target_hub_and_pnode[tcpu].uvhub = + uv_cpu_hub_info(tcpu)->numa_blade_id; + } } } kfree(uvhub_descs); @@ -1637,21 +1670,22 @@ static int __init uv_bau_init(void) spin_lock_init(&disable_lock); congested_cycles = microsec_2_cycles(congested_response_us); - if (uv_init_per_cpu(nuvhubs)) { - nobau = 1; - return 0; - } - uv_partition_base_pnode = 0x7fffffff; - for (uvhub = 0; uvhub < nuvhubs; uvhub++) + for (uvhub = 0; uvhub < nuvhubs; uvhub++) { if (uv_blade_nr_possible_cpus(uvhub) && (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode)) uv_partition_base_pnode = uv_blade_to_pnode(uvhub); + } + + if (uv_init_per_cpu(nuvhubs, uv_partition_base_pnode)) { + nobau = 1; + return 0; + } vector = UV_BAU_MESSAGE; for_each_possible_blade(uvhub) if (uv_blade_nr_possible_cpus(uvhub)) - uv_init_uvhub(uvhub, vector); + uv_init_uvhub(uvhub, vector, uv_partition_base_pnode); uv_enable_timeouts(); alloc_intr_gate(vector, uv_bau_message_intr1); diff --git a/trunk/arch/x86/platform/uv/uv_time.c b/trunk/arch/x86/platform/uv/uv_time.c index 9daf5d1af9f1..0eb90184515f 100644 --- a/trunk/arch/x86/platform/uv/uv_time.c +++ b/trunk/arch/x86/platform/uv/uv_time.c @@ -40,7 +40,6 @@ static struct clocksource clocksource_uv = { .rating = 400, .read = uv_read_rtc, .mask = (cycle_t)UVH_RTC_REAL_TIME_CLOCK_MASK, - .shift = 10, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -372,14 +371,11 @@ static __init int uv_rtc_setup_clock(void) if (!is_uv_system()) return -ENODEV; - clocksource_uv.mult = clocksource_hz2mult(sn_rtc_cycles_per_second, - clocksource_uv.shift); - /* If single blade, prefer tsc */ if (uv_num_possible_blades() == 1) 
clocksource_uv.rating = 250; - rc = clocksource_register(&clocksource_uv); + rc = clocksource_register_hz(&clocksource_uv, sn_rtc_cycles_per_second); if (rc) printk(KERN_INFO "UV RTC clocksource failed rc %d\n", rc); else diff --git a/trunk/arch/x86/xen/Kconfig b/trunk/arch/x86/xen/Kconfig index 1c7121ba18ff..5cc821cb2e09 100644 --- a/trunk/arch/x86/xen/Kconfig +++ b/trunk/arch/x86/xen/Kconfig @@ -39,6 +39,7 @@ config XEN_MAX_DOMAIN_MEMORY config XEN_SAVE_RESTORE bool depends on XEN + select HIBERNATE_CALLBACKS default y config XEN_DEBUG_FS diff --git a/trunk/arch/x86/xen/enlighten.c b/trunk/arch/x86/xen/enlighten.c index 49dbd78ec3cb..dd7b88f2ec7a 100644 --- a/trunk/arch/x86/xen/enlighten.c +++ b/trunk/arch/x86/xen/enlighten.c @@ -235,9 +235,10 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx, *dx &= maskedx; } -static __init void xen_init_cpuid_mask(void) +static void __init xen_init_cpuid_mask(void) { unsigned int ax, bx, cx, dx; + unsigned int xsave_mask; cpuid_leaf1_edx_mask = ~((1 << X86_FEATURE_MCE) | /* disable MCE */ @@ -249,24 +250,16 @@ static __init void xen_init_cpuid_mask(void) cpuid_leaf1_edx_mask &= ~((1 << X86_FEATURE_APIC) | /* disable local APIC */ (1 << X86_FEATURE_ACPI)); /* disable ACPI */ - ax = 1; - cx = 0; xen_cpuid(&ax, &bx, &cx, &dx); - /* cpuid claims we support xsave; try enabling it to see what happens */ - if (cx & (1 << (X86_FEATURE_XSAVE % 32))) { - unsigned long cr4; - - set_in_cr4(X86_CR4_OSXSAVE); - - cr4 = read_cr4(); + xsave_mask = + (1 << (X86_FEATURE_XSAVE % 32)) | + (1 << (X86_FEATURE_OSXSAVE % 32)); - if ((cr4 & X86_CR4_OSXSAVE) == 0) - cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32)); - - clear_in_cr4(X86_CR4_OSXSAVE); - } + /* Xen will set CR4.OSXSAVE if supported and not disabled by force */ + if ((cx & xsave_mask) != xsave_mask) + cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */ } static void xen_set_debugreg(int reg, unsigned long val) @@ -407,7 +400,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr) /* * load_gdt for early boot, when the gdt is only mapped once */ -static __init void xen_load_gdt_boot(const struct desc_ptr *dtr) +static void __init xen_load_gdt_boot(const struct desc_ptr *dtr) { unsigned long va = dtr->address; unsigned int size = dtr->size + 1; @@ -669,7 +662,7 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry, * Version of write_gdt_entry for use at early boot-time needed to * update an entry as simply as possible. 
*/ -static __init void xen_write_gdt_entry_boot(struct desc_struct *dt, int entry, +static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry, const void *desc, int type) { switch (type) { @@ -940,18 +933,18 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf, return ret; } -static const struct pv_info xen_info __initdata = { +static const struct pv_info xen_info __initconst = { .paravirt_enabled = 1, .shared_kernel_pmd = 0, .name = "Xen", }; -static const struct pv_init_ops xen_init_ops __initdata = { +static const struct pv_init_ops xen_init_ops __initconst = { .patch = xen_patch, }; -static const struct pv_cpu_ops xen_cpu_ops __initdata = { +static const struct pv_cpu_ops xen_cpu_ops __initconst = { .cpuid = xen_cpuid, .set_debugreg = xen_set_debugreg, @@ -1011,7 +1004,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = { .end_context_switch = xen_end_context_switch, }; -static const struct pv_apic_ops xen_apic_ops __initdata = { +static const struct pv_apic_ops xen_apic_ops __initconst = { #ifdef CONFIG_X86_LOCAL_APIC .startup_ipi_hook = paravirt_nop, #endif @@ -1062,7 +1055,7 @@ int xen_panic_handler_init(void) return 0; } -static const struct machine_ops __initdata xen_machine_ops = { +static const struct machine_ops xen_machine_ops __initconst = { .restart = xen_restart, .halt = xen_machine_halt, .power_off = xen_machine_halt, @@ -1339,7 +1332,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self, return NOTIFY_OK; } -static struct notifier_block __cpuinitdata xen_hvm_cpu_notifier = { +static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = { .notifier_call = xen_hvm_cpu_notify, }; @@ -1388,7 +1381,7 @@ bool xen_hvm_need_lapic(void) } EXPORT_SYMBOL_GPL(xen_hvm_need_lapic); -const __refconst struct hypervisor_x86 x86_hyper_xen_hvm = { +const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = { .name = "Xen HVM", .detect = xen_hvm_platform, .init_platform = xen_hvm_guest_init, diff --git a/trunk/arch/x86/xen/irq.c b/trunk/arch/x86/xen/irq.c index 6a6fe8939645..8bbb465b6f0a 100644 --- a/trunk/arch/x86/xen/irq.c +++ b/trunk/arch/x86/xen/irq.c @@ -113,7 +113,7 @@ static void xen_halt(void) xen_safe_halt(); } -static const struct pv_irq_ops xen_irq_ops __initdata = { +static const struct pv_irq_ops xen_irq_ops __initconst = { .save_fl = PV_CALLEE_SAVE(xen_save_fl), .restore_fl = PV_CALLEE_SAVE(xen_restore_fl), .irq_disable = PV_CALLEE_SAVE(xen_irq_disable), diff --git a/trunk/arch/x86/xen/mmu.c b/trunk/arch/x86/xen/mmu.c index c82df6c9c0f0..02d752460371 100644 --- a/trunk/arch/x86/xen/mmu.c +++ b/trunk/arch/x86/xen/mmu.c @@ -565,13 +565,13 @@ pte_t xen_make_pte_debug(pteval_t pte) if (io_page && (xen_initial_domain() || addr >= ISA_END_ADDRESS)) { other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT; - WARN(addr != other_addr, + WARN_ONCE(addr != other_addr, "0x%lx is using VM_IO, but it is 0x%lx!\n", (unsigned long)addr, (unsigned long)other_addr); } else { pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP; other_addr = (_pte.pte & PTE_PFN_MASK); - WARN((addr == other_addr) && (!io_page) && (!iomap_set), + WARN_ONCE((addr == other_addr) && (!io_page) && (!iomap_set), "0x%lx is missing VM_IO (and wasn't fixed)!\n", (unsigned long)addr); } @@ -1054,7 +1054,7 @@ void xen_mm_pin_all(void) * that's before we have page structures to store the bits. So do all * the book-keeping now. 
*/ -static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page, +static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page, enum pt_level level) { SetPagePinned(page); @@ -1187,7 +1187,7 @@ static void drop_other_mm_ref(void *info) active_mm = percpu_read(cpu_tlbstate.active_mm); - if (active_mm == mm) + if (active_mm == mm && percpu_read(cpu_tlbstate.state) != TLBSTATE_OK) leave_mm(smp_processor_id()); /* If this cpu still has a stale cr3 reference, then make sure @@ -1271,13 +1271,27 @@ void xen_exit_mmap(struct mm_struct *mm) spin_unlock(&mm->page_table_lock); } -static __init void xen_pagetable_setup_start(pgd_t *base) +static void __init xen_pagetable_setup_start(pgd_t *base) { } +static __init void xen_mapping_pagetable_reserve(u64 start, u64 end) +{ + /* reserve the range used */ + native_pagetable_reserve(start, end); + + /* set as RW the rest */ + printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end, + PFN_PHYS(pgt_buf_top)); + while (end < PFN_PHYS(pgt_buf_top)) { + make_lowmem_page_readwrite(__va(end)); + end += PAGE_SIZE; + } +} + static void xen_post_allocator_init(void); -static __init void xen_pagetable_setup_done(pgd_t *base) +static void __init xen_pagetable_setup_done(pgd_t *base) { xen_setup_shared_info(); xen_post_allocator_init(); @@ -1473,16 +1487,20 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) #endif } -static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) -{ - unsigned long pfn = pte_pfn(pte); - #ifdef CONFIG_X86_32 +static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte) +{ /* If there's an existing pte, then don't allow _PAGE_RW to be set */ if (pte_val_ma(*ptep) & _PAGE_PRESENT) pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & pte_val_ma(pte)); -#endif + + return pte; +} +#else /* CONFIG_X86_64 */ +static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte) +{ + unsigned long pfn = pte_pfn(pte); /* * If the new pfn is within the range of the newly allocated @@ -1491,16 +1509,17 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) * it is RO. */ if (((!is_early_ioremap_ptep(ptep) && - pfn >= pgt_buf_start && pfn < pgt_buf_end)) || + pfn >= pgt_buf_start && pfn < pgt_buf_top)) || (is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1))) pte = pte_wrprotect(pte); return pte; } +#endif /* CONFIG_X86_64 */ /* Init-time set_pte while constructing initial pagetables, which doesn't allow RO pagetable pages to be remapped RW */ -static __init void xen_set_pte_init(pte_t *ptep, pte_t pte) +static void __init xen_set_pte_init(pte_t *ptep, pte_t pte) { pte = mask_rw_pte(ptep, pte); @@ -1518,7 +1537,7 @@ static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn) /* Early in boot, while setting up the initial pagetable, assume everything is pinned. 
*/ -static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) +static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) { #ifdef CONFIG_FLATMEM BUG_ON(mem_map); /* should only be used early */ @@ -1528,7 +1547,7 @@ static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) } /* Used for pmd and pud */ -static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn) +static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn) { #ifdef CONFIG_FLATMEM BUG_ON(mem_map); /* should only be used early */ @@ -1538,13 +1557,13 @@ static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn) /* Early release_pte assumes that all pts are pinned, since there's only init_mm and anything attached to that is pinned. */ -static __init void xen_release_pte_init(unsigned long pfn) +static void __init xen_release_pte_init(unsigned long pfn) { pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); } -static __init void xen_release_pmd_init(unsigned long pfn) +static void __init xen_release_pmd_init(unsigned long pfn) { make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); } @@ -1670,7 +1689,7 @@ static void set_page_prot(void *addr, pgprot_t prot) BUG(); } -static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) +static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) { unsigned pmdidx, pteidx; unsigned ident_pte; @@ -1753,7 +1772,7 @@ static void convert_pfn_mfn(void *v) * of the physical mapping once some sort of allocator has been set * up. */ -__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, +pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) { pud_t *l3; @@ -1824,7 +1843,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD); static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD); -static __init void xen_write_cr3_init(unsigned long cr3) +static void __init xen_write_cr3_init(unsigned long cr3) { unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir)); @@ -1861,7 +1880,7 @@ static __init void xen_write_cr3_init(unsigned long cr3) pv_mmu_ops.write_cr3 = &xen_write_cr3; } -__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, +pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) { pmd_t *kernel_pmd; @@ -1967,7 +1986,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) #endif } -__init void xen_ident_map_ISA(void) +void __init xen_ident_map_ISA(void) { unsigned long pa; @@ -1990,7 +2009,7 @@ __init void xen_ident_map_ISA(void) xen_flush_tlb(); } -static __init void xen_post_allocator_init(void) +static void __init xen_post_allocator_init(void) { #ifdef CONFIG_XEN_DEBUG pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug); @@ -2027,7 +2046,7 @@ static void xen_leave_lazy_mmu(void) preempt_enable(); } -static const struct pv_mmu_ops xen_mmu_ops __initdata = { +static const struct pv_mmu_ops xen_mmu_ops __initconst = { .read_cr2 = xen_read_cr2, .write_cr2 = xen_write_cr2, @@ -2100,6 +2119,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = { void __init xen_init_mmu_ops(void) { + x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve; x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start; x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done; pv_mmu_ops = xen_mmu_ops; diff --git a/trunk/arch/x86/xen/p2m.c 
b/trunk/arch/x86/xen/p2m.c index 141eb0de8b06..58efeb9d5440 100644 --- a/trunk/arch/x86/xen/p2m.c +++ b/trunk/arch/x86/xen/p2m.c @@ -522,11 +522,20 @@ static bool __init __early_alloc_p2m(unsigned long pfn) /* Boundary cross-over for the edges: */ if (idx) { unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE); + unsigned long *mid_mfn_p; p2m_init(p2m); p2m_top[topidx][mididx] = p2m; + /* For save/restore we need the MFN of the P2M saved */ + + mid_mfn_p = p2m_top_mfn_p[topidx]; + WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing), + "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n", + topidx, mididx); + mid_mfn_p[mididx] = virt_to_mfn(p2m); + } return idx != 0; } @@ -549,12 +558,29 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s, pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE) { unsigned topidx = p2m_top_index(pfn); - if (p2m_top[topidx] == p2m_mid_missing) { - unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE); + unsigned long *mid_mfn_p; + unsigned long **mid; + + mid = p2m_top[topidx]; + mid_mfn_p = p2m_top_mfn_p[topidx]; + if (mid == p2m_mid_missing) { + mid = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_mid_init(mid); p2m_top[topidx] = mid; + + BUG_ON(mid_mfn_p != p2m_mid_missing_mfn); + } + /* And the save/restore P2M tables.. */ + if (mid_mfn_p == p2m_mid_missing_mfn) { + mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); + p2m_mid_mfn_init(mid_mfn_p); + + p2m_top_mfn_p[topidx] = mid_mfn_p; + p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p); + /* Note: we don't set mid_mfn_p[mididx] here, + * look in __early_alloc_p2m */ } } @@ -650,7 +676,7 @@ static unsigned long mfn_hash(unsigned long mfn) } /* Add an MFN override for a particular page */ -int m2p_add_override(unsigned long mfn, struct page *page) +int m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte) { unsigned long flags; unsigned long pfn; @@ -662,7 +688,6 @@ int m2p_add_override(unsigned long mfn, struct page *page) if (!PageHighMem(page)) { address = (unsigned long)__va(pfn << PAGE_SHIFT); ptep = lookup_address(address, &level); - if (WARN(ptep == NULL || level != PG_LEVEL_4K, "m2p_add_override: pfn %lx not mapped", pfn)) return -EINVAL; @@ -674,18 +699,17 @@ int m2p_add_override(unsigned long mfn, struct page *page) if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) return -ENOMEM; - if (!PageHighMem(page)) + if (clear_pte && !PageHighMem(page)) /* Just zap old mapping for now */ pte_clear(&init_mm, address, ptep); - spin_lock_irqsave(&m2p_override_lock, flags); list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]); spin_unlock_irqrestore(&m2p_override_lock, flags); return 0; } - -int m2p_remove_override(struct page *page) +EXPORT_SYMBOL_GPL(m2p_add_override); +int m2p_remove_override(struct page *page, bool clear_pte) { unsigned long flags; unsigned long mfn; @@ -713,7 +737,7 @@ int m2p_remove_override(struct page *page) spin_unlock_irqrestore(&m2p_override_lock, flags); set_phys_to_machine(pfn, page->index); - if (!PageHighMem(page)) + if (clear_pte && !PageHighMem(page)) set_pte_at(&init_mm, address, ptep, pfn_pte(pfn, PAGE_KERNEL)); /* No tlb flush necessary because the caller already @@ -721,6 +745,7 @@ int m2p_remove_override(struct page *page) return 0; } +EXPORT_SYMBOL_GPL(m2p_remove_override); struct page *m2p_find_override(unsigned long mfn) { diff --git a/trunk/arch/x86/xen/setup.c b/trunk/arch/x86/xen/setup.c index fa0269a99377..be1a464f6d66 100644 --- a/trunk/arch/x86/xen/setup.c +++ b/trunk/arch/x86/xen/setup.c @@ -50,7 +50,7 @@ phys_addr_t xen_extra_mem_start, xen_extra_mem_size; */
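The m2p_add_override()/m2p_remove_override() pair above now takes a clear_pte flag and is exported with EXPORT_SYMBOL_GPL, so backend modules can record a foreign MFN for a local page without always zapping and restoring the kernel PTE. The sketch below is only an illustration of the new calling convention; the wrapper name example_use_foreign_frame and the chosen headers are assumptions, not code from this patch.

#include <linux/mm.h>
#include <asm/xen/page.h>

/* Illustrative sketch: pair the clear_pte-aware override calls around
 * use of a page that is temporarily backed by a foreign frame. */
static int example_use_foreign_frame(unsigned long foreign_mfn,
				     struct page *page)
{
	int ret;

	/* Record pfn -> foreign MFN; clear_pte == false keeps the PTE. */
	ret = m2p_add_override(foreign_mfn, page, false);
	if (ret)
		return ret;

	/* ... use the page while the override is in place ... */

	/* Drop the override; again leave the kernel PTE untouched. */
	return m2p_remove_override(page, false);
}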
#define EXTRA_MEM_RATIO (10) -static __init void xen_add_extra_mem(unsigned long pages) +static void __init xen_add_extra_mem(unsigned long pages) { unsigned long pfn; @@ -166,7 +166,7 @@ static unsigned long __init xen_set_identity(const struct e820entry *list, if (last > end) continue; - if (entry->type == E820_RAM) { + if ((entry->type == E820_RAM) || (entry->type == E820_UNUSABLE)) { if (start > start_pci) identity += set_phys_range_identity( PFN_UP(start_pci), PFN_DOWN(start)); @@ -227,7 +227,11 @@ char * __init xen_memory_setup(void) memcpy(map_raw, map, sizeof(map)); e820.nr_map = 0; +#ifdef CONFIG_X86_32 xen_extra_mem_start = mem_end; +#else + xen_extra_mem_start = max((1ULL << 32), mem_end); +#endif for (i = 0; i < memmap.nr_entries; i++) { unsigned long long end; @@ -336,7 +340,7 @@ static void __init fiddle_vdso(void) #endif } -static __cpuinit int register_callback(unsigned type, const void *func) +static int __cpuinit register_callback(unsigned type, const void *func) { struct callback_register callback = { .type = type, diff --git a/trunk/arch/x86/xen/smp.c b/trunk/arch/x86/xen/smp.c index 30612441ed99..41038c01de40 100644 --- a/trunk/arch/x86/xen/smp.c +++ b/trunk/arch/x86/xen/smp.c @@ -46,18 +46,17 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id); static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id); /* - * Reschedule call back. Nothing to do, - * all the work is done automatically when - * we return from the interrupt. + * Reschedule call back. */ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id) { inc_irq_stat(irq_resched_count); + scheduler_ipi(); return IRQ_HANDLED; } -static __cpuinit void cpu_bringup(void) +static void __cpuinit cpu_bringup(void) { int cpu = smp_processor_id(); @@ -85,7 +84,7 @@ static __cpuinit void cpu_bringup(void) wmb(); /* make sure everything is out */ } -static __cpuinit void cpu_bringup_and_idle(void) +static void __cpuinit cpu_bringup_and_idle(void) { cpu_bringup(); cpu_idle(); @@ -242,7 +241,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus) } } -static __cpuinit int +static int __cpuinit cpu_initialize_context(unsigned int cpu, struct task_struct *idle) { struct vcpu_guest_context *ctxt; @@ -486,7 +485,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static const struct smp_ops xen_smp_ops __initdata = { +static const struct smp_ops xen_smp_ops __initconst = { .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu, .smp_prepare_cpus = xen_smp_prepare_cpus, .smp_cpus_done = xen_smp_cpus_done, diff --git a/trunk/arch/x86/xen/time.c b/trunk/arch/x86/xen/time.c index 2e2d370a47b1..5158c505bef9 100644 --- a/trunk/arch/x86/xen/time.c +++ b/trunk/arch/x86/xen/time.c @@ -26,8 +26,6 @@ #include "xen-ops.h" -#define XEN_SHIFT 22 - /* Xen may fire a timer up to this many ns early */ #define TIMER_SLOP 100000 #define NS_PER_TICK (1000000000LL / HZ) @@ -211,8 +209,6 @@ static struct clocksource xen_clocksource __read_mostly = { .rating = 400, .read = xen_clocksource_get_cycles, .mask = ~0, - .mult = 1< 0 and at this point we don't know how many cpus are diff --git a/trunk/arch/x86/xen/xen-ops.h b/trunk/arch/x86/xen/xen-ops.h index 3112f55638c4..97dfdc8757b3 100644 --- a/trunk/arch/x86/xen/xen-ops.h +++ b/trunk/arch/x86/xen/xen-ops.h @@ -74,7 +74,7 @@ static inline void xen_hvm_smp_init(void) {} #ifdef CONFIG_PARAVIRT_SPINLOCKS void __init xen_init_spinlocks(void); -__cpuinit void xen_init_lock_cpu(int cpu); +void __cpuinit 
xen_init_lock_cpu(int cpu); void xen_uninit_lock_cpu(int cpu); #else static inline void xen_init_spinlocks(void) diff --git a/trunk/arch/xtensa/kernel/irq.c b/trunk/arch/xtensa/kernel/irq.c index d77089df412e..4340ee076bd5 100644 --- a/trunk/arch/xtensa/kernel/irq.c +++ b/trunk/arch/xtensa/kernel/irq.c @@ -64,47 +64,41 @@ asmlinkage void do_IRQ(int irq, struct pt_regs *regs) int arch_show_interrupts(struct seq_file *p, int prec) { - int j; - - seq_printf(p, "%*s: ", prec, "NMI"); - for_each_online_cpu(j) - seq_printf(p, "%10u ", nmi_count(j)); - seq_putc(p, '\n'); seq_printf(p, "%*s: ", prec, "ERR"); seq_printf(p, "%10u\n", atomic_read(&irq_err_count)); return 0; } -static void xtensa_irq_mask(struct irq_chip *d) +static void xtensa_irq_mask(struct irq_data *d) { cached_irq_mask &= ~(1 << d->irq); set_sr (cached_irq_mask, INTENABLE); } -static void xtensa_irq_unmask(struct irq_chip *d) +static void xtensa_irq_unmask(struct irq_data *d) { cached_irq_mask |= 1 << d->irq; set_sr (cached_irq_mask, INTENABLE); } -static void xtensa_irq_enable(struct irq_chip *d) +static void xtensa_irq_enable(struct irq_data *d) { variant_irq_enable(d->irq); xtensa_irq_unmask(d->irq); } -static void xtensa_irq_disable(struct irq_chip *d) +static void xtensa_irq_disable(struct irq_data *d) { xtensa_irq_mask(d->irq); variant_irq_disable(d->irq); } -static void xtensa_irq_ack(struct irq_chip *d) +static void xtensa_irq_ack(struct irq_data *d) { set_sr(1 << d->irq, INTCLEAR); } -static int xtensa_irq_retrigger(struct irq_chip *d) +static int xtensa_irq_retrigger(struct irq_data *d) { set_sr (1 << d->irq, INTSET); return 1; diff --git a/trunk/block/blk-cgroup.c b/trunk/block/blk-cgroup.c index f0605ab2a761..471fdcc5df85 100644 --- a/trunk/block/blk-cgroup.c +++ b/trunk/block/blk-cgroup.c @@ -114,6 +114,13 @@ struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup) } EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup); +struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk) +{ + return container_of(task_subsys_state(tsk, blkio_subsys_id), + struct blkio_cgroup, css); +} +EXPORT_SYMBOL_GPL(task_blkio_cgroup); + static inline void blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight) { diff --git a/trunk/block/blk-cgroup.h b/trunk/block/blk-cgroup.h index 10919fae2d3a..c774930cc206 100644 --- a/trunk/block/blk-cgroup.h +++ b/trunk/block/blk-cgroup.h @@ -291,6 +291,7 @@ static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {} #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE) extern struct blkio_cgroup blkio_root_cgroup; extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup); +extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk); extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, struct blkio_group *blkg, void *key, dev_t dev, enum blkio_policy_id plid); @@ -314,6 +315,8 @@ void blkiocg_update_io_remove_stats(struct blkio_group *blkg, struct cgroup; static inline struct blkio_cgroup * cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; } +static inline struct blkio_cgroup * +task_blkio_cgroup(struct task_struct *tsk) { return NULL; } static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, struct blkio_group *blkg, void *key, dev_t dev, diff --git a/trunk/block/blk-core.c b/trunk/block/blk-core.c index 90f22cc30799..3fe00a14822a 100644 --- a/trunk/block/blk-core.c +++ b/trunk/block/blk-core.c @@ -198,26 +198,13 @@ void blk_dump_rq_flags(struct request *rq, char *msg) } 
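The task_blkio_cgroup() helper added to blk-cgroup.c above replaces the task_cgroup() plus cgroup_to_blkio_cgroup() detour in blk-throttle.c and cfq-iosched.c (their hunks appear further down). The following condensed sketch mirrors the throtl_get_tg() change from this patch; the function name example_get_tg is illustrative, the rest follows the patched code.

/* Resolve the blkio cgroup of the current task directly, under
 * rcu_read_lock(), and fall back to the root group on failure. */
static struct throtl_grp *example_get_tg(struct throtl_data *td)
{
	struct throtl_grp *tg;
	struct blkio_cgroup *blkcg;

	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);
	tg = throtl_find_alloc_tg(td, blkcg);
	if (!tg)
		tg = &td->root_tg;
	rcu_read_unlock();

	return tg;
}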
EXPORT_SYMBOL(blk_dump_rq_flags); -/* - * Make sure that plugs that were pending when this function was entered, - * are now complete and requests pushed to the queue. -*/ -static inline void queue_sync_plugs(struct request_queue *q) -{ - /* - * If the current process is plugged and has barriers submitted, - * we will livelock if we don't unplug first. - */ - blk_flush_plug(current); -} - static void blk_delay_work(struct work_struct *work) { struct request_queue *q; q = container_of(work, struct request_queue, delay_work.work); spin_lock_irq(q->queue_lock); - __blk_run_queue(q, false); + __blk_run_queue(q); spin_unlock_irq(q->queue_lock); } @@ -233,7 +220,8 @@ static void blk_delay_work(struct work_struct *work) */ void blk_delay_queue(struct request_queue *q, unsigned long msecs) { - schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs)); + queue_delayed_work(kblockd_workqueue, &q->delay_work, + msecs_to_jiffies(msecs)); } EXPORT_SYMBOL(blk_delay_queue); @@ -251,7 +239,7 @@ void blk_start_queue(struct request_queue *q) WARN_ON(!irqs_disabled()); queue_flag_clear(QUEUE_FLAG_STOPPED, q); - __blk_run_queue(q, false); + __blk_run_queue(q); } EXPORT_SYMBOL(blk_start_queue); @@ -298,37 +286,43 @@ void blk_sync_queue(struct request_queue *q) { del_timer_sync(&q->timeout); cancel_delayed_work_sync(&q->delay_work); - queue_sync_plugs(q); } EXPORT_SYMBOL(blk_sync_queue); /** * __blk_run_queue - run a single device queue * @q: The queue to run - * @force_kblockd: Don't run @q->request_fn directly. Use kblockd. * * Description: * See @blk_run_queue. This variant must be called with the queue lock * held and interrupts disabled. - * */ -void __blk_run_queue(struct request_queue *q, bool force_kblockd) +void __blk_run_queue(struct request_queue *q) { if (unlikely(blk_queue_stopped(q))) return; - /* - * Only recurse once to avoid overrunning the stack, let the unplug - * handling reinvoke the handler shortly if we already got there. - */ - if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { - q->request_fn(q); - queue_flag_clear(QUEUE_FLAG_REENTER, q); - } else - queue_delayed_work(kblockd_workqueue, &q->delay_work, 0); + q->request_fn(q); } EXPORT_SYMBOL(__blk_run_queue); +/** + * blk_run_queue_async - run a single device queue in workqueue context + * @q: The queue to run + * + * Description: + * Tells kblockd to perform the equivalent of @blk_run_queue on behalf + * of us. + */ +void blk_run_queue_async(struct request_queue *q) +{ + if (likely(!blk_queue_stopped(q))) { + __cancel_delayed_work(&q->delay_work); + queue_delayed_work(kblockd_workqueue, &q->delay_work, 0); + } +} +EXPORT_SYMBOL(blk_run_queue_async); + /** * blk_run_queue - run a single device queue * @q: The queue to run @@ -342,7 +336,7 @@ void blk_run_queue(struct request_queue *q) unsigned long flags; spin_lock_irqsave(q->queue_lock, flags); - __blk_run_queue(q, false); + __blk_run_queue(q); spin_unlock_irqrestore(q->queue_lock, flags); } EXPORT_SYMBOL(blk_run_queue); @@ -991,7 +985,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq, blk_queue_end_tag(q, rq); add_acct_request(q, rq, where); - __blk_run_queue(q, false); + __blk_run_queue(q); spin_unlock_irqrestore(q->queue_lock, flags); } EXPORT_SYMBOL(blk_insert_request); @@ -1311,7 +1305,15 @@ static int __make_request(struct request_queue *q, struct bio *bio) plug = current->plug; if (plug) { - if (!plug->should_sort && !list_empty(&plug->list)) { + /* + * If this is the first request added after a plug, fire + * off a plug trace.
If others have been added before, check + * if we have multiple devices in this plug. If so, make a + * note to sort the list before dispatch. + */ + if (list_empty(&plug->list)) + trace_block_plug(q); + else if (!plug->should_sort) { struct request *__rq; __rq = list_entry_rq(plug->list.prev); @@ -1327,7 +1329,7 @@ static int __make_request(struct request_queue *q, struct bio *bio) } else { spin_lock_irq(q->queue_lock); add_acct_request(q, req, where); - __blk_run_queue(q, false); + __blk_run_queue(q); out_unlock: spin_unlock_irq(q->queue_lock); } @@ -2644,6 +2646,7 @@ void blk_start_plug(struct blk_plug *plug) plug->magic = PLUG_MAGIC; INIT_LIST_HEAD(&plug->list); + INIT_LIST_HEAD(&plug->cb_list); plug->should_sort = 0; /* @@ -2668,33 +2671,93 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b) return !(rqa->q <= rqb->q); } -static void flush_plug_list(struct blk_plug *plug) +/* + * If 'from_schedule' is true, then postpone the dispatch of requests + * until a safe kblockd context. We do this to avoid accidental big + * additional stack usage in driver dispatch, in places where the original + * plugger did not intend it. + */ +static void queue_unplugged(struct request_queue *q, unsigned int depth, + bool from_schedule) + __releases(q->queue_lock) +{ + trace_block_unplug(q, depth, !from_schedule); + + /* + * If we are punting this to kblockd, then we can safely drop + * the queue_lock before waking kblockd (which needs to take + * this lock). + */ + if (from_schedule) { + spin_unlock(q->queue_lock); + blk_run_queue_async(q); + } else { + __blk_run_queue(q); + spin_unlock(q->queue_lock); + } + +} + +static void flush_plug_callbacks(struct blk_plug *plug) +{ + LIST_HEAD(callbacks); + + if (list_empty(&plug->cb_list)) + return; + + list_splice_init(&plug->cb_list, &callbacks); + + while (!list_empty(&callbacks)) { + struct blk_plug_cb *cb = list_first_entry(&callbacks, + struct blk_plug_cb, + list); + list_del(&cb->list); + cb->callback(cb); + } +} + +void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) { struct request_queue *q; unsigned long flags; struct request *rq; + LIST_HEAD(list); + unsigned int depth; BUG_ON(plug->magic != PLUG_MAGIC); + flush_plug_callbacks(plug); if (list_empty(&plug->list)) return; - if (plug->should_sort) - list_sort(NULL, &plug->list, plug_rq_cmp); + list_splice_init(&plug->list, &list); + + if (plug->should_sort) { + list_sort(NULL, &list, plug_rq_cmp); + plug->should_sort = 0; + } q = NULL; + depth = 0; + + /* + * Save and disable interrupts here, to avoid doing it for every + * queue lock we have to take.
+ */ local_irq_save(flags); - while (!list_empty(&plug->list)) { - rq = list_entry_rq(plug->list.next); + while (!list_empty(&list)) { + rq = list_entry_rq(list.next); list_del_init(&rq->queuelist); BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG)); BUG_ON(!rq->q); if (rq->q != q) { - if (q) { - __blk_run_queue(q, false); - spin_unlock(q->queue_lock); - } + /* + * This drops the queue lock + */ + if (q) + queue_unplugged(q, depth, from_schedule); q = rq->q; + depth = 0; spin_lock(q->queue_lock); } rq->cmd_flags &= ~REQ_ON_PLUG; @@ -2706,38 +2769,27 @@ static void flush_plug_list(struct blk_plug *plug) __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); else __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); - } - if (q) { - __blk_run_queue(q, false); - spin_unlock(q->queue_lock); + depth++; } - BUG_ON(!list_empty(&plug->list)); - local_irq_restore(flags); -} - -static void __blk_finish_plug(struct task_struct *tsk, struct blk_plug *plug) -{ - flush_plug_list(plug); + /* + * This drops the queue lock + */ + if (q) + queue_unplugged(q, depth, from_schedule); - if (plug == tsk->plug) - tsk->plug = NULL; + local_irq_restore(flags); } void blk_finish_plug(struct blk_plug *plug) { - if (plug) - __blk_finish_plug(current, plug); -} -EXPORT_SYMBOL(blk_finish_plug); + blk_flush_plug_list(plug, false); -void __blk_flush_plug(struct task_struct *tsk, struct blk_plug *plug) -{ - __blk_finish_plug(tsk, plug); - tsk->plug = plug; + if (plug == current->plug) + current->plug = NULL; } -EXPORT_SYMBOL(__blk_flush_plug); +EXPORT_SYMBOL(blk_finish_plug); int __init blk_dev_init(void) { diff --git a/trunk/block/blk-exec.c b/trunk/block/blk-exec.c index 7482b7fa863b..81e31819a597 100644 --- a/trunk/block/blk-exec.c +++ b/trunk/block/blk-exec.c @@ -55,7 +55,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, WARN_ON(irqs_disabled()); spin_lock_irq(q->queue_lock); __elv_add_request(q, rq, where); - __blk_run_queue(q, false); + __blk_run_queue(q); /* the queue is stopped so it won't be plugged+unplugged */ if (rq->cmd_type == REQ_TYPE_PM_RESUME) q->request_fn(q); diff --git a/trunk/block/blk-flush.c b/trunk/block/blk-flush.c index eba4a2790c6c..6c9b5e189e62 100644 --- a/trunk/block/blk-flush.c +++ b/trunk/block/blk-flush.c @@ -218,7 +218,7 @@ static void flush_end_io(struct request *flush_rq, int error) * request_fn may confuse the driver. Always use kblockd. */ if (queued) - __blk_run_queue(q, true); + blk_run_queue_async(q); } /** @@ -274,7 +274,7 @@ static void flush_data_end_io(struct request *rq, int error) * the comment in flush_end_io(). 
*/ if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error)) - __blk_run_queue(q, true); + blk_run_queue_async(q); } /** diff --git a/trunk/block/blk-sysfs.c b/trunk/block/blk-sysfs.c index 261c75c665ae..bd236313f35d 100644 --- a/trunk/block/blk-sysfs.c +++ b/trunk/block/blk-sysfs.c @@ -66,14 +66,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count) if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { blk_set_queue_full(q, BLK_RW_SYNC); - } else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) { + } else { blk_clear_queue_full(q, BLK_RW_SYNC); wake_up(&rl->wait[BLK_RW_SYNC]); } if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { blk_set_queue_full(q, BLK_RW_ASYNC); - } else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) { + } else { blk_clear_queue_full(q, BLK_RW_ASYNC); wake_up(&rl->wait[BLK_RW_ASYNC]); } @@ -498,7 +498,6 @@ int blk_register_queue(struct gendisk *disk) { int ret; struct device *dev = disk_to_dev(disk); - struct request_queue *q = disk->queue; if (WARN_ON(!q)) @@ -509,8 +508,10 @@ int blk_register_queue(struct gendisk *disk) return ret; ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); - if (ret < 0) + if (ret < 0) { + blk_trace_remove_sysfs(dev); return ret; + } kobject_uevent(&q->kobj, KOBJ_ADD); @@ -521,7 +522,7 @@ int blk_register_queue(struct gendisk *disk) if (ret) { kobject_uevent(&q->kobj, KOBJ_REMOVE); kobject_del(&q->kobj); - blk_trace_remove_sysfs(disk_to_dev(disk)); + blk_trace_remove_sysfs(dev); kobject_put(&dev->kobj); return ret; } diff --git a/trunk/block/blk-throttle.c b/trunk/block/blk-throttle.c index 0475a22a420d..252a81a306f7 100644 --- a/trunk/block/blk-throttle.c +++ b/trunk/block/blk-throttle.c @@ -160,9 +160,8 @@ static void throtl_put_tg(struct throtl_grp *tg) } static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td, - struct cgroup *cgroup) + struct blkio_cgroup *blkcg) { - struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup); struct throtl_grp *tg = NULL; void *key = td; struct backing_dev_info *bdi = &td->queue->backing_dev_info; @@ -229,12 +228,12 @@ static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td, static struct throtl_grp * throtl_get_tg(struct throtl_data *td) { - struct cgroup *cgroup; struct throtl_grp *tg = NULL; + struct blkio_cgroup *blkcg; rcu_read_lock(); - cgroup = task_cgroup(current, blkio_subsys_id); - tg = throtl_find_alloc_tg(td, cgroup); + blkcg = task_blkio_cgroup(current); + tg = throtl_find_alloc_tg(td, blkcg); if (!tg) tg = &td->root_tg; rcu_read_unlock(); diff --git a/trunk/block/cfq-iosched.c b/trunk/block/cfq-iosched.c index 3be881ec95ad..ab7a9e6a9b1c 100644 --- a/trunk/block/cfq-iosched.c +++ b/trunk/block/cfq-iosched.c @@ -1014,10 +1014,9 @@ void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg, cfqg->needs_update = true; } -static struct cfq_group * -cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create) +static struct cfq_group * cfq_find_alloc_cfqg(struct cfq_data *cfqd, + struct blkio_cgroup *blkcg, int create) { - struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup); struct cfq_group *cfqg = NULL; void *key = cfqd; int i, j; @@ -1079,12 +1078,12 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create) */ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create) { - struct cgroup *cgroup; + struct blkio_cgroup *blkcg; struct cfq_group *cfqg = NULL; rcu_read_lock(); - cgroup = task_cgroup(current, blkio_subsys_id); - cfqg = cfq_find_alloc_cfqg(cfqd, 
cgroup, create); + blkcg = task_blkio_cgroup(current); + cfqg = cfq_find_alloc_cfqg(cfqd, blkcg, create); if (!cfqg && create) cfqg = &cfqd->root_group; rcu_read_unlock(); @@ -2582,28 +2581,20 @@ static void cfq_put_queue(struct cfq_queue *cfqq) } /* - * Must always be called with the rcu_read_lock() held + * Call func for each cic attached to this ioc. */ static void -__call_for_each_cic(struct io_context *ioc, - void (*func)(struct io_context *, struct cfq_io_context *)) +call_for_each_cic(struct io_context *ioc, + void (*func)(struct io_context *, struct cfq_io_context *)) { struct cfq_io_context *cic; struct hlist_node *n; + rcu_read_lock(); + hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list) func(ioc, cic); -} -/* - * Call func for each cic attached to this ioc. - */ -static void -call_for_each_cic(struct io_context *ioc, - void (*func)(struct io_context *, struct cfq_io_context *)) -{ - rcu_read_lock(); - __call_for_each_cic(ioc, func); rcu_read_unlock(); } @@ -2664,7 +2655,7 @@ static void cfq_free_io_context(struct io_context *ioc) * should be ok to iterate over the known list, we will see all cic's * since no new ones are added. */ - __call_for_each_cic(ioc, cic_free_func); + call_for_each_cic(ioc, cic_free_func); } static void cfq_put_cooperator(struct cfq_queue *cfqq) @@ -3368,7 +3359,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfqd->busy_queues > 1) { cfq_del_timer(cfqd, cfqq); cfq_clear_cfqq_wait_request(cfqq); - __blk_run_queue(cfqd->queue, false); + __blk_run_queue(cfqd->queue); } else { cfq_blkiocg_update_idle_time_stats( &cfqq->cfqg->blkg); @@ -3383,7 +3374,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, * this new queue is RT and the current one is BE */ cfq_preempt_queue(cfqd, cfqq); - __blk_run_queue(cfqd->queue, false); + __blk_run_queue(cfqd->queue); } } @@ -3743,7 +3734,7 @@ static void cfq_kick_queue(struct work_struct *work) struct request_queue *q = cfqd->queue; spin_lock_irq(q->queue_lock); - __blk_run_queue(cfqd->queue, false); + __blk_run_queue(cfqd->queue); spin_unlock_irq(q->queue_lock); } diff --git a/trunk/block/elevator.c b/trunk/block/elevator.c index 0cdb4e7ebab4..45ca1e34f582 100644 --- a/trunk/block/elevator.c +++ b/trunk/block/elevator.c @@ -642,7 +642,7 @@ void elv_quiesce_start(struct request_queue *q) */ elv_drain_elevator(q); while (q->rq.elvpriv) { - __blk_run_queue(q, false); + __blk_run_queue(q); spin_unlock_irq(q->queue_lock); msleep(10); spin_lock_irq(q->queue_lock); @@ -671,7 +671,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where) q->boundary_rq = rq; } } else if (!(rq->cmd_flags & REQ_ELVPRIV) && - where == ELEVATOR_INSERT_SORT) + (where == ELEVATOR_INSERT_SORT || + where == ELEVATOR_INSERT_SORT_MERGE)) where = ELEVATOR_INSERT_BACK; switch (where) { @@ -695,7 +696,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where) * with anything. There's no point in delaying queue * processing. */ - __blk_run_queue(q, false); + __blk_run_queue(q); break; case ELEVATOR_INSERT_SORT_MERGE: diff --git a/trunk/block/genhd.c b/trunk/block/genhd.c index b364bd038a18..2dd988723d73 100644 --- a/trunk/block/genhd.c +++ b/trunk/block/genhd.c @@ -1588,9 +1588,13 @@ static void disk_events_workfn(struct work_struct *work) spin_unlock_irq(&ev->lock); - /* tell userland about new events */ + /* + * Tell userland about new events. Only the events listed in + * @disk->events are reported. 
Unlisted events are processed the + * same internally but never get reported to userland. + */ for (i = 0; i < ARRAY_SIZE(disk_uevents); i++) - if (events & (1 << i)) + if (events & disk->events & (1 << i)) envp[nr_events++] = disk_uevents[i]; if (nr_events) diff --git a/trunk/drivers/Kconfig b/trunk/drivers/Kconfig index aca706751469..61631edfecc2 100644 --- a/trunk/drivers/Kconfig +++ b/trunk/drivers/Kconfig @@ -121,4 +121,7 @@ source "drivers/platform/Kconfig" source "drivers/clk/Kconfig" source "drivers/hwspinlock/Kconfig" + +source "drivers/clocksource/Kconfig" + endmenu diff --git a/trunk/drivers/acpi/apei/Kconfig b/trunk/drivers/acpi/apei/Kconfig index 66a03caa2ad9..f739a70b1c70 100644 --- a/trunk/drivers/acpi/apei/Kconfig +++ b/trunk/drivers/acpi/apei/Kconfig @@ -1,5 +1,6 @@ config ACPI_APEI bool "ACPI Platform Error Interface (APEI)" + select MISC_FILESYSTEMS select PSTORE depends on X86 help diff --git a/trunk/drivers/acpi/apei/erst.c b/trunk/drivers/acpi/apei/erst.c index d6cb0ff6988e..e6cef8e1b534 100644 --- a/trunk/drivers/acpi/apei/erst.c +++ b/trunk/drivers/acpi/apei/erst.c @@ -929,13 +929,17 @@ static int erst_check_table(struct acpi_table_erst *erst_tab) return 0; } -static size_t erst_reader(u64 *id, enum pstore_type_id *type, +static int erst_open_pstore(struct pstore_info *psi); +static int erst_close_pstore(struct pstore_info *psi); +static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, struct timespec *time); static u64 erst_writer(enum pstore_type_id type, size_t size); static struct pstore_info erst_info = { .owner = THIS_MODULE, .name = "erst", + .open = erst_open_pstore, + .close = erst_close_pstore, .read = erst_reader, .write = erst_writer, .erase = erst_clear @@ -957,12 +961,32 @@ struct cper_pstore_record { char data[]; } __packed; -static size_t erst_reader(u64 *id, enum pstore_type_id *type, +static int reader_pos; + +static int erst_open_pstore(struct pstore_info *psi) +{ + int rc; + + if (erst_disable) + return -ENODEV; + + rc = erst_get_record_id_begin(&reader_pos); + + return rc; +} + +static int erst_close_pstore(struct pstore_info *psi) +{ + erst_get_record_id_end(); + + return 0; +} + +static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, struct timespec *time) { int rc; - ssize_t len; - unsigned long flags; + ssize_t len = 0; u64 record_id; struct cper_pstore_record *rcd = (struct cper_pstore_record *) (erst_info.buf - sizeof(*rcd)); @@ -970,24 +994,28 @@ static size_t erst_reader(u64 *id, enum pstore_type_id *type, if (erst_disable) return -ENODEV; - raw_spin_lock_irqsave(&erst_lock, flags); skip: - rc = __erst_get_next_record_id(&record_id); - if (rc) { - raw_spin_unlock_irqrestore(&erst_lock, flags); - return rc; - } + rc = erst_get_record_id_next(&reader_pos, &record_id); + if (rc) + goto out; + /* no more record */ if (record_id == APEI_ERST_INVALID_RECORD_ID) { - raw_spin_unlock_irqrestore(&erst_lock, flags); - return 0; + rc = -1; + goto out; } - len = __erst_read(record_id, &rcd->hdr, sizeof(*rcd) + - erst_erange.size); + len = erst_read(record_id, &rcd->hdr, sizeof(*rcd) + + erst_info.bufsize); + /* The record may be cleared by others, try read next record */ + if (len == -ENOENT) + goto skip; + else if (len < 0) { + rc = -1; + goto out; + } if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0) goto skip; - raw_spin_unlock_irqrestore(&erst_lock, flags); *id = record_id; if (uuid_le_cmp(rcd->sec_hdr.section_type, @@ -1005,7 +1033,8 @@ static size_t erst_reader(u64 *id, enum pstore_type_id *type, time->tv_sec = 0; 
time->tv_nsec = 0; - return len - sizeof(*rcd); +out: + return (rc < 0) ? rc : (len - sizeof(*rcd)); } static u64 erst_writer(enum pstore_type_id type, size_t size) diff --git a/trunk/drivers/acpi/processor_perflib.c b/trunk/drivers/acpi/processor_perflib.c index 3a73a93596e8..85b32376dad7 100644 --- a/trunk/drivers/acpi/processor_perflib.c +++ b/trunk/drivers/acpi/processor_perflib.c @@ -49,10 +49,6 @@ ACPI_MODULE_NAME("processor_perflib"); static DEFINE_MUTEX(performance_mutex); -/* Use cpufreq debug layer for _PPC changes. */ -#define cpufreq_printk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \ - "cpufreq-core", msg) - /* * _PPC support is implemented as a CPUfreq policy notifier: * This means each time a CPUfreq driver registered also with @@ -145,7 +141,7 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr) return -ENODEV; } - cpufreq_printk("CPU %d: _PPC is %d - frequency %s limited\n", pr->id, + pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id, (int)ppc, ppc ? "" : "not"); pr->performance_platform_limit = (int)ppc; diff --git a/trunk/drivers/acpi/processor_throttling.c b/trunk/drivers/acpi/processor_throttling.c index ad3501739563..605a2954ef17 100644 --- a/trunk/drivers/acpi/processor_throttling.c +++ b/trunk/drivers/acpi/processor_throttling.c @@ -710,20 +710,14 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr) } #ifdef CONFIG_X86 -static int acpi_throttling_rdmsr(struct acpi_processor *pr, - u64 *value) +static int acpi_throttling_rdmsr(u64 *value) { - struct cpuinfo_x86 *c; u64 msr_high, msr_low; - unsigned int cpu; u64 msr = 0; int ret = -1; - cpu = pr->id; - c = &cpu_data(cpu); - - if ((c->x86_vendor != X86_VENDOR_INTEL) || - !cpu_has(c, X86_FEATURE_ACPI)) { + if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) || + !this_cpu_has(X86_FEATURE_ACPI)) { printk(KERN_ERR PREFIX "HARDWARE addr space,NOT supported yet\n"); } else { @@ -738,18 +732,13 @@ static int acpi_throttling_rdmsr(struct acpi_processor *pr, return ret; } -static int acpi_throttling_wrmsr(struct acpi_processor *pr, u64 value) +static int acpi_throttling_wrmsr(u64 value) { - struct cpuinfo_x86 *c; - unsigned int cpu; int ret = -1; u64 msr; - cpu = pr->id; - c = &cpu_data(cpu); - - if ((c->x86_vendor != X86_VENDOR_INTEL) || - !cpu_has(c, X86_FEATURE_ACPI)) { + if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) || + !this_cpu_has(X86_FEATURE_ACPI)) { printk(KERN_ERR PREFIX "HARDWARE addr space,NOT supported yet\n"); } else { @@ -761,15 +750,14 @@ static int acpi_throttling_wrmsr(struct acpi_processor *pr, u64 value) return ret; } #else -static int acpi_throttling_rdmsr(struct acpi_processor *pr, - u64 *value) +static int acpi_throttling_rdmsr(u64 *value) { printk(KERN_ERR PREFIX "HARDWARE addr space,NOT supported yet\n"); return -1; } -static int acpi_throttling_wrmsr(struct acpi_processor *pr, u64 value) +static int acpi_throttling_wrmsr(u64 value) { printk(KERN_ERR PREFIX "HARDWARE addr space,NOT supported yet\n"); @@ -801,7 +789,7 @@ static int acpi_read_throttling_status(struct acpi_processor *pr, ret = 0; break; case ACPI_ADR_SPACE_FIXED_HARDWARE: - ret = acpi_throttling_rdmsr(pr, value); + ret = acpi_throttling_rdmsr(value); break; default: printk(KERN_ERR PREFIX "Unknown addr space %d\n", @@ -834,7 +822,7 @@ static int acpi_write_throttling_state(struct acpi_processor *pr, ret = 0; break; case ACPI_ADR_SPACE_FIXED_HARDWARE: - ret = acpi_throttling_wrmsr(pr, value); + ret = acpi_throttling_wrmsr(value); break; default: 
printk(KERN_ERR PREFIX "Unknown addr space %d\n", diff --git a/trunk/drivers/acpi/scan.c b/trunk/drivers/acpi/scan.c index b136c9c1e531..449c556274c0 100644 --- a/trunk/drivers/acpi/scan.c +++ b/trunk/drivers/acpi/scan.c @@ -943,6 +943,10 @@ static int acpi_bus_get_flags(struct acpi_device *device) if (ACPI_SUCCESS(status)) device->flags.lockable = 1; + /* Power resources cannot be power manageable. */ + if (device->device_type == ACPI_BUS_TYPE_POWER) + return 0; + /* Presence of _PS0|_PR0 indicates 'power manageable' */ status = acpi_get_handle(device->handle, "_PS0", &temp); if (ACPI_FAILURE(status)) diff --git a/trunk/drivers/amba/bus.c b/trunk/drivers/amba/bus.c index 821040503154..7025593a58c8 100644 --- a/trunk/drivers/amba/bus.c +++ b/trunk/drivers/amba/bus.c @@ -214,7 +214,7 @@ static int amba_pm_resume_noirq(struct device *dev) #endif /* !CONFIG_SUSPEND */ -#ifdef CONFIG_HIBERNATION +#ifdef CONFIG_HIBERNATE_CALLBACKS static int amba_pm_freeze(struct device *dev) { @@ -352,7 +352,7 @@ static int amba_pm_restore_noirq(struct device *dev) return ret; } -#else /* !CONFIG_HIBERNATION */ +#else /* !CONFIG_HIBERNATE_CALLBACKS */ #define amba_pm_freeze NULL #define amba_pm_thaw NULL @@ -363,7 +363,7 @@ static int amba_pm_restore_noirq(struct device *dev) #define amba_pm_poweroff_noirq NULL #define amba_pm_restore_noirq NULL -#endif /* !CONFIG_HIBERNATION */ +#endif /* !CONFIG_HIBERNATE_CALLBACKS */ #ifdef CONFIG_PM diff --git a/trunk/drivers/ata/ahci.c b/trunk/drivers/ata/ahci.c index 39d829cd82dd..71afe0371311 100644 --- a/trunk/drivers/ata/ahci.c +++ b/trunk/drivers/ata/ahci.c @@ -150,7 +150,7 @@ static const struct ata_port_info ahci_port_info[] = { { AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ), - .flags = AHCI_FLAG_COMMON, + .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_ops, @@ -261,6 +261,12 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */ { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */ { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */ + { PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */ + { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point AHCI */ + { PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */ + { PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */ + { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */ + { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, diff --git a/trunk/drivers/ata/ahci.h b/trunk/drivers/ata/ahci.h index 39865009c251..12c5282e7fca 100644 --- a/trunk/drivers/ata/ahci.h +++ b/trunk/drivers/ata/ahci.h @@ -229,6 +229,10 @@ enum { EM_CTL_ALHD = (1 << 26), /* Activity LED */ EM_CTL_XMT = (1 << 25), /* Transmit Only */ EM_CTL_SMB = (1 << 24), /* Single Message Buffer */ + EM_CTL_SGPIO = (1 << 19), /* SGPIO messages supported */ + EM_CTL_SES = (1 << 18), /* SES-2 messages supported */ + EM_CTL_SAFTE = (1 << 17), /* SAF-TE messages supported */ + EM_CTL_LED = (1 << 16), /* LED messages supported */ /* em message type */ EM_MSG_TYPE_LED = (1 << 0), /* LED */ diff --git a/trunk/drivers/ata/ata_piix.c b/trunk/drivers/ata/ata_piix.c index 0bc3fd6c3fdb..6f6e7718b05c 100644 --- a/trunk/drivers/ata/ata_piix.c +++ b/trunk/drivers/ata/ata_piix.c @@ -309,6 +309,14 @@ static 
const struct pci_device_id piix_pci_tbl[] = { { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, /* SATA Controller IDE (PBG) */ { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (Panther Point) */ + { 0x8086, 0x1e00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, + /* SATA Controller IDE (Panther Point) */ + { 0x8086, 0x1e01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, + /* SATA Controller IDE (Panther Point) */ + { 0x8086, 0x1e08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (Panther Point) */ + { 0x8086, 0x1e09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, { } /* terminate list */ }; diff --git a/trunk/drivers/ata/libahci.c b/trunk/drivers/ata/libahci.c index 26d452339e98..d38c40fe4ddb 100644 --- a/trunk/drivers/ata/libahci.c +++ b/trunk/drivers/ata/libahci.c @@ -109,6 +109,8 @@ static ssize_t ahci_read_em_buffer(struct device *dev, static ssize_t ahci_store_em_buffer(struct device *dev, struct device_attribute *attr, const char *buf, size_t size); +static ssize_t ahci_show_em_supported(struct device *dev, + struct device_attribute *attr, char *buf); static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL); static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL); @@ -116,6 +118,7 @@ static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL); static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL); static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO, ahci_read_em_buffer, ahci_store_em_buffer); +static DEVICE_ATTR(em_message_supported, S_IRUGO, ahci_show_em_supported, NULL); struct device_attribute *ahci_shost_attrs[] = { &dev_attr_link_power_management_policy, @@ -126,6 +129,7 @@ struct device_attribute *ahci_shost_attrs[] = { &dev_attr_ahci_host_version, &dev_attr_ahci_port_cmd, &dev_attr_em_buffer, + &dev_attr_em_message_supported, NULL }; EXPORT_SYMBOL_GPL(ahci_shost_attrs); @@ -343,6 +347,24 @@ static ssize_t ahci_store_em_buffer(struct device *dev, return size; } +static ssize_t ahci_show_em_supported(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + struct ahci_host_priv *hpriv = ap->host->private_data; + void __iomem *mmio = hpriv->mmio; + u32 em_ctl; + + em_ctl = readl(mmio + HOST_EM_CTL); + + return sprintf(buf, "%s%s%s%s\n", + em_ctl & EM_CTL_LED ? "led " : "", + em_ctl & EM_CTL_SAFTE ? "saf-te " : "", + em_ctl & EM_CTL_SES ? "ses-2 " : "", + em_ctl & EM_CTL_SGPIO ? "sgpio " : ""); +} + /** * ahci_save_initial_config - Save and fixup initial config values * @dev: target AHCI device @@ -1897,7 +1919,17 @@ static void ahci_pmp_attach(struct ata_port *ap) ahci_enable_fbs(ap); pp->intr_mask |= PORT_IRQ_BAD_PMP; - writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); + + /* + * We must not change the port interrupt mask register if the + * port is marked frozen, the value in pp->intr_mask will be + * restored later when the port is thawed. + * + * Note that during initialization, the port is marked as + * frozen since the irq handler is not yet registered. 
+ */ + if (!(ap->pflags & ATA_PFLAG_FROZEN)) + writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); } static void ahci_pmp_detach(struct ata_port *ap) @@ -1913,7 +1945,10 @@ static void ahci_pmp_detach(struct ata_port *ap) writel(cmd, port_mmio + PORT_CMD); pp->intr_mask &= ~PORT_IRQ_BAD_PMP; - writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); + + /* see comment above in ahci_pmp_attach() */ + if (!(ap->pflags & ATA_PFLAG_FROZEN)) + writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); } int ahci_port_resume(struct ata_port *ap) diff --git a/trunk/drivers/ata/libata-core.c b/trunk/drivers/ata/libata-core.c index 423c0a6952b2..76c3c15cb1e6 100644 --- a/trunk/drivers/ata/libata-core.c +++ b/trunk/drivers/ata/libata-core.c @@ -4139,6 +4139,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { */ { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER }, { "PIONEER DVD-RW DVR-212D", "1.28", ATA_HORKAGE_NOSETXFER }, + { "PIONEER DVD-RW DVR-216D", "1.08", ATA_HORKAGE_NOSETXFER }, /* End Marker */ { } @@ -5480,7 +5481,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host) if (!ap) return NULL; - ap->pflags |= ATA_PFLAG_INITIALIZING; + ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN; ap->lock = &host->lock; ap->print_id = -1; ap->host = host; diff --git a/trunk/drivers/ata/libata-eh.c b/trunk/drivers/ata/libata-eh.c index 88cd22fa65cd..dad9fd660f37 100644 --- a/trunk/drivers/ata/libata-eh.c +++ b/trunk/drivers/ata/libata-eh.c @@ -3316,6 +3316,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, struct ata_eh_context *ehc = &link->eh_context; struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; enum ata_lpm_policy old_policy = link->lpm_policy; + bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM; unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; unsigned int err_mask; int rc; @@ -3332,7 +3333,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, */ ata_for_each_dev(dev, link, ENABLED) { bool hipm = ata_id_has_hipm(dev->id); - bool dipm = ata_id_has_dipm(dev->id); + bool dipm = ata_id_has_dipm(dev->id) && !no_dipm; /* find the first enabled and LPM enabled devices */ if (!link_dev) @@ -3389,7 +3390,8 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, /* host config updated, enable DIPM if transitioning to MIN_POWER */ ata_for_each_dev(dev, link, ENABLED) { - if (policy == ATA_LPM_MIN_POWER && ata_id_has_dipm(dev->id)) { + if (policy == ATA_LPM_MIN_POWER && !no_dipm && + ata_id_has_dipm(dev->id)) { err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, SATA_DIPM); if (err_mask && err_mask != AC_ERR_DEV) { diff --git a/trunk/drivers/ata/pata_at91.c b/trunk/drivers/ata/pata_at91.c index 0da0dcc7dd08..a5fdbdcb0faf 100644 --- a/trunk/drivers/ata/pata_at91.c +++ b/trunk/drivers/ata/pata_at91.c @@ -33,11 +33,12 @@ #define DRV_NAME "pata_at91" -#define DRV_VERSION "0.1" +#define DRV_VERSION "0.2" #define CF_IDE_OFFSET 0x00c00000 #define CF_ALT_IDE_OFFSET 0x00e00000 #define CF_IDE_RES_SIZE 0x08 +#define NCS_RD_PULSE_LIMIT 0x3f /* maximal value for pulse bitfields */ struct at91_ide_info { unsigned long mode; @@ -49,8 +50,18 @@ struct at91_ide_info { void __iomem *alt_addr; }; -static const struct ata_timing initial_timing = - {XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0}; +static const struct ata_timing initial_timing = { + .mode = XFER_PIO_0, + .setup = 70, + .act8b = 290, + .rec8b = 240, + .cyc8b = 600, + .active = 165, + .recover = 150, + .dmack_hold = 0, + .cycle = 600, + 
.udma = 0 +}; static unsigned long calc_mck_cycles(unsigned long ns, unsigned long mck_hz) { @@ -109,6 +120,11 @@ static void set_smc_timing(struct device *dev, /* (CS0, CS1, DIR, OE) <= (CFCE1, CFCE2, CFRNW, NCSX) timings */ ncs_read_setup = 1; ncs_read_pulse = read_cycle - 2; + if (ncs_read_pulse > NCS_RD_PULSE_LIMIT) { + ncs_read_pulse = NCS_RD_PULSE_LIMIT; + dev_warn(dev, "ncs_read_pulse limited to maximal value %lu\n", + ncs_read_pulse); + } /* Write timings same as read timings */ write_cycle = read_cycle; diff --git a/trunk/drivers/atm/fore200e.c b/trunk/drivers/atm/fore200e.c index bdd2719f3f68..bc9e702186dd 100644 --- a/trunk/drivers/atm/fore200e.c +++ b/trunk/drivers/atm/fore200e.c @@ -2643,16 +2643,19 @@ fore200e_init(struct fore200e* fore200e, struct device *parent) } #ifdef CONFIG_SBUS +static const struct of_device_id fore200e_sba_match[]; static int __devinit fore200e_sba_probe(struct platform_device *op) { + const struct of_device_id *match; const struct fore200e_bus *bus; struct fore200e *fore200e; static int index = 0; int err; - if (!op->dev.of_match) + match = of_match_device(fore200e_sba_match, &op->dev); + if (!match) return -EINVAL; - bus = op->dev.of_match->data; + bus = match->data; fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL); if (!fore200e) diff --git a/trunk/drivers/base/Kconfig b/trunk/drivers/base/Kconfig index e9e5238f3106..d57e8d0fb823 100644 --- a/trunk/drivers/base/Kconfig +++ b/trunk/drivers/base/Kconfig @@ -168,11 +168,4 @@ config SYS_HYPERVISOR bool default n -config ARCH_NO_SYSDEV_OPS - bool - ---help--- - To be selected by architectures that don't use sysdev class or - sysdev driver power management (suspend/resume) and shutdown - operations. - endmenu diff --git a/trunk/drivers/base/base.h b/trunk/drivers/base/base.h index 19f49e41ce5d..a34dca0ad041 100644 --- a/trunk/drivers/base/base.h +++ b/trunk/drivers/base/base.h @@ -111,8 +111,6 @@ static inline int driver_match_device(struct device_driver *drv, return drv->bus->match ? 
drv->bus->match(dev, drv) : 1; } -extern void sysdev_shutdown(void); - extern char *make_class_name(const char *name, struct kobject *kobj); extern int devres_release_all(struct device *dev); diff --git a/trunk/drivers/base/core.c b/trunk/drivers/base/core.c index 81b78ede37c4..bc8729d603a7 100644 --- a/trunk/drivers/base/core.c +++ b/trunk/drivers/base/core.c @@ -400,7 +400,7 @@ static void device_remove_groups(struct device *dev, static int device_add_attrs(struct device *dev) { struct class *class = dev->class; - struct device_type *type = dev->type; + const struct device_type *type = dev->type; int error; if (class) { @@ -440,7 +440,7 @@ static int device_add_attrs(struct device *dev) static void device_remove_attrs(struct device *dev) { struct class *class = dev->class; - struct device_type *type = dev->type; + const struct device_type *type = dev->type; device_remove_groups(dev, dev->groups); @@ -1314,8 +1314,7 @@ EXPORT_SYMBOL_GPL(put_device); EXPORT_SYMBOL_GPL(device_create_file); EXPORT_SYMBOL_GPL(device_remove_file); -struct root_device -{ +struct root_device { struct device dev; struct module *owner; }; diff --git a/trunk/drivers/base/dd.c b/trunk/drivers/base/dd.c index da57ee9d63fe..6658da743c3a 100644 --- a/trunk/drivers/base/dd.c +++ b/trunk/drivers/base/dd.c @@ -245,6 +245,10 @@ int device_attach(struct device *dev) device_lock(dev); if (dev->driver) { + if (klist_node_attached(&dev->p->knode_driver)) { + ret = 1; + goto out_unlock; + } ret = device_bind_driver(dev); if (ret == 0) ret = 1; @@ -257,6 +261,7 @@ int device_attach(struct device *dev) ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach); pm_runtime_put_sync(dev); } +out_unlock: device_unlock(dev); return ret; } @@ -316,8 +321,7 @@ static void __device_release_driver(struct device *dev) drv = dev->driver; if (drv) { - pm_runtime_get_noresume(dev); - pm_runtime_barrier(dev); + pm_runtime_get_sync(dev); driver_sysfs_remove(dev); @@ -326,6 +330,8 @@ static void __device_release_driver(struct device *dev) BUS_NOTIFY_UNBIND_DRIVER, dev); + pm_runtime_put_sync(dev); + if (dev->bus && dev->bus->remove) dev->bus->remove(dev); else if (drv->remove) @@ -338,7 +344,6 @@ static void __device_release_driver(struct device *dev) BUS_NOTIFY_UNBOUND_DRIVER, dev); - pm_runtime_put_sync(dev); } } @@ -408,17 +413,16 @@ void *dev_get_drvdata(const struct device *dev) } EXPORT_SYMBOL(dev_get_drvdata); -void dev_set_drvdata(struct device *dev, void *data) +int dev_set_drvdata(struct device *dev, void *data) { int error; - if (!dev) - return; if (!dev->p) { error = device_private_init(dev); if (error) - return; + return error; } dev->p->driver_data = data; + return 0; } EXPORT_SYMBOL(dev_set_drvdata); diff --git a/trunk/drivers/base/firmware_class.c b/trunk/drivers/base/firmware_class.c index 8c798ef7f13f..bbb03e6f7255 100644 --- a/trunk/drivers/base/firmware_class.c +++ b/trunk/drivers/base/firmware_class.c @@ -521,6 +521,11 @@ static int _request_firmware(const struct firmware **firmware_p, if (!firmware_p) return -EINVAL; + if (WARN_ON(usermodehelper_is_disabled())) { + dev_err(device, "firmware: %s will not be loaded\n", name); + return -EBUSY; + } + *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); if (!firmware) { dev_err(device, "%s: kmalloc(struct firmware) failed\n", diff --git a/trunk/drivers/base/memory.c b/trunk/drivers/base/memory.c index 3da6a43b7756..0a134a424a37 100644 --- a/trunk/drivers/base/memory.c +++ b/trunk/drivers/base/memory.c @@ -48,7 +48,8 @@ static const char *memory_uevent_name(struct 
kset *kset, struct kobject *kobj) return MEMORY_CLASS_NAME; } -static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uevent_env *env) +static int memory_uevent(struct kset *kset, struct kobject *obj, + struct kobj_uevent_env *env) { int retval = 0; @@ -228,10 +229,11 @@ int memory_isolate_notify(unsigned long val, void *v) * OK to have direct references to sparsemem variables in here. */ static int -memory_section_action(unsigned long phys_index, unsigned long action) +memory_block_action(unsigned long phys_index, unsigned long action) { int i; unsigned long start_pfn, start_paddr; + unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; struct page *first_page; int ret; @@ -243,7 +245,7 @@ memory_section_action(unsigned long phys_index, unsigned long action) * that way. */ if (action == MEM_ONLINE) { - for (i = 0; i < PAGES_PER_SECTION; i++) { + for (i = 0; i < nr_pages; i++) { if (PageReserved(first_page+i)) continue; @@ -257,12 +259,12 @@ memory_section_action(unsigned long phys_index, unsigned long action) switch (action) { case MEM_ONLINE: start_pfn = page_to_pfn(first_page); - ret = online_pages(start_pfn, PAGES_PER_SECTION); + ret = online_pages(start_pfn, nr_pages); break; case MEM_OFFLINE: start_paddr = page_to_pfn(first_page) << PAGE_SHIFT; ret = remove_memory(start_paddr, - PAGES_PER_SECTION << PAGE_SHIFT); + nr_pages << PAGE_SHIFT); break; default: WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: " @@ -276,7 +278,7 @@ memory_section_action(unsigned long phys_index, unsigned long action) static int memory_block_change_state(struct memory_block *mem, unsigned long to_state, unsigned long from_state_req) { - int i, ret = 0; + int ret = 0; mutex_lock(&mem->state_mutex); @@ -288,20 +290,11 @@ static int memory_block_change_state(struct memory_block *mem, if (to_state == MEM_OFFLINE) mem->state = MEM_GOING_OFFLINE; - for (i = 0; i < sections_per_block; i++) { - ret = memory_section_action(mem->start_section_nr + i, - to_state); - if (ret) - break; - } - - if (ret) { - for (i = 0; i < sections_per_block; i++) - memory_section_action(mem->start_section_nr + i, - from_state_req); + ret = memory_block_action(mem->start_section_nr, to_state); + if (ret) mem->state = from_state_req; - } else + else mem->state = to_state; out: diff --git a/trunk/drivers/base/platform.c b/trunk/drivers/base/platform.c index f051cfff18af..1c291af637b3 100644 --- a/trunk/drivers/base/platform.c +++ b/trunk/drivers/base/platform.c @@ -149,6 +149,7 @@ static void platform_device_release(struct device *dev) of_device_node_put(&pa->pdev.dev); kfree(pa->pdev.dev.platform_data); + kfree(pa->pdev.mfd_cell); kfree(pa->pdev.resource); kfree(pa); } @@ -191,18 +192,18 @@ EXPORT_SYMBOL_GPL(platform_device_alloc); int platform_device_add_resources(struct platform_device *pdev, const struct resource *res, unsigned int num) { - struct resource *r; + struct resource *r = NULL; - if (!res) - return 0; - - r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL); - if (r) { - pdev->resource = r; - pdev->num_resources = num; - return 0; + if (res) { + r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL); + if (!r) + return -ENOMEM; } - return -ENOMEM; + + kfree(pdev->resource); + pdev->resource = r; + pdev->num_resources = num; + return 0; } EXPORT_SYMBOL_GPL(platform_device_add_resources); @@ -219,17 +220,17 @@ EXPORT_SYMBOL_GPL(platform_device_add_resources); int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size) { - void *d; + void *d = NULL; - if 
(!data) - return 0; - - d = kmemdup(data, size, GFP_KERNEL); - if (d) { - pdev->dev.platform_data = d; - return 0; + if (data) { + d = kmemdup(data, size, GFP_KERNEL); + if (!d) + return -ENOMEM; } - return -ENOMEM; + + kfree(pdev->dev.platform_data); + pdev->dev.platform_data = d; + return 0; } EXPORT_SYMBOL_GPL(platform_device_add_data); @@ -666,7 +667,7 @@ static int platform_legacy_resume(struct device *dev) return ret; } -static int platform_pm_prepare(struct device *dev) +int platform_pm_prepare(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -677,7 +678,7 @@ static int platform_pm_prepare(struct device *dev) return ret; } -static void platform_pm_complete(struct device *dev) +void platform_pm_complete(struct device *dev) { struct device_driver *drv = dev->driver; @@ -685,16 +686,11 @@ static void platform_pm_complete(struct device *dev) drv->pm->complete(dev); } -#else /* !CONFIG_PM_SLEEP */ - -#define platform_pm_prepare NULL -#define platform_pm_complete NULL - -#endif /* !CONFIG_PM_SLEEP */ +#endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_SUSPEND -int __weak platform_pm_suspend(struct device *dev) +int platform_pm_suspend(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -712,7 +708,7 @@ int __weak platform_pm_suspend(struct device *dev) return ret; } -int __weak platform_pm_suspend_noirq(struct device *dev) +int platform_pm_suspend_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -728,7 +724,7 @@ int __weak platform_pm_suspend_noirq(struct device *dev) return ret; } -int __weak platform_pm_resume(struct device *dev) +int platform_pm_resume(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -746,7 +742,7 @@ int __weak platform_pm_resume(struct device *dev) return ret; } -int __weak platform_pm_resume_noirq(struct device *dev) +int platform_pm_resume_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -762,18 +758,11 @@ int __weak platform_pm_resume_noirq(struct device *dev) return ret; } -#else /* !CONFIG_SUSPEND */ - -#define platform_pm_suspend NULL -#define platform_pm_resume NULL -#define platform_pm_suspend_noirq NULL -#define platform_pm_resume_noirq NULL - -#endif /* !CONFIG_SUSPEND */ +#endif /* CONFIG_SUSPEND */ -#ifdef CONFIG_HIBERNATION +#ifdef CONFIG_HIBERNATE_CALLBACKS -static int platform_pm_freeze(struct device *dev) +int platform_pm_freeze(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -791,7 +780,7 @@ static int platform_pm_freeze(struct device *dev) return ret; } -static int platform_pm_freeze_noirq(struct device *dev) +int platform_pm_freeze_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -807,7 +796,7 @@ static int platform_pm_freeze_noirq(struct device *dev) return ret; } -static int platform_pm_thaw(struct device *dev) +int platform_pm_thaw(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -825,7 +814,7 @@ static int platform_pm_thaw(struct device *dev) return ret; } -static int platform_pm_thaw_noirq(struct device *dev) +int platform_pm_thaw_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -841,7 +830,7 @@ static int platform_pm_thaw_noirq(struct device *dev) return ret; } -static int platform_pm_poweroff(struct device *dev) +int platform_pm_poweroff(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -859,7 +848,7 @@ static int 
platform_pm_poweroff(struct device *dev) return ret; } -static int platform_pm_poweroff_noirq(struct device *dev) +int platform_pm_poweroff_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -875,7 +864,7 @@ static int platform_pm_poweroff_noirq(struct device *dev) return ret; } -static int platform_pm_restore(struct device *dev) +int platform_pm_restore(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -893,7 +882,7 @@ static int platform_pm_restore(struct device *dev) return ret; } -static int platform_pm_restore_noirq(struct device *dev) +int platform_pm_restore_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -909,62 +898,13 @@ static int platform_pm_restore_noirq(struct device *dev) return ret; } -#else /* !CONFIG_HIBERNATION */ - -#define platform_pm_freeze NULL -#define platform_pm_thaw NULL -#define platform_pm_poweroff NULL -#define platform_pm_restore NULL -#define platform_pm_freeze_noirq NULL -#define platform_pm_thaw_noirq NULL -#define platform_pm_poweroff_noirq NULL -#define platform_pm_restore_noirq NULL - -#endif /* !CONFIG_HIBERNATION */ - -#ifdef CONFIG_PM_RUNTIME - -int __weak platform_pm_runtime_suspend(struct device *dev) -{ - return pm_generic_runtime_suspend(dev); -}; - -int __weak platform_pm_runtime_resume(struct device *dev) -{ - return pm_generic_runtime_resume(dev); -}; - -int __weak platform_pm_runtime_idle(struct device *dev) -{ - return pm_generic_runtime_idle(dev); -}; - -#else /* !CONFIG_PM_RUNTIME */ - -#define platform_pm_runtime_suspend NULL -#define platform_pm_runtime_resume NULL -#define platform_pm_runtime_idle NULL - -#endif /* !CONFIG_PM_RUNTIME */ +#endif /* CONFIG_HIBERNATE_CALLBACKS */ static const struct dev_pm_ops platform_dev_pm_ops = { - .prepare = platform_pm_prepare, - .complete = platform_pm_complete, - .suspend = platform_pm_suspend, - .resume = platform_pm_resume, - .freeze = platform_pm_freeze, - .thaw = platform_pm_thaw, - .poweroff = platform_pm_poweroff, - .restore = platform_pm_restore, - .suspend_noirq = platform_pm_suspend_noirq, - .resume_noirq = platform_pm_resume_noirq, - .freeze_noirq = platform_pm_freeze_noirq, - .thaw_noirq = platform_pm_thaw_noirq, - .poweroff_noirq = platform_pm_poweroff_noirq, - .restore_noirq = platform_pm_restore_noirq, - .runtime_suspend = platform_pm_runtime_suspend, - .runtime_resume = platform_pm_runtime_resume, - .runtime_idle = platform_pm_runtime_idle, + .runtime_suspend = pm_generic_runtime_suspend, + .runtime_resume = pm_generic_runtime_resume, + .runtime_idle = pm_generic_runtime_idle, + USE_PLATFORM_PM_SLEEP_OPS }; struct bus_type platform_bus_type = { @@ -976,41 +916,6 @@ struct bus_type platform_bus_type = { }; EXPORT_SYMBOL_GPL(platform_bus_type); -/** - * platform_bus_get_pm_ops() - return pointer to busses dev_pm_ops - * - * This function can be used by platform code to get the current - * set of dev_pm_ops functions used by the platform_bus_type. - */ -const struct dev_pm_ops * __init platform_bus_get_pm_ops(void) -{ - return platform_bus_type.pm; -} - -/** - * platform_bus_set_pm_ops() - update dev_pm_ops for the platform_bus_type - * - * @pm: pointer to new dev_pm_ops struct to be used for platform_bus_type - * - * Platform code can override the dev_pm_ops methods of - * platform_bus_type by using this function. 
It is expected that - * platform code will first do a platform_bus_get_pm_ops(), then - * kmemdup it, then customize selected methods and pass a pointer to - * the new struct dev_pm_ops to this function. - * - * Since platform-specific code is customizing methods for *all* - * devices (not just platform-specific devices) it is expected that - * any custom overrides of these functions will keep existing behavior - * and simply extend it. For example, any customization of the - * runtime PM methods should continue to call the pm_generic_* - * functions as the default ones do in addition to the - * platform-specific behavior. - */ -void __init platform_bus_set_pm_ops(const struct dev_pm_ops *pm) -{ - platform_bus_type.pm = pm; -} - int __init platform_bus_init(void) { int error; diff --git a/trunk/drivers/base/power/Makefile b/trunk/drivers/base/power/Makefile index 118c1b92a511..3647e114d0e7 100644 --- a/trunk/drivers/base/power/Makefile +++ b/trunk/drivers/base/power/Makefile @@ -3,6 +3,6 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o obj-$(CONFIG_PM_RUNTIME) += runtime.o obj-$(CONFIG_PM_TRACE_RTC) += trace.o obj-$(CONFIG_PM_OPP) += opp.o +obj-$(CONFIG_HAVE_CLK) += clock_ops.o -ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG -ccflags-$(CONFIG_PM_VERBOSE) += -DDEBUG +ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG \ No newline at end of file diff --git a/trunk/drivers/base/power/clock_ops.c b/trunk/drivers/base/power/clock_ops.c new file mode 100644 index 000000000000..c0dd09df7be8 --- /dev/null +++ b/trunk/drivers/base/power/clock_ops.c @@ -0,0 +1,431 @@ +/* + * drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks + * + * Copyright (c) 2011 Rafael J. Wysocki , Renesas Electronics Corp. + * + * This file is released under the GPLv2. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_PM_RUNTIME + +struct pm_runtime_clk_data { + struct list_head clock_list; + struct mutex lock; +}; + +enum pce_status { + PCE_STATUS_NONE = 0, + PCE_STATUS_ACQUIRED, + PCE_STATUS_ENABLED, + PCE_STATUS_ERROR, +}; + +struct pm_clock_entry { + struct list_head node; + char *con_id; + struct clk *clk; + enum pce_status status; +}; + +static struct pm_runtime_clk_data *__to_prd(struct device *dev) +{ + return dev ? dev->power.subsys_data : NULL; +} + +/** + * pm_runtime_clk_add - Start using a device clock for runtime PM. + * @dev: Device whose clock is going to be used for runtime PM. + * @con_id: Connection ID of the clock. + * + * Add the clock represented by @con_id to the list of clocks used for + * the runtime PM of @dev. + */ +int pm_runtime_clk_add(struct device *dev, const char *con_id) +{ + struct pm_runtime_clk_data *prd = __to_prd(dev); + struct pm_clock_entry *ce; + + if (!prd) + return -EINVAL; + + ce = kzalloc(sizeof(*ce), GFP_KERNEL); + if (!ce) { + dev_err(dev, "Not enough memory for clock entry.\n"); + return -ENOMEM; + } + + if (con_id) { + ce->con_id = kstrdup(con_id, GFP_KERNEL); + if (!ce->con_id) { + dev_err(dev, + "Not enough memory for clock connection ID.\n"); + kfree(ce); + return -ENOMEM; + } + } + + mutex_lock(&prd->lock); + list_add_tail(&ce->node, &prd->clock_list); + mutex_unlock(&prd->lock); + return 0; +} + +/** + * __pm_runtime_clk_remove - Destroy runtime PM clock entry. + * @ce: Runtime PM clock entry to destroy. + * + * This routine must be called under the mutex protecting the runtime PM list + * of clocks corresponding the the @ce's device. 
+ */ +static void __pm_runtime_clk_remove(struct pm_clock_entry *ce) +{ + if (!ce) + return; + + list_del(&ce->node); + + if (ce->status < PCE_STATUS_ERROR) { + if (ce->status == PCE_STATUS_ENABLED) + clk_disable(ce->clk); + + if (ce->status >= PCE_STATUS_ACQUIRED) + clk_put(ce->clk); + } + + if (ce->con_id) + kfree(ce->con_id); + + kfree(ce); +} + +/** + * pm_runtime_clk_remove - Stop using a device clock for runtime PM. + * @dev: Device whose clock should not be used for runtime PM any more. + * @con_id: Connection ID of the clock. + * + * Remove the clock represented by @con_id from the list of clocks used for + * the runtime PM of @dev. + */ +void pm_runtime_clk_remove(struct device *dev, const char *con_id) +{ + struct pm_runtime_clk_data *prd = __to_prd(dev); + struct pm_clock_entry *ce; + + if (!prd) + return; + + mutex_lock(&prd->lock); + + list_for_each_entry(ce, &prd->clock_list, node) { + if (!con_id && !ce->con_id) { + __pm_runtime_clk_remove(ce); + break; + } else if (!con_id || !ce->con_id) { + continue; + } else if (!strcmp(con_id, ce->con_id)) { + __pm_runtime_clk_remove(ce); + break; + } + } + + mutex_unlock(&prd->lock); +} + +/** + * pm_runtime_clk_init - Initialize a device's list of runtime PM clocks. + * @dev: Device to initialize the list of runtime PM clocks for. + * + * Allocate a struct pm_runtime_clk_data object, initialize its lock member and + * make the @dev's power.subsys_data field point to it. + */ +int pm_runtime_clk_init(struct device *dev) +{ + struct pm_runtime_clk_data *prd; + + prd = kzalloc(sizeof(*prd), GFP_KERNEL); + if (!prd) { + dev_err(dev, "Not enough memory fo runtime PM data.\n"); + return -ENOMEM; + } + + INIT_LIST_HEAD(&prd->clock_list); + mutex_init(&prd->lock); + dev->power.subsys_data = prd; + return 0; +} + +/** + * pm_runtime_clk_destroy - Destroy a device's list of runtime PM clocks. + * @dev: Device to destroy the list of runtime PM clocks for. + * + * Clear the @dev's power.subsys_data field, remove the list of clock entries + * from the struct pm_runtime_clk_data object pointed to by it before and free + * that object. + */ +void pm_runtime_clk_destroy(struct device *dev) +{ + struct pm_runtime_clk_data *prd = __to_prd(dev); + struct pm_clock_entry *ce, *c; + + if (!prd) + return; + + dev->power.subsys_data = NULL; + + mutex_lock(&prd->lock); + + list_for_each_entry_safe_reverse(ce, c, &prd->clock_list, node) + __pm_runtime_clk_remove(ce); + + mutex_unlock(&prd->lock); + + kfree(prd); +} + +/** + * pm_runtime_clk_acquire - Acquire a device clock. + * @dev: Device whose clock is to be acquired. + * @con_id: Connection ID of the clock. + */ +static void pm_runtime_clk_acquire(struct device *dev, + struct pm_clock_entry *ce) +{ + ce->clk = clk_get(dev, ce->con_id); + if (IS_ERR(ce->clk)) { + ce->status = PCE_STATUS_ERROR; + } else { + ce->status = PCE_STATUS_ACQUIRED; + dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id); + } +} + +/** + * pm_runtime_clk_suspend - Disable clocks in a device's runtime PM clock list. + * @dev: Device to disable the clocks for. 
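Taken together, pm_runtime_clk_init(), pm_runtime_clk_add() and pm_runtime_clk_destroy() give a subsystem a per-device list of clocks that runtime PM can gate. A rough usage sketch, not part of the patch; the mydev_* helpers and the "fck" connection ID are illustrative only:

static int mydev_pm_attach(struct device *dev)
{
	int error;

	/* Allocate dev->power.subsys_data and the (empty) clock list. */
	error = pm_runtime_clk_init(dev);
	if (error)
		return error;

	/* Track the device's functional clock for runtime PM. */
	error = pm_runtime_clk_add(dev, "fck");
	if (error)
		pm_runtime_clk_destroy(dev);

	return error;
}

static void mydev_pm_detach(struct device *dev)
{
	/* Drops every entry, disabling and putting the clocks as needed. */
	pm_runtime_clk_destroy(dev);
}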
+ */ +int pm_runtime_clk_suspend(struct device *dev) +{ + struct pm_runtime_clk_data *prd = __to_prd(dev); + struct pm_clock_entry *ce; + + dev_dbg(dev, "%s()\n", __func__); + + if (!prd) + return 0; + + mutex_lock(&prd->lock); + + list_for_each_entry_reverse(ce, &prd->clock_list, node) { + if (ce->status == PCE_STATUS_NONE) + pm_runtime_clk_acquire(dev, ce); + + if (ce->status < PCE_STATUS_ERROR) { + clk_disable(ce->clk); + ce->status = PCE_STATUS_ACQUIRED; + } + } + + mutex_unlock(&prd->lock); + + return 0; +} + +/** + * pm_runtime_clk_resume - Enable clocks in a device's runtime PM clock list. + * @dev: Device to enable the clocks for. + */ +int pm_runtime_clk_resume(struct device *dev) +{ + struct pm_runtime_clk_data *prd = __to_prd(dev); + struct pm_clock_entry *ce; + + dev_dbg(dev, "%s()\n", __func__); + + if (!prd) + return 0; + + mutex_lock(&prd->lock); + + list_for_each_entry(ce, &prd->clock_list, node) { + if (ce->status == PCE_STATUS_NONE) + pm_runtime_clk_acquire(dev, ce); + + if (ce->status < PCE_STATUS_ERROR) { + clk_enable(ce->clk); + ce->status = PCE_STATUS_ENABLED; + } + } + + mutex_unlock(&prd->lock); + + return 0; +} + +/** + * pm_runtime_clk_notify - Notify routine for device addition and removal. + * @nb: Notifier block object this function is a member of. + * @action: Operation being carried out by the caller. + * @data: Device the routine is being run for. + * + * For this function to work, @nb must be a member of an object of type + * struct pm_clk_notifier_block containing all of the requisite data. + * Specifically, the pwr_domain member of that object is copied to the device's + * pwr_domain field and its con_ids member is used to populate the device's list + * of runtime PM clocks, depending on @action. + * + * If the device's pwr_domain field is already populated with a value different + * from the one stored in the struct pm_clk_notifier_block object, the function + * does nothing. + */ +static int pm_runtime_clk_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct pm_clk_notifier_block *clknb; + struct device *dev = data; + char *con_id; + int error; + + dev_dbg(dev, "%s() %ld\n", __func__, action); + + clknb = container_of(nb, struct pm_clk_notifier_block, nb); + + switch (action) { + case BUS_NOTIFY_ADD_DEVICE: + if (dev->pwr_domain) + break; + + error = pm_runtime_clk_init(dev); + if (error) + break; + + dev->pwr_domain = clknb->pwr_domain; + if (clknb->con_ids[0]) { + for (con_id = clknb->con_ids[0]; *con_id; con_id++) + pm_runtime_clk_add(dev, con_id); + } else { + pm_runtime_clk_add(dev, NULL); + } + + break; + case BUS_NOTIFY_DEL_DEVICE: + if (dev->pwr_domain != clknb->pwr_domain) + break; + + dev->pwr_domain = NULL; + pm_runtime_clk_destroy(dev); + break; + } + + return 0; +} + +#else /* !CONFIG_PM_RUNTIME */ + +/** + * enable_clock - Enable a device clock. + * @dev: Device whose clock is to be enabled. + * @con_id: Connection ID of the clock. + */ +static void enable_clock(struct device *dev, const char *con_id) +{ + struct clk *clk; + + clk = clk_get(dev, con_id); + if (!IS_ERR(clk)) { + clk_enable(clk); + clk_put(clk); + dev_info(dev, "Runtime PM disabled, clock forced on.\n"); + } +} + +/** + * disable_clock - Disable a device clock. + * @dev: Device whose clock is to be disabled. + * @con_id: Connection ID of the clock. 
+ */ +static void disable_clock(struct device *dev, const char *con_id) +{ + struct clk *clk; + + clk = clk_get(dev, con_id); + if (!IS_ERR(clk)) { + clk_disable(clk); + clk_put(clk); + dev_info(dev, "Runtime PM disabled, clock forced off.\n"); + } +} + +/** + * pm_runtime_clk_notify - Notify routine for device addition and removal. + * @nb: Notifier block object this function is a member of. + * @action: Operation being carried out by the caller. + * @data: Device the routine is being run for. + * + * For this function to work, @nb must be a member of an object of type + * struct pm_clk_notifier_block containing all of the requisite data. + * Specifically, the con_ids member of that object is used to enable or disable + * the device's clocks, depending on @action. + */ +static int pm_runtime_clk_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct pm_clk_notifier_block *clknb; + struct device *dev = data; + char *con_id; + + dev_dbg(dev, "%s() %ld\n", __func__, action); + + clknb = container_of(nb, struct pm_clk_notifier_block, nb); + + switch (action) { + case BUS_NOTIFY_ADD_DEVICE: + if (clknb->con_ids[0]) { + for (con_id = clknb->con_ids[0]; *con_id; con_id++) + enable_clock(dev, con_id); + } else { + enable_clock(dev, NULL); + } + break; + case BUS_NOTIFY_DEL_DEVICE: + if (clknb->con_ids[0]) { + for (con_id = clknb->con_ids[0]; *con_id; con_id++) + disable_clock(dev, con_id); + } else { + disable_clock(dev, NULL); + } + break; + } + + return 0; +} + +#endif /* !CONFIG_PM_RUNTIME */ + +/** + * pm_runtime_clk_add_notifier - Add bus type notifier for runtime PM clocks. + * @bus: Bus type to add the notifier to. + * @clknb: Notifier to be added to the given bus type. + * + * The nb member of @clknb is not expected to be initialized and its + * notifier_call member will be replaced with pm_runtime_clk_notify(). However, + * the remaining members of @clknb should be populated prior to calling this + * routine. + */ +void pm_runtime_clk_add_notifier(struct bus_type *bus, + struct pm_clk_notifier_block *clknb) +{ + if (!bus || !clknb) + return; + + clknb->nb.notifier_call = pm_runtime_clk_notify; + bus_register_notifier(bus, &clknb->nb); +} diff --git a/trunk/drivers/base/power/generic_ops.c b/trunk/drivers/base/power/generic_ops.c index 42f97f925629..cb3bb368681c 100644 --- a/trunk/drivers/base/power/generic_ops.c +++ b/trunk/drivers/base/power/generic_ops.c @@ -73,6 +73,23 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_resume); #endif /* CONFIG_PM_RUNTIME */ #ifdef CONFIG_PM_SLEEP +/** + * pm_generic_prepare - Generic routine preparing a device for power transition. + * @dev: Device to prepare. + * + * Prepare a device for a system-wide power transition. + */ +int pm_generic_prepare(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (drv && drv->pm && drv->pm->prepare) + ret = drv->pm->prepare(dev); + + return ret; +} + /** * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback. * @dev: Device to handle. @@ -213,16 +230,38 @@ int pm_generic_restore(struct device *dev) return __pm_generic_resume(dev, PM_EVENT_RESTORE); } EXPORT_SYMBOL_GPL(pm_generic_restore); + +/** + * pm_generic_complete - Generic routine competing a device power transition. + * @dev: Device to handle. + * + * Complete a device power transition during a system-wide power transition. 
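The two pm_runtime_clk_notify() variants above are meant to be hooked up through pm_runtime_clk_add_notifier(), so that every device added to a bus automatically gets a power domain whose runtime callbacks gate its clocks (or, without CONFIG_PM_RUNTIME, simply has those clocks forced on). A sketch of that wiring; the struct layouts are assumed from the way the code above uses them, and all my_* names are illustrative:

static struct dev_power_domain my_clk_domain = {
	.ops = {
		.runtime_suspend = pm_runtime_clk_suspend,
		.runtime_resume  = pm_runtime_clk_resume,
		.runtime_idle    = pm_generic_runtime_idle,
	},
};

static struct pm_clk_notifier_block my_clk_notifier = {
	.pwr_domain = &my_clk_domain,
	/* NULL means "the device's default clock"; specific connection
	 * IDs could be listed here instead. */
	.con_ids = { NULL, },
};

static int __init my_platform_pm_init(void)
{
	/* From now on, devices added to the platform bus get their clocks
	 * tracked and gated/ungated by runtime PM automatically. */
	pm_runtime_clk_add_notifier(&platform_bus_type, &my_clk_notifier);
	return 0;
}
core_initcall(my_platform_pm_init);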
+ */ +void pm_generic_complete(struct device *dev) +{ + struct device_driver *drv = dev->driver; + + if (drv && drv->pm && drv->pm->complete) + drv->pm->complete(dev); + + /* + * Let runtime PM try to suspend devices that haven't been in use before + * going into the system-wide sleep state we're resuming from. + */ + pm_runtime_idle(dev); +} #endif /* CONFIG_PM_SLEEP */ struct dev_pm_ops generic_subsys_pm_ops = { #ifdef CONFIG_PM_SLEEP + .prepare = pm_generic_prepare, .suspend = pm_generic_suspend, .resume = pm_generic_resume, .freeze = pm_generic_freeze, .thaw = pm_generic_thaw, .poweroff = pm_generic_poweroff, .restore = pm_generic_restore, + .complete = pm_generic_complete, #endif #ifdef CONFIG_PM_RUNTIME .runtime_suspend = pm_generic_runtime_suspend, diff --git a/trunk/drivers/base/power/main.c b/trunk/drivers/base/power/main.c index 052dc53eef38..aa6320207745 100644 --- a/trunk/drivers/base/power/main.c +++ b/trunk/drivers/base/power/main.c @@ -63,6 +63,7 @@ void device_pm_init(struct device *dev) dev->power.wakeup = NULL; spin_lock_init(&dev->power.lock); pm_runtime_init(dev); + INIT_LIST_HEAD(&dev->power.entry); } /** @@ -233,7 +234,7 @@ static int pm_op(struct device *dev, } break; #endif /* CONFIG_SUSPEND */ -#ifdef CONFIG_HIBERNATION +#ifdef CONFIG_HIBERNATE_CALLBACKS case PM_EVENT_FREEZE: case PM_EVENT_QUIESCE: if (ops->freeze) { @@ -260,7 +261,7 @@ static int pm_op(struct device *dev, suspend_report_result(ops->restore, error); } break; -#endif /* CONFIG_HIBERNATION */ +#endif /* CONFIG_HIBERNATE_CALLBACKS */ default: error = -EINVAL; } @@ -308,7 +309,7 @@ static int pm_noirq_op(struct device *dev, } break; #endif /* CONFIG_SUSPEND */ -#ifdef CONFIG_HIBERNATION +#ifdef CONFIG_HIBERNATE_CALLBACKS case PM_EVENT_FREEZE: case PM_EVENT_QUIESCE: if (ops->freeze_noirq) { @@ -335,7 +336,7 @@ static int pm_noirq_op(struct device *dev, suspend_report_result(ops->restore_noirq, error); } break; -#endif /* CONFIG_HIBERNATION */ +#endif /* CONFIG_HIBERNATE_CALLBACKS */ default: error = -EINVAL; } @@ -425,10 +426,8 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) if (dev->pwr_domain) { pm_dev_dbg(dev, state, "EARLY power domain "); - pm_noirq_op(dev, &dev->pwr_domain->ops, state); - } - - if (dev->type && dev->type->pm) { + error = pm_noirq_op(dev, &dev->pwr_domain->ops, state); + } else if (dev->type && dev->type->pm) { pm_dev_dbg(dev, state, "EARLY type "); error = pm_noirq_op(dev, dev->type->pm, state); } else if (dev->class && dev->class->pm) { @@ -516,7 +515,8 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) if (dev->pwr_domain) { pm_dev_dbg(dev, state, "power domain "); - pm_op(dev, &dev->pwr_domain->ops, state); + error = pm_op(dev, &dev->pwr_domain->ops, state); + goto End; } if (dev->type && dev->type->pm) { @@ -579,11 +579,13 @@ static bool is_async(struct device *dev) * Execute the appropriate "resume" callback for all devices whose status * indicates that they are suspended. 
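With .prepare and .complete filled in, generic_subsys_pm_ops is now a full set of system-sleep callbacks, and (as the comment above notes) its complete() step lets runtime PM re-suspend devices that were idle before the sleep transition. A subsystem with no special needs could simply point at it; illustrative only:

static struct bus_type my_simple_bus = {
	.name = "my_simple_bus",
	/* Generic suspend/resume/freeze/thaw/poweroff/restore plus
	 * prepare/complete, no subsystem-specific code needed. */
	.pm   = &generic_subsys_pm_ops,
};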
*/ -static void dpm_resume(pm_message_t state) +void dpm_resume(pm_message_t state) { struct device *dev; ktime_t starttime = ktime_get(); + might_sleep(); + mutex_lock(&dpm_list_mtx); pm_transition = state; async_error = 0; @@ -628,12 +630,11 @@ static void device_complete(struct device *dev, pm_message_t state) { device_lock(dev); - if (dev->pwr_domain && dev->pwr_domain->ops.complete) { + if (dev->pwr_domain) { pm_dev_dbg(dev, state, "completing power domain "); - dev->pwr_domain->ops.complete(dev); - } - - if (dev->type && dev->type->pm) { + if (dev->pwr_domain->ops.complete) + dev->pwr_domain->ops.complete(dev); + } else if (dev->type && dev->type->pm) { pm_dev_dbg(dev, state, "completing type "); if (dev->type->pm->complete) dev->type->pm->complete(dev); @@ -657,10 +658,12 @@ static void device_complete(struct device *dev, pm_message_t state) * Execute the ->complete() callbacks for all devices whose PM status is not * DPM_ON (this allows new devices to be registered). */ -static void dpm_complete(pm_message_t state) +void dpm_complete(pm_message_t state) { struct list_head list; + might_sleep(); + INIT_LIST_HEAD(&list); mutex_lock(&dpm_list_mtx); while (!list_empty(&dpm_prepared_list)) { @@ -689,7 +692,6 @@ static void dpm_complete(pm_message_t state) */ void dpm_resume_end(pm_message_t state) { - might_sleep(); dpm_resume(state); dpm_complete(state); } @@ -731,7 +733,12 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) { int error; - if (dev->type && dev->type->pm) { + if (dev->pwr_domain) { + pm_dev_dbg(dev, state, "LATE power domain "); + error = pm_noirq_op(dev, &dev->pwr_domain->ops, state); + if (error) + return error; + } else if (dev->type && dev->type->pm) { pm_dev_dbg(dev, state, "LATE type "); error = pm_noirq_op(dev, dev->type->pm, state); if (error) @@ -748,11 +755,6 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) return error; } - if (dev->pwr_domain) { - pm_dev_dbg(dev, state, "LATE power domain "); - pm_noirq_op(dev, &dev->pwr_domain->ops, state); - } - return 0; } @@ -840,21 +842,27 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) goto End; } + if (dev->pwr_domain) { + pm_dev_dbg(dev, state, "power domain "); + error = pm_op(dev, &dev->pwr_domain->ops, state); + goto End; + } + if (dev->type && dev->type->pm) { pm_dev_dbg(dev, state, "type "); error = pm_op(dev, dev->type->pm, state); - goto Domain; + goto End; } if (dev->class) { if (dev->class->pm) { pm_dev_dbg(dev, state, "class "); error = pm_op(dev, dev->class->pm, state); - goto Domain; + goto End; } else if (dev->class->suspend) { pm_dev_dbg(dev, state, "legacy class "); error = legacy_suspend(dev, state, dev->class->suspend); - goto Domain; + goto End; } } @@ -868,12 +876,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) } } - Domain: - if (!error && dev->pwr_domain) { - pm_dev_dbg(dev, state, "power domain "); - pm_op(dev, &dev->pwr_domain->ops, state); - } - End: device_unlock(dev); complete_all(&dev->power.completion); @@ -913,11 +915,13 @@ static int device_suspend(struct device *dev) * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. * @state: PM transition of the system being carried out. 
*/ -static int dpm_suspend(pm_message_t state) +int dpm_suspend(pm_message_t state) { ktime_t starttime = ktime_get(); int error = 0; + might_sleep(); + mutex_lock(&dpm_list_mtx); pm_transition = state; async_error = 0; @@ -964,7 +968,14 @@ static int device_prepare(struct device *dev, pm_message_t state) device_lock(dev); - if (dev->type && dev->type->pm) { + if (dev->pwr_domain) { + pm_dev_dbg(dev, state, "preparing power domain "); + if (dev->pwr_domain->ops.prepare) + error = dev->pwr_domain->ops.prepare(dev); + suspend_report_result(dev->pwr_domain->ops.prepare, error); + if (error) + goto End; + } else if (dev->type && dev->type->pm) { pm_dev_dbg(dev, state, "preparing type "); if (dev->type->pm->prepare) error = dev->type->pm->prepare(dev); @@ -983,13 +994,6 @@ static int device_prepare(struct device *dev, pm_message_t state) if (dev->bus->pm->prepare) error = dev->bus->pm->prepare(dev); suspend_report_result(dev->bus->pm->prepare, error); - if (error) - goto End; - } - - if (dev->pwr_domain && dev->pwr_domain->ops.prepare) { - pm_dev_dbg(dev, state, "preparing power domain "); - dev->pwr_domain->ops.prepare(dev); } End: @@ -1004,10 +1008,12 @@ static int device_prepare(struct device *dev, pm_message_t state) * * Execute the ->prepare() callback(s) for all devices. */ -static int dpm_prepare(pm_message_t state) +int dpm_prepare(pm_message_t state) { int error = 0; + might_sleep(); + mutex_lock(&dpm_list_mtx); while (!list_empty(&dpm_list)) { struct device *dev = to_device(dpm_list.next); @@ -1056,7 +1062,6 @@ int dpm_suspend_start(pm_message_t state) { int error; - might_sleep(); error = dpm_prepare(state); if (!error) error = dpm_suspend(state); diff --git a/trunk/drivers/base/power/runtime.c b/trunk/drivers/base/power/runtime.c index 3172c60d23a9..0d4587b15c55 100644 --- a/trunk/drivers/base/power/runtime.c +++ b/trunk/drivers/base/power/runtime.c @@ -168,7 +168,6 @@ static int rpm_check_suspend_allowed(struct device *dev) static int rpm_idle(struct device *dev, int rpmflags) { int (*callback)(struct device *); - int (*domain_callback)(struct device *); int retval; retval = rpm_check_suspend_allowed(dev); @@ -214,7 +213,9 @@ static int rpm_idle(struct device *dev, int rpmflags) dev->power.idle_notification = true; - if (dev->type && dev->type->pm) + if (dev->pwr_domain) + callback = dev->pwr_domain->ops.runtime_idle; + else if (dev->type && dev->type->pm) callback = dev->type->pm->runtime_idle; else if (dev->class && dev->class->pm) callback = dev->class->pm->runtime_idle; @@ -223,19 +224,10 @@ static int rpm_idle(struct device *dev, int rpmflags) else callback = NULL; - if (dev->pwr_domain) - domain_callback = dev->pwr_domain->ops.runtime_idle; - else - domain_callback = NULL; - - if (callback || domain_callback) { + if (callback) { spin_unlock_irq(&dev->power.lock); - if (domain_callback) - retval = domain_callback(dev); - - if (!retval && callback) - callback(dev); + callback(dev); spin_lock_irq(&dev->power.lock); } @@ -382,7 +374,9 @@ static int rpm_suspend(struct device *dev, int rpmflags) __update_runtime_status(dev, RPM_SUSPENDING); - if (dev->type && dev->type->pm) + if (dev->pwr_domain) + callback = dev->pwr_domain->ops.runtime_suspend; + else if (dev->type && dev->type->pm) callback = dev->type->pm->runtime_suspend; else if (dev->class && dev->class->pm) callback = dev->class->pm->runtime_suspend; @@ -400,8 +394,6 @@ static int rpm_suspend(struct device *dev, int rpmflags) else pm_runtime_cancel_pending(dev); } else { - if (dev->pwr_domain) - 
rpm_callback(dev->pwr_domain->ops.runtime_suspend, dev); no_callback: __update_runtime_status(dev, RPM_SUSPENDED); pm_runtime_deactivate_timer(dev); @@ -582,9 +574,8 @@ static int rpm_resume(struct device *dev, int rpmflags) __update_runtime_status(dev, RPM_RESUMING); if (dev->pwr_domain) - rpm_callback(dev->pwr_domain->ops.runtime_resume, dev); - - if (dev->type && dev->type->pm) + callback = dev->pwr_domain->ops.runtime_resume; + else if (dev->type && dev->type->pm) callback = dev->type->pm->runtime_resume; else if (dev->class && dev->class->pm) callback = dev->class->pm->runtime_resume; diff --git a/trunk/drivers/base/power/sysfs.c b/trunk/drivers/base/power/sysfs.c index fff49bee781d..a9f5b8979611 100644 --- a/trunk/drivers/base/power/sysfs.c +++ b/trunk/drivers/base/power/sysfs.c @@ -212,8 +212,9 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev, static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, autosuspend_delay_ms_store); -#endif +#endif /* CONFIG_PM_RUNTIME */ +#ifdef CONFIG_PM_SLEEP static ssize_t wake_show(struct device * dev, struct device_attribute *attr, char * buf) { @@ -248,7 +249,6 @@ wake_store(struct device * dev, struct device_attribute *attr, static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); -#ifdef CONFIG_PM_SLEEP static ssize_t wakeup_count_show(struct device *dev, struct device_attribute *attr, char *buf) { diff --git a/trunk/drivers/base/power/wakeup.c b/trunk/drivers/base/power/wakeup.c index 4573c83df6dd..84f7c7d5a098 100644 --- a/trunk/drivers/base/power/wakeup.c +++ b/trunk/drivers/base/power/wakeup.c @@ -110,7 +110,6 @@ void wakeup_source_add(struct wakeup_source *ws) spin_lock_irq(&events_lock); list_add_rcu(&ws->entry, &wakeup_sources); spin_unlock_irq(&events_lock); - synchronize_rcu(); } EXPORT_SYMBOL_GPL(wakeup_source_add); @@ -258,7 +257,7 @@ void device_set_wakeup_capable(struct device *dev, bool capable) if (!!dev->power.can_wakeup == !!capable) return; - if (device_is_registered(dev)) { + if (device_is_registered(dev) && !list_empty(&dev->power.entry)) { if (capable) { if (wakeup_sysfs_add(dev)) return; diff --git a/trunk/drivers/base/sys.c b/trunk/drivers/base/sys.c index acde9b5ee131..9dff77bfe1e3 100644 --- a/trunk/drivers/base/sys.c +++ b/trunk/drivers/base/sys.c @@ -328,203 +328,8 @@ void sysdev_unregister(struct sys_device *sysdev) kobject_put(&sysdev->kobj); } - -#ifndef CONFIG_ARCH_NO_SYSDEV_OPS -/** - * sysdev_shutdown - Shut down all system devices. - * - * Loop over each class of system devices, and the devices in each - * of those classes. For each device, we call the shutdown method for - * each driver registered for the device - the auxiliaries, - * and the class driver. - * - * Note: The list is iterated in reverse order, so that we shut down - * child devices before we shut down their parents. The list ordering - * is guaranteed by virtue of the fact that child devices are registered - * after their parents. 
- */ -void sysdev_shutdown(void) -{ - struct sysdev_class *cls; - - pr_debug("Shutting Down System Devices\n"); - - mutex_lock(&sysdev_drivers_lock); - list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) { - struct sys_device *sysdev; - - pr_debug("Shutting down type '%s':\n", - kobject_name(&cls->kset.kobj)); - - list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) { - struct sysdev_driver *drv; - pr_debug(" %s\n", kobject_name(&sysdev->kobj)); - - /* Call auxiliary drivers first */ - list_for_each_entry(drv, &cls->drivers, entry) { - if (drv->shutdown) - drv->shutdown(sysdev); - } - - /* Now call the generic one */ - if (cls->shutdown) - cls->shutdown(sysdev); - } - } - mutex_unlock(&sysdev_drivers_lock); -} - -static void __sysdev_resume(struct sys_device *dev) -{ - struct sysdev_class *cls = dev->cls; - struct sysdev_driver *drv; - - /* First, call the class-specific one */ - if (cls->resume) - cls->resume(dev); - WARN_ONCE(!irqs_disabled(), - "Interrupts enabled after %pF\n", cls->resume); - - /* Call auxiliary drivers next. */ - list_for_each_entry(drv, &cls->drivers, entry) { - if (drv->resume) - drv->resume(dev); - WARN_ONCE(!irqs_disabled(), - "Interrupts enabled after %pF\n", drv->resume); - } -} - -/** - * sysdev_suspend - Suspend all system devices. - * @state: Power state to enter. - * - * We perform an almost identical operation as sysdev_shutdown() - * above, though calling ->suspend() instead. Interrupts are disabled - * when this called. Devices are responsible for both saving state and - * quiescing or powering down the device. - * - * This is only called by the device PM core, so we let them handle - * all synchronization. - */ -int sysdev_suspend(pm_message_t state) -{ - struct sysdev_class *cls; - struct sys_device *sysdev, *err_dev; - struct sysdev_driver *drv, *err_drv; - int ret; - - pr_debug("Checking wake-up interrupts\n"); - - /* Return error code if there are any wake-up interrupts pending */ - ret = check_wakeup_irqs(); - if (ret) - return ret; - - WARN_ONCE(!irqs_disabled(), - "Interrupts enabled while suspending system devices\n"); - - pr_debug("Suspending System Devices\n"); - - list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) { - pr_debug("Suspending type '%s':\n", - kobject_name(&cls->kset.kobj)); - - list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) { - pr_debug(" %s\n", kobject_name(&sysdev->kobj)); - - /* Call auxiliary drivers first */ - list_for_each_entry(drv, &cls->drivers, entry) { - if (drv->suspend) { - ret = drv->suspend(sysdev, state); - if (ret) - goto aux_driver; - } - WARN_ONCE(!irqs_disabled(), - "Interrupts enabled after %pF\n", - drv->suspend); - } - - /* Now call the generic one */ - if (cls->suspend) { - ret = cls->suspend(sysdev, state); - if (ret) - goto cls_driver; - WARN_ONCE(!irqs_disabled(), - "Interrupts enabled after %pF\n", - cls->suspend); - } - } - } - return 0; - /* resume current sysdev */ -cls_driver: - drv = NULL; - printk(KERN_ERR "Class suspend failed for %s: %d\n", - kobject_name(&sysdev->kobj), ret); - -aux_driver: - if (drv) - printk(KERN_ERR "Class driver suspend failed for %s: %d\n", - kobject_name(&sysdev->kobj), ret); - list_for_each_entry(err_drv, &cls->drivers, entry) { - if (err_drv == drv) - break; - if (err_drv->resume) - err_drv->resume(sysdev); - } - - /* resume other sysdevs in current class */ - list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) { - if (err_dev == sysdev) - break; - pr_debug(" %s\n", kobject_name(&err_dev->kobj)); - 
__sysdev_resume(err_dev); - } - - /* resume other classes */ - list_for_each_entry_continue(cls, &system_kset->list, kset.kobj.entry) { - list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) { - pr_debug(" %s\n", kobject_name(&err_dev->kobj)); - __sysdev_resume(err_dev); - } - } - return ret; -} -EXPORT_SYMBOL_GPL(sysdev_suspend); - -/** - * sysdev_resume - Bring system devices back to life. - * - * Similar to sysdev_suspend(), but we iterate the list forwards - * to guarantee that parent devices are resumed before their children. - * - * Note: Interrupts are disabled when called. - */ -int sysdev_resume(void) -{ - struct sysdev_class *cls; - - WARN_ONCE(!irqs_disabled(), - "Interrupts enabled while resuming system devices\n"); - - pr_debug("Resuming System Devices\n"); - - list_for_each_entry(cls, &system_kset->list, kset.kobj.entry) { - struct sys_device *sysdev; - - pr_debug("Resuming type '%s':\n", - kobject_name(&cls->kset.kobj)); - - list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) { - pr_debug(" %s\n", kobject_name(&sysdev->kobj)); - - __sysdev_resume(sysdev); - } - } - return 0; -} -EXPORT_SYMBOL_GPL(sysdev_resume); -#endif /* CONFIG_ARCH_NO_SYSDEV_OPS */ +EXPORT_SYMBOL_GPL(sysdev_register); +EXPORT_SYMBOL_GPL(sysdev_unregister); int __init system_bus_init(void) { @@ -534,9 +339,6 @@ int __init system_bus_init(void) return 0; } -EXPORT_SYMBOL_GPL(sysdev_register); -EXPORT_SYMBOL_GPL(sysdev_unregister); - #define to_ext_attr(x) container_of(x, struct sysdev_ext_attribute, attr) ssize_t sysdev_store_ulong(struct sys_device *sysdev, diff --git a/trunk/drivers/base/syscore.c b/trunk/drivers/base/syscore.c index 90af2943f9e4..c126db3cb7d1 100644 --- a/trunk/drivers/base/syscore.c +++ b/trunk/drivers/base/syscore.c @@ -73,6 +73,7 @@ int syscore_suspend(void) return ret; } +EXPORT_SYMBOL_GPL(syscore_suspend); /** * syscore_resume - Execute all the registered system core resume callbacks. 
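The class-wide sysdev shutdown/suspend/resume paths removed above are superseded by syscore operations, whose entry points are exported in the syscore.c hunk here. Code for a core system facility would now register a struct syscore_ops instead of a sysdev_driver; a rough sketch, assuming the syscore_ops API declared in <linux/syscore_ops.h> in this tree, with the mycore_* names illustrative:

#include <linux/syscore_ops.h>

static int mycore_suspend(void)
{
	/* Runs late, with interrupts disabled and one CPU online:
	 * save whatever state must survive the sleep transition. */
	return 0;
}

static void mycore_resume(void)
{
	/* Mirror image of mycore_suspend(). */
}

static struct syscore_ops mycore_syscore_ops = {
	.suspend = mycore_suspend,
	.resume  = mycore_resume,
};

static int __init mycore_init(void)
{
	register_syscore_ops(&mycore_syscore_ops);
	return 0;
}
core_initcall(mycore_init);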
@@ -95,6 +96,7 @@ void syscore_resume(void) "Interrupts enabled after %pF\n", ops->resume); } } +EXPORT_SYMBOL_GPL(syscore_resume); #endif /* CONFIG_PM_SLEEP */ /** diff --git a/trunk/drivers/block/DAC960.c b/trunk/drivers/block/DAC960.c index 8066d086578a..e086fbbbe853 100644 --- a/trunk/drivers/block/DAC960.c +++ b/trunk/drivers/block/DAC960.c @@ -2547,7 +2547,6 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller) disk->major = MajorNumber; disk->first_minor = n << DAC960_MaxPartitionsBits; disk->fops = &DAC960_BlockDeviceOperations; - disk->events = DISK_EVENT_MEDIA_CHANGE; } /* Indicate the Block Device Registration completed successfully, diff --git a/trunk/drivers/block/amiflop.c b/trunk/drivers/block/amiflop.c index 456c0cc90dcf..8eba86bba599 100644 --- a/trunk/drivers/block/amiflop.c +++ b/trunk/drivers/block/amiflop.c @@ -1736,7 +1736,6 @@ static int __init fd_probe_drives(void) disk->major = FLOPPY_MAJOR; disk->first_minor = drive; disk->fops = &floppy_fops; - disk->events = DISK_EVENT_MEDIA_CHANGE; sprintf(disk->disk_name, "fd%d", drive); disk->private_data = &unit[drive]; set_capacity(disk, 880*2); diff --git a/trunk/drivers/block/ataflop.c b/trunk/drivers/block/ataflop.c index c871eae14120..ede16c64ff07 100644 --- a/trunk/drivers/block/ataflop.c +++ b/trunk/drivers/block/ataflop.c @@ -1964,7 +1964,6 @@ static int __init atari_floppy_init (void) unit[i].disk->first_minor = i; sprintf(unit[i].disk->disk_name, "fd%d", i); unit[i].disk->fops = &floppy_fops; - unit[i].disk->events = DISK_EVENT_MEDIA_CHANGE; unit[i].disk->private_data = &unit[i]; unit[i].disk->queue = blk_init_queue(do_fd_request, &ataflop_lock); diff --git a/trunk/drivers/block/floppy.c b/trunk/drivers/block/floppy.c index 301d7a9a41a6..db8f88586c8d 100644 --- a/trunk/drivers/block/floppy.c +++ b/trunk/drivers/block/floppy.c @@ -4205,7 +4205,6 @@ static int __init floppy_init(void) disks[dr]->major = FLOPPY_MAJOR; disks[dr]->first_minor = TOMINOR(dr); disks[dr]->fops = &floppy_fops; - disks[dr]->events = DISK_EVENT_MEDIA_CHANGE; sprintf(disks[dr]->disk_name, "fd%d", dr); init_timer(&motor_off_timer[dr]); diff --git a/trunk/drivers/block/paride/pcd.c b/trunk/drivers/block/paride/pcd.c index 2f2ccf686251..8690e31d9932 100644 --- a/trunk/drivers/block/paride/pcd.c +++ b/trunk/drivers/block/paride/pcd.c @@ -320,7 +320,6 @@ static void pcd_init_units(void) disk->first_minor = unit; strcpy(disk->disk_name, cd->name); /* umm... 
*/ disk->fops = &pcd_bdops; - disk->events = DISK_EVENT_MEDIA_CHANGE; } } diff --git a/trunk/drivers/block/paride/pd.c b/trunk/drivers/block/paride/pd.c index 21dfdb776869..869e7676d46f 100644 --- a/trunk/drivers/block/paride/pd.c +++ b/trunk/drivers/block/paride/pd.c @@ -837,7 +837,6 @@ static void pd_probe_drive(struct pd_unit *disk) p->fops = &pd_fops; p->major = major; p->first_minor = (disk - pd) << PD_BITS; - p->events = DISK_EVENT_MEDIA_CHANGE; disk->gd = p; p->private_data = disk; p->queue = pd_queue; diff --git a/trunk/drivers/block/paride/pf.c b/trunk/drivers/block/paride/pf.c index 7adeb1edbf43..f21b520ef419 100644 --- a/trunk/drivers/block/paride/pf.c +++ b/trunk/drivers/block/paride/pf.c @@ -294,7 +294,6 @@ static void __init pf_init_units(void) disk->first_minor = unit; strcpy(disk->disk_name, pf->name); disk->fops = &pf_fops; - disk->events = DISK_EVENT_MEDIA_CHANGE; if (!(*drives[unit])[D_PRT]) pf_drive_count++; } diff --git a/trunk/drivers/block/rbd.c b/trunk/drivers/block/rbd.c index 16dc3645291c..9712fad82bc6 100644 --- a/trunk/drivers/block/rbd.c +++ b/trunk/drivers/block/rbd.c @@ -92,6 +92,8 @@ struct rbd_client { struct list_head node; }; +struct rbd_req_coll; + /* * a single io request */ @@ -100,6 +102,24 @@ struct rbd_request { struct bio *bio; /* cloned bio */ struct page **pages; /* list of used pages */ u64 len; + int coll_index; + struct rbd_req_coll *coll; +}; + +struct rbd_req_status { + int done; + int rc; + u64 bytes; +}; + +/* + * a collection of requests + */ +struct rbd_req_coll { + int total; + int num_done; + struct kref kref; + struct rbd_req_status status[0]; }; struct rbd_snap { @@ -416,6 +436,17 @@ static void rbd_put_client(struct rbd_device *rbd_dev) rbd_dev->client = NULL; } +/* + * Destroy requests collection + */ +static void rbd_coll_release(struct kref *kref) +{ + struct rbd_req_coll *coll = + container_of(kref, struct rbd_req_coll, kref); + + dout("rbd_coll_release %p\n", coll); + kfree(coll); +} /* * Create a new header structure, translate header format from the on-disk @@ -590,6 +621,14 @@ static u64 rbd_get_segment(struct rbd_image_header *header, return len; } +static int rbd_get_num_segments(struct rbd_image_header *header, + u64 ofs, u64 len) +{ + u64 start_seg = ofs >> header->obj_order; + u64 end_seg = (ofs + len - 1) >> header->obj_order; + return end_seg - start_seg + 1; +} + /* * bio helpers */ @@ -735,6 +774,50 @@ static void rbd_destroy_ops(struct ceph_osd_req_op *ops) kfree(ops); } +static void rbd_coll_end_req_index(struct request *rq, + struct rbd_req_coll *coll, + int index, + int ret, u64 len) +{ + struct request_queue *q; + int min, max, i; + + dout("rbd_coll_end_req_index %p index %d ret %d len %lld\n", + coll, index, ret, len); + + if (!rq) + return; + + if (!coll) { + blk_end_request(rq, ret, len); + return; + } + + q = rq->q; + + spin_lock_irq(q->queue_lock); + coll->status[index].done = 1; + coll->status[index].rc = ret; + coll->status[index].bytes = len; + max = min = coll->num_done; + while (max < coll->total && coll->status[max].done) + max++; + + for (i = min; istatus[i].rc, + coll->status[i].bytes); + coll->num_done++; + kref_put(&coll->kref, rbd_coll_release); + } + spin_unlock_irq(q->queue_lock); +} + +static void rbd_coll_end_req(struct rbd_request *req, + int ret, u64 len) +{ + rbd_coll_end_req_index(req->rq, req->coll, req->coll_index, ret, len); +} + /* * Send ceph osd request */ @@ -749,6 +832,8 @@ static int rbd_do_request(struct request *rq, int flags, struct ceph_osd_req_op *ops, int num_reply, + 
struct rbd_req_coll *coll, + int coll_index, void (*rbd_cb)(struct ceph_osd_request *req, struct ceph_msg *msg), struct ceph_osd_request **linger_req, @@ -763,12 +848,20 @@ static int rbd_do_request(struct request *rq, struct ceph_osd_request_head *reqhead; struct rbd_image_header *header = &dev->header; - ret = -ENOMEM; req_data = kzalloc(sizeof(*req_data), GFP_NOIO); - if (!req_data) - goto done; + if (!req_data) { + if (coll) + rbd_coll_end_req_index(rq, coll, coll_index, + -ENOMEM, len); + return -ENOMEM; + } - dout("rbd_do_request len=%lld ofs=%lld\n", len, ofs); + if (coll) { + req_data->coll = coll; + req_data->coll_index = coll_index; + } + + dout("rbd_do_request obj=%s ofs=%lld len=%lld\n", obj, len, ofs); down_read(&header->snap_rwsem); @@ -777,9 +870,9 @@ static int rbd_do_request(struct request *rq, ops, false, GFP_NOIO, pages, bio); - if (IS_ERR(req)) { + if (!req) { up_read(&header->snap_rwsem); - ret = PTR_ERR(req); + ret = -ENOMEM; goto done_pages; } @@ -828,7 +921,8 @@ static int rbd_do_request(struct request *rq, ret = ceph_osdc_wait_request(&dev->client->osdc, req); if (ver) *ver = le64_to_cpu(req->r_reassert_version.version); - dout("reassert_ver=%lld\n", le64_to_cpu(req->r_reassert_version.version)); + dout("reassert_ver=%lld\n", + le64_to_cpu(req->r_reassert_version.version)); ceph_osdc_put_request(req); } return ret; @@ -837,10 +931,8 @@ static int rbd_do_request(struct request *rq, bio_chain_put(req_data->bio); ceph_osdc_put_request(req); done_pages: + rbd_coll_end_req(req_data, ret, len); kfree(req_data); -done: - if (rq) - blk_end_request(rq, ret, len); return ret; } @@ -874,7 +966,7 @@ static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg) bytes = req_data->len; } - blk_end_request(req_data->rq, rc, bytes); + rbd_coll_end_req(req_data, rc, bytes); if (req_data->bio) bio_chain_put(req_data->bio); @@ -934,6 +1026,7 @@ static int rbd_req_sync_op(struct rbd_device *dev, flags, ops, 2, + NULL, 0, NULL, linger_req, ver); if (ret < 0) @@ -959,7 +1052,9 @@ static int rbd_do_op(struct request *rq, u64 snapid, int opcode, int flags, int num_reply, u64 ofs, u64 len, - struct bio *bio) + struct bio *bio, + struct rbd_req_coll *coll, + int coll_index) { char *seg_name; u64 seg_ofs; @@ -995,7 +1090,10 @@ static int rbd_do_op(struct request *rq, flags, ops, num_reply, + coll, coll_index, rbd_req_cb, 0, NULL); + + rbd_destroy_ops(ops); done: kfree(seg_name); return ret; @@ -1008,13 +1106,15 @@ static int rbd_req_write(struct request *rq, struct rbd_device *rbd_dev, struct ceph_snap_context *snapc, u64 ofs, u64 len, - struct bio *bio) + struct bio *bio, + struct rbd_req_coll *coll, + int coll_index) { return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP, CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, 2, - ofs, len, bio); + ofs, len, bio, coll, coll_index); } /* @@ -1024,14 +1124,16 @@ static int rbd_req_read(struct request *rq, struct rbd_device *rbd_dev, u64 snapid, u64 ofs, u64 len, - struct bio *bio) + struct bio *bio, + struct rbd_req_coll *coll, + int coll_index) { return rbd_do_op(rq, rbd_dev, NULL, (snapid ? 
snapid : CEPH_NOSNAP), CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, 2, - ofs, len, bio); + ofs, len, bio, coll, coll_index); } /* @@ -1063,7 +1165,9 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev, { struct ceph_osd_req_op *ops; struct page **pages = NULL; - int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0); + int ret; + + ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0); if (ret < 0) return ret; @@ -1077,6 +1181,7 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev, CEPH_OSD_FLAG_READ, ops, 1, + NULL, 0, rbd_simple_req_cb, 0, NULL); rbd_destroy_ops(ops); @@ -1274,6 +1379,20 @@ static int rbd_req_sync_exec(struct rbd_device *dev, return ret; } +static struct rbd_req_coll *rbd_alloc_coll(int num_reqs) +{ + struct rbd_req_coll *coll = + kzalloc(sizeof(struct rbd_req_coll) + + sizeof(struct rbd_req_status) * num_reqs, + GFP_ATOMIC); + + if (!coll) + return NULL; + coll->total = num_reqs; + kref_init(&coll->kref); + return coll; +} + /* * block device queue callback */ @@ -1291,6 +1410,8 @@ static void rbd_rq_fn(struct request_queue *q) bool do_write; int size, op_size = 0; u64 ofs; + int num_segs, cur_seg = 0; + struct rbd_req_coll *coll; /* peek at request from block layer */ if (!rq) @@ -1321,6 +1442,14 @@ static void rbd_rq_fn(struct request_queue *q) do_write ? "write" : "read", size, blk_rq_pos(rq) * 512ULL); + num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size); + coll = rbd_alloc_coll(num_segs); + if (!coll) { + spin_lock_irq(q->queue_lock); + __blk_end_request_all(rq, -ENOMEM); + goto next; + } + do { /* a bio clone to be passed down to OSD req */ dout("rq->bio->bi_vcnt=%d\n", rq->bio->bi_vcnt); @@ -1328,35 +1457,41 @@ static void rbd_rq_fn(struct request_queue *q) rbd_dev->header.block_name, ofs, size, NULL, NULL); + kref_get(&coll->kref); bio = bio_chain_clone(&rq_bio, &next_bio, &bp, op_size, GFP_ATOMIC); if (!bio) { - spin_lock_irq(q->queue_lock); - __blk_end_request_all(rq, -ENOMEM); - goto next; + rbd_coll_end_req_index(rq, coll, cur_seg, + -ENOMEM, op_size); + goto next_seg; } + /* init OSD command: write or read */ if (do_write) rbd_req_write(rq, rbd_dev, rbd_dev->header.snapc, ofs, - op_size, bio); + op_size, bio, + coll, cur_seg); else rbd_req_read(rq, rbd_dev, cur_snap_id(rbd_dev), ofs, - op_size, bio); + op_size, bio, + coll, cur_seg); +next_seg: size -= op_size; ofs += op_size; + cur_seg++; rq_bio = next_bio; } while (size > 0); + kref_put(&coll->kref, rbd_coll_release); if (bp) bio_pair_release(bp); - spin_lock_irq(q->queue_lock); next: rq = blk_fetch_request(q); diff --git a/trunk/drivers/block/swim.c b/trunk/drivers/block/swim.c index 24a482f2fbd6..fd5adcd55944 100644 --- a/trunk/drivers/block/swim.c +++ b/trunk/drivers/block/swim.c @@ -858,7 +858,6 @@ static int __devinit swim_floppy_init(struct swim_priv *swd) swd->unit[drive].disk->first_minor = drive; sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive); swd->unit[drive].disk->fops = &floppy_fops; - swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE; swd->unit[drive].disk->private_data = &swd->unit[drive]; swd->unit[drive].disk->queue = swd->queue; set_capacity(swd->unit[drive].disk, 2880); diff --git a/trunk/drivers/block/swim3.c b/trunk/drivers/block/swim3.c index 4c10f56facbf..773bfa792777 100644 --- a/trunk/drivers/block/swim3.c +++ b/trunk/drivers/block/swim3.c @@ -1163,7 +1163,6 @@ static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device disk->major = FLOPPY_MAJOR; disk->first_minor = i; disk->fops = &floppy_fops; - disk->events 
= DISK_EVENT_MEDIA_CHANGE; disk->private_data = &floppy_states[i]; disk->queue = swim3_queue; disk->flags |= GENHD_FL_REMOVABLE; diff --git a/trunk/drivers/block/ub.c b/trunk/drivers/block/ub.c index 68b9430c7cfe..0e376d46bdd1 100644 --- a/trunk/drivers/block/ub.c +++ b/trunk/drivers/block/ub.c @@ -2334,7 +2334,6 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum) disk->major = UB_MAJOR; disk->first_minor = lun->id * UB_PARTS_PER_LUN; disk->fops = &ub_bd_fops; - disk->events = DISK_EVENT_MEDIA_CHANGE; disk->private_data = lun; disk->driverfs_dev = &sc->intf->dev; diff --git a/trunk/drivers/block/xsysace.c b/trunk/drivers/block/xsysace.c index 645ff765cd12..6c7fd7db6dff 100644 --- a/trunk/drivers/block/xsysace.c +++ b/trunk/drivers/block/xsysace.c @@ -1005,7 +1005,6 @@ static int __devinit ace_setup(struct ace_device *ace) ace->gd->major = ace_major; ace->gd->first_minor = ace->id * ACE_NUM_MINORS; ace->gd->fops = &ace_fops; - ace->gd->events = DISK_EVENT_MEDIA_CHANGE; ace->gd->queue = ace->queue; ace->gd->private_data = ace; snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a'); diff --git a/trunk/drivers/cdrom/cdrom.c b/trunk/drivers/cdrom/cdrom.c index 514dd8efaf73..75fb965b8f72 100644 --- a/trunk/drivers/cdrom/cdrom.c +++ b/trunk/drivers/cdrom/cdrom.c @@ -986,6 +986,9 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t cdinfo(CD_OPEN, "entering cdrom_open\n"); + /* open is event synchronization point, check events first */ + check_disk_change(bdev); + /* if this was a O_NONBLOCK open and we should honor the flags, * do a quick open without drive/disc integrity checks. */ cdi->use_count++; @@ -1012,9 +1015,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t cdinfo(CD_OPEN, "Use count for \"/dev/%s\" now %d\n", cdi->name, cdi->use_count); - /* Do this on open. Don't wait for mount, because they might - not be mounting, but opening with O_NONBLOCK */ - check_disk_change(bdev); return 0; err_release: if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) { diff --git a/trunk/drivers/cdrom/gdrom.c b/trunk/drivers/cdrom/gdrom.c index b2b034fea34e..3ceaf006e7f0 100644 --- a/trunk/drivers/cdrom/gdrom.c +++ b/trunk/drivers/cdrom/gdrom.c @@ -803,7 +803,6 @@ static int __devinit probe_gdrom(struct platform_device *devptr) goto probe_fail_cdrom_register; } gd.disk->fops = &gdrom_bdops; - gd.disk->events = DISK_EVENT_MEDIA_CHANGE; /* latch on to the interrupt */ err = gdrom_set_interrupt_handlers(); if (err) diff --git a/trunk/drivers/cdrom/viocd.c b/trunk/drivers/cdrom/viocd.c index 4e874c5fa605..e427fbe45999 100644 --- a/trunk/drivers/cdrom/viocd.c +++ b/trunk/drivers/cdrom/viocd.c @@ -626,7 +626,6 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id) gendisk->queue = q; gendisk->fops = &viocd_fops; gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE; - gendisk->events = DISK_EVENT_MEDIA_CHANGE; set_capacity(gendisk, 0); gendisk->private_data = d; d->viocd_disk = gendisk; diff --git a/trunk/drivers/char/Kconfig b/trunk/drivers/char/Kconfig index ad59b4e0a9b5..49502bc5360a 100644 --- a/trunk/drivers/char/Kconfig +++ b/trunk/drivers/char/Kconfig @@ -523,7 +523,7 @@ config RAW_DRIVER with the O_DIRECT flag. 
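The disk->events = DISK_EVENT_MEDIA_CHANGE assignments dropped in the block and cdrom hunks above were advertising in-kernel media-change polling for drivers that only implement the legacy media_changed() path; together with cdrom_open() now calling check_disk_change() up front, those drivers stay on the old synchronous model. A driver that really does support event polling would keep the flag and provide check_events() instead. A rough sketch; the mydrv_* names are hypothetical and the check_events prototype is assumed from this tree:

static unsigned int mydrv_check_events(struct gendisk *disk,
				       unsigned int clearing)
{
	/* mydrv_media_present_changed() stands in for whatever hardware
	 * poll the driver actually performs. */
	return mydrv_media_present_changed(disk->private_data) ?
			DISK_EVENT_MEDIA_CHANGE : 0;
}

static const struct block_device_operations mydrv_fops = {
	.owner        = THIS_MODULE,
	.check_events = mydrv_check_events,
};

/* ...and in the probe path: */
	disk->fops   = &mydrv_fops;
	disk->events = DISK_EVENT_MEDIA_CHANGE;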
config MAX_RAW_DEVS - int "Maximum number of RAW devices to support (1-8192)" + int "Maximum number of RAW devices to support (1-65536)" depends on RAW_DRIVER default "256" help diff --git a/trunk/drivers/char/agp/generic.c b/trunk/drivers/char/agp/generic.c index 012cba0d6d96..b072648dc3f6 100644 --- a/trunk/drivers/char/agp/generic.c +++ b/trunk/drivers/char/agp/generic.c @@ -115,6 +115,9 @@ static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages) struct agp_memory *new; unsigned long alloc_size = num_agp_pages*sizeof(struct page *); + if (INT_MAX/sizeof(struct page *) < num_agp_pages) + return NULL; + new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL); if (new == NULL) return NULL; @@ -234,11 +237,14 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge, int scratch_pages; struct agp_memory *new; size_t i; + int cur_memory; if (!bridge) return NULL; - if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp) + cur_memory = atomic_read(&bridge->current_memory_agp); + if ((cur_memory + page_count > bridge->max_memory_agp) || + (cur_memory + page_count < page_count)) return NULL; if (type >= AGP_USER_TYPES) { @@ -1089,8 +1095,8 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type) return -EINVAL; } - /* AK: could wrap */ - if ((pg_start + mem->page_count) > num_entries) + if (((pg_start + mem->page_count) > num_entries) || + ((pg_start + mem->page_count) < pg_start)) return -EINVAL; j = pg_start; @@ -1124,7 +1130,7 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type) { size_t i; struct agp_bridge_data *bridge; - int mask_type; + int mask_type, num_entries; bridge = mem->bridge; if (!bridge) @@ -1136,6 +1142,11 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type) if (type != mem->type) return -EINVAL; + num_entries = agp_num_entries(); + if (((pg_start + mem->page_count) > num_entries) || + ((pg_start + mem->page_count) < pg_start)) + return -EINVAL; + mask_type = bridge->driver->agp_type_to_mask_type(bridge, type); if (mask_type != 0) { /* The generic routines know nothing of memory types */ diff --git a/trunk/drivers/char/bsr.c b/trunk/drivers/char/bsr.c index a4a6c2f044b5..cf39bc08ce08 100644 --- a/trunk/drivers/char/bsr.c +++ b/trunk/drivers/char/bsr.c @@ -295,7 +295,7 @@ static int bsr_create_devs(struct device_node *bn) static int __init bsr_init(void) { struct device_node *np; - dev_t bsr_dev = MKDEV(bsr_major, 0); + dev_t bsr_dev; int ret = -ENODEV; int result; diff --git a/trunk/drivers/char/hpet.c b/trunk/drivers/char/hpet.c index 7066e801b9d3..051474c65b78 100644 --- a/trunk/drivers/char/hpet.c +++ b/trunk/drivers/char/hpet.c @@ -84,8 +84,6 @@ static struct clocksource clocksource_hpet = { .rating = 250, .read = read_hpet, .mask = CLOCKSOURCE_MASK(64), - .mult = 0, /* to be calculated */ - .shift = 10, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; static struct clocksource *hpet_clocksource; @@ -934,9 +932,7 @@ int hpet_alloc(struct hpet_data *hdp) if (!hpet_clocksource) { hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc; CLKSRC_FSYS_MMIO_SET(clocksource_hpet.fsys_mmio, hpet_mctr); - clocksource_hpet.mult = clocksource_hz2mult(hpetp->hp_tick_freq, - clocksource_hpet.shift); - clocksource_register(&clocksource_hpet); + clocksource_register_hz(&clocksource_hpet, hpetp->hp_tick_freq); hpetp->hp_clocksource = &clocksource_hpet; hpet_clocksource = &clocksource_hpet; } diff --git a/trunk/drivers/char/hw_random/n2-drv.c 
b/trunk/drivers/char/hw_random/n2-drv.c index 43ac61978d8b..ac6739e085e3 100644 --- a/trunk/drivers/char/hw_random/n2-drv.c +++ b/trunk/drivers/char/hw_random/n2-drv.c @@ -619,15 +619,18 @@ static void __devinit n2rng_driver_version(void) pr_info("%s", version); } +static const struct of_device_id n2rng_match[]; static int __devinit n2rng_probe(struct platform_device *op) { + const struct of_device_id *match; int victoria_falls; int err = -ENOMEM; struct n2rng *np; - if (!op->dev.of_match) + match = of_match_device(n2rng_match, &op->dev); + if (!match) return -EINVAL; - victoria_falls = (op->dev.of_match->data != NULL); + victoria_falls = (match->data != NULL); n2rng_driver_version(); np = kzalloc(sizeof(*np), GFP_KERNEL); diff --git a/trunk/drivers/char/ipmi/ipmi_si_intf.c b/trunk/drivers/char/ipmi/ipmi_si_intf.c index cc6c9b2546a3..64c6b8530615 100644 --- a/trunk/drivers/char/ipmi/ipmi_si_intf.c +++ b/trunk/drivers/char/ipmi/ipmi_si_intf.c @@ -2554,9 +2554,11 @@ static struct pci_driver ipmi_pci_driver = { }; #endif /* CONFIG_PCI */ +static struct of_device_id ipmi_match[]; static int __devinit ipmi_probe(struct platform_device *dev) { #ifdef CONFIG_OF + const struct of_device_id *match; struct smi_info *info; struct resource resource; const __be32 *regsize, *regspacing, *regshift; @@ -2566,7 +2568,8 @@ static int __devinit ipmi_probe(struct platform_device *dev) dev_info(&dev->dev, "probing via device tree\n"); - if (!dev->dev.of_match) + match = of_match_device(ipmi_match, &dev->dev); + if (!match) return -EINVAL; ret = of_address_to_resource(np, 0, &resource); @@ -2601,7 +2604,7 @@ static int __devinit ipmi_probe(struct platform_device *dev) return -ENOMEM; } - info->si_type = (enum si_type) dev->dev.of_match->data; + info->si_type = (enum si_type) match->data; info->addr_source = SI_DEVICETREE; info->irq_setup = std_irq_setup; diff --git a/trunk/drivers/char/mem.c b/trunk/drivers/char/mem.c index 436a99017998..8fc04b4f311f 100644 --- a/trunk/drivers/char/mem.c +++ b/trunk/drivers/char/mem.c @@ -806,29 +806,41 @@ static const struct file_operations oldmem_fops = { }; #endif -static ssize_t kmsg_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) +static ssize_t kmsg_writev(struct kiocb *iocb, const struct iovec *iv, + unsigned long count, loff_t pos) { - char *tmp; - ssize_t ret; + char *line, *p; + int i; + ssize_t ret = -EFAULT; + size_t len = iov_length(iv, count); - tmp = kmalloc(count + 1, GFP_KERNEL); - if (tmp == NULL) + line = kmalloc(len + 1, GFP_KERNEL); + if (line == NULL) return -ENOMEM; - ret = -EFAULT; - if (!copy_from_user(tmp, buf, count)) { - tmp[count] = 0; - ret = printk("%s", tmp); - if (ret > count) - /* printk can add a prefix */ - ret = count; + + /* + * copy all vectors into a single string, to ensure we do + * not interleave our log line with other printk calls + */ + p = line; + for (i = 0; i < count; i++) { + if (copy_from_user(p, iv[i].iov_base, iv[i].iov_len)) + goto out; + p += iv[i].iov_len; } - kfree(tmp); + p[0] = '\0'; + + ret = printk("%s", line); + /* printk can add a prefix */ + if (ret > len) + ret = len; +out: + kfree(line); return ret; } static const struct file_operations kmsg_fops = { - .write = kmsg_write, + .aio_write = kmsg_writev, .llseek = noop_llseek, }; diff --git a/trunk/drivers/char/raw.c b/trunk/drivers/char/raw.c index b4b9d5a47885..b33e8ea314ed 100644 --- a/trunk/drivers/char/raw.c +++ b/trunk/drivers/char/raw.c @@ -21,6 +21,7 @@ #include #include #include +#include #include @@ -30,10 +31,15 @@ struct 
raw_device_data { }; static struct class *raw_class; -static struct raw_device_data raw_devices[MAX_RAW_MINORS]; +static struct raw_device_data *raw_devices; static DEFINE_MUTEX(raw_mutex); static const struct file_operations raw_ctl_fops; /* forward declaration */ +static int max_raw_minors = MAX_RAW_MINORS; + +module_param(max_raw_minors, int, 0); +MODULE_PARM_DESC(max_raw_minors, "Maximum number of raw devices (1-65536)"); + /* * Open/close code for raw IO. * @@ -125,7 +131,7 @@ static int bind_set(int number, u64 major, u64 minor) struct raw_device_data *rawdev; int err = 0; - if (number <= 0 || number >= MAX_RAW_MINORS) + if (number <= 0 || number >= max_raw_minors) return -EINVAL; if (MAJOR(dev) != major || MINOR(dev) != minor) @@ -312,14 +318,27 @@ static int __init raw_init(void) dev_t dev = MKDEV(RAW_MAJOR, 0); int ret; - ret = register_chrdev_region(dev, MAX_RAW_MINORS, "raw"); + if (max_raw_minors < 1 || max_raw_minors > 65536) { + printk(KERN_WARNING "raw: invalid max_raw_minors (must be" + " between 1 and 65536), using %d\n", MAX_RAW_MINORS); + max_raw_minors = MAX_RAW_MINORS; + } + + raw_devices = vmalloc(sizeof(struct raw_device_data) * max_raw_minors); + if (!raw_devices) { + printk(KERN_ERR "Not enough memory for raw device structures\n"); + ret = -ENOMEM; + goto error; + } + memset(raw_devices, 0, sizeof(struct raw_device_data) * max_raw_minors); + + ret = register_chrdev_region(dev, max_raw_minors, "raw"); if (ret) goto error; cdev_init(&raw_cdev, &raw_fops); - ret = cdev_add(&raw_cdev, dev, MAX_RAW_MINORS); + ret = cdev_add(&raw_cdev, dev, max_raw_minors); if (ret) { - kobject_put(&raw_cdev.kobj); goto error_region; } @@ -336,8 +355,9 @@ static int __init raw_init(void) return 0; error_region: - unregister_chrdev_region(dev, MAX_RAW_MINORS); + unregister_chrdev_region(dev, max_raw_minors); error: + vfree(raw_devices); return ret; } @@ -346,7 +366,7 @@ static void __exit raw_exit(void) device_destroy(raw_class, MKDEV(RAW_MAJOR, 0)); class_destroy(raw_class); cdev_del(&raw_cdev); - unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), MAX_RAW_MINORS); + unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), max_raw_minors); } module_init(raw_init); diff --git a/trunk/drivers/char/virtio_console.c b/trunk/drivers/char/virtio_console.c index 84b164d1eb2b..838568a7dbf5 100644 --- a/trunk/drivers/char/virtio_console.c +++ b/trunk/drivers/char/virtio_console.c @@ -1280,18 +1280,7 @@ static void unplug_port(struct port *port) spin_lock_irq(&pdrvdata_lock); list_del(&port->cons.list); spin_unlock_irq(&pdrvdata_lock); -#if 0 - /* - * hvc_remove() not called as removing one hvc port - * results in other hvc ports getting frozen. - * - * Once this is resolved in hvc, this functionality - * will be enabled. Till that is done, the -EPIPE - * return from get_chars() above will help - * hvc_console.c to clean up on ports we remove here. - */ hvc_remove(port->cons.hvc); -#endif } /* Remove unused data this port might have received. 
*/ diff --git a/trunk/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/trunk/drivers/char/xilinx_hwicap/xilinx_hwicap.c index d6412c16385f..39ccdeada791 100644 --- a/trunk/drivers/char/xilinx_hwicap/xilinx_hwicap.c +++ b/trunk/drivers/char/xilinx_hwicap/xilinx_hwicap.c @@ -715,13 +715,13 @@ static int __devexit hwicap_remove(struct device *dev) } #ifdef CONFIG_OF -static int __devinit hwicap_of_probe(struct platform_device *op) +static int __devinit hwicap_of_probe(struct platform_device *op, + const struct hwicap_driver_config *config) { struct resource res; const unsigned int *id; const char *family; int rc; - const struct hwicap_driver_config *config = op->dev.of_match->data; const struct config_registers *regs; @@ -751,20 +751,24 @@ static int __devinit hwicap_of_probe(struct platform_device *op) regs); } #else -static inline int hwicap_of_probe(struct platform_device *op) +static inline int hwicap_of_probe(struct platform_device *op, + const struct hwicap_driver_config *config) { return -EINVAL; } #endif /* CONFIG_OF */ +static const struct of_device_id __devinitconst hwicap_of_match[]; static int __devinit hwicap_drv_probe(struct platform_device *pdev) { + const struct of_device_id *match; struct resource *res; const struct config_registers *regs; const char *family; - if (pdev->dev.of_match) - return hwicap_of_probe(pdev); + match = of_match_device(hwicap_of_match, &pdev->dev); + if (match) + return hwicap_of_probe(pdev, match->data); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) diff --git a/trunk/drivers/clk/clkdev.c b/trunk/drivers/clk/clkdev.c index 0fc0a79852de..6db161f64ae0 100644 --- a/trunk/drivers/clk/clkdev.c +++ b/trunk/drivers/clk/clkdev.c @@ -32,10 +32,9 @@ static DEFINE_MUTEX(clocks_mutex); * Then we take the most specific entry - with the following * order of precedence: dev+con > dev only > con only. */ -static struct clk *clk_find(const char *dev_id, const char *con_id) +static struct clk_lookup *clk_find(const char *dev_id, const char *con_id) { - struct clk_lookup *p; - struct clk *clk = NULL; + struct clk_lookup *p, *cl = NULL; int match, best = 0; list_for_each_entry(p, &clocks, node) { @@ -52,27 +51,27 @@ static struct clk *clk_find(const char *dev_id, const char *con_id) } if (match > best) { - clk = p->clk; + cl = p; if (match != 3) best = match; else break; } } - return clk; + return cl; } struct clk *clk_get_sys(const char *dev_id, const char *con_id) { - struct clk *clk; + struct clk_lookup *cl; mutex_lock(&clocks_mutex); - clk = clk_find(dev_id, con_id); - if (clk && !__clk_get(clk)) - clk = NULL; + cl = clk_find(dev_id, con_id); + if (cl && !__clk_get(cl->clk)) + cl = NULL; mutex_unlock(&clocks_mutex); - return clk ? clk : ERR_PTR(-ENOENT); + return cl ? 
cl->clk : ERR_PTR(-ENOENT); } EXPORT_SYMBOL(clk_get_sys); diff --git a/trunk/drivers/clocksource/Kconfig b/trunk/drivers/clocksource/Kconfig new file mode 100644 index 000000000000..110aeeb52f9a --- /dev/null +++ b/trunk/drivers/clocksource/Kconfig @@ -0,0 +1,2 @@ +config CLKSRC_I8253 + bool diff --git a/trunk/drivers/clocksource/Makefile b/trunk/drivers/clocksource/Makefile index be61ece6330b..cfb6383b543a 100644 --- a/trunk/drivers/clocksource/Makefile +++ b/trunk/drivers/clocksource/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o +obj-$(CONFIG_CLKSRC_I8253) += i8253.o diff --git a/trunk/drivers/clocksource/cyclone.c b/trunk/drivers/clocksource/cyclone.c index 64e528e8bfa6..72f811f73e9c 100644 --- a/trunk/drivers/clocksource/cyclone.c +++ b/trunk/drivers/clocksource/cyclone.c @@ -29,8 +29,6 @@ static struct clocksource clocksource_cyclone = { .rating = 250, .read = read_cyclone, .mask = CYCLONE_TIMER_MASK, - .mult = 10, - .shift = 0, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -108,12 +106,8 @@ static int __init init_cyclone_clocksource(void) } cyclone_ptr = cyclone_timer; - /* sort out mult/shift values: */ - clocksource_cyclone.shift = 22; - clocksource_cyclone.mult = clocksource_hz2mult(CYCLONE_TIMER_FREQ, - clocksource_cyclone.shift); - - return clocksource_register(&clocksource_cyclone); + return clocksource_register_hz(&clocksource_cyclone, + CYCLONE_TIMER_FREQ); } arch_initcall(init_cyclone_clocksource); diff --git a/trunk/drivers/clocksource/i8253.c b/trunk/drivers/clocksource/i8253.c new file mode 100644 index 000000000000..225c1761b372 --- /dev/null +++ b/trunk/drivers/clocksource/i8253.c @@ -0,0 +1,88 @@ +/* + * i8253 PIT clocksource + */ +#include +#include +#include +#include +#include + +#include + +/* + * Since the PIT overflows every tick, its not very useful + * to just read by itself. So use jiffies to emulate a free + * running counter: + */ +static cycle_t i8253_read(struct clocksource *cs) +{ + static int old_count; + static u32 old_jifs; + unsigned long flags; + int count; + u32 jifs; + + raw_spin_lock_irqsave(&i8253_lock, flags); + /* + * Although our caller may have the read side of xtime_lock, + * this is now a seqlock, and we are cheating in this routine + * by having side effects on state that we cannot undo if + * there is a collision on the seqlock and our caller has to + * retry. (Namely, old_jifs and old_count.) So we must treat + * jiffies as volatile despite the lock. We read jiffies + * before latching the timer count to guarantee that although + * the jiffies value might be older than the count (that is, + * the counter may underflow between the last point where + * jiffies was incremented and the point where we latch the + * count), it cannot be newer. + */ + jifs = jiffies; + outb_pit(0x00, PIT_MODE); /* latch the count ASAP */ + count = inb_pit(PIT_CH0); /* read the latched count */ + count |= inb_pit(PIT_CH0) << 8; + + /* VIA686a test code... reset the latch if count > max + 1 */ + if (count > LATCH) { + outb_pit(0x34, PIT_MODE); + outb_pit(PIT_LATCH & 0xff, PIT_CH0); + outb_pit(PIT_LATCH >> 8, PIT_CH0); + count = PIT_LATCH - 1; + } + + /* + * It's possible for count to appear to go the wrong way for a + * couple of reasons: + * + * 1. The timer counter underflows, but we haven't handled the + * resulting interrupt and incremented jiffies yet. + * 2. 
Hardware problem with the timer, not giving us continuous time, + * the counter does small "jumps" upwards on some Pentium systems, + * (see c't 95/10 page 335 for Neptun bug.) + * + * Previous attempts to handle these cases intelligently were + * buggy, so we just do the simple thing now. + */ + if (count > old_count && jifs == old_jifs) + count = old_count; + + old_count = count; + old_jifs = jifs; + + raw_spin_unlock_irqrestore(&i8253_lock, flags); + + count = (PIT_LATCH - 1) - count; + + return (cycle_t)(jifs * PIT_LATCH) + count; +} + +static struct clocksource i8253_cs = { + .name = "pit", + .rating = 110, + .read = i8253_read, + .mask = CLOCKSOURCE_MASK(32), +}; + +int __init clocksource_i8253_init(void) +{ + return clocksource_register_hz(&i8253_cs, PIT_TICK_RATE); +} diff --git a/trunk/drivers/cpufreq/Kconfig b/trunk/drivers/cpufreq/Kconfig index ca8ee8093d6c..9fb84853d8e3 100644 --- a/trunk/drivers/cpufreq/Kconfig +++ b/trunk/drivers/cpufreq/Kconfig @@ -1,3 +1,5 @@ +menu "CPU Frequency scaling" + config CPU_FREQ bool "CPU Frequency scaling" help @@ -18,19 +20,6 @@ if CPU_FREQ config CPU_FREQ_TABLE tristate -config CPU_FREQ_DEBUG - bool "Enable CPUfreq debugging" - help - Say Y here to enable CPUfreq subsystem (including drivers) - debugging. You will need to activate it via the kernel - command line by passing - cpufreq.debug= - - To get , add - 1 to activate CPUfreq core debugging, - 2 to activate CPUfreq drivers debugging, and - 4 to activate CPUfreq governor debugging - config CPU_FREQ_STAT tristate "CPU frequency translation statistics" select CPU_FREQ_TABLE @@ -190,4 +179,10 @@ config CPU_FREQ_GOV_CONSERVATIVE If in doubt, say N. -endif # CPU_FREQ +menu "x86 CPU frequency scaling drivers" +depends on X86 +source "drivers/cpufreq/Kconfig.x86" +endmenu + +endif +endmenu diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/Kconfig b/trunk/drivers/cpufreq/Kconfig.x86 similarity index 97% rename from trunk/arch/x86/kernel/cpu/cpufreq/Kconfig rename to trunk/drivers/cpufreq/Kconfig.x86 index 870e6cc6ad28..78ff7ee48951 100644 --- a/trunk/arch/x86/kernel/cpu/cpufreq/Kconfig +++ b/trunk/drivers/cpufreq/Kconfig.x86 @@ -1,15 +1,7 @@ # -# CPU Frequency scaling +# x86 CPU Frequency scaling drivers # -menu "CPU Frequency scaling" - -source "drivers/cpufreq/Kconfig" - -if CPU_FREQ - -comment "CPUFreq processor drivers" - config X86_PCC_CPUFREQ tristate "Processor Clocking Control interface driver" depends on ACPI && ACPI_PROCESSOR @@ -43,7 +35,7 @@ config X86_ACPI_CPUFREQ config ELAN_CPUFREQ tristate "AMD Elan SC400 and SC410" select CPU_FREQ_TABLE - depends on X86_ELAN + depends on MELAN ---help--- This adds the CPUFreq driver for AMD Elan SC400 and SC410 processors. @@ -59,7 +51,7 @@ config ELAN_CPUFREQ config SC520_CPUFREQ tristate "AMD Elan SC520" select CPU_FREQ_TABLE - depends on X86_ELAN + depends on MELAN ---help--- This adds the CPUFreq driver for AMD Elan SC520 processor. @@ -261,6 +253,3 @@ config X86_SPEEDSTEP_RELAXED_CAP_CHECK option lets the probing code bypass some of those checks if the parameter "relaxed_check=1" is passed to the module. 
-endif # CPU_FREQ - -endmenu diff --git a/trunk/drivers/cpufreq/Makefile b/trunk/drivers/cpufreq/Makefile index 71fc3b4173f1..c7f1a6f16b6e 100644 --- a/trunk/drivers/cpufreq/Makefile +++ b/trunk/drivers/cpufreq/Makefile @@ -13,3 +13,29 @@ obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o +##################################################################################d +# x86 drivers. +# Link order matters. K8 is preferred to ACPI because of firmware bugs in early +# K8 systems. ACPI is preferred to all other hardware-specific drivers. +# speedstep-* is preferred over p4-clockmod. + +obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o mperf.o +obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o +obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o +obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o +obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o +obj-$(CONFIG_X86_LONGHAUL) += longhaul.o +obj-$(CONFIG_X86_E_POWERSAVER) += e_powersaver.o +obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o +obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o +obj-$(CONFIG_X86_LONGRUN) += longrun.o +obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o +obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o +obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o +obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o +obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o +obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o +obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o + +##################################################################################d + diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/trunk/drivers/cpufreq/acpi-cpufreq.c similarity index 94% rename from trunk/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c rename to trunk/drivers/cpufreq/acpi-cpufreq.c index a2baafb2fe6d..4e04e1274388 100644 --- a/trunk/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/trunk/drivers/cpufreq/acpi-cpufreq.c @@ -47,9 +47,6 @@ #include #include "mperf.h" -#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "acpi-cpufreq", msg) - MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); MODULE_DESCRIPTION("ACPI Processor P-States Driver"); MODULE_LICENSE("GPL"); @@ -233,7 +230,7 @@ static u32 get_cur_val(const struct cpumask *mask) cmd.mask = mask; drv_read(&cmd); - dprintk("get_cur_val = %u\n", cmd.val); + pr_debug("get_cur_val = %u\n", cmd.val); return cmd.val; } @@ -244,7 +241,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) unsigned int freq; unsigned int cached_freq; - dprintk("get_cur_freq_on_cpu (%d)\n", cpu); + pr_debug("get_cur_freq_on_cpu (%d)\n", cpu); if (unlikely(data == NULL || data->acpi_data == NULL || data->freq_table == NULL)) { @@ -261,7 +258,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) data->resume = 1; } - dprintk("cur freq = %u\n", freq); + pr_debug("cur freq = %u\n", freq); return freq; } @@ -293,7 +290,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, unsigned int i; int result = 0; - dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); + pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); if (unlikely(data == NULL || data->acpi_data == NULL || data->freq_table == NULL)) { @@ -313,11 +310,11 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, next_perf_state = data->freq_table[next_state].index; if (perf->state == next_perf_state) { if (unlikely(data->resume)) { - dprintk("Called after resume, resetting to P%d\n", + pr_debug("Called after resume, resetting to P%d\n", next_perf_state); data->resume = 0; } else { - dprintk("Already at target state (P%d)\n", + pr_debug("Already at target state (P%d)\n", next_perf_state); goto out; } @@ -357,7 +354,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, if (acpi_pstate_strict) { if (!check_freqs(cmd.mask, freqs.new, data)) { - dprintk("acpi_cpufreq_target failed (%d)\n", + pr_debug("acpi_cpufreq_target failed (%d)\n", policy->cpu); result = -EAGAIN; goto out; @@ -378,7 +375,7 @@ static int acpi_cpufreq_verify(struct cpufreq_policy *policy) { struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); - dprintk("acpi_cpufreq_verify\n"); + pr_debug("acpi_cpufreq_verify\n"); return cpufreq_frequency_table_verify(policy, data->freq_table); } @@ -433,11 +430,11 @@ static void free_acpi_perf_data(void) static int __init acpi_cpufreq_early_init(void) { unsigned int i; - dprintk("acpi_cpufreq_early_init\n"); + pr_debug("acpi_cpufreq_early_init\n"); acpi_perf_data = alloc_percpu(struct acpi_processor_performance); if (!acpi_perf_data) { - dprintk("Memory allocation error for acpi_perf_data.\n"); + pr_debug("Memory allocation error for acpi_perf_data.\n"); return -ENOMEM; } for_each_possible_cpu(i) { @@ -519,7 +516,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) static int blacklisted; #endif - dprintk("acpi_cpufreq_cpu_init\n"); + pr_debug("acpi_cpufreq_cpu_init\n"); #ifdef CONFIG_SMP if (blacklisted) @@ -566,7 +563,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) /* capability check */ if (perf->state_count <= 1) { - dprintk("No P-States\n"); + pr_debug("No P-States\n"); result = -ENODEV; goto err_unreg; } @@ -578,11 +575,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) switch (perf->control_register.space_id) { case ACPI_ADR_SPACE_SYSTEM_IO: - dprintk("SYSTEM IO addr space\n"); + pr_debug("SYSTEM IO addr space\n"); data->cpu_feature = SYSTEM_IO_CAPABLE; break; case ACPI_ADR_SPACE_FIXED_HARDWARE: - dprintk("HARDWARE 
addr space\n"); + pr_debug("HARDWARE addr space\n"); if (!check_est_cpu(cpu)) { result = -ENODEV; goto err_unreg; @@ -590,7 +587,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE; break; default: - dprintk("Unknown addr space %d\n", + pr_debug("Unknown addr space %d\n", (u32) (perf->control_register.space_id)); result = -ENODEV; goto err_unreg; @@ -661,9 +658,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) if (cpu_has(c, X86_FEATURE_APERFMPERF)) acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf; - dprintk("CPU%u - ACPI performance management activated.\n", cpu); + pr_debug("CPU%u - ACPI performance management activated.\n", cpu); for (i = 0; i < perf->state_count; i++) - dprintk(" %cP%d: %d MHz, %d mW, %d uS\n", + pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n", (i == perf->state ? '*' : ' '), i, (u32) perf->states[i].core_frequency, (u32) perf->states[i].power, @@ -694,7 +691,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) { struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); - dprintk("acpi_cpufreq_cpu_exit\n"); + pr_debug("acpi_cpufreq_cpu_exit\n"); if (data) { cpufreq_frequency_table_put_attr(policy->cpu); @@ -712,7 +709,7 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy) { struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); - dprintk("acpi_cpufreq_resume\n"); + pr_debug("acpi_cpufreq_resume\n"); data->resume = 1; @@ -743,7 +740,7 @@ static int __init acpi_cpufreq_init(void) if (acpi_disabled) return 0; - dprintk("acpi_cpufreq_init\n"); + pr_debug("acpi_cpufreq_init\n"); ret = acpi_cpufreq_early_init(); if (ret) @@ -758,7 +755,7 @@ static int __init acpi_cpufreq_init(void) static void __exit acpi_cpufreq_exit(void) { - dprintk("acpi_cpufreq_exit\n"); + pr_debug("acpi_cpufreq_exit\n"); cpufreq_unregister_driver(&acpi_cpufreq_driver); diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c b/trunk/drivers/cpufreq/cpufreq-nforce2.c similarity index 97% rename from trunk/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c rename to trunk/drivers/cpufreq/cpufreq-nforce2.c index 141abebc4516..7bac808804f3 100644 --- a/trunk/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c +++ b/trunk/drivers/cpufreq/cpufreq-nforce2.c @@ -57,8 +57,6 @@ MODULE_PARM_DESC(min_fsb, "Minimum FSB to use, if not defined: current FSB - 50"); #define PFX "cpufreq-nforce2: " -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "cpufreq-nforce2", msg) /** * nforce2_calc_fsb - calculate FSB @@ -270,7 +268,7 @@ static int nforce2_target(struct cpufreq_policy *policy, if (freqs.old == freqs.new) return 0; - dprintk("Old CPU frequency %d kHz, new %d kHz\n", + pr_debug("Old CPU frequency %d kHz, new %d kHz\n", freqs.old, freqs.new); cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); @@ -282,7 +280,7 @@ static int nforce2_target(struct cpufreq_policy *policy, printk(KERN_ERR PFX "Changing FSB to %d failed\n", target_fsb); else - dprintk("Changed FSB successfully to %d\n", + pr_debug("Changed FSB successfully to %d\n", target_fsb); /* Enable IRQs */ diff --git a/trunk/drivers/cpufreq/cpufreq.c b/trunk/drivers/cpufreq/cpufreq.c index 2dafc5c38ae7..0a5bea9e3585 100644 --- a/trunk/drivers/cpufreq/cpufreq.c +++ b/trunk/drivers/cpufreq/cpufreq.c @@ -32,9 +32,6 @@ #include -#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \ - "cpufreq-core", msg) - /** * The "cpufreq driver" - the arch- or hardware-dependent low * level driver of CPUFreq support, and its spinlock. This lock @@ -180,93 +177,6 @@ void cpufreq_cpu_put(struct cpufreq_policy *data) EXPORT_SYMBOL_GPL(cpufreq_cpu_put); -/********************************************************************* - * UNIFIED DEBUG HELPERS * - *********************************************************************/ -#ifdef CONFIG_CPU_FREQ_DEBUG - -/* what part(s) of the CPUfreq subsystem are debugged? */ -static unsigned int debug; - -/* is the debug output ratelimit'ed using printk_ratelimit? User can - * set or modify this value. - */ -static unsigned int debug_ratelimit = 1; - -/* is the printk_ratelimit'ing enabled? It's enabled after a successful - * loading of a cpufreq driver, temporarily disabled when a new policy - * is set, and disabled upon cpufreq driver removal - */ -static unsigned int disable_ratelimit = 1; -static DEFINE_SPINLOCK(disable_ratelimit_lock); - -static void cpufreq_debug_enable_ratelimit(void) -{ - unsigned long flags; - - spin_lock_irqsave(&disable_ratelimit_lock, flags); - if (disable_ratelimit) - disable_ratelimit--; - spin_unlock_irqrestore(&disable_ratelimit_lock, flags); -} - -static void cpufreq_debug_disable_ratelimit(void) -{ - unsigned long flags; - - spin_lock_irqsave(&disable_ratelimit_lock, flags); - disable_ratelimit++; - spin_unlock_irqrestore(&disable_ratelimit_lock, flags); -} - -void cpufreq_debug_printk(unsigned int type, const char *prefix, - const char *fmt, ...) -{ - char s[256]; - va_list args; - unsigned int len; - unsigned long flags; - - WARN_ON(!prefix); - if (type & debug) { - spin_lock_irqsave(&disable_ratelimit_lock, flags); - if (!disable_ratelimit && debug_ratelimit - && !printk_ratelimit()) { - spin_unlock_irqrestore(&disable_ratelimit_lock, flags); - return; - } - spin_unlock_irqrestore(&disable_ratelimit_lock, flags); - - len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix); - - va_start(args, fmt); - len += vsnprintf(&s[len], (256 - len), fmt, args); - va_end(args); - - printk(s); - - WARN_ON(len < 5); - } -} -EXPORT_SYMBOL(cpufreq_debug_printk); - - -module_param(debug, uint, 0644); -MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core," - " 2 to debug drivers, and 4 to debug governors."); - -module_param(debug_ratelimit, uint, 0644); -MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:" - " set to 0 to disable ratelimiting."); - -#else /* !CONFIG_CPU_FREQ_DEBUG */ - -static inline void cpufreq_debug_enable_ratelimit(void) { return; } -static inline void cpufreq_debug_disable_ratelimit(void) { return; } - -#endif /* CONFIG_CPU_FREQ_DEBUG */ - - /********************************************************************* * EXTERNALLY AFFECTING FREQUENCY CHANGES * *********************************************************************/ @@ -291,7 +201,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) if (!l_p_j_ref_freq) { l_p_j_ref = loops_per_jiffy; l_p_j_ref_freq = ci->old; - dprintk("saving %lu as reference value for loops_per_jiffy; " + pr_debug("saving %lu as reference value for loops_per_jiffy; " "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq); } if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) || @@ -299,7 +209,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) { loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new); - 
dprintk("scaling loops_per_jiffy to %lu " + pr_debug("scaling loops_per_jiffy to %lu " "for frequency %u kHz\n", loops_per_jiffy, ci->new); } } @@ -326,7 +236,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) BUG_ON(irqs_disabled()); freqs->flags = cpufreq_driver->flags; - dprintk("notification %u of frequency transition to %u kHz\n", + pr_debug("notification %u of frequency transition to %u kHz\n", state, freqs->new); policy = per_cpu(cpufreq_cpu_data, freqs->cpu); @@ -340,7 +250,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { if ((policy) && (policy->cpu == freqs->cpu) && (policy->cur) && (policy->cur != freqs->old)) { - dprintk("Warning: CPU frequency is" + pr_debug("Warning: CPU frequency is" " %u, cpufreq assumed %u kHz.\n", freqs->old, policy->cur); freqs->old = policy->cur; @@ -353,7 +263,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) case CPUFREQ_POSTCHANGE: adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); - dprintk("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new, + pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new, (unsigned long)freqs->cpu); trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu); trace_cpu_frequency(freqs->new, freqs->cpu); @@ -411,21 +321,14 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, t = __find_governor(str_governor); if (t == NULL) { - char *name = kasprintf(GFP_KERNEL, "cpufreq_%s", - str_governor); - - if (name) { - int ret; + int ret; - mutex_unlock(&cpufreq_governor_mutex); - ret = request_module("%s", name); - mutex_lock(&cpufreq_governor_mutex); + mutex_unlock(&cpufreq_governor_mutex); + ret = request_module("cpufreq_%s", str_governor); + mutex_lock(&cpufreq_governor_mutex); - if (ret == 0) - t = __find_governor(str_governor); - } - - kfree(name); + if (ret == 0) + t = __find_governor(str_governor); } if (t != NULL) { @@ -753,7 +656,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, static void cpufreq_sysfs_release(struct kobject *kobj) { struct cpufreq_policy *policy = to_policy(kobj); - dprintk("last reference is dropped\n"); + pr_debug("last reference is dropped\n"); complete(&policy->kobj_unregister); } @@ -788,7 +691,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu, gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu)); if (gov) { policy->governor = gov; - dprintk("Restoring governor %s for cpu %d\n", + pr_debug("Restoring governor %s for cpu %d\n", policy->governor->name, cpu); } #endif @@ -824,7 +727,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu, per_cpu(cpufreq_cpu_data, cpu) = managed_policy; spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - dprintk("CPU already managed, adding link\n"); + pr_debug("CPU already managed, adding link\n"); ret = sysfs_create_link(&sys_dev->kobj, &managed_policy->kobj, "cpufreq"); @@ -865,7 +768,7 @@ static int cpufreq_add_dev_symlink(unsigned int cpu, if (!cpu_online(j)) continue; - dprintk("CPU %u already managed, adding link\n", j); + pr_debug("CPU %u already managed, adding link\n", j); managed_policy = cpufreq_cpu_get(cpu); cpu_sys_dev = get_cpu_sysdev(j); ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj, @@ -941,7 +844,7 @@ static int cpufreq_add_dev_interface(unsigned int cpu, policy->user_policy.governor = policy->governor; if (ret) { - dprintk("setting policy failed\n"); + pr_debug("setting policy failed\n"); if (cpufreq_driver->exit) 
cpufreq_driver->exit(policy); } @@ -977,8 +880,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) if (cpu_is_offline(cpu)) return 0; - cpufreq_debug_disable_ratelimit(); - dprintk("adding CPU %u\n", cpu); + pr_debug("adding CPU %u\n", cpu); #ifdef CONFIG_SMP /* check whether a different CPU already registered this @@ -986,7 +888,6 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) policy = cpufreq_cpu_get(cpu); if (unlikely(policy)) { cpufreq_cpu_put(policy); - cpufreq_debug_enable_ratelimit(); return 0; } #endif @@ -1037,7 +938,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) */ ret = cpufreq_driver->init(policy); if (ret) { - dprintk("initialization failed\n"); + pr_debug("initialization failed\n"); goto err_unlock_policy; } policy->user_policy.min = policy->min; @@ -1063,8 +964,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) kobject_uevent(&policy->kobj, KOBJ_ADD); module_put(cpufreq_driver->owner); - dprintk("initialization complete\n"); - cpufreq_debug_enable_ratelimit(); + pr_debug("initialization complete\n"); return 0; @@ -1088,7 +988,6 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) nomem_out: module_put(cpufreq_driver->owner); module_out: - cpufreq_debug_enable_ratelimit(); return ret; } @@ -1112,15 +1011,13 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) unsigned int j; #endif - cpufreq_debug_disable_ratelimit(); - dprintk("unregistering CPU %u\n", cpu); + pr_debug("unregistering CPU %u\n", cpu); spin_lock_irqsave(&cpufreq_driver_lock, flags); data = per_cpu(cpufreq_cpu_data, cpu); if (!data) { spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - cpufreq_debug_enable_ratelimit(); unlock_policy_rwsem_write(cpu); return -EINVAL; } @@ -1132,12 +1029,11 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) * only need to unlink, put and exit */ if (unlikely(cpu != data->cpu)) { - dprintk("removing link\n"); + pr_debug("removing link\n"); cpumask_clear_cpu(cpu, data->cpus); spin_unlock_irqrestore(&cpufreq_driver_lock, flags); kobj = &sys_dev->kobj; cpufreq_cpu_put(data); - cpufreq_debug_enable_ratelimit(); unlock_policy_rwsem_write(cpu); sysfs_remove_link(kobj, "cpufreq"); return 0; @@ -1170,7 +1066,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) for_each_cpu(j, data->cpus) { if (j == cpu) continue; - dprintk("removing link for cpu %u\n", j); + pr_debug("removing link for cpu %u\n", j); #ifdef CONFIG_HOTPLUG_CPU strncpy(per_cpu(cpufreq_cpu_governor, j), data->governor->name, CPUFREQ_NAME_LEN); @@ -1199,21 +1095,35 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) * not referenced anymore by anybody before we proceed with * unloading. 
*/ - dprintk("waiting for dropping of refcount\n"); + pr_debug("waiting for dropping of refcount\n"); wait_for_completion(cmp); - dprintk("wait complete\n"); + pr_debug("wait complete\n"); lock_policy_rwsem_write(cpu); if (cpufreq_driver->exit) cpufreq_driver->exit(data); unlock_policy_rwsem_write(cpu); +#ifdef CONFIG_HOTPLUG_CPU + /* when the CPU which is the parent of the kobj is hotplugged + * offline, check for siblings, and create cpufreq sysfs interface + * and symlinks + */ + if (unlikely(cpumask_weight(data->cpus) > 1)) { + /* first sibling now owns the new sysfs dir */ + cpumask_clear_cpu(cpu, data->cpus); + cpufreq_add_dev(get_cpu_sysdev(cpumask_first(data->cpus))); + + /* finally remove our own symlink */ + lock_policy_rwsem_write(cpu); + __cpufreq_remove_dev(sys_dev); + } +#endif + free_cpumask_var(data->related_cpus); free_cpumask_var(data->cpus); kfree(data); - per_cpu(cpufreq_cpu_data, cpu) = NULL; - cpufreq_debug_enable_ratelimit(); return 0; } @@ -1239,7 +1149,7 @@ static void handle_update(struct work_struct *work) struct cpufreq_policy *policy = container_of(work, struct cpufreq_policy, update); unsigned int cpu = policy->cpu; - dprintk("handle_update for cpu %u called\n", cpu); + pr_debug("handle_update for cpu %u called\n", cpu); cpufreq_update_policy(cpu); } @@ -1257,7 +1167,7 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, { struct cpufreq_freqs freqs; - dprintk("Warning: CPU frequency out of sync: cpufreq and timing " + pr_debug("Warning: CPU frequency out of sync: cpufreq and timing " "core thinks of %u, is %u kHz.\n", old_freq, new_freq); freqs.cpu = cpu; @@ -1360,7 +1270,7 @@ static int cpufreq_bp_suspend(void) int cpu = smp_processor_id(); struct cpufreq_policy *cpu_policy; - dprintk("suspending cpu %u\n", cpu); + pr_debug("suspending cpu %u\n", cpu); /* If there's no policy for the boot CPU, we have nothing to do. */ cpu_policy = cpufreq_cpu_get(cpu); @@ -1398,7 +1308,7 @@ static void cpufreq_bp_resume(void) int cpu = smp_processor_id(); struct cpufreq_policy *cpu_policy; - dprintk("resuming cpu %u\n", cpu); + pr_debug("resuming cpu %u\n", cpu); /* If there's no policy for the boot CPU, we have nothing to do. 
*/ cpu_policy = cpufreq_cpu_get(cpu); @@ -1510,7 +1420,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, { int retval = -EINVAL; - dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu, + pr_debug("target for CPU %u: %u kHz, relation %u\n", policy->cpu, target_freq, relation); if (cpu_online(policy->cpu) && cpufreq_driver->target) retval = cpufreq_driver->target(policy, target_freq, relation); @@ -1596,7 +1506,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, if (!try_module_get(policy->governor->owner)) return -EINVAL; - dprintk("__cpufreq_governor for CPU %u, event %u\n", + pr_debug("__cpufreq_governor for CPU %u, event %u\n", policy->cpu, event); ret = policy->governor->governor(policy, event); @@ -1697,8 +1607,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, { int ret = 0; - cpufreq_debug_disable_ratelimit(); - dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, + pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, policy->min, policy->max); memcpy(&policy->cpuinfo, &data->cpuinfo, @@ -1735,19 +1644,19 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, data->min = policy->min; data->max = policy->max; - dprintk("new min and max freqs are %u - %u kHz\n", + pr_debug("new min and max freqs are %u - %u kHz\n", data->min, data->max); if (cpufreq_driver->setpolicy) { data->policy = policy->policy; - dprintk("setting range\n"); + pr_debug("setting range\n"); ret = cpufreq_driver->setpolicy(policy); } else { if (policy->governor != data->governor) { /* save old, working values */ struct cpufreq_governor *old_gov = data->governor; - dprintk("governor switch\n"); + pr_debug("governor switch\n"); /* end old governor */ if (data->governor) @@ -1757,7 +1666,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, data->governor = policy->governor; if (__cpufreq_governor(data, CPUFREQ_GOV_START)) { /* new governor failed, so re-start old one */ - dprintk("starting governor %s failed\n", + pr_debug("starting governor %s failed\n", data->governor->name); if (old_gov) { data->governor = old_gov; @@ -1769,12 +1678,11 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, } /* might be a policy change, too, so fall through */ } - dprintk("governor: change or update limits\n"); + pr_debug("governor: change or update limits\n"); __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); } error_out: - cpufreq_debug_enable_ratelimit(); return ret; } @@ -1801,7 +1709,7 @@ int cpufreq_update_policy(unsigned int cpu) goto fail; } - dprintk("updating policy for CPU %u\n", cpu); + pr_debug("updating policy for CPU %u\n", cpu); memcpy(&policy, data, sizeof(struct cpufreq_policy)); policy.min = data->user_policy.min; policy.max = data->user_policy.max; @@ -1813,7 +1721,7 @@ int cpufreq_update_policy(unsigned int cpu) if (cpufreq_driver->get) { policy.cur = cpufreq_driver->get(cpu); if (!data->cur) { - dprintk("Driver did not initialize current freq"); + pr_debug("Driver did not initialize current freq"); data->cur = policy.cur; } else { if (data->cur != policy.cur) @@ -1889,7 +1797,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) ((!driver_data->setpolicy) && (!driver_data->target))) return -EINVAL; - dprintk("trying to register driver %s\n", driver_data->name); + pr_debug("trying to register driver %s\n", driver_data->name); if (driver_data->setpolicy) driver_data->flags |= CPUFREQ_CONST_LOOPS; @@ -1920,15 +1828,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) /* 
if all ->init() calls failed, unregister */ if (ret) { - dprintk("no CPU initialized for driver %s\n", + pr_debug("no CPU initialized for driver %s\n", driver_data->name); goto err_sysdev_unreg; } } register_hotcpu_notifier(&cpufreq_cpu_notifier); - dprintk("driver %s up and running\n", driver_data->name); - cpufreq_debug_enable_ratelimit(); + pr_debug("driver %s up and running\n", driver_data->name); return 0; err_sysdev_unreg: @@ -1955,14 +1862,10 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver) { unsigned long flags; - cpufreq_debug_disable_ratelimit(); - - if (!cpufreq_driver || (driver != cpufreq_driver)) { - cpufreq_debug_enable_ratelimit(); + if (!cpufreq_driver || (driver != cpufreq_driver)) return -EINVAL; - } - dprintk("unregistering driver %s\n", driver->name); + pr_debug("unregistering driver %s\n", driver->name); sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver); unregister_hotcpu_notifier(&cpufreq_cpu_notifier); diff --git a/trunk/drivers/cpufreq/cpufreq_performance.c b/trunk/drivers/cpufreq/cpufreq_performance.c index 7e2e515087f8..f13a8a9af6a1 100644 --- a/trunk/drivers/cpufreq/cpufreq_performance.c +++ b/trunk/drivers/cpufreq/cpufreq_performance.c @@ -15,9 +15,6 @@ #include #include -#define dprintk(msg...) \ - cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "performance", msg) - static int cpufreq_governor_performance(struct cpufreq_policy *policy, unsigned int event) @@ -25,7 +22,7 @@ static int cpufreq_governor_performance(struct cpufreq_policy *policy, switch (event) { case CPUFREQ_GOV_START: case CPUFREQ_GOV_LIMITS: - dprintk("setting to %u kHz because of event %u\n", + pr_debug("setting to %u kHz because of event %u\n", policy->max, event); __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); diff --git a/trunk/drivers/cpufreq/cpufreq_powersave.c b/trunk/drivers/cpufreq/cpufreq_powersave.c index e6db5faf3eb1..4c2eb512f2bc 100644 --- a/trunk/drivers/cpufreq/cpufreq_powersave.c +++ b/trunk/drivers/cpufreq/cpufreq_powersave.c @@ -15,16 +15,13 @@ #include #include -#define dprintk(msg...) \ - cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "powersave", msg) - static int cpufreq_governor_powersave(struct cpufreq_policy *policy, unsigned int event) { switch (event) { case CPUFREQ_GOV_START: case CPUFREQ_GOV_LIMITS: - dprintk("setting to %u kHz because of event %u\n", + pr_debug("setting to %u kHz because of event %u\n", policy->min, event); __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); diff --git a/trunk/drivers/cpufreq/cpufreq_stats.c b/trunk/drivers/cpufreq/cpufreq_stats.c index 00d73fc8e4e2..b60a4c263686 100644 --- a/trunk/drivers/cpufreq/cpufreq_stats.c +++ b/trunk/drivers/cpufreq/cpufreq_stats.c @@ -165,17 +165,27 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq) return -1; } +/* should be called late in the CPU removal sequence so that the stats + * memory is still available in case someone tries to use it. + */ static void cpufreq_stats_free_table(unsigned int cpu) { struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu); - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); - if (policy && policy->cpu == cpu) - sysfs_remove_group(&policy->kobj, &stats_attr_group); if (stat) { kfree(stat->time_in_state); kfree(stat); } per_cpu(cpufreq_stats_table, cpu) = NULL; +} + +/* must be called early in the CPU removal sequence (before + * cpufreq_remove_dev) so that policy is still valid. 
+ */ +static void cpufreq_stats_free_sysfs(unsigned int cpu) +{ + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + if (policy && policy->cpu == cpu) + sysfs_remove_group(&policy->kobj, &stats_attr_group); if (policy) cpufreq_cpu_put(policy); } @@ -316,6 +326,9 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb, case CPU_ONLINE_FROZEN: cpufreq_update_policy(cpu); break; + case CPU_DOWN_PREPARE: + cpufreq_stats_free_sysfs(cpu); + break; case CPU_DEAD: case CPU_DEAD_FROZEN: cpufreq_stats_free_table(cpu); @@ -324,9 +337,10 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb, return NOTIFY_OK; } -static struct notifier_block cpufreq_stat_cpu_notifier __refdata = -{ +/* priority=1 so this will get called before cpufreq_remove_dev */ +static struct notifier_block cpufreq_stat_cpu_notifier __refdata = { .notifier_call = cpufreq_stat_cpu_callback, + .priority = 1, }; static struct notifier_block notifier_policy_block = { diff --git a/trunk/drivers/cpufreq/cpufreq_userspace.c b/trunk/drivers/cpufreq/cpufreq_userspace.c index 66d2d1d6c80f..f231015904c0 100644 --- a/trunk/drivers/cpufreq/cpufreq_userspace.c +++ b/trunk/drivers/cpufreq/cpufreq_userspace.c @@ -37,9 +37,6 @@ static DEFINE_PER_CPU(unsigned int, cpu_is_managed); static DEFINE_MUTEX(userspace_mutex); static int cpus_using_userspace_governor; -#define dprintk(msg...) \ - cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg) - /* keep track of frequency transitions */ static int userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val, @@ -50,7 +47,7 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val, if (!per_cpu(cpu_is_managed, freq->cpu)) return 0; - dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n", + pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n", freq->cpu, freq->new); per_cpu(cpu_cur_freq, freq->cpu) = freq->new; @@ -73,7 +70,7 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq) { int ret = -EINVAL; - dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); + pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); mutex_lock(&userspace_mutex); if (!per_cpu(cpu_is_managed, policy->cpu)) @@ -134,7 +131,7 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy, per_cpu(cpu_max_freq, cpu) = policy->max; per_cpu(cpu_cur_freq, cpu) = policy->cur; per_cpu(cpu_set_freq, cpu) = policy->cur; - dprintk("managing cpu %u started " + pr_debug("managing cpu %u started " "(%u - %u kHz, currently %u kHz)\n", cpu, per_cpu(cpu_min_freq, cpu), @@ -156,12 +153,12 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy, per_cpu(cpu_min_freq, cpu) = 0; per_cpu(cpu_max_freq, cpu) = 0; per_cpu(cpu_set_freq, cpu) = 0; - dprintk("managing cpu %u stopped\n", cpu); + pr_debug("managing cpu %u stopped\n", cpu); mutex_unlock(&userspace_mutex); break; case CPUFREQ_GOV_LIMITS: mutex_lock(&userspace_mutex); - dprintk("limit event for cpu %u: %u - %u kHz, " + pr_debug("limit event for cpu %u: %u - %u kHz, " "currently %u kHz, last set to %u kHz\n", cpu, policy->min, policy->max, per_cpu(cpu_cur_freq, cpu), diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/e_powersaver.c b/trunk/drivers/cpufreq/e_powersaver.c similarity index 100% rename from trunk/arch/x86/kernel/cpu/cpufreq/e_powersaver.c rename to trunk/drivers/cpufreq/e_powersaver.c diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/elanfreq.c b/trunk/drivers/cpufreq/elanfreq.c similarity index 100% rename from 
trunk/arch/x86/kernel/cpu/cpufreq/elanfreq.c rename to trunk/drivers/cpufreq/elanfreq.c diff --git a/trunk/drivers/cpufreq/freq_table.c b/trunk/drivers/cpufreq/freq_table.c index 05432216e224..90431cb92804 100644 --- a/trunk/drivers/cpufreq/freq_table.c +++ b/trunk/drivers/cpufreq/freq_table.c @@ -14,9 +14,6 @@ #include #include -#define dprintk(msg...) \ - cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "freq-table", msg) - /********************************************************************* * FREQUENCY TABLE HELPERS * *********************************************************************/ @@ -31,11 +28,11 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { unsigned int freq = table[i].frequency; if (freq == CPUFREQ_ENTRY_INVALID) { - dprintk("table entry %u is invalid, skipping\n", i); + pr_debug("table entry %u is invalid, skipping\n", i); continue; } - dprintk("table entry %u: %u kHz, %u index\n", + pr_debug("table entry %u: %u kHz, %u index\n", i, freq, table[i].index); if (freq < min_freq) min_freq = freq; @@ -61,7 +58,7 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, unsigned int i; unsigned int count = 0; - dprintk("request for verification of policy (%u - %u kHz) for cpu %u\n", + pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n", policy->min, policy->max, policy->cpu); if (!cpu_online(policy->cpu)) @@ -86,7 +83,7 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); - dprintk("verification lead to (%u - %u kHz) for cpu %u\n", + pr_debug("verification lead to (%u - %u kHz) for cpu %u\n", policy->min, policy->max, policy->cpu); return 0; @@ -110,7 +107,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, }; unsigned int i; - dprintk("request for target %u kHz (relation: %u) for cpu %u\n", + pr_debug("request for target %u kHz (relation: %u) for cpu %u\n", target_freq, relation, policy->cpu); switch (relation) { @@ -167,7 +164,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, } else *index = optimal.index; - dprintk("target is %u (%u kHz, %u)\n", *index, table[*index].frequency, + pr_debug("target is %u (%u kHz, %u)\n", *index, table[*index].frequency, table[*index].index); return 0; @@ -216,14 +213,14 @@ EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs); void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, unsigned int cpu) { - dprintk("setting show_table for cpu %u to %p\n", cpu, table); + pr_debug("setting show_table for cpu %u to %p\n", cpu, table); per_cpu(cpufreq_show_table, cpu) = table; } EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr); void cpufreq_frequency_table_put_attr(unsigned int cpu) { - dprintk("clearing show_table for cpu %u\n", cpu); + pr_debug("clearing show_table for cpu %u\n", cpu); per_cpu(cpufreq_show_table, cpu) = NULL; } EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr); diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c b/trunk/drivers/cpufreq/gx-suspmod.c similarity index 95% rename from trunk/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c rename to trunk/drivers/cpufreq/gx-suspmod.c index 32974cf84232..ffe1f2c92ed3 100644 --- a/trunk/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c +++ b/trunk/drivers/cpufreq/gx-suspmod.c @@ -142,9 +142,6 @@ module_param(max_duration, int, 0444); #define POLICY_MIN_DIV 20 -#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "gx-suspmod", msg) - /** * we can detect a core multipiler from dir0_lsb * from GX1 datasheet p.56, @@ -191,7 +188,7 @@ static __init struct pci_dev *gx_detect_chipset(void) /* check if CPU is a MediaGX or a Geode. */ if ((boot_cpu_data.x86_vendor != X86_VENDOR_NSC) && (boot_cpu_data.x86_vendor != X86_VENDOR_CYRIX)) { - dprintk("error: no MediaGX/Geode processor found!\n"); + pr_debug("error: no MediaGX/Geode processor found!\n"); return NULL; } @@ -201,7 +198,7 @@ static __init struct pci_dev *gx_detect_chipset(void) return gx_pci; } - dprintk("error: no supported chipset found!\n"); + pr_debug("error: no supported chipset found!\n"); return NULL; } @@ -305,14 +302,14 @@ static void gx_set_cpuspeed(unsigned int khz) break; default: local_irq_restore(flags); - dprintk("fatal: try to set unknown chipset.\n"); + pr_debug("fatal: try to set unknown chipset.\n"); return; } } else { suscfg = gx_params->pci_suscfg & ~(SUSMOD); gx_params->off_duration = 0; gx_params->on_duration = 0; - dprintk("suspend modulation disabled: cpu runs 100%% speed.\n"); + pr_debug("suspend modulation disabled: cpu runs 100%% speed.\n"); } gx_write_byte(PCI_MODOFF, gx_params->off_duration); @@ -327,9 +324,9 @@ static void gx_set_cpuspeed(unsigned int khz) cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - dprintk("suspend modulation w/ duration of ON:%d us, OFF:%d us\n", + pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n", gx_params->on_duration * 32, gx_params->off_duration * 32); - dprintk("suspend modulation w/ clock speed: %d kHz.\n", freqs.new); + pr_debug("suspend modulation w/ clock speed: %d kHz.\n", freqs.new); } /**************************************************************** @@ -428,8 +425,8 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) stock_freq = maxfreq; curfreq = gx_get_cpuspeed(0); - dprintk("cpu max frequency is %d.\n", maxfreq); - dprintk("cpu current frequency is %dkHz.\n", curfreq); + pr_debug("cpu max frequency is %d.\n", maxfreq); + pr_debug("cpu current frequency is %dkHz.\n", curfreq); /* setup basic struct for cpufreq API */ policy->cpu = 0; @@ -475,7 +472,7 @@ static int __init cpufreq_gx_init(void) if (max_duration > 0xff) max_duration = 0xff; - dprintk("geode suspend modulation available.\n"); + pr_debug("geode suspend modulation available.\n"); params = kzalloc(sizeof(struct gxfreq_params), GFP_KERNEL); if (params == NULL) diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/longhaul.c b/trunk/drivers/cpufreq/longhaul.c similarity index 98% rename from trunk/arch/x86/kernel/cpu/cpufreq/longhaul.c rename to trunk/drivers/cpufreq/longhaul.c index cf48cdd6907d..f47d26e2a135 100644 --- a/trunk/arch/x86/kernel/cpu/cpufreq/longhaul.c +++ b/trunk/drivers/cpufreq/longhaul.c @@ -77,9 +77,6 @@ static int scale_voltage; static int disable_acpi_c3; static int revid_errata; -#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "longhaul", msg) - /* Clock ratios multiplied by 10 */ static int mults[32]; @@ -87,7 +84,6 @@ static int eblcr[32]; static int longhaul_version; static struct cpufreq_frequency_table *longhaul_table; -#ifdef CONFIG_CPU_FREQ_DEBUG static char speedbuffer[8]; static char *print_speed(int speed) @@ -106,7 +102,6 @@ static char *print_speed(int speed) return speedbuffer; } -#endif static unsigned int calc_speed(int mult) @@ -275,7 +270,7 @@ static void longhaul_setstate(unsigned int table_index) cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - dprintk("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", + pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", fsb, mult/10, mult%10, print_speed(speed/1000)); retry_loop: preempt_disable(); @@ -460,12 +455,12 @@ static int __cpuinit longhaul_get_ranges(void) break; } - dprintk("MinMult:%d.%dx MaxMult:%d.%dx\n", + pr_debug("MinMult:%d.%dx MaxMult:%d.%dx\n", minmult/10, minmult%10, maxmult/10, maxmult%10); highest_speed = calc_speed(maxmult); lowest_speed = calc_speed(minmult); - dprintk("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb, + pr_debug("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb, print_speed(lowest_speed/1000), print_speed(highest_speed/1000)); diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/longhaul.h b/trunk/drivers/cpufreq/longhaul.h similarity index 100% rename from trunk/arch/x86/kernel/cpu/cpufreq/longhaul.h rename to trunk/drivers/cpufreq/longhaul.h diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/longrun.c b/trunk/drivers/cpufreq/longrun.c similarity index 94% rename from trunk/arch/x86/kernel/cpu/cpufreq/longrun.c rename to trunk/drivers/cpufreq/longrun.c index d9f51367666b..34ea359b370e 100644 --- a/trunk/arch/x86/kernel/cpu/cpufreq/longrun.c +++ b/trunk/drivers/cpufreq/longrun.c @@ -15,9 +15,6 @@ #include #include -#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "longrun", msg) - static struct cpufreq_driver longrun_driver; /** @@ -40,14 +37,14 @@ static void __cpuinit longrun_get_policy(struct cpufreq_policy *policy) u32 msr_lo, msr_hi; rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi); - dprintk("longrun flags are %x - %x\n", msr_lo, msr_hi); + pr_debug("longrun flags are %x - %x\n", msr_lo, msr_hi); if (msr_lo & 0x01) policy->policy = CPUFREQ_POLICY_PERFORMANCE; else policy->policy = CPUFREQ_POLICY_POWERSAVE; rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); - dprintk("longrun ctrl is %x - %x\n", msr_lo, msr_hi); + pr_debug("longrun ctrl is %x - %x\n", msr_lo, msr_hi); msr_lo &= 0x0000007F; msr_hi &= 0x0000007F; @@ -150,7 +147,7 @@ static unsigned int longrun_get(unsigned int cpu) return 0; cpuid(0x80860007, &eax, &ebx, &ecx, &edx); - dprintk("cpuid eax is %u\n", eax); + pr_debug("cpuid eax is %u\n", eax); return eax * 1000; } @@ -196,7 +193,7 @@ static int __cpuinit longrun_determine_freqs(unsigned int *low_freq, rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi); *high_freq = msr_lo * 1000; /* to kHz */ - dprintk("longrun table interface told %u - %u kHz\n", + pr_debug("longrun table interface told %u - %u kHz\n", *low_freq, *high_freq); if (*low_freq > *high_freq) @@ -207,7 +204,7 @@ static int __cpuinit longrun_determine_freqs(unsigned int *low_freq, /* set the upper border to the value determined during TSC init */ *high_freq = (cpu_khz / 1000); *high_freq = *high_freq * 1000; - dprintk("high frequency is %u kHz\n", *high_freq); + pr_debug("high frequency is %u kHz\n", *high_freq); /* get current borders */ rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); @@ -233,7 +230,7 @@ static int __cpuinit longrun_determine_freqs(unsigned int *low_freq, /* restore values */ wrmsr(MSR_TMTA_LONGRUN_CTRL, save_lo, save_hi); } - dprintk("percentage is %u %%, freq is %u MHz\n", ecx, eax); + pr_debug("percentage is %u %%, freq is %u MHz\n", ecx, eax); /* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq) * eqals @@ -249,7 +246,7 @@ static int __cpuinit longrun_determine_freqs(unsigned int *low_freq, edx = ((eax - ebx) * 100) / (100 - ecx); *low_freq = edx * 1000; /* back to kHz */ - dprintk("low frequency is %u kHz\n", *low_freq); + pr_debug("low frequency is %u kHz\n", *low_freq); if (*low_freq > *high_freq) *low_freq = *high_freq; diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/mperf.c b/trunk/drivers/cpufreq/mperf.c similarity index 100% rename from trunk/arch/x86/kernel/cpu/cpufreq/mperf.c rename to trunk/drivers/cpufreq/mperf.c diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/mperf.h b/trunk/drivers/cpufreq/mperf.h similarity index 100% rename from trunk/arch/x86/kernel/cpu/cpufreq/mperf.h rename to trunk/drivers/cpufreq/mperf.h diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/trunk/drivers/cpufreq/p4-clockmod.c similarity index 96% rename from trunk/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c rename to trunk/drivers/cpufreq/p4-clockmod.c index 52c93648e492..6be3e0760c26 100644 --- a/trunk/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/trunk/drivers/cpufreq/p4-clockmod.c @@ -35,8 +35,6 @@ #include "speedstep-lib.h" #define PFX "p4-clockmod: " -#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "p4-clockmod", msg) /* * Duty Cycle (3bits), note DC_DISABLE is not specified in @@ -66,7 +64,7 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate) rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h); if (l & 0x01) - dprintk("CPU#%d currently thermal throttled\n", cpu); + pr_debug("CPU#%d currently thermal throttled\n", cpu); if (has_N44_O17_errata[cpu] && (newstate == DC_25PT || newstate == DC_DFLT)) @@ -74,10 +72,10 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate) rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h); if (newstate == DC_DISABLE) { - dprintk("CPU#%d disabling modulation\n", cpu); + pr_debug("CPU#%d disabling modulation\n", cpu); wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h); } else { - dprintk("CPU#%d setting duty cycle to %d%%\n", + pr_debug("CPU#%d setting duty cycle to %d%%\n", cpu, ((125 * newstate) / 10)); /* bits 63 - 5 : reserved * bit 4 : enable/disable @@ -217,7 +215,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) case 0x0f11: case 0x0f12: has_N44_O17_errata[policy->cpu] = 1; - dprintk("has errata -- disabling low frequencies\n"); + pr_debug("has errata -- disabling low frequencies\n"); } if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4D && diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/trunk/drivers/cpufreq/pcc-cpufreq.c similarity index 91% rename from trunk/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c rename to trunk/drivers/cpufreq/pcc-cpufreq.c index 755a31e0f5b0..7b0603eb0129 100644 --- a/trunk/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c +++ b/trunk/drivers/cpufreq/pcc-cpufreq.c @@ -39,7 +39,7 @@ #include -#define PCC_VERSION "1.00.00" +#define PCC_VERSION "1.10.00" #define POLL_LOOPS 300 #define CMD_COMPLETE 0x1 @@ -48,9 +48,6 @@ #define BUF_SZ 4 -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "pcc-cpufreq", msg) - struct pcc_register_resource { u8 descriptor; u16 length; @@ -102,7 +99,7 @@ static struct acpi_generic_address doorbell; static u64 doorbell_preserve; static u64 doorbell_write; -static u8 OSC_UUID[16] = {0x63, 0x9B, 0x2C, 0x9F, 0x70, 0x91, 0x49, 0x1f, +static u8 OSC_UUID[16] = {0x9F, 0x2C, 0x9B, 0x63, 0x91, 0x70, 0x1f, 0x49, 0xBB, 0x4F, 0xA5, 0x98, 0x2F, 0xA1, 0xB5, 0x46}; struct pcc_cpu { @@ -152,7 +149,7 @@ static unsigned int pcc_get_freq(unsigned int cpu) spin_lock(&pcc_lock); - dprintk("get: get_freq for CPU %d\n", cpu); + pr_debug("get: get_freq for CPU %d\n", cpu); pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); input_buffer = 0x1; @@ -170,7 +167,7 @@ static unsigned int pcc_get_freq(unsigned int cpu) status = ioread16(&pcch_hdr->status); if (status != CMD_COMPLETE) { - dprintk("get: FAILED: for CPU %d, status is %d\n", + pr_debug("get: FAILED: for CPU %d, status is %d\n", cpu, status); goto cmd_incomplete; } @@ -178,14 +175,14 @@ static unsigned int pcc_get_freq(unsigned int cpu) curr_freq = (((ioread32(&pcch_hdr->nominal) * (output_buffer & 0xff)) / 100) * 1000); - dprintk("get: SUCCESS: (virtual) output_offset for cpu %d is " - "0x%x, contains a value of: 0x%x. Speed is: %d MHz\n", + pr_debug("get: SUCCESS: (virtual) output_offset for cpu %d is " + "0x%p, contains a value of: 0x%x. 
Speed is: %d MHz\n", cpu, (pcch_virt_addr + pcc_cpu_data->output_offset), output_buffer, curr_freq); freq_limit = (output_buffer >> 8) & 0xff; if (freq_limit != 0xff) { - dprintk("get: frequency for cpu %d is being temporarily" + pr_debug("get: frequency for cpu %d is being temporarily" " capped at %d\n", cpu, curr_freq); } @@ -212,8 +209,8 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy, cpu = policy->cpu; pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); - dprintk("target: CPU %d should go to target freq: %d " - "(virtual) input_offset is 0x%x\n", + pr_debug("target: CPU %d should go to target freq: %d " + "(virtual) input_offset is 0x%p\n", cpu, target_freq, (pcch_virt_addr + pcc_cpu_data->input_offset)); @@ -234,14 +231,14 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy, status = ioread16(&pcch_hdr->status); if (status != CMD_COMPLETE) { - dprintk("target: FAILED for cpu %d, with status: 0x%x\n", + pr_debug("target: FAILED for cpu %d, with status: 0x%x\n", cpu, status); goto cmd_incomplete; } iowrite16(0, &pcch_hdr->status); cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - dprintk("target: was SUCCESSFUL for cpu %d\n", cpu); + pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu); spin_unlock(&pcc_lock); return 0; @@ -293,7 +290,7 @@ static int pcc_get_offset(int cpu) memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ); memset_io((pcch_virt_addr + pcc_cpu_data->output_offset), 0, BUF_SZ); - dprintk("pcc_get_offset: for CPU %d: pcc_cpu_data " + pr_debug("pcc_get_offset: for CPU %d: pcc_cpu_data " "input_offset: 0x%x, pcc_cpu_data output_offset: 0x%x\n", cpu, pcc_cpu_data->input_offset, pcc_cpu_data->output_offset); out_free: @@ -410,7 +407,7 @@ static int __init pcc_cpufreq_probe(void) if (ACPI_SUCCESS(status)) { ret = pcc_cpufreq_do_osc(&osc_handle); if (ret) - dprintk("probe: _OSC evaluation did not succeed\n"); + pr_debug("probe: _OSC evaluation did not succeed\n"); /* Firmware's use of _OSC is optional */ ret = 0; } @@ -433,7 +430,7 @@ static int __init pcc_cpufreq_probe(void) mem_resource = (struct pcc_memory_resource *)member->buffer.pointer; - dprintk("probe: mem_resource descriptor: 0x%x," + pr_debug("probe: mem_resource descriptor: 0x%x," " length: %d, space_id: %d, resource_usage: %d," " type_specific: %d, granularity: 0x%llx," " minimum: 0x%llx, maximum: 0x%llx," @@ -453,13 +450,13 @@ static int __init pcc_cpufreq_probe(void) pcch_virt_addr = ioremap_nocache(mem_resource->minimum, mem_resource->address_length); if (pcch_virt_addr == NULL) { - dprintk("probe: could not map shared mem region\n"); + pr_debug("probe: could not map shared mem region\n"); goto out_free; } pcch_hdr = pcch_virt_addr; - dprintk("probe: PCCH header (virtual) addr: 0x%p\n", pcch_hdr); - dprintk("probe: PCCH header is at physical address: 0x%llx," + pr_debug("probe: PCCH header (virtual) addr: 0x%p\n", pcch_hdr); + pr_debug("probe: PCCH header is at physical address: 0x%llx," " signature: 0x%x, length: %d bytes, major: %d, minor: %d," " supported features: 0x%x, command field: 0x%x," " status field: 0x%x, nominal latency: %d us\n", @@ -469,7 +466,7 @@ static int __init pcc_cpufreq_probe(void) ioread16(&pcch_hdr->command), ioread16(&pcch_hdr->status), ioread32(&pcch_hdr->latency)); - dprintk("probe: min time between commands: %d us," + pr_debug("probe: min time between commands: %d us," " max time between commands: %d us," " nominal CPU frequency: %d MHz," " minimum CPU frequency: %d MHz," @@ -494,7 +491,7 @@ static int __init pcc_cpufreq_probe(void) 
doorbell.access_width = 64; doorbell.address = reg_resource->address; - dprintk("probe: doorbell: space_id is %d, bit_width is %d, " + pr_debug("probe: doorbell: space_id is %d, bit_width is %d, " "bit_offset is %d, access_width is %d, address is 0x%llx\n", doorbell.space_id, doorbell.bit_width, doorbell.bit_offset, doorbell.access_width, reg_resource->address); @@ -515,7 +512,7 @@ static int __init pcc_cpufreq_probe(void) doorbell_write = member->integer.value; - dprintk("probe: doorbell_preserve: 0x%llx," + pr_debug("probe: doorbell_preserve: 0x%llx," " doorbell_write: 0x%llx\n", doorbell_preserve, doorbell_write); @@ -550,7 +547,7 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy) result = pcc_get_offset(cpu); if (result) { - dprintk("init: PCCP evaluation failed\n"); + pr_debug("init: PCCP evaluation failed\n"); goto out; } @@ -561,12 +558,12 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy) policy->cur = pcc_get_freq(cpu); if (!policy->cur) { - dprintk("init: Unable to get current CPU frequency\n"); + pr_debug("init: Unable to get current CPU frequency\n"); result = -EINVAL; goto out; } - dprintk("init: policy->max is %d, policy->min is %d\n", + pr_debug("init: policy->max is %d, policy->min is %d\n", policy->max, policy->min); out: return result; @@ -597,7 +594,7 @@ static int __init pcc_cpufreq_init(void) ret = pcc_cpufreq_probe(); if (ret) { - dprintk("pcc_cpufreq_init: PCCH evaluation failed\n"); + pr_debug("pcc_cpufreq_init: PCCH evaluation failed\n"); return ret; } diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k6.c b/trunk/drivers/cpufreq/powernow-k6.c similarity index 100% rename from trunk/arch/x86/kernel/cpu/cpufreq/powernow-k6.c rename to trunk/drivers/cpufreq/powernow-k6.c diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/trunk/drivers/cpufreq/powernow-k7.c similarity index 95% rename from trunk/arch/x86/kernel/cpu/cpufreq/powernow-k7.c rename to trunk/drivers/cpufreq/powernow-k7.c index 4a45fd6e41ba..d71d9f372359 100644 --- a/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k7.c +++ b/trunk/drivers/cpufreq/powernow-k7.c @@ -68,7 +68,6 @@ union powernow_acpi_control_t { }; #endif -#ifdef CONFIG_CPU_FREQ_DEBUG /* divide by 1000 to get VCore voltage in V. */ static const int mobile_vid_table[32] = { 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650, @@ -76,7 +75,6 @@ static const int mobile_vid_table[32] = { 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100, 1075, 1050, 1025, 1000, 975, 950, 925, 0, }; -#endif /* divide by 10 to get FID. */ static const int fid_codes[32] = { @@ -103,9 +101,6 @@ static unsigned int fsb; static unsigned int latency; static char have_a0; -#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "powernow-k7", msg) - static int check_fsb(unsigned int fsbspeed) { int delta; @@ -209,7 +204,7 @@ static int get_ranges(unsigned char *pst) vid = *pst++; powernow_table[j].index |= (vid << 8); /* upper 8 bits */ - dprintk(" FID: 0x%x (%d.%dx [%dMHz]) " + pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) " "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, fid_codes[fid] % 10, speed/1000, vid, mobile_vid_table[vid]/1000, @@ -367,7 +362,7 @@ static int powernow_acpi_init(void) unsigned int speed, speed_mhz; pc.val = (unsigned long) state->control; - dprintk("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n", + pr_debug("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n", i, (u32) state->core_frequency, (u32) state->power, @@ -401,7 +396,7 @@ static int powernow_acpi_init(void) invalidate_entry(i); } - dprintk(" FID: 0x%x (%d.%dx [%dMHz]) " + pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) " "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, fid_codes[fid] % 10, speed_mhz, vid, mobile_vid_table[vid]/1000, @@ -409,7 +404,7 @@ static int powernow_acpi_init(void) if (state->core_frequency != speed_mhz) { state->core_frequency = speed_mhz; - dprintk(" Corrected ACPI frequency to %d\n", + pr_debug(" Corrected ACPI frequency to %d\n", speed_mhz); } @@ -453,8 +448,8 @@ static int powernow_acpi_init(void) static void print_pst_entry(struct pst_s *pst, unsigned int j) { - dprintk("PST:%d (@%p)\n", j, pst); - dprintk(" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n", + pr_debug("PST:%d (@%p)\n", j, pst); + pr_debug(" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n", pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid); } @@ -474,20 +469,20 @@ static int powernow_decode_bios(int maxfid, int startvid) p = phys_to_virt(i); if (memcmp(p, "AMDK7PNOW!", 10) == 0) { - dprintk("Found PSB header at %p\n", p); + pr_debug("Found PSB header at %p\n", p); psb = (struct psb_s *) p; - dprintk("Table version: 0x%x\n", psb->tableversion); + pr_debug("Table version: 0x%x\n", psb->tableversion); if (psb->tableversion != 0x12) { printk(KERN_INFO PFX "Sorry, only v1.2 tables" " supported right now\n"); return -ENODEV; } - dprintk("Flags: 0x%x\n", psb->flags); + pr_debug("Flags: 0x%x\n", psb->flags); if ((psb->flags & 1) == 0) - dprintk("Mobile voltage regulator\n"); + pr_debug("Mobile voltage regulator\n"); else - dprintk("Desktop voltage regulator\n"); + pr_debug("Desktop voltage regulator\n"); latency = psb->settlingtime; if (latency < 100) { @@ -497,9 +492,9 @@ static int powernow_decode_bios(int maxfid, int startvid) "Correcting.\n", latency); latency = 100; } - dprintk("Settling Time: %d microseconds.\n", + pr_debug("Settling Time: %d microseconds.\n", psb->settlingtime); - dprintk("Has %d PST tables. (Only dumping ones " + pr_debug("Has %d PST tables. (Only dumping ones " "relevant to this CPU).\n", psb->numpst); @@ -650,7 +645,7 @@ static int __cpuinit powernow_cpu_init(struct cpufreq_policy *policy) printk(KERN_WARNING PFX "can not determine bus frequency\n"); return -EINVAL; } - dprintk("FSB: %3dMHz\n", fsb/1000); + pr_debug("FSB: %3dMHz\n", fsb/1000); if (dmi_check_system(powernow_dmi_table) || acpi_force) { printk(KERN_INFO PFX "PSB/PST known to be broken. 
" diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k7.h b/trunk/drivers/cpufreq/powernow-k7.h similarity index 100% rename from trunk/arch/x86/kernel/cpu/cpufreq/powernow-k7.h rename to trunk/drivers/cpufreq/powernow-k7.h diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/trunk/drivers/cpufreq/powernow-k8.c similarity index 93% rename from trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c rename to trunk/drivers/cpufreq/powernow-k8.c index 2368e38327b3..83479b6fb9a1 100644 --- a/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/trunk/drivers/cpufreq/powernow-k8.c @@ -139,7 +139,7 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data) } do { if (i++ > 10000) { - dprintk("detected change pending stuck\n"); + pr_debug("detected change pending stuck\n"); return 1; } rdmsr(MSR_FIDVID_STATUS, lo, hi); @@ -176,7 +176,7 @@ static void fidvid_msr_init(void) fid = lo & MSR_S_LO_CURRENT_FID; lo = fid | (vid << MSR_C_LO_VID_SHIFT); hi = MSR_C_HI_STP_GNT_BENIGN; - dprintk("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi); + pr_debug("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi); wrmsr(MSR_FIDVID_CTL, lo, hi); } @@ -196,7 +196,7 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid) lo |= (data->currvid << MSR_C_LO_VID_SHIFT); lo |= MSR_C_LO_INIT_FID_VID; - dprintk("writing fid 0x%x, lo 0x%x, hi 0x%x\n", + pr_debug("writing fid 0x%x, lo 0x%x, hi 0x%x\n", fid, lo, data->plllock * PLL_LOCK_CONVERSION); do { @@ -244,7 +244,7 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid) lo |= (vid << MSR_C_LO_VID_SHIFT); lo |= MSR_C_LO_INIT_FID_VID; - dprintk("writing vid 0x%x, lo 0x%x, hi 0x%x\n", + pr_debug("writing vid 0x%x, lo 0x%x, hi 0x%x\n", vid, lo, STOP_GRANT_5NS); do { @@ -325,7 +325,7 @@ static int transition_fid_vid(struct powernow_k8_data *data, return 1; } - dprintk("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n", + pr_debug("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n", smp_processor_id(), data->currfid, data->currvid); return 0; @@ -339,7 +339,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 savefid = data->currfid; u32 maxvid, lo, rvomult = 1; - dprintk("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, " + pr_debug("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, " "reqvid 0x%x, rvo 0x%x\n", smp_processor_id(), data->currfid, data->currvid, reqvid, data->rvo); @@ -349,12 +349,12 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, rvosteps *= rvomult; rdmsr(MSR_FIDVID_STATUS, lo, maxvid); maxvid = 0x1f & (maxvid >> 16); - dprintk("ph1 maxvid=0x%x\n", maxvid); + pr_debug("ph1 maxvid=0x%x\n", maxvid); if (reqvid < maxvid) /* lower numbers are higher voltages */ reqvid = maxvid; while (data->currvid > reqvid) { - dprintk("ph1: curr 0x%x, req vid 0x%x\n", + pr_debug("ph1: curr 0x%x, req vid 0x%x\n", data->currvid, reqvid); if (decrease_vid_code_by_step(data, reqvid, data->vidmvs)) return 1; @@ -365,7 +365,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, if (data->currvid == maxvid) { rvosteps = 0; } else { - dprintk("ph1: changing vid for rvo, req 0x%x\n", + pr_debug("ph1: changing vid for rvo, req 0x%x\n", data->currvid - 1); if (decrease_vid_code_by_step(data, data->currvid-1, 1)) return 1; @@ -382,7 +382,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, return 1; } - dprintk("ph1 complete, currfid 0x%x, currvid 0x%x\n", + pr_debug("ph1 complete, currfid 0x%x, currvid 0x%x\n", data->currfid, 
data->currvid); return 0; @@ -400,7 +400,7 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) return 0; } - dprintk("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, " + pr_debug("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, " "reqfid 0x%x\n", smp_processor_id(), data->currfid, data->currvid, reqfid); @@ -457,7 +457,7 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) return 1; } - dprintk("ph2 complete, currfid 0x%x, currvid 0x%x\n", + pr_debug("ph2 complete, currfid 0x%x, currvid 0x%x\n", data->currfid, data->currvid); return 0; @@ -470,7 +470,7 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 savefid = data->currfid; u32 savereqvid = reqvid; - dprintk("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n", + pr_debug("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n", smp_processor_id(), data->currfid, data->currvid); @@ -498,17 +498,17 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, return 1; if (savereqvid != data->currvid) { - dprintk("ph3 failed, currvid 0x%x\n", data->currvid); + pr_debug("ph3 failed, currvid 0x%x\n", data->currvid); return 1; } if (savefid != data->currfid) { - dprintk("ph3 failed, currfid changed 0x%x\n", + pr_debug("ph3 failed, currfid changed 0x%x\n", data->currfid); return 1; } - dprintk("ph3 complete, currfid 0x%x, currvid 0x%x\n", + pr_debug("ph3 complete, currfid 0x%x, currvid 0x%x\n", data->currfid, data->currvid); return 0; @@ -707,7 +707,7 @@ static int fill_powernow_table(struct powernow_k8_data *data, return -EIO; } - dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid); + pr_debug("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid); data->powernow_table = powernow_table; if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu) print_basics(data); @@ -717,7 +717,7 @@ static int fill_powernow_table(struct powernow_k8_data *data, (pst[j].vid == data->currvid)) return 0; - dprintk("currfid/vid do not match PST, ignoring\n"); + pr_debug("currfid/vid do not match PST, ignoring\n"); return 0; } @@ -739,36 +739,36 @@ static int find_psb_table(struct powernow_k8_data *data) if (memcmp(psb, PSB_ID_STRING, PSB_ID_STRING_LEN) != 0) continue; - dprintk("found PSB header at 0x%p\n", psb); + pr_debug("found PSB header at 0x%p\n", psb); - dprintk("table vers: 0x%x\n", psb->tableversion); + pr_debug("table vers: 0x%x\n", psb->tableversion); if (psb->tableversion != PSB_VERSION_1_4) { printk(KERN_ERR FW_BUG PFX "PSB table is not v1.4\n"); return -ENODEV; } - dprintk("flags: 0x%x\n", psb->flags1); + pr_debug("flags: 0x%x\n", psb->flags1); if (psb->flags1) { printk(KERN_ERR FW_BUG PFX "unknown flags\n"); return -ENODEV; } data->vstable = psb->vstable; - dprintk("voltage stabilization time: %d(*20us)\n", + pr_debug("voltage stabilization time: %d(*20us)\n", data->vstable); - dprintk("flags2: 0x%x\n", psb->flags2); + pr_debug("flags2: 0x%x\n", psb->flags2); data->rvo = psb->flags2 & 3; data->irt = ((psb->flags2) >> 2) & 3; mvs = ((psb->flags2) >> 4) & 3; data->vidmvs = 1 << mvs; data->batps = ((psb->flags2) >> 6) & 3; - dprintk("ramp voltage offset: %d\n", data->rvo); - dprintk("isochronous relief time: %d\n", data->irt); - dprintk("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs); + pr_debug("ramp voltage offset: %d\n", data->rvo); + pr_debug("isochronous relief time: %d\n", data->irt); + pr_debug("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs); - dprintk("numpst: 0x%x\n", psb->num_tables); + pr_debug("numpst: 
0x%x\n", psb->num_tables); cpst = psb->num_tables; if ((psb->cpuid == 0x00000fc0) || (psb->cpuid == 0x00000fe0)) { @@ -783,13 +783,13 @@ static int find_psb_table(struct powernow_k8_data *data) } data->plllock = psb->plllocktime; - dprintk("plllocktime: 0x%x (units 1us)\n", psb->plllocktime); - dprintk("maxfid: 0x%x\n", psb->maxfid); - dprintk("maxvid: 0x%x\n", psb->maxvid); + pr_debug("plllocktime: 0x%x (units 1us)\n", psb->plllocktime); + pr_debug("maxfid: 0x%x\n", psb->maxfid); + pr_debug("maxvid: 0x%x\n", psb->maxvid); maxvid = psb->maxvid; data->numps = psb->numps; - dprintk("numpstates: 0x%x\n", data->numps); + pr_debug("numpstates: 0x%x\n", data->numps); return fill_powernow_table(data, (struct pst_s *)(psb+1), maxvid); } @@ -834,13 +834,13 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) u64 control, status; if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { - dprintk("register performance failed: bad ACPI data\n"); + pr_debug("register performance failed: bad ACPI data\n"); return -EIO; } /* verify the data contained in the ACPI structures */ if (data->acpi_data.state_count <= 1) { - dprintk("No ACPI P-States\n"); + pr_debug("No ACPI P-States\n"); goto err_out; } @@ -849,7 +849,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) if ((control != ACPI_ADR_SPACE_FIXED_HARDWARE) || (status != ACPI_ADR_SPACE_FIXED_HARDWARE)) { - dprintk("Invalid control/status registers (%x - %x)\n", + pr_debug("Invalid control/status registers (%llx - %llx)\n", control, status); goto err_out; } @@ -858,7 +858,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) * (data->acpi_data.state_count + 1)), GFP_KERNEL); if (!powernow_table) { - dprintk("powernow_table memory alloc failure\n"); + pr_debug("powernow_table memory alloc failure\n"); goto err_out; } @@ -928,7 +928,7 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, } rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi); if (!(hi & HW_PSTATE_VALID_MASK)) { - dprintk("invalid pstate %d, ignoring\n", index); + pr_debug("invalid pstate %d, ignoring\n", index); invalidate_entry(powernow_table, i); continue; } @@ -968,7 +968,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, vid = (control >> VID_SHIFT) & VID_MASK; } - dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); + pr_debug(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); index = fid | (vid<<8); powernow_table[i].index = index; @@ -978,7 +978,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, /* verify frequency is OK */ if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) { - dprintk("invalid freq %u kHz, ignoring\n", freq); + pr_debug("invalid freq %u kHz, ignoring\n", freq); invalidate_entry(powernow_table, i); continue; } @@ -986,7 +986,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, /* verify voltage is OK - * BIOSs are using "off" to indicate invalid */ if (vid == VID_OFF) { - dprintk("invalid vid %u, ignoring\n", vid); + pr_debug("invalid vid %u, ignoring\n", vid); invalidate_entry(powernow_table, i); continue; } @@ -1047,7 +1047,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, int res, i; struct cpufreq_freqs freqs; - dprintk("cpu %d transition to index %u\n", smp_processor_id(), index); + pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index); /* fid/vid correctness check for k8 */ /* fid are the lower 8 bits of 
the index we stored into @@ -1057,18 +1057,18 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, fid = data->powernow_table[index].index & 0xFF; vid = (data->powernow_table[index].index & 0xFF00) >> 8; - dprintk("table matched fid 0x%x, giving vid 0x%x\n", fid, vid); + pr_debug("table matched fid 0x%x, giving vid 0x%x\n", fid, vid); if (query_current_values_with_pending_wait(data)) return 1; if ((data->currvid == vid) && (data->currfid == fid)) { - dprintk("target matches current values (fid 0x%x, vid 0x%x)\n", + pr_debug("target matches current values (fid 0x%x, vid 0x%x)\n", fid, vid); return 0; } - dprintk("cpu %d, changing to fid 0x%x, vid 0x%x\n", + pr_debug("cpu %d, changing to fid 0x%x, vid 0x%x\n", smp_processor_id(), fid, vid); freqs.old = find_khz_freq_from_fid(data->currfid); freqs.new = find_khz_freq_from_fid(fid); @@ -1096,7 +1096,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, int res, i; struct cpufreq_freqs freqs; - dprintk("cpu %d transition to index %u\n", smp_processor_id(), index); + pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index); /* get MSR index for hardware pstate transition */ pstate = index & HW_PSTATE_MASK; @@ -1156,14 +1156,14 @@ static int powernowk8_target(struct cpufreq_policy *pol, goto err_out; } - dprintk("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n", + pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n", pol->cpu, targfreq, pol->min, pol->max, relation); if (query_current_values_with_pending_wait(data)) goto err_out; if (cpu_family != CPU_HW_PSTATE) { - dprintk("targ: curr fid 0x%x, vid 0x%x\n", + pr_debug("targ: curr fid 0x%x, vid 0x%x\n", data->currfid, data->currvid); if ((checkvid != data->currvid) || @@ -1319,7 +1319,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) data->currpstate); else pol->cur = find_khz_freq_from_fid(data->currfid); - dprintk("policy current frequency %d kHz\n", pol->cur); + pr_debug("policy current frequency %d kHz\n", pol->cur); /* min/max the cpu is capable of */ if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) { @@ -1337,10 +1337,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu); if (cpu_family == CPU_HW_PSTATE) - dprintk("cpu_init done, current pstate 0x%x\n", + pr_debug("cpu_init done, current pstate 0x%x\n", data->currpstate); else - dprintk("cpu_init done, current fid 0x%x, vid 0x%x\n", + pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n", data->currfid, data->currvid); per_cpu(powernow_data, pol->cpu) = data; @@ -1586,7 +1586,7 @@ static int __cpuinit powernowk8_init(void) /* driver entry point for term */ static void __exit powernowk8_exit(void) { - dprintk("exit\n"); + pr_debug("exit\n"); if (boot_cpu_has(X86_FEATURE_CPB)) { msrs_free(msrs); diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/trunk/drivers/cpufreq/powernow-k8.h similarity index 98% rename from trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.h rename to trunk/drivers/cpufreq/powernow-k8.h index df3529b1c02d..3744d26cdc2b 100644 --- a/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.h +++ b/trunk/drivers/cpufreq/powernow-k8.h @@ -211,8 +211,6 @@ struct pst_s { u8 vid; }; -#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "powernow-k8", msg) - static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid, u32 regfid); static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid); diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/sc520_freq.c b/trunk/drivers/cpufreq/sc520_freq.c similarity index 95% rename from trunk/arch/x86/kernel/cpu/cpufreq/sc520_freq.c rename to trunk/drivers/cpufreq/sc520_freq.c index 435a996a613a..1e205e6b1727 100644 --- a/trunk/arch/x86/kernel/cpu/cpufreq/sc520_freq.c +++ b/trunk/drivers/cpufreq/sc520_freq.c @@ -29,8 +29,6 @@ static __u8 __iomem *cpuctl; -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "sc520_freq", msg) #define PFX "sc520_freq: " static struct cpufreq_frequency_table sc520_freq_table[] = { @@ -66,7 +64,7 @@ static void sc520_freq_set_cpu_state(unsigned int state) cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - dprintk("attempting to set frequency to %i kHz\n", + pr_debug("attempting to set frequency to %i kHz\n", sc520_freq_table[state].frequency); local_irq_disable(); @@ -161,7 +159,7 @@ static int __init sc520_freq_init(void) /* Test if we have the right hardware */ if (c->x86_vendor != X86_VENDOR_AMD || c->x86 != 4 || c->x86_model != 9) { - dprintk("no Elan SC520 processor found!\n"); + pr_debug("no Elan SC520 processor found!\n"); return -ENODEV; } cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1); diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/trunk/drivers/cpufreq/speedstep-centrino.c similarity index 96% rename from trunk/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c rename to trunk/drivers/cpufreq/speedstep-centrino.c index 9b1ff37de46a..6ea3455def21 100644 --- a/trunk/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/trunk/drivers/cpufreq/speedstep-centrino.c @@ -29,9 +29,6 @@ #define PFX "speedstep-centrino: " #define MAINTAINER "cpufreq@vger.kernel.org" -#define dprintk(msg...) 
\ - cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg) - #define INTEL_MSR_RANGE (0xffff) struct cpu_id @@ -244,7 +241,7 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy) if (model->cpu_id == NULL) { /* No match at all */ - dprintk("no support for CPU model \"%s\": " + pr_debug("no support for CPU model \"%s\": " "send /proc/cpuinfo to " MAINTAINER "\n", cpu->x86_model_id); return -ENOENT; @@ -252,15 +249,15 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy) if (model->op_points == NULL) { /* Matched a non-match */ - dprintk("no table support for CPU model \"%s\"\n", + pr_debug("no table support for CPU model \"%s\"\n", cpu->x86_model_id); - dprintk("try using the acpi-cpufreq driver\n"); + pr_debug("try using the acpi-cpufreq driver\n"); return -ENOENT; } per_cpu(centrino_model, policy->cpu) = model; - dprintk("found \"%s\": max frequency: %dkHz\n", + pr_debug("found \"%s\": max frequency: %dkHz\n", model->model_name, model->max_freq); return 0; @@ -369,7 +366,7 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i]; if (!per_cpu(centrino_cpu, policy->cpu)) { - dprintk("found unsupported CPU with " + pr_debug("found unsupported CPU with " "Enhanced SpeedStep: send /proc/cpuinfo to " MAINTAINER "\n"); return -ENODEV; @@ -385,7 +382,7 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP; - dprintk("trying to enable Enhanced SpeedStep (%x)\n", l); + pr_debug("trying to enable Enhanced SpeedStep (%x)\n", l); wrmsr(MSR_IA32_MISC_ENABLE, l, h); /* check to see if it stuck */ @@ -402,7 +399,7 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) /* 10uS transition latency */ policy->cur = freq; - dprintk("centrino_cpu_init: cur=%dkHz\n", policy->cur); + pr_debug("centrino_cpu_init: cur=%dkHz\n", policy->cur); ret = cpufreq_frequency_table_cpuinfo(policy, per_cpu(centrino_model, policy->cpu)->op_points); @@ -498,7 +495,7 @@ static int centrino_target (struct cpufreq_policy *policy, good_cpu = j; if (good_cpu >= nr_cpu_ids) { - dprintk("couldn't limit to CPUs in this domain\n"); + pr_debug("couldn't limit to CPUs in this domain\n"); retval = -EAGAIN; if (first_cpu) { /* We haven't started the transition yet. */ @@ -512,7 +509,7 @@ static int centrino_target (struct cpufreq_policy *policy, if (first_cpu) { rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h); if (msr == (oldmsr & 0xffff)) { - dprintk("no change needed - msr was and needs " + pr_debug("no change needed - msr was and needs " "to be %x\n", oldmsr); retval = 0; goto out; @@ -521,7 +518,7 @@ static int centrino_target (struct cpufreq_policy *policy, freqs.old = extract_clock(oldmsr, cpu, 0); freqs.new = extract_clock(msr, cpu, 0); - dprintk("target=%dkHz old=%d new=%d msr=%04x\n", + pr_debug("target=%dkHz old=%d new=%d msr=%04x\n", target_freq, freqs.old, freqs.new, msr); for_each_cpu(k, policy->cpus) { diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/trunk/drivers/cpufreq/speedstep-ich.c similarity index 92% rename from trunk/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c rename to trunk/drivers/cpufreq/speedstep-ich.c index 561758e95180..a748ce782fee 100644 --- a/trunk/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c +++ b/trunk/drivers/cpufreq/speedstep-ich.c @@ -53,10 +53,6 @@ static struct cpufreq_frequency_table speedstep_freqs[] = { }; -#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "speedstep-ich", msg) - - /** * speedstep_find_register - read the PMBASE address * @@ -80,7 +76,7 @@ static int speedstep_find_register(void) return -ENODEV; } - dprintk("pmbase is 0x%x\n", pmbase); + pr_debug("pmbase is 0x%x\n", pmbase); return 0; } @@ -106,13 +102,13 @@ static void speedstep_set_state(unsigned int state) /* read state */ value = inb(pmbase + 0x50); - dprintk("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); + pr_debug("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); /* write new state */ value &= 0xFE; value |= state; - dprintk("writing 0x%x to pmbase 0x%x + 0x50\n", value, pmbase); + pr_debug("writing 0x%x to pmbase 0x%x + 0x50\n", value, pmbase); /* Disable bus master arbitration */ pm2_blk = inb(pmbase + 0x20); @@ -132,10 +128,10 @@ static void speedstep_set_state(unsigned int state) /* Enable IRQs */ local_irq_restore(flags); - dprintk("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); + pr_debug("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); if (state == (value & 0x1)) - dprintk("change to %u MHz succeeded\n", + pr_debug("change to %u MHz succeeded\n", speedstep_get_frequency(speedstep_processor) / 1000); else printk(KERN_ERR "cpufreq: change failed - I/O error\n"); @@ -165,7 +161,7 @@ static int speedstep_activate(void) pci_read_config_word(speedstep_chipset_dev, 0x00A0, &value); if (!(value & 0x08)) { value |= 0x08; - dprintk("activating SpeedStep (TM) registers\n"); + pr_debug("activating SpeedStep (TM) registers\n"); pci_write_config_word(speedstep_chipset_dev, 0x00A0, value); } @@ -218,7 +214,7 @@ static unsigned int speedstep_detect_chipset(void) return 2; /* 2-M */ if (hostbridge->revision < 5) { - dprintk("hostbridge does not support speedstep\n"); + pr_debug("hostbridge does not support speedstep\n"); speedstep_chipset_dev = NULL; pci_dev_put(hostbridge); return 0; @@ -246,7 +242,7 @@ static unsigned int speedstep_get(unsigned int cpu) if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0) BUG(); - dprintk("detected %u kHz as current frequency\n", speed); + pr_debug("detected %u kHz as current frequency\n", speed); return speed; } @@ -276,7 +272,7 @@ static int speedstep_target(struct cpufreq_policy *policy, freqs.new = speedstep_freqs[newstate].frequency; freqs.cpu = policy->cpu; - dprintk("transiting from %u to %u kHz\n", freqs.old, freqs.new); + pr_debug("transiting from %u to %u kHz\n", freqs.old, freqs.new); /* no transition necessary */ if (freqs.old == freqs.new) @@ -351,7 +347,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) if (!speed) return -EIO; - dprintk("currently at %s speed setting - %i MHz\n", + pr_debug("currently at %s speed setting - %i MHz\n", (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? 
"low" : "high", (speed / 1000)); @@ -405,14 +401,14 @@ static int __init speedstep_init(void) /* detect processor */ speedstep_processor = speedstep_detect_processor(); if (!speedstep_processor) { - dprintk("Intel(R) SpeedStep(TM) capable processor " + pr_debug("Intel(R) SpeedStep(TM) capable processor " "not found\n"); return -ENODEV; } /* detect chipset */ if (!speedstep_detect_chipset()) { - dprintk("Intel(R) SpeedStep(TM) for this chipset not " + pr_debug("Intel(R) SpeedStep(TM) for this chipset not " "(yet) available.\n"); return -ENODEV; } diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/trunk/drivers/cpufreq/speedstep-lib.c similarity index 90% rename from trunk/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c rename to trunk/drivers/cpufreq/speedstep-lib.c index a94ec6be69fa..8af2d2fd9d51 100644 --- a/trunk/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c +++ b/trunk/drivers/cpufreq/speedstep-lib.c @@ -18,9 +18,6 @@ #include #include "speedstep-lib.h" -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "speedstep-lib", msg) - #define PFX "speedstep-lib: " #ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK @@ -75,7 +72,7 @@ static unsigned int pentium3_get_frequency(enum speedstep_processor processor) /* read MSR 0x2a - we only need the low 32 bits */ rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); - dprintk("P3 - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); + pr_debug("P3 - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); msr_tmp = msr_lo; /* decode the FSB */ @@ -89,7 +86,7 @@ static unsigned int pentium3_get_frequency(enum speedstep_processor processor) /* decode the multiplier */ if (processor == SPEEDSTEP_CPU_PIII_C_EARLY) { - dprintk("workaround for early PIIIs\n"); + pr_debug("workaround for early PIIIs\n"); msr_lo &= 0x03c00000; } else msr_lo &= 0x0bc00000; @@ -100,7 +97,7 @@ static unsigned int pentium3_get_frequency(enum speedstep_processor processor) j++; } - dprintk("speed is %u\n", + pr_debug("speed is %u\n", (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100)); return msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100; @@ -112,7 +109,7 @@ static unsigned int pentiumM_get_frequency(void) u32 msr_lo, msr_tmp; rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); - dprintk("PM - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); + pr_debug("PM - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); /* see table B-2 of 24547212.pdf */ if (msr_lo & 0x00040000) { @@ -122,7 +119,7 @@ static unsigned int pentiumM_get_frequency(void) } msr_tmp = (msr_lo >> 22) & 0x1f; - dprintk("bits 22-26 are 0x%x, speed is %u\n", + pr_debug("bits 22-26 are 0x%x, speed is %u\n", msr_tmp, (msr_tmp * 100 * 1000)); return msr_tmp * 100 * 1000; @@ -160,11 +157,11 @@ static unsigned int pentium_core_get_frequency(void) } rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); - dprintk("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", + pr_debug("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); msr_tmp = (msr_lo >> 22) & 0x1f; - dprintk("bits 22-26 are 0x%x, speed is %u\n", + pr_debug("bits 22-26 are 0x%x, speed is %u\n", msr_tmp, (msr_tmp * fsb)); ret = (msr_tmp * fsb); @@ -190,7 +187,7 @@ static unsigned int pentium4_get_frequency(void) rdmsr(0x2c, msr_lo, msr_hi); - dprintk("P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi); + pr_debug("P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi); /* decode the FSB: see IA-32 Intel (C) Architecture Software * Developer's Manual, Volume 3: System Prgramming Guide, @@ -217,7 
+214,7 @@ static unsigned int pentium4_get_frequency(void) /* Multiplier. */ mult = msr_lo >> 24; - dprintk("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n", + pr_debug("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n", fsb, mult, (fsb * mult)); ret = (fsb * mult); @@ -257,7 +254,7 @@ unsigned int speedstep_detect_processor(void) struct cpuinfo_x86 *c = &cpu_data(0); u32 ebx, msr_lo, msr_hi; - dprintk("x86: %x, model: %x\n", c->x86, c->x86_model); + pr_debug("x86: %x, model: %x\n", c->x86, c->x86_model); if ((c->x86_vendor != X86_VENDOR_INTEL) || ((c->x86 != 6) && (c->x86 != 0xF))) @@ -272,7 +269,7 @@ unsigned int speedstep_detect_processor(void) ebx = cpuid_ebx(0x00000001); ebx &= 0x000000FF; - dprintk("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask); + pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask); switch (c->x86_mask) { case 4: @@ -327,7 +324,7 @@ unsigned int speedstep_detect_processor(void) /* cpuid_ebx(1) is 0x04 for desktop PIII, * 0x06 for mobile PIII-M */ ebx = cpuid_ebx(0x00000001); - dprintk("ebx is %x\n", ebx); + pr_debug("ebx is %x\n", ebx); ebx &= 0x000000FF; @@ -344,7 +341,7 @@ unsigned int speedstep_detect_processor(void) /* all mobile PIII Coppermines have FSB 100 MHz * ==> sort out a few desktop PIIIs. */ rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_hi); - dprintk("Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n", + pr_debug("Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n", msr_lo, msr_hi); msr_lo &= 0x00c0000; if (msr_lo != 0x0080000) @@ -357,12 +354,12 @@ unsigned int speedstep_detect_processor(void) * bit 56 or 57 is set */ rdmsr(MSR_IA32_PLATFORM_ID, msr_lo, msr_hi); - dprintk("Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n", + pr_debug("Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n", msr_lo, msr_hi); if ((msr_hi & (1<<18)) && (relaxed_check ? 
1 : (msr_hi & (3<<24)))) { if (c->x86_mask == 0x01) { - dprintk("early PIII version\n"); + pr_debug("early PIII version\n"); return SPEEDSTEP_CPU_PIII_C_EARLY; } else return SPEEDSTEP_CPU_PIII_C; @@ -393,14 +390,14 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, if ((!processor) || (!low_speed) || (!high_speed) || (!set_state)) return -EINVAL; - dprintk("trying to determine both speeds\n"); + pr_debug("trying to determine both speeds\n"); /* get current speed */ prev_speed = speedstep_get_frequency(processor); if (!prev_speed) return -EIO; - dprintk("previous speed is %u\n", prev_speed); + pr_debug("previous speed is %u\n", prev_speed); local_irq_save(flags); @@ -412,7 +409,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, goto out; } - dprintk("low speed is %u\n", *low_speed); + pr_debug("low speed is %u\n", *low_speed); /* start latency measurement */ if (transition_latency) @@ -431,7 +428,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, goto out; } - dprintk("high speed is %u\n", *high_speed); + pr_debug("high speed is %u\n", *high_speed); if (*low_speed == *high_speed) { ret = -ENODEV; @@ -445,7 +442,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, if (transition_latency) { *transition_latency = (tv2.tv_sec - tv1.tv_sec) * USEC_PER_SEC + tv2.tv_usec - tv1.tv_usec; - dprintk("transition latency is %u uSec\n", *transition_latency); + pr_debug("transition latency is %u uSec\n", *transition_latency); /* convert uSec to nSec and add 20% for safety reasons */ *transition_latency *= 1200; diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h b/trunk/drivers/cpufreq/speedstep-lib.h similarity index 100% rename from trunk/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h rename to trunk/drivers/cpufreq/speedstep-lib.h diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c b/trunk/drivers/cpufreq/speedstep-smi.c similarity index 90% rename from trunk/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c rename to trunk/drivers/cpufreq/speedstep-smi.c index 91bc25b67bc1..c76ead3490bf 100644 --- a/trunk/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c +++ b/trunk/drivers/cpufreq/speedstep-smi.c @@ -55,9 +55,6 @@ static struct cpufreq_frequency_table speedstep_freqs[] = { * of DMA activity going on? */ #define SMI_TRIES 5 -#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "speedstep-smi", msg) - /** * speedstep_smi_ownership */ @@ -70,7 +67,7 @@ static int speedstep_smi_ownership(void) command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); magic = virt_to_phys(magic_data); - dprintk("trying to obtain ownership with command %x at port %x\n", + pr_debug("trying to obtain ownership with command %x at port %x\n", command, smi_port); __asm__ __volatile__( @@ -85,7 +82,7 @@ static int speedstep_smi_ownership(void) : "memory" ); - dprintk("result is %x\n", result); + pr_debug("result is %x\n", result); return result; } @@ -106,13 +103,13 @@ static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high) u32 function = GET_SPEEDSTEP_FREQS; if (!(ist_info.event & 0xFFFF)) { - dprintk("bug #1422 -- can't read freqs from BIOS\n"); + pr_debug("bug #1422 -- can't read freqs from BIOS\n"); return -ENODEV; } command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); - dprintk("trying to determine frequencies with command %x at port %x\n", + pr_debug("trying to determine frequencies with command %x at port %x\n", command, smi_port); __asm__ __volatile__( @@ -129,7 +126,7 @@ static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high) "d" (smi_port), "S" (0), "D" (0) ); - dprintk("result %x, low_freq %u, high_freq %u\n", + pr_debug("result %x, low_freq %u, high_freq %u\n", result, low_mhz, high_mhz); /* abort if results are obviously incorrect... */ @@ -154,7 +151,7 @@ static int speedstep_get_state(void) command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); - dprintk("trying to determine current setting with command %x " + pr_debug("trying to determine current setting with command %x " "at port %x\n", command, smi_port); __asm__ __volatile__( @@ -168,7 +165,7 @@ static int speedstep_get_state(void) "d" (smi_port), "S" (0), "D" (0) ); - dprintk("state is %x, result is %x\n", state, result); + pr_debug("state is %x, result is %x\n", state, result); return state & 1; } @@ -194,13 +191,13 @@ static void speedstep_set_state(unsigned int state) command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); - dprintk("trying to set frequency to state %u " + pr_debug("trying to set frequency to state %u " "with command %x at port %x\n", state, command, smi_port); do { if (retry) { - dprintk("retry %u, previous result %u, waiting...\n", + pr_debug("retry %u, previous result %u, waiting...\n", retry, result); mdelay(retry * 50); } @@ -221,7 +218,7 @@ static void speedstep_set_state(unsigned int state) local_irq_restore(flags); if (new_state == state) - dprintk("change to %u MHz succeeded after %u tries " + pr_debug("change to %u MHz succeeded after %u tries " "with result %u\n", (speedstep_freqs[new_state].frequency / 1000), retry, result); @@ -292,7 +289,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) result = speedstep_smi_ownership(); if (result) { - dprintk("fails in acquiring ownership of a SMI interface.\n"); + pr_debug("fails in acquiring ownership of a SMI interface.\n"); return -EINVAL; } @@ -304,7 +301,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) if (result) { /* fall back to speedstep_lib.c dection mechanism: * try both states out */ - dprintk("could not detect low and high frequencies " + pr_debug("could not detect low and high frequencies " "by SMI call.\n"); result = speedstep_get_freqs(speedstep_processor, low, high, @@ -312,18 +309,18 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) &speedstep_set_state); if (result) { - dprintk("could not detect two different 
speeds" + pr_debug("could not detect two different speeds" " -- aborting.\n"); return result; } else - dprintk("workaround worked.\n"); + pr_debug("workaround worked.\n"); } /* get current speed setting */ state = speedstep_get_state(); speed = speedstep_freqs[state].frequency; - dprintk("currently at %s speed setting - %i MHz\n", + pr_debug("currently at %s speed setting - %i MHz\n", (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? "low" : "high", (speed / 1000)); @@ -360,7 +357,7 @@ static int speedstep_resume(struct cpufreq_policy *policy) int result = speedstep_smi_ownership(); if (result) - dprintk("fails in re-acquiring ownership of a SMI interface.\n"); + pr_debug("fails in re-acquiring ownership of a SMI interface.\n"); return result; } @@ -403,12 +400,12 @@ static int __init speedstep_init(void) } if (!speedstep_processor) { - dprintk("No supported Intel CPU detected.\n"); + pr_debug("No supported Intel CPU detected.\n"); return -ENODEV; } - dprintk("signature:0x%.8lx, command:0x%.8lx, " - "event:0x%.8lx, perf_level:0x%.8lx.\n", + pr_debug("signature:0x%.8ulx, command:0x%.8ulx, " + "event:0x%.8ulx, perf_level:0x%.8ulx.\n", ist_info.signature, ist_info.command, ist_info.event, ist_info.perf_level); diff --git a/trunk/drivers/dma/fsldma.c b/trunk/drivers/dma/fsldma.c index 6b396759e7f5..8a781540590c 100644 --- a/trunk/drivers/dma/fsldma.c +++ b/trunk/drivers/dma/fsldma.c @@ -1448,7 +1448,7 @@ static const struct of_device_id fsldma_of_ids[] = { {} }; -static struct of_platform_driver fsldma_of_driver = { +static struct platform_driver fsldma_of_driver = { .driver = { .name = "fsl-elo-dma", .owner = THIS_MODULE, diff --git a/trunk/drivers/edac/amd64_edac.c b/trunk/drivers/edac/amd64_edac.c index 31e71c4fc831..9a8bebcf6b17 100644 --- a/trunk/drivers/edac/amd64_edac.c +++ b/trunk/drivers/edac/amd64_edac.c @@ -211,8 +211,6 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci) scrubval = scrubval & 0x001F; - amd64_debug("pci-read, sdram scrub control value: %d\n", scrubval); - for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { if (scrubrates[i].scrubval == scrubval) { retval = scrubrates[i].bandwidth; @@ -933,25 +931,74 @@ static int k8_early_channel_count(struct amd64_pvt *pvt) /* On F10h and later ErrAddr is MC4_ADDR[47:1] */ static u64 get_error_address(struct mce *m) { + struct cpuinfo_x86 *c = &boot_cpu_data; + u64 addr; u8 start_bit = 1; u8 end_bit = 47; - if (boot_cpu_data.x86 == 0xf) { + if (c->x86 == 0xf) { start_bit = 3; end_bit = 39; } - return m->addr & GENMASK(start_bit, end_bit); + addr = m->addr & GENMASK(start_bit, end_bit); + + /* + * Erratum 637 workaround + */ + if (c->x86 == 0x15) { + struct amd64_pvt *pvt; + u64 cc6_base, tmp_addr; + u32 tmp; + u8 mce_nid, intlv_en; + + if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7) + return addr; + + mce_nid = amd_get_nb_id(m->extcpu); + pvt = mcis[mce_nid]->pvt_info; + + amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp); + intlv_en = tmp >> 21 & 0x7; + + /* add [47:27] + 3 trailing bits */ + cc6_base = (tmp & GENMASK(0, 20)) << 3; + + /* reverse and add DramIntlvEn */ + cc6_base |= intlv_en ^ 0x7; + + /* pin at [47:24] */ + cc6_base <<= 24; + + if (!intlv_en) + return cc6_base | (addr & GENMASK(0, 23)); + + amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp); + + /* faster log2 */ + tmp_addr = (addr & GENMASK(12, 23)) << __fls(intlv_en + 1); + + /* OR DramIntlvSel into bits [14:12] */ + tmp_addr |= (tmp & GENMASK(21, 23)) >> 9; + + /* add remaining [11:0] bits from original MC4_ADDR */ + tmp_addr |= addr & 
GENMASK(0, 11); + + return cc6_base | tmp_addr; + } + + return addr; } static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range) { + struct cpuinfo_x86 *c = &boot_cpu_data; int off = range << 3; amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo); amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo); - if (boot_cpu_data.x86 == 0xf) + if (c->x86 == 0xf) return; if (!dram_rw(pvt, range)) @@ -959,6 +1006,31 @@ static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range) amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi); amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi); + + /* Factor in CC6 save area by reading dst node's limit reg */ + if (c->x86 == 0x15) { + struct pci_dev *f1 = NULL; + u8 nid = dram_dst_node(pvt, range); + u32 llim; + + f1 = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid, 1)); + if (WARN_ON(!f1)) + return; + + amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim); + + pvt->ranges[range].lim.lo &= GENMASK(0, 15); + + /* {[39:27],111b} */ + pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16; + + pvt->ranges[range].lim.hi &= GENMASK(0, 7); + + /* [47:40] */ + pvt->ranges[range].lim.hi |= llim >> 13; + + pci_dev_put(f1); + } } static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, @@ -1403,12 +1475,8 @@ static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range, return -EINVAL; } - if (intlv_en && - (intlv_sel != ((sys_addr >> 12) & intlv_en))) { - amd64_warn("Botched intlv bits, en: 0x%x, sel: 0x%x\n", - intlv_en, intlv_sel); + if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en))) return -EINVAL; - } sys_addr = f1x_swap_interleaved_region(pvt, sys_addr); diff --git a/trunk/drivers/edac/amd64_edac.h b/trunk/drivers/edac/amd64_edac.h index 11be36a311eb..9a666cb985b2 100644 --- a/trunk/drivers/edac/amd64_edac.h +++ b/trunk/drivers/edac/amd64_edac.h @@ -196,6 +196,9 @@ #define DCT_CFG_SEL 0x10C +#define DRAM_LOCAL_NODE_BASE 0x120 +#define DRAM_LOCAL_NODE_LIM 0x124 + #define DRAM_BASE_HI 0x140 #define DRAM_LIMIT_HI 0x144 diff --git a/trunk/drivers/edac/edac_mc_sysfs.c b/trunk/drivers/edac/edac_mc_sysfs.c index 26343fd46596..29ffa350bfbe 100644 --- a/trunk/drivers/edac/edac_mc_sysfs.c +++ b/trunk/drivers/edac/edac_mc_sysfs.c @@ -458,13 +458,13 @@ static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci, return -EINVAL; new_bw = mci->set_sdram_scrub_rate(mci, bandwidth); - if (new_bw >= 0) { - edac_printk(KERN_DEBUG, EDAC_MC, "Scrub rate set to %d\n", new_bw); - return count; + if (new_bw < 0) { + edac_printk(KERN_WARNING, EDAC_MC, + "Error setting scrub rate to: %lu\n", bandwidth); + return -EINVAL; } - edac_printk(KERN_DEBUG, EDAC_MC, "Error setting scrub rate to: %lu\n", bandwidth); - return -EINVAL; + return count; } /* @@ -483,7 +483,6 @@ static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data) return bandwidth; } - edac_printk(KERN_DEBUG, EDAC_MC, "Read scrub rate: %d\n", bandwidth); return sprintf(data, "%d\n", bandwidth); } diff --git a/trunk/drivers/edac/ppc4xx_edac.c b/trunk/drivers/edac/ppc4xx_edac.c index c1f0045ceb8e..af8e7b1aa290 100644 --- a/trunk/drivers/edac/ppc4xx_edac.c +++ b/trunk/drivers/edac/ppc4xx_edac.c @@ -1019,7 +1019,7 @@ ppc4xx_edac_mc_init(struct mem_ctl_info *mci, struct ppc4xx_edac_pdata *pdata = NULL; const struct device_node *np = op->dev.of_node; - if (op->dev.of_match == NULL) + if (of_match_device(ppc4xx_edac_match, 
&op->dev) == NULL) return -EINVAL; /* Initial driver pointers and private data */ diff --git a/trunk/drivers/firewire/ohci.c b/trunk/drivers/firewire/ohci.c index f903d7b6f34a..23d1468ad253 100644 --- a/trunk/drivers/firewire/ohci.c +++ b/trunk/drivers/firewire/ohci.c @@ -2199,7 +2199,6 @@ static int ohci_set_config_rom(struct fw_card *card, { struct fw_ohci *ohci; unsigned long flags; - int ret = -EBUSY; __be32 *next_config_rom; dma_addr_t uninitialized_var(next_config_rom_bus); @@ -2240,22 +2239,37 @@ static int ohci_set_config_rom(struct fw_card *card, spin_lock_irqsave(&ohci->lock, flags); + /* + * If there is not an already pending config_rom update, + * push our new allocation into the ohci->next_config_rom + * and then mark the local variable as null so that we + * won't deallocate the new buffer. + * + * OTOH, if there is a pending config_rom update, just + * use that buffer with the new config_rom data, and + * let this routine free the unused DMA allocation. + */ + if (ohci->next_config_rom == NULL) { ohci->next_config_rom = next_config_rom; ohci->next_config_rom_bus = next_config_rom_bus; + next_config_rom = NULL; + } - copy_config_rom(ohci->next_config_rom, config_rom, length); + copy_config_rom(ohci->next_config_rom, config_rom, length); - ohci->next_header = config_rom[0]; - ohci->next_config_rom[0] = 0; + ohci->next_header = config_rom[0]; + ohci->next_config_rom[0] = 0; - reg_write(ohci, OHCI1394_ConfigROMmap, - ohci->next_config_rom_bus); - ret = 0; - } + reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); spin_unlock_irqrestore(&ohci->lock, flags); + /* If we didn't use the DMA allocation, delete it. */ + if (next_config_rom != NULL) + dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, + next_config_rom, next_config_rom_bus); + /* * Now initiate a bus reset to have the changes take * effect. We clean up the old config rom memory and DMA @@ -2263,13 +2277,10 @@ static int ohci_set_config_rom(struct fw_card *card, * controller could need to access it before the bus reset * takes effect. */ - if (ret == 0) - fw_schedule_bus_reset(&ohci->card, true, true); - else - dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, - next_config_rom, next_config_rom_bus); - return ret; + fw_schedule_bus_reset(&ohci->card, true, true); + + return 0; } static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) diff --git a/trunk/drivers/firmware/Kconfig b/trunk/drivers/firmware/Kconfig index b3a25a55ba23..efba163595db 100644 --- a/trunk/drivers/firmware/Kconfig +++ b/trunk/drivers/firmware/Kconfig @@ -157,4 +157,6 @@ config SIGMA If unsure, say N here. Drivers that need these helpers will select this option automatically. 
+source "drivers/firmware/google/Kconfig" + endmenu diff --git a/trunk/drivers/firmware/Makefile b/trunk/drivers/firmware/Makefile index 00bb0b80a79f..47338c979126 100644 --- a/trunk/drivers/firmware/Makefile +++ b/trunk/drivers/firmware/Makefile @@ -13,3 +13,5 @@ obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o obj-$(CONFIG_SIGMA) += sigma.o + +obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ diff --git a/trunk/drivers/firmware/edd.c b/trunk/drivers/firmware/edd.c index 96c25d93eed1..f1b7f659d3c9 100644 --- a/trunk/drivers/firmware/edd.c +++ b/trunk/drivers/firmware/edd.c @@ -531,8 +531,8 @@ static int edd_has_edd30(struct edd_device *edev) { struct edd_info *info; - int i, nonzero_path = 0; - char c; + int i; + u8 csum = 0; if (!edev) return 0; @@ -544,16 +544,16 @@ edd_has_edd30(struct edd_device *edev) return 0; } - for (i = 30; i <= 73; i++) { - c = *(((uint8_t *) info) + i + 4); - if (c) { - nonzero_path++; - break; - } - } - if (!nonzero_path) { + + /* We support only T13 spec */ + if (info->params.device_path_info_length != 44) + return 0; + + for (i = 30; i < info->params.device_path_info_length + 30; i++) + csum += *(((u8 *)&info->params) + i); + + if (csum) return 0; - } return 1; } diff --git a/trunk/drivers/firmware/efivars.c b/trunk/drivers/firmware/efivars.c index ff0c373e3bbf..a2d2f1f0d4f3 100644 --- a/trunk/drivers/firmware/efivars.c +++ b/trunk/drivers/firmware/efivars.c @@ -677,8 +677,8 @@ create_efivars_bin_attributes(struct efivars *efivars) return 0; out_free: - kfree(efivars->new_var); - efivars->new_var = NULL; + kfree(efivars->del_var); + efivars->del_var = NULL; kfree(efivars->new_var); efivars->new_var = NULL; return error; @@ -803,6 +803,8 @@ efivars_init(void) ops.set_variable = efi.set_variable; ops.get_next_variable = efi.get_next_variable; error = register_efivars(&__efivars, &ops, efi_kobj); + if (error) + goto err_put; /* Don't forget the systab entry */ error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group); @@ -810,18 +812,25 @@ efivars_init(void) printk(KERN_ERR "efivars: Sysfs attribute export failed with error %d.\n", error); - unregister_efivars(&__efivars); - kobject_put(efi_kobj); + goto err_unregister; } + return 0; + +err_unregister: + unregister_efivars(&__efivars); +err_put: + kobject_put(efi_kobj); return error; } static void __exit efivars_exit(void) { - unregister_efivars(&__efivars); - kobject_put(efi_kobj); + if (efi_enabled) { + unregister_efivars(&__efivars); + kobject_put(efi_kobj); + } } module_init(efivars_init); diff --git a/trunk/drivers/firmware/google/Kconfig b/trunk/drivers/firmware/google/Kconfig new file mode 100644 index 000000000000..87096b6ca5c9 --- /dev/null +++ b/trunk/drivers/firmware/google/Kconfig @@ -0,0 +1,31 @@ +config GOOGLE_FIRMWARE + bool "Google Firmware Drivers" + depends on X86 + default n + help + These firmware drivers are used by Google's servers. They are + only useful if you are working directly on one of their + proprietary servers. If in doubt, say "N". + +menu "Google Firmware Drivers" + depends on GOOGLE_FIRMWARE + +config GOOGLE_SMI + tristate "SMI interface for Google platforms" + depends on ACPI && DMI + select EFI_VARS + help + Say Y here if you want to enable SMI callbacks for Google + platforms. This provides an interface for writing to and + clearing the EFI event log and reading and writing NVRAM + variables. 
+ +config GOOGLE_MEMCONSOLE + tristate "Firmware Memory Console" + depends on DMI + help + This option enables the kernel to search for a firmware log in + the EBDA on Google servers. If found, this log is exported to + userland in the file /sys/firmware/log. + +endmenu diff --git a/trunk/drivers/firmware/google/Makefile b/trunk/drivers/firmware/google/Makefile new file mode 100644 index 000000000000..54a294e3cb61 --- /dev/null +++ b/trunk/drivers/firmware/google/Makefile @@ -0,0 +1,3 @@ + +obj-$(CONFIG_GOOGLE_SMI) += gsmi.o +obj-$(CONFIG_GOOGLE_MEMCONSOLE) += memconsole.o diff --git a/trunk/drivers/firmware/google/gsmi.c b/trunk/drivers/firmware/google/gsmi.c new file mode 100644 index 000000000000..fa7f0b3e81dd --- /dev/null +++ b/trunk/drivers/firmware/google/gsmi.c @@ -0,0 +1,940 @@ +/* + * Copyright 2010 Google Inc. All Rights Reserved. + * Author: dlaurie@google.com (Duncan Laurie) + * + * Re-worked to expose sysfs APIs by mikew@google.com (Mike Waychison) + * + * EFI SMI interface for Google platforms + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define GSMI_SHUTDOWN_CLEAN 0 /* Clean Shutdown */ +/* TODO(mikew@google.com): Tie in HARDLOCKUP_DETECTOR with NMIWDT */ +#define GSMI_SHUTDOWN_NMIWDT 1 /* NMI Watchdog */ +#define GSMI_SHUTDOWN_PANIC 2 /* Panic */ +#define GSMI_SHUTDOWN_OOPS 3 /* Oops */ +#define GSMI_SHUTDOWN_DIE 4 /* Die -- No longer meaningful */ +#define GSMI_SHUTDOWN_MCE 5 /* Machine Check */ +#define GSMI_SHUTDOWN_SOFTWDT 6 /* Software Watchdog */ +#define GSMI_SHUTDOWN_MBE 7 /* Uncorrected ECC */ +#define GSMI_SHUTDOWN_TRIPLE 8 /* Triple Fault */ + +#define DRIVER_VERSION "1.0" +#define GSMI_GUID_SIZE 16 +#define GSMI_BUF_SIZE 1024 +#define GSMI_BUF_ALIGN sizeof(u64) +#define GSMI_CALLBACK 0xef + +/* SMI return codes */ +#define GSMI_SUCCESS 0x00 +#define GSMI_UNSUPPORTED2 0x03 +#define GSMI_LOG_FULL 0x0b +#define GSMI_VAR_NOT_FOUND 0x0e +#define GSMI_HANDSHAKE_SPIN 0x7d +#define GSMI_HANDSHAKE_CF 0x7e +#define GSMI_HANDSHAKE_NONE 0x7f +#define GSMI_INVALID_PARAMETER 0x82 +#define GSMI_UNSUPPORTED 0x83 +#define GSMI_BUFFER_TOO_SMALL 0x85 +#define GSMI_NOT_READY 0x86 +#define GSMI_DEVICE_ERROR 0x87 +#define GSMI_NOT_FOUND 0x8e + +#define QUIRKY_BOARD_HASH 0x78a30a50 + +/* Internally used commands passed to the firmware */ +#define GSMI_CMD_GET_NVRAM_VAR 0x01 +#define GSMI_CMD_GET_NEXT_VAR 0x02 +#define GSMI_CMD_SET_NVRAM_VAR 0x03 +#define GSMI_CMD_SET_EVENT_LOG 0x08 +#define GSMI_CMD_CLEAR_EVENT_LOG 0x09 +#define GSMI_CMD_CLEAR_CONFIG 0x20 +#define GSMI_CMD_HANDSHAKE_TYPE 0xC1 + +/* Magic entry type for kernel events */ +#define GSMI_LOG_ENTRY_TYPE_KERNEL 0xDEAD + +/* SMI buffers must be in 32bit physical address space */ +struct gsmi_buf { + u8 *start; /* start of buffer */ + size_t length; /* length of buffer */ + dma_addr_t handle; /* dma allocation handle */ + u32 address; /* physical address of buffer */ +}; + +struct gsmi_device { + struct platform_device *pdev; /* platform device */ + struct gsmi_buf *name_buf; /* variable name buffer */ + struct gsmi_buf *data_buf; /* generic data buffer */ + struct gsmi_buf *param_buf; /* parameter buffer */ + spinlock_t lock; /* serialize access to SMIs */ + u16 smi_cmd; /* SMI command port */ + int handshake_type; /* firmware handler interlock type */ + struct dma_pool *dma_pool; /* DMA buffer pool */ +} gsmi_dev; + +/* Packed structures for communicating with 
the firmware */ +struct gsmi_nvram_var_param { + efi_guid_t guid; + u32 name_ptr; + u32 attributes; + u32 data_len; + u32 data_ptr; +} __packed; + +struct gsmi_get_next_var_param { + u8 guid[GSMI_GUID_SIZE]; + u32 name_ptr; + u32 name_len; +} __packed; + +struct gsmi_set_eventlog_param { + u32 data_ptr; + u32 data_len; + u32 type; +} __packed; + +/* Event log formats */ +struct gsmi_log_entry_type_1 { + u16 type; + u32 instance; +} __packed; + + +/* + * Some platforms don't have explicit SMI handshake + * and need to wait for SMI to complete. + */ +#define GSMI_DEFAULT_SPINCOUNT 0x10000 +static unsigned int spincount = GSMI_DEFAULT_SPINCOUNT; +module_param(spincount, uint, 0600); +MODULE_PARM_DESC(spincount, + "The number of loop iterations to use when using the spin handshake."); + +static struct gsmi_buf *gsmi_buf_alloc(void) +{ + struct gsmi_buf *smibuf; + + smibuf = kzalloc(sizeof(*smibuf), GFP_KERNEL); + if (!smibuf) { + printk(KERN_ERR "gsmi: out of memory\n"); + return NULL; + } + + /* allocate buffer in 32bit address space */ + smibuf->start = dma_pool_alloc(gsmi_dev.dma_pool, GFP_KERNEL, + &smibuf->handle); + if (!smibuf->start) { + printk(KERN_ERR "gsmi: failed to allocate name buffer\n"); + kfree(smibuf); + return NULL; + } + + /* fill in the buffer handle */ + smibuf->length = GSMI_BUF_SIZE; + smibuf->address = (u32)virt_to_phys(smibuf->start); + + return smibuf; +} + +static void gsmi_buf_free(struct gsmi_buf *smibuf) +{ + if (smibuf) { + if (smibuf->start) + dma_pool_free(gsmi_dev.dma_pool, smibuf->start, + smibuf->handle); + kfree(smibuf); + } +} + +/* + * Make a call to gsmi func(sub). GSMI error codes are translated to + * in-kernel errnos (0 on success, -ERRNO on error). + */ +static int gsmi_exec(u8 func, u8 sub) +{ + u16 cmd = (sub << 8) | func; + u16 result = 0; + int rc = 0; + + /* + * AH : Subfunction number + * AL : Function number + * EBX : Parameter block address + * DX : SMI command port + * + * Three protocols here. See also the comment in gsmi_init(). + */ + if (gsmi_dev.handshake_type == GSMI_HANDSHAKE_CF) { + /* + * If handshake_type == HANDSHAKE_CF then set CF on the + * way in and wait for the handler to clear it; this avoids + * corrupting register state on those chipsets which have + * a delay between writing the SMI trigger register and + * entering SMM. + */ + asm volatile ( + "stc\n" + "outb %%al, %%dx\n" + "1: jc 1b\n" + : "=a" (result) + : "0" (cmd), + "d" (gsmi_dev.smi_cmd), + "b" (gsmi_dev.param_buf->address) + : "memory", "cc" + ); + } else if (gsmi_dev.handshake_type == GSMI_HANDSHAKE_SPIN) { + /* + * If handshake_type == HANDSHAKE_SPIN we spin a + * hundred-ish usecs to ensure the SMI has triggered. + */ + asm volatile ( + "outb %%al, %%dx\n" + "1: loop 1b\n" + : "=a" (result) + : "0" (cmd), + "d" (gsmi_dev.smi_cmd), + "b" (gsmi_dev.param_buf->address), + "c" (spincount) + : "memory", "cc" + ); + } else { + /* + * If handshake_type == HANDSHAKE_NONE we do nothing; + * either we don't need to or it's legacy firmware that + * doesn't understand the CF protocol. 
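An illustrative aside, not part of the patch: in all three asm variants above the 16-bit value loaded into EAX carries the subfunction in AH and the function in AL, while EBX holds the physical address of the parameter buffer and DX the SMI command port taken from the FADT. The packing itself is trivial (helper names below are hypothetical):

#include <stdint.h>

static inline uint16_t gsmi_pack_cmd(uint8_t func, uint8_t sub)
{
	return (uint16_t)((sub << 8) | func);	/* AH = sub, AL = func */
}

static inline uint8_t gsmi_cmd_func(uint16_t cmd) { return cmd & 0xff; }
static inline uint8_t gsmi_cmd_sub(uint16_t cmd)  { return cmd >> 8; }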
+ */ + asm volatile ( + "outb %%al, %%dx\n\t" + : "=a" (result) + : "0" (cmd), + "d" (gsmi_dev.smi_cmd), + "b" (gsmi_dev.param_buf->address) + : "memory", "cc" + ); + } + + /* check return code from SMI handler */ + switch (result) { + case GSMI_SUCCESS: + break; + case GSMI_VAR_NOT_FOUND: + /* not really an error, but let the caller know */ + rc = 1; + break; + case GSMI_INVALID_PARAMETER: + printk(KERN_ERR "gsmi: exec 0x%04x: Invalid parameter\n", cmd); + rc = -EINVAL; + break; + case GSMI_BUFFER_TOO_SMALL: + printk(KERN_ERR "gsmi: exec 0x%04x: Buffer too small\n", cmd); + rc = -ENOMEM; + break; + case GSMI_UNSUPPORTED: + case GSMI_UNSUPPORTED2: + if (sub != GSMI_CMD_HANDSHAKE_TYPE) + printk(KERN_ERR "gsmi: exec 0x%04x: Not supported\n", + cmd); + rc = -ENOSYS; + break; + case GSMI_NOT_READY: + printk(KERN_ERR "gsmi: exec 0x%04x: Not ready\n", cmd); + rc = -EBUSY; + break; + case GSMI_DEVICE_ERROR: + printk(KERN_ERR "gsmi: exec 0x%04x: Device error\n", cmd); + rc = -EFAULT; + break; + case GSMI_NOT_FOUND: + printk(KERN_ERR "gsmi: exec 0x%04x: Data not found\n", cmd); + rc = -ENOENT; + break; + case GSMI_LOG_FULL: + printk(KERN_ERR "gsmi: exec 0x%04x: Log full\n", cmd); + rc = -ENOSPC; + break; + case GSMI_HANDSHAKE_CF: + case GSMI_HANDSHAKE_SPIN: + case GSMI_HANDSHAKE_NONE: + rc = result; + break; + default: + printk(KERN_ERR "gsmi: exec 0x%04x: Unknown error 0x%04x\n", + cmd, result); + rc = -ENXIO; + } + + return rc; +} + +/* Return the number of unicode characters in data */ +static size_t +utf16_strlen(efi_char16_t *data, unsigned long maxlength) +{ + unsigned long length = 0; + + while (*data++ != 0 && length < maxlength) + length++; + return length; +} + +static efi_status_t gsmi_get_variable(efi_char16_t *name, + efi_guid_t *vendor, u32 *attr, + unsigned long *data_size, + void *data) +{ + struct gsmi_nvram_var_param param = { + .name_ptr = gsmi_dev.name_buf->address, + .data_ptr = gsmi_dev.data_buf->address, + .data_len = (u32)*data_size, + }; + efi_status_t ret = EFI_SUCCESS; + unsigned long flags; + size_t name_len = utf16_strlen(name, GSMI_BUF_SIZE / 2); + int rc; + + if (name_len >= GSMI_BUF_SIZE / 2) + return EFI_BAD_BUFFER_SIZE; + + spin_lock_irqsave(&gsmi_dev.lock, flags); + + /* Vendor guid */ + memcpy(¶m.guid, vendor, sizeof(param.guid)); + + /* variable name, already in UTF-16 */ + memset(gsmi_dev.name_buf->start, 0, gsmi_dev.name_buf->length); + memcpy(gsmi_dev.name_buf->start, name, name_len * 2); + + /* data pointer */ + memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length); + + /* parameter buffer */ + memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length); + memcpy(gsmi_dev.param_buf->start, ¶m, sizeof(param)); + + rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_GET_NVRAM_VAR); + if (rc < 0) { + printk(KERN_ERR "gsmi: Get Variable failed\n"); + ret = EFI_LOAD_ERROR; + } else if (rc == 1) { + /* variable was not found */ + ret = EFI_NOT_FOUND; + } else { + /* Get the arguments back */ + memcpy(¶m, gsmi_dev.param_buf->start, sizeof(param)); + + /* The size reported is the min of all of our buffers */ + *data_size = min(*data_size, gsmi_dev.data_buf->length); + *data_size = min_t(unsigned long, *data_size, param.data_len); + + /* Copy data back to return buffer. 
*/ + memcpy(data, gsmi_dev.data_buf->start, *data_size); + + /* All variables are have the following attributes */ + *attr = EFI_VARIABLE_NON_VOLATILE | + EFI_VARIABLE_BOOTSERVICE_ACCESS | + EFI_VARIABLE_RUNTIME_ACCESS; + } + + spin_unlock_irqrestore(&gsmi_dev.lock, flags); + + return ret; +} + +static efi_status_t gsmi_get_next_variable(unsigned long *name_size, + efi_char16_t *name, + efi_guid_t *vendor) +{ + struct gsmi_get_next_var_param param = { + .name_ptr = gsmi_dev.name_buf->address, + .name_len = gsmi_dev.name_buf->length, + }; + efi_status_t ret = EFI_SUCCESS; + int rc; + unsigned long flags; + + /* For the moment, only support buffers that exactly match in size */ + if (*name_size != GSMI_BUF_SIZE) + return EFI_BAD_BUFFER_SIZE; + + /* Let's make sure the thing is at least null-terminated */ + if (utf16_strlen(name, GSMI_BUF_SIZE / 2) == GSMI_BUF_SIZE / 2) + return EFI_INVALID_PARAMETER; + + spin_lock_irqsave(&gsmi_dev.lock, flags); + + /* guid */ + memcpy(¶m.guid, vendor, sizeof(param.guid)); + + /* variable name, already in UTF-16 */ + memcpy(gsmi_dev.name_buf->start, name, *name_size); + + /* parameter buffer */ + memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length); + memcpy(gsmi_dev.param_buf->start, ¶m, sizeof(param)); + + rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_GET_NEXT_VAR); + if (rc < 0) { + printk(KERN_ERR "gsmi: Get Next Variable Name failed\n"); + ret = EFI_LOAD_ERROR; + } else if (rc == 1) { + /* variable not found -- end of list */ + ret = EFI_NOT_FOUND; + } else { + /* copy variable data back to return buffer */ + memcpy(¶m, gsmi_dev.param_buf->start, sizeof(param)); + + /* Copy the name back */ + memcpy(name, gsmi_dev.name_buf->start, GSMI_BUF_SIZE); + *name_size = utf16_strlen(name, GSMI_BUF_SIZE / 2) * 2; + + /* copy guid to return buffer */ + memcpy(vendor, ¶m.guid, sizeof(param.guid)); + ret = EFI_SUCCESS; + } + + spin_unlock_irqrestore(&gsmi_dev.lock, flags); + + return ret; +} + +static efi_status_t gsmi_set_variable(efi_char16_t *name, + efi_guid_t *vendor, + unsigned long attr, + unsigned long data_size, + void *data) +{ + struct gsmi_nvram_var_param param = { + .name_ptr = gsmi_dev.name_buf->address, + .data_ptr = gsmi_dev.data_buf->address, + .data_len = (u32)data_size, + .attributes = EFI_VARIABLE_NON_VOLATILE | + EFI_VARIABLE_BOOTSERVICE_ACCESS | + EFI_VARIABLE_RUNTIME_ACCESS, + }; + size_t name_len = utf16_strlen(name, GSMI_BUF_SIZE / 2); + efi_status_t ret = EFI_SUCCESS; + int rc; + unsigned long flags; + + if (name_len >= GSMI_BUF_SIZE / 2) + return EFI_BAD_BUFFER_SIZE; + + spin_lock_irqsave(&gsmi_dev.lock, flags); + + /* guid */ + memcpy(¶m.guid, vendor, sizeof(param.guid)); + + /* variable name, already in UTF-16 */ + memset(gsmi_dev.name_buf->start, 0, gsmi_dev.name_buf->length); + memcpy(gsmi_dev.name_buf->start, name, name_len * 2); + + /* data pointer */ + memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length); + memcpy(gsmi_dev.data_buf->start, data, data_size); + + /* parameter buffer */ + memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length); + memcpy(gsmi_dev.param_buf->start, ¶m, sizeof(param)); + + rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_SET_NVRAM_VAR); + if (rc < 0) { + printk(KERN_ERR "gsmi: Set Variable failed\n"); + ret = EFI_INVALID_PARAMETER; + } + + spin_unlock_irqrestore(&gsmi_dev.lock, flags); + + return ret; +} + +static const struct efivar_operations efivar_ops = { + .get_variable = gsmi_get_variable, + .set_variable = gsmi_set_variable, + .get_next_variable = gsmi_get_next_variable, +}; + +static 
ssize_t eventlog_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t pos, size_t count) +{ + struct gsmi_set_eventlog_param param = { + .data_ptr = gsmi_dev.data_buf->address, + }; + int rc = 0; + unsigned long flags; + + /* Pull the type out */ + if (count < sizeof(u32)) + return -EINVAL; + param.type = *(u32 *)buf; + count -= sizeof(u32); + buf += sizeof(u32); + + /* The remaining buffer is the data payload */ + if (count > gsmi_dev.data_buf->length) + return -EINVAL; + param.data_len = count - sizeof(u32); + + spin_lock_irqsave(&gsmi_dev.lock, flags); + + /* data pointer */ + memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length); + memcpy(gsmi_dev.data_buf->start, buf, param.data_len); + + /* parameter buffer */ + memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length); + memcpy(gsmi_dev.param_buf->start, ¶m, sizeof(param)); + + rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_SET_EVENT_LOG); + if (rc < 0) + printk(KERN_ERR "gsmi: Set Event Log failed\n"); + + spin_unlock_irqrestore(&gsmi_dev.lock, flags); + + return rc; + +} + +static struct bin_attribute eventlog_bin_attr = { + .attr = {.name = "append_to_eventlog", .mode = 0200}, + .write = eventlog_write, +}; + +static ssize_t gsmi_clear_eventlog_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int rc; + unsigned long flags; + unsigned long val; + struct { + u32 percentage; + u32 data_type; + } param; + + rc = strict_strtoul(buf, 0, &val); + if (rc) + return rc; + + /* + * Value entered is a percentage, 0 through 100, anything else + * is invalid. + */ + if (val > 100) + return -EINVAL; + + /* data_type here selects the smbios event log. */ + param.percentage = val; + param.data_type = 0; + + spin_lock_irqsave(&gsmi_dev.lock, flags); + + /* parameter buffer */ + memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length); + memcpy(gsmi_dev.param_buf->start, ¶m, sizeof(param)); + + rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_CLEAR_EVENT_LOG); + + spin_unlock_irqrestore(&gsmi_dev.lock, flags); + + if (rc) + return rc; + return count; +} + +static struct kobj_attribute gsmi_clear_eventlog_attr = { + .attr = {.name = "clear_eventlog", .mode = 0200}, + .store = gsmi_clear_eventlog_store, +}; + +static ssize_t gsmi_clear_config_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int rc; + unsigned long flags; + + spin_lock_irqsave(&gsmi_dev.lock, flags); + + /* clear parameter buffer */ + memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length); + + rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_CLEAR_CONFIG); + + spin_unlock_irqrestore(&gsmi_dev.lock, flags); + + if (rc) + return rc; + return count; +} + +static struct kobj_attribute gsmi_clear_config_attr = { + .attr = {.name = "clear_config", .mode = 0200}, + .store = gsmi_clear_config_store, +}; + +static const struct attribute *gsmi_attrs[] = { + &gsmi_clear_config_attr.attr, + &gsmi_clear_eventlog_attr.attr, + NULL, +}; + +static int gsmi_shutdown_reason(int reason) +{ + struct gsmi_log_entry_type_1 entry = { + .type = GSMI_LOG_ENTRY_TYPE_KERNEL, + .instance = reason, + }; + struct gsmi_set_eventlog_param param = { + .data_len = sizeof(entry), + .type = 1, + }; + static int saved_reason; + int rc = 0; + unsigned long flags; + + /* avoid duplicate entries in the log */ + if (saved_reason & (1 << reason)) + return 0; + + spin_lock_irqsave(&gsmi_dev.lock, flags); + + saved_reason |= (1 << reason); + + /* data pointer */ + 
memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length); + memcpy(gsmi_dev.data_buf->start, &entry, sizeof(entry)); + + /* parameter buffer */ + param.data_ptr = gsmi_dev.data_buf->address; + memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length); + memcpy(gsmi_dev.param_buf->start, ¶m, sizeof(param)); + + rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_SET_EVENT_LOG); + + spin_unlock_irqrestore(&gsmi_dev.lock, flags); + + if (rc < 0) + printk(KERN_ERR "gsmi: Log Shutdown Reason failed\n"); + else + printk(KERN_EMERG "gsmi: Log Shutdown Reason 0x%02x\n", + reason); + + return rc; +} + +static int gsmi_reboot_callback(struct notifier_block *nb, + unsigned long reason, void *arg) +{ + gsmi_shutdown_reason(GSMI_SHUTDOWN_CLEAN); + return NOTIFY_DONE; +} + +static struct notifier_block gsmi_reboot_notifier = { + .notifier_call = gsmi_reboot_callback +}; + +static int gsmi_die_callback(struct notifier_block *nb, + unsigned long reason, void *arg) +{ + if (reason == DIE_OOPS) + gsmi_shutdown_reason(GSMI_SHUTDOWN_OOPS); + return NOTIFY_DONE; +} + +static struct notifier_block gsmi_die_notifier = { + .notifier_call = gsmi_die_callback +}; + +static int gsmi_panic_callback(struct notifier_block *nb, + unsigned long reason, void *arg) +{ + gsmi_shutdown_reason(GSMI_SHUTDOWN_PANIC); + return NOTIFY_DONE; +} + +static struct notifier_block gsmi_panic_notifier = { + .notifier_call = gsmi_panic_callback, +}; + +/* + * This hash function was blatantly copied from include/linux/hash.h. + * It is used by this driver to obfuscate a board name that requires a + * quirk within this driver. + * + * Please do not remove this copy of the function as any changes to the + * global utility hash_64() function would break this driver's ability + * to identify a board and provide the appropriate quirk -- mikew@google.com + */ +static u64 __init local_hash_64(u64 val, unsigned bits) +{ + u64 hash = val; + + /* Sigh, gcc can't optimise this alone like it does for 32 bits. */ + u64 n = hash; + n <<= 18; + hash -= n; + n <<= 33; + hash -= n; + n <<= 3; + hash += n; + n <<= 3; + hash -= n; + n <<= 4; + hash += n; + n <<= 2; + hash += n; + + /* High bits are more random, so use them. */ + return hash >> (64 - bits); +} + +static u32 __init hash_oem_table_id(char s[8]) +{ + u64 input; + memcpy(&input, s, 8); + return local_hash_64(input, 32); +} + +static struct dmi_system_id gsmi_dmi_table[] __initdata = { + { + .ident = "Google Board", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Google, Inc."), + }, + }, + {} +}; +MODULE_DEVICE_TABLE(dmi, gsmi_dmi_table); + +static __init int gsmi_system_valid(void) +{ + u32 hash; + + if (!dmi_check_system(gsmi_dmi_table)) + return -ENODEV; + + /* + * Only newer firmware supports the gsmi interface. All older + * firmware that didn't support this interface used to plug the + * table name in the first four bytes of the oem_table_id field. + * Newer firmware doesn't do that though, so use that as the + * discriminant factor. We have to do this in order to + * whitewash our board names out of the public driver. 
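An illustrative aside, not part of the patch: the hash_oem_table_id() helper above condenses the first eight bytes of the FADT oem_table_id into a 32-bit value that gsmi_system_valid() compares against QUIRKY_BOARD_HASH. Reproducing the value offline — for example to check a candidate table id against that constant — only needs the same arithmetic; note that the memcpy() makes the result depend on host byte order:

#include <stdint.h>
#include <string.h>

static uint64_t hash64_copy(uint64_t val, unsigned int bits)
{
	uint64_t hash = val, n = val;

	n <<= 18; hash -= n;
	n <<= 33; hash -= n;
	n <<= 3;  hash += n;
	n <<= 3;  hash -= n;
	n <<= 4;  hash += n;
	n <<= 2;  hash += n;
	return hash >> (64 - bits);
}

static uint32_t oem_table_id_hash(const char s[8])
{
	uint64_t input;

	memcpy(&input, s, 8);
	return (uint32_t)hash64_copy(input, 32);
}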
+ */ + if (!strncmp(acpi_gbl_FADT.header.oem_table_id, "FACP", 4)) { + printk(KERN_INFO "gsmi: Board is too old\n"); + return -ENODEV; + } + + /* Disable on board with 1.0 BIOS due to Google bug 2602657 */ + hash = hash_oem_table_id(acpi_gbl_FADT.header.oem_table_id); + if (hash == QUIRKY_BOARD_HASH) { + const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION); + if (strncmp(bios_ver, "1.0", 3) == 0) { + pr_info("gsmi: disabled on this board's BIOS %s\n", + bios_ver); + return -ENODEV; + } + } + + /* check for valid SMI command port in ACPI FADT */ + if (acpi_gbl_FADT.smi_command == 0) { + pr_info("gsmi: missing smi_command\n"); + return -ENODEV; + } + + /* Found */ + return 0; +} + +static struct kobject *gsmi_kobj; +static struct efivars efivars; + +static __init int gsmi_init(void) +{ + unsigned long flags; + int ret; + + ret = gsmi_system_valid(); + if (ret) + return ret; + + gsmi_dev.smi_cmd = acpi_gbl_FADT.smi_command; + + /* register device */ + gsmi_dev.pdev = platform_device_register_simple("gsmi", -1, NULL, 0); + if (IS_ERR(gsmi_dev.pdev)) { + printk(KERN_ERR "gsmi: unable to register platform device\n"); + return PTR_ERR(gsmi_dev.pdev); + } + + /* SMI access needs to be serialized */ + spin_lock_init(&gsmi_dev.lock); + + /* SMI callbacks require 32bit addresses */ + gsmi_dev.pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + gsmi_dev.pdev->dev.dma_mask = + &gsmi_dev.pdev->dev.coherent_dma_mask; + ret = -ENOMEM; + gsmi_dev.dma_pool = dma_pool_create("gsmi", &gsmi_dev.pdev->dev, + GSMI_BUF_SIZE, GSMI_BUF_ALIGN, 0); + if (!gsmi_dev.dma_pool) + goto out_err; + + /* + * pre-allocate buffers because sometimes we are called when + * this is not feasible: oops, panic, die, mce, etc + */ + gsmi_dev.name_buf = gsmi_buf_alloc(); + if (!gsmi_dev.name_buf) { + printk(KERN_ERR "gsmi: failed to allocate name buffer\n"); + goto out_err; + } + + gsmi_dev.data_buf = gsmi_buf_alloc(); + if (!gsmi_dev.data_buf) { + printk(KERN_ERR "gsmi: failed to allocate data buffer\n"); + goto out_err; + } + + gsmi_dev.param_buf = gsmi_buf_alloc(); + if (!gsmi_dev.param_buf) { + printk(KERN_ERR "gsmi: failed to allocate param buffer\n"); + goto out_err; + } + + /* + * Determine type of handshake used to serialize the SMI + * entry. See also gsmi_exec(). + * + * There's a "behavior" present on some chipsets where writing the + * SMI trigger register in the southbridge doesn't result in an + * immediate SMI. Rather, the processor can execute "a few" more + * instructions before the SMI takes effect. To ensure synchronous + * behavior, implement a handshake between the kernel driver and the + * firmware handler to spin until released. This ioctl determines + * the type of handshake. + * + * NONE: The firmware handler does not implement any + * handshake. Either it doesn't need to, or it's legacy firmware + * that doesn't know it needs to and never will. + * + * CF: The firmware handler will clear the CF in the saved + * state before returning. The driver may set the CF and test for + * it to clear before proceeding. + * + * SPIN: The firmware handler does not implement any handshake + * but the driver should spin for a hundred or so microseconds + * to ensure the SMI has triggered. + * + * Finally, the handler will return -ENOSYS if + * GSMI_CMD_HANDSHAKE_TYPE is unimplemented, which implies + * HANDSHAKE_NONE. 
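An illustrative aside, not part of the patch: because gsmi_exec() passes the GSMI_HANDSHAKE_* codes through unchanged and turns an unsupported subfunction into -ENOSYS, the probe described above reduces to a small classification step. A self-contained sketch (constants copied from this file, helper name hypothetical):

#include <errno.h>

#define GSMI_HANDSHAKE_SPIN	0x7d
#define GSMI_HANDSHAKE_CF	0x7e
#define GSMI_HANDSHAKE_NONE	0x7f

static int classify_handshake(int probe_result)
{
	if (probe_result == GSMI_HANDSHAKE_CF ||
	    probe_result == GSMI_HANDSHAKE_SPIN ||
	    probe_result == GSMI_HANDSHAKE_NONE)
		return probe_result;		/* firmware stated its protocol */
	if (probe_result == -ENOSYS)
		return GSMI_HANDSHAKE_NONE;	/* query not implemented: assume none */
	return probe_result;			/* real error; caller bails out */
}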
+ */ + spin_lock_irqsave(&gsmi_dev.lock, flags); + gsmi_dev.handshake_type = GSMI_HANDSHAKE_SPIN; + gsmi_dev.handshake_type = + gsmi_exec(GSMI_CALLBACK, GSMI_CMD_HANDSHAKE_TYPE); + if (gsmi_dev.handshake_type == -ENOSYS) + gsmi_dev.handshake_type = GSMI_HANDSHAKE_NONE; + spin_unlock_irqrestore(&gsmi_dev.lock, flags); + + /* Remove and clean up gsmi if the handshake could not complete. */ + if (gsmi_dev.handshake_type == -ENXIO) { + printk(KERN_INFO "gsmi version " DRIVER_VERSION + " failed to load\n"); + ret = -ENODEV; + goto out_err; + } + + printk(KERN_INFO "gsmi version " DRIVER_VERSION " loaded\n"); + + /* Register in the firmware directory */ + ret = -ENOMEM; + gsmi_kobj = kobject_create_and_add("gsmi", firmware_kobj); + if (!gsmi_kobj) { + printk(KERN_INFO "gsmi: Failed to create firmware kobj\n"); + goto out_err; + } + + /* Setup eventlog access */ + ret = sysfs_create_bin_file(gsmi_kobj, &eventlog_bin_attr); + if (ret) { + printk(KERN_INFO "gsmi: Failed to setup eventlog"); + goto out_err; + } + + /* Other attributes */ + ret = sysfs_create_files(gsmi_kobj, gsmi_attrs); + if (ret) { + printk(KERN_INFO "gsmi: Failed to add attrs"); + goto out_err; + } + + if (register_efivars(&efivars, &efivar_ops, gsmi_kobj)) { + printk(KERN_INFO "gsmi: Failed to register efivars\n"); + goto out_err; + } + + register_reboot_notifier(&gsmi_reboot_notifier); + register_die_notifier(&gsmi_die_notifier); + atomic_notifier_chain_register(&panic_notifier_list, + &gsmi_panic_notifier); + + return 0; + + out_err: + kobject_put(gsmi_kobj); + gsmi_buf_free(gsmi_dev.param_buf); + gsmi_buf_free(gsmi_dev.data_buf); + gsmi_buf_free(gsmi_dev.name_buf); + if (gsmi_dev.dma_pool) + dma_pool_destroy(gsmi_dev.dma_pool); + platform_device_unregister(gsmi_dev.pdev); + pr_info("gsmi: failed to load: %d\n", ret); + return ret; +} + +static void __exit gsmi_exit(void) +{ + unregister_reboot_notifier(&gsmi_reboot_notifier); + unregister_die_notifier(&gsmi_die_notifier); + atomic_notifier_chain_unregister(&panic_notifier_list, + &gsmi_panic_notifier); + unregister_efivars(&efivars); + + kobject_put(gsmi_kobj); + gsmi_buf_free(gsmi_dev.param_buf); + gsmi_buf_free(gsmi_dev.data_buf); + gsmi_buf_free(gsmi_dev.name_buf); + dma_pool_destroy(gsmi_dev.dma_pool); + platform_device_unregister(gsmi_dev.pdev); +} + +module_init(gsmi_init); +module_exit(gsmi_exit); + +MODULE_AUTHOR("Google, Inc."); +MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/firmware/google/memconsole.c b/trunk/drivers/firmware/google/memconsole.c new file mode 100644 index 000000000000..2a90ba613613 --- /dev/null +++ b/trunk/drivers/firmware/google/memconsole.c @@ -0,0 +1,166 @@ +/* + * memconsole.c + * + * Infrastructure for importing the BIOS memory based console + * into the kernel log ringbuffer. + * + * Copyright 2010 Google Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BIOS_MEMCONSOLE_V1_MAGIC 0xDEADBABE +#define BIOS_MEMCONSOLE_V2_MAGIC (('M')|('C'<<8)|('O'<<16)|('N'<<24)) + +struct biosmemcon_ebda { + u32 signature; + union { + struct { + u8 enabled; + u32 buffer_addr; + u16 start; + u16 end; + u16 num_chars; + u8 wrapped; + } __packed v1; + struct { + u32 buffer_addr; + /* Misdocumented as number of pages! 
*/ + u16 num_bytes; + u16 start; + u16 end; + } __packed v2; + }; +} __packed; + +static char *memconsole_baseaddr; +static size_t memconsole_length; + +static ssize_t memconsole_read(struct file *filp, struct kobject *kobp, + struct bin_attribute *bin_attr, char *buf, + loff_t pos, size_t count) +{ + return memory_read_from_buffer(buf, count, &pos, memconsole_baseaddr, + memconsole_length); +} + +static struct bin_attribute memconsole_bin_attr = { + .attr = {.name = "log", .mode = 0444}, + .read = memconsole_read, +}; + + +static void found_v1_header(struct biosmemcon_ebda *hdr) +{ + printk(KERN_INFO "BIOS console v1 EBDA structure found at %p\n", hdr); + printk(KERN_INFO "BIOS console buffer at 0x%.8x, " + "start = %d, end = %d, num = %d\n", + hdr->v1.buffer_addr, hdr->v1.start, + hdr->v1.end, hdr->v1.num_chars); + + memconsole_length = hdr->v1.num_chars; + memconsole_baseaddr = phys_to_virt(hdr->v1.buffer_addr); +} + +static void found_v2_header(struct biosmemcon_ebda *hdr) +{ + printk(KERN_INFO "BIOS console v2 EBDA structure found at %p\n", hdr); + printk(KERN_INFO "BIOS console buffer at 0x%.8x, " + "start = %d, end = %d, num_bytes = %d\n", + hdr->v2.buffer_addr, hdr->v2.start, + hdr->v2.end, hdr->v2.num_bytes); + + memconsole_length = hdr->v2.end - hdr->v2.start; + memconsole_baseaddr = phys_to_virt(hdr->v2.buffer_addr + + hdr->v2.start); +} + +/* + * Search through the EBDA for the BIOS Memory Console, and + * set the global variables to point to it. Return true if found. + */ +static bool found_memconsole(void) +{ + unsigned int address; + size_t length, cur; + + address = get_bios_ebda(); + if (!address) { + printk(KERN_INFO "BIOS EBDA non-existent.\n"); + return false; + } + + /* EBDA length is byte 0 of EBDA (in KB) */ + length = *(u8 *)phys_to_virt(address); + length <<= 10; /* convert to bytes */ + + /* + * Search through EBDA for BIOS memory console structure + * note: signature is not necessarily dword-aligned + */ + for (cur = 0; cur < length; cur++) { + struct biosmemcon_ebda *hdr = phys_to_virt(address + cur); + + /* memconsole v1 */ + if (hdr->signature == BIOS_MEMCONSOLE_V1_MAGIC) { + found_v1_header(hdr); + return true; + } + + /* memconsole v2 */ + if (hdr->signature == BIOS_MEMCONSOLE_V2_MAGIC) { + found_v2_header(hdr); + return true; + } + } + + printk(KERN_INFO "BIOS console EBDA structure not found!\n"); + return false; +} + +static struct dmi_system_id memconsole_dmi_table[] __initdata = { + { + .ident = "Google Board", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Google, Inc."), + }, + }, + {} +}; +MODULE_DEVICE_TABLE(dmi, memconsole_dmi_table); + +static int __init memconsole_init(void) +{ + int ret; + + if (!dmi_check_system(memconsole_dmi_table)) + return -ENODEV; + + if (!found_memconsole()) + return -ENODEV; + + memconsole_bin_attr.size = memconsole_length; + + ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr); + + return ret; +} + +static void __exit memconsole_exit(void) +{ + sysfs_remove_bin_file(firmware_kobj, &memconsole_bin_attr); +} + +module_init(memconsole_init); +module_exit(memconsole_exit); + +MODULE_AUTHOR("Google, Inc."); +MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/firmware/iscsi_ibft_find.c b/trunk/drivers/firmware/iscsi_ibft_find.c index 2192456dfd68..f032e446fc11 100644 --- a/trunk/drivers/firmware/iscsi_ibft_find.c +++ b/trunk/drivers/firmware/iscsi_ibft_find.c @@ -42,7 +42,20 @@ struct acpi_table_ibft *ibft_addr; EXPORT_SYMBOL_GPL(ibft_addr); -#define IBFT_SIGN "iBFT" +static const struct { + char *sign; +} 
ibft_signs[] = { +#ifdef CONFIG_ACPI + /* + * One spec says "IBFT", the other says "iBFT". We have to check + * for both. + */ + { ACPI_SIG_IBFT }, +#endif + { "iBFT" }, + { "BIFT" }, /* Broadcom iSCSI Offload */ +}; + #define IBFT_SIGN_LEN 4 #define IBFT_START 0x80000 /* 512kB */ #define IBFT_END 0x100000 /* 1MB */ @@ -62,6 +75,7 @@ static int __init find_ibft_in_mem(void) unsigned long pos; unsigned int len = 0; void *virt; + int i; for (pos = IBFT_START; pos < IBFT_END; pos += 16) { /* The table can't be inside the VGA BIOS reserved space, @@ -69,18 +83,23 @@ static int __init find_ibft_in_mem(void) if (pos == VGA_MEM) pos += VGA_SIZE; virt = isa_bus_to_virt(pos); - if (memcmp(virt, IBFT_SIGN, IBFT_SIGN_LEN) == 0) { - unsigned long *addr = - (unsigned long *)isa_bus_to_virt(pos + 4); - len = *addr; - /* if the length of the table extends past 1M, - * the table cannot be valid. */ - if (pos + len <= (IBFT_END-1)) { - ibft_addr = (struct acpi_table_ibft *)virt; - break; + + for (i = 0; i < ARRAY_SIZE(ibft_signs); i++) { + if (memcmp(virt, ibft_signs[i].sign, IBFT_SIGN_LEN) == + 0) { + unsigned long *addr = + (unsigned long *)isa_bus_to_virt(pos + 4); + len = *addr; + /* if the length of the table extends past 1M, + * the table cannot be valid. */ + if (pos + len <= (IBFT_END-1)) { + ibft_addr = (struct acpi_table_ibft *)virt; + goto done; + } } } } +done: return len; } /* @@ -89,18 +108,12 @@ static int __init find_ibft_in_mem(void) */ unsigned long __init find_ibft_region(unsigned long *sizep) { - + int i; ibft_addr = NULL; #ifdef CONFIG_ACPI - /* - * One spec says "IBFT", the other says "iBFT". We have to check - * for both. - */ - if (!ibft_addr) - acpi_table_parse(ACPI_SIG_IBFT, acpi_find_ibft); - if (!ibft_addr) - acpi_table_parse(IBFT_SIGN, acpi_find_ibft); + for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) + acpi_table_parse(ibft_signs[i].sign, acpi_find_ibft); #endif /* CONFIG_ACPI */ /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will diff --git a/trunk/drivers/gpio/ml_ioh_gpio.c b/trunk/drivers/gpio/ml_ioh_gpio.c index 7f6f01a4b145..0a775f7987c2 100644 --- a/trunk/drivers/gpio/ml_ioh_gpio.c +++ b/trunk/drivers/gpio/ml_ioh_gpio.c @@ -116,6 +116,7 @@ static int ioh_gpio_direction_output(struct gpio_chip *gpio, unsigned nr, reg_val |= (1 << nr); else reg_val &= ~(1 << nr); + iowrite32(reg_val, &chip->reg->regs[chip->ch].po); mutex_unlock(&chip->lock); diff --git a/trunk/drivers/gpio/pca953x.c b/trunk/drivers/gpio/pca953x.c index 583e92592073..7630ab7b9bec 100644 --- a/trunk/drivers/gpio/pca953x.c +++ b/trunk/drivers/gpio/pca953x.c @@ -558,7 +558,7 @@ static int __devinit pca953x_probe(struct i2c_client *client, ret = gpiochip_add(&chip->gpio_chip); if (ret) - goto out_failed; + goto out_failed_irq; if (pdata->setup) { ret = pdata->setup(client, chip->gpio_chip.base, @@ -570,8 +570,9 @@ static int __devinit pca953x_probe(struct i2c_client *client, i2c_set_clientdata(client, chip); return 0; -out_failed: +out_failed_irq: pca953x_irq_teardown(chip); +out_failed: kfree(chip->dyn_pdata); kfree(chip); return ret; diff --git a/trunk/drivers/gpio/pch_gpio.c b/trunk/drivers/gpio/pch_gpio.c index 2c6af8705103..f970a5f3585e 100644 --- a/trunk/drivers/gpio/pch_gpio.c +++ b/trunk/drivers/gpio/pch_gpio.c @@ -105,6 +105,7 @@ static int pch_gpio_direction_output(struct gpio_chip *gpio, unsigned nr, reg_val |= (1 << nr); else reg_val &= ~(1 << nr); + iowrite32(reg_val, &chip->reg->po); mutex_unlock(&chip->lock); diff --git a/trunk/drivers/gpu/drm/Kconfig 
b/trunk/drivers/gpu/drm/Kconfig index a6feb78c404c..b493663c7ba7 100644 --- a/trunk/drivers/gpu/drm/Kconfig +++ b/trunk/drivers/gpu/drm/Kconfig @@ -24,6 +24,7 @@ config DRM_KMS_HELPER depends on DRM select FB select FRAMEBUFFER_CONSOLE if !EXPERT + select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE help FB and CRTC helpers for KMS drivers. @@ -96,6 +97,7 @@ config DRM_I915 # i915 depends on ACPI_VIDEO when ACPI is enabled # but for select to work, need to select ACPI_VIDEO's dependencies, ick select BACKLIGHT_CLASS_DEVICE if ACPI + select VIDEO_OUTPUT_CONTROL if ACPI select INPUT if ACPI select ACPI_VIDEO if ACPI select ACPI_BUTTON if ACPI diff --git a/trunk/drivers/gpu/drm/drm_fb_helper.c b/trunk/drivers/gpu/drm/drm_fb_helper.c index 950720473967..140b9525b48a 100644 --- a/trunk/drivers/gpu/drm/drm_fb_helper.c +++ b/trunk/drivers/gpu/drm/drm_fb_helper.c @@ -342,9 +342,22 @@ int drm_fb_helper_debug_leave(struct fb_info *info) } EXPORT_SYMBOL(drm_fb_helper_debug_leave); +bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper) +{ + bool error = false; + int i, ret; + for (i = 0; i < fb_helper->crtc_count; i++) { + struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set; + ret = drm_crtc_helper_set_config(mode_set); + if (ret) + error = true; + } + return error; +} +EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode); + bool drm_fb_helper_force_kernel_mode(void) { - int i = 0; bool ret, error = false; struct drm_fb_helper *helper; @@ -352,12 +365,12 @@ bool drm_fb_helper_force_kernel_mode(void) return false; list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { - for (i = 0; i < helper->crtc_count; i++) { - struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set; - ret = drm_crtc_helper_set_config(mode_set); - if (ret) - error = true; - } + if (helper->dev->switch_power_state == DRM_SWITCH_POWER_OFF) + continue; + + ret = drm_fb_helper_restore_fbdev_mode(helper); + if (ret) + error = true; } return error; } @@ -1503,17 +1516,33 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) } EXPORT_SYMBOL(drm_fb_helper_initial_config); -bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) +/** + * drm_fb_helper_hotplug_event - respond to a hotplug notification by + * probing all the outputs attached to the fb. + * @fb_helper: the drm_fb_helper + * + * LOCKING: + * Called at runtime, must take mode config lock. + * + * Scan the connectors attached to the fb_helper and try to put together a + * setup after *notification of a change in output configuration. + * + * RETURNS: + * 0 on success and a non-zero error code otherwise. 
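An illustrative aside, not part of the patch: the newly exported drm_fb_helper_restore_fbdev_mode() above lets a KMS driver restore its own fbdev configuration instead of going through the global drm_fb_helper_force_kernel_mode() walk. A hypothetical lastclose handler would use it roughly as follows (example_fbdev and example_lastclose are made-up names; the i915 hunks later in this series wire up the same call as intel_fb_restore_mode()):

#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>

struct example_fbdev {
	struct drm_fb_helper helper;
	/* ... framebuffer bookkeeping ... */
};

static void example_lastclose(struct drm_device *dev)
{
	struct example_fbdev *fbdev = dev->dev_private;

	/* the helper returns true when any CRTC failed to be restored */
	if (drm_fb_helper_restore_fbdev_mode(&fbdev->helper))
		DRM_DEBUG("failed to restore fbdev mode\n");
}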
+ */ +int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) { + struct drm_device *dev = fb_helper->dev; int count = 0; u32 max_width, max_height, bpp_sel; bool bound = false, crtcs_bound = false; struct drm_crtc *crtc; if (!fb_helper->fb) - return false; + return 0; - list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) { + mutex_lock(&dev->mode_config.mutex); + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { if (crtc->fb) crtcs_bound = true; if (crtc->fb == fb_helper->fb) @@ -1522,7 +1551,8 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) if (!bound && crtcs_bound) { fb_helper->delayed_hotplug = true; - return false; + mutex_unlock(&dev->mode_config.mutex); + return 0; } DRM_DEBUG_KMS("\n"); @@ -1533,6 +1563,7 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height); drm_setup_crtcs(fb_helper); + mutex_unlock(&dev->mode_config.mutex); return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); } diff --git a/trunk/drivers/gpu/drm/drm_irq.c b/trunk/drivers/gpu/drm/drm_irq.c index 741457bd1c46..a1f12cb043de 100644 --- a/trunk/drivers/gpu/drm/drm_irq.c +++ b/trunk/drivers/gpu/drm/drm_irq.c @@ -932,11 +932,34 @@ EXPORT_SYMBOL(drm_vblank_put); void drm_vblank_off(struct drm_device *dev, int crtc) { + struct drm_pending_vblank_event *e, *t; + struct timeval now; unsigned long irqflags; + unsigned int seq; spin_lock_irqsave(&dev->vbl_lock, irqflags); vblank_disable_and_save(dev, crtc); DRM_WAKEUP(&dev->vbl_queue[crtc]); + + /* Send any queued vblank events, lest the natives grow disquiet */ + seq = drm_vblank_count_and_time(dev, crtc, &now); + list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { + if (e->pipe != crtc) + continue; + DRM_DEBUG("Sending premature vblank event on disable: \ + wanted %d, current %d\n", + e->event.sequence, seq); + + e->event.sequence = seq; + e->event.tv_sec = now.tv_sec; + e->event.tv_usec = now.tv_usec; + drm_vblank_put(dev, e->pipe); + list_move_tail(&e->base.link, &e->base.file_priv->event_list); + wake_up_interruptible(&e->base.file_priv->event_wait); + trace_drm_vblank_event_delivered(e->base.pid, e->pipe, + e->event.sequence); + } + spin_unlock_irqrestore(&dev->vbl_lock, irqflags); } EXPORT_SYMBOL(drm_vblank_off); diff --git a/trunk/drivers/gpu/drm/drm_mm.c b/trunk/drivers/gpu/drm/drm_mm.c index 5d00b0fc0d91..959186cbf328 100644 --- a/trunk/drivers/gpu/drm/drm_mm.c +++ b/trunk/drivers/gpu/drm/drm_mm.c @@ -431,7 +431,7 @@ EXPORT_SYMBOL(drm_mm_search_free_in_range); void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) { list_replace(&old->node_list, &new->node_list); - list_replace(&old->node_list, &new->hole_stack); + list_replace(&old->hole_stack, &new->hole_stack); new->hole_follows = old->hole_follows; new->mm = old->mm; new->start = old->start; @@ -699,8 +699,8 @@ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) entry->size); total_used += entry->size; if (entry->hole_follows) { - hole_start = drm_mm_hole_node_start(&mm->head_node); - hole_end = drm_mm_hole_node_end(&mm->head_node); + hole_start = drm_mm_hole_node_start(entry); + hole_end = drm_mm_hole_node_end(entry); hole_size = hole_end - hole_start; seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", hole_start, hole_end, hole_size); diff --git a/trunk/drivers/gpu/drm/i915/i915_dma.c b/trunk/drivers/gpu/drm/i915/i915_dma.c index 72730377a01b..12876f2795d2 100644 --- a/trunk/drivers/gpu/drm/i915/i915_dma.c 
+++ b/trunk/drivers/gpu/drm/i915/i915_dma.c @@ -2207,7 +2207,7 @@ void i915_driver_lastclose(struct drm_device * dev) drm_i915_private_t *dev_priv = dev->dev_private; if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { - drm_fb_helper_restore(); + intel_fb_restore_mode(dev); vga_switcheroo_process_delayed_switch(); return; } diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.c b/trunk/drivers/gpu/drm/i915/i915_drv.c index c34a8dd31d02..32d1b3e829c8 100644 --- a/trunk/drivers/gpu/drm/i915/i915_drv.c +++ b/trunk/drivers/gpu/drm/i915/i915_drv.c @@ -49,7 +49,7 @@ module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600); unsigned int i915_powersave = 1; module_param_named(powersave, i915_powersave, int, 0600); -unsigned int i915_semaphores = 1; +unsigned int i915_semaphores = 0; module_param_named(semaphores, i915_semaphores, int, 0600); unsigned int i915_enable_rc6 = 0; diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c index 432fc04c6bff..2166ee071ddb 100644 --- a/trunk/drivers/gpu/drm/i915/intel_display.c +++ b/trunk/drivers/gpu/drm/i915/intel_display.c @@ -3771,8 +3771,11 @@ static bool g4x_compute_wm0(struct drm_device *dev, int entries, tlb_miss; crtc = intel_get_crtc_for_plane(dev, plane); - if (crtc->fb == NULL || !crtc->enabled) + if (crtc->fb == NULL || !crtc->enabled) { + *cursor_wm = cursor->guard_size; + *plane_wm = display->guard_size; return false; + } htotal = crtc->mode.htotal; hdisplay = crtc->mode.hdisplay; @@ -5602,9 +5605,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) intel_clock_t clock; if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) - fp = FP0(pipe); + fp = I915_READ(FP0(pipe)); else - fp = FP1(pipe); + fp = I915_READ(FP1(pipe)); clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; if (IS_PINEVIEW(dev)) { @@ -6215,36 +6218,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, return ret; } -static void intel_crtc_reset(struct drm_crtc *crtc) -{ - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - - /* Reset flags back to the 'unknown' status so that they - * will be correctly set on the initial modeset. - */ - intel_crtc->dpms_mode = -1; -} - -static struct drm_crtc_helper_funcs intel_helper_funcs = { - .dpms = intel_crtc_dpms, - .mode_fixup = intel_crtc_mode_fixup, - .mode_set = intel_crtc_mode_set, - .mode_set_base = intel_pipe_set_base, - .mode_set_base_atomic = intel_pipe_set_base_atomic, - .load_lut = intel_crtc_load_lut, - .disable = intel_crtc_disable, -}; - -static const struct drm_crtc_funcs intel_crtc_funcs = { - .reset = intel_crtc_reset, - .cursor_set = intel_crtc_cursor_set, - .cursor_move = intel_crtc_cursor_move, - .gamma_set = intel_crtc_gamma_set, - .set_config = drm_crtc_helper_set_config, - .destroy = intel_crtc_destroy, - .page_flip = intel_crtc_page_flip, -}; - static void intel_sanitize_modesetting(struct drm_device *dev, int pipe, int plane) { @@ -6281,6 +6254,42 @@ static void intel_sanitize_modesetting(struct drm_device *dev, intel_disable_pipe(dev_priv, pipe); } +static void intel_crtc_reset(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + /* Reset flags back to the 'unknown' status so that they + * will be correctly set on the initial modeset. + */ + intel_crtc->dpms_mode = -1; + + /* We need to fix up any BIOS configuration that conflicts with + * our expectations. 
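An illustrative aside, not part of the patch: the intel_crtc_clock_get() change above is the classic "register offset used as register value" fix — FP0(pipe)/FP1(pipe) only name MMIO offsets, so the divisor fields must come from I915_READ() of those offsets. Shown generically (all names hypothetical, the offset value is arbitrary):

#include <stdint.h>

#define EXAMPLE_REG	0x1000u		/* an MMIO offset, not register contents */
#define FIELD_MASK	0x3f00u

static inline uint32_t mmio_read32(const volatile void *base, uint32_t off)
{
	return *(const volatile uint32_t *)((const volatile uint8_t *)base + off);
}

/* buggy : fields = EXAMPLE_REG & FIELD_MASK;                  decodes the offset */
/* fixed : fields = mmio_read32(base, EXAMPLE_REG) & FIELD_MASK;  decodes the HW */
static uint32_t read_field(const volatile void *base)
{
	return mmio_read32(base, EXAMPLE_REG) & FIELD_MASK;
}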
+ */ + intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); +} + +static struct drm_crtc_helper_funcs intel_helper_funcs = { + .dpms = intel_crtc_dpms, + .mode_fixup = intel_crtc_mode_fixup, + .mode_set = intel_crtc_mode_set, + .mode_set_base = intel_pipe_set_base, + .mode_set_base_atomic = intel_pipe_set_base_atomic, + .load_lut = intel_crtc_load_lut, + .disable = intel_crtc_disable, +}; + +static const struct drm_crtc_funcs intel_crtc_funcs = { + .reset = intel_crtc_reset, + .cursor_set = intel_crtc_cursor_set, + .cursor_move = intel_crtc_cursor_move, + .gamma_set = intel_crtc_gamma_set, + .set_config = drm_crtc_helper_set_config, + .destroy = intel_crtc_destroy, + .page_flip = intel_crtc_page_flip, +}; + static void intel_crtc_init(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -6330,8 +6339,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer, (unsigned long)intel_crtc); - - intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); } int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, @@ -6572,8 +6579,10 @@ intel_user_framebuffer_create(struct drm_device *dev, return ERR_PTR(-ENOENT); intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); - if (!intel_fb) + if (!intel_fb) { + drm_gem_object_unreference_unlocked(&obj->base); return ERR_PTR(-ENOMEM); + } ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); if (ret) { diff --git a/trunk/drivers/gpu/drm/i915/intel_dp.c b/trunk/drivers/gpu/drm/i915/intel_dp.c index cb8578b7e443..a4d80314e7f8 100644 --- a/trunk/drivers/gpu/drm/i915/intel_dp.c +++ b/trunk/drivers/gpu/drm/i915/intel_dp.c @@ -1470,7 +1470,8 @@ intel_dp_link_down(struct intel_dp *intel_dp) if (!HAS_PCH_CPT(dev) && I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { - struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); + struct drm_crtc *crtc = intel_dp->base.base.crtc; + /* Hardware workaround: leaving our transcoder select * set to transcoder B while it's off will prevent the * corresponding HDMI output on transcoder A. @@ -1485,7 +1486,19 @@ intel_dp_link_down(struct intel_dp *intel_dp) /* Changes to enable or select take place the vblank * after being written. */ - intel_wait_for_vblank(dev, intel_crtc->pipe); + if (crtc == NULL) { + /* We can arrive here never having been attached + * to a CRTC, for instance, due to inheriting + * random state from the BIOS. + * + * If the pipe is not running, play safe and + * wait for the clocks to stabilise before + * continuing. 
+ */ + POSTING_READ(intel_dp->output_reg); + msleep(50); + } else + intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); } I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); diff --git a/trunk/drivers/gpu/drm/i915/intel_drv.h b/trunk/drivers/gpu/drm/i915/intel_drv.h index f5b0d8306d83..1d20712d527f 100644 --- a/trunk/drivers/gpu/drm/i915/intel_drv.h +++ b/trunk/drivers/gpu/drm/i915/intel_drv.h @@ -338,4 +338,5 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data, struct drm_file *file_priv); extern void intel_fb_output_poll_changed(struct drm_device *dev); +extern void intel_fb_restore_mode(struct drm_device *dev); #endif /* __INTEL_DRV_H__ */ diff --git a/trunk/drivers/gpu/drm/i915/intel_fb.c b/trunk/drivers/gpu/drm/i915/intel_fb.c index 512782728e51..ec49bae73382 100644 --- a/trunk/drivers/gpu/drm/i915/intel_fb.c +++ b/trunk/drivers/gpu/drm/i915/intel_fb.c @@ -264,3 +264,13 @@ void intel_fb_output_poll_changed(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); } + +void intel_fb_restore_mode(struct drm_device *dev) +{ + int ret; + drm_i915_private_t *dev_priv = dev->dev_private; + + ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper); + if (ret) + DRM_DEBUG("failed to restore crtc mode\n"); +} diff --git a/trunk/drivers/gpu/drm/i915/intel_lvds.c b/trunk/drivers/gpu/drm/i915/intel_lvds.c index a562bd2648c7..67cb076d271b 100644 --- a/trunk/drivers/gpu/drm/i915/intel_lvds.c +++ b/trunk/drivers/gpu/drm/i915/intel_lvds.c @@ -539,6 +539,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, struct drm_device *dev = dev_priv->dev; struct drm_connector *connector = dev_priv->int_lvds_connector; + if (dev->switch_power_state != DRM_SWITCH_POWER_ON) + return NOTIFY_OK; + /* * check and update the status of LVDS connector after receiving * the LID nofication event. diff --git a/trunk/drivers/gpu/drm/i915/intel_tv.c b/trunk/drivers/gpu/drm/i915/intel_tv.c index 4256b8ef3947..6b22c1dcc015 100644 --- a/trunk/drivers/gpu/drm/i915/intel_tv.c +++ b/trunk/drivers/gpu/drm/i915/intel_tv.c @@ -1151,10 +1151,10 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, (video_levels->blank << TV_BLANK_LEVEL_SHIFT))); { int pipeconf_reg = PIPECONF(pipe); - int dspcntr_reg = DSPCNTR(pipe); + int dspcntr_reg = DSPCNTR(intel_crtc->plane); int pipeconf = I915_READ(pipeconf_reg); int dspcntr = I915_READ(dspcntr_reg); - int dspbase_reg = DSPADDR(pipe); + int dspbase_reg = DSPADDR(intel_crtc->plane); int xpos = 0x0, ypos = 0x0; unsigned int xsize, ysize; /* Pipe must be off here */ @@ -1378,7 +1378,9 @@ intel_tv_detect(struct drm_connector *connector, bool force) if (type < 0) return connector_status_disconnected; + intel_tv->type = type; intel_tv_find_better_format(connector); + return connector_status_connected; } @@ -1670,8 +1672,7 @@ intel_tv_init(struct drm_device *dev) * * More recent chipsets favour HDMI rather than integrated S-Video. 
*/ - connector->polled = - DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + connector->polled = DRM_CONNECTOR_POLL_CONNECT; drm_connector_init(dev, connector, &intel_tv_connector_funcs, DRM_MODE_CONNECTOR_SVIDEO); diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_bios.c b/trunk/drivers/gpu/drm/nouveau/nouveau_bios.c index 8314a49b6b9a..90aef64b76f2 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -269,7 +269,7 @@ struct init_tbl_entry { int (*handler)(struct nvbios *, uint16_t, struct init_exec *); }; -static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *); +static int parse_init_table(struct nvbios *, uint16_t, struct init_exec *); #define MACRO_INDEX_SIZE 2 #define MACRO_SIZE 8 @@ -2010,6 +2010,27 @@ init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) return 3; } +static int +init_jump(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) +{ + /* + * INIT_JUMP opcode: 0x5C ('\') + * + * offset (8 bit): opcode + * offset + 1 (16 bit): offset (in bios) + * + * Continue execution of init table from 'offset' + */ + + uint16_t jmp_offset = ROM16(bios->data[offset + 1]); + + if (!iexec->execute) + return 3; + + BIOSLOG(bios, "0x%04X: Jump to 0x%04X\n", offset, jmp_offset); + return jmp_offset - offset; +} + static int init_i2c_if(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) { @@ -3659,6 +3680,7 @@ static struct init_tbl_entry itbl_entry[] = { { "INIT_ZM_REG_SEQUENCE" , 0x58, init_zm_reg_sequence }, /* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */ { "INIT_SUB_DIRECT" , 0x5B, init_sub_direct }, + { "INIT_JUMP" , 0x5C, init_jump }, { "INIT_I2C_IF" , 0x5E, init_i2c_if }, { "INIT_COPY_NV_REG" , 0x5F, init_copy_nv_reg }, { "INIT_ZM_INDEX_IO" , 0x62, init_zm_index_io }, @@ -3700,8 +3722,7 @@ static struct init_tbl_entry itbl_entry[] = { #define MAX_TABLE_OPS 1000 static int -parse_init_table(struct nvbios *bios, unsigned int offset, - struct init_exec *iexec) +parse_init_table(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) { /* * Parses all commands in an init table. @@ -6333,6 +6354,32 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf) } } + /* XFX GT-240X-YA + * + * So many things wrong here, replace the entire encoder table.. 
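An illustrative aside, not part of the patch: the INIT_JUMP handler added above returns 'jmp_offset - offset'; assuming the init-table parser advances its cursor by each handler's return value (the same convention that makes init_sub_direct() return 3 for its 3-byte encoding), that delta lands the next fetch exactly on the jump target. In sketch form, with hypothetical names:

#include <stdint.h>

/* Hypothetical cursor update: 'ret' is what the opcode handler returned. */
static uint16_t next_opcode_offset(uint16_t offset, int ret)
{
	/* INIT_JUMP: ret == jmp_offset - offset, so the sum is jmp_offset;
	 * ordinary opcodes just return their own encoded length. */
	return (uint16_t)(offset + ret);
}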
+ */ + if (nv_match_device(dev, 0x0ca3, 0x1682, 0x3003)) { + if (idx == 0) { + *conn = 0x02001300; /* VGA, connector 1 */ + *conf = 0x00000028; + } else + if (idx == 1) { + *conn = 0x01010312; /* DVI, connector 0 */ + *conf = 0x00020030; + } else + if (idx == 2) { + *conn = 0x01010310; /* VGA, connector 0 */ + *conf = 0x00000028; + } else + if (idx == 3) { + *conn = 0x02022362; /* HDMI, connector 2 */ + *conf = 0x00020010; + } else { + *conn = 0x0000000e; /* EOL */ + *conf = 0x00000000; + } + } + return true; } diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_dma.c b/trunk/drivers/gpu/drm/nouveau/nouveau_dma.c index ce38e97b9428..568caedd7216 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_dma.c @@ -83,7 +83,7 @@ nouveau_dma_init(struct nouveau_channel *chan) return ret; /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */ - ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000, + ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000, &chan->m2mf_ntfy); if (ret) return ret; diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_drv.h b/trunk/drivers/gpu/drm/nouveau/nouveau_drv.h index 57e5302503db..a76514a209b3 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -682,6 +682,9 @@ struct drm_nouveau_private { /* For PFIFO and PGRAPH. */ spinlock_t context_switch_lock; + /* VM/PRAMIN flush, legacy PRAMIN aperture */ + spinlock_t vm_lock; + /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ struct nouveau_ramht *ramht; struct nouveau_gpuobj *ramfc; @@ -1190,7 +1193,7 @@ extern int nv50_graph_load_context(struct nouveau_channel *); extern int nv50_graph_unload_context(struct drm_device *); extern int nv50_grctx_init(struct nouveau_grctx *); extern void nv50_graph_tlb_flush(struct drm_device *dev); -extern void nv86_graph_tlb_flush(struct drm_device *dev); +extern void nv84_graph_tlb_flush(struct drm_device *dev); extern struct nouveau_enum nv50_data_error_names[]; /* nvc0_graph.c */ diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/trunk/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 889c4454682e..39aee6d4daf8 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -181,13 +181,13 @@ nouveau_fbcon_sync(struct fb_info *info) OUT_RING (chan, 0); } - nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff); + nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3, 0xffffffff); FIRE_RING(chan); mutex_unlock(&chan->mutex); ret = -EBUSY; for (i = 0; i < 100000; i++) { - if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy + 3)) { + if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3)) { ret = 0; break; } diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_mem.c b/trunk/drivers/gpu/drm/nouveau/nouveau_mem.c index 2683377f4131..c3e953b08992 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -152,8 +152,6 @@ nouveau_mem_vram_fini(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - nouveau_bo_ref(NULL, &dev_priv->vga_ram); - ttm_bo_device_release(&dev_priv->ttm.bdev); nouveau_ttm_global_release(dev_priv); @@ -398,7 +396,7 @@ nouveau_mem_vram_init(struct drm_device *dev) dma_bits = 40; } else if (drm_pci_device_is_pcie(dev) && - dev_priv->chipset != 0x40 && + dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) { if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39))) dma_bits = 39; @@ 
-552,6 +550,7 @@ nouveau_mem_timing_init(struct drm_device *dev) u8 tRC; /* Byte 9 */ u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14; u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21; + u8 magic_number = 0; /* Yeah... sorry*/ u8 *mem = NULL, *entry; int i, recordlen, entries; @@ -596,6 +595,12 @@ nouveau_mem_timing_init(struct drm_device *dev) if (!memtimings->timing) return; + /* Get "some number" from the timing reg for NV_40 + * Used in calculations later */ + if(dev_priv->card_type == NV_40) { + magic_number = (nv_rd32(dev,0x100228) & 0x0f000000) >> 24; + } + entry = mem + mem[1]; for (i = 0; i < entries; i++, entry += recordlen) { struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i]; @@ -635,36 +640,51 @@ nouveau_mem_timing_init(struct drm_device *dev) /* XXX: I don't trust the -1's and +1's... they must come * from somewhere! */ - timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 | + timing->reg_100224 = (tUNK_0 + tUNK_19 + 1 + magic_number) << 24 | tUNK_18 << 16 | - (tUNK_1 + tUNK_19 + 1) << 8 | - (tUNK_2 - 1)); + (tUNK_1 + tUNK_19 + 1 + magic_number) << 8; + if(dev_priv->chipset == 0xa8) { + timing->reg_100224 |= (tUNK_2 - 1); + } else { + timing->reg_100224 |= (tUNK_2 + 2 - magic_number); + } timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10); - if(recordlen > 19) { - timing->reg_100228 += (tUNK_19 - 1) << 24; - }/* I cannot back-up this else-statement right now - else { - timing->reg_100228 += tUNK_12 << 24; - }*/ - - /* XXX: reg_10022c */ - timing->reg_10022c = tUNK_2 - 1; - - timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 | - tUNK_13 << 8 | tUNK_13); - - /* XXX: +6? */ - timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC); - timing->reg_100234 += max(tUNK_10,tUNK_11) << 16; - - /* XXX; reg_100238, reg_10023c - * reg: 0x00?????? - * reg_10023c: - * 0 for pre-NV50 cards - * 0x????0202 for NV50+ cards (empirical evidence) */ - if(dev_priv->card_type >= NV_50) { + if(dev_priv->chipset >= 0xa3 && dev_priv->chipset < 0xaa) { + timing->reg_100228 |= (tUNK_19 - 1) << 24; + } + + if(dev_priv->card_type == NV_40) { + /* NV40: don't know what the rest of the regs are.. + * And don't need to know either */ + timing->reg_100228 |= 0x20200000 | magic_number << 24; + } else if(dev_priv->card_type >= NV_50) { + /* XXX: reg_10022c */ + timing->reg_10022c = tUNK_2 - 1; + + timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 | + tUNK_13 << 8 | tUNK_13); + + timing->reg_100234 = (tRAS << 24 | tRC); + timing->reg_100234 += max(tUNK_10,tUNK_11) << 16; + + if(dev_priv->chipset < 0xa3) { + timing->reg_100234 |= (tUNK_2 + 2) << 8; + } else { + /* XXX: +6? */ + timing->reg_100234 |= (tUNK_19 + 6) << 8; + } + + /* XXX; reg_100238, reg_10023c + * reg_100238: 0x00?????? 
+ * reg_10023c: 0x!!??0202 for NV50+ cards (empirical evidence) */ timing->reg_10023c = 0x202; + if(dev_priv->chipset < 0xa3) { + timing->reg_10023c |= 0x4000000 | (tUNK_2 - 1) << 16; + } else { + /* currently unknown + * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */ + } } NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i, @@ -675,7 +695,7 @@ nouveau_mem_timing_init(struct drm_device *dev) timing->reg_100238, timing->reg_10023c); } - memtimings->nr_timing = entries; + memtimings->nr_timing = entries; memtimings->supported = true; } diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_notifier.c b/trunk/drivers/gpu/drm/nouveau/nouveau_notifier.c index 7ba3fc0b30c1..5b39718ae1f8 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_notifier.c @@ -35,19 +35,22 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct nouveau_bo *ntfy = NULL; - uint32_t flags; + uint32_t flags, ttmpl; int ret; - if (nouveau_vram_notify) + if (nouveau_vram_notify) { flags = NOUVEAU_GEM_DOMAIN_VRAM; - else + ttmpl = TTM_PL_FLAG_VRAM; + } else { flags = NOUVEAU_GEM_DOMAIN_GART; + ttmpl = TTM_PL_FLAG_TT; + } ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy); if (ret) return ret; - ret = nouveau_bo_pin(ntfy, flags); + ret = nouveau_bo_pin(ntfy, ttmpl); if (ret) goto out_err; diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_object.c b/trunk/drivers/gpu/drm/nouveau/nouveau_object.c index 4f00c87ed86e..67a16e01ffa6 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_object.c @@ -1039,19 +1039,20 @@ nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset) { struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; struct drm_device *dev = gpuobj->dev; + unsigned long flags; if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) { u64 ptr = gpuobj->vinst + offset; u32 base = ptr >> 16; u32 val; - spin_lock(&dev_priv->ramin_lock); + spin_lock_irqsave(&dev_priv->vm_lock, flags); if (dev_priv->ramin_base != base) { dev_priv->ramin_base = base; nv_wr32(dev, 0x001700, dev_priv->ramin_base); } val = nv_rd32(dev, 0x700000 + (ptr & 0xffff)); - spin_unlock(&dev_priv->ramin_lock); + spin_unlock_irqrestore(&dev_priv->vm_lock, flags); return val; } @@ -1063,18 +1064,19 @@ nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val) { struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; struct drm_device *dev = gpuobj->dev; + unsigned long flags; if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) { u64 ptr = gpuobj->vinst + offset; u32 base = ptr >> 16; - spin_lock(&dev_priv->ramin_lock); + spin_lock_irqsave(&dev_priv->vm_lock, flags); if (dev_priv->ramin_base != base) { dev_priv->ramin_base = base; nv_wr32(dev, 0x001700, dev_priv->ramin_base); } nv_wr32(dev, 0x700000 + (ptr & 0xffff), val); - spin_unlock(&dev_priv->ramin_lock); + spin_unlock_irqrestore(&dev_priv->vm_lock, flags); return; } diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_perf.c b/trunk/drivers/gpu/drm/nouveau/nouveau_perf.c index ac62a1b8c4fc..670e3cb697ec 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_perf.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_perf.c @@ -134,7 +134,7 @@ nouveau_perf_init(struct drm_device *dev) case 0x13: case 0x15: perflvl->fanspeed = entry[55]; - perflvl->voltage = entry[56]; + perflvl->voltage = (recordlen > 56) ? 
entry[56] : 0; perflvl->core = ROM32(entry[1]) * 10; perflvl->memory = ROM32(entry[5]) * 20; break; diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/trunk/drivers/gpu/drm/nouveau/nouveau_sgdma.c index a33fe4019286..c77111eca6ac 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -42,7 +42,8 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages, nvbe->nr_pages = 0; while (num_pages--) { - if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) { + /* this code path isn't called and is incorrect anyways */ + if (0) { /*dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE)*/ nvbe->pages[nvbe->nr_pages] = dma_addrs[nvbe->nr_pages]; nvbe->ttm_alloced[nvbe->nr_pages] = true; @@ -55,6 +56,7 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages, be->func->clear(be); return -EFAULT; } + nvbe->ttm_alloced[nvbe->nr_pages] = false; } nvbe->nr_pages++; @@ -427,7 +429,7 @@ nouveau_sgdma_init(struct drm_device *dev) u32 aper_size, align; int ret; - if (dev_priv->card_type >= NV_50 || drm_pci_device_is_pcie(dev)) + if (dev_priv->card_type >= NV_40 && drm_pci_device_is_pcie(dev)) aper_size = 512 * 1024 * 1024; else aper_size = 64 * 1024 * 1024; @@ -457,7 +459,7 @@ nouveau_sgdma_init(struct drm_device *dev) dev_priv->gart_info.func = &nv50_sgdma_backend; } else if (drm_pci_device_is_pcie(dev) && - dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) { + dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) { if (nv44_graph_class(dev)) { dev_priv->gart_info.func = &nv44_sgdma_backend; align = 512 * 1024; diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_state.c b/trunk/drivers/gpu/drm/nouveau/nouveau_state.c index 5bb2859001e2..915fbce89595 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_state.c @@ -376,15 +376,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->graph.destroy_context = nv50_graph_destroy_context; engine->graph.load_context = nv50_graph_load_context; engine->graph.unload_context = nv50_graph_unload_context; - if (dev_priv->chipset != 0x86) + if (dev_priv->chipset == 0x50 || + dev_priv->chipset == 0xac) engine->graph.tlb_flush = nv50_graph_tlb_flush; - else { - /* from what i can see nvidia do this on every - * pre-NVA3 board except NVAC, but, we've only - * ever seen problems on NV86 - */ - engine->graph.tlb_flush = nv86_graph_tlb_flush; - } + else + engine->graph.tlb_flush = nv84_graph_tlb_flush; engine->fifo.channels = 128; engine->fifo.init = nv50_fifo_init; engine->fifo.takedown = nv50_fifo_takedown; @@ -612,6 +608,7 @@ nouveau_card_init(struct drm_device *dev) spin_lock_init(&dev_priv->channels.lock); spin_lock_init(&dev_priv->tile.lock); spin_lock_init(&dev_priv->context_switch_lock); + spin_lock_init(&dev_priv->vm_lock); /* Make the CRTCs and I2C buses accessible */ ret = engine->display.early_init(dev); @@ -771,6 +768,11 @@ static void nouveau_card_takedown(struct drm_device *dev) engine->mc.takedown(dev); engine->display.late_takedown(dev); + if (dev_priv->vga_ram) { + nouveau_bo_unpin(dev_priv->vga_ram); + nouveau_bo_ref(NULL, &dev_priv->vga_ram); + } + mutex_lock(&dev->struct_mutex); ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); diff --git a/trunk/drivers/gpu/drm/nouveau/nv04_dfp.c b/trunk/drivers/gpu/drm/nouveau/nv04_dfp.c index c82db37d9f41..12098bf839c4 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv04_dfp.c +++ 
b/trunk/drivers/gpu/drm/nouveau/nv04_dfp.c @@ -581,12 +581,13 @@ static void nv04_dfp_restore(struct drm_encoder *encoder) int head = nv_encoder->restore.head; if (nv_encoder->dcb->type == OUTPUT_LVDS) { - struct drm_display_mode *native_mode = nouveau_encoder_connector_get(nv_encoder)->native_mode; - if (native_mode) - call_lvds_script(dev, nv_encoder->dcb, head, LVDS_PANEL_ON, - native_mode->clock); - else - NV_ERROR(dev, "Not restoring LVDS without native mode\n"); + struct nouveau_connector *connector = + nouveau_encoder_connector_get(nv_encoder); + + if (connector && connector->native_mode) + call_lvds_script(dev, nv_encoder->dcb, head, + LVDS_PANEL_ON, + connector->native_mode->clock); } else if (nv_encoder->dcb->type == OUTPUT_TMDS) { int clock = nouveau_hw_pllvals_to_clk diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_crtc.c b/trunk/drivers/gpu/drm/nouveau/nv50_crtc.c index 2b9984027f41..a19ccaa025b3 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/trunk/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -469,9 +469,6 @@ nv50_crtc_wait_complete(struct drm_crtc *crtc) start = ptimer->read(dev); do { - nv_wr32(dev, 0x61002c, 0x370); - nv_wr32(dev, 0x000140, 1); - if (nv_ro32(disp->ntfy, 0x000)) return 0; } while (ptimer->read(dev) - start < 2000000000ULL); diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_evo.c b/trunk/drivers/gpu/drm/nouveau/nv50_evo.c index a2cfaa691e9b..c8e83c1a4de8 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv50_evo.c +++ b/trunk/drivers/gpu/drm/nouveau/nv50_evo.c @@ -186,6 +186,7 @@ nv50_evo_channel_init(struct nouveau_channel *evo) nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id); evo->dma.max = (4096/4) - 2; + evo->dma.max &= ~7; evo->dma.put = 0; evo->dma.cur = evo->dma.put; evo->dma.free = evo->dma.max - evo->dma.cur; diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_graph.c b/trunk/drivers/gpu/drm/nouveau/nv50_graph.c index 8675b00caf18..b02a5b1e7d37 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/trunk/drivers/gpu/drm/nouveau/nv50_graph.c @@ -503,7 +503,7 @@ nv50_graph_tlb_flush(struct drm_device *dev) } void -nv86_graph_tlb_flush(struct drm_device *dev) +nv84_graph_tlb_flush(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_instmem.c b/trunk/drivers/gpu/drm/nouveau/nv50_instmem.c index a6f8aa651fc6..4f95a1e5822e 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/trunk/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -404,23 +404,25 @@ void nv50_instmem_flush(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + unsigned long flags; - spin_lock(&dev_priv->ramin_lock); + spin_lock_irqsave(&dev_priv->vm_lock, flags); nv_wr32(dev, 0x00330c, 0x00000001); if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000)) NV_ERROR(dev, "PRAMIN flush timeout\n"); - spin_unlock(&dev_priv->ramin_lock); + spin_unlock_irqrestore(&dev_priv->vm_lock, flags); } void nv84_instmem_flush(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + unsigned long flags; - spin_lock(&dev_priv->ramin_lock); + spin_lock_irqsave(&dev_priv->vm_lock, flags); nv_wr32(dev, 0x070000, 0x00000001); if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) NV_ERROR(dev, "PRAMIN flush timeout\n"); - spin_unlock(&dev_priv->ramin_lock); + spin_unlock_irqrestore(&dev_priv->vm_lock, flags); } diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_vm.c b/trunk/drivers/gpu/drm/nouveau/nv50_vm.c index 
4fd3432b5b8d..6c2694490741 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv50_vm.c +++ b/trunk/drivers/gpu/drm/nouveau/nv50_vm.c @@ -174,10 +174,11 @@ void nv50_vm_flush_engine(struct drm_device *dev, int engine) { struct drm_nouveau_private *dev_priv = dev->dev_private; + unsigned long flags; - spin_lock(&dev_priv->ramin_lock); + spin_lock_irqsave(&dev_priv->vm_lock, flags); nv_wr32(dev, 0x100c80, (engine << 16) | 1); if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); - spin_unlock(&dev_priv->ramin_lock); + spin_unlock_irqrestore(&dev_priv->vm_lock, flags); } diff --git a/trunk/drivers/gpu/drm/nouveau/nvc0_vm.c b/trunk/drivers/gpu/drm/nouveau/nvc0_vm.c index 69af0ba7edd3..a179e6c55afb 100644 --- a/trunk/drivers/gpu/drm/nouveau/nvc0_vm.c +++ b/trunk/drivers/gpu/drm/nouveau/nvc0_vm.c @@ -104,20 +104,27 @@ nvc0_vm_flush(struct nouveau_vm *vm) struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; struct drm_device *dev = vm->dev; struct nouveau_vm_pgd *vpgd; - u32 r100c80, engine; + unsigned long flags; + u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5; pinstmem->flush(vm->dev); - if (vm == dev_priv->chan_vm) - engine = 1; - else - engine = 5; - + spin_lock_irqsave(&dev_priv->vm_lock, flags); list_for_each_entry(vpgd, &vm->pgd_list, head) { - r100c80 = nv_rd32(dev, 0x100c80); + /* looks like maybe a "free flush slots" counter, the + * faster you write to 0x100cbc to more it decreases + */ + if (!nv_wait_ne(dev, 0x100c80, 0x00ff0000, 0x00000000)) { + NV_ERROR(dev, "vm timeout 0: 0x%08x %d\n", + nv_rd32(dev, 0x100c80), engine); + } nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8); nv_wr32(dev, 0x100cbc, 0x80000000 | engine); - if (!nv_wait(dev, 0x100c80, 0xffffffff, r100c80)) - NV_ERROR(dev, "vm flush timeout eng %d\n", engine); + /* wait for flush to be queued? 
*/ + if (!nv_wait(dev, 0x100c80, 0x00008000, 0x00008000)) { + NV_ERROR(dev, "vm timeout 1: 0x%08x %d\n", + nv_rd32(dev, 0x100c80), engine); + } } + spin_unlock_irqrestore(&dev_priv->vm_lock, flags); } diff --git a/trunk/drivers/gpu/drm/radeon/atom.c b/trunk/drivers/gpu/drm/radeon/atom.c index 258fa5e7a2d9..7bd745689097 100644 --- a/trunk/drivers/gpu/drm/radeon/atom.c +++ b/trunk/drivers/gpu/drm/radeon/atom.c @@ -32,6 +32,7 @@ #include "atom.h" #include "atom-names.h" #include "atom-bits.h" +#include "radeon.h" #define ATOM_COND_ABOVE 0 #define ATOM_COND_ABOVEOREQUAL 1 @@ -101,7 +102,9 @@ static void debug_print_spaces(int n) static uint32_t atom_iio_execute(struct atom_context *ctx, int base, uint32_t index, uint32_t data) { + struct radeon_device *rdev = ctx->card->dev->dev_private; uint32_t temp = 0xCDCDCDCD; + while (1) switch (CU8(base)) { case ATOM_IIO_NOP: @@ -112,7 +115,8 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, base += 3; break; case ATOM_IIO_WRITE: - (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1)); + if (rdev->family == CHIP_RV515) + (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1)); ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp); base += 3; break; @@ -131,7 +135,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, case ATOM_IIO_MOVE_INDEX: temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << - CU8(base + 2)); + CU8(base + 3)); temp |= ((index >> CU8(base + 2)) & (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + @@ -141,7 +145,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, case ATOM_IIO_MOVE_DATA: temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << - CU8(base + 2)); + CU8(base + 3)); temp |= ((data >> CU8(base + 2)) & (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + @@ -151,7 +155,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, case ATOM_IIO_MOVE_ATTR: temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << - CU8(base + 2)); + CU8(base + 3)); temp |= ((ctx-> io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 - diff --git a/trunk/drivers/gpu/drm/radeon/atombios_crtc.c b/trunk/drivers/gpu/drm/radeon/atombios_crtc.c index b41ec59c7100..529a3a704731 100644 --- a/trunk/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/trunk/drivers/gpu/drm/radeon/atombios_crtc.c @@ -531,6 +531,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; else pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; + + if (rdev->family < CHIP_RV770) + pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; } else { pll->flags |= RADEON_PLL_LEGACY; @@ -559,7 +562,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { if (ss_enabled) { if (ss->refdiv) { - pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; pll->flags |= RADEON_PLL_USE_REF_DIV; pll->reference_div = ss->refdiv; if (ASIC_IS_AVIVO(rdev)) diff --git a/trunk/drivers/gpu/drm/radeon/evergreen.c b/trunk/drivers/gpu/drm/radeon/evergreen.c index 0b0cc74c08c0..9073e3bfb08c 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreen.c +++ b/trunk/drivers/gpu/drm/radeon/evergreen.c @@ -120,11 +120,16 @@ void evergreen_pm_misc(struct radeon_device *rdev) struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; - if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { - if (voltage->voltage != rdev->pm.current_vddc) { - radeon_atom_set_voltage(rdev, voltage->voltage); + if (voltage->type == 
VOLTAGE_SW) { + if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) { + radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); rdev->pm.current_vddc = voltage->voltage; - DRM_DEBUG("Setting: v: %d\n", voltage->voltage); + DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage); + } + if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) { + radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI); + rdev->pm.current_vddci = voltage->vddci; + DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci); } } } @@ -348,7 +353,7 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev, struct drm_display_mode *mode, struct drm_display_mode *other_mode) { - u32 tmp = 0; + u32 tmp; /* * Line Buffer Setup * There are 3 line buffers, each one shared by 2 display controllers. @@ -358,64 +363,63 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev, * first display controller * 0 - first half of lb (3840 * 2) * 1 - first 3/4 of lb (5760 * 2) - * 2 - whole lb (7680 * 2) + * 2 - whole lb (7680 * 2), other crtc must be disabled * 3 - first 1/4 of lb (1920 * 2) * second display controller * 4 - second half of lb (3840 * 2) * 5 - second 3/4 of lb (5760 * 2) - * 6 - whole lb (7680 * 2) + * 6 - whole lb (7680 * 2), other crtc must be disabled * 7 - last 1/4 of lb (1920 * 2) */ - if (mode && other_mode) { - if (mode->hdisplay > other_mode->hdisplay) { - if (mode->hdisplay > 2560) - tmp = 1; /* 3/4 */ - else - tmp = 0; /* 1/2 */ - } else if (other_mode->hdisplay > mode->hdisplay) { - if (other_mode->hdisplay > 2560) - tmp = 3; /* 1/4 */ - else - tmp = 0; /* 1/2 */ - } else + /* this can get tricky if we have two large displays on a paired group + * of crtcs. Ideally for multiple large displays we'd assign them to + * non-linked crtcs for maximum line buffer allocation. 
+ */ + if (radeon_crtc->base.enabled && mode) { + if (other_mode) tmp = 0; /* 1/2 */ - } else if (mode) - tmp = 2; /* whole */ - else if (other_mode) - tmp = 3; /* 1/4 */ + else + tmp = 2; /* whole */ + } else + tmp = 0; /* second controller of the pair uses second half of the lb */ if (radeon_crtc->crtc_id % 2) tmp += 4; WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp); - switch (tmp) { - case 0: - case 4: - default: - if (ASIC_IS_DCE5(rdev)) - return 4096 * 2; - else - return 3840 * 2; - case 1: - case 5: - if (ASIC_IS_DCE5(rdev)) - return 6144 * 2; - else - return 5760 * 2; - case 2: - case 6: - if (ASIC_IS_DCE5(rdev)) - return 8192 * 2; - else - return 7680 * 2; - case 3: - case 7: - if (ASIC_IS_DCE5(rdev)) - return 2048 * 2; - else - return 1920 * 2; + if (radeon_crtc->base.enabled && mode) { + switch (tmp) { + case 0: + case 4: + default: + if (ASIC_IS_DCE5(rdev)) + return 4096 * 2; + else + return 3840 * 2; + case 1: + case 5: + if (ASIC_IS_DCE5(rdev)) + return 6144 * 2; + else + return 5760 * 2; + case 2: + case 6: + if (ASIC_IS_DCE5(rdev)) + return 8192 * 2; + else + return 7680 * 2; + case 3: + case 7: + if (ASIC_IS_DCE5(rdev)) + return 2048 * 2; + else + return 1920 * 2; + } } + + /* controller not enabled, so no lb used */ + return 0; } static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev) @@ -858,9 +862,15 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev) SYSTEM_ACCESS_MODE_NOT_IN_SYS | SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU | EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5); - WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); - WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); - WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); + if (rdev->flags & RADEON_IS_IGP) { + WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp); + WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp); + WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp); + } else { + WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); + WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); + WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); + } WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); @@ -1770,7 +1780,10 @@ static void evergreen_gpu_init(struct radeon_device *rdev) mc_shared_chmap = RREG32(MC_SHARED_CHMAP); - mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); + if (rdev->flags & RADEON_IS_IGP) + mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG); + else + mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); switch (rdev->config.evergreen.max_tile_pipes) { case 1: @@ -2576,7 +2589,7 @@ static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev) u32 wptr, tmp; if (rdev->wb.enabled) - wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]; + wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]); else wptr = RREG32(IH_RB_WPTR); @@ -2919,11 +2932,6 @@ static int evergreen_startup(struct radeon_device *rdev) rdev->asic->copy = NULL; dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); } - /* XXX: ontario has problems blitting to gart at the moment */ - if (rdev->family == CHIP_PALM) { - rdev->asic->copy = NULL; - radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); - } /* allocate wb buffer */ r = radeon_wb_init(rdev); @@ -3036,9 +3044,6 @@ int evergreen_init(struct radeon_device *rdev) { int r; - r = radeon_dummy_page_init(rdev); - if (r) - return r; /* This don't do much */ r = radeon_gem_init(rdev); if (r) @@ -3150,7 +3155,6 @@ void evergreen_fini(struct radeon_device *rdev) radeon_atombios_fini(rdev); kfree(rdev->bios); rdev->bios = NULL; - radeon_dummy_page_fini(rdev); } static void evergreen_pcie_gen2_enable(struct radeon_device 
*rdev) diff --git a/trunk/drivers/gpu/drm/radeon/evergreend.h b/trunk/drivers/gpu/drm/radeon/evergreend.h index 9aaa3f0c9372..fc40e0cc3451 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreend.h +++ b/trunk/drivers/gpu/drm/radeon/evergreend.h @@ -200,6 +200,7 @@ #define BURSTLENGTH_SHIFT 9 #define BURSTLENGTH_MASK 0x00000200 #define CHANSIZE_OVERRIDE (1 << 11) +#define FUS_MC_ARB_RAMCFG 0x2768 #define MC_VM_AGP_TOP 0x2028 #define MC_VM_AGP_BOT 0x202C #define MC_VM_AGP_BASE 0x2030 @@ -221,6 +222,11 @@ #define MC_VM_MD_L1_TLB0_CNTL 0x2654 #define MC_VM_MD_L1_TLB1_CNTL 0x2658 #define MC_VM_MD_L1_TLB2_CNTL 0x265C + +#define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C +#define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660 +#define FUS_MC_VM_MD_L1_TLB2_CNTL 0x2664 + #define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C #define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038 #define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034 diff --git a/trunk/drivers/gpu/drm/radeon/ni.c b/trunk/drivers/gpu/drm/radeon/ni.c index 7aade20f63a8..3d8a7634bbe9 100644 --- a/trunk/drivers/gpu/drm/radeon/ni.c +++ b/trunk/drivers/gpu/drm/radeon/ni.c @@ -674,7 +674,7 @@ static void cayman_gpu_init(struct radeon_device *rdev) cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE); cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG); - cgts_tcc_disable = RREG32(CGTS_TCC_DISABLE); + cgts_tcc_disable = 0xff000000; gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE); gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG); cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE); @@ -871,7 +871,7 @@ static void cayman_gpu_init(struct radeon_device *rdev) smx_dc_ctl0 = RREG32(SMX_DC_CTL0); smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff); - smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets); + smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets); WREG32(SMX_DC_CTL0, smx_dc_ctl0); WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE); @@ -887,20 +887,20 @@ static void cayman_gpu_init(struct radeon_device *rdev) WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO); - WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) | - POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) | - SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1))); + WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) | + POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) | + SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1))); - WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) | - SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) | - SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size))); + WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) | + SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) | + SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size))); WREG32(VGT_NUM_INSTANCES, 1); WREG32(CP_PERFMON_CNTL, 0); - WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) | + WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) | FETCH_FIFO_HIWATER(0x4) | DONE_FIFO_HIWATER(0xe0) | ALU_UPDATE_FIFO_HIWATER(0x8))); diff --git a/trunk/drivers/gpu/drm/radeon/r600.c b/trunk/drivers/gpu/drm/radeon/r600.c index be271c42de4d..6f27593901c7 100644 --- a/trunk/drivers/gpu/drm/radeon/r600.c 
+++ b/trunk/drivers/gpu/drm/radeon/r600.c @@ -587,7 +587,7 @@ void r600_pm_misc(struct radeon_device *rdev) if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { if (voltage->voltage != rdev->pm.current_vddc) { - radeon_atom_set_voltage(rdev, voltage->voltage); + radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); rdev->pm.current_vddc = voltage->voltage; DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage); } @@ -2509,9 +2509,6 @@ int r600_init(struct radeon_device *rdev) { int r; - r = radeon_dummy_page_init(rdev); - if (r) - return r; if (r600_debugfs_mc_info_init(rdev)) { DRM_ERROR("Failed to register debugfs file for mc !\n"); } @@ -2625,7 +2622,6 @@ void r600_fini(struct radeon_device *rdev) radeon_atombios_fini(rdev); kfree(rdev->bios); rdev->bios = NULL; - radeon_dummy_page_fini(rdev); } @@ -3235,7 +3231,7 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev) u32 wptr, tmp; if (rdev->wb.enabled) - wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]; + wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]); else wptr = RREG32(IH_RB_WPTR); diff --git a/trunk/drivers/gpu/drm/radeon/radeon.h b/trunk/drivers/gpu/drm/radeon/radeon.h index 93f536594c73..ba643b576054 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon.h +++ b/trunk/drivers/gpu/drm/radeon/radeon.h @@ -177,7 +177,7 @@ void radeon_pm_suspend(struct radeon_device *rdev); void radeon_pm_resume(struct radeon_device *rdev); void radeon_combios_get_power_modes(struct radeon_device *rdev); void radeon_atombios_get_power_modes(struct radeon_device *rdev); -void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); +void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type); void rs690_pm_info(struct radeon_device *rdev); extern int rv6xx_get_temp(struct radeon_device *rdev); extern int rv770_get_temp(struct radeon_device *rdev); @@ -767,7 +767,9 @@ struct radeon_voltage { u8 vddci_id; /* index into vddci voltage table */ bool vddci_enabled; /* r6xx+ sw */ - u32 voltage; + u16 voltage; + /* evergreen+ vddci */ + u16 vddci; }; /* clock mode flags */ @@ -835,10 +837,12 @@ struct radeon_pm { int default_power_state_index; u32 current_sclk; u32 current_mclk; - u32 current_vddc; + u16 current_vddc; + u16 current_vddci; u32 default_sclk; u32 default_mclk; - u32 default_vddc; + u16 default_vddc; + u16 default_vddci; struct radeon_i2c_chan *i2c_bus; /* selected pm method */ enum radeon_pm_method pm_method; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_asic.c b/trunk/drivers/gpu/drm/radeon/radeon_asic.c index eb888ee5f674..ca576191d058 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_asic.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_asic.c @@ -94,7 +94,7 @@ static void radeon_register_accessor_init(struct radeon_device *rdev) rdev->mc_rreg = &rs600_mc_rreg; rdev->mc_wreg = &rs600_mc_wreg; } - if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_HEMLOCK)) { + if (rdev->family >= CHIP_R600) { rdev->pciep_rreg = &r600_pciep_rreg; rdev->pciep_wreg = &r600_pciep_wreg; } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_atombios.c b/trunk/drivers/gpu/drm/radeon/radeon_atombios.c index 99768d9d91da..90dfb2b8cf03 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_atombios.c @@ -431,7 +431,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, } } - /* Acer laptop (Acer TravelMate 5730G) has an HDMI port + /* Acer laptop (Acer TravelMate 5730/5730G) has an HDMI port * on the laptop and a DVI port on the 
docking station and * both share the same encoder, hpd pin, and ddc line. * So while the bios table is technically correct, @@ -440,7 +440,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, * with different crtcs which isn't possible on the hardware * side and leaves no crtcs for LVDS or VGA. */ - if ((dev->pdev->device == 0x95c4) && + if (((dev->pdev->device == 0x95c4) || (dev->pdev->device == 0x9591)) && (dev->pdev->subsystem_vendor == 0x1025) && (dev->pdev->subsystem_device == 0x013c)) { if ((*connector_type == DRM_MODE_CONNECTOR_DVII) && @@ -1574,9 +1574,17 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; bool bad_record = false; - u8 *record = (u8 *)(mode_info->atom_context->bios + - data_offset + - le16_to_cpu(lvds_info->info.usModePatchTableOffset)); + u8 *record; + + if ((frev == 1) && (crev < 2)) + /* absolute */ + record = (u8 *)(mode_info->atom_context->bios + + le16_to_cpu(lvds_info->info.usModePatchTableOffset)); + else + /* relative */ + record = (u8 *)(mode_info->atom_context->bios + + data_offset + + le16_to_cpu(lvds_info->info.usModePatchTableOffset)); while (*record != ATOM_RECORD_END_TYPE) { switch (*record) { case LCD_MODE_PATCH_RECORD_MODE_TYPE: @@ -1599,9 +1607,10 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0], fake_edid_record->ucFakeEDIDLength); - if (drm_edid_is_valid(edid)) + if (drm_edid_is_valid(edid)) { rdev->mode_info.bios_hardcoded_edid = edid; - else + rdev->mode_info.bios_hardcoded_edid_size = edid_size; + } else kfree(edid); } } @@ -2176,24 +2185,27 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r } } -static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev) +static void radeon_atombios_get_default_voltages(struct radeon_device *rdev, + u16 *vddc, u16 *vddci) { struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); u8 frev, crev; u16 data_offset; union firmware_info *firmware_info; - u16 vddc = 0; + + *vddc = 0; + *vddci = 0; if (atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) { firmware_info = (union firmware_info *)(mode_info->atom_context->bios + data_offset); - vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage); + *vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage); + if ((frev == 2) && (crev >= 2)) + *vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage); } - - return vddc; } static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev, @@ -2203,7 +2215,9 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde int j; u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings); u32 misc2 = le16_to_cpu(non_clock_info->usClassification); - u16 vddc = radeon_atombios_get_default_vddc(rdev); + u16 vddc, vddci; + + radeon_atombios_get_default_voltages(rdev, &vddc, &vddci); rdev->pm.power_state[state_index].misc = misc; rdev->pm.power_state[state_index].misc2 = misc2; @@ -2244,6 +2258,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk; rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk; rdev->pm.default_vddc = 
rdev->pm.power_state[state_index].clock_info[0].voltage.voltage; + rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci; } else { /* patch the table values with the default slck/mclk from firmware info */ for (j = 0; j < mode_index; j++) { @@ -2286,6 +2301,8 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev, VOLTAGE_SW; rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = le16_to_cpu(clock_info->evergreen.usVDDC); + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci = + le16_to_cpu(clock_info->evergreen.usVDDCI); } else { sclk = le16_to_cpu(clock_info->r600.usEngineClockLow); sclk |= clock_info->r600.ucEngineClockHigh << 16; @@ -2577,25 +2594,25 @@ union set_voltage { struct _SET_VOLTAGE_PARAMETERS_V2 v2; }; -void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level) +void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type) { union set_voltage args; int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); - u8 frev, crev, volt_index = level; + u8 frev, crev, volt_index = voltage_level; if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) return; switch (crev) { case 1: - args.v1.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC; + args.v1.ucVoltageType = voltage_type; args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE; args.v1.ucVoltageIndex = volt_index; break; case 2: - args.v2.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC; + args.v2.ucVoltageType = voltage_type; args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE; - args.v2.usVoltageLevel = cpu_to_le16(level); + args.v2.usVoltageLevel = cpu_to_le16(voltage_level); break; default: DRM_ERROR("Unknown table version %d, %d\n", frev, crev); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/trunk/drivers/gpu/drm/radeon/radeon_atpx_handler.c index ed5dfe58f29c..9d95792bea3e 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -15,6 +15,9 @@ #define ATPX_VERSION 0 #define ATPX_GPU_PWR 2 #define ATPX_MUX_SELECT 3 +#define ATPX_I2C_MUX_SELECT 4 +#define ATPX_SWITCH_START 5 +#define ATPX_SWITCH_END 6 #define ATPX_INTEGRATED 0 #define ATPX_DISCRETE 1 @@ -149,13 +152,35 @@ static int radeon_atpx_switch_mux(acpi_handle handle, int mux_id) return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id); } +static int radeon_atpx_switch_i2c_mux(acpi_handle handle, int mux_id) +{ + return radeon_atpx_execute(handle, ATPX_I2C_MUX_SELECT, mux_id); +} + +static int radeon_atpx_switch_start(acpi_handle handle, int gpu_id) +{ + return radeon_atpx_execute(handle, ATPX_SWITCH_START, gpu_id); +} + +static int radeon_atpx_switch_end(acpi_handle handle, int gpu_id) +{ + return radeon_atpx_execute(handle, ATPX_SWITCH_END, gpu_id); +} static int radeon_atpx_switchto(enum vga_switcheroo_client_id id) { + int gpu_id; + if (id == VGA_SWITCHEROO_IGD) - radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 0); + gpu_id = ATPX_INTEGRATED; else - radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 1); + gpu_id = ATPX_DISCRETE; + + radeon_atpx_switch_start(radeon_atpx_priv.atpx_handle, gpu_id); + radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, gpu_id); + radeon_atpx_switch_i2c_mux(radeon_atpx_priv.atpx_handle, gpu_id); + radeon_atpx_switch_end(radeon_atpx_priv.atpx_handle, gpu_id); + return 0; } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_connectors.c b/trunk/drivers/gpu/drm/radeon/radeon_connectors.c 
index 2ef6d5135064..5f45fa12bb8b 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1199,7 +1199,7 @@ radeon_add_atom_connector(struct drm_device *dev, if (router->ddc_valid || router->cd_valid) { radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); if (!radeon_connector->router_bus) - goto failed; + DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n"); } switch (connector_type) { case DRM_MODE_CONNECTOR_VGA: @@ -1208,7 +1208,7 @@ radeon_add_atom_connector(struct drm_device *dev, if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) - goto failed; + DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } radeon_connector->dac_load_detect = true; drm_connector_attach_property(&radeon_connector->base, @@ -1226,7 +1226,7 @@ radeon_add_atom_connector(struct drm_device *dev, if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) - goto failed; + DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } radeon_connector->dac_load_detect = true; drm_connector_attach_property(&radeon_connector->base, @@ -1249,7 +1249,7 @@ radeon_add_atom_connector(struct drm_device *dev, if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) - goto failed; + DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } subpixel_order = SubPixelHorizontalRGB; drm_connector_attach_property(&radeon_connector->base, @@ -1290,7 +1290,7 @@ radeon_add_atom_connector(struct drm_device *dev, if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) - goto failed; + DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.coherent_mode_property, @@ -1329,10 +1329,10 @@ radeon_add_atom_connector(struct drm_device *dev, else radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch"); if (!radeon_dig_connector->dp_i2c_bus) - goto failed; + DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n"); radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) - goto failed; + DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } subpixel_order = SubPixelHorizontalRGB; drm_connector_attach_property(&radeon_connector->base, @@ -1381,7 +1381,7 @@ radeon_add_atom_connector(struct drm_device *dev, if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) - goto failed; + DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } drm_connector_attach_property(&radeon_connector->base, dev->mode_config.scaling_mode_property, @@ -1457,7 +1457,7 @@ radeon_add_legacy_connector(struct drm_device *dev, if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) - goto failed; + DRM_ERROR("VGA: Failed to assign ddc bus! 
Check dmesg for i2c errors.\n"); } radeon_connector->dac_load_detect = true; drm_connector_attach_property(&radeon_connector->base, @@ -1475,7 +1475,7 @@ radeon_add_legacy_connector(struct drm_device *dev, if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) - goto failed; + DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } radeon_connector->dac_load_detect = true; drm_connector_attach_property(&radeon_connector->base, @@ -1493,7 +1493,7 @@ radeon_add_legacy_connector(struct drm_device *dev, if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) - goto failed; + DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } if (connector_type == DRM_MODE_CONNECTOR_DVII) { radeon_connector->dac_load_detect = true; @@ -1538,7 +1538,7 @@ radeon_add_legacy_connector(struct drm_device *dev, if (i2c_bus->valid) { radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); if (!radeon_connector->ddc_bus) - goto failed; + DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); } drm_connector_attach_property(&radeon_connector->base, dev->mode_config.scaling_mode_property, @@ -1567,9 +1567,4 @@ radeon_add_legacy_connector(struct drm_device *dev, radeon_legacy_backlight_init(radeon_encoder, connector); } } - return; - -failed: - drm_connector_cleanup(connector); - kfree(connector); } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_cursor.c b/trunk/drivers/gpu/drm/radeon/radeon_cursor.c index bdf2fa1189ae..3189a7efb2e9 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_cursor.c @@ -167,9 +167,6 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc, return -EINVAL; } - radeon_crtc->cursor_width = width; - radeon_crtc->cursor_height = height; - obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); if (!obj) { DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id); @@ -180,6 +177,9 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc, if (ret) goto fail; + radeon_crtc->cursor_width = width; + radeon_crtc->cursor_height = height; + radeon_lock_cursor(crtc, true); /* XXX only 27 bit offset for legacy cursor */ radeon_set_cursor(crtc, obj, gpu_addr); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_fence.c b/trunk/drivers/gpu/drm/radeon/radeon_fence.c index 9e59868d354e..bbcd1dd7bac0 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_fence.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_fence.c @@ -79,7 +79,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev) scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; else scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; - seq = rdev->wb.wb[scratch_index/4]; + seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]); } else seq = RREG32(rdev->fence_drv.scratch_reg); if (seq != rdev->fence_drv.last_seq) { diff --git a/trunk/drivers/gpu/drm/radeon/radeon_gart.c b/trunk/drivers/gpu/drm/radeon/radeon_gart.c index f0534ef2f331..a533f52fd163 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_gart.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_gart.c @@ -181,9 +181,9 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); for (i = 0; i < pages; i++, p++) { - /* On TTM path, we only use the DMA API if TTM_PAGE_FLAG_DMA32 - * is requested. 
*/ - if (dma_addr[i] != DMA_ERROR_CODE) { + /* we reverted the patch using dma_addr in TTM for now but this + * code stops building on alpha so just comment it out for now */ + if (0) { /*dma_addr[i] != DMA_ERROR_CODE) */ rdev->gart.ttm_alloced[p] = true; rdev->gart.pages_addr[p] = dma_addr[i]; } else { @@ -285,4 +285,6 @@ void radeon_gart_fini(struct radeon_device *rdev) rdev->gart.pages = NULL; rdev->gart.pages_addr = NULL; rdev->gart.ttm_alloced = NULL; + + radeon_dummy_page_fini(rdev); } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_i2c.c b/trunk/drivers/gpu/drm/radeon/radeon_i2c.c index ded2a45bc95c..983cbac75af0 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_i2c.c @@ -1062,7 +1062,7 @@ void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus, *val = in_buf[0]; DRM_DEBUG("val = 0x%02x\n", *val); } else { - DRM_ERROR("i2c 0x%02x 0x%02x read failed\n", + DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n", addr, *val); } } @@ -1084,7 +1084,7 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus, out_buf[1] = val; if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1) - DRM_ERROR("i2c 0x%02x 0x%02x write failed\n", + DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n", addr, val); } @@ -1096,6 +1096,9 @@ void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector) if (!radeon_connector->router.ddc_valid) return; + if (!radeon_connector->router_bus) + return; + radeon_i2c_get_byte(radeon_connector->router_bus, radeon_connector->router.i2c_addr, 0x3, &val); @@ -1121,6 +1124,9 @@ void radeon_router_select_cd_port(struct radeon_connector *radeon_connector) if (!radeon_connector->router.cd_valid) return; + if (!radeon_connector->router_bus) + return; + radeon_i2c_get_byte(radeon_connector->router_bus, radeon_connector->router.i2c_addr, 0x3, &val); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_kms.c b/trunk/drivers/gpu/drm/radeon/radeon_kms.c index bf7d4c061451..bd58af658581 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_kms.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_kms.c @@ -221,6 +221,22 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return -EINVAL; } break; + case RADEON_INFO_NUM_TILE_PIPES: + if (rdev->family >= CHIP_CAYMAN) + value = rdev->config.cayman.max_tile_pipes; + else if (rdev->family >= CHIP_CEDAR) + value = rdev->config.evergreen.max_tile_pipes; + else if (rdev->family >= CHIP_RV770) + value = rdev->config.rv770.max_tile_pipes; + else if (rdev->family >= CHIP_R600) + value = rdev->config.r600.max_tile_pipes; + else { + return -EINVAL; + } + break; + case RADEON_INFO_FUSION_GART_WORKING: + value = 1; + break; default: DRM_DEBUG_KMS("Invalid request %d\n", info->request); return -EINVAL; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/trunk/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 5b54268ed6b2..2f46e0c8df53 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -269,7 +269,7 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = { .disable = radeon_legacy_encoder_disable, }; -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE +#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) #define MAX_RADEON_LEVEL 0xFF diff --git a/trunk/drivers/gpu/drm/radeon/radeon_pm.c b/trunk/drivers/gpu/drm/radeon/radeon_pm.c index 08de669e025a..86eda1ea94df 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_pm.c +++ 
b/trunk/drivers/gpu/drm/radeon/radeon_pm.c @@ -23,6 +23,7 @@ #include "drmP.h" #include "radeon.h" #include "avivod.h" +#include "atom.h" #ifdef CONFIG_ACPI #include #endif @@ -535,7 +536,11 @@ void radeon_pm_resume(struct radeon_device *rdev) /* set up the default clocks if the MC ucode is loaded */ if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { if (rdev->pm.default_vddc) - radeon_atom_set_voltage(rdev, rdev->pm.default_vddc); + radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, + SET_VOLTAGE_TYPE_ASIC_VDDC); + if (rdev->pm.default_vddci) + radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, + SET_VOLTAGE_TYPE_ASIC_VDDCI); if (rdev->pm.default_sclk) radeon_set_engine_clock(rdev, rdev->pm.default_sclk); if (rdev->pm.default_mclk) @@ -548,6 +553,7 @@ void radeon_pm_resume(struct radeon_device *rdev) rdev->pm.current_sclk = rdev->pm.default_sclk; rdev->pm.current_mclk = rdev->pm.default_mclk; rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; + rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci; if (rdev->pm.pm_method == PM_METHOD_DYNPM && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; @@ -585,7 +591,8 @@ int radeon_pm_init(struct radeon_device *rdev) /* set up the default clocks if the MC ucode is loaded */ if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { if (rdev->pm.default_vddc) - radeon_atom_set_voltage(rdev, rdev->pm.default_vddc); + radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, + SET_VOLTAGE_TYPE_ASIC_VDDC); if (rdev->pm.default_sclk) radeon_set_engine_clock(rdev, rdev->pm.default_sclk); if (rdev->pm.default_mclk) diff --git a/trunk/drivers/gpu/drm/radeon/radeon_ring.c b/trunk/drivers/gpu/drm/radeon/radeon_ring.c index bbc9cd823334..c6776e48fdde 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_ring.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_ring.c @@ -248,7 +248,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev) void radeon_ring_free_size(struct radeon_device *rdev) { if (rdev->wb.enabled) - rdev->cp.rptr = rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]; + rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]); else { if (rdev->family >= CHIP_R600) rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); diff --git a/trunk/drivers/gpu/drm/radeon/reg_srcs/cayman b/trunk/drivers/gpu/drm/radeon/reg_srcs/cayman index 6334f8ac1209..0aa8e85a9457 100644 --- a/trunk/drivers/gpu/drm/radeon/reg_srcs/cayman +++ b/trunk/drivers/gpu/drm/radeon/reg_srcs/cayman @@ -33,6 +33,7 @@ cayman 0x9400 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS 0x00009100 SPI_CONFIG_CNTL 0x0000913C SPI_CONFIG_CNTL_1 +0x00009508 TA_CNTL_AUX 0x00009830 DB_DEBUG 0x00009834 DB_DEBUG2 0x00009838 DB_DEBUG3 diff --git a/trunk/drivers/gpu/drm/radeon/reg_srcs/evergreen b/trunk/drivers/gpu/drm/radeon/reg_srcs/evergreen index 7e1637176e08..0e28cae7ea43 100644 --- a/trunk/drivers/gpu/drm/radeon/reg_srcs/evergreen +++ b/trunk/drivers/gpu/drm/radeon/reg_srcs/evergreen @@ -46,6 +46,7 @@ evergreen 0x9400 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS 0x00009100 SPI_CONFIG_CNTL 0x0000913C SPI_CONFIG_CNTL_1 +0x00009508 TA_CNTL_AUX 0x00009700 VC_CNTL 0x00009714 VC_ENHANCE 0x00009830 DB_DEBUG diff --git a/trunk/drivers/gpu/drm/radeon/reg_srcs/r600 b/trunk/drivers/gpu/drm/radeon/reg_srcs/r600 index af0da4ae3f55..92f1900dc7ca 100644 --- a/trunk/drivers/gpu/drm/radeon/reg_srcs/r600 +++ b/trunk/drivers/gpu/drm/radeon/reg_srcs/r600 @@ -708,6 +708,7 @@ r600 0x9400 0x00028D0C DB_RENDER_CONTROL 0x00028D10 
DB_RENDER_OVERRIDE 0x0002880C DB_SHADER_CONTROL +0x00028D28 DB_SRESULTS_COMPARE_STATE0 0x00028D2C DB_SRESULTS_COMPARE_STATE1 0x00028430 DB_STENCILREFMASK 0x00028434 DB_STENCILREFMASK_BF diff --git a/trunk/drivers/gpu/drm/radeon/rs600.c b/trunk/drivers/gpu/drm/radeon/rs600.c index 876cebc4b8ba..6e3b11e5abbe 100644 --- a/trunk/drivers/gpu/drm/radeon/rs600.c +++ b/trunk/drivers/gpu/drm/radeon/rs600.c @@ -114,7 +114,7 @@ void rs600_pm_misc(struct radeon_device *rdev) udelay(voltage->delay); } } else if (voltage->type == VOLTAGE_VDDC) - radeon_atom_set_voltage(rdev, voltage->vddc_id); + radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC); dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH); dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf); diff --git a/trunk/drivers/gpu/drm/radeon/rv770.c b/trunk/drivers/gpu/drm/radeon/rv770.c index b974ac7df8df..ef8a5babe9f7 100644 --- a/trunk/drivers/gpu/drm/radeon/rv770.c +++ b/trunk/drivers/gpu/drm/radeon/rv770.c @@ -106,7 +106,7 @@ void rv770_pm_misc(struct radeon_device *rdev) if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { if (voltage->voltage != rdev->pm.current_vddc) { - radeon_atom_set_voltage(rdev, voltage->voltage); + radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); rdev->pm.current_vddc = voltage->voltage; DRM_DEBUG("Setting: v: %d\n", voltage->voltage); } @@ -1255,9 +1255,6 @@ int rv770_init(struct radeon_device *rdev) { int r; - r = radeon_dummy_page_init(rdev); - if (r) - return r; /* This don't do much */ r = radeon_gem_init(rdev); if (r) @@ -1372,7 +1369,6 @@ void rv770_fini(struct radeon_device *rdev) radeon_atombios_fini(rdev); kfree(rdev->bios); rdev->bios = NULL; - radeon_dummy_page_fini(rdev); } static void rv770_pcie_gen2_enable(struct radeon_device *rdev) diff --git a/trunk/drivers/gpu/drm/ttm/ttm_page_alloc.c b/trunk/drivers/gpu/drm/ttm/ttm_page_alloc.c index 737a2a2e46a5..9d9d92945f8c 100644 --- a/trunk/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/trunk/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -683,22 +683,14 @@ int ttm_get_pages(struct list_head *pages, int flags, gfp_flags |= GFP_HIGHUSER; for (r = 0; r < count; ++r) { - if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) { - void *addr; - addr = dma_alloc_coherent(NULL, PAGE_SIZE, - &dma_address[r], - gfp_flags); - if (addr == NULL) - return -ENOMEM; - p = virt_to_page(addr); - } else - p = alloc_page(gfp_flags); + p = alloc_page(gfp_flags); if (!p) { printk(KERN_ERR TTM_PFX "Unable to allocate page."); return -ENOMEM; } + list_add(&p->lru, pages); } return 0; @@ -746,24 +738,12 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags, unsigned long irq_flags; struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); struct page *p, *tmp; - unsigned r; if (pool == NULL) { /* No pool for this memory type so free the pages */ - r = page_count-1; list_for_each_entry_safe(p, tmp, pages, lru) { - if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) { - void *addr = page_address(p); - WARN_ON(!addr || !dma_address[r]); - if (addr) - dma_free_coherent(NULL, PAGE_SIZE, - addr, - dma_address[r]); - dma_address[r] = 0; - } else - __free_page(p); - r--; + __free_page(p); } /* Make the pages list empty */ INIT_LIST_HEAD(pages); diff --git a/trunk/drivers/gpu/stub/Kconfig b/trunk/drivers/gpu/stub/Kconfig index 70e60a4bb678..419917955bf6 100644 --- a/trunk/drivers/gpu/stub/Kconfig +++ b/trunk/drivers/gpu/stub/Kconfig @@ -5,6 +5,7 @@ config STUB_POULSBO # Poulsbo stub depends on ACPI_VIDEO when ACPI is 
enabled # but for select to work, need to select ACPI_VIDEO's dependencies, ick select BACKLIGHT_CLASS_DEVICE if ACPI + select VIDEO_OUTPUT_CONTROL if ACPI select INPUT if ACPI select ACPI_VIDEO if ACPI select THERMAL if ACPI diff --git a/trunk/drivers/gpu/vga/vga_switcheroo.c b/trunk/drivers/gpu/vga/vga_switcheroo.c index e01cacba685f..498b284e5ef9 100644 --- a/trunk/drivers/gpu/vga/vga_switcheroo.c +++ b/trunk/drivers/gpu/vga/vga_switcheroo.c @@ -219,9 +219,6 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client) int i; struct vga_switcheroo_client *active = NULL; - if (new_client->active == true) - return 0; - for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { if (vgasr_priv.clients[i].active == true) { active = &vgasr_priv.clients[i]; @@ -372,6 +369,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, goto out; } + if (client->active == true) + goto out; + /* okay we want a switch - test if devices are willing to switch */ can_switch = true; for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { diff --git a/trunk/drivers/hwmon/Kconfig b/trunk/drivers/hwmon/Kconfig index 060ef6327876..50e40dbd8bb6 100644 --- a/trunk/drivers/hwmon/Kconfig +++ b/trunk/drivers/hwmon/Kconfig @@ -110,8 +110,7 @@ config SENSORS_ADM1021 help If you say yes here you get support for Analog Devices ADM1021 and ADM1023 sensor chips and clones: Maxim MAX1617 and MAX1617A, - Genesys Logic GL523SM, National Semiconductor LM84, TI THMC10, - and the XEON processor built-in sensor. + Genesys Logic GL523SM, National Semiconductor LM84 and TI THMC10. This driver can also be built as a module. If so, the module will be called adm1021. @@ -618,10 +617,10 @@ config SENSORS_LM90 depends on I2C help If you say yes here you get support for National Semiconductor LM90, - LM86, LM89 and LM99, Analog Devices ADM1032 and ADT7461, Maxim - MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659, - MAX6680, MAX6681, MAX6692, MAX6695, MAX6696, and Winbond/Nuvoton - W83L771W/G/AWG/ASG sensor chips. + LM86, LM89 and LM99, Analog Devices ADM1032, ADT7461, and ADT7461A, + Maxim MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659, + MAX6680, MAX6681, MAX6692, MAX6695, MAX6696, ON Semiconductor NCT1008, + and Winbond/Nuvoton W83L771W/G/AWG/ASG sensor chips. This driver can also be built as a module. If so, the module will be called lm90. 
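An editorial aside, not part of the patch series itself: several of the radeon hunks above (evergreen.c, r600.c, rs600.c, rv770.c, radeon_pm.c and radeon_atombios.c) move callers onto the widened radeon_atom_set_voltage(rdev, level, type) interface and track a VDDCI rail alongside VDDC. The short, self-contained C sketch below illustrates the caching pattern those hunks follow: program a rail only when the requested level is non-zero and differs from the last value written. Every name in it (rail_state, program_rail, apply_levels) is a hypothetical stand-in for illustration, not a kernel API.

#include <stdio.h>
#include <stdint.h>

enum rail { RAIL_VDDC, RAIL_VDDCI };

struct rail_state {
	uint16_t current_vddc;   /* last VDDC level actually programmed */
	uint16_t current_vddci;  /* last VDDCI level actually programmed */
};

/* stand-in for the firmware call that really sets a voltage */
static void program_rail(enum rail which, uint16_t level)
{
	printf("set %s to %u\n", which == RAIL_VDDC ? "vddc" : "vddci", level);
}

/* mirrors the pattern in the evergreen_pm_misc() hunk above: skip zero
 * (unknown) levels and skip writes that would not change anything */
static void apply_levels(struct rail_state *st, uint16_t vddc, uint16_t vddci)
{
	if (vddc && vddc != st->current_vddc) {
		program_rail(RAIL_VDDC, vddc);
		st->current_vddc = vddc;
	}
	if (vddci && vddci != st->current_vddci) {
		program_rail(RAIL_VDDCI, vddci);
		st->current_vddci = vddci;
	}
}

int main(void)
{
	struct rail_state st = { 0, 0 };

	apply_levels(&st, 1100, 900);   /* both rails programmed */
	apply_levels(&st, 1100, 950);   /* only vddci changes */
	apply_levels(&st, 0, 950);      /* nothing to do */
	return 0;
}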
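In the same spirit, a hypothetical sketch of the alias-table idea used by the lm90.c hunks a little further below, where the new "adt7461a" and "nct1008" device IDs are mapped onto the existing adt7461 handling instead of introducing a new chip type. The names here (chip_kind, id_entry, lookup_kind) are invented for illustration and are not the driver's real structures.

#include <stdio.h>
#include <string.h>

enum chip_kind { KIND_LM90, KIND_ADT7461, KIND_MAX6657 };

struct id_entry {
	const char *name;      /* probe string supplied by the bus/platform */
	enum chip_kind kind;   /* internal handling class */
};

static const struct id_entry id_table[] = {
	{ "lm90",     KIND_LM90 },
	{ "adt7461",  KIND_ADT7461 },
	{ "adt7461a", KIND_ADT7461 },   /* register-compatible with ADT7461 */
	{ "nct1008",  KIND_ADT7461 },   /* ON Semi part, also ADT7461-compatible */
	{ "max6657",  KIND_MAX6657 },
	{ NULL,       KIND_LM90 }       /* sentinel */
};

static int lookup_kind(const char *name, enum chip_kind *kind)
{
	for (const struct id_entry *e = id_table; e->name; e++) {
		if (!strcmp(e->name, name)) {
			*kind = e->kind;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	enum chip_kind kind;
	const char *probe = "nct1008";

	if (lookup_kind(probe, &kind) == 0)
		printf("%s handled as kind %d\n", probe, kind);
	return 0;
}

The point of the pattern is that register-compatible parts share one code path, so supporting a rebadged chip amounts to a table entry plus a small detect tweak rather than new logic.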
diff --git a/trunk/drivers/hwmon/lm85.c b/trunk/drivers/hwmon/lm85.c index 250d099ca398..da72dc12068c 100644 --- a/trunk/drivers/hwmon/lm85.c +++ b/trunk/drivers/hwmon/lm85.c @@ -1094,6 +1094,7 @@ static struct attribute *lm85_attributes_minctl[] = { &sensor_dev_attr_pwm1_auto_pwm_minctl.dev_attr.attr, &sensor_dev_attr_pwm2_auto_pwm_minctl.dev_attr.attr, &sensor_dev_attr_pwm3_auto_pwm_minctl.dev_attr.attr, + NULL }; static const struct attribute_group lm85_group_minctl = { @@ -1104,6 +1105,7 @@ static struct attribute *lm85_attributes_temp_off[] = { &sensor_dev_attr_temp1_auto_temp_off.dev_attr.attr, &sensor_dev_attr_temp2_auto_temp_off.dev_attr.attr, &sensor_dev_attr_temp3_auto_temp_off.dev_attr.attr, + NULL }; static const struct attribute_group lm85_group_temp_off = { @@ -1329,11 +1331,11 @@ static int lm85_probe(struct i2c_client *client, if (data->type != emc6d103s) { err = sysfs_create_group(&client->dev.kobj, &lm85_group_minctl); if (err) - goto err_kfree; + goto err_remove_files; err = sysfs_create_group(&client->dev.kobj, &lm85_group_temp_off); if (err) - goto err_kfree; + goto err_remove_files; } /* The ADT7463/68 have an optional VRM 10 mode where pin 21 is used diff --git a/trunk/drivers/hwmon/lm90.c b/trunk/drivers/hwmon/lm90.c index c43b4e9f96a9..2f94f9504804 100644 --- a/trunk/drivers/hwmon/lm90.c +++ b/trunk/drivers/hwmon/lm90.c @@ -49,10 +49,10 @@ * chips, but support three temperature sensors instead of two. MAX6695 * and MAX6696 only differ in the pinout so they can be treated identically. * - * This driver also supports the ADT7461 chip from Analog Devices. - * It's supported in both compatibility and extended mode. It is mostly - * compatible with LM90 except for a data format difference for the - * temperature value registers. + * This driver also supports ADT7461 and ADT7461A from Analog Devices as well as + * NCT1008 from ON Semiconductor. The chips are supported in both compatibility + * and extended mode. They are mostly compatible with LM90 except for a data + * format difference for the temperature value registers. * * Since the LM90 was the first chipset supported by this driver, most * comments will refer to this chipset, but are actually general and @@ -88,9 +88,10 @@ * Addresses to scan * Address is fully defined internally and cannot be changed except for * MAX6659, MAX6680 and MAX6681. - * LM86, LM89, LM90, LM99, ADM1032, ADM1032-1, ADT7461, MAX6649, MAX6657, - * MAX6658 and W83L771 have address 0x4c. - * ADM1032-2, ADT7461-2, LM89-1, LM99-1 and MAX6646 have address 0x4d. + * LM86, LM89, LM90, LM99, ADM1032, ADM1032-1, ADT7461, ADT7461A, MAX6649, + * MAX6657, MAX6658, NCT1008 and W83L771 have address 0x4c. + * ADM1032-2, ADT7461-2, ADT7461A-2, LM89-1, LM99-1, MAX6646, and NCT1008D + * have address 0x4d. * MAX6647 has address 0x4e. * MAX6659 can have address 0x4c, 0x4d or 0x4e. 
* MAX6680 and MAX6681 can have address 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, @@ -174,6 +175,7 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680, static const struct i2c_device_id lm90_id[] = { { "adm1032", adm1032 }, { "adt7461", adt7461 }, + { "adt7461a", adt7461 }, { "lm90", lm90 }, { "lm86", lm86 }, { "lm89", lm86 }, @@ -188,6 +190,7 @@ static const struct i2c_device_id lm90_id[] = { { "max6681", max6680 }, { "max6695", max6696 }, { "max6696", max6696 }, + { "nct1008", adt7461 }, { "w83l771", w83l771 }, { } }; @@ -1153,6 +1156,11 @@ static int lm90_detect(struct i2c_client *new_client, && (reg_config1 & 0x1B) == 0x00 && reg_convrate <= 0x0A) { name = "adt7461"; + } else + if (chip_id == 0x57 /* ADT7461A, NCT1008 */ + && (reg_config1 & 0x1B) == 0x00 + && reg_convrate <= 0x0A) { + name = "adt7461a"; } } else if (man_id == 0x4D) { /* Maxim */ diff --git a/trunk/drivers/hwmon/pmbus_core.c b/trunk/drivers/hwmon/pmbus_core.c index edfb92e41735..196ffafafd88 100644 --- a/trunk/drivers/hwmon/pmbus_core.c +++ b/trunk/drivers/hwmon/pmbus_core.c @@ -139,7 +139,6 @@ struct pmbus_data { * A single status register covers multiple attributes, * so we keep them all together. */ - u8 status_bits; u8 status[PB_NUM_STATUS_REG]; u8 currpage; diff --git a/trunk/drivers/hwmon/twl4030-madc-hwmon.c b/trunk/drivers/hwmon/twl4030-madc-hwmon.c index de5819199e2e..57240740b161 100644 --- a/trunk/drivers/hwmon/twl4030-madc-hwmon.c +++ b/trunk/drivers/hwmon/twl4030-madc-hwmon.c @@ -98,7 +98,6 @@ static const struct attribute_group twl4030_madc_group = { static int __devinit twl4030_madc_hwmon_probe(struct platform_device *pdev) { int ret; - int status; struct device *hwmon; ret = sysfs_create_group(&pdev->dev.kobj, &twl4030_madc_group); @@ -107,7 +106,7 @@ static int __devinit twl4030_madc_hwmon_probe(struct platform_device *pdev) hwmon = hwmon_device_register(&pdev->dev); if (IS_ERR(hwmon)) { dev_err(&pdev->dev, "hwmon_device_register failed.\n"); - status = PTR_ERR(hwmon); + ret = PTR_ERR(hwmon); goto err_reg; } diff --git a/trunk/drivers/i2c/algos/i2c-algo-bit.c b/trunk/drivers/i2c/algos/i2c-algo-bit.c index 38319a69bd0a..d6d58684712b 100644 --- a/trunk/drivers/i2c/algos/i2c-algo-bit.c +++ b/trunk/drivers/i2c/algos/i2c-algo-bit.c @@ -232,9 +232,17 @@ static int i2c_inb(struct i2c_adapter *i2c_adap) * Sanity check for the adapter hardware - check the reaction of * the bus lines only if it seems to be idle. 
*/ -static int test_bus(struct i2c_algo_bit_data *adap, char *name) +static int test_bus(struct i2c_adapter *i2c_adap) { - int scl, sda; + struct i2c_algo_bit_data *adap = i2c_adap->algo_data; + const char *name = i2c_adap->name; + int scl, sda, ret; + + if (adap->pre_xfer) { + ret = adap->pre_xfer(i2c_adap); + if (ret < 0) + return -ENODEV; + } if (adap->getscl == NULL) pr_info("%s: Testing SDA only, SCL is not readable\n", name); @@ -297,11 +305,19 @@ static int test_bus(struct i2c_algo_bit_data *adap, char *name) "while pulling SCL high!\n", name); goto bailout; } + + if (adap->post_xfer) + adap->post_xfer(i2c_adap); + pr_info("%s: Test OK\n", name); return 0; bailout: sdahi(adap); sclhi(adap); + + if (adap->post_xfer) + adap->post_xfer(i2c_adap); + return -ENODEV; } @@ -607,7 +623,7 @@ static int __i2c_bit_add_bus(struct i2c_adapter *adap, int ret; if (bit_test) { - ret = test_bus(bit_adap, adap->name); + ret = test_bus(adap); if (ret < 0) return -ENODEV; } diff --git a/trunk/drivers/i2c/busses/i2c-i801.c b/trunk/drivers/i2c/busses/i2c-i801.c index 72c0415f6f94..455e909bc768 100644 --- a/trunk/drivers/i2c/busses/i2c-i801.c +++ b/trunk/drivers/i2c/busses/i2c-i801.c @@ -134,10 +134,15 @@ SMBHSTSTS_BUS_ERR | SMBHSTSTS_DEV_ERR | \ SMBHSTSTS_INTR) +/* Older devices have their ID defined in */ +#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22 +#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22 /* Patsburg also has three 'Integrated Device Function' SMBus controllers */ #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0 0x1d70 #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1 0x1d71 #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2 0x1d72 +#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330 +#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30 struct i801_priv { struct i2c_adapter adapter; diff --git a/trunk/drivers/i2c/busses/i2c-mpc.c b/trunk/drivers/i2c/busses/i2c-mpc.c index 75b984c519ac..107397a606b4 100644 --- a/trunk/drivers/i2c/busses/i2c-mpc.c +++ b/trunk/drivers/i2c/busses/i2c-mpc.c @@ -560,15 +560,18 @@ static struct i2c_adapter mpc_ops = { .timeout = HZ, }; +static const struct of_device_id mpc_i2c_of_match[]; static int __devinit fsl_i2c_probe(struct platform_device *op) { + const struct of_device_id *match; struct mpc_i2c *i2c; const u32 *prop; u32 clock = MPC_I2C_CLOCK_LEGACY; int result = 0; int plen; - if (!op->dev.of_match) + match = of_match_device(mpc_i2c_of_match, &op->dev); + if (!match) return -EINVAL; i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); @@ -605,8 +608,8 @@ static int __devinit fsl_i2c_probe(struct platform_device *op) clock = *prop; } - if (op->dev.of_match->data) { - struct mpc_i2c_data *data = op->dev.of_match->data; + if (match->data) { + struct mpc_i2c_data *data = match->data; data->setup(op->dev.of_node, i2c, clock, data->prescaler); } else { /* Backwards compatibility */ diff --git a/trunk/drivers/i2c/busses/i2c-parport.c b/trunk/drivers/i2c/busses/i2c-parport.c index 0eb1515541e7..2dbba163b102 100644 --- a/trunk/drivers/i2c/busses/i2c-parport.c +++ b/trunk/drivers/i2c/busses/i2c-parport.c @@ -1,7 +1,7 @@ /* ------------------------------------------------------------------------ * * i2c-parport.c I2C bus over parallel port * * ------------------------------------------------------------------------ * - Copyright (C) 2003-2010 Jean Delvare + Copyright (C) 2003-2011 Jean Delvare Based on older i2c-philips-par.c driver Copyright (C) 1995-2000 Simon G. 
Vogl @@ -33,6 +33,8 @@ #include #include #include +#include +#include #include "i2c-parport.h" /* ----- Device list ------------------------------------------------------ */ @@ -43,10 +45,11 @@ struct i2c_par { struct i2c_algo_bit_data algo_data; struct i2c_smbus_alert_setup alert_data; struct i2c_client *ara; - struct i2c_par *next; + struct list_head node; }; -static struct i2c_par *adapter_list; +static LIST_HEAD(adapter_list); +static DEFINE_MUTEX(adapter_list_lock); /* ----- Low-level parallel port access ----------------------------------- */ @@ -228,8 +231,9 @@ static void i2c_parport_attach (struct parport *port) } /* Add the new adapter to the list */ - adapter->next = adapter_list; - adapter_list = adapter; + mutex_lock(&adapter_list_lock); + list_add_tail(&adapter->node, &adapter_list); + mutex_unlock(&adapter_list_lock); return; ERROR1: @@ -241,11 +245,11 @@ static void i2c_parport_attach (struct parport *port) static void i2c_parport_detach (struct parport *port) { - struct i2c_par *adapter, *prev; + struct i2c_par *adapter, *_n; /* Walk the list */ - for (prev = NULL, adapter = adapter_list; adapter; - prev = adapter, adapter = adapter->next) { + mutex_lock(&adapter_list_lock); + list_for_each_entry_safe(adapter, _n, &adapter_list, node) { if (adapter->pdev->port == port) { if (adapter->ara) { parport_disable_irq(port); @@ -259,14 +263,11 @@ static void i2c_parport_detach (struct parport *port) parport_release(adapter->pdev); parport_unregister_device(adapter->pdev); - if (prev) - prev->next = adapter->next; - else - adapter_list = adapter->next; + list_del(&adapter->node); kfree(adapter); - return; } } + mutex_unlock(&adapter_list_lock); } static struct parport_driver i2c_parport_driver = { diff --git a/trunk/drivers/i2c/busses/i2c-pnx.c b/trunk/drivers/i2c/busses/i2c-pnx.c index a97e3fec8148..04be9f82e14b 100644 --- a/trunk/drivers/i2c/busses/i2c-pnx.c +++ b/trunk/drivers/i2c/busses/i2c-pnx.c @@ -65,7 +65,7 @@ static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data) jiffies, expires); timer->expires = jiffies + expires; - timer->data = (unsigned long)&alg_data; + timer->data = (unsigned long)alg_data; add_timer(timer); } diff --git a/trunk/drivers/i2c/i2c-core.c b/trunk/drivers/i2c/i2c-core.c index 70c30e6bce0b..9a58994ff7ea 100644 --- a/trunk/drivers/i2c/i2c-core.c +++ b/trunk/drivers/i2c/i2c-core.c @@ -797,7 +797,8 @@ static int i2c_do_add_adapter(struct i2c_driver *driver, /* Let legacy drivers scan this bus for matching devices */ if (driver->attach_adapter) { - dev_warn(&adap->dev, "attach_adapter method is deprecated\n"); + dev_warn(&adap->dev, "%s: attach_adapter method is deprecated\n", + driver->driver.name); dev_warn(&adap->dev, "Please use another way to instantiate " "your i2c_client\n"); /* We ignore the return code; if it fails, too bad */ @@ -984,7 +985,8 @@ static int i2c_do_del_adapter(struct i2c_driver *driver, if (!driver->detach_adapter) return 0; - dev_warn(&adapter->dev, "detach_adapter method is deprecated\n"); + dev_warn(&adapter->dev, "%s: detach_adapter method is deprecated\n", + driver->driver.name); res = driver->detach_adapter(adapter); if (res) dev_err(&adapter->dev, "detach_adapter failed (%d) " diff --git a/trunk/drivers/ide/ide-cd.c b/trunk/drivers/ide/ide-cd.c index fd1e11799137..a5ec5a7cb381 100644 --- a/trunk/drivers/ide/ide-cd.c +++ b/trunk/drivers/ide/ide-cd.c @@ -1782,7 +1782,6 @@ static int ide_cd_probe(ide_drive_t *drive) ide_cd_read_toc(drive, &sense); g->fops = &idecd_ops; g->flags |= GENHD_FL_REMOVABLE; - g->events 
= DISK_EVENT_MEDIA_CHANGE; add_disk(g); return 0; diff --git a/trunk/drivers/ide/ide-cd_ioctl.c b/trunk/drivers/ide/ide-cd_ioctl.c index 2a6bc50e8a41..02caa7dd51c8 100644 --- a/trunk/drivers/ide/ide-cd_ioctl.c +++ b/trunk/drivers/ide/ide-cd_ioctl.c @@ -79,6 +79,12 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr) return CDS_DRIVE_NOT_READY; } +/* + * ide-cd always generates media changed event if media is missing, which + * makes it impossible to use for proper event reporting, so disk->events + * is cleared to 0 and the following function is used only to trigger + * revalidation and never propagated to userland. + */ unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi, unsigned int clearing, int slot_nr) { diff --git a/trunk/drivers/ide/ide-gd.c b/trunk/drivers/ide/ide-gd.c index c4ffd4888939..70ea8763567d 100644 --- a/trunk/drivers/ide/ide-gd.c +++ b/trunk/drivers/ide/ide-gd.c @@ -298,6 +298,12 @@ static unsigned int ide_gd_check_events(struct gendisk *disk, return 0; } + /* + * The following is used to force revalidation on the first open on + * removeable devices, and never gets reported to userland as + * genhd->events is 0. This is intended as removeable ide disk + * can't really detect MEDIA_CHANGE events. + */ ret = drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED; drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED; @@ -413,7 +419,6 @@ static int ide_gd_probe(ide_drive_t *drive) if (drive->dev_flags & IDE_DFLAG_REMOVABLE) g->flags = GENHD_FL_REMOVABLE; g->fops = &ide_gd_ops; - g->events = DISK_EVENT_MEDIA_CHANGE; add_disk(g); return 0; diff --git a/trunk/drivers/infiniband/core/cma.c b/trunk/drivers/infiniband/core/cma.c index 5ed9d25d021a..99dde874fbbd 100644 --- a/trunk/drivers/infiniband/core/cma.c +++ b/trunk/drivers/infiniband/core/cma.c @@ -148,6 +148,7 @@ struct rdma_id_private { u32 qp_num; u8 srq; u8 tos; + u8 reuseaddr; }; struct cma_multicast { @@ -712,6 +713,21 @@ static inline int cma_any_addr(struct sockaddr *addr) return cma_zero_addr(addr) || cma_loopback_addr(addr); } +static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst) +{ + if (src->sa_family != dst->sa_family) + return -1; + + switch (src->sa_family) { + case AF_INET: + return ((struct sockaddr_in *) src)->sin_addr.s_addr != + ((struct sockaddr_in *) dst)->sin_addr.s_addr; + default: + return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr, + &((struct sockaddr_in6 *) dst)->sin6_addr); + } +} + static inline __be16 cma_port(struct sockaddr *addr) { if (addr->sa_family == AF_INET) @@ -1564,50 +1580,6 @@ static void cma_listen_on_all(struct rdma_id_private *id_priv) mutex_unlock(&lock); } -int rdma_listen(struct rdma_cm_id *id, int backlog) -{ - struct rdma_id_private *id_priv; - int ret; - - id_priv = container_of(id, struct rdma_id_private, id); - if (id_priv->state == CMA_IDLE) { - ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET; - ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr); - if (ret) - return ret; - } - - if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN)) - return -EINVAL; - - id_priv->backlog = backlog; - if (id->device) { - switch (rdma_node_get_transport(id->device->node_type)) { - case RDMA_TRANSPORT_IB: - ret = cma_ib_listen(id_priv); - if (ret) - goto err; - break; - case RDMA_TRANSPORT_IWARP: - ret = cma_iw_listen(id_priv, backlog); - if (ret) - goto err; - break; - default: - ret = -ENOSYS; - goto err; - } - } else - cma_listen_on_all(id_priv); - - return 0; -err: - id_priv->backlog = 0; - 
cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND); - return ret; -} -EXPORT_SYMBOL(rdma_listen); - void rdma_set_service_type(struct rdma_cm_id *id, int tos) { struct rdma_id_private *id_priv; @@ -2090,6 +2062,25 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, } EXPORT_SYMBOL(rdma_resolve_addr); +int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse) +{ + struct rdma_id_private *id_priv; + unsigned long flags; + int ret; + + id_priv = container_of(id, struct rdma_id_private, id); + spin_lock_irqsave(&id_priv->lock, flags); + if (id_priv->state == CMA_IDLE) { + id_priv->reuseaddr = reuse; + ret = 0; + } else { + ret = -EINVAL; + } + spin_unlock_irqrestore(&id_priv->lock, flags); + return ret; +} +EXPORT_SYMBOL(rdma_set_reuseaddr); + static void cma_bind_port(struct rdma_bind_list *bind_list, struct rdma_id_private *id_priv) { @@ -2165,41 +2156,71 @@ static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv) return -EADDRNOTAVAIL; } -static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv) +/* + * Check that the requested port is available. This is called when trying to + * bind to a specific port, or when trying to listen on a bound port. In + * the latter case, the provided id_priv may already be on the bind_list, but + * we still need to check that it's okay to start listening. + */ +static int cma_check_port(struct rdma_bind_list *bind_list, + struct rdma_id_private *id_priv, uint8_t reuseaddr) { struct rdma_id_private *cur_id; - struct sockaddr_in *sin, *cur_sin; - struct rdma_bind_list *bind_list; + struct sockaddr *addr, *cur_addr; struct hlist_node *node; + + addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr; + if (cma_any_addr(addr) && !reuseaddr) + return -EADDRNOTAVAIL; + + hlist_for_each_entry(cur_id, node, &bind_list->owners, node) { + if (id_priv == cur_id) + continue; + + if ((cur_id->state == CMA_LISTEN) || + !reuseaddr || !cur_id->reuseaddr) { + cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr; + if (cma_any_addr(cur_addr)) + return -EADDRNOTAVAIL; + + if (!cma_addr_cmp(addr, cur_addr)) + return -EADDRINUSE; + } + } + return 0; +} + +static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv) +{ + struct rdma_bind_list *bind_list; unsigned short snum; + int ret; - sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; - snum = ntohs(sin->sin_port); + snum = ntohs(cma_port((struct sockaddr *) &id_priv->id.route.addr.src_addr)); if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) return -EACCES; bind_list = idr_find(ps, snum); - if (!bind_list) - return cma_alloc_port(ps, id_priv, snum); - - /* - * We don't support binding to any address if anyone is bound to - * a specific address on the same port. 
- */ - if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)) - return -EADDRNOTAVAIL; - - hlist_for_each_entry(cur_id, node, &bind_list->owners, node) { - if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr)) - return -EADDRNOTAVAIL; - - cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr; - if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr) - return -EADDRINUSE; + if (!bind_list) { + ret = cma_alloc_port(ps, id_priv, snum); + } else { + ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr); + if (!ret) + cma_bind_port(bind_list, id_priv); } + return ret; +} - cma_bind_port(bind_list, id_priv); - return 0; +static int cma_bind_listen(struct rdma_id_private *id_priv) +{ + struct rdma_bind_list *bind_list = id_priv->bind_list; + int ret = 0; + + mutex_lock(&lock); + if (bind_list->owners.first->next) + ret = cma_check_port(bind_list, id_priv, 0); + mutex_unlock(&lock); + return ret; } static int cma_get_port(struct rdma_id_private *id_priv) @@ -2253,6 +2274,56 @@ static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, return 0; } +int rdma_listen(struct rdma_cm_id *id, int backlog) +{ + struct rdma_id_private *id_priv; + int ret; + + id_priv = container_of(id, struct rdma_id_private, id); + if (id_priv->state == CMA_IDLE) { + ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET; + ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr); + if (ret) + return ret; + } + + if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN)) + return -EINVAL; + + if (id_priv->reuseaddr) { + ret = cma_bind_listen(id_priv); + if (ret) + goto err; + } + + id_priv->backlog = backlog; + if (id->device) { + switch (rdma_node_get_transport(id->device->node_type)) { + case RDMA_TRANSPORT_IB: + ret = cma_ib_listen(id_priv); + if (ret) + goto err; + break; + case RDMA_TRANSPORT_IWARP: + ret = cma_iw_listen(id_priv, backlog); + if (ret) + goto err; + break; + default: + ret = -ENOSYS; + goto err; + } + } else + cma_listen_on_all(id_priv); + + return 0; +err: + id_priv->backlog = 0; + cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND); + return ret; +} +EXPORT_SYMBOL(rdma_listen); + int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) { struct rdma_id_private *id_priv; diff --git a/trunk/drivers/infiniband/core/iwcm.c b/trunk/drivers/infiniband/core/iwcm.c index 2a1e9ae134b4..a9c042345c6f 100644 --- a/trunk/drivers/infiniband/core/iwcm.c +++ b/trunk/drivers/infiniband/core/iwcm.c @@ -725,7 +725,7 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv, */ clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT); - if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) { + if (iw_event->status == 0) { cm_id_priv->id.local_addr = iw_event->local_addr; cm_id_priv->id.remote_addr = iw_event->remote_addr; cm_id_priv->state = IW_CM_STATE_ESTABLISHED; diff --git a/trunk/drivers/infiniband/core/ucma.c b/trunk/drivers/infiniband/core/ucma.c index ec1e9da1488b..b3fa798525b2 100644 --- a/trunk/drivers/infiniband/core/ucma.c +++ b/trunk/drivers/infiniband/core/ucma.c @@ -883,6 +883,13 @@ static int ucma_set_option_id(struct ucma_context *ctx, int optname, } rdma_set_service_type(ctx->cm_id, *((u8 *) optval)); break; + case RDMA_OPTION_ID_REUSEADDR: + if (optlen != sizeof(int)) { + ret = -EINVAL; + break; + } + ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 
1 : 0); + break; default: ret = -ENOSYS; } diff --git a/trunk/drivers/infiniband/hw/cxgb4/cm.c b/trunk/drivers/infiniband/hw/cxgb4/cm.c index 6aa53cd69478..f660cd04ec2f 100644 --- a/trunk/drivers/infiniband/hw/cxgb4/cm.c +++ b/trunk/drivers/infiniband/hw/cxgb4/cm.c @@ -1199,9 +1199,7 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) } PDBG("%s ep %p status %d error %d\n", __func__, ep, rpl->status, status2errno(rpl->status)); - ep->com.wr_wait.ret = status2errno(rpl->status); - ep->com.wr_wait.done = 1; - wake_up(&ep->com.wr_wait.wait); + c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); return 0; } @@ -1235,9 +1233,7 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) struct c4iw_listen_ep *ep = lookup_stid(t, stid); PDBG("%s ep %p\n", __func__, ep); - ep->com.wr_wait.ret = status2errno(rpl->status); - ep->com.wr_wait.done = 1; - wake_up(&ep->com.wr_wait.wait); + c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); return 0; } @@ -1467,7 +1463,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) struct c4iw_qp_attributes attrs; int disconnect = 1; int release = 0; - int closing = 0; + int abort = 0; struct tid_info *t = dev->rdev.lldi.tids; unsigned int tid = GET_TID(hdr); @@ -1493,23 +1489,22 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) * in rdma connection migration (see c4iw_accept_cr()). */ __state_set(&ep->com, CLOSING); - ep->com.wr_wait.done = 1; - ep->com.wr_wait.ret = -ECONNRESET; PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); - wake_up(&ep->com.wr_wait.wait); + c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); break; case MPA_REP_SENT: __state_set(&ep->com, CLOSING); - ep->com.wr_wait.done = 1; - ep->com.wr_wait.ret = -ECONNRESET; PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); - wake_up(&ep->com.wr_wait.wait); + c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); break; case FPDU_MODE: start_ep_timer(ep); __state_set(&ep->com, CLOSING); - closing = 1; + attrs.next_state = C4IW_QP_STATE_CLOSING; + abort = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, + C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); peer_close_upcall(ep); + disconnect = 1; break; case ABORTING: disconnect = 0; @@ -1537,11 +1532,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) BUG_ON(1); } mutex_unlock(&ep->com.mutex); - if (closing) { - attrs.next_state = C4IW_QP_STATE_CLOSING; - c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, - C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); - } if (disconnect) c4iw_ep_disconnect(ep, 0, GFP_KERNEL); if (release) @@ -1582,9 +1572,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) /* * Wake up any threads in rdma_init() or rdma_fini(). 
*/ - ep->com.wr_wait.done = 1; - ep->com.wr_wait.ret = -ECONNRESET; - wake_up(&ep->com.wr_wait.wait); + c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); mutex_lock(&ep->com.mutex); switch (ep->com.state) { @@ -1711,14 +1699,14 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) ep = lookup_tid(t, tid); BUG_ON(!ep); - if (ep->com.qp) { + if (ep && ep->com.qp) { printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid, ep->com.qp->wq.sq.qid); attrs.next_state = C4IW_QP_STATE_TERMINATE; c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); } else - printk(KERN_WARNING MOD "TERM received tid %u no qp\n", tid); + printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid); return 0; } @@ -2297,14 +2285,8 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); - if (wr_waitp) { - if (ret) - wr_waitp->ret = -ret; - else - wr_waitp->ret = 0; - wr_waitp->done = 1; - wake_up(&wr_waitp->wait); - } + if (wr_waitp) + c4iw_wake_up(wr_waitp, ret ? -ret : 0); kfree_skb(skb); break; case 2: diff --git a/trunk/drivers/infiniband/hw/cxgb4/device.c b/trunk/drivers/infiniband/hw/cxgb4/device.c index e29172c2afcb..40a13cc633a3 100644 --- a/trunk/drivers/infiniband/hw/cxgb4/device.c +++ b/trunk/drivers/infiniband/hw/cxgb4/device.c @@ -44,7 +44,7 @@ MODULE_DESCRIPTION("Chelsio T4 RDMA Driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(DRV_VERSION); -static LIST_HEAD(dev_list); +static LIST_HEAD(uld_ctx_list); static DEFINE_MUTEX(dev_mutex); static struct dentry *c4iw_debugfs_root; @@ -370,18 +370,23 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev) c4iw_destroy_resource(&rdev->resource); } -static void c4iw_remove(struct c4iw_dev *dev) +struct uld_ctx { + struct list_head entry; + struct cxgb4_lld_info lldi; + struct c4iw_dev *dev; +}; + +static void c4iw_remove(struct uld_ctx *ctx) { - PDBG("%s c4iw_dev %p\n", __func__, dev); - list_del(&dev->entry); - if (dev->registered) - c4iw_unregister_device(dev); - c4iw_rdev_close(&dev->rdev); - idr_destroy(&dev->cqidr); - idr_destroy(&dev->qpidr); - idr_destroy(&dev->mmidr); - iounmap(dev->rdev.oc_mw_kva); - ib_dealloc_device(&dev->ibdev); + PDBG("%s c4iw_dev %p\n", __func__, ctx->dev); + c4iw_unregister_device(ctx->dev); + c4iw_rdev_close(&ctx->dev->rdev); + idr_destroy(&ctx->dev->cqidr); + idr_destroy(&ctx->dev->qpidr); + idr_destroy(&ctx->dev->mmidr); + iounmap(ctx->dev->rdev.oc_mw_kva); + ib_dealloc_device(&ctx->dev->ibdev); + ctx->dev = NULL; } static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) @@ -392,7 +397,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp)); if (!devp) { printk(KERN_ERR MOD "Cannot allocate ib device\n"); - return NULL; + return ERR_PTR(-ENOMEM); } devp->rdev.lldi = *infop; @@ -402,27 +407,23 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa, devp->rdev.lldi.vr->ocq.size); - printk(KERN_INFO MOD "ocq memory: " + PDBG(KERN_INFO MOD "ocq memory: " "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n", devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size, devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva); - mutex_lock(&dev_mutex); - ret = c4iw_rdev_open(&devp->rdev); if (ret) { mutex_unlock(&dev_mutex); printk(KERN_ERR MOD "Unable to 
open CXIO rdev err %d\n", ret); ib_dealloc_device(&devp->ibdev); - return NULL; + return ERR_PTR(ret); } idr_init(&devp->cqidr); idr_init(&devp->qpidr); idr_init(&devp->mmidr); spin_lock_init(&devp->lock); - list_add_tail(&devp->entry, &dev_list); - mutex_unlock(&dev_mutex); if (c4iw_debugfs_root) { devp->debugfs_root = debugfs_create_dir( @@ -435,7 +436,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) static void *c4iw_uld_add(const struct cxgb4_lld_info *infop) { - struct c4iw_dev *dev; + struct uld_ctx *ctx; static int vers_printed; int i; @@ -443,25 +444,33 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop) printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n", DRV_VERSION); - dev = c4iw_alloc(infop); - if (!dev) + ctx = kzalloc(sizeof *ctx, GFP_KERNEL); + if (!ctx) { + ctx = ERR_PTR(-ENOMEM); goto out; + } + ctx->lldi = *infop; PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n", - __func__, pci_name(dev->rdev.lldi.pdev), - dev->rdev.lldi.nchan, dev->rdev.lldi.nrxq, - dev->rdev.lldi.ntxq, dev->rdev.lldi.nports); + __func__, pci_name(ctx->lldi.pdev), + ctx->lldi.nchan, ctx->lldi.nrxq, + ctx->lldi.ntxq, ctx->lldi.nports); + + mutex_lock(&dev_mutex); + list_add_tail(&ctx->entry, &uld_ctx_list); + mutex_unlock(&dev_mutex); - for (i = 0; i < dev->rdev.lldi.nrxq; i++) - PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]); + for (i = 0; i < ctx->lldi.nrxq; i++) + PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]); out: - return dev; + return ctx; } static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp, const struct pkt_gl *gl) { - struct c4iw_dev *dev = handle; + struct uld_ctx *ctx = handle; + struct c4iw_dev *dev = ctx->dev; struct sk_buff *skb; const struct cpl_act_establish *rpl; unsigned int opcode; @@ -503,47 +512,49 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp, static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state) { - struct c4iw_dev *dev = handle; + struct uld_ctx *ctx = handle; PDBG("%s new_state %u\n", __func__, new_state); switch (new_state) { case CXGB4_STATE_UP: - printk(KERN_INFO MOD "%s: Up\n", pci_name(dev->rdev.lldi.pdev)); - if (!dev->registered) { - int ret; - ret = c4iw_register_device(dev); - if (ret) + printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev)); + if (!ctx->dev) { + int ret = 0; + + ctx->dev = c4iw_alloc(&ctx->lldi); + if (!IS_ERR(ctx->dev)) + ret = c4iw_register_device(ctx->dev); + if (IS_ERR(ctx->dev) || ret) printk(KERN_ERR MOD "%s: RDMA registration failed: %d\n", - pci_name(dev->rdev.lldi.pdev), ret); + pci_name(ctx->lldi.pdev), ret); } break; case CXGB4_STATE_DOWN: printk(KERN_INFO MOD "%s: Down\n", - pci_name(dev->rdev.lldi.pdev)); - if (dev->registered) - c4iw_unregister_device(dev); + pci_name(ctx->lldi.pdev)); + if (ctx->dev) + c4iw_remove(ctx); break; case CXGB4_STATE_START_RECOVERY: printk(KERN_INFO MOD "%s: Fatal Error\n", - pci_name(dev->rdev.lldi.pdev)); - dev->rdev.flags |= T4_FATAL_ERROR; - if (dev->registered) { + pci_name(ctx->lldi.pdev)); + if (ctx->dev) { struct ib_event event; + ctx->dev->rdev.flags |= T4_FATAL_ERROR; memset(&event, 0, sizeof event); event.event = IB_EVENT_DEVICE_FATAL; - event.device = &dev->ibdev; + event.device = &ctx->dev->ibdev; ib_dispatch_event(&event); - c4iw_unregister_device(dev); + c4iw_remove(ctx); } break; case CXGB4_STATE_DETACH: printk(KERN_INFO MOD "%s: Detach\n", - pci_name(dev->rdev.lldi.pdev)); - mutex_lock(&dev_mutex); - c4iw_remove(dev); - mutex_unlock(&dev_mutex); + 
pci_name(ctx->lldi.pdev)); + if (ctx->dev) + c4iw_remove(ctx); break; } return 0; @@ -576,11 +587,13 @@ static int __init c4iw_init_module(void) static void __exit c4iw_exit_module(void) { - struct c4iw_dev *dev, *tmp; + struct uld_ctx *ctx, *tmp; mutex_lock(&dev_mutex); - list_for_each_entry_safe(dev, tmp, &dev_list, entry) { - c4iw_remove(dev); + list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) { + if (ctx->dev) + c4iw_remove(ctx); + kfree(ctx); } mutex_unlock(&dev_mutex); cxgb4_unregister_uld(CXGB4_ULD_RDMA); diff --git a/trunk/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/trunk/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 9f6166f59268..35d2a5dd9bb4 100644 --- a/trunk/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/trunk/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -131,42 +131,58 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev) #define C4IW_WR_TO (10*HZ) +enum { + REPLY_READY = 0, +}; + struct c4iw_wr_wait { wait_queue_head_t wait; - int done; + unsigned long status; int ret; }; static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp) { wr_waitp->ret = 0; - wr_waitp->done = 0; + wr_waitp->status = 0; init_waitqueue_head(&wr_waitp->wait); } +static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret) +{ + wr_waitp->ret = ret; + set_bit(REPLY_READY, &wr_waitp->status); + wake_up(&wr_waitp->wait); +} + static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev, struct c4iw_wr_wait *wr_waitp, u32 hwtid, u32 qpid, const char *func) { unsigned to = C4IW_WR_TO; - do { + int ret; - wait_event_timeout(wr_waitp->wait, wr_waitp->done, to); - if (!wr_waitp->done) { + do { + ret = wait_event_timeout(wr_waitp->wait, + test_and_clear_bit(REPLY_READY, &wr_waitp->status), to); + if (!ret) { printk(KERN_ERR MOD "%s - Device %s not responding - " "tid %u qpid %u\n", func, pci_name(rdev->lldi.pdev), hwtid, qpid); + if (c4iw_fatal_error(rdev)) { + wr_waitp->ret = -EIO; + break; + } to = to << 2; } - } while (!wr_waitp->done); + } while (!ret); if (wr_waitp->ret) - printk(KERN_WARNING MOD "%s: FW reply %d tid %u qpid %u\n", - pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid); + PDBG("%s: FW reply %d tid %u qpid %u\n", + pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid); return wr_waitp->ret; } - struct c4iw_dev { struct ib_device ibdev; struct c4iw_rdev rdev; @@ -175,9 +191,7 @@ struct c4iw_dev { struct idr qpidr; struct idr mmidr; spinlock_t lock; - struct list_head entry; struct dentry *debugfs_root; - u8 registered; }; static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev) diff --git a/trunk/drivers/infiniband/hw/cxgb4/provider.c b/trunk/drivers/infiniband/hw/cxgb4/provider.c index f66dd8bf5128..5b9e4220ca08 100644 --- a/trunk/drivers/infiniband/hw/cxgb4/provider.c +++ b/trunk/drivers/infiniband/hw/cxgb4/provider.c @@ -516,7 +516,6 @@ int c4iw_register_device(struct c4iw_dev *dev) if (ret) goto bail2; } - dev->registered = 1; return 0; bail2: ib_unregister_device(&dev->ibdev); @@ -535,6 +534,5 @@ void c4iw_unregister_device(struct c4iw_dev *dev) c4iw_class_attributes[i]); ib_unregister_device(&dev->ibdev); kfree(dev->ibdev.iwcm); - dev->registered = 0; return; } diff --git a/trunk/drivers/infiniband/hw/cxgb4/qp.c b/trunk/drivers/infiniband/hw/cxgb4/qp.c index 70a5a3c646da..3b773b05a898 100644 --- a/trunk/drivers/infiniband/hw/cxgb4/qp.c +++ b/trunk/drivers/infiniband/hw/cxgb4/qp.c @@ -214,7 +214,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */ V_FW_RI_RES_WR_CPRIO(0) | /* 
don't keep in chip cache */ V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */ - t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0 | + (t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) | V_FW_RI_RES_WR_IQID(scq->cqid)); res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( V_FW_RI_RES_WR_DCAEN(0) | @@ -1210,7 +1210,6 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, if (ret) { if (internal) c4iw_get_ep(&qhp->ep->com); - disconnect = abort = 1; goto err; } break; diff --git a/trunk/drivers/infiniband/hw/cxgb4/t4.h b/trunk/drivers/infiniband/hw/cxgb4/t4.h index 24af12fc8228..c0221eec8817 100644 --- a/trunk/drivers/infiniband/hw/cxgb4/t4.h +++ b/trunk/drivers/infiniband/hw/cxgb4/t4.h @@ -269,11 +269,8 @@ struct t4_swsqe { static inline pgprot_t t4_pgprot_wc(pgprot_t prot) { -#if defined(__i386__) || defined(__x86_64__) +#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64) return pgprot_writecombine(prot); -#elif defined(CONFIG_PPC64) - return __pgprot((pgprot_val(prot) | _PAGE_NO_CACHE) & - ~(pgprot_t)_PAGE_GUARDED); #else return pgprot_noncached(prot); #endif diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_driver.c b/trunk/drivers/infiniband/hw/ipath/ipath_driver.c index 58c0e417bc30..be24ac726114 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_driver.c @@ -398,7 +398,6 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, struct ipath_devdata *dd; unsigned long long addr; u32 bar0 = 0, bar1 = 0; - u8 rev; dd = ipath_alloc_devdata(pdev); if (IS_ERR(dd)) { @@ -540,13 +539,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, goto bail_regions; } - ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); - if (ret) { - ipath_dev_err(dd, "Failed to read PCI revision ID unit " - "%u: err %d\n", dd->ipath_unit, -ret); - goto bail_regions; /* shouldn't ever happen */ - } - dd->ipath_pcirev = rev; + dd->ipath_pcirev = pdev->revision; #if defined(__powerpc__) /* There isn't a generic way to specify writethrough mappings */ diff --git a/trunk/drivers/infiniband/hw/nes/nes_cm.c b/trunk/drivers/infiniband/hw/nes/nes_cm.c index 33c7eedaba6c..e74cdf9ef471 100644 --- a/trunk/drivers/infiniband/hw/nes/nes_cm.c +++ b/trunk/drivers/infiniband/hw/nes/nes_cm.c @@ -2563,7 +2563,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) u16 last_ae; u8 original_hw_tcp_state; u8 original_ibqp_state; - enum iw_cm_event_status disconn_status = IW_CM_EVENT_STATUS_OK; + int disconn_status = 0; int issue_disconn = 0; int issue_close = 0; int issue_flush = 0; @@ -2605,7 +2605,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { issue_disconn = 1; if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) - disconn_status = IW_CM_EVENT_STATUS_RESET; + disconn_status = -ECONNRESET; } if (((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) || @@ -2666,7 +2666,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) cm_id->provider_data = nesqp; /* Send up the close complete event */ cm_event.event = IW_CM_EVENT_CLOSE; - cm_event.status = IW_CM_EVENT_STATUS_OK; + cm_event.status = 0; cm_event.provider_data = cm_id->provider_data; cm_event.local_addr = cm_id->local_addr; cm_event.remote_addr = cm_id->remote_addr; @@ -2966,7 +2966,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) nes_add_ref(&nesqp->ibqp); cm_event.event = IW_CM_EVENT_ESTABLISHED; - cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED; + cm_event.status = 0; 
cm_event.provider_data = (void *)nesqp; cm_event.local_addr = cm_id->local_addr; cm_event.remote_addr = cm_id->remote_addr; @@ -3377,7 +3377,7 @@ static void cm_event_connected(struct nes_cm_event *event) /* notify OF layer we successfully created the requested connection */ cm_event.event = IW_CM_EVENT_CONNECT_REPLY; - cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED; + cm_event.status = 0; cm_event.provider_data = cm_id->provider_data; cm_event.local_addr.sin_family = AF_INET; cm_event.local_addr.sin_port = cm_id->local_addr.sin_port; @@ -3484,7 +3484,7 @@ static void cm_event_reset(struct nes_cm_event *event) nesqp->cm_id = NULL; /* cm_id->provider_data = NULL; */ cm_event.event = IW_CM_EVENT_DISCONNECT; - cm_event.status = IW_CM_EVENT_STATUS_RESET; + cm_event.status = -ECONNRESET; cm_event.provider_data = cm_id->provider_data; cm_event.local_addr = cm_id->local_addr; cm_event.remote_addr = cm_id->remote_addr; @@ -3495,7 +3495,7 @@ static void cm_event_reset(struct nes_cm_event *event) ret = cm_id->event_handler(cm_id, &cm_event); atomic_inc(&cm_closes); cm_event.event = IW_CM_EVENT_CLOSE; - cm_event.status = IW_CM_EVENT_STATUS_OK; + cm_event.status = 0; cm_event.provider_data = cm_id->provider_data; cm_event.local_addr = cm_id->local_addr; cm_event.remote_addr = cm_id->remote_addr; @@ -3534,7 +3534,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event) cm_node, cm_id, jiffies); cm_event.event = IW_CM_EVENT_CONNECT_REQUEST; - cm_event.status = IW_CM_EVENT_STATUS_OK; + cm_event.status = 0; cm_event.provider_data = (void *)cm_node; cm_event.local_addr.sin_family = AF_INET; diff --git a/trunk/drivers/infiniband/hw/nes/nes_verbs.c b/trunk/drivers/infiniband/hw/nes/nes_verbs.c index 26d8018c0a7c..95ca93ceedac 100644 --- a/trunk/drivers/infiniband/hw/nes/nes_verbs.c +++ b/trunk/drivers/infiniband/hw/nes/nes_verbs.c @@ -1484,7 +1484,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp) (nesqp->ibqp_state == IB_QPS_RTR)) && (nesqp->cm_id)) { cm_id = nesqp->cm_id; cm_event.event = IW_CM_EVENT_CONNECT_REPLY; - cm_event.status = IW_CM_EVENT_STATUS_TIMEOUT; + cm_event.status = -ETIMEDOUT; cm_event.local_addr = cm_id->local_addr; cm_event.remote_addr = cm_id->remote_addr; cm_event.private_data = NULL; diff --git a/trunk/drivers/infiniband/hw/qib/qib_iba6120.c b/trunk/drivers/infiniband/hw/qib/qib_iba6120.c index 7de4b7ebffc5..d8ca0a0b970d 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/trunk/drivers/infiniband/hw/qib/qib_iba6120.c @@ -1799,7 +1799,7 @@ static int qib_6120_setup_reset(struct qib_devdata *dd) /* * Keep chip from being accessed until we are ready. Use * writeq() directly, to allow the write even though QIB_PRESENT - * isn't' set. + * isn't set. */ dd->flags &= ~(QIB_INITTED | QIB_PRESENT); dd->int_counter = 0; /* so we check interrupts work again */ diff --git a/trunk/drivers/infiniband/hw/qib/qib_iba7220.c b/trunk/drivers/infiniband/hw/qib/qib_iba7220.c index 74fe0360bec7..c765a2eb04cf 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/trunk/drivers/infiniband/hw/qib/qib_iba7220.c @@ -2111,7 +2111,7 @@ static int qib_setup_7220_reset(struct qib_devdata *dd) /* * Keep chip from being accessed until we are ready. Use * writeq() directly, to allow the write even though QIB_PRESENT - * isn't' set. + * isn't set. 
*/ dd->flags &= ~(QIB_INITTED | QIB_PRESENT); dd->int_counter = 0; /* so we check interrupts work again */ diff --git a/trunk/drivers/infiniband/hw/qib/qib_iba7322.c b/trunk/drivers/infiniband/hw/qib/qib_iba7322.c index 55de3cf3441c..9f53e68a096a 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/trunk/drivers/infiniband/hw/qib/qib_iba7322.c @@ -3299,7 +3299,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd) /* * Keep chip from being accessed until we are ready. Use * writeq() directly, to allow the write even though QIB_PRESENT - * isn't' set. + * isn't set. */ dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR); dd->flags |= QIB_DOING_RESET; @@ -7534,7 +7534,8 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd) ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10)); tstart = get_jiffies_64(); while (chan_done && - !time_after64(tstart, tstart + msecs_to_jiffies(500))) { + !time_after64(get_jiffies_64(), + tstart + msecs_to_jiffies(500))) { msleep(20); for (chan = 0; chan < SERDES_CHANS; ++chan) { rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), diff --git a/trunk/drivers/infiniband/hw/qib/qib_pcie.c b/trunk/drivers/infiniband/hw/qib/qib_pcie.c index 48b6674cbc49..891cc2ff5f00 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_pcie.c +++ b/trunk/drivers/infiniband/hw/qib/qib_pcie.c @@ -526,11 +526,8 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd) */ devid = parent->device; if (devid >= 0x25e2 && devid <= 0x25fa) { - u8 rev; - /* 5000 P/V/X/Z */ - pci_read_config_byte(parent, PCI_REVISION_ID, &rev); - if (rev <= 0xb2) + if (parent->revision <= 0xb2) bits = 1U << 10; else bits = 7U << 10; diff --git a/trunk/drivers/input/evdev.c b/trunk/drivers/input/evdev.c index 7f42d3a454d2..88d8e4cb419a 100644 --- a/trunk/drivers/input/evdev.c +++ b/trunk/drivers/input/evdev.c @@ -39,13 +39,13 @@ struct evdev { }; struct evdev_client { - int head; - int tail; + unsigned int head; + unsigned int tail; spinlock_t buffer_lock; /* protects access to buffer, head and tail */ struct fasync_struct *fasync; struct evdev *evdev; struct list_head node; - int bufsize; + unsigned int bufsize; struct input_event buffer[]; }; @@ -55,16 +55,25 @@ static DEFINE_MUTEX(evdev_table_mutex); static void evdev_pass_event(struct evdev_client *client, struct input_event *event) { - /* - * Interrupts are disabled, just acquire the lock. - * Make sure we don't leave with the client buffer - * "empty" by having client->head == client->tail. - */ + /* Interrupts are disabled, just acquire the lock. */ spin_lock(&client->buffer_lock); - do { - client->buffer[client->head++] = *event; - client->head &= client->bufsize - 1; - } while (client->head == client->tail); + + client->buffer[client->head++] = *event; + client->head &= client->bufsize - 1; + + if (unlikely(client->head == client->tail)) { + /* + * This effectively "drops" all unconsumed events, leaving + * EV_SYN/SYN_DROPPED plus the newest event in the queue. 
+ */ + client->tail = (client->head - 2) & (client->bufsize - 1); + + client->buffer[client->tail].time = event->time; + client->buffer[client->tail].type = EV_SYN; + client->buffer[client->tail].code = SYN_DROPPED; + client->buffer[client->tail].value = 0; + } + spin_unlock(&client->buffer_lock); if (event->type == EV_SYN) diff --git a/trunk/drivers/input/input.c b/trunk/drivers/input/input.c index d6e8bd8a851c..ebbceedc92f4 100644 --- a/trunk/drivers/input/input.c +++ b/trunk/drivers/input/input.c @@ -1746,6 +1746,42 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int } EXPORT_SYMBOL(input_set_capability); +static unsigned int input_estimate_events_per_packet(struct input_dev *dev) +{ + int mt_slots; + int i; + unsigned int events; + + if (dev->mtsize) { + mt_slots = dev->mtsize; + } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) { + mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum - + dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1, + clamp(mt_slots, 2, 32); + } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) { + mt_slots = 2; + } else { + mt_slots = 0; + } + + events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */ + + for (i = 0; i < ABS_CNT; i++) { + if (test_bit(i, dev->absbit)) { + if (input_is_mt_axis(i)) + events += mt_slots; + else + events++; + } + } + + for (i = 0; i < REL_CNT; i++) + if (test_bit(i, dev->relbit)) + events++; + + return events; +} + #define INPUT_CLEANSE_BITMASK(dev, type, bits) \ do { \ if (!test_bit(EV_##type, dev->evbit)) \ @@ -1793,6 +1829,10 @@ int input_register_device(struct input_dev *dev) /* Make sure that bitmasks not mentioned in dev->evbit are clean. */ input_cleanse_bitmasks(dev); + if (!dev->hint_events_per_packet) + dev->hint_events_per_packet = + input_estimate_events_per_packet(dev); + /* * If delay and period are pre-set by the driver, then autorepeating * is handled by the driver itself and we don't do it in input.c. 
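The evdev change above makes a queue overflow visible to readers as an EV_SYN/SYN_DROPPED marker instead of silently wrapping the buffer. A minimal userspace sketch (not part of the patch) of how a /dev/input/eventX consumer is expected to react: on SYN_DROPPED it should discard buffered events up to the next SYN_REPORT and then re-query the device state, for example with the EVIOCG* ioctls.

	#include <linux/input.h>
	#include <stdbool.h>
	#include <unistd.h>

	/* Illustrative reader loop for an evdev fd; error handling trimmed. */
	static void example_consume_events(int fd)
	{
		struct input_event ev;
		bool resync = false;

		while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
			if (ev.type == EV_SYN && ev.code == SYN_DROPPED) {
				resync = true;	/* queue overflowed, state is stale */
				continue;
			}
			if (resync) {
				/* skip events until the stream is consistent again */
				if (ev.type == EV_SYN && ev.code == SYN_REPORT)
					resync = false;	/* caller should re-read state here */
				continue;
			}
			/* normal event handling would go here */
		}
	}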
diff --git a/trunk/drivers/input/keyboard/atakbd.c b/trunk/drivers/input/keyboard/atakbd.c index 1839194ea987..10bcd4ae5402 100644 --- a/trunk/drivers/input/keyboard/atakbd.c +++ b/trunk/drivers/input/keyboard/atakbd.c @@ -223,8 +223,9 @@ static int __init atakbd_init(void) return -ENODEV; // need to init core driver if not already done so - if (atari_keyb_init()) - return -ENODEV; + error = atari_keyb_init(); + if (error) + return error; atakbd_dev = input_allocate_device(); if (!atakbd_dev) diff --git a/trunk/drivers/input/keyboard/twl4030_keypad.c b/trunk/drivers/input/keyboard/twl4030_keypad.c index 09bef79d9da1..a26922cf0e84 100644 --- a/trunk/drivers/input/keyboard/twl4030_keypad.c +++ b/trunk/drivers/input/keyboard/twl4030_keypad.c @@ -332,18 +332,20 @@ static int __devinit twl4030_kp_program(struct twl4030_keypad *kp) static int __devinit twl4030_kp_probe(struct platform_device *pdev) { struct twl4030_keypad_data *pdata = pdev->dev.platform_data; - const struct matrix_keymap_data *keymap_data = pdata->keymap_data; + const struct matrix_keymap_data *keymap_data; struct twl4030_keypad *kp; struct input_dev *input; u8 reg; int error; - if (!pdata || !pdata->rows || !pdata->cols || + if (!pdata || !pdata->rows || !pdata->cols || !pdata->keymap_data || pdata->rows > TWL4030_MAX_ROWS || pdata->cols > TWL4030_MAX_COLS) { dev_err(&pdev->dev, "Invalid platform_data\n"); return -EINVAL; } + keymap_data = pdata->keymap_data; + kp = kzalloc(sizeof(*kp), GFP_KERNEL); input = input_allocate_device(); if (!kp || !input) { diff --git a/trunk/drivers/input/misc/xen-kbdfront.c b/trunk/drivers/input/misc/xen-kbdfront.c index 7077f9bf5ead..62bae99424e6 100644 --- a/trunk/drivers/input/misc/xen-kbdfront.c +++ b/trunk/drivers/input/misc/xen-kbdfront.c @@ -303,7 +303,7 @@ static void xenkbd_backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct xenkbd_info *info = dev_get_drvdata(&dev->dev); - int val; + int ret, val; switch (backend_state) { case XenbusStateInitialising: @@ -316,6 +316,17 @@ static void xenkbd_backend_changed(struct xenbus_device *dev, case XenbusStateInitWait: InitWait: + ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend, + "feature-abs-pointer", "%d", &val); + if (ret < 0) + val = 0; + if (val) { + ret = xenbus_printf(XBT_NIL, info->xbdev->nodename, + "request-abs-pointer", "1"); + if (ret) + pr_warning("xenkbd: can't request abs-pointer"); + } + xenbus_switch_state(dev, XenbusStateConnected); break; diff --git a/trunk/drivers/input/mouse/atarimouse.c b/trunk/drivers/input/mouse/atarimouse.c index adf45b3040e9..5c4a692bf73a 100644 --- a/trunk/drivers/input/mouse/atarimouse.c +++ b/trunk/drivers/input/mouse/atarimouse.c @@ -77,15 +77,15 @@ static void atamouse_interrupt(char *buf) #endif /* only relative events get here */ - dx = buf[1]; - dy = -buf[2]; + dx = buf[1]; + dy = buf[2]; input_report_rel(atamouse_dev, REL_X, dx); input_report_rel(atamouse_dev, REL_Y, dy); - input_report_key(atamouse_dev, BTN_LEFT, buttons & 0x1); + input_report_key(atamouse_dev, BTN_LEFT, buttons & 0x4); input_report_key(atamouse_dev, BTN_MIDDLE, buttons & 0x2); - input_report_key(atamouse_dev, BTN_RIGHT, buttons & 0x4); + input_report_key(atamouse_dev, BTN_RIGHT, buttons & 0x1); input_sync(atamouse_dev); @@ -108,7 +108,7 @@ static int atamouse_open(struct input_dev *dev) static void atamouse_close(struct input_dev *dev) { ikbd_mouse_disable(); - atari_mouse_interrupt_hook = NULL; + atari_input_mouse_interrupt_hook = NULL; } static int __init atamouse_init(void) @@ -118,8 
+118,9 @@ static int __init atamouse_init(void) if (!MACH_IS_ATARI || !ATARIHW_PRESENT(ST_MFP)) return -ENODEV; - if (!atari_keyb_init()) - return -ENODEV; + error = atari_keyb_init(); + if (error) + return error; atamouse_dev = input_allocate_device(); if (!atamouse_dev) diff --git a/trunk/drivers/input/touchscreen/ads7846.c b/trunk/drivers/input/touchscreen/ads7846.c index c24946f51256..1de1c19dad30 100644 --- a/trunk/drivers/input/touchscreen/ads7846.c +++ b/trunk/drivers/input/touchscreen/ads7846.c @@ -281,17 +281,24 @@ struct ser_req { u8 command; u8 ref_off; u16 scratch; - __be16 sample; struct spi_message msg; struct spi_transfer xfer[6]; + /* + * DMA (thus cache coherency maintenance) requires the + * transfer buffers to live in their own cache lines. + */ + __be16 sample ____cacheline_aligned; }; struct ads7845_ser_req { u8 command[3]; - u8 pwrdown[3]; - u8 sample[3]; struct spi_message msg; struct spi_transfer xfer[2]; + /* + * DMA (thus cache coherency maintenance) requires the + * transfer buffers to live in their own cache lines. + */ + u8 sample[3] ____cacheline_aligned; }; static int ads7846_read12_ser(struct device *dev, unsigned command) diff --git a/trunk/drivers/input/touchscreen/h3600_ts_input.c b/trunk/drivers/input/touchscreen/h3600_ts_input.c index efa06882de00..45f93d0f5592 100644 --- a/trunk/drivers/input/touchscreen/h3600_ts_input.c +++ b/trunk/drivers/input/touchscreen/h3600_ts_input.c @@ -399,31 +399,34 @@ static int h3600ts_connect(struct serio *serio, struct serio_driver *drv) IRQF_SHARED | IRQF_DISABLED, "h3600_action", &ts->dev)) { printk(KERN_ERR "h3600ts.c: Could not allocate Action Button IRQ!\n"); err = -EBUSY; - goto fail2; + goto fail1; } if (request_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, npower_button_handler, IRQF_SHARED | IRQF_DISABLED, "h3600_suspend", &ts->dev)) { printk(KERN_ERR "h3600ts.c: Could not allocate Power Button IRQ!\n"); err = -EBUSY; - goto fail3; + goto fail2; } serio_set_drvdata(serio, ts); err = serio_open(serio, drv); if (err) - return err; + goto fail3; //h3600_flite_control(1, 25); /* default brightness */ - input_register_device(ts->dev); + err = input_register_device(ts->dev); + if (err) + goto fail4; return 0; -fail3: free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev); +fail4: serio_close(serio); +fail3: serio_set_drvdata(serio, NULL); + free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev); fail2: free_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, ts->dev); -fail1: serio_set_drvdata(serio, NULL); - input_free_device(input_dev); +fail1: input_free_device(input_dev); kfree(ts); return err; } diff --git a/trunk/drivers/input/touchscreen/wm831x-ts.c b/trunk/drivers/input/touchscreen/wm831x-ts.c index 6ae054f8e0aa..9175d49d2546 100644 --- a/trunk/drivers/input/touchscreen/wm831x-ts.c +++ b/trunk/drivers/input/touchscreen/wm831x-ts.c @@ -68,8 +68,23 @@ struct wm831x_ts { unsigned int pd_irq; bool pressure; bool pen_down; + struct work_struct pd_data_work; }; +static void wm831x_pd_data_work(struct work_struct *work) +{ + struct wm831x_ts *wm831x_ts = + container_of(work, struct wm831x_ts, pd_data_work); + + if (wm831x_ts->pen_down) { + enable_irq(wm831x_ts->data_irq); + dev_dbg(wm831x_ts->wm831x->dev, "IRQ PD->DATA done\n"); + } else { + enable_irq(wm831x_ts->pd_irq); + dev_dbg(wm831x_ts->wm831x->dev, "IRQ DATA->PD done\n"); + } +} + static irqreturn_t wm831x_ts_data_irq(int irq, void *irq_data) { struct wm831x_ts *wm831x_ts = irq_data; @@ -110,6 +125,9 @@ static irqreturn_t wm831x_ts_data_irq(int irq, void *irq_data) } if (!wm831x_ts->pen_down) { + /* 
Switch from data to pen down */ + dev_dbg(wm831x->dev, "IRQ DATA->PD\n"); + disable_irq_nosync(wm831x_ts->data_irq); /* Don't need data any more */ @@ -128,6 +146,10 @@ static irqreturn_t wm831x_ts_data_irq(int irq, void *irq_data) ABS_PRESSURE, 0); input_report_key(wm831x_ts->input_dev, BTN_TOUCH, 0); + + schedule_work(&wm831x_ts->pd_data_work); + } else { + input_report_key(wm831x_ts->input_dev, BTN_TOUCH, 1); } input_sync(wm831x_ts->input_dev); @@ -141,6 +163,11 @@ static irqreturn_t wm831x_ts_pen_down_irq(int irq, void *irq_data) struct wm831x *wm831x = wm831x_ts->wm831x; int ena = 0; + if (wm831x_ts->pen_down) + return IRQ_HANDLED; + + disable_irq_nosync(wm831x_ts->pd_irq); + /* Start collecting data */ if (wm831x_ts->pressure) ena |= WM831X_TCH_Z_ENA; @@ -149,14 +176,14 @@ static irqreturn_t wm831x_ts_pen_down_irq(int irq, void *irq_data) WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA | WM831X_TCH_Z_ENA, WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA | ena); - input_report_key(wm831x_ts->input_dev, BTN_TOUCH, 1); - input_sync(wm831x_ts->input_dev); - wm831x_set_bits(wm831x, WM831X_INTERRUPT_STATUS_1, WM831X_TCHPD_EINT, WM831X_TCHPD_EINT); wm831x_ts->pen_down = true; - enable_irq(wm831x_ts->data_irq); + + /* Switch from pen down to data */ + dev_dbg(wm831x->dev, "IRQ PD->DATA\n"); + schedule_work(&wm831x_ts->pd_data_work); return IRQ_HANDLED; } @@ -182,13 +209,28 @@ static void wm831x_ts_input_close(struct input_dev *idev) struct wm831x_ts *wm831x_ts = input_get_drvdata(idev); struct wm831x *wm831x = wm831x_ts->wm831x; + /* Shut the controller down, disabling all other functionality too */ wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1, - WM831X_TCH_ENA | WM831X_TCH_CVT_ENA | - WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA | - WM831X_TCH_Z_ENA, 0); + WM831X_TCH_ENA | WM831X_TCH_X_ENA | + WM831X_TCH_Y_ENA | WM831X_TCH_Z_ENA, 0); - if (wm831x_ts->pen_down) + /* Make sure any pending IRQs are done, the above will prevent + * new ones firing. + */ + synchronize_irq(wm831x_ts->data_irq); + synchronize_irq(wm831x_ts->pd_irq); + + /* Make sure the IRQ completion work is quiesced */ + flush_work_sync(&wm831x_ts->pd_data_work); + + /* If we ended up with the pen down then make sure we revert back + * to pen detection state for the next time we start up. 
+ */ + if (wm831x_ts->pen_down) { disable_irq(wm831x_ts->data_irq); + enable_irq(wm831x_ts->pd_irq); + wm831x_ts->pen_down = false; + } } static __devinit int wm831x_ts_probe(struct platform_device *pdev) @@ -198,7 +240,7 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev) struct wm831x_pdata *core_pdata = dev_get_platdata(pdev->dev.parent); struct wm831x_touch_pdata *pdata = NULL; struct input_dev *input_dev; - int error; + int error, irqf; if (core_pdata) pdata = core_pdata->touch; @@ -212,6 +254,7 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev) wm831x_ts->wm831x = wm831x; wm831x_ts->input_dev = input_dev; + INIT_WORK(&wm831x_ts->pd_data_work, wm831x_pd_data_work); /* * If we have a direct IRQ use it, otherwise use the interrupt @@ -270,9 +313,14 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev) wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1, WM831X_TCH_RATE_MASK, 6); + if (pdata && pdata->data_irqf) + irqf = pdata->data_irqf; + else + irqf = IRQF_TRIGGER_HIGH; + error = request_threaded_irq(wm831x_ts->data_irq, NULL, wm831x_ts_data_irq, - IRQF_ONESHOT, + irqf | IRQF_ONESHOT, "Touchscreen data", wm831x_ts); if (error) { dev_err(&pdev->dev, "Failed to request data IRQ %d: %d\n", @@ -281,9 +329,14 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev) } disable_irq(wm831x_ts->data_irq); + if (pdata && pdata->pd_irqf) + irqf = pdata->pd_irqf; + else + irqf = IRQF_TRIGGER_HIGH; + error = request_threaded_irq(wm831x_ts->pd_irq, NULL, wm831x_ts_pen_down_irq, - IRQF_ONESHOT, + irqf | IRQF_ONESHOT, "Touchscreen pen down", wm831x_ts); if (error) { dev_err(&pdev->dev, "Failed to request pen down IRQ %d: %d\n", diff --git a/trunk/drivers/leds/leds-lm3530.c b/trunk/drivers/leds/leds-lm3530.c index e7089a1f6cb6..b37e6186d0fa 100644 --- a/trunk/drivers/leds/leds-lm3530.c +++ b/trunk/drivers/leds/leds-lm3530.c @@ -349,6 +349,7 @@ static const struct i2c_device_id lm3530_id[] = { {LM3530_NAME, 0}, {} }; +MODULE_DEVICE_TABLE(i2c, lm3530_id); static struct i2c_driver lm3530_i2c_driver = { .probe = lm3530_probe, diff --git a/trunk/drivers/leds/leds-regulator.c b/trunk/drivers/leds/leds-regulator.c index 3790816643be..8497f56f8e46 100644 --- a/trunk/drivers/leds/leds-regulator.c +++ b/trunk/drivers/leds/leds-regulator.c @@ -178,6 +178,10 @@ static int __devinit regulator_led_probe(struct platform_device *pdev) led->cdev.flags |= LED_CORE_SUSPENDRESUME; led->vcc = vcc; + /* to handle correctly an already enabled regulator */ + if (regulator_is_enabled(led->vcc)) + led->enabled = 1; + mutex_init(&led->mutex); INIT_WORK(&led->work, led_work); diff --git a/trunk/drivers/lguest/Kconfig b/trunk/drivers/lguest/Kconfig index 0aaa0597a622..34ae49dc557c 100644 --- a/trunk/drivers/lguest/Kconfig +++ b/trunk/drivers/lguest/Kconfig @@ -5,8 +5,10 @@ config LGUEST ---help--- This is a very simple module which allows you to run multiple instances of the same Linux kernel, using the - "lguest" command found in the Documentation/lguest directory. + "lguest" command found in the Documentation/virtual/lguest + directory. + Note that "lguest" is pronounced to rhyme with "fell quest", - not "rustyvisor". See Documentation/lguest/lguest.txt. + not "rustyvisor". See Documentation/virtual/lguest/lguest.txt. If unsure, say N. If curious, say M. If masochistic, say Y. 
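Among the hunks above, the leds-regulator fix initialises led->enabled from regulator_is_enabled() so that a supply already switched on (for example by the bootloader) is reflected in the driver's bookkeeping and later enable/disable calls stay balanced with the regulator core's reference counting. A hedged sketch of that probe-time pattern, using a made-up structure rather than the driver's real one:

	#include <linux/regulator/consumer.h>

	struct example_led {
		struct regulator *vcc;
		int enabled;
	};

	/* Sketch only: mirror the hardware state into the driver's cached
	 * state at probe time, as the leds-regulator hunk above does. */
	static void example_sync_initial_state(struct example_led *led)
	{
		led->enabled = regulator_is_enabled(led->vcc) > 0;
	}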
diff --git a/trunk/drivers/lguest/Makefile b/trunk/drivers/lguest/Makefile index 7d463c26124f..8ac947c7e7c7 100644 --- a/trunk/drivers/lguest/Makefile +++ b/trunk/drivers/lguest/Makefile @@ -18,7 +18,7 @@ Mastery: PREFIX=M Beer: @for f in Preparation Guest Drivers Launcher Host Switcher Mastery; do echo "{==- $$f -==}"; make -s $$f; done; echo "{==-==}" Preparation Preparation! Guest Drivers Launcher Host Switcher Mastery: - @sh ../../Documentation/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'` + @sh ../../Documentation/virtual/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'` Puppy: @clear @printf " __ \n (___()'\`;\n /, /\`\n \\\\\\\"--\\\\\\ \n" diff --git a/trunk/drivers/macintosh/via-pmu.c b/trunk/drivers/macintosh/via-pmu.c index 8b021eb0d48c..6cccd60c594e 100644 --- a/trunk/drivers/macintosh/via-pmu.c +++ b/trunk/drivers/macintosh/via-pmu.c @@ -40,7 +40,7 @@ #include #include #include -#include +#include #include #include #include @@ -2527,12 +2527,9 @@ void pmu_blink(int n) #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) int pmu_sys_suspended; -static int pmu_sys_suspend(struct sys_device *sysdev, pm_message_t state) +static int pmu_syscore_suspend(void) { - if (state.event != PM_EVENT_SUSPEND || pmu_sys_suspended) - return 0; - - /* Suspend PMU event interrupts */\ + /* Suspend PMU event interrupts */ pmu_suspend(); pmu_sys_suspended = 1; @@ -2544,12 +2541,12 @@ static int pmu_sys_suspend(struct sys_device *sysdev, pm_message_t state) return 0; } -static int pmu_sys_resume(struct sys_device *sysdev) +static void pmu_syscore_resume(void) { struct adb_request req; if (!pmu_sys_suspended) - return 0; + return; /* Tell PMU we are ready */ pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2); @@ -2562,50 +2559,21 @@ static int pmu_sys_resume(struct sys_device *sysdev) /* Resume PMU event interrupts */ pmu_resume(); pmu_sys_suspended = 0; - - return 0; } -#endif /* CONFIG_SUSPEND && CONFIG_PPC32 */ - -static struct sysdev_class pmu_sysclass = { - .name = "pmu", -}; - -static struct sys_device device_pmu = { - .cls = &pmu_sysclass, -}; - -static struct sysdev_driver driver_pmu = { -#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) - .suspend = &pmu_sys_suspend, - .resume = &pmu_sys_resume, -#endif /* CONFIG_SUSPEND && CONFIG_PPC32 */ +static struct syscore_ops pmu_syscore_ops = { + .suspend = pmu_syscore_suspend, + .resume = pmu_syscore_resume, }; -static int __init init_pmu_sysfs(void) +static int pmu_syscore_register(void) { - int rc; + register_syscore_ops(&pmu_syscore_ops); - rc = sysdev_class_register(&pmu_sysclass); - if (rc) { - printk(KERN_ERR "Failed registering PMU sys class\n"); - return -ENODEV; - } - rc = sysdev_register(&device_pmu); - if (rc) { - printk(KERN_ERR "Failed registering PMU sys device\n"); - return -ENODEV; - } - rc = sysdev_driver_register(&pmu_sysclass, &driver_pmu); - if (rc) { - printk(KERN_ERR "Failed registering PMU sys driver\n"); - return -ENODEV; - } return 0; } - -subsys_initcall(init_pmu_sysfs); +subsys_initcall(pmu_syscore_register); +#endif /* CONFIG_SUSPEND && CONFIG_PPC32 */ EXPORT_SYMBOL(pmu_request); EXPORT_SYMBOL(pmu_queue_request); diff --git a/trunk/drivers/md/dm-raid.c b/trunk/drivers/md/dm-raid.c index 5ef136cdba91..e5d8904fc8f6 100644 --- a/trunk/drivers/md/dm-raid.c +++ b/trunk/drivers/md/dm-raid.c @@ -390,13 +390,6 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits) return md_raid5_congested(&rs->md, bits); } -static void raid_unplug(struct 
dm_target_callbacks *cb) -{ - struct raid_set *rs = container_of(cb, struct raid_set, callbacks); - - md_raid5_kick_device(rs->md.private); -} - /* * Construct a RAID4/5/6 mapping: * Args: @@ -487,7 +480,6 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv) } rs->callbacks.congested_fn = raid_is_congested; - rs->callbacks.unplug_fn = raid_unplug; dm_table_add_target_callbacks(ti->table, &rs->callbacks); return 0; diff --git a/trunk/drivers/md/md.c b/trunk/drivers/md/md.c index b12b3776c0c0..7d6f7f18a920 100644 --- a/trunk/drivers/md/md.c +++ b/trunk/drivers/md/md.c @@ -447,48 +447,59 @@ EXPORT_SYMBOL(md_flush_request); /* Support for plugging. * This mirrors the plugging support in request_queue, but does not - * require having a whole queue + * require having a whole queue or request structures. + * We allocate an md_plug_cb for each md device and each thread it gets + * plugged on. This links to the private plug_handle structure in the + * personality data where we keep a count of the number of outstanding + * plugs so other code can see if a plug is active. */ -static void plugger_work(struct work_struct *work) -{ - struct plug_handle *plug = - container_of(work, struct plug_handle, unplug_work); - plug->unplug_fn(plug); -} -static void plugger_timeout(unsigned long data) -{ - struct plug_handle *plug = (void *)data; - kblockd_schedule_work(NULL, &plug->unplug_work); -} -void plugger_init(struct plug_handle *plug, - void (*unplug_fn)(struct plug_handle *)) -{ - plug->unplug_flag = 0; - plug->unplug_fn = unplug_fn; - init_timer(&plug->unplug_timer); - plug->unplug_timer.function = plugger_timeout; - plug->unplug_timer.data = (unsigned long)plug; - INIT_WORK(&plug->unplug_work, plugger_work); -} -EXPORT_SYMBOL_GPL(plugger_init); +struct md_plug_cb { + struct blk_plug_cb cb; + mddev_t *mddev; +}; -void plugger_set_plug(struct plug_handle *plug) +static void plugger_unplug(struct blk_plug_cb *cb) { - if (!test_and_set_bit(PLUGGED_FLAG, &plug->unplug_flag)) - mod_timer(&plug->unplug_timer, jiffies + msecs_to_jiffies(3)+1); + struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb); + if (atomic_dec_and_test(&mdcb->mddev->plug_cnt)) + md_wakeup_thread(mdcb->mddev->thread); + kfree(mdcb); } -EXPORT_SYMBOL_GPL(plugger_set_plug); -int plugger_remove_plug(struct plug_handle *plug) +/* Check that an unplug wakeup will come shortly.
+ * If not, wakeup the md thread immediately + */ +int mddev_check_plugged(mddev_t *mddev) { - if (test_and_clear_bit(PLUGGED_FLAG, &plug->unplug_flag)) { - del_timer(&plug->unplug_timer); - return 1; - } else + struct blk_plug *plug = current->plug; + struct md_plug_cb *mdcb; + + if (!plug) return 0; -} -EXPORT_SYMBOL_GPL(plugger_remove_plug); + list_for_each_entry(mdcb, &plug->cb_list, cb.list) { + if (mdcb->cb.callback == plugger_unplug && + mdcb->mddev == mddev) { + /* Already on the list, move to top */ + if (mdcb != list_first_entry(&plug->cb_list, + struct md_plug_cb, + cb.list)) + list_move(&mdcb->cb.list, &plug->cb_list); + return 1; + } + } + /* Not currently on the callback list */ + mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC); + if (!mdcb) + return 0; + + mdcb->mddev = mddev; + mdcb->cb.callback = plugger_unplug; + atomic_inc(&mddev->plug_cnt); + list_add(&mdcb->cb.list, &plug->cb_list); + return 1; +} +EXPORT_SYMBOL_GPL(mddev_check_plugged); static inline mddev_t *mddev_get(mddev_t *mddev) { @@ -538,6 +549,7 @@ void mddev_init(mddev_t *mddev) atomic_set(&mddev->active, 1); atomic_set(&mddev->openers, 0); atomic_set(&mddev->active_io, 0); + atomic_set(&mddev->plug_cnt, 0); spin_lock_init(&mddev->write_lock); atomic_set(&mddev->flush_pending, 0); init_waitqueue_head(&mddev->sb_wait); @@ -3158,6 +3170,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len) mddev->layout = mddev->new_layout; mddev->chunk_sectors = mddev->new_chunk_sectors; mddev->delta_disks = 0; + mddev->degraded = 0; if (mddev->pers->sync_request == NULL) { /* this is now an array without redundancy, so * it must always be in_sync @@ -4723,7 +4736,6 @@ static void md_clean(mddev_t *mddev) mddev->bitmap_info.chunksize = 0; mddev->bitmap_info.daemon_sleep = 0; mddev->bitmap_info.max_write_behind = 0; - mddev->plug = NULL; } static void __md_stop_writes(mddev_t *mddev) @@ -6688,12 +6700,6 @@ int md_allow_write(mddev_t *mddev) } EXPORT_SYMBOL_GPL(md_allow_write); -void md_unplug(mddev_t *mddev) -{ - if (mddev->plug) - mddev->plug->unplug_fn(mddev->plug); -} - #define SYNC_MARKS 10 #define SYNC_MARK_STEP (3*HZ) void md_do_sync(mddev_t *mddev) diff --git a/trunk/drivers/md/md.h b/trunk/drivers/md/md.h index 52b407369e13..0b1fd3f1d85b 100644 --- a/trunk/drivers/md/md.h +++ b/trunk/drivers/md/md.h @@ -29,26 +29,6 @@ typedef struct mddev_s mddev_t; typedef struct mdk_rdev_s mdk_rdev_t; -/* generic plugging support - like that provided with request_queue, - * but does not require a request_queue - */ -struct plug_handle { - void (*unplug_fn)(struct plug_handle *); - struct timer_list unplug_timer; - struct work_struct unplug_work; - unsigned long unplug_flag; -}; -#define PLUGGED_FLAG 1 -void plugger_init(struct plug_handle *plug, - void (*unplug_fn)(struct plug_handle *)); -void plugger_set_plug(struct plug_handle *plug); -int plugger_remove_plug(struct plug_handle *plug); -static inline void plugger_flush(struct plug_handle *plug) -{ - del_timer_sync(&plug->unplug_timer); - cancel_work_sync(&plug->unplug_work); -} - /* * MD's 'extended' device */ @@ -199,6 +179,9 @@ struct mddev_s int delta_disks, new_level, new_layout; int new_chunk_sectors; + atomic_t plug_cnt; /* If device is expecting + * more bios soon. 
+ */ struct mdk_thread_s *thread; /* management thread */ struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */ sector_t curr_resync; /* last block scheduled */ @@ -336,7 +319,6 @@ struct mddev_s struct list_head all_mddevs; struct attribute_group *to_remove; - struct plug_handle *plug; /* if used by personality */ struct bio_set *bio_set; @@ -516,7 +498,6 @@ extern int md_integrity_register(mddev_t *mddev); extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev); extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale); extern void restore_bitmap_write_access(struct file *file); -extern void md_unplug(mddev_t *mddev); extern void mddev_init(mddev_t *mddev); extern int md_run(mddev_t *mddev); @@ -530,4 +511,5 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask, mddev_t *mddev); extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, mddev_t *mddev); +extern int mddev_check_plugged(mddev_t *mddev); #endif /* _MD_MD_H */ diff --git a/trunk/drivers/md/raid1.c b/trunk/drivers/md/raid1.c index c2a21ae56d97..2b7a7ff401dc 100644 --- a/trunk/drivers/md/raid1.c +++ b/trunk/drivers/md/raid1.c @@ -565,12 +565,6 @@ static void flush_pending_writes(conf_t *conf) spin_unlock_irq(&conf->device_lock); } -static void md_kick_device(mddev_t *mddev) -{ - blk_flush_plug(current); - md_wakeup_thread(mddev->thread); -} - /* Barriers.... * Sometimes we need to suspend IO while we do something else, * either some resync/recovery, or reconfigure the array. @@ -600,7 +594,7 @@ static void raise_barrier(conf_t *conf) /* Wait until no block IO is waiting */ wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting, - conf->resync_lock, md_kick_device(conf->mddev)); + conf->resync_lock, ); /* block any new IO from starting */ conf->barrier++; @@ -608,7 +602,7 @@ static void raise_barrier(conf_t *conf) /* Now wait for all pending IO to complete */ wait_event_lock_irq(conf->wait_barrier, !conf->nr_pending && conf->barrier < RESYNC_DEPTH, - conf->resync_lock, md_kick_device(conf->mddev)); + conf->resync_lock, ); spin_unlock_irq(&conf->resync_lock); } @@ -630,7 +624,7 @@ static void wait_barrier(conf_t *conf) conf->nr_waiting++; wait_event_lock_irq(conf->wait_barrier, !conf->barrier, conf->resync_lock, - md_kick_device(conf->mddev)); + ); conf->nr_waiting--; } conf->nr_pending++; @@ -666,8 +660,7 @@ static void freeze_array(conf_t *conf) wait_event_lock_irq(conf->wait_barrier, conf->nr_pending == conf->nr_queued+1, conf->resync_lock, - ({ flush_pending_writes(conf); - md_kick_device(conf->mddev); })); + flush_pending_writes(conf)); spin_unlock_irq(&conf->resync_lock); } static void unfreeze_array(conf_t *conf) @@ -729,6 +722,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); mdk_rdev_t *blocked_rdev; + int plugged; /* * Register the new request and wait if the reconstruction @@ -820,6 +814,8 @@ static int make_request(mddev_t *mddev, struct bio * bio) * inc refcount on their rdev. 
Record them by setting * bios[x] to bio */ + plugged = mddev_check_plugged(mddev); + disks = conf->raid_disks; retry_write: blocked_rdev = NULL; @@ -925,7 +921,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) /* In case raid1d snuck in to freeze_array */ wake_up(&conf->wait_barrier); - if (do_sync || !bitmap) + if (do_sync || !bitmap || !plugged) md_wakeup_thread(mddev->thread); return 0; @@ -1516,13 +1512,16 @@ static void raid1d(mddev_t *mddev) conf_t *conf = mddev->private; struct list_head *head = &conf->retry_list; mdk_rdev_t *rdev; + struct blk_plug plug; md_check_recovery(mddev); - + + blk_start_plug(&plug); for (;;) { char b[BDEVNAME_SIZE]; - flush_pending_writes(conf); + if (atomic_read(&mddev->plug_cnt) == 0) + flush_pending_writes(conf); spin_lock_irqsave(&conf->device_lock, flags); if (list_empty(head)) { @@ -1593,6 +1592,7 @@ static void raid1d(mddev_t *mddev) } cond_resched(); } + blk_finish_plug(&plug); } @@ -2039,7 +2039,6 @@ static int stop(mddev_t *mddev) md_unregister_thread(mddev->thread); mddev->thread = NULL; - blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ if (conf->r1bio_pool) mempool_destroy(conf->r1bio_pool); kfree(conf->mirrors); diff --git a/trunk/drivers/md/raid10.c b/trunk/drivers/md/raid10.c index 2da83d566592..8e9462626ec5 100644 --- a/trunk/drivers/md/raid10.c +++ b/trunk/drivers/md/raid10.c @@ -634,12 +634,6 @@ static void flush_pending_writes(conf_t *conf) spin_unlock_irq(&conf->device_lock); } -static void md_kick_device(mddev_t *mddev) -{ - blk_flush_plug(current); - md_wakeup_thread(mddev->thread); -} - /* Barriers.... * Sometimes we need to suspend IO while we do something else, * either some resync/recovery, or reconfigure the array. @@ -669,15 +663,15 @@ static void raise_barrier(conf_t *conf, int force) /* Wait until no block IO is waiting (unless 'force') */ wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting, - conf->resync_lock, md_kick_device(conf->mddev)); + conf->resync_lock, ); /* block any new IO from starting */ conf->barrier++; - /* No wait for all pending IO to complete */ + /* Now wait for all pending IO to complete */ wait_event_lock_irq(conf->wait_barrier, !conf->nr_pending && conf->barrier < RESYNC_DEPTH, - conf->resync_lock, md_kick_device(conf->mddev)); + conf->resync_lock, ); spin_unlock_irq(&conf->resync_lock); } @@ -698,7 +692,7 @@ static void wait_barrier(conf_t *conf) conf->nr_waiting++; wait_event_lock_irq(conf->wait_barrier, !conf->barrier, conf->resync_lock, - md_kick_device(conf->mddev)); + ); conf->nr_waiting--; } conf->nr_pending++; @@ -734,8 +728,8 @@ static void freeze_array(conf_t *conf) wait_event_lock_irq(conf->wait_barrier, conf->nr_pending == conf->nr_queued+1, conf->resync_lock, - ({ flush_pending_writes(conf); - md_kick_device(conf->mddev); })); + flush_pending_writes(conf)); + spin_unlock_irq(&conf->resync_lock); } @@ -762,6 +756,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) const unsigned long do_fua = (bio->bi_rw & REQ_FUA); unsigned long flags; mdk_rdev_t *blocked_rdev; + int plugged; if (unlikely(bio->bi_rw & REQ_FLUSH)) { md_flush_request(mddev, bio); @@ -870,6 +865,8 @@ static int make_request(mddev_t *mddev, struct bio * bio) * inc refcount on their rdev. 
Record them by setting * bios[x] to bio */ + plugged = mddev_check_plugged(mddev); + raid10_find_phys(conf, r10_bio); retry_write: blocked_rdev = NULL; @@ -946,9 +943,8 @@ static int make_request(mddev_t *mddev, struct bio * bio) /* In case raid10d snuck in to freeze_array */ wake_up(&conf->wait_barrier); - if (do_sync || !mddev->bitmap) + if (do_sync || !mddev->bitmap || !plugged) md_wakeup_thread(mddev->thread); - return 0; } @@ -1640,9 +1636,11 @@ static void raid10d(mddev_t *mddev) conf_t *conf = mddev->private; struct list_head *head = &conf->retry_list; mdk_rdev_t *rdev; + struct blk_plug plug; md_check_recovery(mddev); + blk_start_plug(&plug); for (;;) { char b[BDEVNAME_SIZE]; @@ -1716,6 +1714,7 @@ static void raid10d(mddev_t *mddev) } cond_resched(); } + blk_finish_plug(&plug); } diff --git a/trunk/drivers/md/raid5.c b/trunk/drivers/md/raid5.c index e867ee42b152..49bf5f891435 100644 --- a/trunk/drivers/md/raid5.c +++ b/trunk/drivers/md/raid5.c @@ -27,12 +27,12 @@ * * We group bitmap updates into batches. Each batch has a number. * We may write out several batches at once, but that isn't very important. - * conf->bm_write is the number of the last batch successfully written. - * conf->bm_flush is the number of the last batch that was closed to + * conf->seq_write is the number of the last batch successfully written. + * conf->seq_flush is the number of the last batch that was closed to * new additions. * When we discover that we will need to write to any block in a stripe * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq - * the number of the batch it will be in. This is bm_flush+1. + * the number of the batch it will be in. This is seq_flush+1. * When we are ready to do a write, if that batch hasn't been written yet, * we plug the array and queue the stripe for later. 
* When an unplug happens, we increment bm_flush, thus closing the current @@ -199,14 +199,12 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh) BUG_ON(!list_empty(&sh->lru)); BUG_ON(atomic_read(&conf->active_stripes)==0); if (test_bit(STRIPE_HANDLE, &sh->state)) { - if (test_bit(STRIPE_DELAYED, &sh->state)) { + if (test_bit(STRIPE_DELAYED, &sh->state)) list_add_tail(&sh->lru, &conf->delayed_list); - plugger_set_plug(&conf->plug); - } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && - sh->bm_seq - conf->seq_write > 0) { + else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && + sh->bm_seq - conf->seq_write > 0) list_add_tail(&sh->lru, &conf->bitmap_list); - plugger_set_plug(&conf->plug); - } else { + else { clear_bit(STRIPE_BIT_DELAY, &sh->state); list_add_tail(&sh->lru, &conf->handle_list); } @@ -461,7 +459,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector, < (conf->max_nr_stripes *3/4) || !conf->inactive_blocked), conf->device_lock, - md_raid5_kick_device(conf)); + ); conf->inactive_blocked = 0; } else init_stripe(sh, sector, previous); @@ -1470,7 +1468,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize) wait_event_lock_irq(conf->wait_for_stripe, !list_empty(&conf->inactive_list), conf->device_lock, - blk_flush_plug(current)); + ); osh = get_free_stripe(conf); spin_unlock_irq(&conf->device_lock); atomic_set(&nsh->count, 1); @@ -3623,8 +3621,7 @@ static void raid5_activate_delayed(raid5_conf_t *conf) atomic_inc(&conf->preread_active_stripes); list_add_tail(&sh->lru, &conf->hold_list); } - } else - plugger_set_plug(&conf->plug); + } } static void activate_bit_delay(raid5_conf_t *conf) @@ -3641,21 +3638,6 @@ static void activate_bit_delay(raid5_conf_t *conf) } } -void md_raid5_kick_device(raid5_conf_t *conf) -{ - blk_flush_plug(current); - raid5_activate_delayed(conf); - md_wakeup_thread(conf->mddev->thread); -} -EXPORT_SYMBOL_GPL(md_raid5_kick_device); - -static void raid5_unplug(struct plug_handle *plug) -{ - raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug); - - md_raid5_kick_device(conf); -} - int md_raid5_congested(mddev_t *mddev, int bits) { raid5_conf_t *conf = mddev->private; @@ -3945,6 +3927,7 @@ static int make_request(mddev_t *mddev, struct bio * bi) struct stripe_head *sh; const int rw = bio_data_dir(bi); int remaining; + int plugged; if (unlikely(bi->bi_rw & REQ_FLUSH)) { md_flush_request(mddev, bi); @@ -3963,6 +3946,7 @@ static int make_request(mddev_t *mddev, struct bio * bi) bi->bi_next = NULL; bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ + plugged = mddev_check_plugged(mddev); for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { DEFINE_WAIT(w); int disks, data_disks; @@ -4057,7 +4041,7 @@ static int make_request(mddev_t *mddev, struct bio * bi) * add failed due to overlap. 
Flush everything * and wait a while */ - md_raid5_kick_device(conf); + md_wakeup_thread(mddev->thread); release_stripe(sh); schedule(); goto retry; @@ -4077,6 +4061,9 @@ static int make_request(mddev_t *mddev, struct bio * bi) } } + if (!plugged) + md_wakeup_thread(mddev->thread); + spin_lock_irq(&conf->device_lock); remaining = raid5_dec_bi_phys_segments(bi); spin_unlock_irq(&conf->device_lock); @@ -4478,24 +4465,30 @@ static void raid5d(mddev_t *mddev) struct stripe_head *sh; raid5_conf_t *conf = mddev->private; int handled; + struct blk_plug plug; pr_debug("+++ raid5d active\n"); md_check_recovery(mddev); + blk_start_plug(&plug); handled = 0; spin_lock_irq(&conf->device_lock); while (1) { struct bio *bio; - if (conf->seq_flush != conf->seq_write) { - int seq = conf->seq_flush; + if (atomic_read(&mddev->plug_cnt) == 0 && + !list_empty(&conf->bitmap_list)) { + /* Now is a good time to flush some bitmap updates */ + conf->seq_flush++; spin_unlock_irq(&conf->device_lock); bitmap_unplug(mddev->bitmap); spin_lock_irq(&conf->device_lock); - conf->seq_write = seq; + conf->seq_write = conf->seq_flush; activate_bit_delay(conf); } + if (atomic_read(&mddev->plug_cnt) == 0) + raid5_activate_delayed(conf); while ((bio = remove_bio_from_retry(conf))) { int ok; @@ -4525,6 +4518,7 @@ static void raid5d(mddev_t *mddev) spin_unlock_irq(&conf->device_lock); async_tx_issue_pending_all(); + blk_finish_plug(&plug); pr_debug("--- raid5d inactive\n"); } @@ -5141,8 +5135,6 @@ static int run(mddev_t *mddev) mdname(mddev)); md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); - plugger_init(&conf->plug, raid5_unplug); - mddev->plug = &conf->plug; if (mddev->queue) { int chunk_size; /* read-ahead size must cover two whole stripes, which @@ -5159,7 +5151,6 @@ static int run(mddev_t *mddev) mddev->queue->backing_dev_info.congested_data = mddev; mddev->queue->backing_dev_info.congested_fn = raid5_congested; - mddev->queue->queue_lock = &conf->device_lock; chunk_size = mddev->chunk_sectors << 9; blk_queue_io_min(mddev->queue, chunk_size); @@ -5192,7 +5183,6 @@ static int stop(mddev_t *mddev) mddev->thread = NULL; if (mddev->queue) mddev->queue->backing_dev_info.congested_fn = NULL; - plugger_flush(&conf->plug); /* the unplug fn references 'conf'*/ free_conf(conf); mddev->private = NULL; mddev->to_remove = &raid5_attrs_group; @@ -5688,6 +5678,7 @@ static void raid5_quiesce(mddev_t *mddev, int state) static void *raid45_takeover_raid0(mddev_t *mddev, int level) { struct raid0_private_data *raid0_priv = mddev->private; + sector_t sectors; /* for raid0 takeover only one zone is supported */ if (raid0_priv->nr_strip_zones > 1) { @@ -5696,6 +5687,9 @@ static void *raid45_takeover_raid0(mddev_t *mddev, int level) return ERR_PTR(-EINVAL); } + sectors = raid0_priv->strip_zone[0].zone_end; + sector_div(sectors, raid0_priv->strip_zone[0].nb_dev); + mddev->dev_sectors = sectors; mddev->new_level = level; mddev->new_layout = ALGORITHM_PARITY_N; mddev->new_chunk_sectors = mddev->chunk_sectors; diff --git a/trunk/drivers/md/raid5.h b/trunk/drivers/md/raid5.h index 8d563a4f022a..3ca77a2613ba 100644 --- a/trunk/drivers/md/raid5.h +++ b/trunk/drivers/md/raid5.h @@ -400,8 +400,6 @@ struct raid5_private_data { * Cleared when a sync completes. 
*/ - struct plug_handle plug; - /* per cpu variables */ struct raid5_percpu { struct page *spare_page; /* Used when checking P/Q in raid6 */ diff --git a/trunk/drivers/media/common/tuners/tda18271-common.c b/trunk/drivers/media/common/tuners/tda18271-common.c index 5466d47db899..aae40e52af5b 100644 --- a/trunk/drivers/media/common/tuners/tda18271-common.c +++ b/trunk/drivers/media/common/tuners/tda18271-common.c @@ -533,16 +533,7 @@ int tda18271_calc_main_pll(struct dvb_frontend *fe, u32 freq) if (tda_fail(ret)) goto fail; - regs[R_MPD] = (0x77 & pd); - - switch (priv->mode) { - case TDA18271_ANALOG: - regs[R_MPD] &= ~0x08; - break; - case TDA18271_DIGITAL: - regs[R_MPD] |= 0x08; - break; - } + regs[R_MPD] = (0x7f & pd); div = ((d * (freq / 1000)) << 7) / 125; diff --git a/trunk/drivers/media/common/tuners/tda18271-fe.c b/trunk/drivers/media/common/tuners/tda18271-fe.c index 9ad4454a148d..d884f5eee73c 100644 --- a/trunk/drivers/media/common/tuners/tda18271-fe.c +++ b/trunk/drivers/media/common/tuners/tda18271-fe.c @@ -579,8 +579,8 @@ static int tda18271_rf_tracking_filters_init(struct dvb_frontend *fe, u32 freq) #define RF3 2 u32 rf_default[3]; u32 rf_freq[3]; - u8 prog_cal[3]; - u8 prog_tab[3]; + s32 prog_cal[3]; + s32 prog_tab[3]; i = tda18271_lookup_rf_band(fe, &freq, NULL); @@ -602,32 +602,33 @@ static int tda18271_rf_tracking_filters_init(struct dvb_frontend *fe, u32 freq) return bcal; tda18271_calc_rf_cal(fe, &rf_freq[rf]); - prog_tab[rf] = regs[R_EB14]; + prog_tab[rf] = (s32)regs[R_EB14]; if (1 == bcal) - prog_cal[rf] = tda18271_calibrate_rf(fe, rf_freq[rf]); + prog_cal[rf] = + (s32)tda18271_calibrate_rf(fe, rf_freq[rf]); else prog_cal[rf] = prog_tab[rf]; switch (rf) { case RF1: map[i].rf_a1 = 0; - map[i].rf_b1 = (s32)(prog_cal[RF1] - prog_tab[RF1]); + map[i].rf_b1 = (prog_cal[RF1] - prog_tab[RF1]); map[i].rf1 = rf_freq[RF1] / 1000; break; case RF2: - dividend = (s32)(prog_cal[RF2] - prog_tab[RF2]) - - (s32)(prog_cal[RF1] + prog_tab[RF1]); + dividend = (prog_cal[RF2] - prog_tab[RF2] - + prog_cal[RF1] + prog_tab[RF1]); divisor = (s32)(rf_freq[RF2] - rf_freq[RF1]) / 1000; map[i].rf_a1 = (dividend / divisor); map[i].rf2 = rf_freq[RF2] / 1000; break; case RF3: - dividend = (s32)(prog_cal[RF3] - prog_tab[RF3]) - - (s32)(prog_cal[RF2] + prog_tab[RF2]); + dividend = (prog_cal[RF3] - prog_tab[RF3] - + prog_cal[RF2] + prog_tab[RF2]); divisor = (s32)(rf_freq[RF3] - rf_freq[RF2]) / 1000; map[i].rf_a2 = (dividend / divisor); - map[i].rf_b2 = (s32)(prog_cal[RF2] - prog_tab[RF2]); + map[i].rf_b2 = (prog_cal[RF2] - prog_tab[RF2]); map[i].rf3 = rf_freq[RF3] / 1000; break; default: diff --git a/trunk/drivers/media/common/tuners/tda18271-maps.c b/trunk/drivers/media/common/tuners/tda18271-maps.c index e7f84c705da8..3d5b6ab7e332 100644 --- a/trunk/drivers/media/common/tuners/tda18271-maps.c +++ b/trunk/drivers/media/common/tuners/tda18271-maps.c @@ -229,8 +229,7 @@ static struct tda18271_map tda18271c2_km[] = { static struct tda18271_map tda18271_rf_band[] = { { .rfmax = 47900, .val = 0x00 }, { .rfmax = 61100, .val = 0x01 }, -/* { .rfmax = 152600, .val = 0x02 }, */ - { .rfmax = 121200, .val = 0x02 }, + { .rfmax = 152600, .val = 0x02 }, { .rfmax = 164700, .val = 0x03 }, { .rfmax = 203500, .val = 0x04 }, { .rfmax = 457800, .val = 0x05 }, @@ -448,7 +447,7 @@ static struct tda18271_map tda18271c2_rf_cal[] = { { .rfmax = 150000, .val = 0xb0 }, { .rfmax = 151000, .val = 0xb1 }, { .rfmax = 152000, .val = 0xb7 }, - { .rfmax = 153000, .val = 0xbd }, + { .rfmax = 152600, .val = 0xbd }, { .rfmax = 154000, .val = 
0x20 }, { .rfmax = 155000, .val = 0x22 }, { .rfmax = 156000, .val = 0x24 }, @@ -459,7 +458,7 @@ static struct tda18271_map tda18271c2_rf_cal[] = { { .rfmax = 161000, .val = 0x2d }, { .rfmax = 163000, .val = 0x2e }, { .rfmax = 164000, .val = 0x2f }, - { .rfmax = 165000, .val = 0x30 }, + { .rfmax = 164700, .val = 0x30 }, { .rfmax = 166000, .val = 0x11 }, { .rfmax = 167000, .val = 0x12 }, { .rfmax = 168000, .val = 0x13 }, @@ -510,7 +509,8 @@ static struct tda18271_map tda18271c2_rf_cal[] = { { .rfmax = 236000, .val = 0x1b }, { .rfmax = 237000, .val = 0x1c }, { .rfmax = 240000, .val = 0x1d }, - { .rfmax = 242000, .val = 0x1f }, + { .rfmax = 242000, .val = 0x1e }, + { .rfmax = 244000, .val = 0x1f }, { .rfmax = 247000, .val = 0x20 }, { .rfmax = 249000, .val = 0x21 }, { .rfmax = 252000, .val = 0x22 }, @@ -624,7 +624,7 @@ static struct tda18271_map tda18271c2_rf_cal[] = { { .rfmax = 453000, .val = 0x93 }, { .rfmax = 454000, .val = 0x94 }, { .rfmax = 456000, .val = 0x96 }, - { .rfmax = 457000, .val = 0x98 }, + { .rfmax = 457800, .val = 0x98 }, { .rfmax = 461000, .val = 0x11 }, { .rfmax = 468000, .val = 0x12 }, { .rfmax = 472000, .val = 0x13 }, diff --git a/trunk/drivers/media/dvb/b2c2/flexcop-pci.c b/trunk/drivers/media/dvb/b2c2/flexcop-pci.c index 955254090a0e..03f96d6ca894 100644 --- a/trunk/drivers/media/dvb/b2c2/flexcop-pci.c +++ b/trunk/drivers/media/dvb/b2c2/flexcop-pci.c @@ -38,7 +38,7 @@ MODULE_PARM_DESC(debug, DEBSTATUS); #define DRIVER_VERSION "0.1" -#define DRIVER_NAME "Technisat/B2C2 FlexCop II/IIb/III Digital TV PCI Driver" +#define DRIVER_NAME "flexcop-pci" #define DRIVER_AUTHOR "Patrick Boettcher " struct flexcop_pci { diff --git a/trunk/drivers/media/dvb/dvb-usb/Kconfig b/trunk/drivers/media/dvb/dvb-usb/Kconfig index fe4f894183ff..c545039287ad 100644 --- a/trunk/drivers/media/dvb/dvb-usb/Kconfig +++ b/trunk/drivers/media/dvb/dvb-usb/Kconfig @@ -356,13 +356,15 @@ config DVB_USB_LME2510 select DVB_TDA826X if !DVB_FE_CUSTOMISE select DVB_STV0288 if !DVB_FE_CUSTOMISE select DVB_IX2505V if !DVB_FE_CUSTOMISE + select DVB_STV0299 if !DVB_FE_CUSTOMISE + select DVB_PLL if !DVB_FE_CUSTOMISE help Say Y here to support the LME DM04/QQBOX DVB-S USB2.0 . 
config DVB_USB_TECHNISAT_USB2 tristate "Technisat DVB-S/S2 USB2.0 support" depends on DVB_USB - select DVB_STB0899 if !DVB_FE_CUSTOMISE - select DVB_STB6100 if !DVB_FE_CUSTOMISE + select DVB_STV090x if !DVB_FE_CUSTOMISE + select DVB_STV6110x if !DVB_FE_CUSTOMISE help Say Y here to support the Technisat USB2 DVB-S/S2 device diff --git a/trunk/drivers/media/dvb/dvb-usb/dib0700_devices.c b/trunk/drivers/media/dvb/dvb-usb/dib0700_devices.c index 97af266d7f1d..65214af5cd74 100644 --- a/trunk/drivers/media/dvb/dvb-usb/dib0700_devices.c +++ b/trunk/drivers/media/dvb/dvb-usb/dib0700_devices.c @@ -2162,7 +2162,7 @@ struct dibx000_agc_config dib7090_agc_config[2] = { .agc1_pt3 = 98, .agc1_slope1 = 0, .agc1_slope2 = 167, - .agc1_pt1 = 98, + .agc2_pt1 = 98, .agc2_pt2 = 255, .agc2_slope1 = 104, .agc2_slope2 = 0, @@ -2440,11 +2440,11 @@ static int tfe7090pvr_frontend0_attach(struct dvb_usb_adapter *adap) dib0700_set_i2c_speed(adap->dev, 340); adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x90, &tfe7090pvr_dib7000p_config[0]); - dib7090_slave_reset(adap->fe); - if (adap->fe == NULL) return -ENODEV; + dib7090_slave_reset(adap->fe); + return 0; } diff --git a/trunk/drivers/media/dvb/ngene/ngene-core.c b/trunk/drivers/media/dvb/ngene/ngene-core.c index ccc2d1af49d4..6927c726ce35 100644 --- a/trunk/drivers/media/dvb/ngene/ngene-core.c +++ b/trunk/drivers/media/dvb/ngene/ngene-core.c @@ -1520,6 +1520,7 @@ static int init_channel(struct ngene_channel *chan) if (dev->ci.en && (io & NGENE_IO_TSOUT)) { dvb_ca_en50221_init(adapter, dev->ci.en, 0, 1); set_transfer(chan, 1); + chan->dev->channel[2].DataFormatFlags = DF_SWAP32; set_transfer(&chan->dev->channel[2], 1); dvb_register_device(adapter, &chan->ci_dev, &ngene_dvbdev_ci, (void *) chan, diff --git a/trunk/drivers/media/media-entity.c b/trunk/drivers/media/media-entity.c index 23640ed44d85..056138f63c7d 100644 --- a/trunk/drivers/media/media-entity.c +++ b/trunk/drivers/media/media-entity.c @@ -378,7 +378,6 @@ EXPORT_SYMBOL_GPL(media_entity_create_link); static int __media_entity_setup_link_notify(struct media_link *link, u32 flags) { - const u32 mask = MEDIA_LNK_FL_ENABLED; int ret; /* Notify both entities. */ @@ -395,7 +394,7 @@ static int __media_entity_setup_link_notify(struct media_link *link, u32 flags) return ret; } - link->flags = (link->flags & ~mask) | (flags & mask); + link->flags = flags; link->reverse->flags = link->flags; return 0; @@ -417,6 +416,7 @@ static int __media_entity_setup_link_notify(struct media_link *link, u32 flags) */ int __media_entity_setup_link(struct media_link *link, u32 flags) { + const u32 mask = MEDIA_LNK_FL_ENABLED; struct media_device *mdev; struct media_entity *source, *sink; int ret = -EBUSY; @@ -424,6 +424,10 @@ int __media_entity_setup_link(struct media_link *link, u32 flags) if (link == NULL) return -EINVAL; + /* The non-modifiable link flags must not be modified. */ + if ((link->flags & ~mask) != (flags & ~mask)) + return -EINVAL; + if (link->flags & MEDIA_LNK_FL_IMMUTABLE) return link->flags == flags ? 0 : -EINVAL; diff --git a/trunk/drivers/media/radio/radio-sf16fmr2.c b/trunk/drivers/media/radio/radio-sf16fmr2.c index dc3f04c52d5e..87bad7678d92 100644 --- a/trunk/drivers/media/radio/radio-sf16fmr2.c +++ b/trunk/drivers/media/radio/radio-sf16fmr2.c @@ -170,7 +170,7 @@ static int fmr2_setfreq(struct fmr2 *dev) return 0; } -/* !!! not tested, in my card this does't work !!! */ +/* !!! not tested, in my card this doesn't work !!! 
*/ static int fmr2_setvolume(struct fmr2 *dev) { int vol[16] = { 0x021, 0x084, 0x090, 0x104, diff --git a/trunk/drivers/media/radio/saa7706h.c b/trunk/drivers/media/radio/saa7706h.c index 585680ffbfb6..b1193dfc5087 100644 --- a/trunk/drivers/media/radio/saa7706h.c +++ b/trunk/drivers/media/radio/saa7706h.c @@ -376,7 +376,7 @@ static int __devinit saa7706h_probe(struct i2c_client *client, v4l_info(client, "chip found @ 0x%02x (%s)\n", client->addr << 1, client->adapter->name); - state = kmalloc(sizeof(struct saa7706h_state), GFP_KERNEL); + state = kzalloc(sizeof(struct saa7706h_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; sd = &state->sd; diff --git a/trunk/drivers/media/radio/tef6862.c b/trunk/drivers/media/radio/tef6862.c index 7c0d77751f6e..0991e1973678 100644 --- a/trunk/drivers/media/radio/tef6862.c +++ b/trunk/drivers/media/radio/tef6862.c @@ -176,7 +176,7 @@ static int __devinit tef6862_probe(struct i2c_client *client, v4l_info(client, "chip found @ 0x%02x (%s)\n", client->addr << 1, client->adapter->name); - state = kmalloc(sizeof(struct tef6862_state), GFP_KERNEL); + state = kzalloc(sizeof(struct tef6862_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; state->freq = TEF6862_LO_FREQ; diff --git a/trunk/drivers/media/rc/imon.c b/trunk/drivers/media/rc/imon.c index ebd68edf5b24..8fc0f081b470 100644 --- a/trunk/drivers/media/rc/imon.c +++ b/trunk/drivers/media/rc/imon.c @@ -46,7 +46,7 @@ #define MOD_AUTHOR "Jarod Wilson " #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" #define MOD_NAME "imon" -#define MOD_VERSION "0.9.2" +#define MOD_VERSION "0.9.3" #define DISPLAY_MINOR_BASE 144 #define DEVICE_NAME "lcd%d" @@ -460,8 +460,9 @@ static int display_close(struct inode *inode, struct file *file) } /** - * Sends a packet to the device -- this function must be called - * with ictx->lock held. + * Sends a packet to the device -- this function must be called with + * ictx->lock held, or its unlock/lock sequence while waiting for tx + * to complete can/will lead to a deadlock. */ static int send_packet(struct imon_context *ictx) { @@ -991,12 +992,21 @@ static void imon_touch_display_timeout(unsigned long data) * the iMON remotes, and those used by the Windows MCE remotes (which is * really just RC-6), but only one or the other at a time, as the signals * are decoded onboard the receiver. + * + * This function gets called two different ways, one way is from + * rc_register_device, for initial protocol selection/setup, and the other is + * via a userspace-initiated protocol change request, either by direct sysfs + * prodding or by something like ir-keytable. In the rc_register_device case, + * the imon context lock is already held, but when initiated from userspace, + * it is not, so we must acquire it prior to calling send_packet, which + * requires that the lock is held. 
*/ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type) { int retval; struct imon_context *ictx = rc->priv; struct device *dev = ictx->dev; + bool unlock = false; unsigned char ir_proto_packet[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 }; @@ -1029,6 +1039,11 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type) memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet)); + if (!mutex_is_locked(&ictx->lock)) { + unlock = true; + mutex_lock(&ictx->lock); + } + retval = send_packet(ictx); if (retval) goto out; @@ -1037,6 +1052,9 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type) ictx->pad_mouse = false; out: + if (unlock) + mutex_unlock(&ictx->lock); + return retval; } @@ -2134,6 +2152,7 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf) goto rdev_setup_failed; } + mutex_unlock(&ictx->lock); return ictx; rdev_setup_failed: @@ -2205,6 +2224,7 @@ static struct imon_context *imon_init_intf1(struct usb_interface *intf, goto urb_submit_failed; } + mutex_unlock(&ictx->lock); return ictx; urb_submit_failed: @@ -2299,6 +2319,8 @@ static int __devinit imon_probe(struct usb_interface *interface, usb_set_intfdata(interface, ictx); if (ifnum == 0) { + mutex_lock(&ictx->lock); + if (product == 0xffdc && ictx->rf_device) { sysfs_err = sysfs_create_group(&interface->dev.kobj, &imon_rf_attr_group); @@ -2309,13 +2331,14 @@ static int __devinit imon_probe(struct usb_interface *interface, if (ictx->display_supported) imon_init_display(ictx, interface); + + mutex_unlock(&ictx->lock); } dev_info(dev, "iMON device (%04x:%04x, intf%d) on " "usb<%d:%d> initialized\n", vendor, product, ifnum, usbdev->bus->busnum, usbdev->devnum); - mutex_unlock(&ictx->lock); mutex_unlock(&driver_lock); return 0; diff --git a/trunk/drivers/media/rc/ite-cir.c b/trunk/drivers/media/rc/ite-cir.c index accaf6c9789a..43908a70bd8b 100644 --- a/trunk/drivers/media/rc/ite-cir.c +++ b/trunk/drivers/media/rc/ite-cir.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include diff --git a/trunk/drivers/media/rc/mceusb.c b/trunk/drivers/media/rc/mceusb.c index 044fb7a382d6..0c273ec465c9 100644 --- a/trunk/drivers/media/rc/mceusb.c +++ b/trunk/drivers/media/rc/mceusb.c @@ -220,6 +220,8 @@ static struct usb_device_id mceusb_dev_table[] = { { USB_DEVICE(VENDOR_PHILIPS, 0x206c) }, /* Philips/Spinel plus IR transceiver for ASUS */ { USB_DEVICE(VENDOR_PHILIPS, 0x2088) }, + /* Philips IR transceiver (Dell branded) */ + { USB_DEVICE(VENDOR_PHILIPS, 0x2093) }, /* Realtek MCE IR Receiver and card reader */ { USB_DEVICE(VENDOR_REALTEK, 0x0161), .driver_info = MULTIFUNCTION }, diff --git a/trunk/drivers/media/rc/rc-main.c b/trunk/drivers/media/rc/rc-main.c index f53f9c68d38d..a2706648e365 100644 --- a/trunk/drivers/media/rc/rc-main.c +++ b/trunk/drivers/media/rc/rc-main.c @@ -707,7 +707,8 @@ static void ir_close(struct input_dev *idev) { struct rc_dev *rdev = input_get_drvdata(idev); - rdev->close(rdev); + if (rdev) + rdev->close(rdev); } /* class for /sys/class/rc */ @@ -733,6 +734,7 @@ static struct { { RC_TYPE_SONY, "sony" }, { RC_TYPE_RC5_SZ, "rc-5-sz" }, { RC_TYPE_LIRC, "lirc" }, + { RC_TYPE_OTHER, "other" }, }; #define PROTO_NONE "none" diff --git a/trunk/drivers/media/video/Kconfig b/trunk/drivers/media/video/Kconfig index 4498b944dec8..00f51dd121f3 100644 --- a/trunk/drivers/media/video/Kconfig +++ b/trunk/drivers/media/video/Kconfig @@ -875,7 +875,7 @@ config MX3_VIDEO config VIDEO_MX3 tristate "i.MX3x Camera Sensor Interface driver" 
depends on VIDEO_DEV && MX3_IPU && SOC_CAMERA - select VIDEOBUF_DMA_CONTIG + select VIDEOBUF2_DMA_CONTIG select MX3_VIDEO ---help--- This is a v4l2 driver for the i.MX3x Camera Sensor Interface diff --git a/trunk/drivers/media/video/cx18/cx18-streams.c b/trunk/drivers/media/video/cx18/cx18-streams.c index c6e2ca3b1149..6fbc356113c1 100644 --- a/trunk/drivers/media/video/cx18/cx18-streams.c +++ b/trunk/drivers/media/video/cx18/cx18-streams.c @@ -350,9 +350,17 @@ void cx18_streams_cleanup(struct cx18 *cx, int unregister) /* No struct video_device, but can have buffers allocated */ if (type == CX18_ENC_STREAM_TYPE_IDX) { + /* If the module params didn't inhibit IDX ... */ if (cx->stream_buffers[type] != 0) { cx->stream_buffers[type] = 0; - cx18_stream_free(&cx->streams[type]); + /* + * Before calling cx18_stream_free(), + * check if the IDX stream was actually set up. + * Needed, since the cx18_probe() error path + * exits through here as well as normal clean up + */ + if (cx->streams[type].buffers != 0) + cx18_stream_free(&cx->streams[type]); } continue; } diff --git a/trunk/drivers/media/video/cx23885/Kconfig b/trunk/drivers/media/video/cx23885/Kconfig index 3b6e7f28568e..caab1bfb79e2 100644 --- a/trunk/drivers/media/video/cx23885/Kconfig +++ b/trunk/drivers/media/video/cx23885/Kconfig @@ -22,6 +22,7 @@ config VIDEO_CX23885 select DVB_CX24116 if !DVB_FE_CUSTOMISE select DVB_STV0900 if !DVB_FE_CUSTOMISE select DVB_DS3000 if !DVB_FE_CUSTOMISE + select DVB_STV0367 if !DVB_FE_CUSTOMISE select MEDIA_TUNER_MT2131 if !MEDIA_TUNER_CUSTOMISE select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE select MEDIA_TUNER_TDA8290 if !MEDIA_TUNER_CUSTOMISE diff --git a/trunk/drivers/media/video/cx88/cx88-input.c b/trunk/drivers/media/video/cx88/cx88-input.c index c820e2f53527..3f442003623d 100644 --- a/trunk/drivers/media/video/cx88/cx88-input.c +++ b/trunk/drivers/media/video/cx88/cx88-input.c @@ -524,7 +524,7 @@ void cx88_ir_irq(struct cx88_core *core) for (todo = 32; todo > 0; todo -= bits) { ev.pulse = samples & 0x80000000 ? false : true; bits = min(todo, 32U - fls(ev.pulse ? 
samples : ~samples)); - ev.duration = (bits * NSEC_PER_SEC) / (1000 * ir_samplerate); + ev.duration = (bits * (NSEC_PER_SEC / 1000)) / ir_samplerate; ir_raw_event_store_with_filter(ir->dev, &ev); samples <<= bits; } diff --git a/trunk/drivers/media/video/imx074.c b/trunk/drivers/media/video/imx074.c index 1a1169115716..0382ea752e6f 100644 --- a/trunk/drivers/media/video/imx074.c +++ b/trunk/drivers/media/video/imx074.c @@ -298,7 +298,7 @@ static unsigned long imx074_query_bus_param(struct soc_camera_device *icd) static int imx074_set_bus_param(struct soc_camera_device *icd, unsigned long flags) { - return -1; + return -EINVAL; } static struct soc_camera_ops imx074_ops = { diff --git a/trunk/drivers/media/video/m52790.c b/trunk/drivers/media/video/m52790.c index 5e1c9a81984c..303ffa7df4ac 100644 --- a/trunk/drivers/media/video/m52790.c +++ b/trunk/drivers/media/video/m52790.c @@ -174,7 +174,7 @@ static int m52790_probe(struct i2c_client *client, v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); - state = kmalloc(sizeof(struct m52790_state), GFP_KERNEL); + state = kzalloc(sizeof(struct m52790_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; diff --git a/trunk/drivers/media/video/omap3isp/isp.c b/trunk/drivers/media/video/omap3isp/isp.c index 503bd7922bd6..472a69359e60 100644 --- a/trunk/drivers/media/video/omap3isp/isp.c +++ b/trunk/drivers/media/video/omap3isp/isp.c @@ -215,20 +215,21 @@ static u32 isp_set_xclk(struct isp_device *isp, u32 xclk, u8 xclksel) } switch (xclksel) { - case 0: + case ISP_XCLK_A: isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL, ISPTCTRL_CTRL_DIVA_MASK, divisor << ISPTCTRL_CTRL_DIVA_SHIFT); dev_dbg(isp->dev, "isp_set_xclk(): cam_xclka set to %d Hz\n", currentxclk); break; - case 1: + case ISP_XCLK_B: isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL, ISPTCTRL_CTRL_DIVB_MASK, divisor << ISPTCTRL_CTRL_DIVB_SHIFT); dev_dbg(isp->dev, "isp_set_xclk(): cam_xclkb set to %d Hz\n", currentxclk); break; + case ISP_XCLK_NONE: default: omap3isp_put(isp); dev_dbg(isp->dev, "ISP_ERR: isp_set_xclk(): Invalid requested " @@ -237,13 +238,13 @@ static u32 isp_set_xclk(struct isp_device *isp, u32 xclk, u8 xclksel) } /* Do we go from stable whatever to clock? */ - if (divisor >= 2 && isp->xclk_divisor[xclksel] < 2) + if (divisor >= 2 && isp->xclk_divisor[xclksel - 1] < 2) omap3isp_get(isp); /* Stopping the clock. 
*/ - else if (divisor < 2 && isp->xclk_divisor[xclksel] >= 2) + else if (divisor < 2 && isp->xclk_divisor[xclksel - 1] >= 2) omap3isp_put(isp); - isp->xclk_divisor[xclksel] = divisor; + isp->xclk_divisor[xclksel - 1] = divisor; omap3isp_put(isp); @@ -285,7 +286,8 @@ static void isp_power_settings(struct isp_device *isp, int idle) */ void omap3isp_configure_bridge(struct isp_device *isp, enum ccdc_input_entity input, - const struct isp_parallel_platform_data *pdata) + const struct isp_parallel_platform_data *pdata, + unsigned int shift) { u32 ispctrl_val; @@ -298,9 +300,9 @@ void omap3isp_configure_bridge(struct isp_device *isp, switch (input) { case CCDC_INPUT_PARALLEL: ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_PARALLEL; - ispctrl_val |= pdata->data_lane_shift << ISPCTRL_SHIFT_SHIFT; ispctrl_val |= pdata->clk_pol << ISPCTRL_PAR_CLK_POL_SHIFT; ispctrl_val |= pdata->bridge << ISPCTRL_PAR_BRIDGE_SHIFT; + shift += pdata->data_lane_shift * 2; break; case CCDC_INPUT_CSI2A: @@ -319,6 +321,8 @@ void omap3isp_configure_bridge(struct isp_device *isp, return; } + ispctrl_val |= ((shift/2) << ISPCTRL_SHIFT_SHIFT) & ISPCTRL_SHIFT_MASK; + ispctrl_val &= ~ISPCTRL_SYNC_DETECT_MASK; ispctrl_val |= ISPCTRL_SYNC_DETECT_VSRISE; @@ -658,6 +662,8 @@ int omap3isp_pipeline_pm_use(struct media_entity *entity, int use) /* Apply power change to connected non-nodes. */ ret = isp_pipeline_pm_power(entity, change); + if (ret < 0) + entity->use_count -= change; mutex_unlock(&entity->parent->graph_mutex); @@ -872,6 +878,9 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe) } } + if (failure < 0) + isp->needs_reset = true; + return failure; } @@ -884,7 +893,8 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe) * single-shot or continuous mode. * * Return 0 if successful, or the return value of the failed video::s_stream - * operation otherwise. + * operation otherwise. The pipeline state is not updated when the operation + * fails, except when stopping the pipeline. 
*/ int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe, enum isp_pipeline_stream_state state) @@ -895,7 +905,9 @@ int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe, ret = isp_pipeline_disable(pipe); else ret = isp_pipeline_enable(pipe, state); - pipe->stream_state = state; + + if (ret == 0 || state == ISP_PIPELINE_STREAM_STOPPED) + pipe->stream_state = state; return ret; } @@ -1481,6 +1493,10 @@ void omap3isp_put(struct isp_device *isp) if (--isp->ref_count == 0) { isp_disable_interrupts(isp); isp_save_ctx(isp); + if (isp->needs_reset) { + isp_reset(isp); + isp->needs_reset = false; + } isp_disable_clocks(isp); } mutex_unlock(&isp->isp_mutex); diff --git a/trunk/drivers/media/video/omap3isp/isp.h b/trunk/drivers/media/video/omap3isp/isp.h index cf5214e95a92..2620c405f5e4 100644 --- a/trunk/drivers/media/video/omap3isp/isp.h +++ b/trunk/drivers/media/video/omap3isp/isp.h @@ -132,7 +132,6 @@ struct isp_reg { /** * struct isp_parallel_platform_data - Parallel interface platform data - * @width: Parallel bus width in bits (8, 10, 11 or 12) * @data_lane_shift: Data lane shifter * 0 - CAMEXT[13:0] -> CAM[13:0] * 1 - CAMEXT[13:2] -> CAM[11:0] @@ -146,7 +145,6 @@ struct isp_reg { * ISPCTRL_PAR_BRIDGE_BENDIAN - Big endian */ struct isp_parallel_platform_data { - unsigned int width; unsigned int data_lane_shift:2; unsigned int clk_pol:1; unsigned int bridge:4; @@ -262,6 +260,7 @@ struct isp_device { /* ISP Obj */ spinlock_t stat_lock; /* common lock for statistic drivers */ struct mutex isp_mutex; /* For handling ref_count field */ + bool needs_reset; int has_context; int ref_count; unsigned int autoidle; @@ -311,11 +310,12 @@ int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe, enum isp_pipeline_stream_state state); void omap3isp_configure_bridge(struct isp_device *isp, enum ccdc_input_entity input, - const struct isp_parallel_platform_data *pdata); + const struct isp_parallel_platform_data *pdata, + unsigned int shift); -#define ISP_XCLK_NONE -1 -#define ISP_XCLK_A 0 -#define ISP_XCLK_B 1 +#define ISP_XCLK_NONE 0 +#define ISP_XCLK_A 1 +#define ISP_XCLK_B 2 struct isp_device *omap3isp_get(struct isp_device *isp); void omap3isp_put(struct isp_device *isp); diff --git a/trunk/drivers/media/video/omap3isp/ispccdc.c b/trunk/drivers/media/video/omap3isp/ispccdc.c index 5ff9d14ce710..39d501bda636 100644 --- a/trunk/drivers/media/video/omap3isp/ispccdc.c +++ b/trunk/drivers/media/video/omap3isp/ispccdc.c @@ -43,6 +43,12 @@ __ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh, static const unsigned int ccdc_fmts[] = { V4L2_MBUS_FMT_Y8_1X8, + V4L2_MBUS_FMT_Y10_1X10, + V4L2_MBUS_FMT_Y12_1X12, + V4L2_MBUS_FMT_SGRBG8_1X8, + V4L2_MBUS_FMT_SRGGB8_1X8, + V4L2_MBUS_FMT_SBGGR8_1X8, + V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10, @@ -1110,21 +1116,38 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc) struct isp_parallel_platform_data *pdata = NULL; struct v4l2_subdev *sensor; struct v4l2_mbus_framefmt *format; + const struct isp_format_info *fmt_info; + struct v4l2_subdev_format fmt_src; + unsigned int depth_out; + unsigned int depth_in = 0; struct media_pad *pad; unsigned long flags; + unsigned int shift; u32 syn_mode; u32 ccdc_pattern; - if (ccdc->input == CCDC_INPUT_PARALLEL) { - pad = media_entity_remote_source(&ccdc->pads[CCDC_PAD_SINK]); - sensor = media_entity_to_v4l2_subdev(pad->entity); + pad = media_entity_remote_source(&ccdc->pads[CCDC_PAD_SINK]); + sensor = 
media_entity_to_v4l2_subdev(pad->entity); + if (ccdc->input == CCDC_INPUT_PARALLEL) pdata = &((struct isp_v4l2_subdevs_group *)sensor->host_priv) ->bus.parallel; + + /* Compute shift value for lane shifter to configure the bridge. */ + fmt_src.pad = pad->index; + fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE; + if (!v4l2_subdev_call(sensor, pad, get_fmt, NULL, &fmt_src)) { + fmt_info = omap3isp_video_format_info(fmt_src.format.code); + depth_in = fmt_info->bpp; } - omap3isp_configure_bridge(isp, ccdc->input, pdata); + fmt_info = omap3isp_video_format_info + (isp->isp_ccdc.formats[CCDC_PAD_SINK].code); + depth_out = fmt_info->bpp; + + shift = depth_in - depth_out; + omap3isp_configure_bridge(isp, ccdc->input, pdata, shift); - ccdc->syncif.datsz = pdata ? pdata->width : 10; + ccdc->syncif.datsz = depth_out; ccdc_config_sync_if(ccdc, &ccdc->syncif); /* CCDC_PAD_SINK */ @@ -1338,7 +1361,7 @@ static int ccdc_sbl_wait_idle(struct isp_ccdc_device *ccdc, * @ccdc: Pointer to ISP CCDC device. * @event: Pointing which event trigger handler * - * Return 1 when the event and stopping request combination is satisfyied, + * Return 1 when the event and stopping request combination is satisfied, * zero otherwise. */ static int __ccdc_handle_stopping(struct isp_ccdc_device *ccdc, u32 event) @@ -1618,7 +1641,7 @@ static int ccdc_video_queue(struct isp_video *video, struct isp_buffer *buffer) ccdc_set_outaddr(ccdc, buffer->isp_addr); - /* We now have a buffer queued on the output, restart the pipeline in + /* We now have a buffer queued on the output, restart the pipeline * on the next CCDC interrupt if running in continuous mode (or when * starting the stream). */ diff --git a/trunk/drivers/media/video/omap3isp/isppreview.c b/trunk/drivers/media/video/omap3isp/isppreview.c index 2b16988a501d..aba537af87e4 100644 --- a/trunk/drivers/media/video/omap3isp/isppreview.c +++ b/trunk/drivers/media/video/omap3isp/isppreview.c @@ -755,7 +755,7 @@ static struct preview_update update_attrs[] = { * @configs - pointer to update config structure. * @config - return pointer to appropriate structure field. * @bit - for which feature to return pointers. - * Return size of coresponding prev_params member + * Return size of corresponding prev_params member */ static u32 __preview_get_ptrs(struct prev_params *params, void **param, diff --git a/trunk/drivers/media/video/omap3isp/ispqueue.c b/trunk/drivers/media/video/omap3isp/ispqueue.c index 8fddc5806b0d..9c317148205f 100644 --- a/trunk/drivers/media/video/omap3isp/ispqueue.c +++ b/trunk/drivers/media/video/omap3isp/ispqueue.c @@ -339,7 +339,7 @@ static int isp_video_buffer_prepare_user(struct isp_video_buffer *buf) up_read(¤t->mm->mmap_sem); if (ret != buf->npages) { - buf->npages = ret; + buf->npages = ret < 0 ? 0 : ret; isp_video_buffer_cleanup(buf); return -EFAULT; } @@ -408,8 +408,8 @@ static int isp_video_buffer_prepare_pfnmap(struct isp_video_buffer *buf) * isp_video_buffer_prepare_vm_flags - Get VMA flags for a userspace address * * This function locates the VMAs for the buffer's userspace address and checks - * that their flags match. The onlflag that we need to care for at the moment is - * VM_PFNMAP. + * that their flags match. The only flag that we need to care for at the moment + * is VM_PFNMAP. * * The buffer vm_flags field is set to the first VMA flags. 
* diff --git a/trunk/drivers/media/video/omap3isp/ispresizer.c b/trunk/drivers/media/video/omap3isp/ispresizer.c index 653f88ba56db..0bb0f8cd36f5 100644 --- a/trunk/drivers/media/video/omap3isp/ispresizer.c +++ b/trunk/drivers/media/video/omap3isp/ispresizer.c @@ -714,19 +714,50 @@ static void resizer_print_status(struct isp_res_device *res) * iw and ih are the input width and height after cropping. Those equations need * to be satisfied exactly for the resizer to work correctly. * - * Reverting the equations, we can compute the resizing ratios with + * The equations can't be easily reverted, as the >> 8 operation is not linear. + * In addition, not all input sizes can be achieved for a given output size. To + * get the highest input size lower than or equal to the requested input size, + * we need to compute the highest resizing ratio that satisfies the following + * inequality (taking the 4-tap mode width equation as an example) + * + * iw >= (32 * sph + (ow - 1) * hrsz + 16) >> 8 - 7 + * + * (where iw is the requested input width) which can be rewritten as + * + * iw - 7 >= (32 * sph + (ow - 1) * hrsz + 16) >> 8 + * (iw - 7) << 8 >= 32 * sph + (ow - 1) * hrsz + 16 - b + * ((iw - 7) << 8) + b >= 32 * sph + (ow - 1) * hrsz + 16 + * + * where b is the value of the 8 least significant bits of the right hand side + * expression of the last inequality. The highest resizing ratio value will be + * achieved when b is equal to its maximum value of 255. That resizing ratio + * value will still satisfy the original inequality, as b will disappear when + * the expression will be shifted right by 8. + * + * The reverted equations thus become * * - 8-phase, 4-tap mode - * hrsz = ((iw - 7) * 256 - 16 - 32 * sph) / (ow - 1) - * vrsz = ((ih - 4) * 256 - 16 - 32 * spv) / (oh - 1) + * hrsz = ((iw - 7) * 256 + 255 - 16 - 32 * sph) / (ow - 1) + * vrsz = ((ih - 4) * 256 + 255 - 16 - 32 * spv) / (oh - 1) * - 4-phase, 7-tap mode - * hrsz = ((iw - 7) * 256 - 32 - 64 * sph) / (ow - 1) - * vrsz = ((ih - 7) * 256 - 32 - 64 * spv) / (oh - 1) + * hrsz = ((iw - 7) * 256 + 255 - 32 - 64 * sph) / (ow - 1) + * vrsz = ((ih - 7) * 256 + 255 - 32 - 64 * spv) / (oh - 1) * - * The ratios are integer values, and must be rounded down to ensure that the - * cropped input size is not bigger than the uncropped input size. As the ratio - * in 7-tap mode is always smaller than the ratio in 4-tap mode, we can use the - * 7-tap mode equations to compute a ratio approximation. + * The ratios are integer values, and are rounded down to ensure that the + * cropped input size is not bigger than the uncropped input size. + * + * As the number of phases/taps, used to select the correct equations to compute + * the ratio, depends on the ratio, we start with the 4-tap mode equations to + * compute an approximation of the ratio, and switch to the 7-tap mode equations + * if the approximation is higher than the ratio threshold. + * + * As the 7-tap mode equations will return a ratio smaller than or equal to the + * 4-tap mode equations, the resulting ratio could become lower than or equal to + * the ratio threshold. This 'equations loop' isn't an issue as long as the + * correct equations are used to compute the final input size. Starting with the + * 4-tap mode equations ensures that, in case of values resulting in a 'ratio + * loop', the smallest of the ratio values will be used, never exceeding the + * requested input size.
* * We first clamp the output size according to the hardware capabilities to avoid * auto-cropping the input more than required to satisfy the TRM equations. The @@ -775,6 +806,8 @@ static void resizer_calc_ratios(struct isp_res_device *res, unsigned int max_width; unsigned int max_height; unsigned int width_alignment; + unsigned int width; + unsigned int height; /* * Clamp the output height based on the hardware capabilities and @@ -786,19 +819,22 @@ static void resizer_calc_ratios(struct isp_res_device *res, max_height = min_t(unsigned int, max_height, MAX_OUT_HEIGHT); output->height = clamp(output->height, min_height, max_height); - ratio->vert = ((input->height - 7) * 256 - 32 - 64 * spv) + ratio->vert = ((input->height - 4) * 256 + 255 - 16 - 32 * spv) / (output->height - 1); + if (ratio->vert > MID_RESIZE_VALUE) + ratio->vert = ((input->height - 7) * 256 + 255 - 32 - 64 * spv) + / (output->height - 1); ratio->vert = clamp_t(unsigned int, ratio->vert, MIN_RESIZE_VALUE, MAX_RESIZE_VALUE); if (ratio->vert <= MID_RESIZE_VALUE) { upscaled_height = (output->height - 1) * ratio->vert + 32 * spv + 16; - input->height = (upscaled_height >> 8) + 4; + height = (upscaled_height >> 8) + 4; } else { upscaled_height = (output->height - 1) * ratio->vert + 64 * spv + 32; - input->height = (upscaled_height >> 8) + 7; + height = (upscaled_height >> 8) + 7; } /* @@ -854,20 +890,29 @@ static void resizer_calc_ratios(struct isp_res_device *res, max_width & ~(width_alignment - 1)); output->width = ALIGN(output->width, width_alignment); - ratio->horz = ((input->width - 7) * 256 - 32 - 64 * sph) + ratio->horz = ((input->width - 7) * 256 + 255 - 16 - 32 * sph) / (output->width - 1); + if (ratio->horz > MID_RESIZE_VALUE) + ratio->horz = ((input->width - 7) * 256 + 255 - 32 - 64 * sph) + / (output->width - 1); ratio->horz = clamp_t(unsigned int, ratio->horz, MIN_RESIZE_VALUE, MAX_RESIZE_VALUE); if (ratio->horz <= MID_RESIZE_VALUE) { upscaled_width = (output->width - 1) * ratio->horz + 32 * sph + 16; - input->width = (upscaled_width >> 8) + 7; + width = (upscaled_width >> 8) + 7; } else { upscaled_width = (output->width - 1) * ratio->horz + 64 * sph + 32; - input->width = (upscaled_width >> 8) + 7; + width = (upscaled_width >> 8) + 7; } + + /* Center the new crop rectangle.
*/ + input->left += (input->width - width) / 2; + input->top += (input->height - height) / 2; + input->width = width; + input->height = height; } /* diff --git a/trunk/drivers/media/video/omap3isp/ispstat.h b/trunk/drivers/media/video/omap3isp/ispstat.h index 820950c9ef46..d86da94fa50d 100644 --- a/trunk/drivers/media/video/omap3isp/ispstat.h +++ b/trunk/drivers/media/video/omap3isp/ispstat.h @@ -131,9 +131,9 @@ struct ispstat { struct ispstat_generic_config { /* * Fields must be in the same order as in: - * - isph3a_aewb_config - * - isph3a_af_config - * - isphist_config + * - omap3isp_h3a_aewb_config + * - omap3isp_h3a_af_config + * - omap3isp_hist_config */ u32 buf_size; u16 config_counter; diff --git a/trunk/drivers/media/video/omap3isp/ispvideo.c b/trunk/drivers/media/video/omap3isp/ispvideo.c index 208a7ec739d7..9cd8f1aa567b 100644 --- a/trunk/drivers/media/video/omap3isp/ispvideo.c +++ b/trunk/drivers/media/video/omap3isp/ispvideo.c @@ -47,29 +47,59 @@ static struct isp_format_info formats[] = { { V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8, - V4L2_MBUS_FMT_Y8_1X8, V4L2_PIX_FMT_GREY, 8, }, + V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8, + V4L2_PIX_FMT_GREY, 8, }, + { V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10, + V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8, + V4L2_PIX_FMT_Y10, 10, }, + { V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10, + V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8, + V4L2_PIX_FMT_Y12, 12, }, + { V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8, + V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8, + V4L2_PIX_FMT_SBGGR8, 8, }, + { V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8, + V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8, + V4L2_PIX_FMT_SGBRG8, 8, }, + { V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8, + V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8, + V4L2_PIX_FMT_SGRBG8, 8, }, + { V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8, + V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8, + V4L2_PIX_FMT_SRGGB8, 8, }, { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, - V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_PIX_FMT_SGRBG10DPCM8, 8, }, + V4L2_MBUS_FMT_SGRBG10_1X10, 0, + V4L2_PIX_FMT_SGRBG10DPCM8, 8, }, { V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10, - V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_PIX_FMT_SBGGR10, 10, }, + V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8, + V4L2_PIX_FMT_SBGGR10, 10, }, { V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10, - V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_PIX_FMT_SGBRG10, 10, }, + V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8, + V4L2_PIX_FMT_SGBRG10, 10, }, { V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10, - V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_PIX_FMT_SGRBG10, 10, }, + V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8, + V4L2_PIX_FMT_SGRBG10, 10, }, { V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10, - V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_PIX_FMT_SRGGB10, 10, }, + V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8, + V4L2_PIX_FMT_SRGGB10, 10, }, { V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10, - V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_PIX_FMT_SBGGR12, 12, }, + V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8, + V4L2_PIX_FMT_SBGGR12, 12, }, { V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10, - V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_PIX_FMT_SGBRG12, 12, }, + V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8, + V4L2_PIX_FMT_SGBRG12, 12, }, { V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10, - V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_PIX_FMT_SGRBG12, 12, }, + 
V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8, + V4L2_PIX_FMT_SGRBG12, 12, }, { V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10, - V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_PIX_FMT_SRGGB12, 12, }, + V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8, + V4L2_PIX_FMT_SRGGB12, 12, }, { V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16, - V4L2_MBUS_FMT_UYVY8_1X16, V4L2_PIX_FMT_UYVY, 16, }, + V4L2_MBUS_FMT_UYVY8_1X16, 0, + V4L2_PIX_FMT_UYVY, 16, }, { V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16, - V4L2_MBUS_FMT_YUYV8_1X16, V4L2_PIX_FMT_YUYV, 16, }, + V4L2_MBUS_FMT_YUYV8_1X16, 0, + V4L2_PIX_FMT_YUYV, 16, }, }; const struct isp_format_info * @@ -85,6 +115,37 @@ omap3isp_video_format_info(enum v4l2_mbus_pixelcode code) return NULL; } +/* + * Decide whether desired output pixel code can be obtained with + * the lane shifter by shifting the input pixel code. + * @in: input pixelcode to shifter + * @out: output pixelcode from shifter + * @additional_shift: # of bits the sensor's LSB is offset from CAMEXT[0] + * + * return true if the combination is possible + * return false otherwise + */ +static bool isp_video_is_shiftable(enum v4l2_mbus_pixelcode in, + enum v4l2_mbus_pixelcode out, + unsigned int additional_shift) +{ + const struct isp_format_info *in_info, *out_info; + + if (in == out) + return true; + + in_info = omap3isp_video_format_info(in); + out_info = omap3isp_video_format_info(out); + + if ((in_info->flavor == 0) || (out_info->flavor == 0)) + return false; + + if (in_info->flavor != out_info->flavor) + return false; + + return in_info->bpp - out_info->bpp + additional_shift <= 6; +} + /* * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format * @video: ISP video instance @@ -235,6 +296,7 @@ static int isp_video_validate_pipeline(struct isp_pipeline *pipe) return -EPIPE; while (1) { + unsigned int shifter_link; /* Retrieve the sink format */ pad = &subdev->entity.pads[0]; if (!(pad->flags & MEDIA_PAD_FL_SINK)) @@ -263,6 +325,10 @@ static int isp_video_validate_pipeline(struct isp_pipeline *pipe) return -ENOSPC; } + /* If sink pad is on CCDC, the link has the lane shifter + * in the middle of it. */ + shifter_link = subdev == &isp->isp_ccdc.subdev; + /* Retrieve the source format */ pad = media_entity_remote_source(pad); if (pad == NULL || @@ -278,10 +344,24 @@ static int isp_video_validate_pipeline(struct isp_pipeline *pipe) return -EPIPE; /* Check if the two ends match */ - if (fmt_source.format.code != fmt_sink.format.code || - fmt_source.format.width != fmt_sink.format.width || + if (fmt_source.format.width != fmt_sink.format.width || fmt_source.format.height != fmt_sink.format.height) return -EPIPE; + + if (shifter_link) { + unsigned int parallel_shift = 0; + if (isp->isp_ccdc.input == CCDC_INPUT_PARALLEL) { + struct isp_parallel_platform_data *pdata = + &((struct isp_v4l2_subdevs_group *) + subdev->host_priv)->bus.parallel; + parallel_shift = pdata->data_lane_shift * 2; + } + if (!isp_video_is_shiftable(fmt_source.format.code, + fmt_sink.format.code, + parallel_shift)) + return -EPIPE; + } else if (fmt_source.format.code != fmt_sink.format.code) + return -EPIPE; } return 0; diff --git a/trunk/drivers/media/video/omap3isp/ispvideo.h b/trunk/drivers/media/video/omap3isp/ispvideo.h index 524a1acd0906..911bea64e78a 100644 --- a/trunk/drivers/media/video/omap3isp/ispvideo.h +++ b/trunk/drivers/media/video/omap3isp/ispvideo.h @@ -49,6 +49,8 @@ struct v4l2_pix_format; * bits. Identical to @code if the format is 10 bits wide or less. 
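For illustration, two calls to the new isp_video_is_shiftable() helper, taking additional_shift = 2 to stand for a parallel bus with data_lane_shift = 1; the pixel codes are example values, resolved against the formats[] table above:

        isp_video_is_shiftable(V4L2_MBUS_FMT_SGRBG12_1X12,
                               V4L2_MBUS_FMT_SGRBG8_1X8, 2);   /* true: same flavor, (12 - 8) + 2 <= 6 */

        isp_video_is_shiftable(V4L2_MBUS_FMT_SGRBG12_1X12,
                               V4L2_MBUS_FMT_SBGGR8_1X8, 2);   /* false: different Bayer order, flavors differ */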
* @uncompressed: V4L2 media bus format code for the corresponding uncompressed * format. Identical to @code if the format is not DPCM compressed. + * @flavor: V4L2 media bus format code for the same pixel layout but + * shifted to be 8 bits per pixel. =0 if format is not shiftable. * @pixelformat: V4L2 pixel format FCC identifier * @bpp: Bits per pixel */ @@ -56,6 +58,7 @@ struct isp_format_info { enum v4l2_mbus_pixelcode code; enum v4l2_mbus_pixelcode truncated; enum v4l2_mbus_pixelcode uncompressed; + enum v4l2_mbus_pixelcode flavor; u32 pixelformat; unsigned int bpp; }; diff --git a/trunk/drivers/media/video/s5p-fimc/fimc-capture.c b/trunk/drivers/media/video/s5p-fimc/fimc-capture.c index 95f8b4e11e46..d142b40ea64e 100644 --- a/trunk/drivers/media/video/s5p-fimc/fimc-capture.c +++ b/trunk/drivers/media/video/s5p-fimc/fimc-capture.c @@ -527,7 +527,7 @@ static int fimc_cap_s_fmt_mplane(struct file *file, void *priv, if (ret) return ret; - if (vb2_is_streaming(&fimc->vid_cap.vbq) || fimc_capture_active(fimc)) + if (vb2_is_busy(&fimc->vid_cap.vbq) || fimc_capture_active(fimc)) return -EBUSY; frame = &ctx->d_frame; @@ -539,8 +539,10 @@ static int fimc_cap_s_fmt_mplane(struct file *file, void *priv, return -EINVAL; } - for (i = 0; i < frame->fmt->colplanes; i++) - frame->payload[i] = pix->plane_fmt[i].bytesperline * pix->height; + for (i = 0; i < frame->fmt->colplanes; i++) { + frame->payload[i] = + (pix->width * pix->height * frame->fmt->depth[i]) >> 3; + } /* Output DMA frame pixel size and offsets. */ frame->f_width = pix->plane_fmt[0].bytesperline * 8 diff --git a/trunk/drivers/media/video/s5p-fimc/fimc-core.c b/trunk/drivers/media/video/s5p-fimc/fimc-core.c index 6c919b38a3d8..dc91a8511af6 100644 --- a/trunk/drivers/media/video/s5p-fimc/fimc-core.c +++ b/trunk/drivers/media/video/s5p-fimc/fimc-core.c @@ -361,10 +361,20 @@ static void fimc_capture_irq_handler(struct fimc_dev *fimc) { struct fimc_vid_cap *cap = &fimc->vid_cap; struct fimc_vid_buffer *v_buf; + struct timeval *tv; + struct timespec ts; if (!list_empty(&cap->active_buf_q) && test_bit(ST_CAPT_RUN, &fimc->state)) { + ktime_get_real_ts(&ts); + v_buf = active_queue_pop(cap); + + tv = &v_buf->vb.v4l2_buf.timestamp; + tv->tv_sec = ts.tv_sec; + tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC; + v_buf->vb.v4l2_buf.sequence = cap->frame_count++; + vb2_buffer_done(&v_buf->vb, VB2_BUF_STATE_DONE); } @@ -758,7 +768,7 @@ static void fimc_unlock(struct vb2_queue *vq) mutex_unlock(&ctx->fimc_dev->lock); } -struct vb2_ops fimc_qops = { +static struct vb2_ops fimc_qops = { .queue_setup = fimc_queue_setup, .buf_prepare = fimc_buf_prepare, .buf_queue = fimc_buf_queue, @@ -927,23 +937,23 @@ int fimc_vidioc_try_fmt_mplane(struct file *file, void *priv, pix->num_planes = fmt->memplanes; pix->colorspace = V4L2_COLORSPACE_JPEG; - for (i = 0; i < pix->num_planes; ++i) { - int bpl = pix->plane_fmt[i].bytesperline; - dbg("[%d] bpl: %d, depth: %d, w: %d, h: %d", - i, bpl, fmt->depth[i], pix->width, pix->height); + for (i = 0; i < pix->num_planes; ++i) { + u32 bpl = pix->plane_fmt[i].bytesperline; + u32 *sizeimage = &pix->plane_fmt[i].sizeimage; - if (!bpl || (bpl * 8 / fmt->depth[i]) > pix->width) - bpl = (pix->width * fmt->depth[0]) >> 3; + if (fmt->colplanes > 1 && (bpl == 0 || bpl < pix->width)) + bpl = pix->width; /* Planar */ - if (!pix->plane_fmt[i].sizeimage) - pix->plane_fmt[i].sizeimage = pix->height * bpl; + if (fmt->colplanes == 1 && /* Packed */ + (bpl == 0 || ((bpl * 8) / fmt->depth[i]) < pix->width)) + bpl = (pix->width * fmt->depth[0]) / 8; - 
pix->plane_fmt[i].bytesperline = bpl; + if (i == 0) /* Same bytesperline for each plane. */ + mod_x = bpl; - dbg("[%d]: bpl: %d, sizeimage: %d", - i, pix->plane_fmt[i].bytesperline, - pix->plane_fmt[i].sizeimage); + pix->plane_fmt[i].bytesperline = mod_x; + *sizeimage = (pix->width * pix->height * fmt->depth[i]) / 8; } return 0; @@ -965,7 +975,7 @@ static int fimc_m2m_s_fmt_mplane(struct file *file, void *priv, vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); - if (vb2_is_streaming(vq)) { + if (vb2_is_busy(vq)) { v4l2_err(&fimc->m2m.v4l2_dev, "queue (%d) busy\n", f->type); return -EBUSY; } @@ -985,8 +995,10 @@ static int fimc_m2m_s_fmt_mplane(struct file *file, void *priv, if (!frame->fmt) return -EINVAL; - for (i = 0; i < frame->fmt->colplanes; i++) - frame->payload[i] = pix->plane_fmt[i].bytesperline * pix->height; + for (i = 0; i < frame->fmt->colplanes; i++) { + frame->payload[i] = + (pix->width * pix->height * frame->fmt->depth[i]) / 8; + } frame->f_width = pix->plane_fmt[0].bytesperline * 8 / frame->fmt->depth[0]; @@ -1750,7 +1762,7 @@ static int __devexit fimc_remove(struct platform_device *pdev) } /* Image pixel limits, similar across several FIMC HW revisions. */ -static struct fimc_pix_limit s5p_pix_limit[3] = { +static struct fimc_pix_limit s5p_pix_limit[4] = { [0] = { .scaler_en_w = 3264, .scaler_dis_w = 8192, @@ -1775,6 +1787,14 @@ static struct fimc_pix_limit s5p_pix_limit[3] = { .out_rot_en_w = 1280, .out_rot_dis_w = 1920, }, + [3] = { + .scaler_en_w = 1920, + .scaler_dis_w = 8192, + .in_rot_en_h = 1366, + .in_rot_dis_w = 8192, + .out_rot_en_w = 1366, + .out_rot_dis_w = 1920, + }, }; static struct samsung_fimc_variant fimc0_variant_s5p = { @@ -1827,7 +1847,7 @@ static struct samsung_fimc_variant fimc2_variant_s5pv210 = { .pix_limit = &s5p_pix_limit[2], }; -static struct samsung_fimc_variant fimc0_variant_s5pv310 = { +static struct samsung_fimc_variant fimc0_variant_exynos4 = { .pix_hoff = 1, .has_inp_rot = 1, .has_out_rot = 1, @@ -1840,7 +1860,7 @@ static struct samsung_fimc_variant fimc0_variant_s5pv310 = { .pix_limit = &s5p_pix_limit[1], }; -static struct samsung_fimc_variant fimc2_variant_s5pv310 = { +static struct samsung_fimc_variant fimc2_variant_exynos4 = { .pix_hoff = 1, .has_cistatus2 = 1, .has_mainscaler_ext = 1, @@ -1848,7 +1868,7 @@ static struct samsung_fimc_variant fimc2_variant_s5pv310 = { .min_out_pixsize = 16, .hor_offs_align = 1, .out_buf_count = 32, - .pix_limit = &s5p_pix_limit[2], + .pix_limit = &s5p_pix_limit[3], }; /* S5PC100 */ @@ -1874,12 +1894,12 @@ static struct samsung_fimc_driverdata fimc_drvdata_s5pv210 = { }; /* S5PV310, S5PC210 */ -static struct samsung_fimc_driverdata fimc_drvdata_s5pv310 = { +static struct samsung_fimc_driverdata fimc_drvdata_exynos4 = { .variant = { - [0] = &fimc0_variant_s5pv310, - [1] = &fimc0_variant_s5pv310, - [2] = &fimc0_variant_s5pv310, - [3] = &fimc2_variant_s5pv310, + [0] = &fimc0_variant_exynos4, + [1] = &fimc0_variant_exynos4, + [2] = &fimc0_variant_exynos4, + [3] = &fimc2_variant_exynos4, }, .num_entities = 4, .lclk_frequency = 166000000UL, @@ -1893,8 +1913,8 @@ static struct platform_device_id fimc_driver_ids[] = { .name = "s5pv210-fimc", .driver_data = (unsigned long)&fimc_drvdata_s5pv210, }, { - .name = "s5pv310-fimc", - .driver_data = (unsigned long)&fimc_drvdata_s5pv310, + .name = "exynos4-fimc", + .driver_data = (unsigned long)&fimc_drvdata_exynos4, }, {}, }; diff --git a/trunk/drivers/media/video/sh_mobile_ceu_camera.c b/trunk/drivers/media/video/sh_mobile_ceu_camera.c index 3fe54bf41142..134e86bf6d97 100644 
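Returning to the s5p-fimc payload change a few hunks up: the capture and m2m S_FMT paths, as well as TRY_FMT's sizeimage, now derive plane sizes from width x height x depth rather than from the caller-supplied bytesperline. For a hypothetical 1280x720 two-plane format with depth[] = {8, 4} (an NV12-style layout, used here purely as an example) that works out to:

        plane 0:  (1280 * 720 * 8) / 8 = 921600 bytes
        plane 1:  (1280 * 720 * 4) / 8 = 460800 bytes

so a bogus bytesperline from userspace can no longer inflate or shrink the DMA payload.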
--- a/trunk/drivers/media/video/sh_mobile_ceu_camera.c +++ b/trunk/drivers/media/video/sh_mobile_ceu_camera.c @@ -922,7 +922,7 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int /* Try 2560x1920, 1280x960, 640x480, 320x240 */ mf.width = 2560 >> shift; mf.height = 1920 >> shift; - ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, + ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, s_mbus_fmt, &mf); if (ret < 0) return ret; @@ -1224,7 +1224,7 @@ static int client_s_fmt(struct soc_camera_device *icd, struct v4l2_cropcap cap; int ret; - ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, + ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, s_mbus_fmt, mf); if (ret < 0) return ret; @@ -1254,7 +1254,7 @@ static int client_s_fmt(struct soc_camera_device *icd, tmp_h = min(2 * tmp_h, max_height); mf->width = tmp_w; mf->height = tmp_h; - ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, + ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, s_mbus_fmt, mf); dev_geo(dev, "Camera scaled to %ux%u\n", mf->width, mf->height); @@ -1658,7 +1658,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd, mf.code = xlate->code; mf.colorspace = pix->colorspace; - ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, try_mbus_fmt, &mf); + ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, try_mbus_fmt, &mf); if (ret < 0) return ret; @@ -1682,7 +1682,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd, */ mf.width = 2560; mf.height = 1920; - ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, + ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, try_mbus_fmt, &mf); if (ret < 0) { /* Shouldn't actually happen... */ diff --git a/trunk/drivers/media/video/sh_mobile_csi2.c b/trunk/drivers/media/video/sh_mobile_csi2.c index dd1b81b1442b..98b87481fa94 100644 --- a/trunk/drivers/media/video/sh_mobile_csi2.c +++ b/trunk/drivers/media/video/sh_mobile_csi2.c @@ -38,6 +38,8 @@ struct sh_csi2 { void __iomem *base; struct platform_device *pdev; struct sh_csi2_client_config *client; + unsigned long (*query_bus_param)(struct soc_camera_device *); + int (*set_bus_param)(struct soc_camera_device *, unsigned long); }; static int sh_csi2_try_fmt(struct v4l2_subdev *sd, @@ -208,6 +210,7 @@ static int sh_csi2_notify(struct notifier_block *nb, case BUS_NOTIFY_BOUND_DRIVER: snprintf(priv->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s%s", dev_name(v4l2_dev->dev), ".mipi-csi"); + priv->subdev.grp_id = (long)icd; ret = v4l2_device_register_subdev(v4l2_dev, &priv->subdev); dev_dbg(dev, "%s(%p): ret(register_subdev) = %d\n", __func__, priv, ret); if (ret < 0) @@ -215,6 +218,8 @@ static int sh_csi2_notify(struct notifier_block *nb, priv->client = pdata->clients + i; + priv->set_bus_param = icd->ops->set_bus_param; + priv->query_bus_param = icd->ops->query_bus_param; icd->ops->set_bus_param = sh_csi2_set_bus_param; icd->ops->query_bus_param = sh_csi2_query_bus_param; @@ -226,8 +231,10 @@ static int sh_csi2_notify(struct notifier_block *nb, priv->client = NULL; /* Driver is about to be unbound */ - icd->ops->set_bus_param = NULL; - icd->ops->query_bus_param = NULL; + icd->ops->set_bus_param = priv->set_bus_param; + icd->ops->query_bus_param = priv->query_bus_param; + priv->set_bus_param = NULL; + priv->query_bus_param = NULL; v4l2_device_unregister_subdev(&priv->subdev); diff --git a/trunk/drivers/media/video/soc_camera.c b/trunk/drivers/media/video/soc_camera.c index 46284489e4eb..ddb4c091dedc 
100644 --- a/trunk/drivers/media/video/soc_camera.c +++ b/trunk/drivers/media/video/soc_camera.c @@ -136,11 +136,50 @@ unsigned long soc_camera_apply_sensor_flags(struct soc_camera_link *icl, } EXPORT_SYMBOL(soc_camera_apply_sensor_flags); +#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \ + ((x) >> 24) & 0xff + +static int soc_camera_try_fmt(struct soc_camera_device *icd, + struct v4l2_format *f) +{ + struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); + struct v4l2_pix_format *pix = &f->fmt.pix; + int ret; + + dev_dbg(&icd->dev, "TRY_FMT(%c%c%c%c, %ux%u)\n", + pixfmtstr(pix->pixelformat), pix->width, pix->height); + + pix->bytesperline = 0; + pix->sizeimage = 0; + + ret = ici->ops->try_fmt(icd, f); + if (ret < 0) + return ret; + + if (!pix->sizeimage) { + if (!pix->bytesperline) { + const struct soc_camera_format_xlate *xlate; + + xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); + if (!xlate) + return -EINVAL; + + ret = soc_mbus_bytes_per_line(pix->width, + xlate->host_fmt); + if (ret > 0) + pix->bytesperline = ret; + } + if (pix->bytesperline) + pix->sizeimage = pix->bytesperline * pix->height; + } + + return 0; +} + static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct soc_camera_device *icd = file->private_data; - struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); WARN_ON(priv != file->private_data); @@ -149,7 +188,7 @@ static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv, return -EINVAL; /* limit format to hardware capabilities */ - return ici->ops->try_fmt(icd, f); + return soc_camera_try_fmt(icd, f); } static int soc_camera_enum_input(struct file *file, void *priv, @@ -362,9 +401,6 @@ static void soc_camera_free_user_formats(struct soc_camera_device *icd) icd->user_formats = NULL; } -#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \ - ((x) >> 24) & 0xff - /* Called with .vb_lock held, or from the first open(2), see comment there */ static int soc_camera_set_fmt(struct soc_camera_device *icd, struct v4l2_format *f) @@ -377,7 +413,7 @@ static int soc_camera_set_fmt(struct soc_camera_device *icd, pixfmtstr(pix->pixelformat), pix->width, pix->height); /* We always call try_fmt() before set_fmt() or set_crop() */ - ret = ici->ops->try_fmt(icd, f); + ret = soc_camera_try_fmt(icd, f); if (ret < 0) return ret; @@ -996,10 +1032,11 @@ static void soc_camera_free_i2c(struct soc_camera_device *icd) { struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd)); + struct i2c_adapter *adap = client->adapter; dev_set_drvdata(&icd->dev, NULL); v4l2_device_unregister_subdev(i2c_get_clientdata(client)); i2c_unregister_device(client); - i2c_put_adapter(client->adapter); + i2c_put_adapter(adap); } #else #define soc_camera_init_i2c(icd, icl) (-ENODEV) @@ -1071,6 +1108,9 @@ static int soc_camera_probe(struct device *dev) } } + sd = soc_camera_to_subdev(icd); + sd->grp_id = (long)icd; + /* At this point client .probe() should have run already */ ret = soc_camera_init_user_formats(icd); if (ret < 0) @@ -1092,7 +1132,6 @@ static int soc_camera_probe(struct device *dev) goto evidstart; /* Try to improve our guess of a reasonable window format */ - sd = soc_camera_to_subdev(icd); if (!v4l2_subdev_call(sd, video, g_mbus_fmt, &mf)) { icd->user_width = mf.width; icd->user_height = mf.height; diff --git a/trunk/drivers/media/video/tda9840.c b/trunk/drivers/media/video/tda9840.c index 5d4cf3b3d435..22fa8202d5ca 100644 --- 
a/trunk/drivers/media/video/tda9840.c +++ b/trunk/drivers/media/video/tda9840.c @@ -171,7 +171,7 @@ static int tda9840_probe(struct i2c_client *client, v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); - sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); + sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); if (sd == NULL) return -ENOMEM; v4l2_i2c_subdev_init(sd, client, &tda9840_ops); diff --git a/trunk/drivers/media/video/tea6415c.c b/trunk/drivers/media/video/tea6415c.c index 19621ed523ec..827425c5b866 100644 --- a/trunk/drivers/media/video/tea6415c.c +++ b/trunk/drivers/media/video/tea6415c.c @@ -152,7 +152,7 @@ static int tea6415c_probe(struct i2c_client *client, v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); - sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); + sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); if (sd == NULL) return -ENOMEM; v4l2_i2c_subdev_init(sd, client, &tea6415c_ops); diff --git a/trunk/drivers/media/video/tea6420.c b/trunk/drivers/media/video/tea6420.c index 5ea840401f21..f350b6c24500 100644 --- a/trunk/drivers/media/video/tea6420.c +++ b/trunk/drivers/media/video/tea6420.c @@ -125,7 +125,7 @@ static int tea6420_probe(struct i2c_client *client, v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); - sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); + sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); if (sd == NULL) return -ENOMEM; v4l2_i2c_subdev_init(sd, client, &tea6420_ops); diff --git a/trunk/drivers/media/video/upd64031a.c b/trunk/drivers/media/video/upd64031a.c index f8138c75be8b..1aab96a88203 100644 --- a/trunk/drivers/media/video/upd64031a.c +++ b/trunk/drivers/media/video/upd64031a.c @@ -230,7 +230,7 @@ static int upd64031a_probe(struct i2c_client *client, v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); - state = kmalloc(sizeof(struct upd64031a_state), GFP_KERNEL); + state = kzalloc(sizeof(struct upd64031a_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; sd = &state->sd; diff --git a/trunk/drivers/media/video/upd64083.c b/trunk/drivers/media/video/upd64083.c index 28e0e6b6ca84..9bbe61700fd5 100644 --- a/trunk/drivers/media/video/upd64083.c +++ b/trunk/drivers/media/video/upd64083.c @@ -202,7 +202,7 @@ static int upd64083_probe(struct i2c_client *client, v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); - state = kmalloc(sizeof(struct upd64083_state), GFP_KERNEL); + state = kzalloc(sizeof(struct upd64083_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; sd = &state->sd; diff --git a/trunk/drivers/media/video/v4l2-dev.c b/trunk/drivers/media/video/v4l2-dev.c index 498e6742579e..6dc7196296b3 100644 --- a/trunk/drivers/media/video/v4l2-dev.c +++ b/trunk/drivers/media/video/v4l2-dev.c @@ -389,7 +389,8 @@ static int v4l2_open(struct inode *inode, struct file *filp) video_get(vdev); mutex_unlock(&videodev_lock); #if defined(CONFIG_MEDIA_CONTROLLER) - if (vdev->v4l2_dev && vdev->v4l2_dev->mdev) { + if (vdev->v4l2_dev && vdev->v4l2_dev->mdev && + vdev->vfl_type != VFL_TYPE_SUBDEV) { entity = media_entity_get(&vdev->entity); if (!entity) { ret = -EBUSY; @@ -415,7 +416,8 @@ static int v4l2_open(struct inode *inode, struct file *filp) /* decrease the refcount in case of an error */ if (ret) { #if defined(CONFIG_MEDIA_CONTROLLER) - if (vdev->v4l2_dev && vdev->v4l2_dev->mdev) + if (vdev->v4l2_dev && vdev->v4l2_dev->mdev && + vdev->vfl_type != 
VFL_TYPE_SUBDEV) media_entity_put(entity); #endif video_put(vdev); @@ -437,7 +439,8 @@ static int v4l2_release(struct inode *inode, struct file *filp) mutex_unlock(vdev->lock); } #if defined(CONFIG_MEDIA_CONTROLLER) - if (vdev->v4l2_dev && vdev->v4l2_dev->mdev) + if (vdev->v4l2_dev && vdev->v4l2_dev->mdev && + vdev->vfl_type != VFL_TYPE_SUBDEV) media_entity_put(&vdev->entity); #endif /* decrease the refcount unconditionally since the release() @@ -686,7 +689,8 @@ int __video_register_device(struct video_device *vdev, int type, int nr, #if defined(CONFIG_MEDIA_CONTROLLER) /* Part 5: Register the entity. */ - if (vdev->v4l2_dev && vdev->v4l2_dev->mdev) { + if (vdev->v4l2_dev && vdev->v4l2_dev->mdev && + vdev->vfl_type != VFL_TYPE_SUBDEV) { vdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L; vdev->entity.name = vdev->name; vdev->entity.v4l.major = VIDEO_MAJOR; @@ -733,7 +737,8 @@ void video_unregister_device(struct video_device *vdev) return; #if defined(CONFIG_MEDIA_CONTROLLER) - if (vdev->v4l2_dev && vdev->v4l2_dev->mdev) + if (vdev->v4l2_dev && vdev->v4l2_dev->mdev && + vdev->vfl_type != VFL_TYPE_SUBDEV) media_device_unregister_entity(&vdev->entity); #endif diff --git a/trunk/drivers/media/video/v4l2-device.c b/trunk/drivers/media/video/v4l2-device.c index 5aeaf876ba9b..4aae501f02d0 100644 --- a/trunk/drivers/media/video/v4l2-device.c +++ b/trunk/drivers/media/video/v4l2-device.c @@ -155,8 +155,10 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, sd->v4l2_dev = v4l2_dev; if (sd->internal_ops && sd->internal_ops->registered) { err = sd->internal_ops->registered(sd); - if (err) + if (err) { + module_put(sd->owner); return err; + } } /* This just returns 0 if either of the two args is NULL */ @@ -164,6 +166,7 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, if (err) { if (sd->internal_ops && sd->internal_ops->unregistered) sd->internal_ops->unregistered(sd); + module_put(sd->owner); return err; } diff --git a/trunk/drivers/media/video/v4l2-subdev.c b/trunk/drivers/media/video/v4l2-subdev.c index 0b8064490676..812729ebf09e 100644 --- a/trunk/drivers/media/video/v4l2-subdev.c +++ b/trunk/drivers/media/video/v4l2-subdev.c @@ -155,25 +155,25 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg) switch (cmd) { case VIDIOC_QUERYCTRL: - return v4l2_subdev_queryctrl(sd, arg); + return v4l2_queryctrl(sd->ctrl_handler, arg); case VIDIOC_QUERYMENU: - return v4l2_subdev_querymenu(sd, arg); + return v4l2_querymenu(sd->ctrl_handler, arg); case VIDIOC_G_CTRL: - return v4l2_subdev_g_ctrl(sd, arg); + return v4l2_g_ctrl(sd->ctrl_handler, arg); case VIDIOC_S_CTRL: - return v4l2_subdev_s_ctrl(sd, arg); + return v4l2_s_ctrl(sd->ctrl_handler, arg); case VIDIOC_G_EXT_CTRLS: - return v4l2_subdev_g_ext_ctrls(sd, arg); + return v4l2_g_ext_ctrls(sd->ctrl_handler, arg); case VIDIOC_S_EXT_CTRLS: - return v4l2_subdev_s_ext_ctrls(sd, arg); + return v4l2_s_ext_ctrls(sd->ctrl_handler, arg); case VIDIOC_TRY_EXT_CTRLS: - return v4l2_subdev_try_ext_ctrls(sd, arg); + return v4l2_try_ext_ctrls(sd->ctrl_handler, arg); case VIDIOC_DQEVENT: if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) diff --git a/trunk/drivers/media/video/videobuf-dma-contig.c b/trunk/drivers/media/video/videobuf-dma-contig.c index c4742fc15529..c9691115f2d2 100644 --- a/trunk/drivers/media/video/videobuf-dma-contig.c +++ b/trunk/drivers/media/video/videobuf-dma-contig.c @@ -300,7 +300,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q, vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); retval = 
remap_pfn_range(vma, vma->vm_start, - PFN_DOWN(virt_to_phys(mem->vaddr)), + mem->dma_handle >> PAGE_SHIFT, size, vma->vm_page_prot); if (retval) { dev_err(q->dev, "mmap: remap failed with error %d. ", retval); diff --git a/trunk/drivers/media/video/videobuf2-core.c b/trunk/drivers/media/video/videobuf2-core.c index 6698c77e0f64..6ba1461d51ef 100644 --- a/trunk/drivers/media/video/videobuf2-core.c +++ b/trunk/drivers/media/video/videobuf2-core.c @@ -37,6 +37,9 @@ module_param(debug, int, 0644); #define call_qop(q, op, args...) \ (((q)->ops->op) ? ((q)->ops->op(args)) : 0) +#define V4L2_BUFFER_STATE_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \ + V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR) + /** * __vb2_buf_mem_alloc() - allocate video memory for the given buffer */ @@ -51,7 +54,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb, for (plane = 0; plane < vb->num_planes; ++plane) { mem_priv = call_memop(q, plane, alloc, q->alloc_ctx[plane], plane_sizes[plane]); - if (!mem_priv) + if (IS_ERR_OR_NULL(mem_priv)) goto free; /* Associate allocator private data with this plane */ @@ -284,7 +287,7 @@ static int __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b) struct vb2_queue *q = vb->vb2_queue; int ret = 0; - /* Copy back data such as timestamp, input, etc. */ + /* Copy back data such as timestamp, flags, input, etc. */ memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m)); b->input = vb->v4l2_buf.input; b->reserved = vb->v4l2_buf.reserved; @@ -313,7 +316,10 @@ static int __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b) b->m.userptr = vb->v4l2_planes[0].m.userptr; } - b->flags = 0; + /* + * Clear any buffer state related flags. + */ + b->flags &= ~V4L2_BUFFER_STATE_FLAGS; switch (vb->state) { case VB2_BUF_STATE_QUEUED: @@ -519,6 +525,7 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME); memset(plane_sizes, 0, sizeof(plane_sizes)); memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx)); + q->memory = req->memory; /* * Ask the driver how many buffers and planes per buffer it requires. @@ -560,8 +567,6 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) ret = num_buffers; } - q->memory = req->memory; - /* * Return the number of successfully allocated buffers * to the userspace. 
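A minimal sketch of the two directions in which the new V4L2_BUFFER_STATE_FLAGS mask is applied: the first fragment lives in __fill_v4l2_buffer() above, the second in the __fill_vb2_buffer() hunk that follows (surrounding code elided; the DONE case stands in for the full state switch):

        /* DQBUF direction: return the caller's flags but recompute the state bits */
        b->flags &= ~V4L2_BUFFER_STATE_FLAGS;
        if (vb->state == VB2_BUF_STATE_DONE)
                b->flags |= V4L2_BUF_FLAG_DONE;

        /* QBUF direction: accept caller flags, never let userspace set state bits */
        vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_STATE_FLAGS;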
@@ -715,6 +720,8 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b, vb->v4l2_buf.field = b->field; vb->v4l2_buf.timestamp = b->timestamp; + vb->v4l2_buf.input = b->input; + vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_STATE_FLAGS; return 0; } diff --git a/trunk/drivers/media/video/videobuf2-dma-contig.c b/trunk/drivers/media/video/videobuf2-dma-contig.c index 58205d596138..a790a5f8c06f 100644 --- a/trunk/drivers/media/video/videobuf2-dma-contig.c +++ b/trunk/drivers/media/video/videobuf2-dma-contig.c @@ -46,7 +46,7 @@ static void *vb2_dma_contig_alloc(void *alloc_ctx, unsigned long size) GFP_KERNEL); if (!buf->vaddr) { dev_err(conf->dev, "dma_alloc_coherent of size %ld failed\n", - buf->size); + size); kfree(buf); return ERR_PTR(-ENOMEM); } diff --git a/trunk/drivers/message/fusion/mptbase.h b/trunk/drivers/message/fusion/mptbase.h index 1735c84ff757..fe902338539b 100644 --- a/trunk/drivers/message/fusion/mptbase.h +++ b/trunk/drivers/message/fusion/mptbase.h @@ -76,8 +76,8 @@ #define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR #endif -#define MPT_LINUX_VERSION_COMMON "3.04.18" -#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.18" +#define MPT_LINUX_VERSION_COMMON "3.04.19" +#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.19" #define WHAT_MAGIC_STRING "@" "(" "#" ")" #define show_mptmod_ver(s,ver) \ diff --git a/trunk/drivers/message/fusion/mptsas.c b/trunk/drivers/message/fusion/mptsas.c index 66f94125de4e..7596aecd5072 100644 --- a/trunk/drivers/message/fusion/mptsas.c +++ b/trunk/drivers/message/fusion/mptsas.c @@ -5012,7 +5012,6 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply) (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)) { VirtTarget *vtarget = NULL; u8 id, channel; - u32 log_info = le32_to_cpu(reply->IOCLogInfo); id = sas_event_data->TargetID; channel = sas_event_data->Bus; @@ -5023,7 +5022,8 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply) "LogInfo (0x%x) available for " "INTERNAL_DEVICE_RESET" "fw_id %d fw_channel %d\n", ioc->name, - log_info, id, channel)); + le32_to_cpu(reply->IOCLogInfo), + id, channel)); if (vtarget->raidVolume) { devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Skipping Raid Volume for inDMD\n", diff --git a/trunk/drivers/message/fusion/mptscsih.c b/trunk/drivers/message/fusion/mptscsih.c index 0d9b82a44540..a1d4ee6671be 100644 --- a/trunk/drivers/message/fusion/mptscsih.c +++ b/trunk/drivers/message/fusion/mptscsih.c @@ -1415,11 +1415,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p, done()=%p\n", ioc->name, SCpnt, done)); - if (ioc->taskmgmt_quiesce_io) { - dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "qcmd: SCpnt=%p timeout + 60HZ\n", - ioc->name, SCpnt)); + if (ioc->taskmgmt_quiesce_io) return SCSI_MLQUEUE_HOST_BUSY; - } /* * Put together a MPT SCSI request... @@ -1773,7 +1770,6 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) int scpnt_idx; int retval; VirtDevice *vdevice; - ulong sn = SCpnt->serial_number; MPT_ADAPTER *ioc; /* If we can't locate our host adapter structure, return FAILED status. @@ -1859,8 +1855,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) vdevice->vtarget->id, vdevice->lun, ctx2abort, mptscsih_get_tm_timeout(ioc)); - if (SCPNT_TO_LOOKUP_IDX(ioc, SCpnt) == scpnt_idx && - SCpnt->serial_number == sn) { + if (SCPNT_TO_LOOKUP_IDX(ioc, SCpnt) == scpnt_idx) { dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: command still in active list! 
(sc=%p)\n", ioc->name, SCpnt)); @@ -1873,9 +1868,9 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) } out: - printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p) (sn=%ld)\n", + printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p)\n", ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval, - SCpnt, SCpnt->serial_number); + SCpnt); return retval; } diff --git a/trunk/drivers/message/fusion/mptspi.c b/trunk/drivers/message/fusion/mptspi.c index 6d9568d2ec59..8f61ba6aac23 100644 --- a/trunk/drivers/message/fusion/mptspi.c +++ b/trunk/drivers/message/fusion/mptspi.c @@ -867,6 +867,10 @@ static int mptspi_write_spi_device_pg1(struct scsi_target *starget, struct _x_config_parms cfg; struct _CONFIG_PAGE_HEADER hdr; int err = -EBUSY; + u32 nego_parms; + u32 period; + struct scsi_device *sdev; + int i; /* don't allow updating nego parameters on RAID devices */ if (starget->channel == 0 && @@ -904,6 +908,24 @@ static int mptspi_write_spi_device_pg1(struct scsi_target *starget, pg1->Header.PageNumber = hdr.PageNumber; pg1->Header.PageType = hdr.PageType; + nego_parms = le32_to_cpu(pg1->RequestedParameters); + period = (nego_parms & MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK) >> + MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; + if (period == 8) { + /* Turn on inline data padding for TAPE when running U320 */ + for (i = 0 ; i < 16; i++) { + sdev = scsi_device_lookup_by_target(starget, i); + if (sdev && sdev->type == TYPE_TAPE) { + sdev_printk(KERN_DEBUG, sdev, MYIOC_s_FMT + "IDP:ON\n", ioc->name); + nego_parms |= MPI_SCSIDEVPAGE1_RP_IDP; + pg1->RequestedParameters = + cpu_to_le32(nego_parms); + break; + } + } + } + mptspi_print_write_nego(hd, starget, le32_to_cpu(pg1->RequestedParameters)); if (mpt_config(ioc, &cfg)) { diff --git a/trunk/drivers/message/i2o/i2o_block.c b/trunk/drivers/message/i2o/i2o_block.c index 643ad52e3ca2..4796bbf0ae4e 100644 --- a/trunk/drivers/message/i2o/i2o_block.c +++ b/trunk/drivers/message/i2o/i2o_block.c @@ -1000,7 +1000,6 @@ static struct i2o_block_device *i2o_block_device_alloc(void) gd->major = I2O_MAJOR; gd->queue = queue; gd->fops = &i2o_block_fops; - gd->events = DISK_EVENT_MEDIA_CHANGE; gd->private_data = dev; dev->gd = gd; diff --git a/trunk/drivers/message/i2o/i2o_scsi.c b/trunk/drivers/message/i2o/i2o_scsi.c index f003957e8e1c..74fbe56321ff 100644 --- a/trunk/drivers/message/i2o/i2o_scsi.c +++ b/trunk/drivers/message/i2o/i2o_scsi.c @@ -361,7 +361,7 @@ static int i2o_scsi_reply(struct i2o_controller *c, u32 m, */ error = le32_to_cpu(msg->body[0]); - osm_debug("Completed %ld\n", cmd->serial_number); + osm_debug("Completed %0x%p\n", cmd); cmd->result = error & 0xff; /* @@ -678,7 +678,7 @@ static int i2o_scsi_queuecommand_lck(struct scsi_cmnd *SCpnt, /* Queue the message */ i2o_msg_post(c, msg); - osm_debug("Issued %ld\n", SCpnt->serial_number); + osm_debug("Issued %0x%p\n", SCpnt); return 0; diff --git a/trunk/drivers/mfd/asic3.c b/trunk/drivers/mfd/asic3.c index d4a851c6b5bf..0b4d5b23bec9 100644 --- a/trunk/drivers/mfd/asic3.c +++ b/trunk/drivers/mfd/asic3.c @@ -144,7 +144,7 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc) int iter, i; unsigned long flags; - data->chip->irq_ack(irq_data); + data->chip->irq_ack(data); for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { u32 status; diff --git a/trunk/drivers/mfd/mfd-core.c b/trunk/drivers/mfd/mfd-core.c index d01574d98870..f4c8c844b913 100644 --- a/trunk/drivers/mfd/mfd-core.c +++ b/trunk/drivers/mfd/mfd-core.c @@ -55,6 +55,19 @@ int mfd_cell_disable(struct platform_device *pdev) } 
EXPORT_SYMBOL(mfd_cell_disable); +static int mfd_platform_add_cell(struct platform_device *pdev, + const struct mfd_cell *cell) +{ + if (!cell) + return 0; + + pdev->mfd_cell = kmemdup(cell, sizeof(*cell), GFP_KERNEL); + if (!pdev->mfd_cell) + return -ENOMEM; + + return 0; +} + static int mfd_add_device(struct device *parent, int id, const struct mfd_cell *cell, struct resource *mem_base, @@ -75,7 +88,7 @@ static int mfd_add_device(struct device *parent, int id, pdev->dev.parent = parent; - ret = platform_device_add_data(pdev, cell, sizeof(*cell)); + ret = mfd_platform_add_cell(pdev, cell); if (ret) goto fail_res; @@ -123,7 +136,6 @@ static int mfd_add_device(struct device *parent, int id, return 0; -/* platform_device_del(pdev); */ fail_res: kfree(res); fail_device: diff --git a/trunk/drivers/mfd/omap-usb-host.c b/trunk/drivers/mfd/omap-usb-host.c index 53450f433f10..3ab9ffa00aad 100644 --- a/trunk/drivers/mfd/omap-usb-host.c +++ b/trunk/drivers/mfd/omap-usb-host.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #define USBHS_DRIVER_NAME "usbhs-omap" @@ -700,8 +699,7 @@ static int usbhs_enable(struct device *dev) dev_dbg(dev, "starting TI HSUSB Controller\n"); if (!pdata) { dev_dbg(dev, "missing platform_data\n"); - ret = -ENODEV; - goto end_enable; + return -ENODEV; } spin_lock_irqsave(&omap->lock, flags); @@ -719,14 +717,14 @@ static int usbhs_enable(struct device *dev) gpio_request(pdata->ehci_data->reset_gpio_port[0], "USB1 PHY reset"); gpio_direction_output - (pdata->ehci_data->reset_gpio_port[0], 1); + (pdata->ehci_data->reset_gpio_port[0], 0); } if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) { gpio_request(pdata->ehci_data->reset_gpio_port[1], "USB2 PHY reset"); gpio_direction_output - (pdata->ehci_data->reset_gpio_port[1], 1); + (pdata->ehci_data->reset_gpio_port[1], 0); } /* Hold the PHY in RESET for enough time till DIR is high */ @@ -906,16 +904,17 @@ static int usbhs_enable(struct device *dev) if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) gpio_set_value - (pdata->ehci_data->reset_gpio_port[0], 0); + (pdata->ehci_data->reset_gpio_port[0], 1); if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) gpio_set_value - (pdata->ehci_data->reset_gpio_port[1], 0); + (pdata->ehci_data->reset_gpio_port[1], 1); } end_count: omap->count++; - goto end_enable; + spin_unlock_irqrestore(&omap->lock, flags); + return 0; err_tll: if (pdata->ehci_data->phy_reset) { @@ -931,8 +930,6 @@ static int usbhs_enable(struct device *dev) clk_disable(omap->usbhost_fs_fck); clk_disable(omap->usbhost_hs_fck); clk_disable(omap->usbhost_ick); - -end_enable: spin_unlock_irqrestore(&omap->lock, flags); return ret; } diff --git a/trunk/drivers/mfd/twl4030-power.c b/trunk/drivers/mfd/twl4030-power.c index 16422de0823a..2c0d4d16491a 100644 --- a/trunk/drivers/mfd/twl4030-power.c +++ b/trunk/drivers/mfd/twl4030-power.c @@ -447,12 +447,13 @@ static int __init load_twl4030_script(struct twl4030_script *tscript, if (err) goto out; } - if (tscript->flags & TWL4030_SLEEP_SCRIPT) + if (tscript->flags & TWL4030_SLEEP_SCRIPT) { if (order) pr_warning("TWL4030: Bad order of scripts (sleep "\ "script before wakeup) Leads to boot"\ "failure on some boards\n"); err = twl4030_config_sleep_sequence(address); + } out: return err; } diff --git a/trunk/drivers/misc/Kconfig b/trunk/drivers/misc/Kconfig index 4e007c6a4b44..d80dcdee88f3 100644 --- a/trunk/drivers/misc/Kconfig +++ b/trunk/drivers/misc/Kconfig @@ -481,5 +481,6 @@ source "drivers/misc/cb710/Kconfig" source 
"drivers/misc/iwmc3200top/Kconfig" source "drivers/misc/ti-st/Kconfig" source "drivers/misc/lis3lv02d/Kconfig" +source "drivers/misc/carma/Kconfig" endif # MISC_DEVICES diff --git a/trunk/drivers/misc/Makefile b/trunk/drivers/misc/Makefile index f5468602961f..848e8464faab 100644 --- a/trunk/drivers/misc/Makefile +++ b/trunk/drivers/misc/Makefile @@ -44,3 +44,4 @@ obj-$(CONFIG_PCH_PHUB) += pch_phub.o obj-y += ti-st/ obj-$(CONFIG_AB8500_PWM) += ab8500-pwm.o obj-y += lis3lv02d/ +obj-y += carma/ diff --git a/trunk/drivers/misc/carma/Kconfig b/trunk/drivers/misc/carma/Kconfig new file mode 100644 index 000000000000..c90370ed712b --- /dev/null +++ b/trunk/drivers/misc/carma/Kconfig @@ -0,0 +1,17 @@ +config CARMA_FPGA + tristate "CARMA DATA-FPGA Access Driver" + depends on FSL_SOC && PPC_83xx && MEDIA_SUPPORT && HAS_DMA && FSL_DMA + select VIDEOBUF_DMA_SG + default n + help + Say Y here to include support for communicating with the data + processing FPGAs on the OVRO CARMA board. + +config CARMA_FPGA_PROGRAM + tristate "CARMA DATA-FPGA Programmer" + depends on FSL_SOC && PPC_83xx && MEDIA_SUPPORT && HAS_DMA && FSL_DMA + select VIDEOBUF_DMA_SG + default n + help + Say Y here to include support for programming the data processing + FPGAs on the OVRO CARMA board. diff --git a/trunk/drivers/misc/carma/Makefile b/trunk/drivers/misc/carma/Makefile new file mode 100644 index 000000000000..ff36ac2ce534 --- /dev/null +++ b/trunk/drivers/misc/carma/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_CARMA_FPGA) += carma-fpga.o +obj-$(CONFIG_CARMA_FPGA_PROGRAM) += carma-fpga-program.o diff --git a/trunk/drivers/misc/carma/carma-fpga-program.c b/trunk/drivers/misc/carma/carma-fpga-program.c new file mode 100644 index 000000000000..7ce6065dc20e --- /dev/null +++ b/trunk/drivers/misc/carma/carma-fpga-program.c @@ -0,0 +1,1141 @@ +/* + * CARMA Board DATA-FPGA Programmer + * + * Copyright (c) 2009-2011 Ira W. Snyder + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* MPC8349EMDS specific get_immrbase() */ +#include + +static const char drv_name[] = "carma-fpga-program"; + +/* + * Firmware images are always this exact size + * + * 12849552 bytes for a CARMA Digitizer Board (EP2S90 FPGAs) + * 18662880 bytes for a CARMA Correlator Board (EP2S130 FPGAs) + */ +#define FW_SIZE_EP2S90 12849552 +#define FW_SIZE_EP2S130 18662880 + +struct fpga_dev { + struct miscdevice miscdev; + + /* Reference count */ + struct kref ref; + + /* Device Registers */ + struct device *dev; + void __iomem *regs; + void __iomem *immr; + + /* Freescale DMA Device */ + struct dma_chan *chan; + + /* Interrupts */ + int irq, status; + struct completion completion; + + /* FPGA Bitfile */ + struct mutex lock; + + struct videobuf_dmabuf vb; + bool vb_allocated; + + /* max size and written bytes */ + size_t fw_size; + size_t bytes; +}; + +/* + * FPGA Bitfile Helpers + */ + +/** + * fpga_drop_firmware_data() - drop the bitfile image from memory + * @priv: the driver's private data structure + * + * LOCKING: must hold priv->lock + */ +static void fpga_drop_firmware_data(struct fpga_dev *priv) +{ + videobuf_dma_free(&priv->vb); + priv->vb_allocated = false; + priv->bytes = 0; +} + +/* + * Private Data Reference Count + */ + +static void fpga_dev_remove(struct kref *ref) +{ + struct fpga_dev *priv = container_of(ref, struct fpga_dev, ref); + + /* free any firmware image that was not programmed */ + fpga_drop_firmware_data(priv); + + mutex_destroy(&priv->lock); + kfree(priv); +} + +/* + * LED Trigger (could be a seperate module) + */ + +/* + * NOTE: this whole thing does have the problem that whenever the led's are + * NOTE: first set to use the fpga trigger, they could be in the wrong state + */ + +DEFINE_LED_TRIGGER(ledtrig_fpga); + +static void ledtrig_fpga_programmed(bool enabled) +{ + if (enabled) + led_trigger_event(ledtrig_fpga, LED_FULL); + else + led_trigger_event(ledtrig_fpga, LED_OFF); +} + +/* + * FPGA Register Helpers + */ + +/* Register Definitions */ +#define FPGA_CONFIG_CONTROL 0x40 +#define FPGA_CONFIG_STATUS 0x44 +#define FPGA_CONFIG_FIFO_SIZE 0x48 +#define FPGA_CONFIG_FIFO_USED 0x4C +#define FPGA_CONFIG_TOTAL_BYTE_COUNT 0x50 +#define FPGA_CONFIG_CUR_BYTE_COUNT 0x54 + +#define FPGA_FIFO_ADDRESS 0x3000 + +static int fpga_fifo_size(void __iomem *regs) +{ + return ioread32be(regs + FPGA_CONFIG_FIFO_SIZE); +} + +#define CFG_STATUS_ERR_MASK 0xfffe + +static int fpga_config_error(void __iomem *regs) +{ + return ioread32be(regs + FPGA_CONFIG_STATUS) & CFG_STATUS_ERR_MASK; +} + +static int fpga_fifo_empty(void __iomem *regs) +{ + return ioread32be(regs + FPGA_CONFIG_FIFO_USED) == 0; +} + +static void fpga_fifo_write(void __iomem *regs, u32 val) +{ + iowrite32be(val, regs + FPGA_FIFO_ADDRESS); +} + +static void fpga_set_byte_count(void __iomem *regs, u32 count) +{ + iowrite32be(count, regs + FPGA_CONFIG_TOTAL_BYTE_COUNT); +} + +#define CFG_CTL_ENABLE (1 << 0) +#define CFG_CTL_RESET (1 << 1) +#define CFG_CTL_DMA (1 << 2) + +static void fpga_programmer_enable(struct fpga_dev *priv, bool dma) +{ + u32 val; + + val = (dma) ? 
(CFG_CTL_ENABLE | CFG_CTL_DMA) : CFG_CTL_ENABLE; + iowrite32be(val, priv->regs + FPGA_CONFIG_CONTROL); +} + +static void fpga_programmer_disable(struct fpga_dev *priv) +{ + iowrite32be(0x0, priv->regs + FPGA_CONFIG_CONTROL); +} + +static void fpga_dump_registers(struct fpga_dev *priv) +{ + u32 control, status, size, used, total, curr; + + /* good status: do nothing */ + if (priv->status == 0) + return; + + /* Dump all status registers */ + control = ioread32be(priv->regs + FPGA_CONFIG_CONTROL); + status = ioread32be(priv->regs + FPGA_CONFIG_STATUS); + size = ioread32be(priv->regs + FPGA_CONFIG_FIFO_SIZE); + used = ioread32be(priv->regs + FPGA_CONFIG_FIFO_USED); + total = ioread32be(priv->regs + FPGA_CONFIG_TOTAL_BYTE_COUNT); + curr = ioread32be(priv->regs + FPGA_CONFIG_CUR_BYTE_COUNT); + + dev_err(priv->dev, "Configuration failed, dumping status registers\n"); + dev_err(priv->dev, "Control: 0x%.8x\n", control); + dev_err(priv->dev, "Status: 0x%.8x\n", status); + dev_err(priv->dev, "FIFO Size: 0x%.8x\n", size); + dev_err(priv->dev, "FIFO Used: 0x%.8x\n", used); + dev_err(priv->dev, "FIFO Total: 0x%.8x\n", total); + dev_err(priv->dev, "FIFO Curr: 0x%.8x\n", curr); +} + +/* + * FPGA Power Supply Code + */ + +#define CTL_PWR_CONTROL 0x2006 +#define CTL_PWR_STATUS 0x200A +#define CTL_PWR_FAIL 0x200B + +#define PWR_CONTROL_ENABLE 0x01 + +#define PWR_STATUS_ERROR_MASK 0x10 +#define PWR_STATUS_GOOD 0x0f + +/* + * Determine if the FPGA power is good for all supplies + */ +static bool fpga_power_good(struct fpga_dev *priv) +{ + u8 val; + + val = ioread8(priv->regs + CTL_PWR_STATUS); + if (val & PWR_STATUS_ERROR_MASK) + return false; + + return val == PWR_STATUS_GOOD; +} + +/* + * Disable the FPGA power supplies + */ +static void fpga_disable_power_supplies(struct fpga_dev *priv) +{ + unsigned long start; + u8 val; + + iowrite8(0x0, priv->regs + CTL_PWR_CONTROL); + + /* + * Wait 500ms for the power rails to discharge + * + * Without this delay, the CTL-CPLD state machine can get into a + * state where it is waiting for the power-goods to assert, but they + * never do. This only happens when enabling and disabling the + * power sequencer very rapidly. + * + * The loop below will also wait for the power goods to de-assert, + * but testing has shown that they are always disabled by the time + * the sleep completes. However, omitting the sleep and only waiting + * for the power-goods to de-assert was not sufficient to ensure + * that the power sequencer would not wedge itself. + */ + msleep(500); + + start = jiffies; + while (time_before(jiffies, start + HZ)) { + val = ioread8(priv->regs + CTL_PWR_STATUS); + if (!(val & PWR_STATUS_GOOD)) + break; + + usleep_range(5000, 10000); + } + + val = ioread8(priv->regs + CTL_PWR_STATUS); + if (val & PWR_STATUS_GOOD) { + dev_err(priv->dev, "power disable failed: " + "power goods: status 0x%.2x\n", val); + } + + if (val & PWR_STATUS_ERROR_MASK) { + dev_err(priv->dev, "power disable failed: " + "alarm bit set: status 0x%.2x\n", val); + } +} + +/** + * fpga_enable_power_supplies() - enable the DATA-FPGA power supplies + * @priv: the driver's private data structure + * + * Enable the DATA-FPGA power supplies, waiting up to 1 second for + * them to enable successfully. 
+ * + * Returns 0 on success, -ERRNO otherwise + */ +static int fpga_enable_power_supplies(struct fpga_dev *priv) +{ + unsigned long start = jiffies; + + if (fpga_power_good(priv)) { + dev_dbg(priv->dev, "power was already good\n"); + return 0; + } + + iowrite8(PWR_CONTROL_ENABLE, priv->regs + CTL_PWR_CONTROL); + while (time_before(jiffies, start + HZ)) { + if (fpga_power_good(priv)) + return 0; + + usleep_range(5000, 10000); + } + + return fpga_power_good(priv) ? 0 : -ETIMEDOUT; +} + +/* + * Determine if the FPGA power supplies are all enabled + */ +static bool fpga_power_enabled(struct fpga_dev *priv) +{ + u8 val; + + val = ioread8(priv->regs + CTL_PWR_CONTROL); + if (val & PWR_CONTROL_ENABLE) + return true; + + return false; +} + +/* + * Determine if the FPGA's are programmed and running correctly + */ +static bool fpga_running(struct fpga_dev *priv) +{ + if (!fpga_power_good(priv)) + return false; + + /* Check the config done bit */ + return ioread32be(priv->regs + FPGA_CONFIG_STATUS) & (1 << 18); +} + +/* + * FPGA Programming Code + */ + +/** + * fpga_program_block() - put a block of data into the programmer's FIFO + * @priv: the driver's private data structure + * @buf: the data to program + * @count: the length of data to program (must be a multiple of 4 bytes) + * + * Returns 0 on success, -ERRNO otherwise + */ +static int fpga_program_block(struct fpga_dev *priv, void *buf, size_t count) +{ + u32 *data = buf; + int size = fpga_fifo_size(priv->regs); + int i, len; + unsigned long timeout; + + /* enforce correct data length for the FIFO */ + BUG_ON(count % 4 != 0); + + while (count > 0) { + + /* Get the size of the block to write (maximum is FIFO_SIZE) */ + len = min_t(size_t, count, size); + timeout = jiffies + HZ / 4; + + /* Write the block */ + for (i = 0; i < len / 4; i++) + fpga_fifo_write(priv->regs, data[i]); + + /* Update the amounts left */ + count -= len; + data += len / 4; + + /* Wait for the fifo to empty */ + while (true) { + + if (fpga_fifo_empty(priv->regs)) { + break; + } else { + dev_dbg(priv->dev, "Fifo not empty\n"); + cpu_relax(); + } + + if (fpga_config_error(priv->regs)) { + dev_err(priv->dev, "Error detected\n"); + return -EIO; + } + + if (time_after(jiffies, timeout)) { + dev_err(priv->dev, "Fifo drain timeout\n"); + return -ETIMEDOUT; + } + + usleep_range(5000, 10000); + } + } + + return 0; +} + +/** + * fpga_program_cpu() - program the DATA-FPGA's using the CPU + * @priv: the driver's private data structure + * + * This is useful when the DMA programming method fails. It is possible to + * wedge the Freescale DMA controller such that the DMA programming method + * always fails. This method has always succeeded. 
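To make the FIFO chunking in fpga_program_block() above concrete: with, say, an 8 KiB programmer FIFO and 9 KiB of bitfile left, the loop writes one 8 KiB block, spins until FPGA_CONFIG_FIFO_USED drains to zero (bailing out on a configuration error or after the 250 ms per-chunk timeout), then writes the remaining 1 KiB; the BUG_ON enforces that every chunk is a whole number of 32-bit FIFO words.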
+ * + * Returns 0 on success, -ERRNO otherwise + */ +static noinline int fpga_program_cpu(struct fpga_dev *priv) +{ + int ret; + + /* Disable the programmer */ + fpga_programmer_disable(priv); + + /* Set the total byte count */ + fpga_set_byte_count(priv->regs, priv->bytes); + dev_dbg(priv->dev, "total byte count %u bytes\n", priv->bytes); + + /* Enable the controller for programming */ + fpga_programmer_enable(priv, false); + dev_dbg(priv->dev, "enabled the controller\n"); + + /* Write each chunk of the FPGA bitfile to FPGA programmer */ + ret = fpga_program_block(priv, priv->vb.vaddr, priv->bytes); + if (ret) + goto out_disable_controller; + + /* Wait for the interrupt handler to signal that programming finished */ + ret = wait_for_completion_timeout(&priv->completion, 2 * HZ); + if (!ret) { + dev_err(priv->dev, "Timed out waiting for completion\n"); + ret = -ETIMEDOUT; + goto out_disable_controller; + } + + /* Retrieve the status from the interrupt handler */ + ret = priv->status; + +out_disable_controller: + fpga_programmer_disable(priv); + return ret; +} + +#define FIFO_DMA_ADDRESS 0xf0003000 +#define FIFO_MAX_LEN 4096 + +/** + * fpga_program_dma() - program the DATA-FPGA's using the DMA engine + * @priv: the driver's private data structure + * + * Program the DATA-FPGA's using the Freescale DMA engine. This requires that + * the engine is programmed such that the hardware DMA request lines can + * control the entire DMA transaction. The system controller FPGA then + * completely offloads the programming from the CPU. + * + * Returns 0 on success, -ERRNO otherwise + */ +static noinline int fpga_program_dma(struct fpga_dev *priv) +{ + struct videobuf_dmabuf *vb = &priv->vb; + struct dma_chan *chan = priv->chan; + struct dma_async_tx_descriptor *tx; + size_t num_pages, len, avail = 0; + struct dma_slave_config config; + struct scatterlist *sg; + struct sg_table table; + dma_cookie_t cookie; + int ret, i; + + /* Disable the programmer */ + fpga_programmer_disable(priv); + + /* Allocate a scatterlist for the DMA destination */ + num_pages = DIV_ROUND_UP(priv->bytes, FIFO_MAX_LEN); + ret = sg_alloc_table(&table, num_pages, GFP_KERNEL); + if (ret) { + dev_err(priv->dev, "Unable to allocate dst scatterlist\n"); + ret = -ENOMEM; + goto out_return; + } + + /* + * This is an ugly hack + * + * We fill in a scatterlist as if it were mapped for DMA. This is + * necessary because there exists no better structure for this + * inside the kernel code. + * + * As an added bonus, we can use the DMAEngine API for all of this, + * rather than inventing another extremely similar API. 
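To put numbers on the destination scatterlist allocated just above and filled in right below (bitfile size taken from the FW_SIZE_EP2S90 constant earlier in the file): a 12849552-byte digitizer bitfile needs DIV_ROUND_UP(12849552, 4096) = 3138 entries, each pointing at the same FIFO_DMA_ADDRESS and each at most FIFO_MAX_LEN = 4096 bytes long, the last one only 400 bytes; the hardware request line then paces the transfer so the FIFO is refilled chunk by chunk without CPU involvement.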
+ */ + avail = priv->bytes; + for_each_sg(table.sgl, sg, num_pages, i) { + len = min_t(size_t, avail, FIFO_MAX_LEN); + sg_dma_address(sg) = FIFO_DMA_ADDRESS; + sg_dma_len(sg) = len; + + avail -= len; + } + + /* Map the buffer for DMA */ + ret = videobuf_dma_map(priv->dev, &priv->vb); + if (ret) { + dev_err(priv->dev, "Unable to map buffer for DMA\n"); + goto out_free_table; + } + + /* + * Configure the DMA channel to transfer FIFO_SIZE / 2 bytes per + * transaction, and then put it under external control + */ + memset(&config, 0, sizeof(config)); + config.direction = DMA_TO_DEVICE; + config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + config.dst_maxburst = fpga_fifo_size(priv->regs) / 2 / 4; + ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG, + (unsigned long)&config); + if (ret) { + dev_err(priv->dev, "DMA slave configuration failed\n"); + goto out_dma_unmap; + } + + ret = chan->device->device_control(chan, FSLDMA_EXTERNAL_START, 1); + if (ret) { + dev_err(priv->dev, "DMA external control setup failed\n"); + goto out_dma_unmap; + } + + /* setup and submit the DMA transaction */ + tx = chan->device->device_prep_dma_sg(chan, + table.sgl, num_pages, + vb->sglist, vb->sglen, 0); + if (!tx) { + dev_err(priv->dev, "Unable to prep DMA transaction\n"); + ret = -ENOMEM; + goto out_dma_unmap; + } + + cookie = tx->tx_submit(tx); + if (dma_submit_error(cookie)) { + dev_err(priv->dev, "Unable to submit DMA transaction\n"); + ret = -ENOMEM; + goto out_dma_unmap; + } + + dma_async_memcpy_issue_pending(chan); + + /* Set the total byte count */ + fpga_set_byte_count(priv->regs, priv->bytes); + dev_dbg(priv->dev, "total byte count %u bytes\n", priv->bytes); + + /* Enable the controller for DMA programming */ + fpga_programmer_enable(priv, true); + dev_dbg(priv->dev, "enabled the controller\n"); + + /* Wait for the interrupt handler to signal that programming finished */ + ret = wait_for_completion_timeout(&priv->completion, 2 * HZ); + if (!ret) { + dev_err(priv->dev, "Timed out waiting for completion\n"); + ret = -ETIMEDOUT; + goto out_disable_controller; + } + + /* Retrieve the status from the interrupt handler */ + ret = priv->status; + +out_disable_controller: + fpga_programmer_disable(priv); +out_dma_unmap: + videobuf_dma_unmap(priv->dev, vb); +out_free_table: + sg_free_table(&table); +out_return: + return ret; +} + +/* + * Interrupt Handling + */ + +static irqreturn_t fpga_irq(int irq, void *dev_id) +{ + struct fpga_dev *priv = dev_id; + + /* Save the status */ + priv->status = fpga_config_error(priv->regs) ? 
-EIO : 0; + dev_dbg(priv->dev, "INTERRUPT status %d\n", priv->status); + fpga_dump_registers(priv); + + /* Disabling the programmer clears the interrupt */ + fpga_programmer_disable(priv); + + /* Notify any waiters */ + complete(&priv->completion); + + return IRQ_HANDLED; +} + +/* + * SYSFS Helpers + */ + +/** + * fpga_do_stop() - deconfigure (reset) the DATA-FPGA's + * @priv: the driver's private data structure + * + * LOCKING: must hold priv->lock + */ +static int fpga_do_stop(struct fpga_dev *priv) +{ + u32 val; + + /* Set the led to unprogrammed */ + ledtrig_fpga_programmed(false); + + /* Pulse the config line to reset the FPGA's */ + val = CFG_CTL_ENABLE | CFG_CTL_RESET; + iowrite32be(val, priv->regs + FPGA_CONFIG_CONTROL); + iowrite32be(0x0, priv->regs + FPGA_CONFIG_CONTROL); + + return 0; +} + +static noinline int fpga_do_program(struct fpga_dev *priv) +{ + int ret; + + if (priv->bytes != priv->fw_size) { + dev_err(priv->dev, "Incorrect bitfile size: got %zu bytes, " + "should be %zu bytes\n", + priv->bytes, priv->fw_size); + return -EINVAL; + } + + if (!fpga_power_enabled(priv)) { + dev_err(priv->dev, "Power not enabled\n"); + return -EINVAL; + } + + if (!fpga_power_good(priv)) { + dev_err(priv->dev, "Power not good\n"); + return -EINVAL; + } + + /* Set the LED to unprogrammed */ + ledtrig_fpga_programmed(false); + + /* Try to program the FPGA's using DMA */ + ret = fpga_program_dma(priv); + + /* If DMA failed or doesn't exist, try with CPU */ + if (ret) { + dev_warn(priv->dev, "Falling back to CPU programming\n"); + ret = fpga_program_cpu(priv); + } + + if (ret) { + dev_err(priv->dev, "Unable to program FPGA's\n"); + return ret; + } + + /* Drop the firmware bitfile from memory */ + fpga_drop_firmware_data(priv); + + dev_dbg(priv->dev, "FPGA programming successful\n"); + ledtrig_fpga_programmed(true); + + return 0; +} + +/* + * File Operations + */ + +static int fpga_open(struct inode *inode, struct file *filp) +{ + /* + * The miscdevice layer puts our struct miscdevice into the + * filp->private_data field. We use this to find our private + * data and then overwrite it with our own private structure. 
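Taken together with the file operations and the 'program' sysfs attribute that appear in the following hunks, the intended userspace flow is: stream the bitfile into the character device, close it, then write 1 to the programming attribute. A hypothetical userspace sketch of that flow; the device node and sysfs paths are illustrative only and not taken from the driver:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            char buf[65536];
            ssize_t n;
            int in, out, prog;

            if (argc != 2) {
                    fprintf(stderr, "usage: %s <bitfile>\n", argv[0]);
                    return 1;
            }

            in = open(argv[1], O_RDONLY);
            out = open("/dev/carma-fpga-program", O_WRONLY | O_TRUNC); /* hypothetical node name */
            if (in < 0 || out < 0) {
                    perror("open");
                    return 1;
            }

            /* fpga_write() accepts at most fw_size bytes in total */
            while ((n = read(in, buf, sizeof(buf))) > 0) {
                    if (write(out, buf, n) != n) {
                            perror("write bitfile");
                            return 1;
                    }
            }

            close(in);
            close(out);     /* drops the driver mutex so programming can proceed */

            /* trigger programming; the sysfs path is illustrative */
            prog = open("/sys/class/misc/carma-fpga-program/program", O_WRONLY);
            if (prog < 0 || write(prog, "1", 1) != 1) {
                    perror("program");
                    return 1;
            }
            close(prog);
            return 0;
    }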
+ */ + struct fpga_dev *priv = container_of(filp->private_data, + struct fpga_dev, miscdev); + unsigned int nr_pages; + int ret; + + /* We only allow one process at a time */ + ret = mutex_lock_interruptible(&priv->lock); + if (ret) + return ret; + + filp->private_data = priv; + kref_get(&priv->ref); + + /* Truncation: drop any existing data */ + if (filp->f_flags & O_TRUNC) + priv->bytes = 0; + + /* Check if we have already allocated a buffer */ + if (priv->vb_allocated) + return 0; + + /* Allocate a buffer to hold enough data for the bitfile */ + nr_pages = DIV_ROUND_UP(priv->fw_size, PAGE_SIZE); + ret = videobuf_dma_init_kernel(&priv->vb, DMA_TO_DEVICE, nr_pages); + if (ret) { + dev_err(priv->dev, "unable to allocate data buffer\n"); + mutex_unlock(&priv->lock); + kref_put(&priv->ref, fpga_dev_remove); + return ret; + } + + priv->vb_allocated = true; + return 0; +} + +static int fpga_release(struct inode *inode, struct file *filp) +{ + struct fpga_dev *priv = filp->private_data; + + mutex_unlock(&priv->lock); + kref_put(&priv->ref, fpga_dev_remove); + return 0; +} + +static ssize_t fpga_write(struct file *filp, const char __user *buf, + size_t count, loff_t *f_pos) +{ + struct fpga_dev *priv = filp->private_data; + + /* FPGA bitfiles have an exact size: disallow anything else */ + if (priv->bytes >= priv->fw_size) + return -ENOSPC; + + count = min_t(size_t, priv->fw_size - priv->bytes, count); + if (copy_from_user(priv->vb.vaddr + priv->bytes, buf, count)) + return -EFAULT; + + priv->bytes += count; + return count; +} + +static ssize_t fpga_read(struct file *filp, char __user *buf, size_t count, + loff_t *f_pos) +{ + struct fpga_dev *priv = filp->private_data; + + count = min_t(size_t, priv->bytes - *f_pos, count); + if (copy_to_user(buf, priv->vb.vaddr + *f_pos, count)) + return -EFAULT; + + *f_pos += count; + return count; +} + +static loff_t fpga_llseek(struct file *filp, loff_t offset, int origin) +{ + struct fpga_dev *priv = filp->private_data; + loff_t newpos; + + /* only read-only opens are allowed to seek */ + if ((filp->f_flags & O_ACCMODE) != O_RDONLY) + return -EINVAL; + + switch (origin) { + case SEEK_SET: /* seek relative to the beginning of the file */ + newpos = offset; + break; + case SEEK_CUR: /* seek relative to current position in the file */ + newpos = filp->f_pos + offset; + break; + case SEEK_END: /* seek relative to the end of the file */ + newpos = priv->fw_size - offset; + break; + default: + return -EINVAL; + } + + /* check for sanity */ + if (newpos > priv->fw_size) + return -EINVAL; + + filp->f_pos = newpos; + return newpos; +} + +static const struct file_operations fpga_fops = { + .open = fpga_open, + .release = fpga_release, + .write = fpga_write, + .read = fpga_read, + .llseek = fpga_llseek, +}; + +/* + * Device Attributes + */ + +static ssize_t pfail_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct fpga_dev *priv = dev_get_drvdata(dev); + u8 val; + + val = ioread8(priv->regs + CTL_PWR_FAIL); + return snprintf(buf, PAGE_SIZE, "0x%.2x\n", val); +} + +static ssize_t pgood_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct fpga_dev *priv = dev_get_drvdata(dev); + return snprintf(buf, PAGE_SIZE, "%d\n", fpga_power_good(priv)); +} + +static ssize_t penable_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct fpga_dev *priv = dev_get_drvdata(dev); + return snprintf(buf, PAGE_SIZE, "%d\n", fpga_power_enabled(priv)); +} + +static ssize_t penable_store(struct device *dev, 
struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fpga_dev *priv = dev_get_drvdata(dev); + unsigned long val; + int ret; + + if (strict_strtoul(buf, 0, &val)) + return -EINVAL; + + if (val) { + ret = fpga_enable_power_supplies(priv); + if (ret) + return ret; + } else { + fpga_do_stop(priv); + fpga_disable_power_supplies(priv); + } + + return count; +} + +static ssize_t program_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct fpga_dev *priv = dev_get_drvdata(dev); + return snprintf(buf, PAGE_SIZE, "%d\n", fpga_running(priv)); +} + +static ssize_t program_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fpga_dev *priv = dev_get_drvdata(dev); + unsigned long val; + int ret; + + if (strict_strtoul(buf, 0, &val)) + return -EINVAL; + + /* We can't have an image writer and be programming simultaneously */ + if (mutex_lock_interruptible(&priv->lock)) + return -ERESTARTSYS; + + /* Program or Reset the FPGA's */ + ret = val ? fpga_do_program(priv) : fpga_do_stop(priv); + if (ret) + goto out_unlock; + + /* Success */ + ret = count; + +out_unlock: + mutex_unlock(&priv->lock); + return ret; +} + +static DEVICE_ATTR(power_fail, S_IRUGO, pfail_show, NULL); +static DEVICE_ATTR(power_good, S_IRUGO, pgood_show, NULL); +static DEVICE_ATTR(power_enable, S_IRUGO | S_IWUSR, + penable_show, penable_store); + +static DEVICE_ATTR(program, S_IRUGO | S_IWUSR, + program_show, program_store); + +static struct attribute *fpga_attributes[] = { + &dev_attr_power_fail.attr, + &dev_attr_power_good.attr, + &dev_attr_power_enable.attr, + &dev_attr_program.attr, + NULL, +}; + +static const struct attribute_group fpga_attr_group = { + .attrs = fpga_attributes, +}; + +/* + * OpenFirmware Device Subsystem + */ + +#define SYS_REG_VERSION 0x00 +#define SYS_REG_GEOGRAPHIC 0x10 + +static bool dma_filter(struct dma_chan *chan, void *data) +{ + /* + * DMA Channel #0 is the only acceptable device + * + * This probably won't survive an unload/load cycle of the Freescale + * DMAEngine driver, but that won't be a problem + */ + return chan->chan_id == 0 && chan->device->dev_id == 0; +} + +static int fpga_of_remove(struct platform_device *op) +{ + struct fpga_dev *priv = dev_get_drvdata(&op->dev); + struct device *this_device = priv->miscdev.this_device; + + sysfs_remove_group(&this_device->kobj, &fpga_attr_group); + misc_deregister(&priv->miscdev); + + free_irq(priv->irq, priv); + irq_dispose_mapping(priv->irq); + + /* make sure the power supplies are off */ + fpga_disable_power_supplies(priv); + + /* unmap registers */ + iounmap(priv->immr); + iounmap(priv->regs); + + dma_release_channel(priv->chan); + + /* drop our reference to the private data structure */ + kref_put(&priv->ref, fpga_dev_remove); + return 0; +} + +/* CTL-CPLD Version Register */ +#define CTL_CPLD_VERSION 0x2000 + +static int fpga_of_probe(struct platform_device *op, + const struct of_device_id *match) +{ + struct device_node *of_node = op->dev.of_node; + struct device *this_device; + struct fpga_dev *priv; + dma_cap_mask_t mask; + u32 ver; + int ret; + + /* Allocate private data */ + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(&op->dev, "Unable to allocate private data\n"); + ret = -ENOMEM; + goto out_return; + } + + /* Setup the miscdevice */ + priv->miscdev.minor = MISC_DYNAMIC_MINOR; + priv->miscdev.name = drv_name; + priv->miscdev.fops = &fpga_fops; + + kref_init(&priv->ref); + + dev_set_drvdata(&op->dev, priv); + priv->dev = 
&op->dev; + mutex_init(&priv->lock); + init_completion(&priv->completion); + videobuf_dma_init(&priv->vb); + + dev_set_drvdata(priv->dev, priv); + dma_cap_zero(mask); + dma_cap_set(DMA_MEMCPY, mask); + dma_cap_set(DMA_INTERRUPT, mask); + dma_cap_set(DMA_SLAVE, mask); + dma_cap_set(DMA_SG, mask); + + /* Get control of DMA channel #0 */ + priv->chan = dma_request_channel(mask, dma_filter, NULL); + if (!priv->chan) { + dev_err(&op->dev, "Unable to acquire DMA channel #0\n"); + ret = -ENODEV; + goto out_free_priv; + } + + /* Remap the registers for use */ + priv->regs = of_iomap(of_node, 0); + if (!priv->regs) { + dev_err(&op->dev, "Unable to ioremap registers\n"); + ret = -ENOMEM; + goto out_dma_release_channel; + } + + /* Remap the IMMR for use */ + priv->immr = ioremap(get_immrbase(), 0x100000); + if (!priv->immr) { + dev_err(&op->dev, "Unable to ioremap IMMR\n"); + ret = -ENOMEM; + goto out_unmap_regs; + } + + /* + * Check that external DMA is configured + * + * U-Boot does this for us, but we should check it and bail out if + * there is a problem. Failing to have this register setup correctly + * will cause the DMA controller to transfer a single cacheline + * worth of data, then wedge itself. + */ + if ((ioread32be(priv->immr + 0x114) & 0xE00) != 0xE00) { + dev_err(&op->dev, "External DMA control not configured\n"); + ret = -ENODEV; + goto out_unmap_immr; + } + + /* + * Check the CTL-CPLD version + * + * This driver uses the CTL-CPLD DATA-FPGA power sequencer, and we + * don't want to run on any version of the CTL-CPLD that does not use + * a compatible register layout. + * + * v2: changed register layout, added power sequencer + * v3: added glitch filter on the i2c overcurrent/overtemp outputs + */ + ver = ioread8(priv->regs + CTL_CPLD_VERSION); + if (ver != 0x02 && ver != 0x03) { + dev_err(&op->dev, "CTL-CPLD is not version 0x02 or 0x03!\n"); + ret = -ENODEV; + goto out_unmap_immr; + } + + /* Set the exact size that the firmware image should be */ + ver = ioread32be(priv->regs + SYS_REG_VERSION); + priv->fw_size = (ver & (1 << 18)) ? FW_SIZE_EP2S130 : FW_SIZE_EP2S90; + + /* Find the correct IRQ number */ + priv->irq = irq_of_parse_and_map(of_node, 0); + if (priv->irq == NO_IRQ) { + dev_err(&op->dev, "Unable to find IRQ line\n"); + ret = -ENODEV; + goto out_unmap_immr; + } + + /* Request the IRQ */ + ret = request_irq(priv->irq, fpga_irq, IRQF_SHARED, drv_name, priv); + if (ret) { + dev_err(&op->dev, "Unable to request IRQ %d\n", priv->irq); + ret = -ENODEV; + goto out_irq_dispose_mapping; + } + + /* Reset and stop the FPGA's, just in case */ + fpga_do_stop(priv); + + /* Register the miscdevice */ + ret = misc_register(&priv->miscdev); + if (ret) { + dev_err(&op->dev, "Unable to register miscdevice\n"); + goto out_free_irq; + } + + /* Create the sysfs files */ + this_device = priv->miscdev.this_device; + dev_set_drvdata(this_device, priv); + ret = sysfs_create_group(&this_device->kobj, &fpga_attr_group); + if (ret) { + dev_err(&op->dev, "Unable to create sysfs files\n"); + goto out_misc_deregister; + } + + dev_info(priv->dev, "CARMA FPGA Programmer: %s rev%s with %s FPGAs\n", + (ver & (1 << 17)) ? "Correlator" : "Digitizer", + (ver & (1 << 16)) ? "B" : "A", + (ver & (1 << 18)) ? 
"EP2S130" : "EP2S90"); + + return 0; + +out_misc_deregister: + misc_deregister(&priv->miscdev); +out_free_irq: + free_irq(priv->irq, priv); +out_irq_dispose_mapping: + irq_dispose_mapping(priv->irq); +out_unmap_immr: + iounmap(priv->immr); +out_unmap_regs: + iounmap(priv->regs); +out_dma_release_channel: + dma_release_channel(priv->chan); +out_free_priv: + kref_put(&priv->ref, fpga_dev_remove); +out_return: + return ret; +} + +static struct of_device_id fpga_of_match[] = { + { .compatible = "carma,fpga-programmer", }, + {}, +}; + +static struct of_platform_driver fpga_of_driver = { + .probe = fpga_of_probe, + .remove = fpga_of_remove, + .driver = { + .name = drv_name, + .of_match_table = fpga_of_match, + .owner = THIS_MODULE, + }, +}; + +/* + * Module Init / Exit + */ + +static int __init fpga_init(void) +{ + led_trigger_register_simple("fpga", &ledtrig_fpga); + return of_register_platform_driver(&fpga_of_driver); +} + +static void __exit fpga_exit(void) +{ + of_unregister_platform_driver(&fpga_of_driver); + led_trigger_unregister_simple(ledtrig_fpga); +} + +MODULE_AUTHOR("Ira W. Snyder "); +MODULE_DESCRIPTION("CARMA Board DATA-FPGA Programmer"); +MODULE_LICENSE("GPL"); + +module_init(fpga_init); +module_exit(fpga_exit); diff --git a/trunk/drivers/misc/carma/carma-fpga.c b/trunk/drivers/misc/carma/carma-fpga.c new file mode 100644 index 000000000000..3965821fef17 --- /dev/null +++ b/trunk/drivers/misc/carma/carma-fpga.c @@ -0,0 +1,1433 @@ +/* + * CARMA DATA-FPGA Access Driver + * + * Copyright (c) 2009-2011 Ira W. Snyder + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +/* + * FPGA Memory Dump Format + * + * FPGA #0 control registers (32 x 32-bit words) + * FPGA #1 control registers (32 x 32-bit words) + * FPGA #2 control registers (32 x 32-bit words) + * FPGA #3 control registers (32 x 32-bit words) + * SYSFPGA control registers (32 x 32-bit words) + * FPGA #0 correlation array (NUM_CORL0 correlation blocks) + * FPGA #1 correlation array (NUM_CORL1 correlation blocks) + * FPGA #2 correlation array (NUM_CORL2 correlation blocks) + * FPGA #3 correlation array (NUM_CORL3 correlation blocks) + * + * Each correlation array consists of: + * + * Correlation Data (2 x NUM_LAGSn x 32-bit words) + * Pipeline Metadata (2 x NUM_METAn x 32-bit words) + * Quantization Counters (2 x NUM_QCNTn x 32-bit words) + * + * The NUM_CORLn, NUM_LAGSn, NUM_METAn, and NUM_QCNTn values come from + * the FPGA configuration registers. They do not change once the FPGA's + * have been programmed, they only change on re-programming. + */ + +/* + * Basic Description: + * + * This driver is used to capture correlation spectra off of the four data + * processing FPGAs. The FPGAs are often reprogrammed at runtime, therefore + * this driver supports dynamic enable/disable of capture while the device + * remains open. + * + * The nominal capture rate is 64Hz (every 15.625ms). To facilitate this fast + * capture rate, all buffers are pre-allocated to avoid any potentially long + * running memory allocations while capturing. + * + * There are two lists and one pointer which are used to keep track of the + * different states of data buffers. + * + * 1) free list + * This list holds all empty data buffers which are ready to receive data. 
+ * + * 2) inflight pointer + * This pointer holds the currently inflight data buffer. This buffer is having + * data copied into it by the DMA engine. + * + * 3) used list + * This list holds data buffers which have been filled, and are waiting to be + * read by userspace. + * + * All buffers start life on the free list, then move successively to the + * inflight pointer, and then to the used list. After they have been read by + * userspace, they are moved back to the free list. The cycle repeats as long + * as necessary. + * + * It should be noted that all buffers are mapped and ready for DMA when they + * are on any of the three lists. They are only unmapped when they are in the + * process of being read by userspace. + */ + +/* + * Notes on the IRQ masking scheme: + * + * The IRQ masking scheme here is different than most other hardware. The only + * way for the DATA-FPGAs to detect if the kernel has taken too long to copy + * the data is if the status registers are not cleared before the next + * correlation data dump is ready. + * + * The interrupt line is connected to the status registers, such that when they + * are cleared, the interrupt is de-asserted. Therein lies our problem. We need + * to schedule a long-running DMA operation and return from the interrupt + * handler quickly, but we cannot clear the status registers. + * + * To handle this, the system controller FPGA has the capability to connect the + * interrupt line to a user-controlled GPIO pin. This pin is driven high + * (unasserted) and left that way. To mask the interrupt, we change the + * interrupt source to the GPIO pin. Tada, we hid the interrupt. :) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* system controller registers */ +#define SYS_IRQ_SOURCE_CTL 0x24 +#define SYS_IRQ_OUTPUT_EN 0x28 +#define SYS_IRQ_OUTPUT_DATA 0x2C +#define SYS_IRQ_INPUT_DATA 0x30 +#define SYS_FPGA_CONFIG_STATUS 0x44 + +/* GPIO IRQ line assignment */ +#define IRQ_CORL_DONE 0x10 + +/* FPGA registers */ +#define MMAP_REG_VERSION 0x00 +#define MMAP_REG_CORL_CONF1 0x08 +#define MMAP_REG_CORL_CONF2 0x0C +#define MMAP_REG_STATUS 0x48 + +#define SYS_FPGA_BLOCK 0xF0000000 + +#define DATA_FPGA_START 0x400000 +#define DATA_FPGA_SIZE 0x80000 + +static const char drv_name[] = "carma-fpga"; + +#define NUM_FPGA 4 + +#define MIN_DATA_BUFS 8 +#define MAX_DATA_BUFS 64 + +struct fpga_info { + unsigned int num_lag_ram; + unsigned int blk_size; +}; + +struct data_buf { + struct list_head entry; + struct videobuf_dmabuf vb; + size_t size; +}; + +struct fpga_device { + /* character device */ + struct miscdevice miscdev; + struct device *dev; + struct mutex mutex; + + /* reference count */ + struct kref ref; + + /* FPGA registers and information */ + struct fpga_info info[NUM_FPGA]; + void __iomem *regs; + int irq; + + /* FPGA Physical Address/Size Information */ + resource_size_t phys_addr; + size_t phys_size; + + /* DMA structures */ + struct sg_table corl_table; + unsigned int corl_nents; + struct dma_chan *chan; + + /* Protection for all members below */ + spinlock_t lock; + + /* Device enable/disable flag */ + bool enabled; + + /* Correlation data buffers */ + wait_queue_head_t wait; + struct list_head free; + struct list_head used; + struct data_buf *inflight; + + /* Information about data buffers */ + unsigned int num_dropped; + unsigned int num_buffers; + size_t bufsize; + struct dentry *dbg_entry; +}; + +struct fpga_reader { + 
struct fpga_device *priv; + struct data_buf *buf; + off_t buf_start; +}; + +static void fpga_device_release(struct kref *ref) +{ + struct fpga_device *priv = container_of(ref, struct fpga_device, ref); + + /* the last reader has exited, cleanup the last bits */ + mutex_destroy(&priv->mutex); + kfree(priv); +} + +/* + * Data Buffer Allocation Helpers + */ + +/** + * data_free_buffer() - free a single data buffer and all allocated memory + * @buf: the buffer to free + * + * This will free all of the pages allocated to the given data buffer, and + * then free the structure itself + */ +static void data_free_buffer(struct data_buf *buf) +{ + /* It is ok to free a NULL buffer */ + if (!buf) + return; + + /* free all memory */ + videobuf_dma_free(&buf->vb); + kfree(buf); +} + +/** + * data_alloc_buffer() - allocate and fill a data buffer with pages + * @bytes: the number of bytes required + * + * This allocates all space needed for a data buffer. It must be mapped before + * use in a DMA transaction using videobuf_dma_map(). + * + * Returns NULL on failure + */ +static struct data_buf *data_alloc_buffer(const size_t bytes) +{ + unsigned int nr_pages; + struct data_buf *buf; + int ret; + + /* calculate the number of pages necessary */ + nr_pages = DIV_ROUND_UP(bytes, PAGE_SIZE); + + /* allocate the buffer structure */ + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) + goto out_return; + + /* initialize internal fields */ + INIT_LIST_HEAD(&buf->entry); + buf->size = bytes; + + /* allocate the videobuf */ + videobuf_dma_init(&buf->vb); + ret = videobuf_dma_init_kernel(&buf->vb, DMA_FROM_DEVICE, nr_pages); + if (ret) + goto out_free_buf; + + return buf; + +out_free_buf: + kfree(buf); +out_return: + return NULL; +} + +/** + * data_free_buffers() - free all allocated buffers + * @priv: the driver's private data structure + * + * Free all buffers allocated by the driver (except those currently in the + * process of being read by userspace). + * + * LOCKING: must hold dev->mutex + * CONTEXT: user + */ +static void data_free_buffers(struct fpga_device *priv) +{ + struct data_buf *buf, *tmp; + + /* the device should be stopped, no DMA in progress */ + BUG_ON(priv->inflight != NULL); + + list_for_each_entry_safe(buf, tmp, &priv->free, entry) { + list_del_init(&buf->entry); + videobuf_dma_unmap(priv->dev, &buf->vb); + data_free_buffer(buf); + } + + list_for_each_entry_safe(buf, tmp, &priv->used, entry) { + list_del_init(&buf->entry); + videobuf_dma_unmap(priv->dev, &buf->vb); + data_free_buffer(buf); + } + + priv->num_buffers = 0; + priv->bufsize = 0; +} + +/** + * data_alloc_buffers() - allocate 1 seconds worth of data buffers + * @priv: the driver's private data structure + * + * Allocate enough buffers for a whole second worth of data + * + * This routine will attempt to degrade nicely by succeeding even if a full + * second worth of data buffers could not be allocated, as long as a minimum + * number were allocated. In this case, it will print a message to the kernel + * log. + * + * The device must not be modifying any lists when this is called. 
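The free -> inflight -> used rotation described in this file's opening comment reduces to two list moves under the spinlock: the interrupt handler claims a free buffer and marks it inflight, and the DMA completion callback retires it to the used list. A minimal sketch of that rotation with illustrative names (the lists, lock and inflight pointer are assumed to be initialised elsewhere, as the driver does at probe time):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct demo_buf {
            struct list_head entry;
    };

    struct demo_ring {
            spinlock_t lock;
            struct list_head free;          /* empty, DMA-mapped buffers */
            struct list_head used;          /* filled, waiting for a reader */
            struct demo_buf *inflight;      /* currently being filled by DMA */
    };

    /* hardirq side: claim a free buffer and mark it inflight */
    static struct demo_buf *demo_claim(struct demo_ring *r)
    {
            struct demo_buf *buf = NULL;

            spin_lock(&r->lock);
            if (!list_empty(&r->free)) {
                    buf = list_first_entry(&r->free, struct demo_buf, entry);
                    list_del_init(&buf->entry);
                    r->inflight = buf;
            }
            spin_unlock(&r->lock);
            return buf;
    }

    /* DMA callback side: retire the inflight buffer to the used list */
    static void demo_retire(struct demo_ring *r)
    {
            unsigned long flags;

            spin_lock_irqsave(&r->lock, flags);
            list_move_tail(&r->inflight->entry, &r->used); /* assumes inflight != NULL */
            r->inflight = NULL;
            spin_unlock_irqrestore(&r->lock, flags);
    }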
+ * + * CONTEXT: user + * LOCKING: must hold dev->mutex + * + * Returns 0 on success, -ERRNO otherwise + */ +static int data_alloc_buffers(struct fpga_device *priv) +{ + struct data_buf *buf; + int i, ret; + + for (i = 0; i < MAX_DATA_BUFS; i++) { + + /* allocate a buffer */ + buf = data_alloc_buffer(priv->bufsize); + if (!buf) + break; + + /* map it for DMA */ + ret = videobuf_dma_map(priv->dev, &buf->vb); + if (ret) { + data_free_buffer(buf); + break; + } + + /* add it to the list of free buffers */ + list_add_tail(&buf->entry, &priv->free); + priv->num_buffers++; + } + + /* Make sure we allocated the minimum required number of buffers */ + if (priv->num_buffers < MIN_DATA_BUFS) { + dev_err(priv->dev, "Unable to allocate enough data buffers\n"); + data_free_buffers(priv); + return -ENOMEM; + } + + /* Warn if we are running in a degraded state, but do not fail */ + if (priv->num_buffers < MAX_DATA_BUFS) { + dev_warn(priv->dev, + "Unable to allocate %d buffers, using %d buffers instead\n", + MAX_DATA_BUFS, i); + } + + return 0; +} + +/* + * DMA Operations Helpers + */ + +/** + * fpga_start_addr() - get the physical address a DATA-FPGA + * @priv: the driver's private data structure + * @fpga: the DATA-FPGA number (zero based) + */ +static dma_addr_t fpga_start_addr(struct fpga_device *priv, unsigned int fpga) +{ + return priv->phys_addr + 0x400000 + (0x80000 * fpga); +} + +/** + * fpga_block_addr() - get the physical address of a correlation data block + * @priv: the driver's private data structure + * @fpga: the DATA-FPGA number (zero based) + * @blknum: the correlation block number (zero based) + */ +static dma_addr_t fpga_block_addr(struct fpga_device *priv, unsigned int fpga, + unsigned int blknum) +{ + return fpga_start_addr(priv, fpga) + (0x10000 * (1 + blknum)); +} + +#define REG_BLOCK_SIZE (32 * 4) + +/** + * data_setup_corl_table() - create the scatterlist for correlation dumps + * @priv: the driver's private data structure + * + * Create the scatterlist for transferring a correlation dump from the + * DATA FPGAs. This structure will be reused for each buffer than needs + * to be filled with correlation data. + * + * Returns 0 on success, -ERRNO otherwise + */ +static int data_setup_corl_table(struct fpga_device *priv) +{ + struct sg_table *table = &priv->corl_table; + struct scatterlist *sg; + struct fpga_info *info; + int i, j, ret; + + /* Calculate the number of entries needed */ + priv->corl_nents = (1 + NUM_FPGA) * REG_BLOCK_SIZE; + for (i = 0; i < NUM_FPGA; i++) + priv->corl_nents += priv->info[i].num_lag_ram; + + /* Allocate the scatterlist table */ + ret = sg_alloc_table(table, priv->corl_nents, GFP_KERNEL); + if (ret) { + dev_err(priv->dev, "unable to allocate DMA table\n"); + return ret; + } + + /* Add the DATA FPGA registers to the scatterlist */ + sg = table->sgl; + for (i = 0; i < NUM_FPGA; i++) { + sg_dma_address(sg) = fpga_start_addr(priv, i); + sg_dma_len(sg) = REG_BLOCK_SIZE; + sg = sg_next(sg); + } + + /* Add the SYS-FPGA registers to the scatterlist */ + sg_dma_address(sg) = SYS_FPGA_BLOCK; + sg_dma_len(sg) = REG_BLOCK_SIZE; + sg = sg_next(sg); + + /* Add the FPGA correlation data blocks to the scatterlist */ + for (i = 0; i < NUM_FPGA; i++) { + info = &priv->info[i]; + for (j = 0; j < info->num_lag_ram; j++) { + sg_dma_address(sg) = fpga_block_addr(priv, i, j); + sg_dma_len(sg) = info->blk_size; + sg = sg_next(sg); + } + } + + /* + * All physical addresses and lengths are present in the structure + * now. 
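As a worked example of the two address helpers above, assuming a hypothetical priv->phys_addr of 0xc0000000:

    fpga_start_addr(priv, 2)    = 0xc0000000 + 0x400000 + 2 * 0x80000 = 0xc0500000
    fpga_block_addr(priv, 2, 3) = 0xc0500000 + 0x10000 * (1 + 3)      = 0xc0540000

that is, FPGA #2's register block sits 4 MiB plus two 512 KiB strides above the bus base, and its fourth correlation block (blknum 3) starts 0x40000 bytes into that FPGA's window.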
It can be reused for every FPGA DATA interrupt + */ + return 0; +} + +/* + * FPGA Register Access Helpers + */ + +static void fpga_write_reg(struct fpga_device *priv, unsigned int fpga, + unsigned int reg, u32 val) +{ + const int fpga_start = DATA_FPGA_START + (fpga * DATA_FPGA_SIZE); + iowrite32be(val, priv->regs + fpga_start + reg); +} + +static u32 fpga_read_reg(struct fpga_device *priv, unsigned int fpga, + unsigned int reg) +{ + const int fpga_start = DATA_FPGA_START + (fpga * DATA_FPGA_SIZE); + return ioread32be(priv->regs + fpga_start + reg); +} + +/** + * data_calculate_bufsize() - calculate the data buffer size required + * @priv: the driver's private data structure + * + * Calculate the total buffer size needed to hold a single block + * of correlation data + * + * CONTEXT: user + * + * Returns 0 on success, -ERRNO otherwise + */ +static int data_calculate_bufsize(struct fpga_device *priv) +{ + u32 num_corl, num_lags, num_meta, num_qcnt, num_pack; + u32 conf1, conf2, version; + u32 num_lag_ram, blk_size; + int i; + + /* Each buffer starts with the 5 FPGA register areas */ + priv->bufsize = (1 + NUM_FPGA) * REG_BLOCK_SIZE; + + /* Read and store the configuration data for each FPGA */ + for (i = 0; i < NUM_FPGA; i++) { + version = fpga_read_reg(priv, i, MMAP_REG_VERSION); + conf1 = fpga_read_reg(priv, i, MMAP_REG_CORL_CONF1); + conf2 = fpga_read_reg(priv, i, MMAP_REG_CORL_CONF2); + + /* minor version 2 and later */ + if ((version & 0x000000FF) >= 2) { + num_corl = (conf1 & 0x000000F0) >> 4; + num_pack = (conf1 & 0x00000F00) >> 8; + num_lags = (conf1 & 0x00FFF000) >> 12; + num_meta = (conf1 & 0x7F000000) >> 24; + num_qcnt = (conf2 & 0x00000FFF) >> 0; + } else { + num_corl = (conf1 & 0x000000F0) >> 4; + num_pack = 1; /* implied */ + num_lags = (conf1 & 0x000FFF00) >> 8; + num_meta = (conf1 & 0x7FF00000) >> 20; + num_qcnt = (conf2 & 0x00000FFF) >> 0; + } + + num_lag_ram = (num_corl + num_pack - 1) / num_pack; + blk_size = ((num_pack * num_lags) + num_meta + num_qcnt) * 8; + + priv->info[i].num_lag_ram = num_lag_ram; + priv->info[i].blk_size = blk_size; + priv->bufsize += num_lag_ram * blk_size; + + dev_dbg(priv->dev, "FPGA %d NUM_CORL: %d\n", i, num_corl); + dev_dbg(priv->dev, "FPGA %d NUM_PACK: %d\n", i, num_pack); + dev_dbg(priv->dev, "FPGA %d NUM_LAGS: %d\n", i, num_lags); + dev_dbg(priv->dev, "FPGA %d NUM_META: %d\n", i, num_meta); + dev_dbg(priv->dev, "FPGA %d NUM_QCNT: %d\n", i, num_qcnt); + dev_dbg(priv->dev, "FPGA %d BLK_SIZE: %d\n", i, blk_size); + } + + dev_dbg(priv->dev, "TOTAL BUFFER SIZE: %zu bytes\n", priv->bufsize); + return 0; +} + +/* + * Interrupt Handling + */ + +/** + * data_disable_interrupts() - stop the device from generating interrupts + * @priv: the driver's private data structure + * + * Hide interrupts by switching to GPIO interrupt source + * + * LOCKING: must hold dev->lock + */ +static void data_disable_interrupts(struct fpga_device *priv) +{ + /* hide the interrupt by switching the IRQ driver to GPIO */ + iowrite32be(0x2F, priv->regs + SYS_IRQ_SOURCE_CTL); +} + +/** + * data_enable_interrupts() - allow the device to generate interrupts + * @priv: the driver's private data structure + * + * Unhide interrupts by switching to the FPGA interrupt source. At the + * same time, clear the DATA-FPGA status registers. 
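A worked example of the size calculation above, using hypothetical register contents for one minor-version >= 2 FPGA that decodes to NUM_CORL = 8, NUM_PACK = 2, NUM_LAGS = 128, NUM_META = 16 and NUM_QCNT = 64:

    num_lag_ram = (8 + 2 - 1) / 2           = 4
    blk_size    = ((2 * 128) + 16 + 64) * 8 = 2688 bytes

so this FPGA contributes 4 * 2688 = 10752 bytes to priv->bufsize, on top of the fixed (1 + NUM_FPGA) * REG_BLOCK_SIZE = 5 * 128 = 640 bytes of register snapshots included in every dump.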
+ * + * LOCKING: must hold dev->lock + */ +static void data_enable_interrupts(struct fpga_device *priv) +{ + /* clear the actual FPGA corl_done interrupt */ + fpga_write_reg(priv, 0, MMAP_REG_STATUS, 0x0); + fpga_write_reg(priv, 1, MMAP_REG_STATUS, 0x0); + fpga_write_reg(priv, 2, MMAP_REG_STATUS, 0x0); + fpga_write_reg(priv, 3, MMAP_REG_STATUS, 0x0); + + /* flush the writes */ + fpga_read_reg(priv, 0, MMAP_REG_STATUS); + + /* switch back to the external interrupt source */ + iowrite32be(0x3F, priv->regs + SYS_IRQ_SOURCE_CTL); +} + +/** + * data_dma_cb() - DMAEngine callback for DMA completion + * @data: the driver's private data structure + * + * Complete a DMA transfer from the DATA-FPGA's + * + * This is called via the DMA callback mechanism, and will handle moving the + * completed DMA transaction to the used list, and then wake any processes + * waiting for new data + * + * CONTEXT: any, softirq expected + */ +static void data_dma_cb(void *data) +{ + struct fpga_device *priv = data; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + /* If there is no inflight buffer, we've got a bug */ + BUG_ON(priv->inflight == NULL); + + /* Move the inflight buffer onto the used list */ + list_move_tail(&priv->inflight->entry, &priv->used); + priv->inflight = NULL; + + /* clear the FPGA status and re-enable interrupts */ + data_enable_interrupts(priv); + + spin_unlock_irqrestore(&priv->lock, flags); + + /* + * We've changed both the inflight and used lists, so we need + * to wake up any processes that are blocking for those events + */ + wake_up(&priv->wait); +} + +/** + * data_submit_dma() - prepare and submit the required DMA to fill a buffer + * @priv: the driver's private data structure + * @buf: the data buffer + * + * Prepare and submit the necessary DMA transactions to fill a correlation + * data buffer. + * + * LOCKING: must hold dev->lock + * CONTEXT: hardirq only + * + * Returns 0 on success, -ERRNO otherwise + */ +static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf) +{ + struct scatterlist *dst_sg, *src_sg; + unsigned int dst_nents, src_nents; + struct dma_chan *chan = priv->chan; + struct dma_async_tx_descriptor *tx; + dma_cookie_t cookie; + dma_addr_t dst, src; + + dst_sg = buf->vb.sglist; + dst_nents = buf->vb.sglen; + + src_sg = priv->corl_table.sgl; + src_nents = priv->corl_nents; + + /* + * All buffers passed to this function should be ready and mapped + * for DMA already. 
Therefore, we don't need to do anything except + * submit it to the Freescale DMA Engine for processing + */ + + /* setup the scatterlist to scatterlist transfer */ + tx = chan->device->device_prep_dma_sg(chan, + dst_sg, dst_nents, + src_sg, src_nents, + 0); + if (!tx) { + dev_err(priv->dev, "unable to prep scatterlist DMA\n"); + return -ENOMEM; + } + + /* submit the transaction to the DMA controller */ + cookie = tx->tx_submit(tx); + if (dma_submit_error(cookie)) { + dev_err(priv->dev, "unable to submit scatterlist DMA\n"); + return -ENOMEM; + } + + /* Prepare the re-read of the SYS-FPGA block */ + dst = sg_dma_address(dst_sg) + (NUM_FPGA * REG_BLOCK_SIZE); + src = SYS_FPGA_BLOCK; + tx = chan->device->device_prep_dma_memcpy(chan, dst, src, + REG_BLOCK_SIZE, + DMA_PREP_INTERRUPT); + if (!tx) { + dev_err(priv->dev, "unable to prep SYS-FPGA DMA\n"); + return -ENOMEM; + } + + /* Setup the callback */ + tx->callback = data_dma_cb; + tx->callback_param = priv; + + /* submit the transaction to the DMA controller */ + cookie = tx->tx_submit(tx); + if (dma_submit_error(cookie)) { + dev_err(priv->dev, "unable to submit SYS-FPGA DMA\n"); + return -ENOMEM; + } + + return 0; +} + +#define CORL_DONE 0x1 +#define CORL_ERR 0x2 + +static irqreturn_t data_irq(int irq, void *dev_id) +{ + struct fpga_device *priv = dev_id; + bool submitted = false; + struct data_buf *buf; + u32 status; + int i; + + /* detect spurious interrupts via FPGA status */ + for (i = 0; i < 4; i++) { + status = fpga_read_reg(priv, i, MMAP_REG_STATUS); + if (!(status & (CORL_DONE | CORL_ERR))) { + dev_err(priv->dev, "spurious irq detected (FPGA)\n"); + return IRQ_NONE; + } + } + + /* detect spurious interrupts via raw IRQ pin readback */ + status = ioread32be(priv->regs + SYS_IRQ_INPUT_DATA); + if (status & IRQ_CORL_DONE) { + dev_err(priv->dev, "spurious irq detected (IRQ)\n"); + return IRQ_NONE; + } + + spin_lock(&priv->lock); + + /* hide the interrupt by switching the IRQ driver to GPIO */ + data_disable_interrupts(priv); + + /* If there are no free buffers, drop this data */ + if (list_empty(&priv->free)) { + priv->num_dropped++; + goto out; + } + + buf = list_first_entry(&priv->free, struct data_buf, entry); + list_del_init(&buf->entry); + BUG_ON(buf->size != priv->bufsize); + + /* Submit a DMA transfer to get the correlation data */ + if (data_submit_dma(priv, buf)) { + dev_err(priv->dev, "Unable to setup DMA transfer\n"); + list_move_tail(&buf->entry, &priv->free); + goto out; + } + + /* Save the buffer for the DMA callback */ + priv->inflight = buf; + submitted = true; + + /* Start the DMA Engine */ + dma_async_memcpy_issue_pending(priv->chan); + +out: + /* If no DMA was submitted, re-enable interrupts */ + if (!submitted) + data_enable_interrupts(priv); + + spin_unlock(&priv->lock); + return IRQ_HANDLED; +} + +/* + * Realtime Device Enable Helpers + */ + +/** + * data_device_enable() - enable the device for buffered dumping + * @priv: the driver's private data structure + * + * Enable the device for buffered dumping. Allocates buffers and hooks up + * the interrupt handler. When this finishes, data will come pouring in. 
+ * + * LOCKING: must hold dev->mutex + * CONTEXT: user context only + * + * Returns 0 on success, -ERRNO otherwise + */ +static int data_device_enable(struct fpga_device *priv) +{ + u32 val; + int ret; + + /* multiple enables are safe: they do nothing */ + if (priv->enabled) + return 0; + + /* check that the FPGAs are programmed */ + val = ioread32be(priv->regs + SYS_FPGA_CONFIG_STATUS); + if (!(val & (1 << 18))) { + dev_err(priv->dev, "DATA-FPGAs are not enabled\n"); + return -ENODATA; + } + + /* read the FPGAs to calculate the buffer size */ + ret = data_calculate_bufsize(priv); + if (ret) { + dev_err(priv->dev, "unable to calculate buffer size\n"); + goto out_error; + } + + /* allocate the correlation data buffers */ + ret = data_alloc_buffers(priv); + if (ret) { + dev_err(priv->dev, "unable to allocate buffers\n"); + goto out_error; + } + + /* setup the source scatterlist for dumping correlation data */ + ret = data_setup_corl_table(priv); + if (ret) { + dev_err(priv->dev, "unable to setup correlation DMA table\n"); + goto out_error; + } + + /* hookup the irq handler */ + ret = request_irq(priv->irq, data_irq, IRQF_SHARED, drv_name, priv); + if (ret) { + dev_err(priv->dev, "unable to request IRQ handler\n"); + goto out_error; + } + + /* switch to the external FPGA IRQ line */ + data_enable_interrupts(priv); + + /* success, we're enabled */ + priv->enabled = true; + return 0; + +out_error: + sg_free_table(&priv->corl_table); + priv->corl_nents = 0; + + data_free_buffers(priv); + return ret; +} + +/** + * data_device_disable() - disable the device for buffered dumping + * @priv: the driver's private data structure + * + * Disable the device for buffered dumping. Stops new DMA transactions from + * being generated, waits for all outstanding DMA to complete, and then frees + * all buffers. + * + * LOCKING: must hold dev->mutex + * CONTEXT: user only + * + * Returns 0 on success, -ERRNO otherwise + */ +static int data_device_disable(struct fpga_device *priv) +{ + int ret; + + /* allow multiple disable */ + if (!priv->enabled) + return 0; + + /* switch to the internal GPIO IRQ line */ + data_disable_interrupts(priv); + + /* unhook the irq handler */ + free_irq(priv->irq, priv); + + /* + * wait for all outstanding DMA to complete + * + * Device interrupts are disabled, therefore another buffer cannot + * be marked inflight. + */ + ret = wait_event_interruptible(priv->wait, priv->inflight == NULL); + if (ret) + return ret; + + /* free the correlation table */ + sg_free_table(&priv->corl_table); + priv->corl_nents = 0; + + /* + * We are taking the spinlock not to protect priv->enabled, but instead + * to make sure that there are no readers in the process of altering + * the free or used lists while we are setting this flag. 
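Once the 'enable' sysfs attribute and the read() path shown in the following hunks are in place, a consumer only needs to switch capture on and keep reading. A hypothetical userspace sketch; the device node and sysfs paths are illustrative and not taken from the driver:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    #define DUMP_MAX (1 << 20)      /* generous upper bound on one dump */

    int main(void)
    {
            static char dump[DUMP_MAX];
            ssize_t n;
            int en, dev;

            /* enable buffered dumping; sysfs path is illustrative */
            en = open("/sys/class/misc/carma-fpga/enable", O_WRONLY);
            if (en < 0 || write(en, "1", 1) != 1) {
                    perror("enable");
                    return 1;
            }
            close(en);

            dev = open("/dev/carma-fpga", O_RDONLY);        /* hypothetical node */
            if (dev < 0) {
                    perror("open");
                    return 1;
            }

            for (;;) {
                    /* blocks until a filled buffer is on the used list */
                    n = read(dev, dump, sizeof(dump));
                    if (n <= 0)
                            break;
                    printf("read %zd bytes of correlation data\n", n);
            }

            close(dev);
            return 0;
    }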
+ */ + spin_lock_irq(&priv->lock); + priv->enabled = false; + spin_unlock_irq(&priv->lock); + + /* free all buffers: the free and used lists are not being changed */ + data_free_buffers(priv); + return 0; +} + +/* + * DEBUGFS Interface + */ +#ifdef CONFIG_DEBUG_FS + +/* + * Count the number of entries in the given list + */ +static unsigned int list_num_entries(struct list_head *list) +{ + struct list_head *entry; + unsigned int ret = 0; + + list_for_each(entry, list) + ret++; + + return ret; +} + +static int data_debug_show(struct seq_file *f, void *offset) +{ + struct fpga_device *priv = f->private; + int ret; + + /* + * Lock the mutex first, so that we get an accurate value for enable + * Lock the spinlock next, to get accurate list counts + */ + ret = mutex_lock_interruptible(&priv->mutex); + if (ret) + return ret; + + spin_lock_irq(&priv->lock); + + seq_printf(f, "enabled: %d\n", priv->enabled); + seq_printf(f, "bufsize: %d\n", priv->bufsize); + seq_printf(f, "num_buffers: %d\n", priv->num_buffers); + seq_printf(f, "num_free: %d\n", list_num_entries(&priv->free)); + seq_printf(f, "inflight: %d\n", priv->inflight != NULL); + seq_printf(f, "num_used: %d\n", list_num_entries(&priv->used)); + seq_printf(f, "num_dropped: %d\n", priv->num_dropped); + + spin_unlock_irq(&priv->lock); + mutex_unlock(&priv->mutex); + return 0; +} + +static int data_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, data_debug_show, inode->i_private); +} + +static const struct file_operations data_debug_fops = { + .owner = THIS_MODULE, + .open = data_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int data_debugfs_init(struct fpga_device *priv) +{ + priv->dbg_entry = debugfs_create_file(drv_name, S_IRUGO, NULL, priv, + &data_debug_fops); + if (IS_ERR(priv->dbg_entry)) + return PTR_ERR(priv->dbg_entry); + + return 0; +} + +static void data_debugfs_exit(struct fpga_device *priv) +{ + debugfs_remove(priv->dbg_entry); +} + +#else + +static inline int data_debugfs_init(struct fpga_device *priv) +{ + return 0; +} + +static inline void data_debugfs_exit(struct fpga_device *priv) +{ +} + +#endif /* CONFIG_DEBUG_FS */ + +/* + * SYSFS Attributes + */ + +static ssize_t data_en_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct fpga_device *priv = dev_get_drvdata(dev); + return snprintf(buf, PAGE_SIZE, "%u\n", priv->enabled); +} + +static ssize_t data_en_set(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fpga_device *priv = dev_get_drvdata(dev); + unsigned long enable; + int ret; + + ret = strict_strtoul(buf, 0, &enable); + if (ret) { + dev_err(priv->dev, "unable to parse enable input\n"); + return -EINVAL; + } + + ret = mutex_lock_interruptible(&priv->mutex); + if (ret) + return ret; + + if (enable) + ret = data_device_enable(priv); + else + ret = data_device_disable(priv); + + if (ret) { + dev_err(priv->dev, "device %s failed\n", + enable ? 
"enable" : "disable"); + count = ret; + goto out_unlock; + } + +out_unlock: + mutex_unlock(&priv->mutex); + return count; +} + +static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO, data_en_show, data_en_set); + +static struct attribute *data_sysfs_attrs[] = { + &dev_attr_enable.attr, + NULL, +}; + +static const struct attribute_group rt_sysfs_attr_group = { + .attrs = data_sysfs_attrs, +}; + +/* + * FPGA Realtime Data Character Device + */ + +static int data_open(struct inode *inode, struct file *filp) +{ + /* + * The miscdevice layer puts our struct miscdevice into the + * filp->private_data field. We use this to find our private + * data and then overwrite it with our own private structure. + */ + struct fpga_device *priv = container_of(filp->private_data, + struct fpga_device, miscdev); + struct fpga_reader *reader; + int ret; + + /* allocate private data */ + reader = kzalloc(sizeof(*reader), GFP_KERNEL); + if (!reader) + return -ENOMEM; + + reader->priv = priv; + reader->buf = NULL; + + filp->private_data = reader; + ret = nonseekable_open(inode, filp); + if (ret) { + dev_err(priv->dev, "nonseekable-open failed\n"); + kfree(reader); + return ret; + } + + /* + * success, increase the reference count of the private data structure + * so that it doesn't disappear if the device is unbound + */ + kref_get(&priv->ref); + return 0; +} + +static int data_release(struct inode *inode, struct file *filp) +{ + struct fpga_reader *reader = filp->private_data; + struct fpga_device *priv = reader->priv; + + /* free the per-reader structure */ + data_free_buffer(reader->buf); + kfree(reader); + filp->private_data = NULL; + + /* decrement our reference count to the private data */ + kref_put(&priv->ref, fpga_device_release); + return 0; +} + +static ssize_t data_read(struct file *filp, char __user *ubuf, size_t count, + loff_t *f_pos) +{ + struct fpga_reader *reader = filp->private_data; + struct fpga_device *priv = reader->priv; + struct list_head *used = &priv->used; + struct data_buf *dbuf; + size_t avail; + void *data; + int ret; + + /* check if we already have a partial buffer */ + if (reader->buf) { + dbuf = reader->buf; + goto have_buffer; + } + + spin_lock_irq(&priv->lock); + + /* Block until there is at least one buffer on the used list */ + while (list_empty(used)) { + spin_unlock_irq(&priv->lock); + + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; + + ret = wait_event_interruptible(priv->wait, !list_empty(used)); + if (ret) + return ret; + + spin_lock_irq(&priv->lock); + } + + /* Grab the first buffer off of the used list */ + dbuf = list_first_entry(used, struct data_buf, entry); + list_del_init(&dbuf->entry); + + spin_unlock_irq(&priv->lock); + + /* Buffers are always mapped: unmap it */ + videobuf_dma_unmap(priv->dev, &dbuf->vb); + + /* save the buffer for later */ + reader->buf = dbuf; + reader->buf_start = 0; + +have_buffer: + /* Get the number of bytes available */ + avail = dbuf->size - reader->buf_start; + data = dbuf->vb.vaddr + reader->buf_start; + + /* Get the number of bytes we can transfer */ + count = min(count, avail); + + /* Copy the data to the userspace buffer */ + if (copy_to_user(ubuf, data, count)) + return -EFAULT; + + /* Update the amount of available space */ + avail -= count; + + /* + * If there is still some data available, save the buffer for the + * next userspace call to read() and return + */ + if (avail > 0) { + reader->buf_start += count; + reader->buf = dbuf; + return count; + } + + /* + * Get the buffer ready to be reused for DMA + * + * If it fails, we pretend 
that the read never happed and return + * -EFAULT to userspace. The read will be retried. + */ + ret = videobuf_dma_map(priv->dev, &dbuf->vb); + if (ret) { + dev_err(priv->dev, "unable to remap buffer for DMA\n"); + return -EFAULT; + } + + /* Lock against concurrent enable/disable */ + spin_lock_irq(&priv->lock); + + /* the reader is finished with this buffer */ + reader->buf = NULL; + + /* + * One of two things has happened, the device is disabled, or the + * device has been reconfigured underneath us. In either case, we + * should just throw away the buffer. + */ + if (!priv->enabled || dbuf->size != priv->bufsize) { + videobuf_dma_unmap(priv->dev, &dbuf->vb); + data_free_buffer(dbuf); + goto out_unlock; + } + + /* The buffer is safe to reuse, so add it back to the free list */ + list_add_tail(&dbuf->entry, &priv->free); + +out_unlock: + spin_unlock_irq(&priv->lock); + return count; +} + +static unsigned int data_poll(struct file *filp, struct poll_table_struct *tbl) +{ + struct fpga_reader *reader = filp->private_data; + struct fpga_device *priv = reader->priv; + unsigned int mask = 0; + + poll_wait(filp, &priv->wait, tbl); + + if (!list_empty(&priv->used)) + mask |= POLLIN | POLLRDNORM; + + return mask; +} + +static int data_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct fpga_reader *reader = filp->private_data; + struct fpga_device *priv = reader->priv; + unsigned long offset, vsize, psize, addr; + + /* VMA properties */ + offset = vma->vm_pgoff << PAGE_SHIFT; + vsize = vma->vm_end - vma->vm_start; + psize = priv->phys_size - offset; + addr = (priv->phys_addr + offset) >> PAGE_SHIFT; + + /* Check against the FPGA region's physical memory size */ + if (vsize > psize) { + dev_err(priv->dev, "requested mmap mapping too large\n"); + return -EINVAL; + } + + /* IO memory (stop cacheing) */ + vma->vm_flags |= VM_IO | VM_RESERVED; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + return io_remap_pfn_range(vma, vma->vm_start, addr, vsize, + vma->vm_page_prot); +} + +static const struct file_operations data_fops = { + .owner = THIS_MODULE, + .open = data_open, + .release = data_release, + .read = data_read, + .poll = data_poll, + .mmap = data_mmap, + .llseek = no_llseek, +}; + +/* + * OpenFirmware Device Subsystem + */ + +static bool dma_filter(struct dma_chan *chan, void *data) +{ + /* + * DMA Channel #0 is used for the FPGA Programmer, so ignore it + * + * This probably won't survive an unload/load cycle of the Freescale + * DMAEngine driver, but that won't be a problem + */ + if (chan->chan_id == 0 && chan->device->dev_id == 0) + return false; + + return true; +} + +static int data_of_probe(struct platform_device *op, + const struct of_device_id *match) +{ + struct device_node *of_node = op->dev.of_node; + struct device *this_device; + struct fpga_device *priv; + struct resource res; + dma_cap_mask_t mask; + int ret; + + /* Allocate private data */ + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(&op->dev, "Unable to allocate device private data\n"); + ret = -ENOMEM; + goto out_return; + } + + dev_set_drvdata(&op->dev, priv); + priv->dev = &op->dev; + kref_init(&priv->ref); + mutex_init(&priv->mutex); + + dev_set_drvdata(priv->dev, priv); + spin_lock_init(&priv->lock); + INIT_LIST_HEAD(&priv->free); + INIT_LIST_HEAD(&priv->used); + init_waitqueue_head(&priv->wait); + + /* Setup the misc device */ + priv->miscdev.minor = MISC_DYNAMIC_MINOR; + priv->miscdev.name = drv_name; + priv->miscdev.fops = &data_fops; + + /* Get the physical address of 
the FPGA registers */ + ret = of_address_to_resource(of_node, 0, &res); + if (ret) { + dev_err(&op->dev, "Unable to find FPGA physical address\n"); + ret = -ENODEV; + goto out_free_priv; + } + + priv->phys_addr = res.start; + priv->phys_size = resource_size(&res); + + /* ioremap the registers for use */ + priv->regs = of_iomap(of_node, 0); + if (!priv->regs) { + dev_err(&op->dev, "Unable to ioremap registers\n"); + ret = -ENOMEM; + goto out_free_priv; + } + + dma_cap_zero(mask); + dma_cap_set(DMA_MEMCPY, mask); + dma_cap_set(DMA_INTERRUPT, mask); + dma_cap_set(DMA_SLAVE, mask); + dma_cap_set(DMA_SG, mask); + + /* Request a DMA channel */ + priv->chan = dma_request_channel(mask, dma_filter, NULL); + if (!priv->chan) { + dev_err(&op->dev, "Unable to request DMA channel\n"); + ret = -ENODEV; + goto out_unmap_regs; + } + + /* Find the correct IRQ number */ + priv->irq = irq_of_parse_and_map(of_node, 0); + if (priv->irq == NO_IRQ) { + dev_err(&op->dev, "Unable to find IRQ line\n"); + ret = -ENODEV; + goto out_release_dma; + } + + /* Drive the GPIO for FPGA IRQ high (no interrupt) */ + iowrite32be(IRQ_CORL_DONE, priv->regs + SYS_IRQ_OUTPUT_DATA); + + /* Register the miscdevice */ + ret = misc_register(&priv->miscdev); + if (ret) { + dev_err(&op->dev, "Unable to register miscdevice\n"); + goto out_irq_dispose_mapping; + } + + /* Create the debugfs files */ + ret = data_debugfs_init(priv); + if (ret) { + dev_err(&op->dev, "Unable to create debugfs files\n"); + goto out_misc_deregister; + } + + /* Create the sysfs files */ + this_device = priv->miscdev.this_device; + dev_set_drvdata(this_device, priv); + ret = sysfs_create_group(&this_device->kobj, &rt_sysfs_attr_group); + if (ret) { + dev_err(&op->dev, "Unable to create sysfs files\n"); + goto out_data_debugfs_exit; + } + + dev_info(&op->dev, "CARMA FPGA Realtime Data Driver Loaded\n"); + return 0; + +out_data_debugfs_exit: + data_debugfs_exit(priv); +out_misc_deregister: + misc_deregister(&priv->miscdev); +out_irq_dispose_mapping: + irq_dispose_mapping(priv->irq); +out_release_dma: + dma_release_channel(priv->chan); +out_unmap_regs: + iounmap(priv->regs); +out_free_priv: + kref_put(&priv->ref, fpga_device_release); +out_return: + return ret; +} + +static int data_of_remove(struct platform_device *op) +{ + struct fpga_device *priv = dev_get_drvdata(&op->dev); + struct device *this_device = priv->miscdev.this_device; + + /* remove all sysfs files, now the device cannot be re-enabled */ + sysfs_remove_group(&this_device->kobj, &rt_sysfs_attr_group); + + /* remove all debugfs files */ + data_debugfs_exit(priv); + + /* disable the device from generating data */ + data_device_disable(priv); + + /* remove the character device to stop new readers from appearing */ + misc_deregister(&priv->miscdev); + + /* cleanup everything not needed by readers */ + irq_dispose_mapping(priv->irq); + dma_release_channel(priv->chan); + iounmap(priv->regs); + + /* release our reference */ + kref_put(&priv->ref, fpga_device_release); + return 0; +} + +static struct of_device_id data_of_match[] = { + { .compatible = "carma,carma-fpga", }, + {}, +}; + +static struct of_platform_driver data_of_driver = { + .probe = data_of_probe, + .remove = data_of_remove, + .driver = { + .name = drv_name, + .of_match_table = data_of_match, + .owner = THIS_MODULE, + }, +}; + +/* + * Module Init / Exit + */ + +static int __init data_init(void) +{ + return of_register_platform_driver(&data_of_driver); +} + +static void __exit data_exit(void) +{ + of_unregister_platform_driver(&data_of_driver); 
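The data_mmap() handler shown above also exposes the raw FPGA register window to userspace. A hypothetical sketch of mapping it; the device node and the 4 KiB mapping length are illustrative only:

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            const size_t len = 4096;        /* must stay within the FPGA resource size */
            volatile uint32_t *regs;
            int fd;

            fd = open("/dev/carma-fpga", O_RDWR);   /* hypothetical node */
            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            regs = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (regs == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }

            /* the mapping is uncached IO memory; the word is printed raw */
            printf("first register word: 0x%08x\n", (unsigned int)regs[0]);

            munmap((void *)regs, len);
            close(fd);
            return 0;
    }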
+} + +MODULE_AUTHOR("Ira W. Snyder "); +MODULE_DESCRIPTION("CARMA DATA-FPGA Access Driver"); +MODULE_LICENSE("GPL"); + +module_init(data_init); +module_exit(data_exit); diff --git a/trunk/drivers/misc/sgi-gru/grufault.c b/trunk/drivers/misc/sgi-gru/grufault.c index 38657cdaf54d..c4acac74725c 100644 --- a/trunk/drivers/misc/sgi-gru/grufault.c +++ b/trunk/drivers/misc/sgi-gru/grufault.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include "gru.h" #include "grutables.h" diff --git a/trunk/drivers/misc/sgi-gru/grufile.c b/trunk/drivers/misc/sgi-gru/grufile.c index 20e4e9395b61..ecafa4ba238b 100644 --- a/trunk/drivers/misc/sgi-gru/grufile.c +++ b/trunk/drivers/misc/sgi-gru/grufile.c @@ -348,15 +348,15 @@ static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep) static int gru_irq_count[GRU_CHIPLETS_PER_BLADE]; -static void gru_noop(unsigned int irq) +static void gru_noop(struct irq_data *d) { } static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = { [0 ... GRU_CHIPLETS_PER_BLADE - 1] { - .mask = gru_noop, - .unmask = gru_noop, - .ack = gru_noop + .irq_mask = gru_noop, + .irq_unmask = gru_noop, + .irq_ack = gru_noop } }; diff --git a/trunk/drivers/misc/sgi-gru/grumain.c b/trunk/drivers/misc/sgi-gru/grumain.c index f8538bbd0bfa..ae16c8cb4f3e 100644 --- a/trunk/drivers/misc/sgi-gru/grumain.c +++ b/trunk/drivers/misc/sgi-gru/grumain.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include "gru.h" #include "grutables.h" diff --git a/trunk/drivers/misc/ti-st/Kconfig b/trunk/drivers/misc/ti-st/Kconfig index 2c8c3f39710d..abb5de1afce3 100644 --- a/trunk/drivers/misc/ti-st/Kconfig +++ b/trunk/drivers/misc/ti-st/Kconfig @@ -5,7 +5,7 @@ menu "Texas Instruments shared transport line discipline" config TI_ST tristate "Shared transport core driver" - depends on RFKILL + depends on NET && GPIOLIB select FW_LOADER help This enables the shared transport core driver for TI diff --git a/trunk/drivers/misc/ti-st/st_core.c b/trunk/drivers/misc/ti-st/st_core.c index 486117f72c9f..f91f82eabda7 100644 --- a/trunk/drivers/misc/ti-st/st_core.c +++ b/trunk/drivers/misc/ti-st/st_core.c @@ -43,13 +43,15 @@ static void add_channel_to_table(struct st_data_s *st_gdata, pr_info("%s: id %d\n", __func__, new_proto->chnl_id); /* list now has the channel id as index itself */ st_gdata->list[new_proto->chnl_id] = new_proto; + st_gdata->is_registered[new_proto->chnl_id] = true; } static void remove_channel_from_table(struct st_data_s *st_gdata, struct st_proto_s *proto) { pr_info("%s: id %d\n", __func__, proto->chnl_id); - st_gdata->list[proto->chnl_id] = NULL; +/* st_gdata->list[proto->chnl_id] = NULL; */ + st_gdata->is_registered[proto->chnl_id] = false; } /* @@ -104,7 +106,7 @@ void st_send_frame(unsigned char chnl_id, struct st_data_s *st_gdata) if (unlikely (st_gdata == NULL || st_gdata->rx_skb == NULL - || st_gdata->list[chnl_id] == NULL)) { + || st_gdata->is_registered[chnl_id] == false)) { pr_err("chnl_id %d not registered, no data to send?", chnl_id); kfree_skb(st_gdata->rx_skb); @@ -141,14 +143,15 @@ void st_reg_complete(struct st_data_s *st_gdata, char err) unsigned char i = 0; pr_info(" %s ", __func__); for (i = 0; i < ST_MAX_CHANNELS; i++) { - if (likely(st_gdata != NULL && st_gdata->list[i] != NULL && - st_gdata->list[i]->reg_complete_cb != NULL)) { + if (likely(st_gdata != NULL && + st_gdata->is_registered[i] == true && + st_gdata->list[i]->reg_complete_cb != NULL)) { st_gdata->list[i]->reg_complete_cb (st_gdata->list[i]->priv_data, err); pr_info("protocol 
%d's cb sent %d\n", i, err); if (err) { /* cleanup registered protocol */ st_gdata->protos_registered--; - st_gdata->list[i] = NULL; + st_gdata->is_registered[i] = false; } } } @@ -475,9 +478,9 @@ void kim_st_list_protocols(struct st_data_s *st_gdata, void *buf) { seq_printf(buf, "[%d]\nBT=%c\nFM=%c\nGPS=%c\n", st_gdata->protos_registered, - st_gdata->list[0x04] != NULL ? 'R' : 'U', - st_gdata->list[0x08] != NULL ? 'R' : 'U', - st_gdata->list[0x09] != NULL ? 'R' : 'U'); + st_gdata->is_registered[0x04] == true ? 'R' : 'U', + st_gdata->is_registered[0x08] == true ? 'R' : 'U', + st_gdata->is_registered[0x09] == true ? 'R' : 'U'); } /********************************************************************/ @@ -504,7 +507,7 @@ long st_register(struct st_proto_s *new_proto) return -EPROTONOSUPPORT; } - if (st_gdata->list[new_proto->chnl_id] != NULL) { + if (st_gdata->is_registered[new_proto->chnl_id] == true) { pr_err("chnl_id %d already registered", new_proto->chnl_id); return -EALREADY; } @@ -563,7 +566,7 @@ long st_register(struct st_proto_s *new_proto) /* check for already registered once more, * since the above check is old */ - if (st_gdata->list[new_proto->chnl_id] != NULL) { + if (st_gdata->is_registered[new_proto->chnl_id] == true) { pr_err(" proto %d already registered ", new_proto->chnl_id); return -EALREADY; diff --git a/trunk/drivers/misc/ti-st/st_kim.c b/trunk/drivers/misc/ti-st/st_kim.c index b4488c8f6b23..5da93ee6f6be 100644 --- a/trunk/drivers/misc/ti-st/st_kim.c +++ b/trunk/drivers/misc/ti-st/st_kim.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include diff --git a/trunk/drivers/mmc/core/bus.c b/trunk/drivers/mmc/core/bus.c index 63667a8f140c..d6d62fd07ee9 100644 --- a/trunk/drivers/mmc/core/bus.c +++ b/trunk/drivers/mmc/core/bus.c @@ -284,6 +284,7 @@ int mmc_add_card(struct mmc_card *card) type = "SD-combo"; if (mmc_card_blockaddr(card)) type = "SDHC-combo"; + break; default: type = "?"; break; diff --git a/trunk/drivers/mmc/host/omap.c b/trunk/drivers/mmc/host/omap.c index 2e032f0e8cf4..a6c329040140 100644 --- a/trunk/drivers/mmc/host/omap.c +++ b/trunk/drivers/mmc/host/omap.c @@ -832,7 +832,7 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id) return IRQ_HANDLED; } - if (end_command) + if (end_command && host->cmd) mmc_omap_cmd_done(host, host->cmd); if (host->data != NULL) { if (transfer_error) diff --git a/trunk/drivers/mmc/host/sdhci-of-core.c b/trunk/drivers/mmc/host/sdhci-of-core.c index f9b611fc773e..60e4186a4345 100644 --- a/trunk/drivers/mmc/host/sdhci-of-core.c +++ b/trunk/drivers/mmc/host/sdhci-of-core.c @@ -124,8 +124,10 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np) #endif } +static const struct of_device_id sdhci_of_match[]; static int __devinit sdhci_of_probe(struct platform_device *ofdev) { + const struct of_device_id *match; struct device_node *np = ofdev->dev.of_node; struct sdhci_of_data *sdhci_of_data; struct sdhci_host *host; @@ -134,9 +136,10 @@ static int __devinit sdhci_of_probe(struct platform_device *ofdev) int size; int ret; - if (!ofdev->dev.of_match) + match = of_match_device(sdhci_of_match, &ofdev->dev); + if (!match) return -EINVAL; - sdhci_of_data = ofdev->dev.of_match->data; + sdhci_of_data = match->data; if (!of_device_is_available(np)) return -ENODEV; diff --git a/trunk/drivers/mmc/host/sdhci-pci.c b/trunk/drivers/mmc/host/sdhci-pci.c index a136be706347..f8b5f37007b2 100644 --- a/trunk/drivers/mmc/host/sdhci-pci.c +++ b/trunk/drivers/mmc/host/sdhci-pci.c @@ -957,6 +957,7 @@ static struct 
sdhci_pci_slot * __devinit sdhci_pci_probe_slot( host->ioaddr = pci_ioremap_bar(pdev, bar); if (!host->ioaddr) { dev_err(&pdev->dev, "failed to remap registers\n"); + ret = -ENOMEM; goto release; } diff --git a/trunk/drivers/mmc/host/sdhci.c b/trunk/drivers/mmc/host/sdhci.c index 9e15f41f87be..5d20661bc357 100644 --- a/trunk/drivers/mmc/host/sdhci.c +++ b/trunk/drivers/mmc/host/sdhci.c @@ -1334,6 +1334,13 @@ static void sdhci_tasklet_finish(unsigned long param) host = (struct sdhci_host*)param; + /* + * If this tasklet gets rescheduled while running, it will + * be run again afterwards but without any active request. + */ + if (!host->mrq) + return; + spin_lock_irqsave(&host->lock, flags); del_timer(&host->timer); @@ -1345,7 +1352,7 @@ static void sdhci_tasklet_finish(unsigned long param) * upon error conditions. */ if (!(host->flags & SDHCI_DEVICE_DEAD) && - (mrq->cmd->error || + ((mrq->cmd && mrq->cmd->error) || (mrq->data && (mrq->data->error || (mrq->data->stop && mrq->data->stop->error))) || (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { diff --git a/trunk/drivers/mmc/host/tmio_mmc_pio.c b/trunk/drivers/mmc/host/tmio_mmc_pio.c index 62d37de6de76..710339a85c84 100644 --- a/trunk/drivers/mmc/host/tmio_mmc_pio.c +++ b/trunk/drivers/mmc/host/tmio_mmc_pio.c @@ -728,15 +728,15 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) tmio_mmc_set_clock(host, ios->clock); /* Power sequence - OFF -> UP -> ON */ - if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { + if (ios->power_mode == MMC_POWER_UP) { + /* power up SD bus */ + if (host->set_pwr) + host->set_pwr(host->pdev, 1); + } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { /* power down SD bus */ if (ios->power_mode == MMC_POWER_OFF && host->set_pwr) host->set_pwr(host->pdev, 0); tmio_mmc_clk_stop(host); - } else if (ios->power_mode == MMC_POWER_UP) { - /* power up SD bus */ - if (host->set_pwr) - host->set_pwr(host->pdev, 1); } else { /* start bus clock */ tmio_mmc_clk_start(host); diff --git a/trunk/drivers/mtd/maps/Kconfig b/trunk/drivers/mtd/maps/Kconfig index 44b1f46458ca..5069111c81cc 100644 --- a/trunk/drivers/mtd/maps/Kconfig +++ b/trunk/drivers/mtd/maps/Kconfig @@ -260,6 +260,13 @@ config MTD_BCM963XX Support for parsing CFE image tag and creating MTD partitions on Broadcom BCM63xx boards. +config MTD_LANTIQ + tristate "Lantiq SoC NOR support" + depends on LANTIQ + select MTD_PARTITIONS + help + Support for NOR flash attached to the Lantiq SoC's External Bus Unit. + config MTD_DILNETPC tristate "CFI Flash device mapped on DIL/Net PC" depends on X86 && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN diff --git a/trunk/drivers/mtd/maps/Makefile b/trunk/drivers/mtd/maps/Makefile index 08533bd5cba7..6adf4c9b9057 100644 --- a/trunk/drivers/mtd/maps/Makefile +++ b/trunk/drivers/mtd/maps/Makefile @@ -60,3 +60,4 @@ obj-$(CONFIG_MTD_VMU) += vmu-flash.o obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o +obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o diff --git a/trunk/drivers/mtd/maps/lantiq-flash.c b/trunk/drivers/mtd/maps/lantiq-flash.c new file mode 100644 index 000000000000..a90cabd7b84d --- /dev/null +++ b/trunk/drivers/mtd/maps/lantiq-flash.c @@ -0,0 +1,251 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Copyright (C) 2004 Liu Peng Infineon IFAP DC COM CPE + * Copyright (C) 2010 John Crispin + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/* + * The NOR flash is connected to the same external bus unit (EBU) as PCI. + * To make PCI work we need to enable the endianness swapping for the address + * written to the EBU. This endianness swapping works for PCI correctly but + * fails for attached NOR devices. To workaround this we need to use a complex + * map. The workaround involves swapping all addresses whilst probing the chip. + * Once probing is complete we stop swapping the addresses but swizzle the + * unlock addresses to ensure that access to the NOR device works correctly. + */ + +enum { + LTQ_NOR_PROBING, + LTQ_NOR_NORMAL +}; + +struct ltq_mtd { + struct resource *res; + struct mtd_info *mtd; + struct map_info *map; +}; + +static char ltq_map_name[] = "ltq_nor"; + +static map_word +ltq_read16(struct map_info *map, unsigned long adr) +{ + unsigned long flags; + map_word temp; + + if (map->map_priv_1 == LTQ_NOR_PROBING) + adr ^= 2; + spin_lock_irqsave(&ebu_lock, flags); + temp.x[0] = *(u16 *)(map->virt + adr); + spin_unlock_irqrestore(&ebu_lock, flags); + return temp; +} + +static void +ltq_write16(struct map_info *map, map_word d, unsigned long adr) +{ + unsigned long flags; + + if (map->map_priv_1 == LTQ_NOR_PROBING) + adr ^= 2; + spin_lock_irqsave(&ebu_lock, flags); + *(u16 *)(map->virt + adr) = d.x[0]; + spin_unlock_irqrestore(&ebu_lock, flags); +} + +/* + * The following 2 functions copy data between iomem and a cached memory + * section. As memcpy() makes use of pre-fetching we cannot use it here. + * The normal alternative of using memcpy_{to,from}io also makes use of + * memcpy() on MIPS so it is not applicable either. We are therefore stuck + * with having to use our own loop. 
+ */ +static void +ltq_copy_from(struct map_info *map, void *to, + unsigned long from, ssize_t len) +{ + unsigned char *f = (unsigned char *)map->virt + from; + unsigned char *t = (unsigned char *)to; + unsigned long flags; + + spin_lock_irqsave(&ebu_lock, flags); + while (len--) + *t++ = *f++; + spin_unlock_irqrestore(&ebu_lock, flags); +} + +static void +ltq_copy_to(struct map_info *map, unsigned long to, + const void *from, ssize_t len) +{ + unsigned char *f = (unsigned char *)from; + unsigned char *t = (unsigned char *)map->virt + to; + unsigned long flags; + + spin_lock_irqsave(&ebu_lock, flags); + while (len--) + *t++ = *f++; + spin_unlock_irqrestore(&ebu_lock, flags); +} + +static const char const *part_probe_types[] = { "cmdlinepart", NULL }; + +static int __init +ltq_mtd_probe(struct platform_device *pdev) +{ + struct physmap_flash_data *ltq_mtd_data = dev_get_platdata(&pdev->dev); + struct ltq_mtd *ltq_mtd; + struct mtd_partition *parts; + struct resource *res; + int nr_parts = 0; + struct cfi_private *cfi; + int err; + + ltq_mtd = kzalloc(sizeof(struct ltq_mtd), GFP_KERNEL); + platform_set_drvdata(pdev, ltq_mtd); + + ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!ltq_mtd->res) { + dev_err(&pdev->dev, "failed to get memory resource"); + err = -ENOENT; + goto err_out; + } + + res = devm_request_mem_region(&pdev->dev, ltq_mtd->res->start, + resource_size(ltq_mtd->res), dev_name(&pdev->dev)); + if (!ltq_mtd->res) { + dev_err(&pdev->dev, "failed to request mem resource"); + err = -EBUSY; + goto err_out; + } + + ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL); + ltq_mtd->map->phys = res->start; + ltq_mtd->map->size = resource_size(res); + ltq_mtd->map->virt = devm_ioremap_nocache(&pdev->dev, + ltq_mtd->map->phys, ltq_mtd->map->size); + if (!ltq_mtd->map->virt) { + dev_err(&pdev->dev, "failed to ioremap!\n"); + err = -ENOMEM; + goto err_free; + } + + ltq_mtd->map->name = ltq_map_name; + ltq_mtd->map->bankwidth = 2; + ltq_mtd->map->read = ltq_read16; + ltq_mtd->map->write = ltq_write16; + ltq_mtd->map->copy_from = ltq_copy_from; + ltq_mtd->map->copy_to = ltq_copy_to; + + ltq_mtd->map->map_priv_1 = LTQ_NOR_PROBING; + ltq_mtd->mtd = do_map_probe("cfi_probe", ltq_mtd->map); + ltq_mtd->map->map_priv_1 = LTQ_NOR_NORMAL; + + if (!ltq_mtd->mtd) { + dev_err(&pdev->dev, "probing failed\n"); + err = -ENXIO; + goto err_unmap; + } + + ltq_mtd->mtd->owner = THIS_MODULE; + + cfi = ltq_mtd->map->fldrv_priv; + cfi->addr_unlock1 ^= 1; + cfi->addr_unlock2 ^= 1; + + nr_parts = parse_mtd_partitions(ltq_mtd->mtd, + part_probe_types, &parts, 0); + if (nr_parts > 0) { + dev_info(&pdev->dev, + "using %d partitions from cmdline", nr_parts); + } else { + nr_parts = ltq_mtd_data->nr_parts; + parts = ltq_mtd_data->parts; + } + + err = add_mtd_partitions(ltq_mtd->mtd, parts, nr_parts); + if (err) { + dev_err(&pdev->dev, "failed to add partitions\n"); + goto err_destroy; + } + + return 0; + +err_destroy: + map_destroy(ltq_mtd->mtd); +err_unmap: + iounmap(ltq_mtd->map->virt); +err_free: + kfree(ltq_mtd->map); +err_out: + kfree(ltq_mtd); + return err; +} + +static int __devexit +ltq_mtd_remove(struct platform_device *pdev) +{ + struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev); + + if (ltq_mtd) { + if (ltq_mtd->mtd) { + del_mtd_partitions(ltq_mtd->mtd); + map_destroy(ltq_mtd->mtd); + } + if (ltq_mtd->map->virt) + iounmap(ltq_mtd->map->virt); + kfree(ltq_mtd->map); + kfree(ltq_mtd); + } + return 0; +} + +static struct platform_driver ltq_mtd_driver = { + .remove = 
__devexit_p(ltq_mtd_remove), + .driver = { + .name = "ltq_nor", + .owner = THIS_MODULE, + }, +}; + +static int __init +init_ltq_mtd(void) +{ + int ret = platform_driver_probe(<q_mtd_driver, ltq_mtd_probe); + + if (ret) + pr_err("ltq_nor: error registering platform driver"); + return ret; +} + +static void __exit +exit_ltq_mtd(void) +{ + platform_driver_unregister(<q_mtd_driver); +} + +module_init(init_ltq_mtd); +module_exit(exit_ltq_mtd); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("John Crispin "); +MODULE_DESCRIPTION("Lantiq SoC NOR"); diff --git a/trunk/drivers/mtd/maps/physmap_of.c b/trunk/drivers/mtd/maps/physmap_of.c index bd483f0c57e1..c1d33464aee8 100644 --- a/trunk/drivers/mtd/maps/physmap_of.c +++ b/trunk/drivers/mtd/maps/physmap_of.c @@ -214,11 +214,13 @@ static void __devinit of_free_probes(const char **probes) } #endif +static struct of_device_id of_flash_match[]; static int __devinit of_flash_probe(struct platform_device *dev) { #ifdef CONFIG_MTD_PARTITIONS const char **part_probe_types; #endif + const struct of_device_id *match; struct device_node *dp = dev->dev.of_node; struct resource res; struct of_flash *info; @@ -232,9 +234,10 @@ static int __devinit of_flash_probe(struct platform_device *dev) struct mtd_info **mtd_list = NULL; resource_size_t res_size; - if (!dev->dev.of_match) + match = of_match_device(of_flash_match, &dev->dev); + if (!match) return -EINVAL; - probe_type = dev->dev.of_match->data; + probe_type = match->data; reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32); diff --git a/trunk/drivers/mtd/nand/au1550nd.c b/trunk/drivers/mtd/nand/au1550nd.c index 3ffe05db4923..5d513b54a7d7 100644 --- a/trunk/drivers/mtd/nand/au1550nd.c +++ b/trunk/drivers/mtd/nand/au1550nd.c @@ -10,6 +10,7 @@ */ #include +#include #include #include #include @@ -470,7 +471,7 @@ static int __init au1xxx_nand_init(void) #ifdef CONFIG_MIPS_PB1550 /* set gpio206 high */ - au_writel(au_readl(GPIO2_DIR) & ~(1 << 6), GPIO2_DIR); + gpio_direction_input(206); boot_swapboot = (au_readl(MEM_STSTAT) & (0x7 << 1)) | ((bcsr_read(BCSR_STATUS) >> 6) & 0x1); diff --git a/trunk/drivers/mtd/nand/diskonchip.c b/trunk/drivers/mtd/nand/diskonchip.c index 96c0b34ba8db..657b9f4b6f9b 100644 --- a/trunk/drivers/mtd/nand/diskonchip.c +++ b/trunk/drivers/mtd/nand/diskonchip.c @@ -400,7 +400,7 @@ static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr) doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE); doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); - /* We can't' use dev_ready here, but at least we wait for the + /* We can't use dev_ready here, but at least we wait for the * command to complete */ udelay(50); diff --git a/trunk/drivers/net/Kconfig b/trunk/drivers/net/Kconfig index 6c884ef1b069..19f04a34783a 100644 --- a/trunk/drivers/net/Kconfig +++ b/trunk/drivers/net/Kconfig @@ -2017,6 +2017,13 @@ config FTMAC100 from Faraday. It is used on Faraday A320, Andes AG101 and some other ARM/NDS32 SoC's. 
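Several hunks in this merge (sdhci-of-core.c and physmap_of.c above, mpc5xxx_can.c, fs_enet-main.c, mii-fec.c and sunhme.c below) make the same mechanical conversion: the probe routine no longer dereferences ofdev->dev.of_match but looks the match up explicitly with of_match_device() against a forward-declared match table. A minimal sketch of the pattern, using hypothetical foo_* names rather than code from any of these drivers:

#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_devtype {				/* hypothetical per-variant data */
	int quirks;
};

static const struct foo_devtype foo_devtype_a = { .quirks = 1 };

/* the table lives below the probe routine, hence the forward declaration */
static const struct of_device_id foo_of_match[];

static int __devinit foo_probe(struct platform_device *ofdev)
{
	const struct of_device_id *match;
	const struct foo_devtype *devtype;

	match = of_match_device(foo_of_match, &ofdev->dev);
	if (!match)
		return -EINVAL;
	devtype = match->data;			/* replaces ofdev->dev.of_match->data */

	/* ... rest of probe keys off devtype->quirks ... */
	return 0;
}

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "acme,foo", .data = &foo_devtype_a },
	{ /* sentinel */ }
};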
+config LANTIQ_ETOP + tristate "Lantiq SoC ETOP driver" + depends on SOC_TYPE_XWAY + help + Support for the MII0 inside the Lantiq SoC + + source "drivers/net/fs_enet/Kconfig" source "drivers/net/octeon/Kconfig" diff --git a/trunk/drivers/net/Makefile b/trunk/drivers/net/Makefile index e5a7375685ad..209fbb70619b 100644 --- a/trunk/drivers/net/Makefile +++ b/trunk/drivers/net/Makefile @@ -259,6 +259,7 @@ obj-$(CONFIG_MLX4_CORE) += mlx4/ obj-$(CONFIG_ENC28J60) += enc28j60.o obj-$(CONFIG_ETHOC) += ethoc.o obj-$(CONFIG_GRETH) += greth.o +obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o diff --git a/trunk/drivers/net/acenic.c b/trunk/drivers/net/acenic.c index 82260ca70323..d7c1bfe4b6ec 100644 --- a/trunk/drivers/net/acenic.c +++ b/trunk/drivers/net/acenic.c @@ -68,6 +68,7 @@ #include #include #include +#include #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) #include diff --git a/trunk/drivers/net/arm/etherh.c b/trunk/drivers/net/arm/etherh.c index e252cd595016..03e217a868d4 100644 --- a/trunk/drivers/net/arm/etherh.c +++ b/trunk/drivers/net/arm/etherh.c @@ -527,7 +527,7 @@ static void __init etherh_banner(void) * Read the ethernet address string from the on board rom. * This is an ascii string... */ -static int __init etherh_addr(char *addr, struct expansion_card *ec) +static int __devinit etherh_addr(char *addr, struct expansion_card *ec) { struct in_chunk_dir cd; char *s; @@ -656,7 +656,7 @@ static const struct net_device_ops etherh_netdev_ops = { static u32 etherh_regoffsets[16]; static u32 etherm_regoffsets[16]; -static int __init +static int __devinit etherh_probe(struct expansion_card *ec, const struct ecard_id *id) { const struct etherh_data *data = id->data; diff --git a/trunk/drivers/net/atarilance.c b/trunk/drivers/net/atarilance.c index ce0091eb06f5..1264d781b554 100644 --- a/trunk/drivers/net/atarilance.c +++ b/trunk/drivers/net/atarilance.c @@ -554,7 +554,7 @@ static unsigned long __init lance_probe1( struct net_device *dev, memaddr == (unsigned short *)0xffe00000) { /* PAMs card and Riebl on ST use level 5 autovector */ if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO, - "PAM/Riebl-ST Ethernet", dev)) { + "PAM,Riebl-ST Ethernet", dev)) { printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 ); return 0; } diff --git a/trunk/drivers/net/can/mscan/mpc5xxx_can.c b/trunk/drivers/net/can/mscan/mpc5xxx_can.c index bd1d811c204f..5fedc3375562 100644 --- a/trunk/drivers/net/can/mscan/mpc5xxx_can.c +++ b/trunk/drivers/net/can/mscan/mpc5xxx_can.c @@ -247,8 +247,10 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev, } #endif /* CONFIG_PPC_MPC512x */ +static struct of_device_id mpc5xxx_can_table[]; static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev) { + const struct of_device_id *match; struct mpc5xxx_can_data *data; struct device_node *np = ofdev->dev.of_node; struct net_device *dev; @@ -258,9 +260,10 @@ static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev) int irq, mscan_clksrc = 0; int err = -ENOMEM; - if (!ofdev->dev.of_match) + match = of_match_device(mpc5xxx_can_table, &ofdev->dev); + if (!match) return -EINVAL; - data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data; + data = match->data; base = of_iomap(np, 0); if (!base) { diff --git a/trunk/drivers/net/ehea/ehea_main.c b/trunk/drivers/net/ehea/ehea_main.c index ba763e0481e3..6a0a8fca62bc 100644 --- a/trunk/drivers/net/ehea/ehea_main.c +++ b/trunk/drivers/net/ehea/ehea_main.c @@ -41,6 +41,7 @@ 
#include #include #include +#include #include diff --git a/trunk/drivers/net/fs_enet/fs_enet-main.c b/trunk/drivers/net/fs_enet/fs_enet-main.c index a9388944f1d3..21abb5c01a56 100644 --- a/trunk/drivers/net/fs_enet/fs_enet-main.c +++ b/trunk/drivers/net/fs_enet/fs_enet-main.c @@ -996,8 +996,10 @@ static const struct net_device_ops fs_enet_netdev_ops = { #endif }; +static struct of_device_id fs_enet_match[]; static int __devinit fs_enet_probe(struct platform_device *ofdev) { + const struct of_device_id *match; struct net_device *ndev; struct fs_enet_private *fep; struct fs_platform_info *fpi; @@ -1005,14 +1007,15 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev) const u8 *mac_addr; int privsize, len, ret = -ENODEV; - if (!ofdev->dev.of_match) + match = of_match_device(fs_enet_match, &ofdev->dev); + if (!match) return -EINVAL; fpi = kzalloc(sizeof(*fpi), GFP_KERNEL); if (!fpi) return -ENOMEM; - if (!IS_FEC(ofdev->dev.of_match)) { + if (!IS_FEC(match)) { data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len); if (!data || len != 4) goto out_free_fpi; @@ -1047,7 +1050,7 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev) fep->dev = &ofdev->dev; fep->ndev = ndev; fep->fpi = fpi; - fep->ops = ofdev->dev.of_match->data; + fep->ops = match->data; ret = fep->ops->setup_data(ndev); if (ret) diff --git a/trunk/drivers/net/fs_enet/mii-fec.c b/trunk/drivers/net/fs_enet/mii-fec.c index 7e840d373ab3..6a2e150e75bb 100644 --- a/trunk/drivers/net/fs_enet/mii-fec.c +++ b/trunk/drivers/net/fs_enet/mii-fec.c @@ -101,17 +101,20 @@ static int fs_enet_fec_mii_reset(struct mii_bus *bus) return 0; } +static struct of_device_id fs_enet_mdio_fec_match[]; static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) { + const struct of_device_id *match; struct resource res; struct mii_bus *new_bus; struct fec_info *fec; int (*get_bus_freq)(struct device_node *); int ret = -ENOMEM, clock, speed; - if (!ofdev->dev.of_match) + match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev); + if (!match) return -EINVAL; - get_bus_freq = ofdev->dev.of_match->data; + get_bus_freq = match->data; new_bus = mdiobus_alloc(); if (!new_bus) diff --git a/trunk/drivers/net/ixgbe/ixgbe_main.c b/trunk/drivers/net/ixgbe/ixgbe_main.c index 2dce3d038188..fa01b0b03b77 100644 --- a/trunk/drivers/net/ixgbe/ixgbe_main.c +++ b/trunk/drivers/net/ixgbe/ixgbe_main.c @@ -5136,11 +5136,6 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) return err; } -static void ring_free_rcu(struct rcu_head *head) -{ - kfree(container_of(head, struct ixgbe_ring, rcu)); -} - /** * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings * @adapter: board private structure to clear interrupt scheme on @@ -5162,7 +5157,7 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) /* ixgbe_get_stats64() might access this ring, we must wait * a grace period before freeing it. */ - call_rcu(&ring->rcu, ring_free_rcu); + kfree_rcu(ring, rcu); adapter->rx_ring[i] = NULL; } diff --git a/trunk/drivers/net/lantiq_etop.c b/trunk/drivers/net/lantiq_etop.c new file mode 100644 index 000000000000..45f252b7da30 --- /dev/null +++ b/trunk/drivers/net/lantiq_etop.c @@ -0,0 +1,805 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) 2011 John Crispin + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#define LTQ_ETOP_MDIO 0x11804 +#define MDIO_REQUEST 0x80000000 +#define MDIO_READ 0x40000000 +#define MDIO_ADDR_MASK 0x1f +#define MDIO_ADDR_OFFSET 0x15 +#define MDIO_REG_MASK 0x1f +#define MDIO_REG_OFFSET 0x10 +#define MDIO_VAL_MASK 0xffff + +#define PPE32_CGEN 0x800 +#define LQ_PPE32_ENET_MAC_CFG 0x1840 + +#define LTQ_ETOP_ENETS0 0x11850 +#define LTQ_ETOP_MAC_DA0 0x1186C +#define LTQ_ETOP_MAC_DA1 0x11870 +#define LTQ_ETOP_CFG 0x16020 +#define LTQ_ETOP_IGPLEN 0x16080 + +#define MAX_DMA_CHAN 0x8 +#define MAX_DMA_CRC_LEN 0x4 +#define MAX_DMA_DATA_LEN 0x600 + +#define ETOP_FTCU BIT(28) +#define ETOP_MII_MASK 0xf +#define ETOP_MII_NORMAL 0xd +#define ETOP_MII_REVERSE 0xe +#define ETOP_PLEN_UNDER 0x40 +#define ETOP_CGEN 0x800 + +/* use 2 static channels for TX/RX */ +#define LTQ_ETOP_TX_CHANNEL 1 +#define LTQ_ETOP_RX_CHANNEL 6 +#define IS_TX(x) (x == LTQ_ETOP_TX_CHANNEL) +#define IS_RX(x) (x == LTQ_ETOP_RX_CHANNEL) + +#define ltq_etop_r32(x) ltq_r32(ltq_etop_membase + (x)) +#define ltq_etop_w32(x, y) ltq_w32(x, ltq_etop_membase + (y)) +#define ltq_etop_w32_mask(x, y, z) \ + ltq_w32_mask(x, y, ltq_etop_membase + (z)) + +#define DRV_VERSION "1.0" + +static void __iomem *ltq_etop_membase; + +struct ltq_etop_chan { + int idx; + int tx_free; + struct net_device *netdev; + struct napi_struct napi; + struct ltq_dma_channel dma; + struct sk_buff *skb[LTQ_DESC_NUM]; +}; + +struct ltq_etop_priv { + struct net_device *netdev; + struct ltq_eth_data *pldata; + struct resource *res; + + struct mii_bus *mii_bus; + struct phy_device *phydev; + + struct ltq_etop_chan ch[MAX_DMA_CHAN]; + int tx_free[MAX_DMA_CHAN >> 1]; + + spinlock_t lock; +}; + +static int +ltq_etop_alloc_skb(struct ltq_etop_chan *ch) +{ + ch->skb[ch->dma.desc] = dev_alloc_skb(MAX_DMA_DATA_LEN); + if (!ch->skb[ch->dma.desc]) + return -ENOMEM; + ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL, + ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN, + DMA_FROM_DEVICE); + ch->dma.desc_base[ch->dma.desc].addr = + CPHYSADDR(ch->skb[ch->dma.desc]->data); + ch->dma.desc_base[ch->dma.desc].ctl = + LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | + MAX_DMA_DATA_LEN; + skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN); + return 0; +} + +static void +ltq_etop_hw_receive(struct ltq_etop_chan *ch) +{ + struct ltq_etop_priv *priv = netdev_priv(ch->netdev); + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; + struct sk_buff *skb = ch->skb[ch->dma.desc]; + int len = (desc->ctl & LTQ_DMA_SIZE_MASK) - MAX_DMA_CRC_LEN; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + if (ltq_etop_alloc_skb(ch)) { + netdev_err(ch->netdev, + "failed to allocate new rx buffer, stopping DMA\n"); + ltq_dma_close(&ch->dma); + } + ch->dma.desc++; + ch->dma.desc %= LTQ_DESC_NUM; + 
spin_unlock_irqrestore(&priv->lock, flags); + + skb_put(skb, len); + skb->dev = ch->netdev; + skb->protocol = eth_type_trans(skb, ch->netdev); + netif_receive_skb(skb); +} + +static int +ltq_etop_poll_rx(struct napi_struct *napi, int budget) +{ + struct ltq_etop_chan *ch = container_of(napi, + struct ltq_etop_chan, napi); + int rx = 0; + int complete = 0; + + while ((rx < budget) && !complete) { + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; + + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) { + ltq_etop_hw_receive(ch); + rx++; + } else { + complete = 1; + } + } + if (complete || !rx) { + napi_complete(&ch->napi); + ltq_dma_ack_irq(&ch->dma); + } + return rx; +} + +static int +ltq_etop_poll_tx(struct napi_struct *napi, int budget) +{ + struct ltq_etop_chan *ch = + container_of(napi, struct ltq_etop_chan, napi); + struct ltq_etop_priv *priv = netdev_priv(ch->netdev); + struct netdev_queue *txq = + netdev_get_tx_queue(ch->netdev, ch->idx >> 1); + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + while ((ch->dma.desc_base[ch->tx_free].ctl & + (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) { + dev_kfree_skb_any(ch->skb[ch->tx_free]); + ch->skb[ch->tx_free] = NULL; + memset(&ch->dma.desc_base[ch->tx_free], 0, + sizeof(struct ltq_dma_desc)); + ch->tx_free++; + ch->tx_free %= LTQ_DESC_NUM; + } + spin_unlock_irqrestore(&priv->lock, flags); + + if (netif_tx_queue_stopped(txq)) + netif_tx_start_queue(txq); + napi_complete(&ch->napi); + ltq_dma_ack_irq(&ch->dma); + return 1; +} + +static irqreturn_t +ltq_etop_dma_irq(int irq, void *_priv) +{ + struct ltq_etop_priv *priv = _priv; + int ch = irq - LTQ_DMA_CH0_INT; + + napi_schedule(&priv->ch[ch].napi); + return IRQ_HANDLED; +} + +static void +ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + + ltq_dma_free(&ch->dma); + if (ch->dma.irq) + free_irq(ch->dma.irq, priv); + if (IS_RX(ch->idx)) { + int desc; + for (desc = 0; desc < LTQ_DESC_NUM; desc++) + dev_kfree_skb_any(ch->skb[ch->dma.desc]); + } +} + +static void +ltq_etop_hw_exit(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + int i; + + ltq_pmu_disable(PMU_PPE); + for (i = 0; i < MAX_DMA_CHAN; i++) + if (IS_TX(i) || IS_RX(i)) + ltq_etop_free_channel(dev, &priv->ch[i]); +} + +static int +ltq_etop_hw_init(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + int i; + + ltq_pmu_enable(PMU_PPE); + + switch (priv->pldata->mii_mode) { + case PHY_INTERFACE_MODE_RMII: + ltq_etop_w32_mask(ETOP_MII_MASK, + ETOP_MII_REVERSE, LTQ_ETOP_CFG); + break; + + case PHY_INTERFACE_MODE_MII: + ltq_etop_w32_mask(ETOP_MII_MASK, + ETOP_MII_NORMAL, LTQ_ETOP_CFG); + break; + + default: + netdev_err(dev, "unknown mii mode %d\n", + priv->pldata->mii_mode); + return -ENOTSUPP; + } + + /* enable crc generation */ + ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG); + + ltq_dma_init_port(DMA_PORT_ETOP); + + for (i = 0; i < MAX_DMA_CHAN; i++) { + int irq = LTQ_DMA_CH0_INT + i; + struct ltq_etop_chan *ch = &priv->ch[i]; + + ch->idx = ch->dma.nr = i; + + if (IS_TX(i)) { + ltq_dma_alloc_tx(&ch->dma); + request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED, + "etop_tx", priv); + } else if (IS_RX(i)) { + ltq_dma_alloc_rx(&ch->dma); + for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM; + ch->dma.desc++) + if (ltq_etop_alloc_skb(ch)) + return -ENOMEM; + ch->dma.desc = 0; + request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED, + "etop_rx", priv); + } + ch->dma.irq = irq; + } + return 0; 
+} + +static void +ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + strcpy(info->driver, "Lantiq ETOP"); + strcpy(info->bus_info, "internal"); + strcpy(info->version, DRV_VERSION); +} + +static int +ltq_etop_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + + return phy_ethtool_gset(priv->phydev, cmd); +} + +static int +ltq_etop_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + + return phy_ethtool_sset(priv->phydev, cmd); +} + +static int +ltq_etop_nway_reset(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + + return phy_start_aneg(priv->phydev); +} + +static const struct ethtool_ops ltq_etop_ethtool_ops = { + .get_drvinfo = ltq_etop_get_drvinfo, + .get_settings = ltq_etop_get_settings, + .set_settings = ltq_etop_set_settings, + .nway_reset = ltq_etop_nway_reset, +}; + +static int +ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr, int phy_reg, u16 phy_data) +{ + u32 val = MDIO_REQUEST | + ((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) | + ((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET) | + phy_data; + + while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST) + ; + ltq_etop_w32(val, LTQ_ETOP_MDIO); + return 0; +} + +static int +ltq_etop_mdio_rd(struct mii_bus *bus, int phy_addr, int phy_reg) +{ + u32 val = MDIO_REQUEST | MDIO_READ | + ((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) | + ((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET); + + while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST) + ; + ltq_etop_w32(val, LTQ_ETOP_MDIO); + while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST) + ; + val = ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_VAL_MASK; + return val; +} + +static void +ltq_etop_mdio_link(struct net_device *dev) +{ + /* nothing to do */ +} + +static int +ltq_etop_mdio_probe(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + struct phy_device *phydev = NULL; + int phy_addr; + + for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { + if (priv->mii_bus->phy_map[phy_addr]) { + phydev = priv->mii_bus->phy_map[phy_addr]; + break; + } + } + + if (!phydev) { + netdev_err(dev, "no PHY found\n"); + return -ENODEV; + } + + phydev = phy_connect(dev, dev_name(&phydev->dev), <q_etop_mdio_link, + 0, priv->pldata->mii_mode); + + if (IS_ERR(phydev)) { + netdev_err(dev, "Could not attach to PHY\n"); + return PTR_ERR(phydev); + } + + phydev->supported &= (SUPPORTED_10baseT_Half + | SUPPORTED_10baseT_Full + | SUPPORTED_100baseT_Half + | SUPPORTED_100baseT_Full + | SUPPORTED_Autoneg + | SUPPORTED_MII + | SUPPORTED_TP); + + phydev->advertising = phydev->supported; + priv->phydev = phydev; + pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n", + dev->name, phydev->drv->name, + dev_name(&phydev->dev), phydev->irq); + + return 0; +} + +static int +ltq_etop_mdio_init(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + int i; + int err; + + priv->mii_bus = mdiobus_alloc(); + if (!priv->mii_bus) { + netdev_err(dev, "failed to allocate mii bus\n"); + err = -ENOMEM; + goto err_out; + } + + priv->mii_bus->priv = dev; + priv->mii_bus->read = ltq_etop_mdio_rd; + priv->mii_bus->write = ltq_etop_mdio_wr; + priv->mii_bus->name = "ltq_mii"; + snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0); + priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!priv->mii_bus->irq) { + err = -ENOMEM; + goto err_out_free_mdiobus; + } + + for (i = 0; i < PHY_MAX_ADDR; 
++i) + priv->mii_bus->irq[i] = PHY_POLL; + + if (mdiobus_register(priv->mii_bus)) { + err = -ENXIO; + goto err_out_free_mdio_irq; + } + + if (ltq_etop_mdio_probe(dev)) { + err = -ENXIO; + goto err_out_unregister_bus; + } + return 0; + +err_out_unregister_bus: + mdiobus_unregister(priv->mii_bus); +err_out_free_mdio_irq: + kfree(priv->mii_bus->irq); +err_out_free_mdiobus: + mdiobus_free(priv->mii_bus); +err_out: + return err; +} + +static void +ltq_etop_mdio_cleanup(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + + phy_disconnect(priv->phydev); + mdiobus_unregister(priv->mii_bus); + kfree(priv->mii_bus->irq); + mdiobus_free(priv->mii_bus); +} + +static int +ltq_etop_open(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + int i; + + for (i = 0; i < MAX_DMA_CHAN; i++) { + struct ltq_etop_chan *ch = &priv->ch[i]; + + if (!IS_TX(i) && (!IS_RX(i))) + continue; + ltq_dma_open(&ch->dma); + napi_enable(&ch->napi); + } + phy_start(priv->phydev); + netif_tx_start_all_queues(dev); + return 0; +} + +static int +ltq_etop_stop(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + int i; + + netif_tx_stop_all_queues(dev); + phy_stop(priv->phydev); + for (i = 0; i < MAX_DMA_CHAN; i++) { + struct ltq_etop_chan *ch = &priv->ch[i]; + + if (!IS_RX(i) && !IS_TX(i)) + continue; + napi_disable(&ch->napi); + ltq_dma_close(&ch->dma); + } + return 0; +} + +static int +ltq_etop_tx(struct sk_buff *skb, struct net_device *dev) +{ + int queue = skb_get_queue_mapping(skb); + struct netdev_queue *txq = netdev_get_tx_queue(dev, queue); + struct ltq_etop_priv *priv = netdev_priv(dev); + struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1]; + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; + int len; + unsigned long flags; + u32 byte_offset; + + len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len; + + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) { + dev_kfree_skb_any(skb); + netdev_err(dev, "tx ring full\n"); + netif_tx_stop_queue(txq); + return NETDEV_TX_BUSY; + } + + /* dma needs to start on a 16 byte aligned address */ + byte_offset = CPHYSADDR(skb->data) % 16; + ch->skb[ch->dma.desc] = skb; + + dev->trans_start = jiffies; + + spin_lock_irqsave(&priv->lock, flags); + desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len, + DMA_TO_DEVICE)) - byte_offset; + wmb(); + desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP | + LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK); + ch->dma.desc++; + ch->dma.desc %= LTQ_DESC_NUM; + spin_unlock_irqrestore(&priv->lock, flags); + + if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN) + netif_tx_stop_queue(txq); + + return NETDEV_TX_OK; +} + +static int +ltq_etop_change_mtu(struct net_device *dev, int new_mtu) +{ + int ret = eth_change_mtu(dev, new_mtu); + + if (!ret) { + struct ltq_etop_priv *priv = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu, + LTQ_ETOP_IGPLEN); + spin_unlock_irqrestore(&priv->lock, flags); + } + return ret; +} + +static int +ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + + /* TODO: mii-toll reports "No MII transceiver present!." 
?!*/ + return phy_mii_ioctl(priv->phydev, rq, cmd); +} + +static int +ltq_etop_set_mac_address(struct net_device *dev, void *p) +{ + int ret = eth_mac_addr(dev, p); + + if (!ret) { + struct ltq_etop_priv *priv = netdev_priv(dev); + unsigned long flags; + + /* store the mac for the unicast filter */ + spin_lock_irqsave(&priv->lock, flags); + ltq_etop_w32(*((u32 *)dev->dev_addr), LTQ_ETOP_MAC_DA0); + ltq_etop_w32(*((u16 *)&dev->dev_addr[4]) << 16, + LTQ_ETOP_MAC_DA1); + spin_unlock_irqrestore(&priv->lock, flags); + } + return ret; +} + +static void +ltq_etop_set_multicast_list(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + unsigned long flags; + + /* ensure that the unicast filter is not enabled in promiscious mode */ + spin_lock_irqsave(&priv->lock, flags); + if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) + ltq_etop_w32_mask(ETOP_FTCU, 0, LTQ_ETOP_ENETS0); + else + ltq_etop_w32_mask(0, ETOP_FTCU, LTQ_ETOP_ENETS0); + spin_unlock_irqrestore(&priv->lock, flags); +} + +static u16 +ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb) +{ + /* we are currently only using the first queue */ + return 0; +} + +static int +ltq_etop_init(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + struct sockaddr mac; + int err; + + ether_setup(dev); + dev->watchdog_timeo = 10 * HZ; + err = ltq_etop_hw_init(dev); + if (err) + goto err_hw; + ltq_etop_change_mtu(dev, 1500); + + memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr)); + if (!is_valid_ether_addr(mac.sa_data)) { + pr_warn("etop: invalid MAC, using random\n"); + random_ether_addr(mac.sa_data); + } + + err = ltq_etop_set_mac_address(dev, &mac); + if (err) + goto err_netdev; + ltq_etop_set_multicast_list(dev); + err = ltq_etop_mdio_init(dev); + if (err) + goto err_netdev; + return 0; + +err_netdev: + unregister_netdev(dev); + free_netdev(dev); +err_hw: + ltq_etop_hw_exit(dev); + return err; +} + +static void +ltq_etop_tx_timeout(struct net_device *dev) +{ + int err; + + ltq_etop_hw_exit(dev); + err = ltq_etop_hw_init(dev); + if (err) + goto err_hw; + dev->trans_start = jiffies; + netif_wake_queue(dev); + return; + +err_hw: + ltq_etop_hw_exit(dev); + netdev_err(dev, "failed to restart etop after TX timeout\n"); +} + +static const struct net_device_ops ltq_eth_netdev_ops = { + .ndo_open = ltq_etop_open, + .ndo_stop = ltq_etop_stop, + .ndo_start_xmit = ltq_etop_tx, + .ndo_change_mtu = ltq_etop_change_mtu, + .ndo_do_ioctl = ltq_etop_ioctl, + .ndo_set_mac_address = ltq_etop_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_multicast_list = ltq_etop_set_multicast_list, + .ndo_select_queue = ltq_etop_select_queue, + .ndo_init = ltq_etop_init, + .ndo_tx_timeout = ltq_etop_tx_timeout, +}; + +static int __init +ltq_etop_probe(struct platform_device *pdev) +{ + struct net_device *dev; + struct ltq_etop_priv *priv; + struct resource *res; + int err; + int i; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "failed to get etop resource\n"); + err = -ENOENT; + goto err_out; + } + + res = devm_request_mem_region(&pdev->dev, res->start, + resource_size(res), dev_name(&pdev->dev)); + if (!res) { + dev_err(&pdev->dev, "failed to request etop resource\n"); + err = -EBUSY; + goto err_out; + } + + ltq_etop_membase = devm_ioremap_nocache(&pdev->dev, + res->start, resource_size(res)); + if (!ltq_etop_membase) { + dev_err(&pdev->dev, "failed to remap etop engine %d\n", + pdev->id); + err = -ENOMEM; + goto err_out; + } + + 
dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4); + strcpy(dev->name, "eth%d"); + dev->netdev_ops = <q_eth_netdev_ops; + dev->ethtool_ops = <q_etop_ethtool_ops; + priv = netdev_priv(dev); + priv->res = res; + priv->pldata = dev_get_platdata(&pdev->dev); + priv->netdev = dev; + spin_lock_init(&priv->lock); + + for (i = 0; i < MAX_DMA_CHAN; i++) { + if (IS_TX(i)) + netif_napi_add(dev, &priv->ch[i].napi, + ltq_etop_poll_tx, 8); + else if (IS_RX(i)) + netif_napi_add(dev, &priv->ch[i].napi, + ltq_etop_poll_rx, 32); + priv->ch[i].netdev = dev; + } + + err = register_netdev(dev); + if (err) + goto err_free; + + platform_set_drvdata(pdev, dev); + return 0; + +err_free: + kfree(dev); +err_out: + return err; +} + +static int __devexit +ltq_etop_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + + if (dev) { + netif_tx_stop_all_queues(dev); + ltq_etop_hw_exit(dev); + ltq_etop_mdio_cleanup(dev); + unregister_netdev(dev); + } + return 0; +} + +static struct platform_driver ltq_mii_driver = { + .remove = __devexit_p(ltq_etop_remove), + .driver = { + .name = "ltq_etop", + .owner = THIS_MODULE, + }, +}; + +int __init +init_ltq_etop(void) +{ + int ret = platform_driver_probe(<q_mii_driver, ltq_etop_probe); + + if (ret) + pr_err("ltq_etop: Error registering platfom driver!"); + return ret; +} + +static void __exit +exit_ltq_etop(void) +{ + platform_driver_unregister(<q_mii_driver); +} + +module_init(init_ltq_etop); +module_exit(exit_ltq_etop); + +MODULE_AUTHOR("John Crispin "); +MODULE_DESCRIPTION("Lantiq SoC ETOP"); +MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/net/macvlan.c b/trunk/drivers/net/macvlan.c index bbcf80afaf16..d72a70615c0f 100644 --- a/trunk/drivers/net/macvlan.c +++ b/trunk/drivers/net/macvlan.c @@ -590,21 +590,13 @@ static int macvlan_port_create(struct net_device *dev) return err; } -static void macvlan_port_rcu_free(struct rcu_head *head) -{ - struct macvlan_port *port; - - port = container_of(head, struct macvlan_port, rcu); - kfree(port); -} - static void macvlan_port_destroy(struct net_device *dev) { struct macvlan_port *port = macvlan_port_get(dev); dev->priv_flags &= ~IFF_MACVLAN_PORT; netdev_rx_handler_unregister(dev); - call_rcu(&port->rcu, macvlan_port_rcu_free); + kfree_rcu(port, rcu); } static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[]) diff --git a/trunk/drivers/net/sunhme.c b/trunk/drivers/net/sunhme.c index d381a0f9ee18..30aad54b1b3a 100644 --- a/trunk/drivers/net/sunhme.c +++ b/trunk/drivers/net/sunhme.c @@ -3238,15 +3238,18 @@ static void happy_meal_pci_exit(void) #endif #ifdef CONFIG_SBUS +static const struct of_device_id hme_sbus_match[]; static int __devinit hme_sbus_probe(struct platform_device *op) { + const struct of_device_id *match; struct device_node *dp = op->dev.of_node; const char *model = of_get_property(dp, "model", NULL); int is_qfe; - if (!op->dev.of_match) + match = of_match_device(hme_sbus_match, &op->dev); + if (!match) return -EINVAL; - is_qfe = (op->dev.of_match->data != NULL); + is_qfe = (match->data != NULL); if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe")) is_qfe = 1; diff --git a/trunk/drivers/of/irq.c b/trunk/drivers/of/irq.c index 75b0d3cb7676..9f689f1da0fc 100644 --- a/trunk/drivers/of/irq.c +++ b/trunk/drivers/of/irq.c @@ -56,7 +56,7 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map); * Returns a pointer to the interrupt parent node, or NULL if the interrupt * parent could not be determined. 
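The ixgbe and macvlan hunks in this series are two instances of the same cleanup: a call_rcu() callback whose only job was to kfree() the enclosing structure is dropped in favour of kfree_rcu(), which takes the pointer and the name of the embedded rcu_head member. A before/after sketch with a hypothetical struct foo, not taken from either driver:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int payload;
	struct rcu_head rcu;			/* embedded grace-period callback head */
};

/* before: a single-purpose callback plus call_rcu() */
static void foo_rcu_free(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

static void foo_release_old(struct foo *f)
{
	call_rcu(&f->rcu, foo_rcu_free);
}

/* after: kfree_rcu() frees the object once a grace period has elapsed;
 * the second argument names the rcu_head member, no callback needed */
static void foo_release_new(struct foo *f)
{
	kfree_rcu(f, rcu);
}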
*/ -static struct device_node *of_irq_find_parent(struct device_node *child) +struct device_node *of_irq_find_parent(struct device_node *child) { struct device_node *p; const __be32 *parp; diff --git a/trunk/drivers/parport/parport_pc.c b/trunk/drivers/parport/parport_pc.c index a3755ffc03d4..bc8ce48f0778 100644 --- a/trunk/drivers/parport/parport_pc.c +++ b/trunk/drivers/parport/parport_pc.c @@ -2550,7 +2550,6 @@ static int __devinit sio_ite_8872_probe(struct pci_dev *pdev, int autoirq, const struct parport_pc_via_data *via) { short inta_addr[6] = { 0x2A0, 0x2C0, 0x220, 0x240, 0x1E0 }; - struct resource *base_res; u32 ite8872set; u32 ite8872_lpt, ite8872_lpthi; u8 ite8872_irq, type; @@ -2561,8 +2560,7 @@ static int __devinit sio_ite_8872_probe(struct pci_dev *pdev, int autoirq, /* make sure which one chip */ for (i = 0; i < 5; i++) { - base_res = request_region(inta_addr[i], 32, "it887x"); - if (base_res) { + if (request_region(inta_addr[i], 32, "it887x")) { int test; pci_write_config_dword(pdev, 0x60, 0xe5000000 | inta_addr[i]); @@ -2571,7 +2569,7 @@ static int __devinit sio_ite_8872_probe(struct pci_dev *pdev, int autoirq, test = inb(inta_addr[i]); if (test != 0xff) break; - release_region(inta_addr[i], 0x8); + release_region(inta_addr[i], 32); } } if (i >= 5) { @@ -2635,7 +2633,7 @@ static int __devinit sio_ite_8872_probe(struct pci_dev *pdev, int autoirq, /* * Release the resource so that parport_pc_probe_port can get it. */ - release_resource(base_res); + release_region(inta_addr[i], 32); if (parport_pc_probe_port(ite8872_lpt, ite8872_lpthi, irq, PARPORT_DMA_NONE, &pdev->dev, 0)) { printk(KERN_INFO diff --git a/trunk/drivers/pci/Kconfig b/trunk/drivers/pci/Kconfig index c8ff646c0b05..0fa466a91bf4 100644 --- a/trunk/drivers/pci/Kconfig +++ b/trunk/drivers/pci/Kconfig @@ -88,4 +88,6 @@ config PCI_IOAPIC depends on HOTPLUG default y -select NLS if (DMI || ACPI) +config PCI_LABEL + def_bool y if (DMI || ACPI) + select NLS diff --git a/trunk/drivers/pci/Makefile b/trunk/drivers/pci/Makefile index 98d61c8e984b..c85f744270a5 100644 --- a/trunk/drivers/pci/Makefile +++ b/trunk/drivers/pci/Makefile @@ -56,10 +56,10 @@ obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o # ACPI Related PCI FW Functions # ACPI _DSM provided firmware instance and string name # -obj-$(CONFIG_ACPI) += pci-acpi.o pci-label.o +obj-$(CONFIG_ACPI) += pci-acpi.o # SMBIOS provided firmware instance and labels -obj-$(CONFIG_DMI) += pci-label.o +obj-$(CONFIG_PCI_LABEL) += pci-label.o # Cardbus & CompactPCI use setup-bus obj-$(CONFIG_HOTPLUG) += setup-bus.o diff --git a/trunk/drivers/pci/intel-iommu.c b/trunk/drivers/pci/intel-iommu.c index 505c1c7075f0..6af6b628175b 100644 --- a/trunk/drivers/pci/intel-iommu.c +++ b/trunk/drivers/pci/intel-iommu.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include "pci.h" @@ -1299,7 +1300,7 @@ static void iommu_detach_domain(struct dmar_domain *domain, static struct iova_domain reserved_iova_list; static struct lock_class_key reserved_rbtree_key; -static void dmar_init_reserved_ranges(void) +static int dmar_init_reserved_ranges(void) { struct pci_dev *pdev = NULL; struct iova *iova; @@ -1313,8 +1314,10 @@ static void dmar_init_reserved_ranges(void) /* IOAPIC ranges shouldn't be accessed by DMA */ iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START), IOVA_PFN(IOAPIC_RANGE_END)); - if (!iova) + if (!iova) { printk(KERN_ERR "Reserve IOAPIC range failed\n"); + return -ENODEV; + } /* Reserve all PCI MMIO to avoid peer-to-peer access */ 
for_each_pci_dev(pdev) { @@ -1327,11 +1330,13 @@ static void dmar_init_reserved_ranges(void) iova = reserve_iova(&reserved_iova_list, IOVA_PFN(r->start), IOVA_PFN(r->end)); - if (!iova) + if (!iova) { printk(KERN_ERR "Reserve iova failed\n"); + return -ENODEV; + } } } - + return 0; } static void domain_reserve_special_ranges(struct dmar_domain *domain) @@ -1835,7 +1840,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) ret = iommu_attach_domain(domain, iommu); if (ret) { - domain_exit(domain); + free_domain_mem(domain); goto error; } @@ -2213,7 +2218,7 @@ static int __init iommu_prepare_static_identity_mapping(int hw) return 0; } -int __init init_dmars(void) +static int __init init_dmars(int force_on) { struct dmar_drhd_unit *drhd; struct dmar_rmrr_unit *rmrr; @@ -2393,8 +2398,15 @@ int __init init_dmars(void) * enable translation */ for_each_drhd_unit(drhd) { - if (drhd->ignored) + if (drhd->ignored) { + /* + * we always have to disable PMRs or DMA may fail on + * this device + */ + if (force_on) + iommu_disable_protect_mem_regions(drhd->iommu); continue; + } iommu = drhd->iommu; iommu_flush_write_buffer(iommu); @@ -3240,9 +3252,15 @@ static int device_notifier(struct notifier_block *nb, if (!domain) return 0; - if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) + if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) { domain_remove_one_dev_info(domain, pdev); + if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) && + !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) && + list_empty(&domain->devices)) + domain_exit(domain); + } + return 0; } @@ -3277,12 +3295,21 @@ int __init intel_iommu_init(void) if (no_iommu || dmar_disabled) return -ENODEV; - iommu_init_mempool(); - dmar_init_reserved_ranges(); + if (iommu_init_mempool()) { + if (force_on) + panic("tboot: Failed to initialize iommu memory\n"); + return -ENODEV; + } + + if (dmar_init_reserved_ranges()) { + if (force_on) + panic("tboot: Failed to reserve iommu ranges\n"); + return -ENODEV; + } init_no_remapping_devices(); - ret = init_dmars(); + ret = init_dmars(force_on); if (ret) { if (force_on) panic("tboot: Failed to initialize DMARs\n"); @@ -3391,6 +3418,11 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, domain->iommu_count--; domain_update_iommu_cap(domain); spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); + + spin_lock_irqsave(&iommu->lock, tmp_flags); + clear_bit(domain->id, iommu->domain_ids); + iommu->domains[domain->id] = NULL; + spin_unlock_irqrestore(&iommu->lock, tmp_flags); } spin_unlock_irqrestore(&device_domain_lock, flags); @@ -3607,9 +3639,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, pte = dmar_domain->pgd; if (dma_pte_present(pte)) { - free_pgtable_page(dmar_domain->pgd); dmar_domain->pgd = (struct dma_pte *) phys_to_virt(dma_pte_addr(pte)); + free_pgtable_page(pte); } dmar_domain->agaw--; } diff --git a/trunk/drivers/pci/iov.c b/trunk/drivers/pci/iov.c index 553d8ee55c1c..42fae4776515 100644 --- a/trunk/drivers/pci/iov.c +++ b/trunk/drivers/pci/iov.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "pci.h" #define VIRTFN_ID_LEN 16 diff --git a/trunk/drivers/pci/pci-driver.c b/trunk/drivers/pci/pci-driver.c index d86ea8b01137..135df164a4c1 100644 --- a/trunk/drivers/pci/pci-driver.c +++ b/trunk/drivers/pci/pci-driver.c @@ -781,7 +781,7 @@ static int pci_pm_resume(struct device *dev) #endif /* !CONFIG_SUSPEND */ -#ifdef CONFIG_HIBERNATION +#ifdef CONFIG_HIBERNATE_CALLBACKS static int 
pci_pm_freeze(struct device *dev) { @@ -970,7 +970,7 @@ static int pci_pm_restore(struct device *dev) return error; } -#else /* !CONFIG_HIBERNATION */ +#else /* !CONFIG_HIBERNATE_CALLBACKS */ #define pci_pm_freeze NULL #define pci_pm_freeze_noirq NULL @@ -981,7 +981,7 @@ static int pci_pm_restore(struct device *dev) #define pci_pm_restore NULL #define pci_pm_restore_noirq NULL -#endif /* !CONFIG_HIBERNATION */ +#endif /* !CONFIG_HIBERNATE_CALLBACKS */ #ifdef CONFIG_PM_RUNTIME diff --git a/trunk/drivers/pci/pci.h b/trunk/drivers/pci/pci.h index a6ec200fe5ee..4020025f854e 100644 --- a/trunk/drivers/pci/pci.h +++ b/trunk/drivers/pci/pci.h @@ -250,15 +250,6 @@ struct pci_sriov { u8 __iomem *mstate; /* VF Migration State Array */ }; -/* Address Translation Service */ -struct pci_ats { - int pos; /* capability position */ - int stu; /* Smallest Translation Unit */ - int qdep; /* Invalidate Queue Depth */ - int ref_cnt; /* Physical Function reference count */ - unsigned int is_enabled:1; /* Enable bit is set */ -}; - #ifdef CONFIG_PCI_IOV extern int pci_iov_init(struct pci_dev *dev); extern void pci_iov_release(struct pci_dev *dev); @@ -269,19 +260,6 @@ extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, extern void pci_restore_iov_state(struct pci_dev *dev); extern int pci_iov_bus_range(struct pci_bus *bus); -extern int pci_enable_ats(struct pci_dev *dev, int ps); -extern void pci_disable_ats(struct pci_dev *dev); -extern int pci_ats_queue_depth(struct pci_dev *dev); -/** - * pci_ats_enabled - query the ATS status - * @dev: the PCI device - * - * Returns 1 if ATS capability is enabled, or 0 if not. - */ -static inline int pci_ats_enabled(struct pci_dev *dev) -{ - return dev->ats && dev->ats->is_enabled; -} #else static inline int pci_iov_init(struct pci_dev *dev) { @@ -304,21 +282,6 @@ static inline int pci_iov_bus_range(struct pci_bus *bus) return 0; } -static inline int pci_enable_ats(struct pci_dev *dev, int ps) -{ - return -ENODEV; -} -static inline void pci_disable_ats(struct pci_dev *dev) -{ -} -static inline int pci_ats_queue_depth(struct pci_dev *dev) -{ - return -ENODEV; -} -static inline int pci_ats_enabled(struct pci_dev *dev) -{ - return 0; -} #endif /* CONFIG_PCI_IOV */ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, diff --git a/trunk/drivers/pci/setup-bus.c b/trunk/drivers/pci/setup-bus.c index ebf51ad1b714..a806cb321d2e 100644 --- a/trunk/drivers/pci/setup-bus.c +++ b/trunk/drivers/pci/setup-bus.c @@ -579,7 +579,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, } size0 = calculate_iosize(size, min_size, size1, resource_size(b_res), 4096); - size1 = !add_size? size0: + size1 = (!add_head || (add_head && !add_size)) ? size0 : calculate_iosize(size, min_size+add_size, size1, resource_size(b_res), 4096); if (!size0 && !size1) { @@ -677,7 +677,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, align += aligns[order]; } size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); - size1 = !add_size ? size : + size1 = (!add_head || (add_head && !add_size)) ? 
size0 : calculate_memsize(size, min_size+add_size, 0, resource_size(b_res), min_align); if (!size0 && !size1) { diff --git a/trunk/drivers/pcmcia/pcmcia_resource.c b/trunk/drivers/pcmcia/pcmcia_resource.c index fe77e8223841..e8c19def1b0f 100644 --- a/trunk/drivers/pcmcia/pcmcia_resource.c +++ b/trunk/drivers/pcmcia/pcmcia_resource.c @@ -173,7 +173,7 @@ static int pcmcia_access_config(struct pcmcia_device *p_dev, c = p_dev->function_config; if (!(c->state & CONFIG_LOCKED)) { - dev_dbg(&p_dev->dev, "Configuration isn't't locked\n"); + dev_dbg(&p_dev->dev, "Configuration isn't locked\n"); mutex_unlock(&s->ops_mutex); return -EACCES; } diff --git a/trunk/drivers/pcmcia/pxa2xx_balloon3.c b/trunk/drivers/pcmcia/pxa2xx_balloon3.c index 453c54c97612..4c3e94c0ae85 100644 --- a/trunk/drivers/pcmcia/pxa2xx_balloon3.c +++ b/trunk/drivers/pcmcia/pxa2xx_balloon3.c @@ -25,6 +25,8 @@ #include +#include + #include "soc_common.h" /* @@ -127,6 +129,9 @@ static int __init balloon3_pcmcia_init(void) { int ret; + if (!machine_is_balloon3()) + return -ENODEV; + balloon3_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); if (!balloon3_pcmcia_device) return -ENOMEM; diff --git a/trunk/drivers/pcmcia/pxa2xx_trizeps4.c b/trunk/drivers/pcmcia/pxa2xx_trizeps4.c index b7e596620db1..b829e655457b 100644 --- a/trunk/drivers/pcmcia/pxa2xx_trizeps4.c +++ b/trunk/drivers/pcmcia/pxa2xx_trizeps4.c @@ -69,15 +69,15 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt) for (i = 0; i < ARRAY_SIZE(irqs); i++) { if (irqs[i].sock != skt->nr) continue; - if (gpio_request(IRQ_TO_GPIO(irqs[i].irq), irqs[i].str) < 0) { + if (gpio_request(irq_to_gpio(irqs[i].irq), irqs[i].str) < 0) { pr_err("%s: sock %d unable to request gpio %d\n", - __func__, skt->nr, IRQ_TO_GPIO(irqs[i].irq)); + __func__, skt->nr, irq_to_gpio(irqs[i].irq)); ret = -EBUSY; goto error; } - if (gpio_direction_input(IRQ_TO_GPIO(irqs[i].irq)) < 0) { + if (gpio_direction_input(irq_to_gpio(irqs[i].irq)) < 0) { pr_err("%s: sock %d unable to set input gpio %d\n", - __func__, skt->nr, IRQ_TO_GPIO(irqs[i].irq)); + __func__, skt->nr, irq_to_gpio(irqs[i].irq)); ret = -EINVAL; goto error; } @@ -86,7 +86,7 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt) error: for (; i >= 0; i--) { - gpio_free(IRQ_TO_GPIO(irqs[i].irq)); + gpio_free(irq_to_gpio(irqs[i].irq)); } return (ret); } @@ -97,7 +97,7 @@ static void trizeps_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) /* free allocated gpio's */ gpio_free(GPIO_PRDY); for (i = 0; i < ARRAY_SIZE(irqs); i++) - gpio_free(IRQ_TO_GPIO(irqs[i].irq)); + gpio_free(irq_to_gpio(irqs[i].irq)); } static unsigned long trizeps_pcmcia_status[2]; @@ -226,6 +226,9 @@ static int __init trizeps_pcmcia_init(void) { int ret; + if (!machine_is_trizeps4() && !machine_is_trizeps4wl()) + return -ENODEV; + trizeps_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); if (!trizeps_pcmcia_device) return -ENOMEM; diff --git a/trunk/drivers/platform/x86/Kconfig b/trunk/drivers/platform/x86/Kconfig index 2ee442c2a5db..0485e394712a 100644 --- a/trunk/drivers/platform/x86/Kconfig +++ b/trunk/drivers/platform/x86/Kconfig @@ -187,7 +187,8 @@ config MSI_LAPTOP depends on ACPI depends on BACKLIGHT_CLASS_DEVICE depends on RFKILL - depends on SERIO_I8042 + depends on INPUT && SERIO_I8042 + select INPUT_SPARSEKMAP ---help--- This is a driver for laptops built by MSI (MICRO-STAR INTERNATIONAL): diff --git a/trunk/drivers/platform/x86/acer-wmi.c b/trunk/drivers/platform/x86/acer-wmi.c index 5ea6c3477d17..ac4e7f83ce6c 100644 --- 
a/trunk/drivers/platform/x86/acer-wmi.c +++ b/trunk/drivers/platform/x86/acer-wmi.c @@ -89,7 +89,7 @@ MODULE_LICENSE("GPL"); #define ACERWMID_EVENT_GUID "676AA15E-6A47-4D9F-A2CC-1E6D18D14026" MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB"); -MODULE_ALIAS("wmi:6AF4F258-B401-42Fd-BE91-3D4AC2D7C0D3"); +MODULE_ALIAS("wmi:6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3"); MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026"); enum acer_wmi_event_ids { diff --git a/trunk/drivers/platform/x86/asus-wmi.c b/trunk/drivers/platform/x86/asus-wmi.c index efc776cb0c66..832a3fd7c1c8 100644 --- a/trunk/drivers/platform/x86/asus-wmi.c +++ b/trunk/drivers/platform/x86/asus-wmi.c @@ -201,8 +201,8 @@ static int asus_wmi_input_init(struct asus_wmi *asus) if (!asus->inputdev) return -ENOMEM; - asus->inputdev->name = asus->driver->input_phys; - asus->inputdev->phys = asus->driver->input_name; + asus->inputdev->name = asus->driver->input_name; + asus->inputdev->phys = asus->driver->input_phys; asus->inputdev->id.bustype = BUS_HOST; asus->inputdev->dev.parent = &asus->platform_device->dev; diff --git a/trunk/drivers/platform/x86/eeepc-laptop.c b/trunk/drivers/platform/x86/eeepc-laptop.c index 5f2dd386152b..2c1abf63957f 100644 --- a/trunk/drivers/platform/x86/eeepc-laptop.c +++ b/trunk/drivers/platform/x86/eeepc-laptop.c @@ -585,8 +585,9 @@ static bool eeepc_wlan_rfkill_blocked(struct eeepc_laptop *eeepc) return true; } -static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc) +static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle) { + struct pci_dev *port; struct pci_dev *dev; struct pci_bus *bus; bool blocked = eeepc_wlan_rfkill_blocked(eeepc); @@ -599,9 +600,16 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc) mutex_lock(&eeepc->hotplug_lock); if (eeepc->hotplug_slot) { - bus = pci_find_bus(0, 1); + port = acpi_get_pci_dev(handle); + if (!port) { + pr_warning("Unable to find port\n"); + goto out_unlock; + } + + bus = port->subordinate; + if (!bus) { - pr_warning("Unable to find PCI bus 1?\n"); + pr_warning("Unable to find PCI bus?\n"); goto out_unlock; } @@ -609,6 +617,7 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc) pr_err("Unable to read PCI config space?\n"); goto out_unlock; } + absent = (l == 0xffffffff); if (blocked != absent) { @@ -647,6 +656,17 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc) mutex_unlock(&eeepc->hotplug_lock); } +static void eeepc_rfkill_hotplug_update(struct eeepc_laptop *eeepc, char *node) +{ + acpi_status status = AE_OK; + acpi_handle handle; + + status = acpi_get_handle(NULL, node, &handle); + + if (ACPI_SUCCESS(status)) + eeepc_rfkill_hotplug(eeepc, handle); +} + static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data) { struct eeepc_laptop *eeepc = data; @@ -654,7 +674,7 @@ static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data) if (event != ACPI_NOTIFY_BUS_CHECK) return; - eeepc_rfkill_hotplug(eeepc); + eeepc_rfkill_hotplug(eeepc, handle); } static int eeepc_register_rfkill_notifier(struct eeepc_laptop *eeepc, @@ -672,6 +692,11 @@ static int eeepc_register_rfkill_notifier(struct eeepc_laptop *eeepc, eeepc); if (ACPI_FAILURE(status)) pr_warning("Failed to register notify on %s\n", node); + /* + * Refresh pci hotplug in case the rfkill state was + * changed during setup. 
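The eeepc-laptop change above stops assuming the wlan hotplug slot always sits on PCI bus 1: the rfkill notifier now passes its ACPI handle down, and the hotplug code asks the ACPI layer for the matching root-port pci_dev and uses that port's secondary bus. A small sketch of just that lookup step, with a hypothetical helper name (the real driver keeps the port and bus lookups inline in eeepc_rfkill_hotplug()):

#include <linux/pci.h>
#include <linux/pci-acpi.h>

/* map a root-port ACPI handle to the PCI bus behind it, instead of
 * hard-coding pci_find_bus(0, 1) */
static struct pci_bus *bus_behind_acpi_port(acpi_handle handle)
{
	struct pci_dev *port = acpi_get_pci_dev(handle);	/* takes a reference */
	struct pci_bus *bus;

	if (!port)
		return NULL;
	bus = port->subordinate;	/* NULL unless the port really is a bridge */
	pci_dev_put(port);
	return bus;
}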
+ */ + eeepc_rfkill_hotplug(eeepc, handle); } else return -ENODEV; @@ -693,6 +718,12 @@ static void eeepc_unregister_rfkill_notifier(struct eeepc_laptop *eeepc, if (ACPI_FAILURE(status)) pr_err("Error removing rfkill notify handler %s\n", node); + /* + * Refresh pci hotplug in case the rfkill + * state was changed after + * eeepc_unregister_rfkill_notifier() + */ + eeepc_rfkill_hotplug(eeepc, handle); } } @@ -816,11 +847,7 @@ static void eeepc_rfkill_exit(struct eeepc_laptop *eeepc) rfkill_destroy(eeepc->wlan_rfkill); eeepc->wlan_rfkill = NULL; } - /* - * Refresh pci hotplug in case the rfkill state was changed after - * eeepc_unregister_rfkill_notifier() - */ - eeepc_rfkill_hotplug(eeepc); + if (eeepc->hotplug_slot) pci_hp_deregister(eeepc->hotplug_slot); @@ -889,11 +916,6 @@ static int eeepc_rfkill_init(struct eeepc_laptop *eeepc) eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P5"); eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P6"); eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P7"); - /* - * Refresh pci hotplug in case the rfkill state was changed during - * setup. - */ - eeepc_rfkill_hotplug(eeepc); exit: if (result && result != -ENODEV) @@ -928,8 +950,11 @@ static int eeepc_hotk_restore(struct device *device) struct eeepc_laptop *eeepc = dev_get_drvdata(device); /* Refresh both wlan rfkill state and pci hotplug */ - if (eeepc->wlan_rfkill) - eeepc_rfkill_hotplug(eeepc); + if (eeepc->wlan_rfkill) { + eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P5"); + eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P6"); + eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P7"); + } if (eeepc->bluetooth_rfkill) rfkill_set_sw_state(eeepc->bluetooth_rfkill, diff --git a/trunk/drivers/platform/x86/eeepc-wmi.c b/trunk/drivers/platform/x86/eeepc-wmi.c index 0ddc434fb93b..649dcadd8ea3 100644 --- a/trunk/drivers/platform/x86/eeepc-wmi.c +++ b/trunk/drivers/platform/x86/eeepc-wmi.c @@ -67,9 +67,11 @@ static const struct key_entry eeepc_wmi_keymap[] = { { KE_KEY, 0x82, { KEY_CAMERA } }, { KE_KEY, 0x83, { KEY_CAMERA_ZOOMIN } }, { KE_KEY, 0x88, { KEY_WLAN } }, + { KE_KEY, 0xbd, { KEY_CAMERA } }, { KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } }, { KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */ { KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */ + { KE_KEY, 0xe8, { KEY_SCREENLOCK } }, { KE_KEY, 0xe9, { KEY_BRIGHTNESS_ZERO } }, { KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } }, { KE_KEY, 0xec, { KEY_CAMERA_UP } }, diff --git a/trunk/drivers/platform/x86/intel_pmic_gpio.c b/trunk/drivers/platform/x86/intel_pmic_gpio.c index d653104b59cb..464bb3fc4d88 100644 --- a/trunk/drivers/platform/x86/intel_pmic_gpio.c +++ b/trunk/drivers/platform/x86/intel_pmic_gpio.c @@ -74,6 +74,19 @@ struct pmic_gpio { u32 trigger_type; }; +static void pmic_program_irqtype(int gpio, int type) +{ + if (type & IRQ_TYPE_EDGE_RISING) + intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20); + else + intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20); + + if (type & IRQ_TYPE_EDGE_FALLING) + intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10); + else + intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10); +}; + static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset) { if (offset > 8) { @@ -166,16 +179,38 @@ static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset) return pg->irq_base + offset; } +static void pmic_bus_lock(struct irq_data *data) +{ + struct pmic_gpio *pg = irq_data_get_irq_chip_data(data); + + mutex_lock(&pg->buslock); +} + +static void pmic_bus_sync_unlock(struct irq_data 
*data) +{ + struct pmic_gpio *pg = irq_data_get_irq_chip_data(data); + + if (pg->update_type) { + unsigned int gpio = pg->update_type & ~GPIO_UPDATE_TYPE; + + pmic_program_irqtype(gpio, pg->trigger_type); + pg->update_type = 0; + } + mutex_unlock(&pg->buslock); +} + /* the gpiointr register is read-clear, so just do nothing. */ static void pmic_irq_unmask(struct irq_data *data) { } static void pmic_irq_mask(struct irq_data *data) { } static struct irq_chip pmic_irqchip = { - .name = "PMIC-GPIO", - .irq_mask = pmic_irq_mask, - .irq_unmask = pmic_irq_unmask, - .irq_set_type = pmic_irq_type, + .name = "PMIC-GPIO", + .irq_mask = pmic_irq_mask, + .irq_unmask = pmic_irq_unmask, + .irq_set_type = pmic_irq_type, + .irq_bus_lock = pmic_bus_lock, + .irq_bus_sync_unlock = pmic_bus_sync_unlock, }; static irqreturn_t pmic_irq_handler(int irq, void *data) diff --git a/trunk/drivers/platform/x86/samsung-laptop.c b/trunk/drivers/platform/x86/samsung-laptop.c index de434c6dc2d6..d347116d150e 100644 --- a/trunk/drivers/platform/x86/samsung-laptop.c +++ b/trunk/drivers/platform/x86/samsung-laptop.c @@ -570,6 +570,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = { }, .callback = dmi_check_cb, }, + { + .ident = "R410 Plus", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, + "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "R410P"), + DMI_MATCH(DMI_BOARD_NAME, "R460"), + }, + .callback = dmi_check_cb, + }, { .ident = "R518", .matches = { @@ -591,12 +601,12 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = { .callback = dmi_check_cb, }, { - .ident = "N150/N210/N220", + .ident = "N150/N210/N220/N230", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"), - DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"), + DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220/N230"), + DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220/N230"), }, .callback = dmi_check_cb, }, @@ -771,6 +781,7 @@ static int __init samsung_init(void) /* create a backlight device to talk to this one */ memset(&props, 0, sizeof(struct backlight_properties)); + props.type = BACKLIGHT_PLATFORM; props.max_brightness = sabi_config->max_brightness; backlight_device = backlight_device_register("samsung", &sdev->dev, NULL, &backlight_ops, diff --git a/trunk/drivers/platform/x86/sony-laptop.c b/trunk/drivers/platform/x86/sony-laptop.c index e642f5f29504..6fe8cd6e23b5 100644 --- a/trunk/drivers/platform/x86/sony-laptop.c +++ b/trunk/drivers/platform/x86/sony-laptop.c @@ -138,6 +138,8 @@ MODULE_PARM_DESC(kbd_backlight_timeout, "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout " "(default: 0)"); +static void sony_nc_kbd_backlight_resume(void); + enum sony_nc_rfkill { SONY_WIFI, SONY_BLUETOOTH, @@ -771,11 +773,6 @@ static int sony_nc_handles_setup(struct platform_device *pd) if (!handles) return -ENOMEM; - sysfs_attr_init(&handles->devattr.attr); - handles->devattr.attr.name = "handles"; - handles->devattr.attr.mode = S_IRUGO; - handles->devattr.show = sony_nc_handles_show; - for (i = 0; i < ARRAY_SIZE(handles->cap); i++) { if (!acpi_callsetfunc(sony_nc_acpi_handle, "SN00", i + 0x20, &result)) { @@ -785,11 +782,18 @@ static int sony_nc_handles_setup(struct platform_device *pd) } } - /* allow reading capabilities via sysfs */ - if (device_create_file(&pd->dev, &handles->devattr)) { - kfree(handles); - handles = NULL; - return -1; + if (debug) { + sysfs_attr_init(&handles->devattr.attr); + handles->devattr.attr.name = "handles"; + handles->devattr.attr.mode = S_IRUGO; + 
handles->devattr.show = sony_nc_handles_show; + + /* allow reading capabilities via sysfs */ + if (device_create_file(&pd->dev, &handles->devattr)) { + kfree(handles); + handles = NULL; + return -1; + } } return 0; @@ -798,7 +802,8 @@ static int sony_nc_handles_setup(struct platform_device *pd) static int sony_nc_handles_cleanup(struct platform_device *pd) { if (handles) { - device_remove_file(&pd->dev, &handles->devattr); + if (debug) + device_remove_file(&pd->dev, &handles->devattr); kfree(handles); handles = NULL; } @@ -808,6 +813,11 @@ static int sony_nc_handles_cleanup(struct platform_device *pd) static int sony_find_snc_handle(int handle) { int i; + + /* not initialized yet, return early */ + if (!handles) + return -1; + for (i = 0; i < 0x10; i++) { if (handles->cap[i] == handle) { dprintk("found handle 0x%.4x (offset: 0x%.2x)\n", @@ -924,6 +934,14 @@ static ssize_t sony_nc_sysfs_store(struct device *dev, /* * Backlight device */ +struct sony_backlight_props { + struct backlight_device *dev; + int handle; + u8 offset; + u8 maxlvl; +}; +struct sony_backlight_props sony_bl_props; + static int sony_backlight_update_status(struct backlight_device *bd) { return acpi_callsetfunc(sony_nc_acpi_handle, "SBRT", @@ -944,21 +962,26 @@ static int sony_nc_get_brightness_ng(struct backlight_device *bd) { int result; int *handle = (int *)bl_get_data(bd); + struct sony_backlight_props *sdev = + (struct sony_backlight_props *)bl_get_data(bd); - sony_call_snc_handle(*handle, 0x0200, &result); + sony_call_snc_handle(sdev->handle, 0x0200, &result); - return result & 0xff; + return (result & 0xff) - sdev->offset; } static int sony_nc_update_status_ng(struct backlight_device *bd) { int value, result; int *handle = (int *)bl_get_data(bd); + struct sony_backlight_props *sdev = + (struct sony_backlight_props *)bl_get_data(bd); - value = bd->props.brightness; - sony_call_snc_handle(*handle, 0x0100 | (value << 16), &result); + value = bd->props.brightness + sdev->offset; + if (sony_call_snc_handle(sdev->handle, 0x0100 | (value << 16), &result)) + return -EIO; - return sony_nc_get_brightness_ng(bd); + return value; } static const struct backlight_ops sony_backlight_ops = { @@ -971,8 +994,6 @@ static const struct backlight_ops sony_backlight_ng_ops = { .update_status = sony_nc_update_status_ng, .get_brightness = sony_nc_get_brightness_ng, }; -static int backlight_ng_handle; -static struct backlight_device *sony_backlight_device; /* * New SNC-only Vaios event mapping to driver known keys @@ -1168,6 +1189,9 @@ static int sony_nc_resume(struct acpi_device *device) /* re-read rfkill state */ sony_nc_rfkill_update(); + /* restore kbd backlight states */ + sony_nc_kbd_backlight_resume(); + return 0; } @@ -1355,6 +1379,7 @@ static void sony_nc_rfkill_setup(struct acpi_device *device) #define KBDBL_HANDLER 0x137 #define KBDBL_PRESENT 0xB00 #define SET_MODE 0xC00 +#define SET_STATE 0xD00 #define SET_TIMEOUT 0xE00 struct kbd_backlight { @@ -1377,6 +1402,10 @@ static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value) (value << 0x10) | SET_MODE, &result)) return -EIO; + /* Try to turn the light on/off immediately */ + sony_call_snc_handle(KBDBL_HANDLER, (value << 0x10) | SET_STATE, + &result); + kbdbl_handle->mode = value; return 0; @@ -1458,7 +1487,7 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd) { int result; - if (sony_call_snc_handle(0x137, KBDBL_PRESENT, &result)) + if (sony_call_snc_handle(KBDBL_HANDLER, KBDBL_PRESENT, &result)) return 0; if (!(result & 0x02)) return 0; @@ -1501,13 +1530,105 @@ 
static int sony_nc_kbd_backlight_setup(struct platform_device *pd) static int sony_nc_kbd_backlight_cleanup(struct platform_device *pd) { if (kbdbl_handle) { + int result; + device_remove_file(&pd->dev, &kbdbl_handle->mode_attr); device_remove_file(&pd->dev, &kbdbl_handle->timeout_attr); + + /* restore the default hw behaviour */ + sony_call_snc_handle(KBDBL_HANDLER, 0x1000 | SET_MODE, &result); + sony_call_snc_handle(KBDBL_HANDLER, SET_TIMEOUT, &result); + kfree(kbdbl_handle); } return 0; } +static void sony_nc_kbd_backlight_resume(void) +{ + int ignore = 0; + + if (!kbdbl_handle) + return; + + if (kbdbl_handle->mode == 0) + sony_call_snc_handle(KBDBL_HANDLER, SET_MODE, &ignore); + + if (kbdbl_handle->timeout != 0) + sony_call_snc_handle(KBDBL_HANDLER, + (kbdbl_handle->timeout << 0x10) | SET_TIMEOUT, + &ignore); +} + +static void sony_nc_backlight_ng_read_limits(int handle, + struct sony_backlight_props *props) +{ + int offset; + acpi_status status; + u8 brlvl, i; + u8 min = 0xff, max = 0x00; + struct acpi_object_list params; + union acpi_object in_obj; + union acpi_object *lvl_enum; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + + props->handle = handle; + props->offset = 0; + props->maxlvl = 0xff; + + offset = sony_find_snc_handle(handle); + if (offset < 0) + return; + + /* try to read the boundaries from ACPI tables, if we fail the above + * defaults should be reasonable + */ + params.count = 1; + params.pointer = &in_obj; + in_obj.type = ACPI_TYPE_INTEGER; + in_obj.integer.value = offset; + status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", &params, + &buffer); + if (ACPI_FAILURE(status)) + return; + + lvl_enum = (union acpi_object *) buffer.pointer; + if (!lvl_enum) { + pr_err("No SN06 return object."); + return; + } + if (lvl_enum->type != ACPI_TYPE_BUFFER) { + pr_err("Invalid SN06 return object 0x%.2x\n", + lvl_enum->type); + goto out_invalid; + } + + /* the buffer lists brightness levels available, brightness levels are + * from 0 to 8 in the array, other values are used by ALS control.
+ */ + for (i = 0; i < 9 && i < lvl_enum->buffer.length; i++) { + + brlvl = *(lvl_enum->buffer.pointer + i); + dprintk("Brightness level: %d\n", brlvl); + + if (!brlvl) + break; + + if (brlvl > max) + max = brlvl; + if (brlvl < min) + min = brlvl; + } + props->offset = min; + props->maxlvl = max; + dprintk("Brightness levels: min=%d max=%d\n", props->offset, + props->maxlvl); + +out_invalid: + kfree(buffer.pointer); + return; +} + static void sony_nc_backlight_setup(void) { acpi_handle unused; @@ -1516,14 +1637,14 @@ static void sony_nc_backlight_setup(void) struct backlight_properties props; if (sony_find_snc_handle(0x12f) != -1) { - backlight_ng_handle = 0x12f; ops = &sony_backlight_ng_ops; - max_brightness = 0xff; + sony_nc_backlight_ng_read_limits(0x12f, &sony_bl_props); + max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset; } else if (sony_find_snc_handle(0x137) != -1) { - backlight_ng_handle = 0x137; ops = &sony_backlight_ng_ops; - max_brightness = 0xff; + sony_nc_backlight_ng_read_limits(0x137, &sony_bl_props); + max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset; } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT", &unused))) { @@ -1536,22 +1657,22 @@ static void sony_nc_backlight_setup(void) memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_PLATFORM; props.max_brightness = max_brightness; - sony_backlight_device = backlight_device_register("sony", NULL, - &backlight_ng_handle, - ops, &props); + sony_bl_props.dev = backlight_device_register("sony", NULL, + &sony_bl_props, + ops, &props); - if (IS_ERR(sony_backlight_device)) { - pr_warning(DRV_PFX "unable to register backlight device\n"); - sony_backlight_device = NULL; + if (IS_ERR(sony_bl_props.dev)) { + pr_warn(DRV_PFX "unable to register backlight device\n"); + sony_bl_props.dev = NULL; } else - sony_backlight_device->props.brightness = - ops->get_brightness(sony_backlight_device); + sony_bl_props.dev->props.brightness = + ops->get_brightness(sony_bl_props.dev); } static void sony_nc_backlight_cleanup(void) { - if (sony_backlight_device) - backlight_device_unregister(sony_backlight_device); + if (sony_bl_props.dev) + backlight_device_unregister(sony_bl_props.dev); } static int sony_nc_add(struct acpi_device *device) @@ -2549,7 +2670,7 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd, mutex_lock(&spic_dev.lock); switch (cmd) { case SONYPI_IOCGBRT: - if (sony_backlight_device == NULL) { + if (sony_bl_props.dev == NULL) { ret = -EIO; break; } @@ -2562,7 +2683,7 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd, ret = -EFAULT; break; case SONYPI_IOCSBRT: - if (sony_backlight_device == NULL) { + if (sony_bl_props.dev == NULL) { ret = -EIO; break; } @@ -2576,8 +2697,8 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd, break; } /* sync the backlight device status */ - sony_backlight_device->props.brightness = - sony_backlight_get_brightness(sony_backlight_device); + sony_bl_props.dev->props.brightness = + sony_backlight_get_brightness(sony_bl_props.dev); break; case SONYPI_IOCGBAT1CAP: if (ec_read16(SONYPI_BAT1_FULL, &val16)) { diff --git a/trunk/drivers/platform/x86/thinkpad_acpi.c b/trunk/drivers/platform/x86/thinkpad_acpi.c index a08561f5349e..562fcf0dd2b5 100644 --- a/trunk/drivers/platform/x86/thinkpad_acpi.c +++ b/trunk/drivers/platform/x86/thinkpad_acpi.c @@ -128,7 +128,8 @@ enum { }; /* ACPI HIDs */ -#define TPACPI_ACPI_HKEY_HID "IBM0068" +#define TPACPI_ACPI_IBM_HKEY_HID "IBM0068" +#define 
TPACPI_ACPI_LENOVO_HKEY_HID "LEN0068" #define TPACPI_ACPI_EC_HID "PNP0C09" /* Input IDs */ @@ -3879,7 +3880,8 @@ static int hotkey_write(char *buf) } static const struct acpi_device_id ibm_htk_device_ids[] = { - {TPACPI_ACPI_HKEY_HID, 0}, + {TPACPI_ACPI_IBM_HKEY_HID, 0}, + {TPACPI_ACPI_LENOVO_HKEY_HID, 0}, {"", 0}, }; @@ -8618,8 +8620,7 @@ static bool __pure __init tpacpi_is_valid_fw_id(const char* const s, tpacpi_is_fw_digit(s[1]) && s[2] == t && s[3] == 'T' && tpacpi_is_fw_digit(s[4]) && - tpacpi_is_fw_digit(s[5]) && - s[6] == 'W' && s[7] == 'W'; + tpacpi_is_fw_digit(s[5]); } /* returns 0 - probe ok, or < 0 - probe error. diff --git a/trunk/drivers/rapidio/rio.c b/trunk/drivers/rapidio/rio.c index c29719cacbca..86c9a091a2ff 100644 --- a/trunk/drivers/rapidio/rio.c +++ b/trunk/drivers/rapidio/rio.c @@ -1171,16 +1171,17 @@ static int rio_hdid_setup(char *str) __setup("riohdid=", rio_hdid_setup); -void rio_register_mport(struct rio_mport *port) +int rio_register_mport(struct rio_mport *port) { if (next_portid >= RIO_MAX_MPORTS) { pr_err("RIO: reached specified max number of mports\n"); - return; + return 1; } port->id = next_portid++; port->host_deviceid = rio_get_hdid(port->id); list_add_tail(&port->node, &rio_mports); + return 0; } EXPORT_SYMBOL_GPL(rio_local_get_device_id); diff --git a/trunk/drivers/rapidio/switches/idt_gen2.c b/trunk/drivers/rapidio/switches/idt_gen2.c index 095016a9dec1..043ee3136e40 100644 --- a/trunk/drivers/rapidio/switches/idt_gen2.c +++ b/trunk/drivers/rapidio/switches/idt_gen2.c @@ -95,6 +95,9 @@ idtg2_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, else table++; + if (route_port == RIO_INVALID_ROUTE) + route_port = IDT_DEFAULT_ROUTE; + rio_mport_write_config_32(mport, destid, hopcount, LOCAL_RTE_CONF_DESTID_SEL, table); @@ -411,6 +414,12 @@ static int idtg2_switch_init(struct rio_dev *rdev, int do_enum) rdev->rswitch->em_handle = idtg2_em_handler; rdev->rswitch->sw_sysfs = idtg2_sysfs; + if (do_enum) { + /* Ensure that default routing is disabled on startup */ + rio_write_config_32(rdev, + RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE); + } + return 0; } @@ -418,3 +427,4 @@ DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1848, idtg2_switch_init); DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1616, idtg2_switch_init); DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTVPS1616, idtg2_switch_init); DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTSPS1616, idtg2_switch_init); +DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1432, idtg2_switch_init); diff --git a/trunk/drivers/rapidio/switches/idtcps.c b/trunk/drivers/rapidio/switches/idtcps.c index 3a971077e7bf..d06ee2d44b44 100644 --- a/trunk/drivers/rapidio/switches/idtcps.c +++ b/trunk/drivers/rapidio/switches/idtcps.c @@ -26,6 +26,9 @@ idtcps_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, { u32 result; + if (route_port == RIO_INVALID_ROUTE) + route_port = CPS_DEFAULT_ROUTE; + if (table == RIO_GLOBAL_TABLE) { rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); @@ -130,6 +133,9 @@ static int idtcps_switch_init(struct rio_dev *rdev, int do_enum) /* set TVAL = ~50us */ rio_write_config_32(rdev, rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); + /* Ensure that default routing is disabled on startup */ + rio_write_config_32(rdev, + RIO_STD_RTE_DEFAULT_PORT, CPS_NO_ROUTE); } return 0; diff --git a/trunk/drivers/rapidio/switches/tsi57x.c b/trunk/drivers/rapidio/switches/tsi57x.c index 1a62934bfebc..db8b8028988d 100644 --- 
a/trunk/drivers/rapidio/switches/tsi57x.c +++ b/trunk/drivers/rapidio/switches/tsi57x.c @@ -303,6 +303,12 @@ static int tsi57x_switch_init(struct rio_dev *rdev, int do_enum) rdev->rswitch->em_init = tsi57x_em_init; rdev->rswitch->em_handle = tsi57x_em_handler; + if (do_enum) { + /* Ensure that default routing is disabled on startup */ + rio_write_config_32(rdev, RIO_STD_RTE_DEFAULT_PORT, + RIO_INVALID_ROUTE); + } + return 0; } diff --git a/trunk/drivers/rtc/Kconfig b/trunk/drivers/rtc/Kconfig index e1878877399c..42891726ea72 100644 --- a/trunk/drivers/rtc/Kconfig +++ b/trunk/drivers/rtc/Kconfig @@ -3,10 +3,10 @@ # config RTC_LIB - tristate + bool menuconfig RTC_CLASS - tristate "Real Time Clock" + bool "Real Time Clock" default n depends on !S390 select RTC_LIB @@ -15,9 +15,6 @@ menuconfig RTC_CLASS be allowed to plug one or more RTCs to your system. You will probably want to enable one or more of the interfaces below. - This driver can also be built as a module. If so, the module - will be called rtc-core. - if RTC_CLASS config RTC_HCTOSYS diff --git a/trunk/drivers/rtc/class.c b/trunk/drivers/rtc/class.c index 09b4437b3e61..4194e59e14cd 100644 --- a/trunk/drivers/rtc/class.c +++ b/trunk/drivers/rtc/class.c @@ -41,26 +41,21 @@ static void rtc_device_release(struct device *dev) * system's wall clock; restore it on resume(). */ -static struct timespec delta; static time_t oldtime; +static struct timespec oldts; static int rtc_suspend(struct device *dev, pm_message_t mesg) { struct rtc_device *rtc = to_rtc_device(dev); struct rtc_time tm; - struct timespec ts = current_kernel_time(); if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0) return 0; rtc_read_time(rtc, &tm); + ktime_get_ts(&oldts); rtc_tm_to_time(&tm, &oldtime); - /* RTC precision is 1 second; adjust delta for avg 1/2 sec err */ - set_normalized_timespec(&delta, - ts.tv_sec - oldtime, - ts.tv_nsec - (NSEC_PER_SEC >> 1)); - return 0; } @@ -70,10 +65,12 @@ static int rtc_resume(struct device *dev) struct rtc_time tm; time_t newtime; struct timespec time; + struct timespec newts; if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0) return 0; + ktime_get_ts(&newts); rtc_read_time(rtc, &tm); if (rtc_valid_tm(&tm) != 0) { pr_debug("%s: bogus resume time\n", dev_name(&rtc->dev)); @@ -85,15 +82,13 @@ static int rtc_resume(struct device *dev) pr_debug("%s: time travel!\n", dev_name(&rtc->dev)); return 0; } + /* calculate the RTC time delta */ + set_normalized_timespec(&time, newtime - oldtime, 0); - /* restore wall clock using delta against this RTC; - * adjust again for avg 1/2 second RTC sampling error - */ - set_normalized_timespec(&time, - newtime + delta.tv_sec, - (NSEC_PER_SEC >> 1) + delta.tv_nsec); - do_settimeofday(&time); + /* subtract kernel time between rtc_suspend to rtc_resume */ + time = timespec_sub(time, timespec_sub(newts, oldts)); + timekeeping_inject_sleeptime(&time); return 0; } @@ -171,7 +166,7 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev, err = __rtc_read_alarm(rtc, &alrm); if (!err && !rtc_valid_tm(&alrm.time)) - rtc_set_alarm(rtc, &alrm); + rtc_initialize_alarm(rtc, &alrm); strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE); dev_set_name(&rtc->dev, "rtc%d", id); diff --git a/trunk/drivers/rtc/interface.c b/trunk/drivers/rtc/interface.c index 23719f0acbf6..ef6316acec43 100644 --- a/trunk/drivers/rtc/interface.c +++ b/trunk/drivers/rtc/interface.c @@ -375,6 +375,32 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) } 
EXPORT_SYMBOL_GPL(rtc_set_alarm); +/* Called once per device from rtc_device_register */ +int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) +{ + int err; + + err = rtc_valid_tm(&alarm->time); + if (err != 0) + return err; + + err = mutex_lock_interruptible(&rtc->ops_lock); + if (err) + return err; + + rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time); + rtc->aie_timer.period = ktime_set(0, 0); + if (alarm->enabled) { + rtc->aie_timer.enabled = 1; + timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node); + } + mutex_unlock(&rtc->ops_lock); + return err; +} +EXPORT_SYMBOL_GPL(rtc_initialize_alarm); + + + int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled) { int err = mutex_lock_interruptible(&rtc->ops_lock); diff --git a/trunk/drivers/rtc/rtc-bfin.c b/trunk/drivers/rtc/rtc-bfin.c index a0fc4cf42abf..90d866272c8e 100644 --- a/trunk/drivers/rtc/rtc-bfin.c +++ b/trunk/drivers/rtc/rtc-bfin.c @@ -250,6 +250,8 @@ static int bfin_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) bfin_rtc_int_set_alarm(rtc); else bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)); + + return 0; } static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm) diff --git a/trunk/drivers/rtc/rtc-coh901331.c b/trunk/drivers/rtc/rtc-coh901331.c index 316f484999b5..80f9c88214c5 100644 --- a/trunk/drivers/rtc/rtc-coh901331.c +++ b/trunk/drivers/rtc/rtc-coh901331.c @@ -220,6 +220,7 @@ static int __init coh901331_probe(struct platform_device *pdev) } clk_disable(rtap->clk); + platform_set_drvdata(pdev, rtap); rtap->rtc = rtc_device_register("coh901331", &pdev->dev, &coh901331_ops, THIS_MODULE); if (IS_ERR(rtap->rtc)) { @@ -227,11 +228,10 @@ static int __init coh901331_probe(struct platform_device *pdev) goto out_no_rtc; } - platform_set_drvdata(pdev, rtap); - return 0; out_no_rtc: + platform_set_drvdata(pdev, NULL); out_no_clk_enable: clk_put(rtap->clk); out_no_clk: diff --git a/trunk/drivers/rtc/rtc-davinci.c b/trunk/drivers/rtc/rtc-davinci.c index 8d46838dff8a..755e1fe914af 100644 --- a/trunk/drivers/rtc/rtc-davinci.c +++ b/trunk/drivers/rtc/rtc-davinci.c @@ -524,6 +524,8 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) goto fail2; } + platform_set_drvdata(pdev, davinci_rtc); + davinci_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, &davinci_rtc_ops, THIS_MODULE); if (IS_ERR(davinci_rtc->rtc)) { @@ -553,8 +555,6 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) rtcss_write(davinci_rtc, PRTCSS_RTC_CCTRL_CAEN, PRTCSS_RTC_CCTRL); - platform_set_drvdata(pdev, davinci_rtc); - device_init_wakeup(&pdev->dev, 0); return 0; @@ -562,6 +562,7 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) fail4: rtc_device_unregister(davinci_rtc->rtc); fail3: + platform_set_drvdata(pdev, NULL); iounmap(davinci_rtc->base); fail2: release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size); diff --git a/trunk/drivers/rtc/rtc-ds1286.c b/trunk/drivers/rtc/rtc-ds1286.c index 60ce69600828..47e681df31e2 100644 --- a/trunk/drivers/rtc/rtc-ds1286.c +++ b/trunk/drivers/rtc/rtc-ds1286.c @@ -355,6 +355,7 @@ static int __devinit ds1286_probe(struct platform_device *pdev) goto out; } spin_lock_init(&priv->lock); + platform_set_drvdata(pdev, priv); rtc = rtc_device_register("ds1286", &pdev->dev, &ds1286_ops, THIS_MODULE); if (IS_ERR(rtc)) { @@ -362,7 +363,6 @@ static int __devinit ds1286_probe(struct platform_device *pdev) goto out; } priv->rtc = rtc; - platform_set_drvdata(pdev, priv); return 0; out: diff --git 
a/trunk/drivers/rtc/rtc-ep93xx.c b/trunk/drivers/rtc/rtc-ep93xx.c index 11ae64dcbf3c..335551d333b2 100644 --- a/trunk/drivers/rtc/rtc-ep93xx.c +++ b/trunk/drivers/rtc/rtc-ep93xx.c @@ -151,6 +151,7 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev) return -ENXIO; pdev->dev.platform_data = ep93xx_rtc; + platform_set_drvdata(pdev, rtc); rtc = rtc_device_register(pdev->name, &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE); @@ -159,8 +160,6 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev) goto exit; } - platform_set_drvdata(pdev, rtc); - err = sysfs_create_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files); if (err) goto fail; @@ -168,9 +167,9 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev) return 0; fail: - platform_set_drvdata(pdev, NULL); rtc_device_unregister(rtc); exit: + platform_set_drvdata(pdev, NULL); pdev->dev.platform_data = NULL; return err; } diff --git a/trunk/drivers/rtc/rtc-m41t80.c b/trunk/drivers/rtc/rtc-m41t80.c index 69fe664a2228..eda128fc1d38 100644 --- a/trunk/drivers/rtc/rtc-m41t80.c +++ b/trunk/drivers/rtc/rtc-m41t80.c @@ -783,6 +783,9 @@ static int m41t80_probe(struct i2c_client *client, goto exit; } + clientdata->features = id->driver_data; + i2c_set_clientdata(client, clientdata); + rtc = rtc_device_register(client->name, &client->dev, &m41t80_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) { @@ -792,8 +795,6 @@ static int m41t80_probe(struct i2c_client *client, } clientdata->rtc = rtc; - clientdata->features = id->driver_data; - i2c_set_clientdata(client, clientdata); /* Make sure HT (Halt Update) bit is cleared */ rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR); diff --git a/trunk/drivers/rtc/rtc-max8925.c b/trunk/drivers/rtc/rtc-max8925.c index 174036dda786..3bc046f427e0 100644 --- a/trunk/drivers/rtc/rtc-max8925.c +++ b/trunk/drivers/rtc/rtc-max8925.c @@ -257,6 +257,10 @@ static int __devinit max8925_rtc_probe(struct platform_device *pdev) goto out_irq; } + dev_set_drvdata(&pdev->dev, info); + /* XXX - isn't this redundant? 
*/ + platform_set_drvdata(pdev, info); + info->rtc_dev = rtc_device_register("max8925-rtc", &pdev->dev, &max8925_rtc_ops, THIS_MODULE); ret = PTR_ERR(info->rtc_dev); @@ -265,11 +269,9 @@ static int __devinit max8925_rtc_probe(struct platform_device *pdev) goto out_rtc; } - dev_set_drvdata(&pdev->dev, info); - platform_set_drvdata(pdev, info); - return 0; out_rtc: + platform_set_drvdata(pdev, NULL); free_irq(chip->irq_base + MAX8925_IRQ_RTC_ALARM0, info); out_irq: kfree(info); diff --git a/trunk/drivers/rtc/rtc-max8998.c b/trunk/drivers/rtc/rtc-max8998.c index 3f7bc6b9fefa..2e48aa604273 100644 --- a/trunk/drivers/rtc/rtc-max8998.c +++ b/trunk/drivers/rtc/rtc-max8998.c @@ -265,6 +265,8 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev) info->rtc = max8998->rtc; info->irq = max8998->irq_base + MAX8998_IRQ_ALARM0; + platform_set_drvdata(pdev, info); + info->rtc_dev = rtc_device_register("max8998-rtc", &pdev->dev, &max8998_rtc_ops, THIS_MODULE); @@ -274,8 +276,6 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev) goto out_rtc; } - platform_set_drvdata(pdev, info); - ret = request_threaded_irq(info->irq, NULL, max8998_rtc_alarm_irq, 0, "rtc-alarm0", info); @@ -293,6 +293,7 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev) return 0; out_rtc: + platform_set_drvdata(pdev, NULL); kfree(info); return ret; } diff --git a/trunk/drivers/rtc/rtc-mc13xxx.c b/trunk/drivers/rtc/rtc-mc13xxx.c index c42006469559..a1a278bc340d 100644 --- a/trunk/drivers/rtc/rtc-mc13xxx.c +++ b/trunk/drivers/rtc/rtc-mc13xxx.c @@ -349,11 +349,15 @@ static int __devinit mc13xxx_rtc_probe(struct platform_device *pdev) if (ret) goto err_alarm_irq_request; + mc13xxx_unlock(mc13xxx); + priv->rtc = rtc_device_register(pdev->name, &pdev->dev, &mc13xxx_rtc_ops, THIS_MODULE); if (IS_ERR(priv->rtc)) { ret = PTR_ERR(priv->rtc); + mc13xxx_lock(mc13xxx); + mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_TODA, priv); err_alarm_irq_request: @@ -365,12 +369,12 @@ static int __devinit mc13xxx_rtc_probe(struct platform_device *pdev) mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_RTCRST, priv); err_reset_irq_request: + mc13xxx_unlock(mc13xxx); + platform_set_drvdata(pdev, NULL); kfree(priv); } - mc13xxx_unlock(mc13xxx); - return ret; } @@ -401,6 +405,7 @@ const struct platform_device_id mc13xxx_rtc_idtable[] = { }, { .name = "mc13892-rtc", }, + { } }; static struct platform_driver mc13xxx_rtc_driver = { diff --git a/trunk/drivers/rtc/rtc-msm6242.c b/trunk/drivers/rtc/rtc-msm6242.c index 67820626e18f..fcb113c11122 100644 --- a/trunk/drivers/rtc/rtc-msm6242.c +++ b/trunk/drivers/rtc/rtc-msm6242.c @@ -214,6 +214,7 @@ static int __init msm6242_rtc_probe(struct platform_device *dev) error = -ENOMEM; goto out_free_priv; } + platform_set_drvdata(dev, priv); rtc = rtc_device_register("rtc-msm6242", &dev->dev, &msm6242_rtc_ops, THIS_MODULE); @@ -223,10 +224,10 @@ static int __init msm6242_rtc_probe(struct platform_device *dev) } priv->rtc = rtc; - platform_set_drvdata(dev, priv); return 0; out_unmap: + platform_set_drvdata(dev, NULL); iounmap(priv->regs); out_free_priv: kfree(priv); diff --git a/trunk/drivers/rtc/rtc-mxc.c b/trunk/drivers/rtc/rtc-mxc.c index 826ab64a8fa9..d814417bee8c 100644 --- a/trunk/drivers/rtc/rtc-mxc.c +++ b/trunk/drivers/rtc/rtc-mxc.c @@ -418,14 +418,6 @@ static int __init mxc_rtc_probe(struct platform_device *pdev) goto exit_put_clk; } - rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops, - THIS_MODULE); - if (IS_ERR(rtc)) { - ret = PTR_ERR(rtc); - goto exit_put_clk; - } - - 
pdata->rtc = rtc; platform_set_drvdata(pdev, pdata); /* Configure and enable the RTC */ @@ -438,8 +430,19 @@ static int __init mxc_rtc_probe(struct platform_device *pdev) pdata->irq = -1; } + rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops, + THIS_MODULE); + if (IS_ERR(rtc)) { + ret = PTR_ERR(rtc); + goto exit_clr_drvdata; + } + + pdata->rtc = rtc; + return 0; +exit_clr_drvdata: + platform_set_drvdata(pdev, NULL); exit_put_clk: clk_disable(pdata->clk); clk_put(pdata->clk); diff --git a/trunk/drivers/rtc/rtc-omap.c b/trunk/drivers/rtc/rtc-omap.c index de0dd7b1f146..bcae8dd41496 100644 --- a/trunk/drivers/rtc/rtc-omap.c +++ b/trunk/drivers/rtc/rtc-omap.c @@ -394,7 +394,7 @@ static int __init omap_rtc_probe(struct platform_device *pdev) return 0; fail2: - free_irq(omap_rtc_timer, NULL); + free_irq(omap_rtc_timer, rtc); fail1: rtc_device_unregister(rtc); fail0: diff --git a/trunk/drivers/rtc/rtc-pcap.c b/trunk/drivers/rtc/rtc-pcap.c index a633abc42896..cd4f198cc2ef 100644 --- a/trunk/drivers/rtc/rtc-pcap.c +++ b/trunk/drivers/rtc/rtc-pcap.c @@ -151,6 +151,8 @@ static int __devinit pcap_rtc_probe(struct platform_device *pdev) pcap_rtc->pcap = dev_get_drvdata(pdev->dev.parent); + platform_set_drvdata(pdev, pcap_rtc); + pcap_rtc->rtc = rtc_device_register("pcap", &pdev->dev, &pcap_rtc_ops, THIS_MODULE); if (IS_ERR(pcap_rtc->rtc)) { @@ -158,7 +160,6 @@ static int __devinit pcap_rtc_probe(struct platform_device *pdev) goto fail_rtc; } - platform_set_drvdata(pdev, pcap_rtc); timer_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ); alarm_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA); @@ -177,6 +178,7 @@ static int __devinit pcap_rtc_probe(struct platform_device *pdev) fail_timer: rtc_device_unregister(pcap_rtc->rtc); fail_rtc: + platform_set_drvdata(pdev, NULL); kfree(pcap_rtc); return err; } diff --git a/trunk/drivers/rtc/rtc-rp5c01.c b/trunk/drivers/rtc/rtc-rp5c01.c index 694da39b6dd2..359da6d020b9 100644 --- a/trunk/drivers/rtc/rtc-rp5c01.c +++ b/trunk/drivers/rtc/rtc-rp5c01.c @@ -249,15 +249,15 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev) spin_lock_init(&priv->lock); + platform_set_drvdata(dev, priv); + rtc = rtc_device_register("rtc-rp5c01", &dev->dev, &rp5c01_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) { error = PTR_ERR(rtc); goto out_unmap; } - priv->rtc = rtc; - platform_set_drvdata(dev, priv); error = sysfs_create_bin_file(&dev->dev.kobj, &priv->nvram_attr); if (error) @@ -268,6 +268,7 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev) out_unregister: rtc_device_unregister(rtc); out_unmap: + platform_set_drvdata(dev, NULL); iounmap(priv->regs); out_free_priv: kfree(priv); diff --git a/trunk/drivers/rtc/rtc-s3c.c b/trunk/drivers/rtc/rtc-s3c.c index 714964913e5e..16512ecae31a 100644 --- a/trunk/drivers/rtc/rtc-s3c.c +++ b/trunk/drivers/rtc/rtc-s3c.c @@ -46,6 +46,7 @@ static struct clk *rtc_clk; static void __iomem *s3c_rtc_base; static int s3c_rtc_alarmno = NO_IRQ; static int s3c_rtc_tickno = NO_IRQ; +static bool wake_en; static enum s3c_cpu_type s3c_rtc_cpu_type; static DEFINE_SPINLOCK(s3c_rtc_pie_lock); @@ -336,7 +337,6 @@ static void s3c_rtc_release(struct device *dev) /* do not clear AIE here, it may be needed for wake */ - s3c_rtc_setpie(dev, 0); free_irq(s3c_rtc_alarmno, rtc_dev); free_irq(s3c_rtc_tickno, rtc_dev); } @@ -408,7 +408,6 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev) platform_set_drvdata(dev, NULL); rtc_device_unregister(rtc); - s3c_rtc_setpie(&dev->dev, 0); s3c_rtc_setaie(&dev->dev, 0); 
clk_disable(rtc_clk); @@ -564,8 +563,12 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state) } s3c_rtc_enable(pdev, 0); - if (device_may_wakeup(&pdev->dev)) - enable_irq_wake(s3c_rtc_alarmno); + if (device_may_wakeup(&pdev->dev) && !wake_en) { + if (enable_irq_wake(s3c_rtc_alarmno) == 0) + wake_en = true; + else + dev_err(&pdev->dev, "enable_irq_wake failed\n"); + } return 0; } @@ -581,8 +584,10 @@ static int s3c_rtc_resume(struct platform_device *pdev) writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON); } - if (device_may_wakeup(&pdev->dev)) + if (device_may_wakeup(&pdev->dev) && wake_en) { disable_irq_wake(s3c_rtc_alarmno); + wake_en = false; + } return 0; } diff --git a/trunk/drivers/s390/block/dasd.c b/trunk/drivers/s390/block/dasd.c index 4d2df2f76ea0..86b6f1cc1b10 100644 --- a/trunk/drivers/s390/block/dasd.c +++ b/trunk/drivers/s390/block/dasd.c @@ -1742,11 +1742,20 @@ int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) static inline int _dasd_term_running_cqr(struct dasd_device *device) { struct dasd_ccw_req *cqr; + int rc; if (list_empty(&device->ccw_queue)) return 0; cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); - return device->discipline->term_IO(cqr); + rc = device->discipline->term_IO(cqr); + if (!rc) + /* + * CQR terminated because a more important request is pending. + * Undo decreasing of retry counter because this is + * not an error case. + */ + cqr->retries++; + return rc; } int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) @@ -2314,15 +2323,14 @@ static void dasd_flush_request_queue(struct dasd_block *block) static int dasd_open(struct block_device *bdev, fmode_t mode) { - struct dasd_block *block = bdev->bd_disk->private_data; struct dasd_device *base; int rc; - if (!block) + base = dasd_device_from_gendisk(bdev->bd_disk); + if (!base) return -ENODEV; - base = block->base; - atomic_inc(&block->open_count); + atomic_inc(&base->block->open_count); if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { rc = -ENODEV; goto unlock; @@ -2355,21 +2363,28 @@ static int dasd_open(struct block_device *bdev, fmode_t mode) goto out; } + dasd_put_device(base); return 0; out: module_put(base->discipline->owner); unlock: - atomic_dec(&block->open_count); + atomic_dec(&base->block->open_count); + dasd_put_device(base); return rc; } static int dasd_release(struct gendisk *disk, fmode_t mode) { - struct dasd_block *block = disk->private_data; + struct dasd_device *base; - atomic_dec(&block->open_count); - module_put(block->base->discipline->owner); + base = dasd_device_from_gendisk(disk); + if (!base) + return -ENODEV; + + atomic_dec(&base->block->open_count); + module_put(base->discipline->owner); + dasd_put_device(base); return 0; } @@ -2378,20 +2393,20 @@ static int dasd_release(struct gendisk *disk, fmode_t mode) */ static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) { - struct dasd_block *block; struct dasd_device *base; - block = bdev->bd_disk->private_data; - if (!block) + base = dasd_device_from_gendisk(bdev->bd_disk); + if (!base) return -ENODEV; - base = block->base; if (!base->discipline || - !base->discipline->fill_geometry) + !base->discipline->fill_geometry) { + dasd_put_device(base); return -EINVAL; - - base->discipline->fill_geometry(block, geo); - geo->start = get_start_sect(bdev) >> block->s2b_shift; + } + base->discipline->fill_geometry(base->block, geo); + geo->start = get_start_sect(bdev) >> base->block->s2b_shift; + dasd_put_device(base); return 0; } @@ -2528,7 +2543,6 @@ void 
dasd_generic_remove(struct ccw_device *cdev) dasd_set_target_state(device, DASD_STATE_NEW); /* dasd_delete_device destroys the device reference. */ block = device->block; - device->block = NULL; dasd_delete_device(device); /* * life cycle of block is bound to device, so delete it after @@ -2650,7 +2664,6 @@ int dasd_generic_set_offline(struct ccw_device *cdev) dasd_set_target_state(device, DASD_STATE_NEW); /* dasd_delete_device destroys the device reference. */ block = device->block; - device->block = NULL; dasd_delete_device(device); /* * life cycle of block is bound to device, so delete it after diff --git a/trunk/drivers/s390/block/dasd_devmap.c b/trunk/drivers/s390/block/dasd_devmap.c index 42e1bf35f689..d71511c7850a 100644 --- a/trunk/drivers/s390/block/dasd_devmap.c +++ b/trunk/drivers/s390/block/dasd_devmap.c @@ -674,6 +674,36 @@ dasd_device_from_cdev(struct ccw_device *cdev) return device; } +void dasd_add_link_to_gendisk(struct gendisk *gdp, struct dasd_device *device) +{ + struct dasd_devmap *devmap; + + devmap = dasd_find_busid(dev_name(&device->cdev->dev)); + if (IS_ERR(devmap)) + return; + spin_lock(&dasd_devmap_lock); + gdp->private_data = devmap; + spin_unlock(&dasd_devmap_lock); +} + +struct dasd_device *dasd_device_from_gendisk(struct gendisk *gdp) +{ + struct dasd_device *device; + struct dasd_devmap *devmap; + + if (!gdp->private_data) + return NULL; + device = NULL; + spin_lock(&dasd_devmap_lock); + devmap = gdp->private_data; + if (devmap && devmap->device) { + device = devmap->device; + dasd_get_device(device); + } + spin_unlock(&dasd_devmap_lock); + return device; +} + /* * SECTION: files in sysfs */ diff --git a/trunk/drivers/s390/block/dasd_diag.c b/trunk/drivers/s390/block/dasd_diag.c index 29143eda9dd9..85dddb1e4126 100644 --- a/trunk/drivers/s390/block/dasd_diag.c +++ b/trunk/drivers/s390/block/dasd_diag.c @@ -239,7 +239,6 @@ static void dasd_ext_handler(unsigned int ext_int_code, addr_t ip; int rc; - kstat_cpu(smp_processor_id()).irqs[EXTINT_DSD]++; switch (ext_int_code >> 24) { case DASD_DIAG_CODE_31BIT: ip = (addr_t) param32; @@ -250,6 +249,7 @@ static void dasd_ext_handler(unsigned int ext_int_code, default: return; } + kstat_cpu(smp_processor_id()).irqs[EXTINT_DSD]++; if (!ip) { /* no intparm: unsolicited interrupt */ DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited " "interrupt"); diff --git a/trunk/drivers/s390/block/dasd_eckd.c b/trunk/drivers/s390/block/dasd_eckd.c index db8005d9f2fd..3ebdf5f92f8f 100644 --- a/trunk/drivers/s390/block/dasd_eckd.c +++ b/trunk/drivers/s390/block/dasd_eckd.c @@ -2037,7 +2037,7 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device, return; /* summary unit check */ - if ((sense[7] == 0x0D) && + if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) && (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) { dasd_alias_handle_summary_unit_check(device, irb); return; @@ -2053,7 +2053,8 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device, /* loss of device reservation is handled via base devices only * as alias devices may be used with several bases */ - if (device->block && (sense[7] == 0x3F) && + if (device->block && (sense[27] & DASD_SENSE_BIT_0) && + (sense[7] == 0x3F) && (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) && test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) { if (device->features & DASD_FEATURE_FAILONSLCK) diff --git a/trunk/drivers/s390/block/dasd_genhd.c b/trunk/drivers/s390/block/dasd_genhd.c index 5505bc07e1e7..19a1ff03d65e 100644 --- 
a/trunk/drivers/s390/block/dasd_genhd.c +++ b/trunk/drivers/s390/block/dasd_genhd.c @@ -73,7 +73,7 @@ int dasd_gendisk_alloc(struct dasd_block *block) if (base->features & DASD_FEATURE_READONLY || test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) set_disk_ro(gdp, 1); - gdp->private_data = block; + dasd_add_link_to_gendisk(gdp, base); gdp->queue = block->request_queue; block->gdp = gdp; set_capacity(block->gdp, 0); diff --git a/trunk/drivers/s390/block/dasd_int.h b/trunk/drivers/s390/block/dasd_int.h index df9f6999411d..d1e4f2c1264c 100644 --- a/trunk/drivers/s390/block/dasd_int.h +++ b/trunk/drivers/s390/block/dasd_int.h @@ -686,6 +686,9 @@ struct dasd_device *dasd_device_from_cdev(struct ccw_device *); struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *); struct dasd_device *dasd_device_from_devindex(int); +void dasd_add_link_to_gendisk(struct gendisk *, struct dasd_device *); +struct dasd_device *dasd_device_from_gendisk(struct gendisk *); + int dasd_parse(void); int dasd_busid_known(const char *); diff --git a/trunk/drivers/s390/block/dasd_ioctl.c b/trunk/drivers/s390/block/dasd_ioctl.c index 26075e95b1ba..72261e4c516d 100644 --- a/trunk/drivers/s390/block/dasd_ioctl.c +++ b/trunk/drivers/s390/block/dasd_ioctl.c @@ -42,16 +42,22 @@ dasd_ioctl_api_version(void __user *argp) static int dasd_ioctl_enable(struct block_device *bdev) { - struct dasd_block *block = bdev->bd_disk->private_data; + struct dasd_device *base; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - dasd_enable_device(block->base); + base = dasd_device_from_gendisk(bdev->bd_disk); + if (!base) + return -ENODEV; + + dasd_enable_device(base); /* Formatting the dasd device can change the capacity. */ mutex_lock(&bdev->bd_mutex); - i_size_write(bdev->bd_inode, (loff_t)get_capacity(block->gdp) << 9); + i_size_write(bdev->bd_inode, + (loff_t)get_capacity(base->block->gdp) << 9); mutex_unlock(&bdev->bd_mutex); + dasd_put_device(base); return 0; } @@ -62,11 +68,14 @@ dasd_ioctl_enable(struct block_device *bdev) static int dasd_ioctl_disable(struct block_device *bdev) { - struct dasd_block *block = bdev->bd_disk->private_data; + struct dasd_device *base; if (!capable(CAP_SYS_ADMIN)) return -EACCES; + base = dasd_device_from_gendisk(bdev->bd_disk); + if (!base) + return -ENODEV; /* * Man this is sick. We don't do a real disable but only downgrade * the device to DASD_STATE_BASIC. The reason is that dasdfmt uses @@ -75,7 +84,7 @@ dasd_ioctl_disable(struct block_device *bdev) * using the BIODASDFMT ioctl. Therefore the correct state for the * device is DASD_STATE_BASIC that allows to do basic i/o. */ - dasd_set_target_state(block->base, DASD_STATE_BASIC); + dasd_set_target_state(base, DASD_STATE_BASIC); /* * Set i_size to zero, since read, write, etc. check against this * value. 
@@ -83,6 +92,7 @@ dasd_ioctl_disable(struct block_device *bdev) mutex_lock(&bdev->bd_mutex); i_size_write(bdev->bd_inode, 0); mutex_unlock(&bdev->bd_mutex); + dasd_put_device(base); return 0; } @@ -191,26 +201,36 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata) static int dasd_ioctl_format(struct block_device *bdev, void __user *argp) { - struct dasd_block *block = bdev->bd_disk->private_data; + struct dasd_device *base; struct format_data_t fdata; + int rc; if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (!argp) return -EINVAL; - - if (block->base->features & DASD_FEATURE_READONLY || - test_bit(DASD_FLAG_DEVICE_RO, &block->base->flags)) + base = dasd_device_from_gendisk(bdev->bd_disk); + if (!base) + return -ENODEV; + if (base->features & DASD_FEATURE_READONLY || + test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) { + dasd_put_device(base); return -EROFS; - if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) + } + if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) { + dasd_put_device(base); return -EFAULT; + } if (bdev != bdev->bd_contains) { pr_warning("%s: The specified DASD is a partition and cannot " "be formatted\n", - dev_name(&block->base->cdev->dev)); + dev_name(&base->cdev->dev)); + dasd_put_device(base); return -EINVAL; } - return dasd_format(block, &fdata); + rc = dasd_format(base->block, &fdata); + dasd_put_device(base); + return rc; } #ifdef CONFIG_DASD_PROFILE @@ -340,8 +360,8 @@ static int dasd_ioctl_information(struct dasd_block *block, static int dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp) { - struct dasd_block *block = bdev->bd_disk->private_data; - int intval; + struct dasd_device *base; + int intval, rc; if (!capable(CAP_SYS_ADMIN)) return -EACCES; @@ -350,10 +370,17 @@ dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp) return -EINVAL; if (get_user(intval, (int __user *)argp)) return -EFAULT; - if (!intval && test_bit(DASD_FLAG_DEVICE_RO, &block->base->flags)) + base = dasd_device_from_gendisk(bdev->bd_disk); + if (!base) + return -ENODEV; + if (!intval && test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) { + dasd_put_device(base); return -EROFS; + } set_disk_ro(bdev->bd_disk, intval); - return dasd_set_feature(block->base->cdev, DASD_FEATURE_READONLY, intval); + rc = dasd_set_feature(base->cdev, DASD_FEATURE_READONLY, intval); + dasd_put_device(base); + return rc; } static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd, @@ -372,59 +399,78 @@ static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd, int dasd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { - struct dasd_block *block = bdev->bd_disk->private_data; + struct dasd_block *block; + struct dasd_device *base; void __user *argp; + int rc; if (is_compat_task()) argp = compat_ptr(arg); else argp = (void __user *)arg; - if (!block) - return -ENODEV; - if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) { PRINT_DEBUG("empty data ptr"); return -EINVAL; } + base = dasd_device_from_gendisk(bdev->bd_disk); + if (!base) + return -ENODEV; + block = base->block; + rc = 0; switch (cmd) { case BIODASDDISABLE: - return dasd_ioctl_disable(bdev); + rc = dasd_ioctl_disable(bdev); + break; case BIODASDENABLE: - return dasd_ioctl_enable(bdev); + rc = dasd_ioctl_enable(bdev); + break; case BIODASDQUIESCE: - return dasd_ioctl_quiesce(block); + rc = dasd_ioctl_quiesce(block); + break; case BIODASDRESUME: - return dasd_ioctl_resume(block); + rc = dasd_ioctl_resume(block); + break; case 
BIODASDFMT: - return dasd_ioctl_format(bdev, argp); + rc = dasd_ioctl_format(bdev, argp); + break; case BIODASDINFO: - return dasd_ioctl_information(block, cmd, argp); + rc = dasd_ioctl_information(block, cmd, argp); + break; case BIODASDINFO2: - return dasd_ioctl_information(block, cmd, argp); + rc = dasd_ioctl_information(block, cmd, argp); + break; case BIODASDPRRD: - return dasd_ioctl_read_profile(block, argp); + rc = dasd_ioctl_read_profile(block, argp); + break; case BIODASDPRRST: - return dasd_ioctl_reset_profile(block); + rc = dasd_ioctl_reset_profile(block); + break; case BLKROSET: - return dasd_ioctl_set_ro(bdev, argp); + rc = dasd_ioctl_set_ro(bdev, argp); + break; case DASDAPIVER: - return dasd_ioctl_api_version(argp); + rc = dasd_ioctl_api_version(argp); + break; case BIODASDCMFENABLE: - return enable_cmf(block->base->cdev); + rc = enable_cmf(base->cdev); + break; case BIODASDCMFDISABLE: - return disable_cmf(block->base->cdev); + rc = disable_cmf(base->cdev); + break; case BIODASDREADALLCMB: - return dasd_ioctl_readall_cmb(block, cmd, argp); + rc = dasd_ioctl_readall_cmb(block, cmd, argp); + break; default: /* if the discipline has an ioctl method try it. */ - if (block->base->discipline->ioctl) { - int rval = block->base->discipline->ioctl(block, cmd, argp); - if (rval != -ENOIOCTLCMD) - return rval; - } - - return -EINVAL; + if (base->discipline->ioctl) { + rc = base->discipline->ioctl(block, cmd, argp); + if (rc == -ENOIOCTLCMD) + rc = -EINVAL; + } else + rc = -EINVAL; } + dasd_put_device(base); + return rc; } diff --git a/trunk/drivers/s390/char/sclp_cmd.c b/trunk/drivers/s390/char/sclp_cmd.c index 4b60ede07f0e..be55fb2b1b1c 100644 --- a/trunk/drivers/s390/char/sclp_cmd.c +++ b/trunk/drivers/s390/char/sclp_cmd.c @@ -518,6 +518,8 @@ static void __init insert_increment(u16 rn, int standby, int assigned) return; new_incr->rn = rn; new_incr->standby = standby; + if (!standby) + new_incr->usecount = 1; last_rn = 0; prev = &sclp_mem_list; list_for_each_entry(incr, &sclp_mem_list, list) { diff --git a/trunk/drivers/s390/char/tape_block.c b/trunk/drivers/s390/char/tape_block.c index 83cea9a55e2f..1b3924c2fffd 100644 --- a/trunk/drivers/s390/char/tape_block.c +++ b/trunk/drivers/s390/char/tape_block.c @@ -236,7 +236,6 @@ tapeblock_setup_device(struct tape_device * device) disk->major = tapeblock_major; disk->first_minor = device->first_minor; disk->fops = &tapeblock_fops; - disk->events = DISK_EVENT_MEDIA_CHANGE; disk->private_data = tape_get_device(device); disk->queue = blkdat->request_queue; set_capacity(disk, 0); diff --git a/trunk/drivers/s390/cio/qdio_main.c b/trunk/drivers/s390/cio/qdio_main.c index c532ba929ccd..e8f267eb8887 100644 --- a/trunk/drivers/s390/cio/qdio_main.c +++ b/trunk/drivers/s390/cio/qdio_main.c @@ -407,8 +407,11 @@ static inline void account_sbals(struct qdio_q *q, int count) q->q_stats.nr_sbals[pos]++; } -static void announce_buffer_error(struct qdio_q *q, int count) +static void process_buffer_error(struct qdio_q *q, int count) { + unsigned char state = (q->is_input_q) ? 
SLSB_P_INPUT_NOT_INIT : + SLSB_P_OUTPUT_NOT_INIT; + q->qdio_error |= QDIO_ERROR_SLSB_STATE; /* special handling for no target buffer empty */ @@ -426,6 +429,12 @@ static void announce_buffer_error(struct qdio_q *q, int count) DBF_ERROR("F14:%2x F15:%2x", q->sbal[q->first_to_check]->element[14].flags & 0xff, q->sbal[q->first_to_check]->element[15].flags & 0xff); + + /* + * Interrupts may be avoided as long as the error is present + * so change the buffer state immediately to avoid starvation. + */ + set_buf_states(q, q->first_to_check, state, count); } static inline void inbound_primed(struct qdio_q *q, int count) @@ -506,8 +515,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q) account_sbals(q, count); break; case SLSB_P_INPUT_ERROR: - announce_buffer_error(q, count); - /* process the buffer, the upper layer will take care of it */ + process_buffer_error(q, count); q->first_to_check = add_buf(q->first_to_check, count); atomic_sub(count, &q->nr_buf_used); if (q->irq_ptr->perf_stat_enabled) @@ -677,8 +685,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q) account_sbals(q, count); break; case SLSB_P_OUTPUT_ERROR: - announce_buffer_error(q, count); - /* process the buffer, the upper layer will take care of it */ + process_buffer_error(q, count); q->first_to_check = add_buf(q->first_to_check, count); atomic_sub(count, &q->nr_buf_used); if (q->irq_ptr->perf_stat_enabled) diff --git a/trunk/drivers/s390/kvm/kvm_virtio.c b/trunk/drivers/s390/kvm/kvm_virtio.c index 414427d64a8f..607998f0b7d8 100644 --- a/trunk/drivers/s390/kvm/kvm_virtio.c +++ b/trunk/drivers/s390/kvm/kvm_virtio.c @@ -381,10 +381,10 @@ static void kvm_extint_handler(unsigned int ext_int_code, u16 subcode; u32 param; - kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++; subcode = ext_int_code >> 16; if ((subcode & 0xff00) != VIRTIO_SUBCODE_64) return; + kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++; /* The LSB might be overloaded, we have to mask it */ vq = (struct virtqueue *)(param64 & ~1UL); diff --git a/trunk/drivers/scsi/arcmsr/arcmsr_hba.c b/trunk/drivers/scsi/arcmsr/arcmsr_hba.c index da7b9887ec48..f980600f78a8 100644 --- a/trunk/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/trunk/drivers/scsi/arcmsr/arcmsr_hba.c @@ -75,8 +75,10 @@ MODULE_AUTHOR("Nick Cheng "); MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/16xx/1880) SATA/SAS RAID Host Bus Adapter"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(ARCMSR_DRIVER_VERSION); -static int sleeptime = 10; -static int retrycount = 12; + +#define ARCMSR_SLEEPTIME 10 +#define ARCMSR_RETRYCOUNT 12 + wait_queue_head_t wait_q; static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd); @@ -171,24 +173,6 @@ static struct pci_driver arcmsr_pci_driver = { **************************************************************************** **************************************************************************** */ -int arcmsr_sleep_for_bus_reset(struct scsi_cmnd *cmd) -{ - struct Scsi_Host *shost = NULL; - int i, isleep; - shost = cmd->device->host; - isleep = sleeptime / 10; - if (isleep > 0) { - for (i = 0; i < isleep; i++) { - msleep(10000); - } - } - - isleep = sleeptime % 10; - if (isleep > 0) { - msleep(isleep*1000); - } - return 0; -} static void arcmsr_free_hbb_mu(struct AdapterControlBlock *acb) { @@ -323,66 +307,64 @@ static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb) default: acb->adapter_type = ACB_ADAPTER_TYPE_A; } -} +} static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb) { struct MessageUnit_A 
__iomem *reg = acb->pmuA; - uint32_t Index; - uint8_t Retries = 0x00; - do { - for (Index = 0; Index < 100; Index++) { - if (readl(&reg->outbound_intstatus) & - ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { - writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, - &reg->outbound_intstatus); - return true; - } - msleep(10); - }/*max 1 seconds*/ + int i; + + for (i = 0; i < 2000; i++) { + if (readl(&reg->outbound_intstatus) & + ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { + writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, + &reg->outbound_intstatus); + return true; + } + msleep(10); + } /* max 20 seconds */ - } while (Retries++ < 20);/*max 20 sec*/ return false; } static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb) { struct MessageUnit_B *reg = acb->pmuB; - uint32_t Index; - uint8_t Retries = 0x00; - do { - for (Index = 0; Index < 100; Index++) { - if (readl(reg->iop2drv_doorbell) - & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { - writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN - , reg->iop2drv_doorbell); - writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell); - return true; - } - msleep(10); - }/*max 1 seconds*/ + int i; + + for (i = 0; i < 2000; i++) { + if (readl(reg->iop2drv_doorbell) + & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { + writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, + reg->iop2drv_doorbell); + writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, + reg->drv2iop_doorbell); + return true; + } + msleep(10); + } /* max 20 seconds */ - } while (Retries++ < 20);/*max 20 sec*/ return false; } static uint8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *pACB) { struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC; - unsigned char Retries = 0x00; - uint32_t Index; - do { - for (Index = 0; Index < 100; Index++) { - if (readl(&phbcmu->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { - writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &phbcmu->outbound_doorbell_clear);/*clear interrupt*/ - return true; - } - /* one us delay */ - msleep(10); - } /*max 1 seconds*/ - } while (Retries++ < 20); /*max 20 sec*/ + int i; + + for (i = 0; i < 2000; i++) { + if (readl(&phbcmu->outbound_doorbell) + & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { + writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, + &phbcmu->outbound_doorbell_clear); /*clear interrupt*/ + return true; + } + msleep(10); + } /* max 20 seconds */ + return false; } + static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; @@ -459,10 +441,11 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) struct CommandControlBlock *ccb_tmp; int i = 0, j = 0; dma_addr_t cdb_phyaddr; - unsigned long roundup_ccbsize = 0, offset; + unsigned long roundup_ccbsize; unsigned long max_xfer_len; unsigned long max_sg_entrys; uint32_t firm_config_version; + for (i = 0; i < ARCMSR_MAX_TARGETID; i++) for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) acb->devstate[i][j] = ARECA_RAID_GONE; @@ -472,23 +455,20 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) firm_config_version = acb->firm_cfg_version; if((firm_config_version & 0xFF) >= 3){ max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */ - max_sg_entrys = (max_xfer_len/4096); + max_sg_entrys = (max_xfer_len/4096); } acb->host->max_sectors = max_xfer_len/512; acb->host->sg_tablesize = max_sg_entrys; roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32); - acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM + 32; +
acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM; dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL); if(!dma_coherent){ - printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error \n", acb->host->host_no); + printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no); return -ENOMEM; } acb->dma_coherent = dma_coherent; acb->dma_coherent_handle = dma_coherent_handle; memset(dma_coherent, 0, acb->uncache_size); - offset = roundup((unsigned long)dma_coherent, 32) - (unsigned long)dma_coherent; - dma_coherent_handle = dma_coherent_handle + offset; - dma_coherent = (struct CommandControlBlock *)dma_coherent + offset; ccb_tmp = dma_coherent; acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle; for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){ @@ -2602,12 +2582,8 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) if (cdb_phyaddr_hi32 != 0) { struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC; - if (cdb_phyaddr_hi32 != 0) { - unsigned char Retries = 0x00; - do { - printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x \n", acb->adapter_index, cdb_phyaddr_hi32); - } while (Retries++ < 100); - } + printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n", + acb->adapter_index, cdb_phyaddr_hi32); writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]); writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[1]); writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0); @@ -2955,12 +2931,12 @@ static int arcmsr_bus_reset(struct scsi_cmnd *cmd) arcmsr_hardware_reset(acb); acb->acb_flags &= ~ACB_F_IOP_INITED; sleep_again: - arcmsr_sleep_for_bus_reset(cmd); + ssleep(ARCMSR_SLEEPTIME); if ((readl(&reg->outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) { - printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d \n", acb->host->host_no, retry_count); - if (retry_count > retrycount) { + printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count); + if (retry_count > ARCMSR_RETRYCOUNT) { acb->fw_flag = FW_DEADLOCK; - printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!! \n", acb->host->host_no); + printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no); return FAILED; } retry_count++; @@ -3025,12 +3001,12 @@ static int arcmsr_bus_reset(struct scsi_cmnd *cmd) arcmsr_hardware_reset(acb); acb->acb_flags &= ~ACB_F_IOP_INITED; sleep: - arcmsr_sleep_for_bus_reset(cmd); + ssleep(ARCMSR_SLEEPTIME); if ((readl(&reg->host_diagnostic) & 0x04) != 0) { - printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d \n", acb->host->host_no, retry_count); - if (retry_count > retrycount) { + printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count); + if (retry_count > ARCMSR_RETRYCOUNT) { acb->fw_flag = FW_DEADLOCK; - printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!! \n", acb->host->host_no); + printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no); return FAILED; } retry_count++; diff --git a/trunk/drivers/scsi/be2iscsi/be.h b/trunk/drivers/scsi/be2iscsi/be.h index 1cb8a5e85c7f..1d7b976c850f 100644 --- a/trunk/drivers/scsi/be2iscsi/be.h +++ b/trunk/drivers/scsi/be2iscsi/be.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2010 ServerEngines + * Copyright (C) 2005 - 2011 Emulex * All rights reserved.
* * This program is free software; you can redistribute it and/or @@ -8,11 +8,11 @@ * Public License is included in this distribution in the file called COPYING. * * Contact Information: - * linux-drivers@serverengines.com + * linux-drivers@emulex.com * - * ServerEngines - * 209 N. Fair Oaks Ave - * Sunnyvale, CA 94085 + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 */ #ifndef BEISCSI_H diff --git a/trunk/drivers/scsi/be2iscsi/be_cmds.c b/trunk/drivers/scsi/be2iscsi/be_cmds.c index ad246369d373..b8a82f2c62c8 100644 --- a/trunk/drivers/scsi/be2iscsi/be_cmds.c +++ b/trunk/drivers/scsi/be2iscsi/be_cmds.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2010 ServerEngines + * Copyright (C) 2005 - 2011 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -8,11 +8,11 @@ * Public License is included in this distribution in the file called COPYING. * * Contact Information: - * linux-drivers@serverengines.com + * linux-drivers@emulex.com * - * ServerEngines - * 209 N. Fair Oaks Ave - * Sunnyvale, CA 94085 + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 */ #include "be.h" @@ -458,6 +458,7 @@ void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, req_hdr->opcode = opcode; req_hdr->subsystem = subsystem; req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); + req_hdr->timeout = 120; } static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, diff --git a/trunk/drivers/scsi/be2iscsi/be_cmds.h b/trunk/drivers/scsi/be2iscsi/be_cmds.h index fbd1dc2c15f7..497eb29e5c9e 100644 --- a/trunk/drivers/scsi/be2iscsi/be_cmds.h +++ b/trunk/drivers/scsi/be2iscsi/be_cmds.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2010 ServerEngines + * Copyright (C) 2005 - 2011 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -8,11 +8,11 @@ * Public License is included in this distribution in the file called COPYING. * * Contact Information: - * linux-drivers@serverengines.com + * linux-drivers@emulex.com * - * ServerEngines - * 209 N. Fair Oaks Ave - * Sunnyvale, CA 94085 + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 */ #ifndef BEISCSI_CMDS_H diff --git a/trunk/drivers/scsi/be2iscsi/be_iscsi.c b/trunk/drivers/scsi/be2iscsi/be_iscsi.c index 868cc5590145..3cad10605023 100644 --- a/trunk/drivers/scsi/be2iscsi/be_iscsi.c +++ b/trunk/drivers/scsi/be2iscsi/be_iscsi.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2010 ServerEngines + * Copyright (C) 2005 - 2011 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -7,15 +7,14 @@ * as published by the Free Software Foundation. The full GNU General * Public License is included in this distribution in the file called COPYING. * - * Written by: Jayamohan Kallickal (jayamohank@serverengines.com) + * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) * * Contact Information: - * linux-drivers@serverengines.com - * - * ServerEngines - * 209 N. Fair Oaks Ave - * Sunnyvale, CA 94085 + * linux-drivers@emulex.com * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 */ #include diff --git a/trunk/drivers/scsi/be2iscsi/be_iscsi.h b/trunk/drivers/scsi/be2iscsi/be_iscsi.h index 9c532797c29e..ff60b7fd92d6 100644 --- a/trunk/drivers/scsi/be2iscsi/be_iscsi.h +++ b/trunk/drivers/scsi/be2iscsi/be_iscsi.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2010 ServerEngines + * Copyright (C) 2005 - 2011 Emulex * All rights reserved. 
* * This program is free software; you can redistribute it and/or @@ -7,15 +7,14 @@ * as published by the Free Software Foundation. The full GNU General * Public License is included in this distribution in the file called COPYING. * - * Written by: Jayamohan Kallickal (jayamohank@serverengines.com) + * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) * * Contact Information: - * linux-drivers@serverengines.com - * - * ServerEngines - * 209 N. Fair Oaks Ave - * Sunnyvale, CA 94085 + * linux-drivers@emulex.com * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 */ #ifndef _BE_ISCSI_ diff --git a/trunk/drivers/scsi/be2iscsi/be_main.c b/trunk/drivers/scsi/be2iscsi/be_main.c index 24e20ba9633c..cea9b275965c 100644 --- a/trunk/drivers/scsi/be2iscsi/be_main.c +++ b/trunk/drivers/scsi/be2iscsi/be_main.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2010 ServerEngines + * Copyright (C) 2005 - 2011 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -7,16 +7,16 @@ * as published by the Free Software Foundation. The full GNU General * Public License is included in this distribution in the file called COPYING. * - * Written by: Jayamohan Kallickal (jayamohank@serverengines.com) + * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) * * Contact Information: - * linux-drivers@serverengines.com - * - * ServerEngines - * 209 N. Fair Oaks Ave - * Sunnyvale, CA 94085 + * linux-drivers@emulex.com * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 */ + #include #include #include @@ -420,7 +420,8 @@ static int beiscsi_setup_boot_info(struct beiscsi_hba *phba) return 0; free_kset: - iscsi_boot_destroy_kset(phba->boot_kset); + if (phba->boot_kset) + iscsi_boot_destroy_kset(phba->boot_kset); return -ENOMEM; } @@ -3464,23 +3465,23 @@ static void hwi_enable_intr(struct beiscsi_hba *phba) addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); reg = ioread32(addr); - SE_DEBUG(DBG_LVL_8, "reg =x%08x\n", reg); enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; if (!enabled) { reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p\n", reg, addr); iowrite32(reg, addr); - if (!phba->msix_enabled) { - eq = &phwi_context->be_eq[0].q; + } + + if (!phba->msix_enabled) { + eq = &phwi_context->be_eq[0].q; + SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id); + hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); + } else { + for (i = 0; i <= phba->num_cpus; i++) { + eq = &phwi_context->be_eq[i].q; SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id); hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); - } else { - for (i = 0; i <= phba->num_cpus; i++) { - eq = &phwi_context->be_eq[i].q; - SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id); - hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); - } } } } @@ -4019,12 +4020,17 @@ static int beiscsi_mtask(struct iscsi_task *task) hwi_write_buffer(pwrb, task); break; case ISCSI_OP_NOOP_OUT: - AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, - INI_RD_CMD); - if (task->hdr->ttt == ISCSI_RESERVED_TAG) + if (task->hdr->ttt != ISCSI_RESERVED_TAG) { + AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, + TGT_DM_CMD); + AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, + pwrb, 0); AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); - else + } else { + AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, + INI_RD_CMD); AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1); + } hwi_write_buffer(pwrb, task); break; case ISCSI_OP_TEXT: @@ -4144,10 +4150,11 @@ static void beiscsi_remove(struct pci_dev 
*pcidev) phba->ctrl.mbox_mem_alloced.size, phba->ctrl.mbox_mem_alloced.va, phba->ctrl.mbox_mem_alloced.dma); + if (phba->boot_kset) + iscsi_boot_destroy_kset(phba->boot_kset); iscsi_host_remove(phba->shost); pci_dev_put(phba->pcidev); iscsi_host_free(phba->shost); - iscsi_boot_destroy_kset(phba->boot_kset); } static void beiscsi_msix_enable(struct beiscsi_hba *phba) diff --git a/trunk/drivers/scsi/be2iscsi/be_main.h b/trunk/drivers/scsi/be2iscsi/be_main.h index 90eb74f6bcab..081c171a1ed6 100644 --- a/trunk/drivers/scsi/be2iscsi/be_main.h +++ b/trunk/drivers/scsi/be2iscsi/be_main.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2010 ServerEngines + * Copyright (C) 2005 - 2011 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -7,15 +7,14 @@ * as published by the Free Software Foundation. The full GNU General * Public License is included in this distribution in the file called COPYING. * - * Written by: Jayamohan Kallickal (jayamohank@serverengines.com) + * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) * * Contact Information: - * linux-drivers@serverengines.com - * - * ServerEngines - * 209 N. Fair Oaks Ave - * Sunnyvale, CA 94085 + * linux-drivers@emulex.com * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 */ #ifndef _BEISCSI_MAIN_ @@ -35,7 +34,7 @@ #include "be.h" #define DRV_NAME "be2iscsi" -#define BUILD_STR "2.0.549.0" +#define BUILD_STR "2.103.298.0" #define BE_NAME "ServerEngines BladeEngine2" \ "Linux iSCSI Driver version" BUILD_STR #define DRV_DESC BE_NAME " " "Driver" diff --git a/trunk/drivers/scsi/be2iscsi/be_mgmt.c b/trunk/drivers/scsi/be2iscsi/be_mgmt.c index 877324fc594c..44762cfa3e12 100644 --- a/trunk/drivers/scsi/be2iscsi/be_mgmt.c +++ b/trunk/drivers/scsi/be2iscsi/be_mgmt.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2010 ServerEngines + * Copyright (C) 2005 - 2011 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -7,15 +7,14 @@ * as published by the Free Software Foundation. The full GNU General * Public License is included in this distribution in the file called COPYING. * - * Written by: Jayamohan Kallickal (jayamohank@serverengines.com) + * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) * * Contact Information: - * linux-drivers@serverengines.com - * - * ServerEngines - * 209 N. Fair Oaks Ave - * Sunnyvale, CA 94085 + * linux-drivers@emulex.com * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 */ #include "be_mgmt.h" @@ -203,8 +202,8 @@ int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute) OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req)); req->chute = chute; - req->hdr_ring_id = 0; - req->data_ring_id = 0; + req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba)); + req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba)); status = be_mcc_notify_wait(phba); if (status) diff --git a/trunk/drivers/scsi/be2iscsi/be_mgmt.h b/trunk/drivers/scsi/be2iscsi/be_mgmt.h index b9acedf78653..08428824ace2 100644 --- a/trunk/drivers/scsi/be2iscsi/be_mgmt.h +++ b/trunk/drivers/scsi/be2iscsi/be_mgmt.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2010 ServerEngines + * Copyright (C) 2005 - 2011 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -7,15 +7,14 @@ * as published by the Free Software Foundation. The full GNU General * Public License is included in this distribution in the file called COPYING. 
* - * Written by: Jayamohan Kallickal (jayamohank@serverengines.com) + * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) * * Contact Information: - * linux-drivers@serverengines.com - * - * ServerEngines - * 209 N. Fair Oaks Ave - * Sunnyvale, CA 94085 + * linux-drivers@emulex.com * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 */ #ifndef _BEISCSI_MGMT_ diff --git a/trunk/drivers/scsi/bfa/bfad.c b/trunk/drivers/scsi/bfa/bfad.c index 0fd510a01561..59b5e9b61d71 100644 --- a/trunk/drivers/scsi/bfa/bfad.c +++ b/trunk/drivers/scsi/bfa/bfad.c @@ -57,9 +57,19 @@ int pcie_max_read_reqsz; int bfa_debugfs_enable = 1; int msix_disable_cb = 0, msix_disable_ct = 0; +/* Firmware releated */ u32 bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size; u32 *bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc; +#define BFAD_FW_FILE_CT_FC "ctfw_fc.bin" +#define BFAD_FW_FILE_CT_CNA "ctfw_cna.bin" +#define BFAD_FW_FILE_CB_FC "cbfw_fc.bin" + +static u32 *bfad_load_fwimg(struct pci_dev *pdev); +static void bfad_free_fwimg(void); +static void bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, + u32 *bfi_image_size, char *fw_name); + static const char *msix_name_ct[] = { "cpe0", "cpe1", "cpe2", "cpe3", "rme0", "rme1", "rme2", "rme3", @@ -222,6 +232,9 @@ bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event) if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) { bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS); } else { + printk(KERN_WARNING + "bfa %s: bfa init failed\n", + bfad->pci_name); bfad->bfad_flags |= BFAD_HAL_INIT_FAIL; bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED); } @@ -991,10 +1004,6 @@ bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role) bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM; } - /* Setup the debugfs node for this scsi_host */ - if (bfa_debugfs_enable) - bfad_debugfs_init(&bfad->pport); - bfad->bfad_flags |= BFAD_CFG_PPORT_DONE; out: @@ -1004,10 +1013,6 @@ bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role) void bfad_uncfg_pport(struct bfad_s *bfad) { - /* Remove the debugfs node for this scsi_host */ - kfree(bfad->regdata); - bfad_debugfs_exit(&bfad->pport); - if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) && (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) { bfad_im_scsi_host_free(bfad, bfad->pport.im_port); @@ -1389,6 +1394,10 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) bfad->pport.bfad = bfad; INIT_LIST_HEAD(&bfad->pbc_vport_list); + /* Setup the debugfs node for this bfad */ + if (bfa_debugfs_enable) + bfad_debugfs_init(&bfad->pport); + retval = bfad_drv_init(bfad); if (retval != BFA_STATUS_OK) goto out_drv_init_failure; @@ -1404,6 +1413,9 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) bfa_detach(&bfad->bfa); bfad_hal_mem_release(bfad); out_drv_init_failure: + /* Remove the debugfs node for this bfad */ + kfree(bfad->regdata); + bfad_debugfs_exit(&bfad->pport); mutex_lock(&bfad_mutex); bfad_inst--; list_del(&bfad->list_entry); @@ -1445,6 +1457,10 @@ bfad_pci_remove(struct pci_dev *pdev) spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfad_hal_mem_release(bfad); + /* Remove the debugfs node for this bfad */ + kfree(bfad->regdata); + bfad_debugfs_exit(&bfad->pport); + /* Cleaning the BFAD instance */ mutex_lock(&bfad_mutex); bfad_inst--; @@ -1550,7 +1566,7 @@ bfad_exit(void) } /* Firmware handling */ -u32 * +static void bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, u32 *bfi_image_size, char *fw_name) { @@ -1558,27 +1574,25 @@ bfad_read_firmware(struct pci_dev *pdev, u32 
**bfi_image, if (request_firmware(&fw, fw_name, &pdev->dev)) { printk(KERN_ALERT "Can't locate firmware %s\n", fw_name); - goto error; + *bfi_image = NULL; + goto out; } *bfi_image = vmalloc(fw->size); if (NULL == *bfi_image) { printk(KERN_ALERT "Fail to allocate buffer for fw image " "size=%x!\n", (u32) fw->size); - goto error; + goto out; } memcpy(*bfi_image, fw->data, fw->size); *bfi_image_size = fw->size/sizeof(u32); - - return *bfi_image; - -error: - return NULL; +out: + release_firmware(fw); } -u32 * -bfad_get_firmware_buf(struct pci_dev *pdev) +static u32 * +bfad_load_fwimg(struct pci_dev *pdev) { if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) { if (bfi_image_ct_fc_size == 0) @@ -1598,6 +1612,17 @@ bfad_get_firmware_buf(struct pci_dev *pdev) } } +static void +bfad_free_fwimg(void) +{ + if (bfi_image_ct_fc_size && bfi_image_ct_fc) + vfree(bfi_image_ct_fc); + if (bfi_image_ct_cna_size && bfi_image_ct_cna) + vfree(bfi_image_ct_cna); + if (bfi_image_cb_fc_size && bfi_image_cb_fc) + vfree(bfi_image_cb_fc); +} + module_init(bfad_init); module_exit(bfad_exit); MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/scsi/bfa/bfad_debugfs.c b/trunk/drivers/scsi/bfa/bfad_debugfs.c index c66e32eced7b..48be0c54f2de 100644 --- a/trunk/drivers/scsi/bfa/bfad_debugfs.c +++ b/trunk/drivers/scsi/bfa/bfad_debugfs.c @@ -28,10 +28,10 @@ * mount -t debugfs none /sys/kernel/debug * * BFA Hierarchy: - * - bfa/host# - * where the host number corresponds to the one under /sys/class/scsi_host/host# + * - bfa/pci_dev: + * where the pci_name corresponds to the one under /sys/bus/pci/drivers/bfa * - * Debugging service available per host: + * Debugging service available per pci_dev: * fwtrc: To collect current firmware trace. * drvtrc: To collect current driver trace * fwsave: To collect last saved fw trace as a result of firmware crash. @@ -489,11 +489,9 @@ static atomic_t bfa_debugfs_port_count; inline void bfad_debugfs_init(struct bfad_port_s *port) { - struct bfad_im_port_s *im_port = port->im_port; - struct bfad_s *bfad = im_port->bfad; - struct Scsi_Host *shost = im_port->shost; + struct bfad_s *bfad = port->bfad; const struct bfad_debugfs_entry *file; - char name[16]; + char name[64]; int i; if (!bfa_debugfs_enable) @@ -510,17 +508,15 @@ bfad_debugfs_init(struct bfad_port_s *port) } } - /* - * Setup the host# directory for the port, - * corresponds to the scsi_host num of this port. 
- */ - snprintf(name, sizeof(name), "host%d", shost->host_no); + /* Setup the pci_dev debugfs directory for the port */ + snprintf(name, sizeof(name), "pci_dev:%s", bfad->pci_name); if (!port->port_debugfs_root) { port->port_debugfs_root = debugfs_create_dir(name, bfa_debugfs_root); if (!port->port_debugfs_root) { printk(KERN_WARNING - "BFA host root dir creation failed\n"); + "bfa %s: debugfs root creation failed\n", + bfad->pci_name); goto err; } @@ -536,8 +532,8 @@ bfad_debugfs_init(struct bfad_port_s *port) file->fops); if (!bfad->bfad_dentry_files[i]) { printk(KERN_WARNING - "BFA host%d: create %s entry failed\n", - shost->host_no, file->name); + "bfa %s: debugfs %s creation failed\n", + bfad->pci_name, file->name); goto err; } } @@ -550,8 +546,7 @@ bfad_debugfs_init(struct bfad_port_s *port) inline void bfad_debugfs_exit(struct bfad_port_s *port) { - struct bfad_im_port_s *im_port = port->im_port; - struct bfad_s *bfad = im_port->bfad; + struct bfad_s *bfad = port->bfad; int i; for (i = 0; i < ARRAY_SIZE(bfad_debugfs_files); i++) { @@ -562,9 +557,7 @@ bfad_debugfs_exit(struct bfad_port_s *port) } /* - * Remove the host# directory for the port, - * corresponds to the scsi_host num of this port. - */ + * Remove the pci_dev debugfs directory for the port */ if (port->port_debugfs_root) { debugfs_remove(port->port_debugfs_root); port->port_debugfs_root = NULL; diff --git a/trunk/drivers/scsi/bfa/bfad_im.h b/trunk/drivers/scsi/bfa/bfad_im.h index bfee63b16fa9..c296c8968511 100644 --- a/trunk/drivers/scsi/bfa/bfad_im.h +++ b/trunk/drivers/scsi/bfa/bfad_im.h @@ -141,29 +141,4 @@ extern struct device_attribute *bfad_im_vport_attrs[]; irqreturn_t bfad_intx(int irq, void *dev_id); -/* Firmware releated */ -#define BFAD_FW_FILE_CT_FC "ctfw_fc.bin" -#define BFAD_FW_FILE_CT_CNA "ctfw_cna.bin" -#define BFAD_FW_FILE_CB_FC "cbfw_fc.bin" - -u32 *bfad_get_firmware_buf(struct pci_dev *pdev); -u32 *bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, - u32 *bfi_image_size, char *fw_name); - -static inline u32 * -bfad_load_fwimg(struct pci_dev *pdev) -{ - return bfad_get_firmware_buf(pdev); -} - -static inline void -bfad_free_fwimg(void) -{ - if (bfi_image_ct_fc_size && bfi_image_ct_fc) - vfree(bfi_image_ct_fc); - if (bfi_image_ct_cna_size && bfi_image_ct_cna) - vfree(bfi_image_ct_cna); - if (bfi_image_cb_fc_size && bfi_image_cb_fc) - vfree(bfi_image_cb_fc); -} #endif diff --git a/trunk/drivers/scsi/bnx2fc/bnx2fc.h b/trunk/drivers/scsi/bnx2fc/bnx2fc.h index b6d350ac4288..0a404bfb44fe 100644 --- a/trunk/drivers/scsi/bnx2fc/bnx2fc.h +++ b/trunk/drivers/scsi/bnx2fc/bnx2fc.h @@ -130,7 +130,7 @@ #define BNX2FC_TM_TIMEOUT 60 /* secs */ #define BNX2FC_IO_TIMEOUT 20000UL /* msecs */ -#define BNX2FC_WAIT_CNT 120 +#define BNX2FC_WAIT_CNT 1200 #define BNX2FC_FW_TIMEOUT (3 * HZ) #define PORT_MAX 2 diff --git a/trunk/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/trunk/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index cd050196a163..ab255fbc7f36 100644 --- a/trunk/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/trunk/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -1133,7 +1133,7 @@ static void bnx2fc_interface_release(struct kref *kref) struct net_device *phys_dev; hba = container_of(kref, struct bnx2fc_hba, kref); - BNX2FC_HBA_DBG(hba->ctlr.lp, "Interface is being released\n"); + BNX2FC_MISC_DBG("Interface is being released\n"); netdev = hba->netdev; phys_dev = hba->phys_dev; @@ -1257,20 +1257,17 @@ static int bnx2fc_interface_setup(struct bnx2fc_hba *hba, static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba, struct device *parent, int 
npiv) { - struct fc_lport *lport = NULL; + struct fc_lport *lport, *n_port; struct fcoe_port *port; struct Scsi_Host *shost; struct fc_vport *vport = dev_to_vport(parent); int rc = 0; /* Allocate Scsi_Host structure */ - if (!npiv) { - lport = libfc_host_alloc(&bnx2fc_shost_template, - sizeof(struct fcoe_port)); - } else { - lport = libfc_vport_create(vport, - sizeof(struct fcoe_port)); - } + if (!npiv) + lport = libfc_host_alloc(&bnx2fc_shost_template, sizeof(*port)); + else + lport = libfc_vport_create(vport, sizeof(*port)); if (!lport) { printk(KERN_ERR PFX "could not allocate scsi host structure\n"); @@ -1288,7 +1285,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba, goto lp_config_err; if (npiv) { - vport = dev_to_vport(parent); printk(KERN_ERR PFX "Setting vport names, 0x%llX 0x%llX\n", vport->node_name, vport->port_name); fc_set_wwnn(lport, vport->node_name); @@ -1317,12 +1313,17 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba, fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; /* Allocate exchange manager */ - if (!npiv) { + if (!npiv) rc = bnx2fc_em_config(lport); - if (rc) { - printk(KERN_ERR PFX "Error on bnx2fc_em_config\n"); - goto shost_err; - } + else { + shost = vport_to_shost(vport); + n_port = shost_priv(shost); + rc = fc_exch_mgr_list_clone(n_port, lport); + } + + if (rc) { + printk(KERN_ERR PFX "Error on bnx2fc_em_config\n"); + goto shost_err; } bnx2fc_interface_get(hba); @@ -1355,8 +1356,6 @@ static void bnx2fc_if_destroy(struct fc_lport *lport) /* Free existing transmit skbs */ fcoe_clean_pending_queue(lport); - bnx2fc_interface_put(hba); - /* Free queued packets for the receive thread */ bnx2fc_clean_rx_queue(lport); @@ -1375,6 +1374,8 @@ static void bnx2fc_if_destroy(struct fc_lport *lport) /* Release Scsi_Host */ scsi_host_put(lport->host); + + bnx2fc_interface_put(hba); } /** diff --git a/trunk/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/trunk/drivers/scsi/bnx2fc/bnx2fc_hwi.c index 1b680e288c56..f756d5f85c7a 100644 --- a/trunk/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/trunk/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -522,6 +522,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt, fp = fc_frame_alloc(lport, payload_len); if (!fp) { printk(KERN_ERR PFX "fc_frame_alloc failure\n"); + kfree(unsol_els); return; } @@ -547,6 +548,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt, */ printk(KERN_ERR PFX "dropping ELS 0x%x\n", op); kfree_skb(skb); + kfree(unsol_els); return; } } @@ -563,6 +565,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt, } else { BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl); kfree_skb(skb); + kfree(unsol_els); } } diff --git a/trunk/drivers/scsi/bnx2fc/bnx2fc_io.c b/trunk/drivers/scsi/bnx2fc/bnx2fc_io.c index 1decefbf32e3..b5b5c346d779 100644 --- a/trunk/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/trunk/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -1663,6 +1663,12 @@ int bnx2fc_queuecommand(struct Scsi_Host *host, tgt = (struct bnx2fc_rport *)&rp[1]; if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { + if (test_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags)) { + sc_cmd->result = DID_NO_CONNECT << 16; + sc_cmd->scsi_done(sc_cmd); + return 0; + + } /* * Session is not offloaded yet. Let SCSI-ml retry * the command. 
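One functional change in the bnx2fc_queuecommand() hunk just above: if the target session's upload has already completed, the command is finished on the spot with DID_NO_CONNECT rather than being handed back to the SCSI midlayer for retry, since an uploaded session will not come back. A minimal sketch of that decision, with hypothetical types and flag fields standing in for the driver's tgt->flags bits:

#include <stdbool.h>

#define DID_NO_CONNECT_RESULT	(0x01 << 16)	/* host byte DID_NO_CONNECT */
#define QUEUE_ACCEPTED		0		/* command taken (or completed) */
#define QUEUE_RETRY_LATER	1		/* stand-in for a midlayer-busy return */

struct demo_tgt {
	bool session_ready;	/* offload finished, I/O may be issued */
	bool upload_done;	/* session torn down and gone for good */
};

struct demo_cmd {
	int result;
	void (*done)(struct demo_cmd *cmd);
};

static int demo_queuecommand(struct demo_tgt *tgt, struct demo_cmd *cmd)
{
	if (!tgt->session_ready) {
		if (tgt->upload_done) {
			/* Session is gone: complete now so the midlayer
			 * does not keep retrying a dead path. */
			cmd->result = DID_NO_CONNECT_RESULT;
			cmd->done(cmd);
			return QUEUE_ACCEPTED;
		}
		/* Offload still in progress: let the midlayer retry. */
		return QUEUE_RETRY_LATER;
	}
	/* ... normal issue path ... */
	return QUEUE_ACCEPTED;
}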
diff --git a/trunk/drivers/scsi/constants.c b/trunk/drivers/scsi/constants.c index d0c82340f0e2..60d2ef291646 100644 --- a/trunk/drivers/scsi/constants.c +++ b/trunk/drivers/scsi/constants.c @@ -772,6 +772,7 @@ static const struct error_info additional[] = {0x3802, "Esn - power management class event"}, {0x3804, "Esn - media class event"}, {0x3806, "Esn - device busy class event"}, + {0x3807, "Thin Provisioning soft threshold reached"}, {0x3900, "Saving parameters not supported"}, diff --git a/trunk/drivers/scsi/dc395x.c b/trunk/drivers/scsi/dc395x.c index b10b3841535c..f5b718d3c31b 100644 --- a/trunk/drivers/scsi/dc395x.c +++ b/trunk/drivers/scsi/dc395x.c @@ -778,8 +778,8 @@ static void srb_free_insert(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) static void srb_waiting_insert(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb) { - dprintkdbg(DBG_0, "srb_waiting_insert: (pid#%li) <%02i-%i> srb=%p\n", - srb->cmd->serial_number, dcb->target_id, dcb->target_lun, srb); + dprintkdbg(DBG_0, "srb_waiting_insert: (0x%p) <%02i-%i> srb=%p\n", + srb->cmd, dcb->target_id, dcb->target_lun, srb); list_add(&srb->list, &dcb->srb_waiting_list); } @@ -787,16 +787,16 @@ static void srb_waiting_insert(struct DeviceCtlBlk *dcb, static void srb_waiting_append(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb) { - dprintkdbg(DBG_0, "srb_waiting_append: (pid#%li) <%02i-%i> srb=%p\n", - srb->cmd->serial_number, dcb->target_id, dcb->target_lun, srb); + dprintkdbg(DBG_0, "srb_waiting_append: (0x%p) <%02i-%i> srb=%p\n", + srb->cmd, dcb->target_id, dcb->target_lun, srb); list_add_tail(&srb->list, &dcb->srb_waiting_list); } static void srb_going_append(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb) { - dprintkdbg(DBG_0, "srb_going_append: (pid#%li) <%02i-%i> srb=%p\n", - srb->cmd->serial_number, dcb->target_id, dcb->target_lun, srb); + dprintkdbg(DBG_0, "srb_going_append: (0x%p) <%02i-%i> srb=%p\n", + srb->cmd, dcb->target_id, dcb->target_lun, srb); list_add_tail(&srb->list, &dcb->srb_going_list); } @@ -805,8 +805,8 @@ static void srb_going_remove(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb) { struct ScsiReqBlk *i; struct ScsiReqBlk *tmp; - dprintkdbg(DBG_0, "srb_going_remove: (pid#%li) <%02i-%i> srb=%p\n", - srb->cmd->serial_number, dcb->target_id, dcb->target_lun, srb); + dprintkdbg(DBG_0, "srb_going_remove: (0x%p) <%02i-%i> srb=%p\n", + srb->cmd, dcb->target_id, dcb->target_lun, srb); list_for_each_entry_safe(i, tmp, &dcb->srb_going_list, list) if (i == srb) { @@ -821,8 +821,8 @@ static void srb_waiting_remove(struct DeviceCtlBlk *dcb, { struct ScsiReqBlk *i; struct ScsiReqBlk *tmp; - dprintkdbg(DBG_0, "srb_waiting_remove: (pid#%li) <%02i-%i> srb=%p\n", - srb->cmd->serial_number, dcb->target_id, dcb->target_lun, srb); + dprintkdbg(DBG_0, "srb_waiting_remove: (0x%p) <%02i-%i> srb=%p\n", + srb->cmd, dcb->target_id, dcb->target_lun, srb); list_for_each_entry_safe(i, tmp, &dcb->srb_waiting_list, list) if (i == srb) { @@ -836,8 +836,8 @@ static void srb_going_to_waiting_move(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb) { dprintkdbg(DBG_0, - "srb_going_to_waiting_move: (pid#%li) <%02i-%i> srb=%p\n", - srb->cmd->serial_number, dcb->target_id, dcb->target_lun, srb); + "srb_going_to_waiting_move: (0x%p) <%02i-%i> srb=%p\n", + srb->cmd, dcb->target_id, dcb->target_lun, srb); list_move(&srb->list, &dcb->srb_waiting_list); } @@ -846,8 +846,8 @@ static void srb_waiting_to_going_move(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb) { dprintkdbg(DBG_0, - "srb_waiting_to_going_move: (pid#%li) <%02i-%i> srb=%p\n", 
- srb->cmd->serial_number, dcb->target_id, dcb->target_lun, srb); + "srb_waiting_to_going_move: (0x%p) <%02i-%i> srb=%p\n", + srb->cmd, dcb->target_id, dcb->target_lun, srb); list_move(&srb->list, &dcb->srb_going_list); } @@ -982,8 +982,8 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb, { int nseg; enum dma_data_direction dir = cmd->sc_data_direction; - dprintkdbg(DBG_0, "build_srb: (pid#%li) <%02i-%i>\n", - cmd->serial_number, dcb->target_id, dcb->target_lun); + dprintkdbg(DBG_0, "build_srb: (0x%p) <%02i-%i>\n", + cmd, dcb->target_id, dcb->target_lun); srb->dcb = dcb; srb->cmd = cmd; @@ -1086,8 +1086,8 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s struct ScsiReqBlk *srb; struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)cmd->device->host->hostdata; - dprintkdbg(DBG_0, "queue_command: (pid#%li) <%02i-%i> cmnd=0x%02x\n", - cmd->serial_number, cmd->device->id, cmd->device->lun, cmd->cmnd[0]); + dprintkdbg(DBG_0, "queue_command: (0x%p) <%02i-%i> cmnd=0x%02x\n", + cmd, cmd->device->id, cmd->device->lun, cmd->cmnd[0]); /* Assume BAD_TARGET; will be cleared later */ cmd->result = DID_BAD_TARGET << 16; @@ -1140,7 +1140,7 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s /* process immediately */ send_srb(acb, srb); } - dprintkdbg(DBG_1, "queue_command: (pid#%li) done\n", cmd->serial_number); + dprintkdbg(DBG_1, "queue_command: (0x%p) done\n", cmd); return 0; complete: @@ -1203,9 +1203,9 @@ static void dump_register_info(struct AdapterCtlBlk *acb, dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n", srb, srb->cmd); else - dprintkl(KERN_INFO, "dump: srb=%p cmd=%p (pid#%li) " + dprintkl(KERN_INFO, "dump: srb=%p cmd=%p " "cmnd=0x%02x <%02i-%i>\n", - srb, srb->cmd, srb->cmd->serial_number, + srb, srb->cmd, srb->cmd->cmnd[0], srb->cmd->device->id, srb->cmd->device->lun); printk(" sglist=%p cnt=%i idx=%i len=%zu\n", @@ -1301,8 +1301,8 @@ static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd) struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)cmd->device->host->hostdata; dprintkl(KERN_INFO, - "eh_bus_reset: (pid#%li) target=<%02i-%i> cmd=%p\n", - cmd->serial_number, cmd->device->id, cmd->device->lun, cmd); + "eh_bus_reset: (0x%p) target=<%02i-%i> cmd=%p\n", + cmd, cmd->device->id, cmd->device->lun, cmd); if (timer_pending(&acb->waiting_timer)) del_timer(&acb->waiting_timer); @@ -1368,8 +1368,8 @@ static int dc395x_eh_abort(struct scsi_cmnd *cmd) (struct AdapterCtlBlk *)cmd->device->host->hostdata; struct DeviceCtlBlk *dcb; struct ScsiReqBlk *srb; - dprintkl(KERN_INFO, "eh_abort: (pid#%li) target=<%02i-%i> cmd=%p\n", - cmd->serial_number, cmd->device->id, cmd->device->lun, cmd); + dprintkl(KERN_INFO, "eh_abort: (0x%p) target=<%02i-%i> cmd=%p\n", + cmd, cmd->device->id, cmd->device->lun, cmd); dcb = find_dcb(acb, cmd->device->id, cmd->device->lun); if (!dcb) { @@ -1495,8 +1495,8 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, u16 s_stat2, return_code; u8 s_stat, scsicommand, i, identify_message; u8 *ptr; - dprintkdbg(DBG_0, "start_scsi: (pid#%li) <%02i-%i> srb=%p\n", - srb->cmd->serial_number, dcb->target_id, dcb->target_lun, srb); + dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> srb=%p\n", + srb->cmd, dcb->target_id, dcb->target_lun, srb); srb->tag_number = TAG_NONE; /* acb->tag_max_num: had error read in eeprom */ @@ -1505,8 +1505,8 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS); #if 1 if (s_stat & 0x20 /*
s_stat2 & 0x02000 */ ) { - dprintkdbg(DBG_KG, "start_scsi: (pid#%li) BUSY %02x %04x\n", - srb->cmd->serial_number, s_stat, s_stat2); + dprintkdbg(DBG_KG, "start_scsi: (0x%p) BUSY %02x %04x\n", + srb->cmd, s_stat, s_stat2); /* * Try anyway? * @@ -1522,16 +1522,15 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, } #endif if (acb->active_dcb) { - dprintkl(KERN_DEBUG, "start_scsi: (pid#%li) Attempt to start a" - "command while another command (pid#%li) is active.", - srb->cmd->serial_number, + dprintkl(KERN_DEBUG, "start_scsi: (0x%p) Attempt to start a" + "command while another command (0x%p) is active.", + srb->cmd, acb->active_dcb->active_srb ? - acb->active_dcb->active_srb->cmd->serial_number : 0); + acb->active_dcb->active_srb->cmd : 0); return 1; } if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) { - dprintkdbg(DBG_KG, "start_scsi: (pid#%li) Failed (busy)\n", - srb->cmd->serial_number); + dprintkdbg(DBG_KG, "start_scsi: (0x%p) Failed (busy)\n", srb->cmd); return 1; } /* Allow starting of SCSI commands half a second before we allow the mid-level @@ -1603,9 +1602,9 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, tag_number++; } if (tag_number >= dcb->max_command) { - dprintkl(KERN_WARNING, "start_scsi: (pid#%li) " + dprintkl(KERN_WARNING, "start_scsi: (0x%p) " "Out of tags target=<%02i-%i>)\n", - srb->cmd->serial_number, srb->cmd->device->id, + srb->cmd, srb->cmd->device->id, srb->cmd->device->lun); srb->state = SRB_READY; DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, @@ -1623,8 +1622,8 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, #endif /*polling:*/ /* Send CDB ..command block ......... */ - dprintkdbg(DBG_KG, "start_scsi: (pid#%li) <%02i-%i> cmnd=0x%02x tag=%i\n", - srb->cmd->serial_number, srb->cmd->device->id, srb->cmd->device->lun, + dprintkdbg(DBG_KG, "start_scsi: (0x%p) <%02i-%i> cmnd=0x%02x tag=%i\n", + srb->cmd, srb->cmd->device->id, srb->cmd->device->lun, srb->cmd->cmnd[0], srb->tag_number); if (srb->flag & AUTO_REQSENSE) { DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE); @@ -1647,8 +1646,8 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, * we caught an interrupt (must be reset or reselection ... ) * : Let's process it first! */ - dprintkdbg(DBG_0, "start_scsi: (pid#%li) <%02i-%i> Failed - busy\n", - srb->cmd->serial_number, dcb->target_id, dcb->target_lun); + dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> Failed - busy\n", + srb->cmd, dcb->target_id, dcb->target_lun); srb->state = SRB_READY; free_tag(dcb, srb); srb->msg_count = 0; @@ -1843,7 +1842,7 @@ static irqreturn_t dc395x_interrupt(int irq, void *dev_id) static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { - dprintkdbg(DBG_0, "msgout_phase0: (pid#%li)\n", srb->cmd->serial_number); + dprintkdbg(DBG_0, "msgout_phase0: (0x%p)\n", srb->cmd); if (srb->state & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT)) *pscsi_status = PH_BUS_FREE; /*.. initial phase */ @@ -1857,18 +1856,18 @@ static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, { u16 i; u8 *ptr; - dprintkdbg(DBG_0, "msgout_phase1: (pid#%li)\n", srb->cmd->serial_number); + dprintkdbg(DBG_0, "msgout_phase1: (0x%p)\n", srb->cmd); clear_fifo(acb, "msgout_phase1"); if (!(srb->state & SRB_MSGOUT)) { srb->state |= SRB_MSGOUT; dprintkl(KERN_DEBUG, - "msgout_phase1: (pid#%li) Phase unexpected\n", - srb->cmd->serial_number); /* So what ?
*/ + "msgout_phase1: (0x%p) Phase unexpected\n", + srb->cmd); /* So what ? */ } if (!srb->msg_count) { - dprintkdbg(DBG_0, "msgout_phase1: (pid#%li) NOP msg\n", - srb->cmd->serial_number); + dprintkdbg(DBG_0, "msgout_phase1: (0x%p) NOP msg\n", + srb->cmd); DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_NOP); DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT); @@ -1888,7 +1887,7 @@ static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { - dprintkdbg(DBG_0, "command_phase0: (pid#%li)\n", srb->cmd->serial_number); + dprintkdbg(DBG_0, "command_phase0: (0x%p)\n", srb->cmd); DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); } @@ -1899,7 +1898,7 @@ static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, struct DeviceCtlBlk *dcb; u8 *ptr; u16 i; - dprintkdbg(DBG_0, "command_phase1: (pid#%li)\n", srb->cmd->serial_number); + dprintkdbg(DBG_0, "command_phase1: (0x%p)\n", srb->cmd); clear_fifo(acb, "command_phase1"); DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRATN); @@ -2041,8 +2040,8 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, struct DeviceCtlBlk *dcb = srb->dcb; u16 scsi_status = *pscsi_status; u32 d_left_counter = 0; - dprintkdbg(DBG_0, "data_out_phase0: (pid#%li) <%02i-%i>\n", - srb->cmd->serial_number, srb->cmd->device->id, srb->cmd->device->lun); + dprintkdbg(DBG_0, "data_out_phase0: (0x%p) <%02i-%i>\n", + srb->cmd, srb->cmd->device->id, srb->cmd->device->lun); /* * KG: We need to drain the buffers before we draw any conclusions! @@ -2171,8 +2170,8 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { - dprintkdbg(DBG_0, "data_out_phase1: (pid#%li) <%02i-%i>\n", - srb->cmd->serial_number, srb->cmd->device->id, srb->cmd->device->lun); + dprintkdbg(DBG_0, "data_out_phase1: (0x%p) <%02i-%i>\n", + srb->cmd, srb->cmd->device->id, srb->cmd->device->lun); clear_fifo(acb, "data_out_phase1"); /* do prepare before transfer when data out phase */ data_io_transfer(acb, srb, XFERDATAOUT); @@ -2183,8 +2182,8 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, { u16 scsi_status = *pscsi_status; - dprintkdbg(DBG_0, "data_in_phase0: (pid#%li) <%02i-%i>\n", - srb->cmd->serial_number, srb->cmd->device->id, srb->cmd->device->lun); + dprintkdbg(DBG_0, "data_in_phase0: (0x%p) <%02i-%i>\n", + srb->cmd, srb->cmd->device->id, srb->cmd->device->lun); /* * KG: DataIn is much more tricky than DataOut. 
When the device is finished @@ -2204,8 +2203,8 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, unsigned int sc, fc; if (scsi_status & PARITYERROR) { - dprintkl(KERN_INFO, "data_in_phase0: (pid#%li) " - "Parity Error\n", srb->cmd->serial_number); + dprintkl(KERN_INFO, "data_in_phase0: (0x%p) " + "Parity Error\n", srb->cmd); srb->status |= PARITY_ERROR; } /* @@ -2394,8 +2393,8 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { - dprintkdbg(DBG_0, "data_in_phase1: (pid#%li) <%02i-%i>\n", - srb->cmd->serial_number, srb->cmd->device->id, srb->cmd->device->lun); + dprintkdbg(DBG_0, "data_in_phase1: (0x%p) <%02i-%i>\n", + srb->cmd, srb->cmd->device->id, srb->cmd->device->lun); data_io_transfer(acb, srb, XFERDATAIN); } @@ -2406,8 +2405,8 @@ static void data_io_transfer(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb = srb->dcb; u8 bval; dprintkdbg(DBG_0, - "data_io_transfer: (pid#%li) <%02i-%i> %c len=%i, sg=(%i/%i)\n", - srb->cmd->serial_number, srb->cmd->device->id, srb->cmd->device->lun, + "data_io_transfer: (0x%p) <%02i-%i> %c len=%i, sg=(%i/%i)\n", + srb->cmd, srb->cmd->device->id, srb->cmd->device->lun, ((io_dir & DMACMD_DIR) ? 'r' : 'w'), srb->total_xfer_length, srb->sg_index, srb->sg_count); if (srb == acb->tmp_srb) @@ -2579,8 +2578,8 @@ static void data_io_transfer(struct AdapterCtlBlk *acb, static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { - dprintkdbg(DBG_0, "status_phase0: (pid#%li) <%02i-%i>\n", - srb->cmd->serial_number, srb->cmd->device->id, srb->cmd->device->lun); + dprintkdbg(DBG_0, "status_phase0: (0x%p) <%02i-%i>\n", + srb->cmd, srb->cmd->device->id, srb->cmd->device->lun); srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); /* get message */ srb->state = SRB_COMPLETED; @@ -2593,8 +2592,8 @@ static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { - dprintkdbg(DBG_0, "status_phase1: (pid#%li) <%02i-%i>\n", - srb->cmd->serial_number, srb->cmd->device->id, srb->cmd->device->lun); + dprintkdbg(DBG_0, "status_phase1: (0x%p) <%02i-%i>\n", + srb->cmd, srb->cmd->device->id, srb->cmd->device->lun); srb->state = SRB_STATUS; DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP); @@ -2635,8 +2634,8 @@ static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb, { struct ScsiReqBlk *srb = NULL; struct ScsiReqBlk *i; - dprintkdbg(DBG_0, "msgin_qtag: (pid#%li) tag=%i srb=%p\n", - srb->cmd->serial_number, tag, srb); + dprintkdbg(DBG_0, "msgin_qtag: (0x%p) tag=%i srb=%p\n", + srb->cmd, tag, srb); if (!(dcb->tag_mask & (1 << tag))) dprintkl(KERN_DEBUG, @@ -2654,8 +2653,8 @@ static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb, if (!srb) goto mingx0; - dprintkdbg(DBG_0, "msgin_qtag: (pid#%li) <%02i-%i>\n", - srb->cmd->serial_number, srb->dcb->target_id, srb->dcb->target_lun); + dprintkdbg(DBG_0, "msgin_qtag: (0x%p) <%02i-%i>\n", + srb->cmd, srb->dcb->target_id, srb->dcb->target_lun); if (dcb->flag & ABORT_DEV_) { /*srb->state = SRB_ABORT_SENT; */ enable_msgout_abort(acb, srb); @@ -2865,7 +2864,7 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { struct 
DeviceCtlBlk *dcb = acb->active_dcb; - dprintkdbg(DBG_0, "msgin_phase0: (pid#%li)\n", srb->cmd->serial_number); + dprintkdbg(DBG_0, "msgin_phase0: (0x%p)\n", srb->cmd); srb->msgin_buf[acb->msg_len++] = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); if (msgin_completed(srb->msgin_buf, acb->msg_len)) { @@ -2931,9 +2930,9 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, * SAVE POINTER may be ignored as we have the struct * ScsiReqBlk* associated with the scsi command. */ - dprintkdbg(DBG_0, "msgin_phase0: (pid#%li) " + dprintkdbg(DBG_0, "msgin_phase0: (0x%p) " "SAVE POINTER rem=%i Ignore\n", - srb->cmd->serial_number, srb->total_xfer_length); + srb->cmd, srb->total_xfer_length); break; case RESTORE_POINTERS: @@ -2941,9 +2940,9 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, break; case ABORT: - dprintkdbg(DBG_0, "msgin_phase0: (pid#%li) " + dprintkdbg(DBG_0, "msgin_phase0: (0x%p) " "<%02i-%i> ABORT msg\n", - srb->cmd->serial_number, dcb->target_id, + srb->cmd, dcb->target_id, dcb->target_lun); dcb->flag |= ABORT_DEV_; enable_msgout_abort(acb, srb); @@ -2975,7 +2974,7 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { - dprintkdbg(DBG_0, "msgin_phase1: (pid#%li)\n", srb->cmd->serial_number); + dprintkdbg(DBG_0, "msgin_phase1: (0x%p)\n", srb->cmd); clear_fifo(acb, "msgin_phase1"); DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1); if (!(srb->state & SRB_MSGIN)) { @@ -3041,7 +3040,7 @@ static void disconnect(struct AdapterCtlBlk *acb) } srb = dcb->active_srb; acb->active_dcb = NULL; - dprintkdbg(DBG_0, "disconnect: (pid#%li)\n", srb->cmd->serial_number); + dprintkdbg(DBG_0, "disconnect: (0x%p)\n", srb->cmd); srb->scsi_phase = PH_BUS_FREE; /* initial phase */ clear_fifo(acb, "disconnect"); @@ -3071,14 +3070,14 @@ static void disconnect(struct AdapterCtlBlk *acb) && srb->state != SRB_MSGOUT) { srb->state = SRB_READY; dprintkl(KERN_DEBUG, - "disconnect: (pid#%li) Unexpected\n", - srb->cmd->serial_number); + "disconnect: (0x%p) Unexpected\n", + srb->cmd); srb->target_status = SCSI_STAT_SEL_TIMEOUT; goto disc1; } else { /* Normal selection timeout */ - dprintkdbg(DBG_KG, "disconnect: (pid#%li) " - "<%02i-%i> SelTO\n", srb->cmd->serial_number, + dprintkdbg(DBG_KG, "disconnect: (0x%p) " + "<%02i-%i> SelTO\n", srb->cmd, dcb->target_id, dcb->target_lun); if (srb->retry_count++ > DC395x_MAX_RETRIES || acb->scan_devices) { @@ -3089,8 +3088,8 @@ static void disconnect(struct AdapterCtlBlk *acb) free_tag(dcb, srb); srb_going_to_waiting_move(dcb, srb); dprintkdbg(DBG_KG, - "disconnect: (pid#%li) Retry\n", - srb->cmd->serial_number); + "disconnect: (0x%p) Retry\n", + srb->cmd); waiting_set_timer(acb, HZ / 20); } } else if (srb->state & SRB_DISCONNECT) { @@ -3142,9 +3141,9 @@ static void reselect(struct AdapterCtlBlk *acb) } /* Why the if ? 
*/ if (!acb->scan_devices) { - dprintkdbg(DBG_KG, "reselect: (pid#%li) <%02i-%i> " + dprintkdbg(DBG_KG, "reselect: (0x%p) <%02i-%i> " "Arb lost but Resel win rsel=%i stat=0x%04x\n", - srb->cmd->serial_number, dcb->target_id, + srb->cmd, dcb->target_id, dcb->target_lun, rsel_tar_lun_id, DC395x_read16(acb, TRM_S1040_SCSI_STATUS)); arblostflag = 1; @@ -3318,7 +3317,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, enum dma_data_direction dir = cmd->sc_data_direction; int ckc_only = 1; - dprintkdbg(DBG_1, "srb_done: (pid#%li) <%02i-%i>\n", srb->cmd->serial_number, + dprintkdbg(DBG_1, "srb_done: (0x%p) <%02i-%i>\n", srb->cmd, srb->cmd->device->id, srb->cmd->device->lun); dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n", srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count, @@ -3497,9 +3496,9 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, cmd->SCp.buffers_residual = 0; if (debug_enabled(DBG_KG)) { if (srb->total_xfer_length) - dprintkdbg(DBG_KG, "srb_done: (pid#%li) <%02i-%i> " + dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> " "cmnd=0x%02x Missed %i bytes\n", - cmd->serial_number, cmd->device->id, cmd->device->lun, + cmd, cmd->device->id, cmd->device->lun, cmd->cmnd[0], srb->total_xfer_length); } @@ -3508,8 +3507,8 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, if (srb == acb->tmp_srb) dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n"); else { - dprintkdbg(DBG_0, "srb_done: (pid#%li) done result=0x%08x\n", - cmd->serial_number, cmd->result); + dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n", + cmd, cmd->result); srb_free_insert(acb, srb); } pci_unmap_srb(acb, srb); @@ -3538,7 +3537,7 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag, p = srb->cmd; dir = p->sc_data_direction; result = MK_RES(0, did_flag, 0, 0); - printk("G:%li(%02i-%i) ", p->serial_number, + printk("G:%p(%02i-%i) ", p, p->device->id, p->device->lun); srb_going_remove(dcb, srb); free_tag(dcb, srb); @@ -3568,7 +3567,7 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag, p = srb->cmd; result = MK_RES(0, did_flag, 0, 0); - printk("W:%li<%02i-%i>", p->serial_number, p->device->id, + printk("W:%p<%02i-%i>", p, p->device->id, p->device->lun); srb_waiting_remove(dcb, srb); srb_free_insert(acb, srb); @@ -3677,8 +3676,8 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb) { struct scsi_cmnd *cmd = srb->cmd; - dprintkdbg(DBG_1, "request_sense: (pid#%li) <%02i-%i>\n", - cmd->serial_number, cmd->device->id, cmd->device->lun); + dprintkdbg(DBG_1, "request_sense: (0x%p) <%02i-%i>\n", + cmd, cmd->device->id, cmd->device->lun); srb->flag |= AUTO_REQSENSE; srb->adapter_status = 0; @@ -3708,8 +3707,8 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, if (start_scsi(acb, dcb, srb)) { /* Should only happen, if sb. 
else grabs the bus */ dprintkl(KERN_DEBUG, - "request_sense: (pid#%li) failed <%02i-%i>\n", - srb->cmd->serial_number, dcb->target_id, dcb->target_lun); + "request_sense: (0x%p) failed <%02i-%i>\n", + srb->cmd, dcb->target_id, dcb->target_lun); srb_going_to_waiting_move(dcb, srb); waiting_set_timer(acb, HZ / 100); } @@ -4717,13 +4716,13 @@ static int dc395x_proc_info(struct Scsi_Host *host, char *buffer, dcb->target_id, dcb->target_lun, list_size(&dcb->srb_waiting_list)); list_for_each_entry(srb, &dcb->srb_waiting_list, list) - SPRINTF(" %li", srb->cmd->serial_number); + SPRINTF(" %p", srb->cmd); if (!list_empty(&dcb->srb_going_list)) SPRINTF("\nDCB (%02i-%i): Going : %i:", dcb->target_id, dcb->target_lun, list_size(&dcb->srb_going_list)); list_for_each_entry(srb, &dcb->srb_going_list, list) - SPRINTF(" %li", srb->cmd->serial_number); + SPRINTF(" %p", srb->cmd); if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list)) SPRINTF("\n"); } diff --git a/trunk/drivers/scsi/device_handler/scsi_dh.c b/trunk/drivers/scsi/device_handler/scsi_dh.c index 564e6ecd17c2..0119b8147797 100644 --- a/trunk/drivers/scsi/device_handler/scsi_dh.c +++ b/trunk/drivers/scsi/device_handler/scsi_dh.c @@ -394,12 +394,14 @@ int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data) unsigned long flags; struct scsi_device *sdev; struct scsi_device_handler *scsi_dh = NULL; + struct device *dev = NULL; spin_lock_irqsave(q->queue_lock, flags); sdev = q->queuedata; if (sdev && sdev->scsi_dh_data) scsi_dh = sdev->scsi_dh_data->scsi_dh; - if (!scsi_dh || !get_device(&sdev->sdev_gendev) || + dev = get_device(&sdev->sdev_gendev); + if (!scsi_dh || !dev || sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL) err = SCSI_DH_NOSYS; @@ -410,12 +412,13 @@ int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data) if (err) { if (fn) fn(data, err); - return err; + goto out; } if (scsi_dh->activate) err = scsi_dh->activate(sdev, fn, data); - put_device(&sdev->sdev_gendev); +out: + put_device(dev); return err; } EXPORT_SYMBOL_GPL(scsi_dh_activate); diff --git a/trunk/drivers/scsi/device_handler/scsi_dh_alua.c b/trunk/drivers/scsi/device_handler/scsi_dh_alua.c index 42fe52902add..6fec9fe5dc39 100644 --- a/trunk/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/trunk/drivers/scsi/device_handler/scsi_dh_alua.c @@ -782,7 +782,7 @@ static int alua_bus_attach(struct scsi_device *sdev) h->sdev = sdev; err = alua_initialize(sdev, h); - if (err != SCSI_DH_OK) + if ((err != SCSI_DH_OK) && (err != SCSI_DH_DEV_OFFLINED)) goto failed; if (!try_module_get(THIS_MODULE)) diff --git a/trunk/drivers/scsi/device_handler/scsi_dh_rdac.c b/trunk/drivers/scsi/device_handler/scsi_dh_rdac.c index 293c183dfe6d..e7fc70d6b478 100644 --- a/trunk/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/trunk/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -182,14 +182,24 @@ struct rdac_dh_data { struct rdac_controller *ctlr; #define UNINITIALIZED_LUN (1 << 8) unsigned lun; + +#define RDAC_MODE 0 +#define RDAC_MODE_AVT 1 +#define RDAC_MODE_IOSHIP 2 + unsigned char mode; + #define RDAC_STATE_ACTIVE 0 #define RDAC_STATE_PASSIVE 1 unsigned char state; #define RDAC_LUN_UNOWNED 0 #define RDAC_LUN_OWNED 1 -#define RDAC_LUN_AVT 2 char lun_state; + +#define RDAC_PREFERRED 0 +#define RDAC_NON_PREFERRED 1 + char preferred; + unsigned char sense[SCSI_SENSE_BUFFERSIZE]; union { struct c2_inquiry c2; @@ -199,11 +209,15 @@ struct rdac_dh_data { } inq; }; +static const char *mode[] = { + "RDAC", + "AVT", + "IOSHIP", +}; 
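The rdac_dh_data changes above add explicit RDAC, AVT and IOSHIP operating modes plus a preferred-path flag; the check_ownership() hunk that follows decodes them from the vendor C9 inquiry page. A small sketch of that bit decode, using simplified stand-in types rather than the driver's structures (the real code reads inqp->avte_cvp and inqp->path_prio):

#include <stdio.h>

enum rdac_demo_mode { DEMO_MODE_RDAC, DEMO_MODE_AVT, DEMO_MODE_IOSHIP };

struct c9_demo_bits {
	unsigned char avte_cvp;   /* bit 7: AVT, bit 5: IOSHIP, bit 0: LUN owned */
	unsigned char path_prio;  /* bit 0: this path is the preferred one */
};

static void demo_decode_c9(const struct c9_demo_bits *inq)
{
	enum rdac_demo_mode mode;
	int owned, preferred;

	if ((inq->avte_cvp >> 5) & 0x1)
		mode = DEMO_MODE_IOSHIP;	/* LUN in IOSHIP mode */
	else if (inq->avte_cvp >> 7)
		mode = DEMO_MODE_AVT;		/* LUN in AVT mode */
	else
		mode = DEMO_MODE_RDAC;		/* plain RDAC mode */

	owned = inq->avte_cvp & 0x1;		/* controller owns the LUN */
	preferred = inq->path_prio & 0x1;	/* this path is preferred */

	printf("mode=%d owned=%d preferred=%d\n", mode, owned, preferred);
}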
static const char *lun_state[] = { "unowned", "owned", - "owned (AVT mode)", }; struct rdac_queue_data { @@ -458,25 +472,33 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h) int err; struct c9_inquiry *inqp; - h->lun_state = RDAC_LUN_UNOWNED; h->state = RDAC_STATE_ACTIVE; err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h); if (err == SCSI_DH_OK) { inqp = &h->inq.c9; - if ((inqp->avte_cvp >> 7) == 0x1) { - /* LUN in AVT mode */ - sdev_printk(KERN_NOTICE, sdev, - "%s: AVT mode detected\n", - RDAC_NAME); - h->lun_state = RDAC_LUN_AVT; - } else if ((inqp->avte_cvp & 0x1) != 0) { - /* LUN was owned by the controller */ + /* detect the operating mode */ + if ((inqp->avte_cvp >> 5) & 0x1) + h->mode = RDAC_MODE_IOSHIP; /* LUN in IOSHIP mode */ + else if (inqp->avte_cvp >> 7) + h->mode = RDAC_MODE_AVT; /* LUN in AVT mode */ + else + h->mode = RDAC_MODE; /* LUN in RDAC mode */ + + /* Update ownership */ + if (inqp->avte_cvp & 0x1) h->lun_state = RDAC_LUN_OWNED; + else { + h->lun_state = RDAC_LUN_UNOWNED; + if (h->mode == RDAC_MODE) + h->state = RDAC_STATE_PASSIVE; } - } - if (h->lun_state == RDAC_LUN_UNOWNED) - h->state = RDAC_STATE_PASSIVE; + /* Update path prio*/ + if (inqp->path_prio & 0x1) + h->preferred = RDAC_PREFERRED; + else + h->preferred = RDAC_NON_PREFERRED; + } return err; } @@ -648,12 +670,27 @@ static int rdac_activate(struct scsi_device *sdev, { struct rdac_dh_data *h = get_rdac_data(sdev); int err = SCSI_DH_OK; + int act = 0; err = check_ownership(sdev, h); if (err != SCSI_DH_OK) goto done; - if (h->lun_state == RDAC_LUN_UNOWNED) { + switch (h->mode) { + case RDAC_MODE: + if (h->lun_state == RDAC_LUN_UNOWNED) + act = 1; + break; + case RDAC_MODE_IOSHIP: + if ((h->lun_state == RDAC_LUN_UNOWNED) && + (h->preferred == RDAC_PREFERRED)) + act = 1; + break; + default: + break; + } + + if (act) { err = queue_mode_select(sdev, fn, data); if (err == SCSI_DH_OK) return 0; @@ -836,8 +873,9 @@ static int rdac_bus_attach(struct scsi_device *sdev) spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); sdev_printk(KERN_NOTICE, sdev, - "%s: LUN %d (%s)\n", - RDAC_NAME, h->lun, lun_state[(int)h->lun_state]); + "%s: LUN %d (%s) (%s)\n", + RDAC_NAME, h->lun, mode[(int)h->mode], + lun_state[(int)h->lun_state]); return 0; diff --git a/trunk/drivers/scsi/dpt_i2o.c b/trunk/drivers/scsi/dpt_i2o.c index cffcb108ac96..b4f6c9a84e71 100644 --- a/trunk/drivers/scsi/dpt_i2o.c +++ b/trunk/drivers/scsi/dpt_i2o.c @@ -780,7 +780,7 @@ static int adpt_abort(struct scsi_cmnd * cmd) return FAILED; } pHba = (adpt_hba*) cmd->device->host->hostdata[0]; - printk(KERN_INFO"%s: Trying to Abort cmd=%ld\n",pHba->name, cmd->serial_number); + printk(KERN_INFO"%s: Trying to Abort\n",pHba->name); if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) { printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name); return FAILED; @@ -802,10 +802,10 @@ static int adpt_abort(struct scsi_cmnd * cmd) printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name); return FAILED; } - printk(KERN_INFO"%s: Abort cmd=%ld failed.\n",pHba->name, cmd->serial_number); + printk(KERN_INFO"%s: Abort failed.\n",pHba->name); return FAILED; } - printk(KERN_INFO"%s: Abort cmd=%ld complete.\n",pHba->name, cmd->serial_number); + printk(KERN_INFO"%s: Abort complete.\n",pHba->name); return SUCCESS; } diff --git a/trunk/drivers/scsi/eata.c b/trunk/drivers/scsi/eata.c index 0eb4fe6a4c8a..94de88955a99 100644 --- a/trunk/drivers/scsi/eata.c +++ b/trunk/drivers/scsi/eata.c @@ -1766,8 +1766,8 @@ static 
int eata2x_queuecommand_lck(struct scsi_cmnd *SCpnt, struct mscp *cpp; if (SCpnt->host_scribble) - panic("%s: qcomm, pid %ld, SCpnt %p already active.\n", - ha->board_name, SCpnt->serial_number, SCpnt); + panic("%s: qcomm, SCpnt %p already active.\n", + ha->board_name, SCpnt); /* i is the mailbox number, look for the first free mailbox starting from last_cp_used */ @@ -1801,7 +1801,7 @@ static int eata2x_queuecommand_lck(struct scsi_cmnd *SCpnt, if (do_trace) scmd_printk(KERN_INFO, SCpnt, - "qcomm, mbox %d, pid %ld.\n", i, SCpnt->serial_number); + "qcomm, mbox %d.\n", i); cpp->reqsen = 1; cpp->dispri = 1; @@ -1833,8 +1833,7 @@ static int eata2x_queuecommand_lck(struct scsi_cmnd *SCpnt, if (do_dma(shost->io_port, cpp->cp_dma_addr, SEND_CP_DMA)) { unmap_dma(i, ha); SCpnt->host_scribble = NULL; - scmd_printk(KERN_INFO, SCpnt, - "qcomm, pid %ld, adapter busy.\n", SCpnt->serial_number); + scmd_printk(KERN_INFO, SCpnt, "qcomm, adapter busy.\n"); return 1; } @@ -1851,14 +1850,12 @@ static int eata2x_eh_abort(struct scsi_cmnd *SCarg) unsigned int i; if (SCarg->host_scribble == NULL) { - scmd_printk(KERN_INFO, SCarg, - "abort, pid %ld inactive.\n", SCarg->serial_number); + scmd_printk(KERN_INFO, SCarg, "abort, cmd inactive.\n"); return SUCCESS; } i = *(unsigned int *)SCarg->host_scribble; - scmd_printk(KERN_WARNING, SCarg, - "abort, mbox %d, pid %ld.\n", i, SCarg->serial_number); + scmd_printk(KERN_WARNING, SCarg, "abort, mbox %d.\n", i); if (i >= shost->can_queue) panic("%s: abort, invalid SCarg->host_scribble.\n", ha->board_name); @@ -1902,8 +1899,8 @@ static int eata2x_eh_abort(struct scsi_cmnd *SCarg) SCarg->result = DID_ABORT << 16; SCarg->host_scribble = NULL; ha->cp_stat[i] = FREE; - printk("%s, abort, mbox %d ready, DID_ABORT, pid %ld done.\n", - ha->board_name, i, SCarg->serial_number); + printk("%s, abort, mbox %d ready, DID_ABORT, done.\n", + ha->board_name, i); SCarg->scsi_done(SCarg); return SUCCESS; } @@ -1919,13 +1916,12 @@ static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg) struct Scsi_Host *shost = SCarg->device->host; struct hostdata *ha = (struct hostdata *)shost->hostdata; - scmd_printk(KERN_INFO, SCarg, - "reset, enter, pid %ld.\n", SCarg->serial_number); + scmd_printk(KERN_INFO, SCarg, "reset, enter.\n"); spin_lock_irq(shost->host_lock); if (SCarg->host_scribble == NULL) - printk("%s: reset, pid %ld inactive.\n", ha->board_name, SCarg->serial_number); + printk("%s: reset, inactive.\n", ha->board_name); if (ha->in_reset) { printk("%s: reset, exit, already in reset.\n", ha->board_name); @@ -1964,14 +1960,14 @@ static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg) if (ha->cp_stat[i] == READY || ha->cp_stat[i] == ABORTING) { ha->cp_stat[i] = ABORTING; - printk("%s: reset, mbox %d aborting, pid %ld.\n", - ha->board_name, i, SCpnt->serial_number); + printk("%s: reset, mbox %d aborting.\n", + ha->board_name, i); } else { ha->cp_stat[i] = IN_RESET; - printk("%s: reset, mbox %d in reset, pid %ld.\n", - ha->board_name, i, SCpnt->serial_number); + printk("%s: reset, mbox %d in reset.\n", + ha->board_name, i); } if (SCpnt->host_scribble == NULL) @@ -2025,8 +2021,8 @@ static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg) ha->cp_stat[i] = LOCKED; printk - ("%s, reset, mbox %d locked, DID_RESET, pid %ld done.\n", - ha->board_name, i, SCpnt->serial_number); + ("%s, reset, mbox %d locked, DID_RESET, done.\n", + ha->board_name, i); } else if (ha->cp_stat[i] == ABORTING) { @@ -2039,8 +2035,8 @@ static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg) ha->cp_stat[i] = FREE; printk - 
("%s, reset, mbox %d aborting, DID_RESET, pid %ld done.\n", - ha->board_name, i, SCpnt->serial_number); + ("%s, reset, mbox %d aborting, DID_RESET, done.\n", + ha->board_name, i); } else @@ -2054,7 +2050,7 @@ static int eata2x_eh_host_reset(struct scsi_cmnd *SCarg) do_trace = 0; if (arg_done) - printk("%s: reset, exit, pid %ld done.\n", ha->board_name, SCarg->serial_number); + printk("%s: reset, exit, done.\n", ha->board_name); else printk("%s: reset, exit.\n", ha->board_name); @@ -2238,10 +2234,10 @@ static int reorder(struct hostdata *ha, unsigned long cursec, cpp = &ha->cp[k]; SCpnt = cpp->SCpnt; scmd_printk(KERN_INFO, SCpnt, - "%s pid %ld mb %d fc %d nr %d sec %ld ns %u" + "%s mb %d fc %d nr %d sec %ld ns %u" " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", (ihdlr ? "ihdlr" : "qcomm"), - SCpnt->serial_number, k, flushcount, + k, flushcount, n_ready, blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request), cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only), @@ -2285,10 +2281,10 @@ static void flush_dev(struct scsi_device *dev, unsigned long cursec, if (do_dma(dev->host->io_port, cpp->cp_dma_addr, SEND_CP_DMA)) { scmd_printk(KERN_INFO, SCpnt, - "%s, pid %ld, mbox %d, adapter" + "%s, mbox %d, adapter" " busy, will abort.\n", (ihdlr ? "ihdlr" : "qcomm"), - SCpnt->serial_number, k); + k); ha->cp_stat[k] = ABORTING; continue; } @@ -2398,12 +2394,12 @@ static irqreturn_t ihdlr(struct Scsi_Host *shost) panic("%s: ihdlr, mbox %d, SCpnt == NULL.\n", ha->board_name, i); if (SCpnt->host_scribble == NULL) - panic("%s: ihdlr, mbox %d, pid %ld, SCpnt %p garbled.\n", ha->board_name, - i, SCpnt->serial_number, SCpnt); + panic("%s: ihdlr, mbox %d, SCpnt %p garbled.\n", ha->board_name, + i, SCpnt); if (*(unsigned int *)SCpnt->host_scribble != i) - panic("%s: ihdlr, mbox %d, pid %ld, index mismatch %d.\n", - ha->board_name, i, SCpnt->serial_number, + panic("%s: ihdlr, mbox %d, index mismatch %d.\n", + ha->board_name, i, *(unsigned int *)SCpnt->host_scribble); sync_dma(i, ha); @@ -2449,11 +2445,11 @@ static irqreturn_t ihdlr(struct Scsi_Host *shost) if (spp->target_status && SCpnt->device->type == TYPE_DISK && (!(tstatus == CHECK_CONDITION && ha->iocount <= 1000 && (SCpnt->sense_buffer[2] & 0xf) == NOT_READY))) - printk("%s: ihdlr, target %d.%d:%d, pid %ld, " + printk("%s: ihdlr, target %d.%d:%d, " "target_status 0x%x, sense key 0x%x.\n", ha->board_name, SCpnt->device->channel, SCpnt->device->id, - SCpnt->device->lun, SCpnt->serial_number, + SCpnt->device->lun, spp->target_status, SCpnt->sense_buffer[2]); ha->target_to[SCpnt->device->id][SCpnt->device->channel] = 0; @@ -2522,9 +2518,9 @@ static irqreturn_t ihdlr(struct Scsi_Host *shost) do_trace || msg_byte(spp->target_status)) #endif scmd_printk(KERN_INFO, SCpnt, "ihdlr, mbox %2d, err 0x%x:%x," - " pid %ld, reg 0x%x, count %d.\n", + " reg 0x%x, count %d.\n", i, spp->adapter_status, spp->target_status, - SCpnt->serial_number, reg, ha->iocount); + reg, ha->iocount); unmap_dma(i, ha); diff --git a/trunk/drivers/scsi/eata_pio.c b/trunk/drivers/scsi/eata_pio.c index 4a9641e69f54..d5f8362335d3 100644 --- a/trunk/drivers/scsi/eata_pio.c +++ b/trunk/drivers/scsi/eata_pio.c @@ -372,8 +372,7 @@ static int eata_pio_queue_lck(struct scsi_cmnd *cmd, cp->status = USED; /* claim free slot */ DBG(DBG_QUEUE, scmd_printk(KERN_DEBUG, cmd, - "eata_pio_queue pid %ld, y %d\n", - cmd->serial_number, y)); + "eata_pio_queue 0x%p, y %d\n", cmd, y)); cmd->scsi_done = (void *) done; @@ -417,8 +416,8 @@ static int eata_pio_queue_lck(struct scsi_cmnd *cmd, if 
(eata_pio_send_command(base, EATA_CMD_PIO_SEND_CP)) { cmd->result = DID_BUS_BUSY << 16; scmd_printk(KERN_NOTICE, cmd, - "eata_pio_queue pid %ld, HBA busy, " - "returning DID_BUS_BUSY, done.\n", cmd->serial_number); + "eata_pio_queue pid 0x%p, HBA busy, " + "returning DID_BUS_BUSY, done.\n", cmd); done(cmd); cp->status = FREE; return 0; @@ -432,8 +431,8 @@ static int eata_pio_queue_lck(struct scsi_cmnd *cmd, outw(0, base + HA_RDATA); DBG(DBG_QUEUE, scmd_printk(KERN_DEBUG, cmd, - "Queued base %#.4lx pid: %ld " - "slot %d irq %d\n", sh->base, cmd->serial_number, y, sh->irq)); + "Queued base %#.4lx cmd: 0x%p " + "slot %d irq %d\n", sh->base, cmd, y, sh->irq)); return 0; } @@ -445,8 +444,7 @@ static int eata_pio_abort(struct scsi_cmnd *cmd) unsigned int loop = 100; DBG(DBG_ABNORM, scmd_printk(KERN_WARNING, cmd, - "eata_pio_abort called pid: %ld\n", - cmd->serial_number)); + "eata_pio_abort called pid: 0x%p\n", cmd)); while (inb(cmd->device->host->base + HA_RAUXSTAT) & HA_ABUSY) if (--loop == 0) { @@ -481,8 +479,7 @@ static int eata_pio_host_reset(struct scsi_cmnd *cmd) struct Scsi_Host *host = cmd->device->host; DBG(DBG_ABNORM, scmd_printk(KERN_WARNING, cmd, - "eata_pio_reset called pid:%ld\n", - cmd->serial_number)); + "eata_pio_reset called\n")); spin_lock_irq(host->host_lock); @@ -501,7 +498,7 @@ static int eata_pio_host_reset(struct scsi_cmnd *cmd) sp = HD(cmd)->ccb[x].cmd; HD(cmd)->ccb[x].status = RESET; - printk(KERN_WARNING "eata_pio_reset: slot %d in reset, pid %ld.\n", x, sp->serial_number); + printk(KERN_WARNING "eata_pio_reset: slot %d in reset.\n", x); if (sp == NULL) panic("eata_pio_reset: slot %d, sp==NULL.\n", x); diff --git a/trunk/drivers/scsi/esp_scsi.c b/trunk/drivers/scsi/esp_scsi.c index 57558523c1b8..9a1af1d6071a 100644 --- a/trunk/drivers/scsi/esp_scsi.c +++ b/trunk/drivers/scsi/esp_scsi.c @@ -708,8 +708,7 @@ static void esp_maybe_execute_command(struct esp *esp) tp = &esp->target[tgt]; lp = dev->hostdata; - list_del(&ent->list); - list_add(&ent->list, &esp->active_cmds); + list_move(&ent->list, &esp->active_cmds); esp->active_cmd = ent; @@ -1244,8 +1243,7 @@ static int esp_finish_select(struct esp *esp) /* Now that the state is unwound properly, put back onto * the issue queue. This command is no longer active. 
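/*
 * Note (not part of the patch): the esp_scsi hunks here swap an open-coded
 * list_del() + list_add() pair for list_move(), the list.h helper that does
 * exactly that.  Rough equivalence, for reference:
 */
static inline void list_move_equivalent(struct list_head *entry,
                                        struct list_head *head)
{
        list_del(entry);        /* unlink from its current list */
        list_add(entry, head);  /* re-link at the head of the new list */
}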
*/ - list_del(&ent->list); - list_add(&ent->list, &esp->queued_cmds); + list_move(&ent->list, &esp->queued_cmds); esp->active_cmd = NULL; /* Return value ignored by caller, it directly invokes diff --git a/trunk/drivers/scsi/fcoe/fcoe.c b/trunk/drivers/scsi/fcoe/fcoe.c index 04f346b562da..cc23bd9480b2 100644 --- a/trunk/drivers/scsi/fcoe/fcoe.c +++ b/trunk/drivers/scsi/fcoe/fcoe.c @@ -380,6 +380,42 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev, return fcoe; } +/** + * fcoe_interface_release() - fcoe_port kref release function + * @kref: Embedded reference count in an fcoe_interface struct + */ +static void fcoe_interface_release(struct kref *kref) +{ + struct fcoe_interface *fcoe; + struct net_device *netdev; + + fcoe = container_of(kref, struct fcoe_interface, kref); + netdev = fcoe->netdev; + /* tear-down the FCoE controller */ + fcoe_ctlr_destroy(&fcoe->ctlr); + kfree(fcoe); + dev_put(netdev); + module_put(THIS_MODULE); +} + +/** + * fcoe_interface_get() - Get a reference to a FCoE interface + * @fcoe: The FCoE interface to be held + */ +static inline void fcoe_interface_get(struct fcoe_interface *fcoe) +{ + kref_get(&fcoe->kref); +} + +/** + * fcoe_interface_put() - Put a reference to a FCoE interface + * @fcoe: The FCoE interface to be released + */ +static inline void fcoe_interface_put(struct fcoe_interface *fcoe) +{ + kref_put(&fcoe->kref, fcoe_interface_release); +} + /** * fcoe_interface_cleanup() - Clean up a FCoE interface * @fcoe: The FCoE interface to be cleaned up @@ -392,6 +428,21 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe) struct fcoe_ctlr *fip = &fcoe->ctlr; u8 flogi_maddr[ETH_ALEN]; const struct net_device_ops *ops; + struct fcoe_port *port = lport_priv(fcoe->ctlr.lp); + + FCOE_NETDEV_DBG(netdev, "Destroying interface\n"); + + /* Logout of the fabric */ + fc_fabric_logoff(fcoe->ctlr.lp); + + /* Cleanup the fc_lport */ + fc_lport_destroy(fcoe->ctlr.lp); + + /* Stop the transmit retry timer */ + del_timer_sync(&port->timer); + + /* Free existing transmit skbs */ + fcoe_clean_pending_queue(fcoe->ctlr.lp); /* * Don't listen for Ethernet packets anymore. 
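/*
 * Note (not part of the patch): with these fcoe hunks, fcoe_interface_cleanup()
 * ends by calling fcoe_interface_put(), so the final teardown in
 * fcoe_interface_release() (fcoe_ctlr_destroy(), kfree(), dev_put(),
 * module_put()) runs only when the last reference is dropped.  The generic
 * kref pattern relied on, shown with a hypothetical struct:
 */
struct foo {
        struct kref kref;
        /* ... payload ... */
};

static void foo_release(struct kref *kref)
{
        struct foo *f = container_of(kref, struct foo, kref);

        kfree(f);               /* last user is gone */
}

/*
 * Usage:
 *      kref_init(&f->kref);                    refcount starts at 1
 *      kref_get(&f->kref);                     one extra user
 *      kref_put(&f->kref, foo_release);        release runs when it hits 0
 */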
@@ -414,6 +465,9 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe) } else dev_mc_del(netdev, FIP_ALL_ENODE_MACS); + if (!is_zero_ether_addr(port->data_src_addr)) + dev_uc_del(netdev, port->data_src_addr); + /* Tell the LLD we are done w/ FCoE */ ops = netdev->netdev_ops; if (ops->ndo_fcoe_disable) { @@ -421,42 +475,7 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe) FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE" " specific feature for LLD.\n"); } -} - -/** - * fcoe_interface_release() - fcoe_port kref release function - * @kref: Embedded reference count in an fcoe_interface struct - */ -static void fcoe_interface_release(struct kref *kref) -{ - struct fcoe_interface *fcoe; - struct net_device *netdev; - - fcoe = container_of(kref, struct fcoe_interface, kref); - netdev = fcoe->netdev; - /* tear-down the FCoE controller */ - fcoe_ctlr_destroy(&fcoe->ctlr); - kfree(fcoe); - dev_put(netdev); - module_put(THIS_MODULE); -} - -/** - * fcoe_interface_get() - Get a reference to a FCoE interface - * @fcoe: The FCoE interface to be held - */ -static inline void fcoe_interface_get(struct fcoe_interface *fcoe) -{ - kref_get(&fcoe->kref); -} - -/** - * fcoe_interface_put() - Put a reference to a FCoE interface - * @fcoe: The FCoE interface to be released - */ -static inline void fcoe_interface_put(struct fcoe_interface *fcoe) -{ - kref_put(&fcoe->kref, fcoe_interface_release); + fcoe_interface_put(fcoe); } /** @@ -821,39 +840,9 @@ static inline int fcoe_em_config(struct fc_lport *lport) * fcoe_if_destroy() - Tear down a SW FCoE instance * @lport: The local port to be destroyed * - * Locking: must be called with the RTNL mutex held and RTNL mutex - * needed to be dropped by this function since not dropping RTNL - * would cause circular locking warning on synchronous fip worker - * cancelling thru fcoe_interface_put invoked by this function. - * */ static void fcoe_if_destroy(struct fc_lport *lport) { - struct fcoe_port *port = lport_priv(lport); - struct fcoe_interface *fcoe = port->priv; - struct net_device *netdev = fcoe->netdev; - - FCOE_NETDEV_DBG(netdev, "Destroying interface\n"); - - /* Logout of the fabric */ - fc_fabric_logoff(lport); - - /* Cleanup the fc_lport */ - fc_lport_destroy(lport); - - /* Stop the transmit retry timer */ - del_timer_sync(&port->timer); - - /* Free existing transmit skbs */ - fcoe_clean_pending_queue(lport); - - if (!is_zero_ether_addr(port->data_src_addr)) - dev_uc_del(netdev, port->data_src_addr); - rtnl_unlock(); - - /* receives may not be stopped until after this */ - fcoe_interface_put(fcoe); - /* Free queued packets for the per-CPU receive threads */ fcoe_percpu_clean(lport); @@ -1783,23 +1772,8 @@ static int fcoe_disable(struct net_device *netdev) int rc = 0; mutex_lock(&fcoe_config_mutex); -#ifdef CONFIG_FCOE_MODULE - /* - * Make sure the module has been initialized, and is not about to be - * removed. Module paramter sysfs files are writable before the - * module_init function is called and after module_exit. 
- */ - if (THIS_MODULE->state != MODULE_STATE_LIVE) { - rc = -ENODEV; - goto out_nodev; - } -#endif - - if (!rtnl_trylock()) { - mutex_unlock(&fcoe_config_mutex); - return -ERESTARTSYS; - } + rtnl_lock(); fcoe = fcoe_hostlist_lookup_port(netdev); rtnl_unlock(); @@ -1809,7 +1783,6 @@ static int fcoe_disable(struct net_device *netdev) } else rc = -ENODEV; -out_nodev: mutex_unlock(&fcoe_config_mutex); return rc; } @@ -1828,22 +1801,7 @@ static int fcoe_enable(struct net_device *netdev) int rc = 0; mutex_lock(&fcoe_config_mutex); -#ifdef CONFIG_FCOE_MODULE - /* - * Make sure the module has been initialized, and is not about to be - * removed. Module paramter sysfs files are writable before the - * module_init function is called and after module_exit. - */ - if (THIS_MODULE->state != MODULE_STATE_LIVE) { - rc = -ENODEV; - goto out_nodev; - } -#endif - if (!rtnl_trylock()) { - mutex_unlock(&fcoe_config_mutex); - return -ERESTARTSYS; - } - + rtnl_lock(); fcoe = fcoe_hostlist_lookup_port(netdev); rtnl_unlock(); @@ -1852,7 +1810,6 @@ static int fcoe_enable(struct net_device *netdev) else if (!fcoe_link_ok(fcoe->ctlr.lp)) fcoe_ctlr_link_up(&fcoe->ctlr); -out_nodev: mutex_unlock(&fcoe_config_mutex); return rc; } @@ -1868,35 +1825,22 @@ static int fcoe_enable(struct net_device *netdev) static int fcoe_destroy(struct net_device *netdev) { struct fcoe_interface *fcoe; + struct fc_lport *lport; int rc = 0; mutex_lock(&fcoe_config_mutex); -#ifdef CONFIG_FCOE_MODULE - /* - * Make sure the module has been initialized, and is not about to be - * removed. Module paramter sysfs files are writable before the - * module_init function is called and after module_exit. - */ - if (THIS_MODULE->state != MODULE_STATE_LIVE) { - rc = -ENODEV; - goto out_nodev; - } -#endif - if (!rtnl_trylock()) { - mutex_unlock(&fcoe_config_mutex); - return -ERESTARTSYS; - } - + rtnl_lock(); fcoe = fcoe_hostlist_lookup_port(netdev); if (!fcoe) { rtnl_unlock(); rc = -ENODEV; goto out_nodev; } - fcoe_interface_cleanup(fcoe); + lport = fcoe->ctlr.lp; list_del(&fcoe->list); - /* RTNL mutex is dropped by fcoe_if_destroy */ - fcoe_if_destroy(fcoe->ctlr.lp); + fcoe_interface_cleanup(fcoe); + rtnl_unlock(); + fcoe_if_destroy(lport); out_nodev: mutex_unlock(&fcoe_config_mutex); return rc; @@ -1912,8 +1856,6 @@ static void fcoe_destroy_work(struct work_struct *work) port = container_of(work, struct fcoe_port, destroy_work); mutex_lock(&fcoe_config_mutex); - rtnl_lock(); - /* RTNL mutex is dropped by fcoe_if_destroy */ fcoe_if_destroy(port->lport); mutex_unlock(&fcoe_config_mutex); } @@ -1948,23 +1890,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) struct fc_lport *lport; mutex_lock(&fcoe_config_mutex); - - if (!rtnl_trylock()) { - mutex_unlock(&fcoe_config_mutex); - return -ERESTARTSYS; - } - -#ifdef CONFIG_FCOE_MODULE - /* - * Make sure the module has been initialized, and is not about to be - * removed. Module paramter sysfs files are writable before the - * module_init function is called and after module_exit. 
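/*
 * Note (not part of the patch): fcoe_disable(), fcoe_enable(), fcoe_destroy()
 * and fcoe_create() drop both the THIS_MODULE->state check and the
 * rtnl_trylock()/-ERESTARTSYS dance in favour of a plain rtnl_lock().  This
 * appears to be possible because fcoe_if_destroy() is no longer entered with
 * the RTNL mutex held (its old locking note is removed above), so these
 * callers can simply block on the lock:
 *
 *      old:    if (!rtnl_trylock()) {
 *                      mutex_unlock(&fcoe_config_mutex);
 *                      return -ERESTARTSYS;
 *              }
 *      new:    rtnl_lock();
 */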
- */ - if (THIS_MODULE->state != MODULE_STATE_LIVE) { - rc = -ENODEV; - goto out_nodev; - } -#endif + rtnl_lock(); /* look for existing lport */ if (fcoe_hostlist_lookup(netdev)) { diff --git a/trunk/drivers/scsi/fcoe/fcoe_ctlr.c b/trunk/drivers/scsi/fcoe/fcoe_ctlr.c index 9d38be2a41f9..229e4af5508a 100644 --- a/trunk/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/trunk/drivers/scsi/fcoe/fcoe_ctlr.c @@ -978,10 +978,8 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) * the FCF that answers multicast solicitations, not the others that * are sending periodic multicast advertisements. */ - if (mtu_valid) { - list_del(&fcf->list); - list_add(&fcf->list, &fip->fcfs); - } + if (mtu_valid) + list_move(&fcf->list, &fip->fcfs); /* * If this is the first validated FCF, note the time and diff --git a/trunk/drivers/scsi/fcoe/fcoe_transport.c b/trunk/drivers/scsi/fcoe/fcoe_transport.c index 258684101bfd..f81f77c8569e 100644 --- a/trunk/drivers/scsi/fcoe/fcoe_transport.c +++ b/trunk/drivers/scsi/fcoe/fcoe_transport.c @@ -335,7 +335,7 @@ int fcoe_transport_attach(struct fcoe_transport *ft) EXPORT_SYMBOL(fcoe_transport_attach); /** - * fcoe_transport_attach - Detaches an FCoE transport + * fcoe_transport_detach - Detaches an FCoE transport * @ft: The fcoe transport to be attached * * Returns : 0 for success @@ -343,6 +343,7 @@ EXPORT_SYMBOL(fcoe_transport_attach); int fcoe_transport_detach(struct fcoe_transport *ft) { int rc = 0; + struct fcoe_netdev_mapping *nm = NULL, *tmp; mutex_lock(&ft_mutex); if (!ft->attached) { @@ -352,6 +353,19 @@ int fcoe_transport_detach(struct fcoe_transport *ft) goto out_attach; } + /* remove netdev mapping for this transport as it is going away */ + mutex_lock(&fn_mutex); + list_for_each_entry_safe(nm, tmp, &fcoe_netdevs, list) { + if (nm->ft == ft) { + LIBFCOE_TRANSPORT_DBG("transport %s going away, " + "remove its netdev mapping for %s\n", + ft->name, nm->netdev->name); + list_del(&nm->list); + kfree(nm); + } + } + mutex_unlock(&fn_mutex); + list_del(&ft->list); ft->attached = false; LIBFCOE_TRANSPORT_DBG("detaching transport %s\n", ft->name); @@ -371,9 +385,9 @@ static int fcoe_transport_show(char *buffer, const struct kernel_param *kp) i = j = sprintf(buffer, "Attached FCoE transports:"); mutex_lock(&ft_mutex); list_for_each_entry(ft, &fcoe_transports, list) { - i += snprintf(&buffer[i], IFNAMSIZ, "%s ", ft->name); - if (i >= PAGE_SIZE) + if (i >= PAGE_SIZE - IFNAMSIZ) break; + i += snprintf(&buffer[i], IFNAMSIZ, "%s ", ft->name); } mutex_unlock(&ft_mutex); if (i == j) @@ -530,9 +544,6 @@ static int fcoe_transport_create(const char *buffer, struct kernel_param *kp) struct fcoe_transport *ft = NULL; enum fip_state fip_mode = (enum fip_state)(long)kp->arg; - if (!mutex_trylock(&ft_mutex)) - return restart_syscall(); - #ifdef CONFIG_LIBFCOE_MODULE /* * Make sure the module has been initialized, and is not about to be @@ -543,6 +554,8 @@ static int fcoe_transport_create(const char *buffer, struct kernel_param *kp) goto out_nodev; #endif + mutex_lock(&ft_mutex); + netdev = fcoe_if_to_netdev(buffer); if (!netdev) { LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buffer); @@ -586,10 +599,7 @@ static int fcoe_transport_create(const char *buffer, struct kernel_param *kp) dev_put(netdev); out_nodev: mutex_unlock(&ft_mutex); - if (rc == -ERESTARTSYS) - return restart_syscall(); - else - return rc; + return rc; } /** @@ -608,9 +618,6 @@ static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp) struct net_device *netdev = NULL; struct fcoe_transport 
*ft = NULL; - if (!mutex_trylock(&ft_mutex)) - return restart_syscall(); - #ifdef CONFIG_LIBFCOE_MODULE /* * Make sure the module has been initialized, and is not about to be @@ -621,6 +628,8 @@ static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp) goto out_nodev; #endif + mutex_lock(&ft_mutex); + netdev = fcoe_if_to_netdev(buffer); if (!netdev) { LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buffer); @@ -645,11 +654,7 @@ static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp) dev_put(netdev); out_nodev: mutex_unlock(&ft_mutex); - - if (rc == -ERESTARTSYS) - return restart_syscall(); - else - return rc; + return rc; } /** @@ -667,9 +672,6 @@ static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp) struct net_device *netdev = NULL; struct fcoe_transport *ft = NULL; - if (!mutex_trylock(&ft_mutex)) - return restart_syscall(); - #ifdef CONFIG_LIBFCOE_MODULE /* * Make sure the module has been initialized, and is not about to be @@ -680,6 +682,8 @@ static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp) goto out_nodev; #endif + mutex_lock(&ft_mutex); + netdev = fcoe_if_to_netdev(buffer); if (!netdev) goto out_nodev; @@ -716,9 +720,6 @@ static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp) struct net_device *netdev = NULL; struct fcoe_transport *ft = NULL; - if (!mutex_trylock(&ft_mutex)) - return restart_syscall(); - #ifdef CONFIG_LIBFCOE_MODULE /* * Make sure the module has been initialized, and is not about to be @@ -729,6 +730,8 @@ static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp) goto out_nodev; #endif + mutex_lock(&ft_mutex); + netdev = fcoe_if_to_netdev(buffer); if (!netdev) goto out_nodev; @@ -743,10 +746,7 @@ static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp) dev_put(netdev); out_nodev: mutex_unlock(&ft_mutex); - if (rc == -ERESTARTSYS) - return restart_syscall(); - else - return rc; + return rc; } /** diff --git a/trunk/drivers/scsi/hpsa.c b/trunk/drivers/scsi/hpsa.c index 415ad4fb50d4..c6c0434d8034 100644 --- a/trunk/drivers/scsi/hpsa.c +++ b/trunk/drivers/scsi/hpsa.c @@ -273,7 +273,7 @@ static ssize_t host_show_transport_mode(struct device *dev, "performant" : "simple"); } -/* List of controllers which cannot be reset on kexec with reset_devices */ +/* List of controllers which cannot be hard reset on kexec with reset_devices */ static u32 unresettable_controller[] = { 0x324a103C, /* Smart Array P712m */ 0x324b103C, /* SmartArray P711m */ @@ -291,16 +291,45 @@ static u32 unresettable_controller[] = { 0x409D0E11, /* Smart Array 6400 EM */ }; -static int ctlr_is_resettable(struct ctlr_info *h) +/* List of controllers which cannot even be soft reset */ +static u32 soft_unresettable_controller[] = { + /* Exclude 640x boards. These are two pci devices in one slot + * which share a battery backed cache module. One controls the + * cache, the other accesses the cache through the one that controls + * it. If we reset the one controlling the cache, the other will + * likely not be happy. Just forbid resetting this conjoined mess. + * The 640x isn't really supported by hpsa anyway. 
+ */ + 0x409C0E11, /* Smart Array 6400 */ + 0x409D0E11, /* Smart Array 6400 EM */ +}; + +static int ctlr_is_hard_resettable(u32 board_id) { int i; for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++) - if (unresettable_controller[i] == h->board_id) + if (unresettable_controller[i] == board_id) + return 0; + return 1; +} + +static int ctlr_is_soft_resettable(u32 board_id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++) + if (soft_unresettable_controller[i] == board_id) return 0; return 1; } +static int ctlr_is_resettable(u32 board_id) +{ + return ctlr_is_hard_resettable(board_id) || + ctlr_is_soft_resettable(board_id); +} + static ssize_t host_show_resettable(struct device *dev, struct device_attribute *attr, char *buf) { @@ -308,7 +337,7 @@ static ssize_t host_show_resettable(struct device *dev, struct Scsi_Host *shost = class_to_shost(dev); h = shost_to_hba(shost); - return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h)); + return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id)); } static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[]) @@ -929,13 +958,6 @@ static void hpsa_slave_destroy(struct scsi_device *sdev) /* nothing to do. */ } -static void hpsa_scsi_setup(struct ctlr_info *h) -{ - h->ndevices = 0; - h->scsi_host = NULL; - spin_lock_init(&h->devlock); -} - static void hpsa_free_sg_chain_blocks(struct ctlr_info *h) { int i; @@ -1006,8 +1028,7 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h, pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE); } -static void complete_scsi_command(struct CommandList *cp, - int timeout, u32 tag) +static void complete_scsi_command(struct CommandList *cp) { struct scsi_cmnd *cmd; struct ctlr_info *h; @@ -1308,7 +1329,7 @@ static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, int retry_count = 0; do { - memset(c->err_info, 0, sizeof(c->err_info)); + memset(c->err_info, 0, sizeof(*c->err_info)); hpsa_scsi_do_simple_cmd_core(h, c); retry_count++; } while (check_for_unit_attention(h, c) && retry_count <= 3); @@ -1570,6 +1591,7 @@ static unsigned char *msa2xxx_model[] = { "MSA2024", "MSA2312", "MSA2324", + "P2000 G3 SAS", NULL, }; @@ -2751,6 +2773,26 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg) } } +static int __devinit hpsa_send_host_reset(struct ctlr_info *h, + unsigned char *scsi3addr, u8 reset_type) +{ + struct CommandList *c; + + c = cmd_alloc(h); + if (!c) + return -ENOMEM; + fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, + RAID_CTLR_LUNID, TYPE_MSG); + c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */ + c->waiting = NULL; + enqueue_cmd_and_start_io(h, c); + /* Don't wait for completion, the reset won't complete. Don't free + * the command either. This is the last command we will send before + * re-initializing everything, so it doesn't matter and won't leak. 
+ */ + return 0; +} + static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, int cmd_type) @@ -2828,7 +2870,8 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_NONE; c->Request.Timeout = 0; /* Don't time out */ - c->Request.CDB[0] = 0x01; /* RESET_MSG is 0x01 */ + memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); + c->Request.CDB[0] = cmd; c->Request.CDB[1] = 0x03; /* Reset target above */ /* If bytes 4-7 are zero, it means reset the */ /* LunID device */ @@ -2936,7 +2979,7 @@ static inline void finish_cmd(struct CommandList *c, u32 raw_tag) { removeQ(c); if (likely(c->cmd_type == CMD_SCSI)) - complete_scsi_command(c, 0, raw_tag); + complete_scsi_command(c); else if (c->cmd_type == CMD_IOCTL_PEND) complete(c->waiting); } @@ -2994,6 +3037,63 @@ static inline u32 process_nonindexed_cmd(struct ctlr_info *h, return next_command(h); } +/* Some controllers, like p400, will give us one interrupt + * after a soft reset, even if we turned interrupts off. + * Only need to check for this in the hpsa_xxx_discard_completions + * functions. + */ +static int ignore_bogus_interrupt(struct ctlr_info *h) +{ + if (likely(!reset_devices)) + return 0; + + if (likely(h->interrupts_enabled)) + return 0; + + dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " + "(known firmware bug.) Ignoring.\n"); + + return 1; +} + +static irqreturn_t hpsa_intx_discard_completions(int irq, void *dev_id) +{ + struct ctlr_info *h = dev_id; + unsigned long flags; + u32 raw_tag; + + if (ignore_bogus_interrupt(h)) + return IRQ_NONE; + + if (interrupt_not_for_us(h)) + return IRQ_NONE; + spin_lock_irqsave(&h->lock, flags); + while (interrupt_pending(h)) { + raw_tag = get_next_completion(h); + while (raw_tag != FIFO_EMPTY) + raw_tag = next_command(h); + } + spin_unlock_irqrestore(&h->lock, flags); + return IRQ_HANDLED; +} + +static irqreturn_t hpsa_msix_discard_completions(int irq, void *dev_id) +{ + struct ctlr_info *h = dev_id; + unsigned long flags; + u32 raw_tag; + + if (ignore_bogus_interrupt(h)) + return IRQ_NONE; + + spin_lock_irqsave(&h->lock, flags); + raw_tag = get_next_completion(h); + while (raw_tag != FIFO_EMPTY) + raw_tag = next_command(h); + spin_unlock_irqrestore(&h->lock, flags); + return IRQ_HANDLED; +} + static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id) { struct ctlr_info *h = dev_id; @@ -3132,11 +3232,10 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, return 0; } -#define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0) #define hpsa_noop(p) hpsa_message(p, 3, 0) static int hpsa_controller_hard_reset(struct pci_dev *pdev, - void * __iomem vaddr, bool use_doorbell) + void * __iomem vaddr, u32 use_doorbell) { u16 pmcsr; int pos; @@ -3147,8 +3246,7 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev, * other way using the doorbell register. 
*/ dev_info(&pdev->dev, "using doorbell to reset controller\n"); - writel(DOORBELL_CTLR_RESET, vaddr + SA5_DOORBELL); - msleep(1000); + writel(use_doorbell, vaddr + SA5_DOORBELL); } else { /* Try to do it the PCI power state way */ /* Quoting from the Open CISS Specification: "The Power @@ -3179,12 +3277,63 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev, pmcsr &= ~PCI_PM_CTRL_STATE_MASK; pmcsr |= PCI_D0; pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); - - msleep(500); } return 0; } +static __devinit void init_driver_version(char *driver_version, int len) +{ + memset(driver_version, 0, len); + strncpy(driver_version, "hpsa " HPSA_DRIVER_VERSION, len - 1); +} + +static __devinit int write_driver_ver_to_cfgtable( + struct CfgTable __iomem *cfgtable) +{ + char *driver_version; + int i, size = sizeof(cfgtable->driver_version); + + driver_version = kmalloc(size, GFP_KERNEL); + if (!driver_version) + return -ENOMEM; + + init_driver_version(driver_version, size); + for (i = 0; i < size; i++) + writeb(driver_version[i], &cfgtable->driver_version[i]); + kfree(driver_version); + return 0; +} + +static __devinit void read_driver_ver_from_cfgtable( + struct CfgTable __iomem *cfgtable, unsigned char *driver_ver) +{ + int i; + + for (i = 0; i < sizeof(cfgtable->driver_version); i++) + driver_ver[i] = readb(&cfgtable->driver_version[i]); +} + +static __devinit int controller_reset_failed( + struct CfgTable __iomem *cfgtable) +{ + + char *driver_ver, *old_driver_ver; + int rc, size = sizeof(cfgtable->driver_version); + + old_driver_ver = kmalloc(2 * size, GFP_KERNEL); + if (!old_driver_ver) + return -ENOMEM; + driver_ver = old_driver_ver + size; + + /* After a reset, the 32 bytes of "driver version" in the cfgtable + * should have been changed, otherwise we know the reset failed. + */ + init_driver_version(old_driver_ver, size); + read_driver_ver_from_cfgtable(cfgtable, driver_ver); + rc = !memcmp(driver_ver, old_driver_ver, size); + kfree(old_driver_ver); + return rc; +} /* This does a hard reset of the controller using PCI power management * states or the using the doorbell register. */ @@ -3195,10 +3344,10 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) u64 cfg_base_addr_index; void __iomem *vaddr; unsigned long paddr; - u32 misc_fw_support, active_transport; + u32 misc_fw_support; int rc; struct CfgTable __iomem *cfgtable; - bool use_doorbell; + u32 use_doorbell; u32 board_id; u16 command_register; @@ -3215,20 +3364,15 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) * using the doorbell register. */ - /* Exclude 640x boards. These are two pci devices in one slot - * which share a battery backed cache module. One controls the - * cache, the other accesses the cache through the one that controls - * it. If we reset the one controlling the cache, the other will - * likely not be happy. Just forbid resetting this conjoined mess. - * The 640x isn't really supported by hpsa anyway. - */ rc = hpsa_lookup_board_id(pdev, &board_id); - if (rc < 0) { + if (rc < 0 || !ctlr_is_resettable(board_id)) { dev_warn(&pdev->dev, "Not resetting device.\n"); return -ENODEV; } - if (board_id == 0x409C0E11 || board_id == 0x409D0E11) - return -ENOTSUPP; + + /* if controller is soft- but not hard resettable... */ + if (!ctlr_is_hard_resettable(board_id)) + return -ENOTSUPP; /* try soft reset later. 
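/*
 * Note (not part of the patch): the new write_driver_ver_to_cfgtable() /
 * controller_reset_failed() pair above is a "did the reset actually happen?"
 * probe: the driver stamps its version string into the 32-byte driver_version
 * field of the config table before resetting, and if the identical bytes read
 * back afterwards the board never really reset.  Condensed sketch of the
 * check; the function name is invented and error handling is omitted:
 */
static int reset_took_effect_sketch(struct CfgTable __iomem *cfgtable)
{
        char expected[32], readback[32];
        int i;

        init_driver_version(expected, sizeof(expected));   /* "hpsa <version>" */
        for (i = 0; i < sizeof(readback); i++)
                readback[i] = readb(&cfgtable->driver_version[i]);

        /* equal bytes mean the firmware never cleared the stamp */
        return memcmp(expected, readback, sizeof(expected)) != 0;
}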
*/ /* Save the PCI command register */ pci_read_config_word(pdev, 4, &command_register); @@ -3257,10 +3401,28 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) rc = -ENOMEM; goto unmap_vaddr; } + rc = write_driver_ver_to_cfgtable(cfgtable); + if (rc) + goto unmap_vaddr; - /* If reset via doorbell register is supported, use that. */ + /* If reset via doorbell register is supported, use that. + * There are two such methods. Favor the newest method. + */ misc_fw_support = readl(&cfgtable->misc_fw_support); - use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; + use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2; + if (use_doorbell) { + use_doorbell = DOORBELL_CTLR_RESET2; + } else { + use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; + if (use_doorbell) { + dev_warn(&pdev->dev, "Controller claims that " + "'Bit 2 doorbell reset' is " + "supported, but not 'bit 5 doorbell reset'. " + "Firmware update is recommended.\n"); + rc = -ENOTSUPP; /* try soft reset */ + goto unmap_cfgtable; + } + } rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); if (rc) @@ -3279,30 +3441,32 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) msleep(HPSA_POST_RESET_PAUSE_MSECS); /* Wait for board to become not ready, then ready. */ - dev_info(&pdev->dev, "Waiting for board to become ready.\n"); + dev_info(&pdev->dev, "Waiting for board to reset.\n"); rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY); - if (rc) + if (rc) { dev_warn(&pdev->dev, - "failed waiting for board to become not ready\n"); + "failed waiting for board to reset." + " Will try soft reset.\n"); + rc = -ENOTSUPP; /* Not expected, but try soft reset later */ + goto unmap_cfgtable; + } rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); if (rc) { dev_warn(&pdev->dev, - "failed waiting for board to become ready\n"); + "failed waiting for board to become ready " + "after hard reset\n"); goto unmap_cfgtable; } - dev_info(&pdev->dev, "board ready.\n"); - /* Controller should be in simple mode at this point. If it's not, - * It means we're on one of those controllers which doesn't support - * the doorbell reset method and on which the PCI power management reset - * method doesn't work (P800, for example.) - * In those cases, don't try to proceed, as it generally doesn't work. - */ - active_transport = readl(&cfgtable->TransportActive); - if (active_transport & PERFORMANT_MODE) { - dev_warn(&pdev->dev, "Unable to successfully reset controller," - " Ignoring controller.\n"); - rc = -ENODEV; + rc = controller_reset_failed(vaddr); + if (rc < 0) + goto unmap_cfgtable; + if (rc) { + dev_warn(&pdev->dev, "Unable to successfully reset " + "controller. Will try soft reset.\n"); + rc = -ENOTSUPP; + } else { + dev_info(&pdev->dev, "board ready after hard reset.\n"); } unmap_cfgtable: @@ -3543,6 +3707,9 @@ static int __devinit hpsa_find_cfgtables(struct ctlr_info *h) cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); if (!h->cfgtable) return -ENOMEM; + rc = write_driver_ver_to_cfgtable(h->cfgtable); + if (rc) + return rc; /* Find performant mode table. */ trans_offset = readl(&h->cfgtable->TransMethodOffset); h->transtable = remap_pci_mem(pci_resource_start(h->pdev, @@ -3777,11 +3944,12 @@ static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev) * due to concerns about shared bbwc between 6402/6404 pair. */ if (rc == -ENOTSUPP) - return 0; /* just try to do the kdump anyhow. */ + return rc; /* just try to do the kdump anyhow. 
*/ if (rc) return -ENODEV; /* Now try to get the controller to respond to a no-op */ + dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n"); for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { if (hpsa_noop(pdev) == 0) break; @@ -3792,18 +3960,133 @@ static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev) return 0; } +static __devinit int hpsa_allocate_cmd_pool(struct ctlr_info *h) +{ + h->cmd_pool_bits = kzalloc( + DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) * + sizeof(unsigned long), GFP_KERNEL); + h->cmd_pool = pci_alloc_consistent(h->pdev, + h->nr_cmds * sizeof(*h->cmd_pool), + &(h->cmd_pool_dhandle)); + h->errinfo_pool = pci_alloc_consistent(h->pdev, + h->nr_cmds * sizeof(*h->errinfo_pool), + &(h->errinfo_pool_dhandle)); + if ((h->cmd_pool_bits == NULL) + || (h->cmd_pool == NULL) + || (h->errinfo_pool == NULL)) { + dev_err(&h->pdev->dev, "out of memory in %s", __func__); + return -ENOMEM; + } + return 0; +} + +static void hpsa_free_cmd_pool(struct ctlr_info *h) +{ + kfree(h->cmd_pool_bits); + if (h->cmd_pool) + pci_free_consistent(h->pdev, + h->nr_cmds * sizeof(struct CommandList), + h->cmd_pool, h->cmd_pool_dhandle); + if (h->errinfo_pool) + pci_free_consistent(h->pdev, + h->nr_cmds * sizeof(struct ErrorInfo), + h->errinfo_pool, + h->errinfo_pool_dhandle); +} + +static int hpsa_request_irq(struct ctlr_info *h, + irqreturn_t (*msixhandler)(int, void *), + irqreturn_t (*intxhandler)(int, void *)) +{ + int rc; + + if (h->msix_vector || h->msi_vector) + rc = request_irq(h->intr[h->intr_mode], msixhandler, + IRQF_DISABLED, h->devname, h); + else + rc = request_irq(h->intr[h->intr_mode], intxhandler, + IRQF_DISABLED, h->devname, h); + if (rc) { + dev_err(&h->pdev->dev, "unable to get irq %d for %s\n", + h->intr[h->intr_mode], h->devname); + return -ENODEV; + } + return 0; +} + +static int __devinit hpsa_kdump_soft_reset(struct ctlr_info *h) +{ + if (hpsa_send_host_reset(h, RAID_CTLR_LUNID, + HPSA_RESET_TYPE_CONTROLLER)) { + dev_warn(&h->pdev->dev, "Resetting array controller failed.\n"); + return -EIO; + } + + dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); + if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) { + dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); + return -1; + } + + dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); + if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) { + dev_warn(&h->pdev->dev, "Board failed to become ready " + "after soft reset.\n"); + return -1; + } + + return 0; +} + +static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) +{ + free_irq(h->intr[h->intr_mode], h); +#ifdef CONFIG_PCI_MSI + if (h->msix_vector) + pci_disable_msix(h->pdev); + else if (h->msi_vector) + pci_disable_msi(h->pdev); +#endif /* CONFIG_PCI_MSI */ + hpsa_free_sg_chain_blocks(h); + hpsa_free_cmd_pool(h); + kfree(h->blockFetchTable); + pci_free_consistent(h->pdev, h->reply_pool_size, + h->reply_pool, h->reply_pool_dhandle); + if (h->vaddr) + iounmap(h->vaddr); + if (h->transtable) + iounmap(h->transtable); + if (h->cfgtable) + iounmap(h->cfgtable); + pci_release_regions(h->pdev); + kfree(h); +} + static int __devinit hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int dac, rc; struct ctlr_info *h; + int try_soft_reset = 0; + unsigned long flags; if (number_of_controllers == 0) printk(KERN_INFO DRIVER_NAME "\n"); rc = hpsa_init_reset_devices(pdev); - if (rc) - return rc; + if (rc) { + if (rc != -ENOTSUPP) + return rc; + /* If the reset fails in a 
particular way (it has no way to do + * a proper hard reset, so returns -ENOTSUPP) we can try to do + * a soft reset once we get the controller configured up to the + * point that it can accept a command. + */ + try_soft_reset = 1; + rc = 0; + } + +reinit_after_soft_reset: /* Command structures must be aligned on a 32-byte boundary because * the 5 lower bits of the address are used by the hardware. and by @@ -3847,54 +4130,82 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev, /* make sure the board interrupts are off */ h->access.set_intr_mask(h, HPSA_INTR_OFF); - if (h->msix_vector || h->msi_vector) - rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_msi, - IRQF_DISABLED, h->devname, h); - else - rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_intx, - IRQF_DISABLED, h->devname, h); - if (rc) { - dev_err(&pdev->dev, "unable to get irq %d for %s\n", - h->intr[h->intr_mode], h->devname); + if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx)) goto clean2; - } - dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", h->devname, pdev->device, h->intr[h->intr_mode], dac ? "" : " not"); - - h->cmd_pool_bits = - kmalloc(((h->nr_cmds + BITS_PER_LONG - - 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL); - h->cmd_pool = pci_alloc_consistent(h->pdev, - h->nr_cmds * sizeof(*h->cmd_pool), - &(h->cmd_pool_dhandle)); - h->errinfo_pool = pci_alloc_consistent(h->pdev, - h->nr_cmds * sizeof(*h->errinfo_pool), - &(h->errinfo_pool_dhandle)); - if ((h->cmd_pool_bits == NULL) - || (h->cmd_pool == NULL) - || (h->errinfo_pool == NULL)) { - dev_err(&pdev->dev, "out of memory"); - rc = -ENOMEM; + if (hpsa_allocate_cmd_pool(h)) goto clean4; - } if (hpsa_allocate_sg_chain_blocks(h)) goto clean4; init_waitqueue_head(&h->scan_wait_queue); h->scan_finished = 1; /* no scan currently in progress */ pci_set_drvdata(pdev, h); - memset(h->cmd_pool_bits, 0, - ((h->nr_cmds + BITS_PER_LONG - - 1) / BITS_PER_LONG) * sizeof(unsigned long)); + h->ndevices = 0; + h->scsi_host = NULL; + spin_lock_init(&h->devlock); + hpsa_put_ctlr_into_performant_mode(h); + + /* At this point, the controller is ready to take commands. + * Now, if reset_devices and the hard reset didn't work, try + * the soft reset and see if that works. + */ + if (try_soft_reset) { + + /* This is kind of gross. We may or may not get a completion + * from the soft reset command, and if we do, then the value + * from the fifo may or may not be valid. So, we wait 10 secs + * after the reset throwing away any completions we get during + * that time. Unregister the interrupt handler and register + * fake ones to scoop up any residual completions. + */ + spin_lock_irqsave(&h->lock, flags); + h->access.set_intr_mask(h, HPSA_INTR_OFF); + spin_unlock_irqrestore(&h->lock, flags); + free_irq(h->intr[h->intr_mode], h); + rc = hpsa_request_irq(h, hpsa_msix_discard_completions, + hpsa_intx_discard_completions); + if (rc) { + dev_warn(&h->pdev->dev, "Failed to request_irq after " + "soft reset.\n"); + goto clean4; + } + + rc = hpsa_kdump_soft_reset(h); + if (rc) + /* Neither hard nor soft reset worked, we're hosed. 
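/*
 * Note (not part of the patch): the soft-reset fallback in the hpsa_init_one()
 * hunks around this point works roughly as follows.  When
 * hpsa_init_reset_devices() returns -ENOTSUPP (board not hard-resettable,
 * firmware only advertising the older "bit 2" doorbell reset, or the hard
 * reset apparently not taking effect), try_soft_reset is set and bring-up
 * continues until the controller can accept commands.  Then:
 *   1. interrupts are masked and the real handlers are swapped for the
 *      hpsa_*_discard_completions() handlers;
 *   2. hpsa_send_host_reset(..., HPSA_RESET_TYPE_CONTROLLER) is sent and
 *      hpsa_kdump_soft_reset() waits for BOARD_NOT_READY, then BOARD_READY;
 *   3. interrupts are enabled for about 10 seconds purely to drain stale
 *      completions;
 *   4. controller_reset_failed() re-checks the driver-version stamp;
 *   5. hpsa_undo_allocations_after_kdump_soft_reset() frees everything and
 *      control jumps back to reinit_after_soft_reset to start over.
 */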
*/ + goto clean4; + + dev_info(&h->pdev->dev, "Board READY.\n"); + dev_info(&h->pdev->dev, + "Waiting for stale completions to drain.\n"); + h->access.set_intr_mask(h, HPSA_INTR_ON); + msleep(10000); + h->access.set_intr_mask(h, HPSA_INTR_OFF); + + rc = controller_reset_failed(h->cfgtable); + if (rc) + dev_info(&h->pdev->dev, + "Soft reset appears to have failed.\n"); + + /* since the controller's reset, we have to go back and re-init + * everything. Easiest to just forget what we've done and do it + * all over again. + */ + hpsa_undo_allocations_after_kdump_soft_reset(h); + try_soft_reset = 0; + if (rc) + /* don't go to clean4, we already unallocated */ + return -ENODEV; - hpsa_scsi_setup(h); + goto reinit_after_soft_reset; + } /* Turn the interrupts on so we can service requests */ h->access.set_intr_mask(h, HPSA_INTR_ON); - hpsa_put_ctlr_into_performant_mode(h); hpsa_hba_inquiry(h); hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ h->busy_initializing = 0; @@ -3902,16 +4213,7 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev, clean4: hpsa_free_sg_chain_blocks(h); - kfree(h->cmd_pool_bits); - if (h->cmd_pool) - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(struct CommandList), - h->cmd_pool, h->cmd_pool_dhandle); - if (h->errinfo_pool) - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(struct ErrorInfo), - h->errinfo_pool, - h->errinfo_pool_dhandle); + hpsa_free_cmd_pool(h); free_irq(h->intr[h->intr_mode], h); clean2: clean1: diff --git a/trunk/drivers/scsi/hpsa.h b/trunk/drivers/scsi/hpsa.h index 621a1530054a..6d8dcd4dd06b 100644 --- a/trunk/drivers/scsi/hpsa.h +++ b/trunk/drivers/scsi/hpsa.h @@ -127,10 +127,12 @@ struct ctlr_info { }; #define HPSA_ABORT_MSG 0 #define HPSA_DEVICE_RESET_MSG 1 -#define HPSA_BUS_RESET_MSG 2 -#define HPSA_HOST_RESET_MSG 3 +#define HPSA_RESET_TYPE_CONTROLLER 0x00 +#define HPSA_RESET_TYPE_BUS 0x01 +#define HPSA_RESET_TYPE_TARGET 0x03 +#define HPSA_RESET_TYPE_LUN 0x04 #define HPSA_MSG_SEND_RETRY_LIMIT 10 -#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS 1000 +#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000) /* Maximum time in seconds driver will wait for command completions * when polling before giving up. @@ -155,7 +157,7 @@ struct ctlr_info { * HPSA_BOARD_READY_ITERATIONS are derived from those. 
*/ #define HPSA_BOARD_READY_WAIT_SECS (120) -#define HPSA_BOARD_NOT_READY_WAIT_SECS (10) +#define HPSA_BOARD_NOT_READY_WAIT_SECS (100) #define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100) #define HPSA_BOARD_READY_POLL_INTERVAL \ ((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000) @@ -212,6 +214,7 @@ static void SA5_submit_command(struct ctlr_info *h, dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr, c->Header.Tag.lower); writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); + (void) readl(h->vaddr + SA5_REQUEST_PORT_OFFSET); h->commands_outstanding++; if (h->commands_outstanding > h->max_outstanding) h->max_outstanding = h->commands_outstanding; @@ -227,10 +230,12 @@ static void SA5_intr_mask(struct ctlr_info *h, unsigned long val) if (val) { /* Turn interrupts on */ h->interrupts_enabled = 1; writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); } else { /* Turn them off */ h->interrupts_enabled = 0; writel(SA5_INTR_OFF, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); } } @@ -239,10 +244,12 @@ static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val) if (val) { /* turn on interrupts */ h->interrupts_enabled = 1; writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); } else { h->interrupts_enabled = 0; writel(SA5_PERF_INTR_OFF, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); } } diff --git a/trunk/drivers/scsi/hpsa_cmd.h b/trunk/drivers/scsi/hpsa_cmd.h index 18464900e761..55d741b019db 100644 --- a/trunk/drivers/scsi/hpsa_cmd.h +++ b/trunk/drivers/scsi/hpsa_cmd.h @@ -101,6 +101,7 @@ #define CFGTBL_ChangeReq 0x00000001l #define CFGTBL_AccCmds 0x00000001l #define DOORBELL_CTLR_RESET 0x00000004l +#define DOORBELL_CTLR_RESET2 0x00000020l #define CFGTBL_Trans_Simple 0x00000002l #define CFGTBL_Trans_Performant 0x00000004l @@ -256,14 +257,6 @@ struct ErrorInfo { #define CMD_IOCTL_PEND 0x01 #define CMD_SCSI 0x03 -/* This structure needs to be divisible by 32 for new - * indexing method and performant mode. 
- */ -#define PAD32 32 -#define PAD64DIFF 0 -#define USEEXTRA ((sizeof(void *) - 4)/4) -#define PADSIZE (PAD32 + PAD64DIFF * USEEXTRA) - #define DIRECT_LOOKUP_SHIFT 5 #define DIRECT_LOOKUP_BIT 0x10 #define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1)) @@ -345,6 +338,8 @@ struct CfgTable { u8 reserved[0x78 - 0x58]; u32 misc_fw_support; /* offset 0x78 */ #define MISC_FW_DOORBELL_RESET (0x02) +#define MISC_FW_DOORBELL_RESET2 (0x010) + u8 driver_version[32]; }; #define NUM_BLOCKFETCH_ENTRIES 8 diff --git a/trunk/drivers/scsi/ibmvscsi/ibmvscsi.c b/trunk/drivers/scsi/ibmvscsi/ibmvscsi.c index 041958453e2a..3d391dc3f11f 100644 --- a/trunk/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/trunk/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -1849,8 +1849,7 @@ static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata) rc = ibmvscsi_ops->reset_crq_queue(&hostdata->queue, hostdata); if (!rc) rc = ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0); - if (!rc) - rc = vio_enable_interrupts(to_vio_dev(hostdata->dev)); + vio_enable_interrupts(to_vio_dev(hostdata->dev)); } else if (hostdata->reenable_crq) { smp_rmb(); action = "enable"; diff --git a/trunk/drivers/scsi/in2000.c b/trunk/drivers/scsi/in2000.c index 6568aab745a0..92109b126391 100644 --- a/trunk/drivers/scsi/in2000.c +++ b/trunk/drivers/scsi/in2000.c @@ -343,7 +343,7 @@ static int in2000_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) instance = cmd->device->host; hostdata = (struct IN2000_hostdata *) instance->hostdata; - DB(DB_QUEUE_COMMAND, scmd_printk(KERN_DEBUG, cmd, "Q-%02x-%ld(", cmd->cmnd[0], cmd->serial_number)) + DB(DB_QUEUE_COMMAND, scmd_printk(KERN_DEBUG, cmd, "Q-%02x(", cmd->cmnd[0])) /* Set up a few fields in the Scsi_Cmnd structure for our own use: * - host_scribble is the pointer to the next cmd in the input queue @@ -427,7 +427,7 @@ static int in2000_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) in2000_execute(cmd->device->host); - DB(DB_QUEUE_COMMAND, printk(")Q-%ld ", cmd->serial_number)) + DB(DB_QUEUE_COMMAND, printk(")Q ")) return 0; } @@ -705,7 +705,7 @@ static void in2000_execute(struct Scsi_Host *instance) * to search the input_Q again... */ - DB(DB_EXECUTE, printk("%s%ld)EX-2 ", (cmd->SCp.phase) ? "d:" : "", cmd->serial_number)) + DB(DB_EXECUTE, printk("%s)EX-2 ", (cmd->SCp.phase) ? 
"d:" : "")) } @@ -1149,7 +1149,7 @@ static irqreturn_t in2000_intr(int irqnum, void *dev_id) case CSR_XFER_DONE | PHS_COMMAND: case CSR_UNEXP | PHS_COMMAND: case CSR_SRV_REQ | PHS_COMMAND: - DB(DB_INTR, printk("CMND-%02x,%ld", cmd->cmnd[0], cmd->serial_number)) + DB(DB_INTR, printk("CMND-%02x", cmd->cmnd[0])) transfer_pio(cmd->cmnd, cmd->cmd_len, DATA_OUT_DIR, hostdata); hostdata->state = S_CONNECTED; break; @@ -1191,7 +1191,7 @@ static irqreturn_t in2000_intr(int irqnum, void *dev_id) switch (msg) { case COMMAND_COMPLETE: - DB(DB_INTR, printk("CCMP-%ld", cmd->serial_number)) + DB(DB_INTR, printk("CCMP")) write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK); hostdata->state = S_PRE_CMP_DISC; break; @@ -1329,7 +1329,7 @@ static irqreturn_t in2000_intr(int irqnum, void *dev_id) write_3393(hostdata, WD_SOURCE_ID, SRCID_ER); if (phs == 0x60) { - DB(DB_INTR, printk("SX-DONE-%ld", cmd->serial_number)) + DB(DB_INTR, printk("SX-DONE")) cmd->SCp.Message = COMMAND_COMPLETE; lun = read_3393(hostdata, WD_TARGET_LUN); DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun)) @@ -1350,7 +1350,7 @@ static irqreturn_t in2000_intr(int irqnum, void *dev_id) in2000_execute(instance); } else { - printk("%02x:%02x:%02x-%ld: Unknown SEL_XFER_DONE phase!!---", asr, sr, phs, cmd->serial_number); + printk("%02x:%02x:%02x: Unknown SEL_XFER_DONE phase!!---", asr, sr, phs); } break; @@ -1417,7 +1417,7 @@ static irqreturn_t in2000_intr(int irqnum, void *dev_id) spin_unlock_irqrestore(instance->host_lock, flags); return IRQ_HANDLED; } - DB(DB_INTR, printk("UNEXP_DISC-%ld", cmd->serial_number)) + DB(DB_INTR, printk("UNEXP_DISC")) hostdata->connected = NULL; hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); hostdata->state = S_UNCONNECTED; @@ -1442,7 +1442,7 @@ static irqreturn_t in2000_intr(int irqnum, void *dev_id) */ write_3393(hostdata, WD_SOURCE_ID, SRCID_ER); - DB(DB_INTR, printk("DISC-%ld", cmd->serial_number)) + DB(DB_INTR, printk("DISC")) if (cmd == NULL) { printk(" - Already disconnected! "); hostdata->state = S_UNCONNECTED; @@ -1575,7 +1575,6 @@ static irqreturn_t in2000_intr(int irqnum, void *dev_id) } else hostdata->state = S_CONNECTED; - DB(DB_INTR, printk("-%ld", cmd->serial_number)) break; default: @@ -1704,7 +1703,7 @@ static int __in2000_abort(Scsi_Cmnd * cmd) prev->host_scribble = cmd->host_scribble; cmd->host_scribble = NULL; cmd->result = DID_ABORT << 16; - printk(KERN_WARNING "scsi%d: Abort - removing command %ld from input_Q. ", instance->host_no, cmd->serial_number); + printk(KERN_WARNING "scsi%d: Abort - removing command from input_Q. 
", instance->host_no); cmd->scsi_done(cmd); return SUCCESS; } @@ -1725,7 +1724,7 @@ static int __in2000_abort(Scsi_Cmnd * cmd) if (hostdata->connected == cmd) { - printk(KERN_WARNING "scsi%d: Aborting connected command %ld - ", instance->host_no, cmd->serial_number); + printk(KERN_WARNING "scsi%d: Aborting connected command - ", instance->host_no); printk("sending wd33c93 ABORT command - "); write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED); @@ -2270,7 +2269,7 @@ static int in2000_proc_info(struct Scsi_Host *instance, char *buf, char **start, strcat(bp, "\nconnected: "); if (hd->connected) { cmd = (Scsi_Cmnd *) hd->connected; - sprintf(tbuf, " %ld-%d:%d(%02x)", cmd->serial_number, cmd->device->id, cmd->device->lun, cmd->cmnd[0]); + sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); strcat(bp, tbuf); } } @@ -2278,7 +2277,7 @@ static int in2000_proc_info(struct Scsi_Host *instance, char *buf, char **start, strcat(bp, "\ninput_Q: "); cmd = (Scsi_Cmnd *) hd->input_Q; while (cmd) { - sprintf(tbuf, " %ld-%d:%d(%02x)", cmd->serial_number, cmd->device->id, cmd->device->lun, cmd->cmnd[0]); + sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); strcat(bp, tbuf); cmd = (Scsi_Cmnd *) cmd->host_scribble; } @@ -2287,7 +2286,7 @@ static int in2000_proc_info(struct Scsi_Host *instance, char *buf, char **start, strcat(bp, "\ndisconnected_Q:"); cmd = (Scsi_Cmnd *) hd->disconnected_Q; while (cmd) { - sprintf(tbuf, " %ld-%d:%d(%02x)", cmd->serial_number, cmd->device->id, cmd->device->lun, cmd->cmnd[0]); + sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); strcat(bp, tbuf); cmd = (Scsi_Cmnd *) cmd->host_scribble; } diff --git a/trunk/drivers/scsi/ipr.c b/trunk/drivers/scsi/ipr.c index 0621238fac4a..12868ca46110 100644 --- a/trunk/drivers/scsi/ipr.c +++ b/trunk/drivers/scsi/ipr.c @@ -60,6 +60,7 @@ #include #include #include +#include #include #include #include @@ -2717,13 +2718,18 @@ static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg, unsigned long pci_address, u32 length) { int bytes_copied = 0; - int cur_len, rc, rem_len, rem_page_len; + int cur_len, rc, rem_len, rem_page_len, max_dump_size; __be32 *page; unsigned long lock_flags = 0; struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump; + if (ioa_cfg->sis64) + max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE; + else + max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE; + while (bytes_copied < length && - (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) { + (ioa_dump->hdr.len + bytes_copied) < max_dump_size) { if (ioa_dump->page_offset >= PAGE_SIZE || ioa_dump->page_offset == 0) { page = (__be32 *)__get_free_page(GFP_ATOMIC); @@ -2885,8 +2891,8 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) unsigned long lock_flags = 0; struct ipr_driver_dump *driver_dump = &dump->driver_dump; struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump; - u32 num_entries, start_off, end_off; - u32 bytes_to_copy, bytes_copied, rc; + u32 num_entries, max_num_entries, start_off, end_off; + u32 max_dump_size, bytes_to_copy, bytes_copied, rc; struct ipr_sdt *sdt; int valid = 1; int i; @@ -2947,8 +2953,18 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) on entries in this table */ sdt = &ioa_dump->sdt; + if (ioa_cfg->sis64) { + max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES; + max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE; + } else { + max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES; + max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE; + 
} + + bytes_to_copy = offsetof(struct ipr_sdt, entry) + + (max_num_entries * sizeof(struct ipr_sdt_entry)); rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt, - sizeof(struct ipr_sdt) / sizeof(__be32)); + bytes_to_copy / sizeof(__be32)); /* Smart Dump table is ready to use and the first entry is valid */ if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && @@ -2964,13 +2980,20 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) num_entries = be32_to_cpu(sdt->hdr.num_entries_used); - if (num_entries > IPR_NUM_SDT_ENTRIES) - num_entries = IPR_NUM_SDT_ENTRIES; + if (num_entries > max_num_entries) + num_entries = max_num_entries; + + /* Update dump length to the actual data to be copied */ + dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header); + if (ioa_cfg->sis64) + dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry); + else + dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); for (i = 0; i < num_entries; i++) { - if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) { + if (ioa_dump->hdr.len > max_dump_size) { driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; break; } @@ -2989,7 +3012,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) valid = 0; } if (valid) { - if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) { + if (bytes_to_copy > max_dump_size) { sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY; continue; } @@ -3044,6 +3067,7 @@ static void ipr_release_dump(struct kref *kref) for (i = 0; i < dump->ioa_dump.next_page_index; i++) free_page((unsigned long) dump->ioa_dump.ioa_data[i]); + vfree(dump->ioa_dump.ioa_data); kfree(dump); LEAVE; } @@ -3835,7 +3859,7 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj, struct ipr_dump *dump; unsigned long lock_flags = 0; char *src; - int len; + int len, sdt_end; size_t rc = count; if (!capable(CAP_SYS_ADMIN)) @@ -3875,9 +3899,17 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj, off -= sizeof(dump->driver_dump); - if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) { - if (off + count > offsetof(struct ipr_ioa_dump, ioa_data)) - len = offsetof(struct ipr_ioa_dump, ioa_data) - off; + if (ioa_cfg->sis64) + sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) + + (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) * + sizeof(struct ipr_sdt_entry)); + else + sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) + + (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry)); + + if (count && off < sdt_end) { + if (off + count > sdt_end) + len = sdt_end - off; else len = count; src = (u8 *)&dump->ioa_dump + off; @@ -3887,7 +3919,7 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj, count -= len; } - off -= offsetof(struct ipr_ioa_dump, ioa_data); + off -= sdt_end; while (count) { if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK)) @@ -3916,6 +3948,7 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj, static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg) { struct ipr_dump *dump; + __be32 **ioa_data; unsigned long lock_flags = 0; dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL); @@ -3925,6 +3958,19 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg) return -ENOMEM; } + if (ioa_cfg->sis64) + ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *)); + else + ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *)); + + if (!ioa_data) { + 
ipr_err("Dump memory allocation failed\n"); + kfree(dump); + return -ENOMEM; + } + + dump->ioa_dump.ioa_data = ioa_data; + kref_init(&dump->kref); dump->ioa_cfg = ioa_cfg; @@ -3932,6 +3978,7 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg) if (INACTIVE != ioa_cfg->sdt_state) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + vfree(dump->ioa_dump.ioa_data); kfree(dump); return 0; } @@ -4953,9 +5000,35 @@ static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd) * IRQ_NONE / IRQ_HANDLED **/ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, - volatile u32 int_reg) + u32 int_reg) { irqreturn_t rc = IRQ_HANDLED; + u32 int_mask_reg; + + int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); + int_reg &= ~int_mask_reg; + + /* If an interrupt on the adapter did not occur, ignore it. + * Or in the case of SIS 64, check for a stage change interrupt. + */ + if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) { + if (ioa_cfg->sis64) { + int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; + if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) { + + /* clear stage change */ + writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; + list_del(&ioa_cfg->reset_cmd->queue); + del_timer(&ioa_cfg->reset_cmd->timer); + ipr_reset_ioa_job(ioa_cfg->reset_cmd); + return IRQ_HANDLED; + } + } + + return IRQ_NONE; + } if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { /* Mask the interrupt */ @@ -4968,6 +5041,13 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, list_del(&ioa_cfg->reset_cmd->queue); del_timer(&ioa_cfg->reset_cmd->timer); ipr_reset_ioa_job(ioa_cfg->reset_cmd); + } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) { + if (ipr_debug && printk_ratelimit()) + dev_err(&ioa_cfg->pdev->dev, + "Spurious interrupt detected. 0x%08X\n", int_reg); + writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); + return IRQ_NONE; } else { if (int_reg & IPR_PCII_IOA_UNIT_CHECKED) ioa_cfg->ioa_unit_checked = 1; @@ -5016,10 +5096,11 @@ static irqreturn_t ipr_isr(int irq, void *devp) { struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; unsigned long lock_flags = 0; - volatile u32 int_reg, int_mask_reg; + u32 int_reg = 0; u32 ioasc; u16 cmd_index; int num_hrrq = 0; + int irq_none = 0; struct ipr_cmnd *ipr_cmd; irqreturn_t rc = IRQ_NONE; @@ -5031,33 +5112,6 @@ static irqreturn_t ipr_isr(int irq, void *devp) return IRQ_NONE; } - int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); - int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; - - /* If an interrupt on the adapter did not occur, ignore it. - * Or in the case of SIS 64, check for a stage change interrupt. 
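/*
 * Note (not part of the patch): struct ipr_ioa_dump used to embed a fixed
 * ioa_data[IPR_MAX_NUM_DUMP_PAGES] array sized for the 4MB format-2 dump.
 * 64-bit SIS adapters can dump 32MB (IPR_FMT3_MAX_IOA_DUMP_SIZE), i.e. about
 * 8K page pointers with 4KB pages, so the table is now vmalloc'ed per adapter
 * format and vfree'd in ipr_release_dump().  Condensed fragment mirroring
 * ipr_alloc_dump() above:
 *
 *      __be32 **ioa_data;
 *      int max_pages = ioa_cfg->sis64 ? IPR_FMT3_MAX_NUM_DUMP_PAGES
 *                                     : IPR_FMT2_MAX_NUM_DUMP_PAGES;
 *
 *      ioa_data = vmalloc(max_pages * sizeof(__be32 *));
 */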
- */ - if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) { - if (ioa_cfg->sis64) { - int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); - int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; - if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) { - - /* clear stage change */ - writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); - int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; - list_del(&ioa_cfg->reset_cmd->queue); - del_timer(&ioa_cfg->reset_cmd->timer); - ipr_reset_ioa_job(ioa_cfg->reset_cmd); - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - return IRQ_HANDLED; - } - } - - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - return IRQ_NONE; - } - while (1) { ipr_cmd = NULL; @@ -5097,7 +5151,7 @@ static irqreturn_t ipr_isr(int irq, void *devp) /* Clear the PCI interrupt */ do { writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); - int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); } while (int_reg & IPR_PCII_HRRQ_UPDATED && num_hrrq++ < IPR_MAX_HRRQ_RETRIES); @@ -5107,6 +5161,9 @@ static irqreturn_t ipr_isr(int irq, void *devp) return IRQ_HANDLED; } + } else if (rc == IRQ_NONE && irq_none == 0) { + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); + irq_none++; } else break; } @@ -5143,7 +5200,8 @@ static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg, nseg = scsi_dma_map(scsi_cmd); if (nseg < 0) { - dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n"); + if (printk_ratelimit()) + dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n"); return -1; } @@ -5773,7 +5831,8 @@ static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd, } ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; - ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST; + if (ipr_is_gscsi(res)) + ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST; ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR; ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd); } @@ -7516,7 +7575,7 @@ static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd) static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; - volatile u32 int_reg; + u32 int_reg; ENTER; ioa_cfg->pdev->state_saved = true; @@ -7555,7 +7614,10 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) ipr_cmd->job_step = ipr_reset_enable_ioa; if (GET_DUMP == ioa_cfg->sdt_state) { - ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT); + if (ioa_cfg->sis64) + ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT); + else + ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT); ipr_cmd->job_step = ipr_reset_wait_for_dump; schedule_work(&ioa_cfg->work_q); return IPR_RC_JOB_RETURN; diff --git a/trunk/drivers/scsi/ipr.h b/trunk/drivers/scsi/ipr.h index 13f425fb8851..f93f8637c5a1 100644 --- a/trunk/drivers/scsi/ipr.h +++ b/trunk/drivers/scsi/ipr.h @@ -38,8 +38,8 @@ /* * Literals */ -#define IPR_DRIVER_VERSION "2.5.1" -#define IPR_DRIVER_DATE "(August 10, 2010)" +#define IPR_DRIVER_VERSION "2.5.2" +#define IPR_DRIVER_DATE "(April 27, 2011)" /* * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding @@ -217,7 +217,8 @@ #define IPR_CHECK_FOR_RESET_TIMEOUT (HZ / 10) #define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ) #define IPR_PCI_RESET_TIMEOUT (HZ / 2) -#define IPR_DUMP_TIMEOUT (15 * HZ) +#define IPR_SIS32_DUMP_TIMEOUT (15 * HZ) +#define IPR_SIS64_DUMP_TIMEOUT (40 * HZ) #define IPR_DUMP_DELAY_SECONDS 4 
#define IPR_DUMP_DELAY_TIMEOUT (IPR_DUMP_DELAY_SECONDS * HZ) @@ -285,9 +286,12 @@ IPR_PCII_NO_HOST_RRQ | IPR_PCII_IOARRIN_LOST | IPR_PCII_MMIO_ERROR) /* * Dump literals */ -#define IPR_MAX_IOA_DUMP_SIZE (4 * 1024 * 1024) -#define IPR_NUM_SDT_ENTRIES 511 -#define IPR_MAX_NUM_DUMP_PAGES ((IPR_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1) +#define IPR_FMT2_MAX_IOA_DUMP_SIZE (4 * 1024 * 1024) +#define IPR_FMT3_MAX_IOA_DUMP_SIZE (32 * 1024 * 1024) +#define IPR_FMT2_NUM_SDT_ENTRIES 511 +#define IPR_FMT3_NUM_SDT_ENTRIES 0xFFF +#define IPR_FMT2_MAX_NUM_DUMP_PAGES ((IPR_FMT2_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1) +#define IPR_FMT3_MAX_NUM_DUMP_PAGES ((IPR_FMT3_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1) /* * Misc literals @@ -474,7 +478,7 @@ struct ipr_cmd_pkt { u8 flags_lo; #define IPR_FLAGS_LO_ALIGNED_BFR 0x20 -#define IPR_FLAGS_LO_DELAY_AFTER_RST 0x10 +#define IPR_FLAGS_LO_DELAY_AFTER_RST 0x10 #define IPR_FLAGS_LO_UNTAGGED_TASK 0x00 #define IPR_FLAGS_LO_SIMPLE_TASK 0x02 #define IPR_FLAGS_LO_ORDERED_TASK 0x04 @@ -1164,7 +1168,7 @@ struct ipr_sdt_header { struct ipr_sdt { struct ipr_sdt_header hdr; - struct ipr_sdt_entry entry[IPR_NUM_SDT_ENTRIES]; + struct ipr_sdt_entry entry[IPR_FMT3_NUM_SDT_ENTRIES]; }__attribute__((packed, aligned (4))); struct ipr_uc_sdt { @@ -1608,7 +1612,7 @@ struct ipr_driver_dump { struct ipr_ioa_dump { struct ipr_dump_entry_header hdr; struct ipr_sdt sdt; - __be32 *ioa_data[IPR_MAX_NUM_DUMP_PAGES]; + __be32 **ioa_data; u32 reserved; u32 next_page_index; u32 page_offset; diff --git a/trunk/drivers/scsi/libfc/fc_fcp.c b/trunk/drivers/scsi/libfc/fc_fcp.c index 5b799a37ad09..2a3a4720a771 100644 --- a/trunk/drivers/scsi/libfc/fc_fcp.c +++ b/trunk/drivers/scsi/libfc/fc_fcp.c @@ -57,9 +57,6 @@ static struct kmem_cache *scsi_pkt_cachep; #define FC_SRB_READ (1 << 1) #define FC_SRB_WRITE (1 << 0) -/* constant added to e_d_tov timeout to get rec_tov value */ -#define REC_TOV_CONST 1 - /* * The SCp.ptr should be tested and set under the scsi_pkt_queue lock */ @@ -248,7 +245,7 @@ static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp) /** * fc_fcp_timer_set() - Start a timer for a fcp_pkt * @fsp: The FCP packet to start a timer for - * @delay: The timeout period for the timer + * @delay: The timeout period in jiffies */ static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay) { @@ -335,22 +332,23 @@ static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp) /** * fc_fcp_can_queue_ramp_up() - increases can_queue * @lport: lport to ramp up can_queue - * - * Locking notes: Called with Scsi_Host lock held */ static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport) { struct fc_fcp_internal *si = fc_get_scsi_internal(lport); + unsigned long flags; int can_queue; + spin_lock_irqsave(lport->host->host_lock, flags); + if (si->last_can_queue_ramp_up_time && (time_before(jiffies, si->last_can_queue_ramp_up_time + FC_CAN_QUEUE_PERIOD))) - return; + goto unlock; if (time_before(jiffies, si->last_can_queue_ramp_down_time + FC_CAN_QUEUE_PERIOD)) - return; + goto unlock; si->last_can_queue_ramp_up_time = jiffies; @@ -362,6 +360,9 @@ static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport) lport->host->can_queue = can_queue; shost_printk(KERN_ERR, lport->host, "libfc: increased " "can_queue to %d.\n", can_queue); + +unlock: + spin_unlock_irqrestore(lport->host->host_lock, flags); } /** @@ -373,18 +374,19 @@ static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport) * commands complete or timeout, then try again with a reduced * can_queue. 
Eventually we will hit the point where we run * on all reserved structs. - * - * Locking notes: Called with Scsi_Host lock held */ static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport) { struct fc_fcp_internal *si = fc_get_scsi_internal(lport); + unsigned long flags; int can_queue; + spin_lock_irqsave(lport->host->host_lock, flags); + if (si->last_can_queue_ramp_down_time && (time_before(jiffies, si->last_can_queue_ramp_down_time + FC_CAN_QUEUE_PERIOD))) - return; + goto unlock; si->last_can_queue_ramp_down_time = jiffies; @@ -395,6 +397,9 @@ static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport) lport->host->can_queue = can_queue; shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n" "Reducing can_queue to %d.\n", can_queue); + +unlock: + spin_unlock_irqrestore(lport->host->host_lock, flags); } /* @@ -409,16 +414,13 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport, size_t len) { struct fc_frame *fp; - unsigned long flags; fp = fc_frame_alloc(lport, len); if (likely(fp)) return fp; /* error case */ - spin_lock_irqsave(lport->host->host_lock, flags); fc_fcp_can_queue_ramp_down(lport); - spin_unlock_irqrestore(lport->host->host_lock, flags); return NULL; } @@ -1093,16 +1095,14 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp) /** * get_fsp_rec_tov() - Helper function to get REC_TOV * @fsp: the FCP packet + * + * Returns rec tov in jiffies as rpriv->e_d_tov + 1 second */ static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp) { - struct fc_rport *rport; - struct fc_rport_libfc_priv *rpriv; - - rport = fsp->rport; - rpriv = rport->dd_data; + struct fc_rport_libfc_priv *rpriv = fsp->rport->dd_data; - return rpriv->e_d_tov + REC_TOV_CONST; + return msecs_to_jiffies(rpriv->e_d_tov) + HZ; } /** @@ -1122,7 +1122,6 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp, struct fc_rport_libfc_priv *rpriv; const size_t len = sizeof(fsp->cdb_cmd); int rc = 0; - unsigned int rec_tov; if (fc_fcp_lock_pkt(fsp)) return 0; @@ -1153,12 +1152,9 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp, fsp->seq_ptr = seq; fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */ - rec_tov = get_fsp_rec_tov(fsp); - setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp); - if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED) - fc_fcp_timer_set(fsp, rec_tov); + fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp)); unlock: fc_fcp_unlock_pkt(fsp); @@ -1235,16 +1231,14 @@ static void fc_lun_reset_send(unsigned long data) { struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; struct fc_lport *lport = fsp->lp; - unsigned int rec_tov; if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) { if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY) return; if (fc_fcp_lock_pkt(fsp)) return; - rec_tov = get_fsp_rec_tov(fsp); setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp); - fc_fcp_timer_set(fsp, rec_tov); + fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp)); fc_fcp_unlock_pkt(fsp); } } @@ -1536,12 +1530,11 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) } fc_fcp_srr(fsp, r_ctl, offset); } else if (e_stat & ESB_ST_SEQ_INIT) { - unsigned int rec_tov = get_fsp_rec_tov(fsp); /* * The remote port has the initiative, so just * keep waiting for it to complete. 
*/ - fc_fcp_timer_set(fsp, rec_tov); + fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp)); } else { /* @@ -1705,7 +1698,6 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) { struct fc_fcp_pkt *fsp = arg; struct fc_frame_header *fh; - unsigned int rec_tov; if (IS_ERR(fp)) { fc_fcp_srr_error(fsp, fp); @@ -1732,8 +1724,7 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) switch (fc_frame_payload_op(fp)) { case ELS_LS_ACC: fsp->recov_retry = 0; - rec_tov = get_fsp_rec_tov(fsp); - fc_fcp_timer_set(fsp, rec_tov); + fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp)); break; case ELS_LS_RJT: default: diff --git a/trunk/drivers/scsi/libfc/fc_lport.c b/trunk/drivers/scsi/libfc/fc_lport.c index 906bbcad0e2d..389ab80aef0a 100644 --- a/trunk/drivers/scsi/libfc/fc_lport.c +++ b/trunk/drivers/scsi/libfc/fc_lport.c @@ -1590,7 +1590,6 @@ void fc_lport_enter_flogi(struct fc_lport *lport) */ int fc_lport_config(struct fc_lport *lport) { - INIT_LIST_HEAD(&lport->ema_list); INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout); mutex_init(&lport->lp_mutex); diff --git a/trunk/drivers/scsi/lpfc/lpfc.h b/trunk/drivers/scsi/lpfc/lpfc.h index 60e98a62f308..02d53d89534f 100644 --- a/trunk/drivers/scsi/lpfc/lpfc.h +++ b/trunk/drivers/scsi/lpfc/lpfc.h @@ -805,6 +805,8 @@ struct lpfc_hba { struct dentry *idiag_root; struct dentry *idiag_pci_cfg; struct dentry *idiag_que_info; + struct dentry *idiag_que_acc; + struct dentry *idiag_drb_acc; #endif /* Used for deferred freeing of ELS data buffers */ diff --git a/trunk/drivers/scsi/lpfc/lpfc_bsg.c b/trunk/drivers/scsi/lpfc/lpfc_bsg.c index 77b2871d96b7..37e2a1272f86 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_bsg.c +++ b/trunk/drivers/scsi/lpfc/lpfc_bsg.c @@ -2426,6 +2426,7 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { struct bsg_job_data *dd_data; struct fc_bsg_job *job; + struct lpfc_mbx_nembed_cmd *nembed_sge; uint32_t size; unsigned long flags; uint8_t *to; @@ -2469,9 +2470,8 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) memcpy(to, from, size); } else if ((phba->sli_rev == LPFC_SLI_REV4) && (pmboxq->u.mb.mbxCommand == MBX_SLI4_CONFIG)) { - struct lpfc_mbx_nembed_cmd *nembed_sge = - (struct lpfc_mbx_nembed_cmd *) - &pmboxq->u.mb.un.varWords[0]; + nembed_sge = (struct lpfc_mbx_nembed_cmd *) + &pmboxq->u.mb.un.varWords[0]; from = (uint8_t *)dd_data->context_un.mbox.dmp->dma. virt; @@ -2496,16 +2496,18 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) job->reply_payload.sg_cnt, from, size); job->reply->result = 0; - + /* need to hold the lock until we set job->dd_data to NULL + * to hold off the timeout handler returning to the mid-layer + * while we are still processing the job. 
+ */ job->dd_data = NULL; + dd_data->context_un.mbox.set_job = NULL; + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); job->job_done(job); + } else { + dd_data->context_un.mbox.set_job = NULL; + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); } - dd_data->context_un.mbox.set_job = NULL; - /* need to hold the lock until we call job done to hold off - * the timeout handler returning to the midlayer while - * we are stillprocessing the job - */ - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); kfree(dd_data->context_un.mbox.mb); mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool); @@ -2644,6 +2646,11 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, struct ulp_bde64 *rxbpl = NULL; struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *) job->request->rqst_data.h_vendor.vendor_cmd; + struct READ_EVENT_LOG_VAR *rdEventLog; + uint32_t transmit_length, receive_length, mode; + struct lpfc_mbx_nembed_cmd *nembed_sge; + struct mbox_header *header; + struct ulp_bde64 *bde; uint8_t *ext = NULL; int rc = 0; uint8_t *from; @@ -2651,9 +2658,16 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, /* in case no data is transferred */ job->reply->reply_payload_rcv_len = 0; + /* sanity check to protect driver */ + if (job->reply_payload.payload_len > BSG_MBOX_SIZE || + job->request_payload.payload_len > BSG_MBOX_SIZE) { + rc = -ERANGE; + goto job_done; + } + /* check if requested extended data lengths are valid */ - if ((mbox_req->inExtWLen > MAILBOX_EXT_SIZE) || - (mbox_req->outExtWLen > MAILBOX_EXT_SIZE)) { + if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) || + (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) { rc = -ERANGE; goto job_done; } @@ -2744,8 +2758,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, * use ours */ if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) { - uint32_t transmit_length = pmb->un.varWords[1]; - uint32_t receive_length = pmb->un.varWords[4]; + transmit_length = pmb->un.varWords[1]; + receive_length = pmb->un.varWords[4]; /* transmit length cannot be greater than receive length or * mailbox extension size */ @@ -2795,10 +2809,9 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, from += sizeof(MAILBOX_t); memcpy((uint8_t *)dmp->dma.virt, from, transmit_length); } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) { - struct READ_EVENT_LOG_VAR *rdEventLog = - &pmb->un.varRdEventLog ; - uint32_t receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize; - uint32_t mode = bf_get(lpfc_event_log, rdEventLog); + rdEventLog = &pmb->un.varRdEventLog; + receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize; + mode = bf_get(lpfc_event_log, rdEventLog); /* receive length cannot be greater than mailbox * extension size @@ -2843,7 +2856,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, /* rebuild the command for sli4 using our own buffers * like we do for biu diags */ - uint32_t receive_length = pmb->un.varWords[2]; + receive_length = pmb->un.varWords[2]; /* receive length cannot be greater than mailbox * extension size */ @@ -2879,8 +2892,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys); } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) && pmb->un.varUpdateCfg.co) { - struct ulp_bde64 *bde = - (struct ulp_bde64 *)&pmb->un.varWords[4]; + bde = (struct ulp_bde64 *)&pmb->un.varWords[4]; /* bde size cannot be greater than mailbox ext size */ if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) { @@ -2921,10 +2933,6 
@@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, memcpy((uint8_t *)dmp->dma.virt, from, bde->tus.f.bdeSize); } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) { - struct lpfc_mbx_nembed_cmd *nembed_sge; - struct mbox_header *header; - uint32_t receive_length; - /* rebuild the command for sli4 using our own buffers * like we do for biu diags */ @@ -3386,6 +3394,7 @@ lpfc_menlo_cmd(struct fc_bsg_job *job) job->dd_data = NULL; return rc; } + /** * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job * @job: fc_bsg_job to handle diff --git a/trunk/drivers/scsi/lpfc/lpfc_bsg.h b/trunk/drivers/scsi/lpfc/lpfc_bsg.h index a2c33e7c9152..b542aca6f5ae 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_bsg.h +++ b/trunk/drivers/scsi/lpfc/lpfc_bsg.h @@ -109,3 +109,133 @@ struct menlo_response { uint32_t xri; /* return the xri of the iocb exchange */ }; +/* + * macros and data structures for handling sli-config mailbox command + * pass-through support, this header file is shared between user and + * kernel spaces, note the set of macros are duplicates from lpfc_hw4.h, + * with macro names prefixed with bsg_, as the macros defined in + * lpfc_hw4.h are not accessible from user space. + */ + +/* Macros to deal with bit fields. Each bit field must have 3 #defines + * associated with it (_SHIFT, _MASK, and _WORD). + * EG. For a bit field that is in the 7th bit of the "field4" field of a + * structure and is 2 bits in size the following #defines must exist: + * struct temp { + * uint32_t field1; + * uint32_t field2; + * uint32_t field3; + * uint32_t field4; + * #define example_bit_field_SHIFT 7 + * #define example_bit_field_MASK 0x03 + * #define example_bit_field_WORD field4 + * uint32_t field5; + * }; + * Then the macros below may be used to get or set the value of that field. + * EG. To get the value of the bit field from the above example: + * struct temp t1; + * value = bsg_bf_get(example_bit_field, &t1); + * And then to set that bit field: + * bsg_bf_set(example_bit_field, &t1, 2); + * Or clear that bit field: + * bsg_bf_set(example_bit_field, &t1, 0); + */ +#define bsg_bf_get_le32(name, ptr) \ + ((le32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK) +#define bsg_bf_get(name, ptr) \ + (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK) +#define bsg_bf_set_le32(name, ptr, value) \ + ((ptr)->name##_WORD = cpu_to_le32(((((value) & \ + name##_MASK) << name##_SHIFT) | (le32_to_cpu((ptr)->name##_WORD) & \ + ~(name##_MASK << name##_SHIFT))))) +#define bsg_bf_set(name, ptr, value) \ + ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \ + ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))) + +/* + * The sli_config structure specified here is based on the following + * restriction: + * + * -- SLI_CONFIG EMB=0, carrying MSEs, will carry subcommands without + * carrying HBD. + * -- SLI_CONFIG EMB=1, not carrying MSE, will carry subcommands with or + * without carrying HBDs. 
+ */ + +struct lpfc_sli_config_mse { + uint32_t pa_lo; + uint32_t pa_hi; + uint32_t buf_len; +#define lpfc_mbox_sli_config_mse_len_SHIFT 0 +#define lpfc_mbox_sli_config_mse_len_MASK 0xffffff +#define lpfc_mbox_sli_config_mse_len_WORD buf_len +}; + +struct lpfc_sli_config_subcmd_hbd { + uint32_t buf_len; +#define lpfc_mbox_sli_config_ecmn_hbd_len_SHIFT 0 +#define lpfc_mbox_sli_config_ecmn_hbd_len_MASK 0xffffff +#define lpfc_mbox_sli_config_ecmn_hbd_len_WORD buf_len + uint32_t pa_lo; + uint32_t pa_hi; +}; + +struct lpfc_sli_config_hdr { + uint32_t word1; +#define lpfc_mbox_hdr_emb_SHIFT 0 +#define lpfc_mbox_hdr_emb_MASK 0x00000001 +#define lpfc_mbox_hdr_emb_WORD word1 +#define lpfc_mbox_hdr_mse_cnt_SHIFT 3 +#define lpfc_mbox_hdr_mse_cnt_MASK 0x0000001f +#define lpfc_mbox_hdr_mse_cnt_WORD word1 + uint32_t payload_length; + uint32_t tag_lo; + uint32_t tag_hi; + uint32_t reserved5; +}; + +struct lpfc_sli_config_generic { + struct lpfc_sli_config_hdr sli_config_hdr; +#define LPFC_MBX_SLI_CONFIG_MAX_MSE 19 + struct lpfc_sli_config_mse mse[LPFC_MBX_SLI_CONFIG_MAX_MSE]; +}; + +struct lpfc_sli_config_subcmnd { + struct lpfc_sli_config_hdr sli_config_hdr; + uint32_t word6; +#define lpfc_subcmnd_opcode_SHIFT 0 +#define lpfc_subcmnd_opcode_MASK 0xff +#define lpfc_subcmnd_opcode_WORD word6 +#define lpfc_subcmnd_subsys_SHIFT 8 +#define lpfc_subcmnd_subsys_MASK 0xff +#define lpfc_subcmnd_subsys_WORD word6 + uint32_t timeout; + uint32_t request_length; + uint32_t word9; +#define lpfc_subcmnd_version_SHIFT 0 +#define lpfc_subcmnd_version_MASK 0xff +#define lpfc_subcmnd_version_WORD word9 + uint32_t word10; +#define lpfc_subcmnd_ask_rd_len_SHIFT 0 +#define lpfc_subcmnd_ask_rd_len_MASK 0xffffff +#define lpfc_subcmnd_ask_rd_len_WORD word10 + uint32_t rd_offset; + uint32_t obj_name[26]; + uint32_t hbd_count; +#define LPFC_MBX_SLI_CONFIG_MAX_HBD 10 + struct lpfc_sli_config_subcmd_hbd hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD]; +}; + +struct lpfc_sli_config_mbox { + uint32_t word0; +#define lpfc_mqe_status_SHIFT 16 +#define lpfc_mqe_status_MASK 0x0000FFFF +#define lpfc_mqe_status_WORD word0 +#define lpfc_mqe_command_SHIFT 8 +#define lpfc_mqe_command_MASK 0x000000FF +#define lpfc_mqe_command_WORD word0 + union { + struct lpfc_sli_config_generic sli_config_generic; + struct lpfc_sli_config_subcmnd sli_config_subcmnd; + } un; +}; diff --git a/trunk/drivers/scsi/lpfc/lpfc_debugfs.c b/trunk/drivers/scsi/lpfc/lpfc_debugfs.c index 3d967741c708..c93fca058603 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/trunk/drivers/scsi/lpfc/lpfc_debugfs.c @@ -1119,172 +1119,14 @@ lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file) } /* + * --------------------------------- * iDiag debugfs file access methods - */ - -/* - * iDiag PCI config space register access methods: - * - * The PCI config space register accessees of read, write, read-modify-write - * for set bits, and read-modify-write for clear bits to SLI4 PCI functions - * are provided. In the proper SLI4 PCI function's debugfs iDiag directory, - * - * /sys/kernel/debug/lpfc/fn<#>/iDiag - * - * the access is through the debugfs entry pciCfg: - * - * 1. For PCI config space register read access, there are two read methods: - * A) read a single PCI config space register in the size of a byte - * (8 bits), a word (16 bits), or a dword (32 bits); or B) browse through - * the 4K extended PCI config space. 
- * - * A) Read a single PCI config space register consists of two steps: - * - * Step-1: Set up PCI config space register read command, the command - * syntax is, - * - * echo 1 > pciCfg - * - * where, 1 is the iDiag command for PCI config space read, is the - * offset from the beginning of the device's PCI config space to read from, - * and is the size of PCI config space register data to read back, - * it will be 1 for reading a byte (8 bits), 2 for reading a word (16 bits - * or 2 bytes), or 4 for reading a dword (32 bits or 4 bytes). - * - * Setp-2: Perform the debugfs read operation to execute the idiag command - * set up in Step-1, - * - * cat pciCfg - * - * Examples: - * To read PCI device's vendor-id and device-id from PCI config space, - * - * echo 1 0 4 > pciCfg - * cat pciCfg - * - * To read PCI device's currnt command from config space, - * - * echo 1 4 2 > pciCfg - * cat pciCfg - * - * B) Browse through the entire 4K extended PCI config space also consists - * of two steps: - * - * Step-1: Set up PCI config space register browsing command, the command - * syntax is, - * - * echo 1 0 4096 > pciCfg - * - * where, 1 is the iDiag command for PCI config space read, 0 must be used - * as the offset for PCI config space register browse, and 4096 must be - * used as the count for PCI config space register browse. - * - * Step-2: Repeately issue the debugfs read operation to browse through - * the entire PCI config space registers: - * - * cat pciCfg - * cat pciCfg - * cat pciCfg - * ... - * - * When browsing to the end of the 4K PCI config space, the browse method - * shall wrap around to start reading from beginning again, and again... - * - * 2. For PCI config space register write access, it supports a single PCI - * config space register write in the size of a byte (8 bits), a word - * (16 bits), or a dword (32 bits). The command syntax is, - * - * echo 2 > pciCfg - * - * where, 2 is the iDiag command for PCI config space write, is - * the offset from the beginning of the device's PCI config space to write - * into, is the size of data to write into the PCI config space, - * it will be 1 for writing a byte (8 bits), 2 for writing a word (16 bits - * or 2 bytes), or 4 for writing a dword (32 bits or 4 bytes), and - * is the data to be written into the PCI config space register at the - * offset. - * - * Examples: - * To disable PCI device's interrupt assertion, - * - * 1) Read in device's PCI config space register command field : - * - * echo 1 4 2 > pciCfg - * cat pciCfg - * - * 2) Set bit 10 (Interrupt Disable bit) in the : - * - * = | (1 < 10) - * - * 3) Write the modified command back: - * - * echo 2 4 2 > pciCfg - * - * 3. For PCI config space register set bits access, it supports a single PCI - * config space register set bits in the size of a byte (8 bits), a word - * (16 bits), or a dword (32 bits). The command syntax is, - * - * echo 3 > pciCfg - * - * where, 3 is the iDiag command for PCI config space set bits, is - * the offset from the beginning of the device's PCI config space to set - * bits into, is the size of the bitmask to set into the PCI config - * space, it will be 1 for setting a byte (8 bits), 2 for setting a word - * (16 bits or 2 bytes), or 4 for setting a dword (32 bits or 4 bytes), and - * is the bitmask, indicating the bits to be set into the PCI - * config space register at the offset. The logic performed to the content - * of the PCI config space register, regval, is, - * - * regval |= - * - * 4. 
For PCI config space register clear bits access, it supports a single - * PCI config space register clear bits in the size of a byte (8 bits), - * a word (16 bits), or a dword (32 bits). The command syntax is, - * - * echo 4 > pciCfg - * - * where, 4 is the iDiag command for PCI config space clear bits, - * is the offset from the beginning of the device's PCI config space to - * clear bits from, is the size of the bitmask to set into the PCI - * config space, it will be 1 for setting a byte (8 bits), 2 for setting - * a word(16 bits or 2 bytes), or 4 for setting a dword (32 bits or 4 - * bytes), and is the bitmask, indicating the bits to be cleared - * from the PCI config space register at the offset. the logic performed - * to the content of the PCI config space register, regval, is, - * - * regval &= ~ - * - * Note, for all single register read, write, set bits, or clear bits access, - * the offset () must be aligned with the size of the data: - * - * For data size of byte (8 bits), the offset must be aligned to the byte - * boundary; for data size of word (16 bits), the offset must be aligned - * to the word boundary; while for data size of dword (32 bits), the offset - * must be aligned to the dword boundary. Otherwise, the interface will - * return the error: + * --------------------------------- * - * "-bash: echo: write error: Invalid argument". + * All access methods are through the proper SLI4 PCI function's debugfs + * iDiag directory: * - * For example: - * - * echo 1 2 4 > pciCfg - * -bash: echo: write error: Invalid argument - * - * Note also, all of the numbers in the command fields for all read, write, - * set bits, and clear bits PCI config space register command fields can be - * either decimal or hex. - * - * For example, - * echo 1 0 4096 > pciCfg - * - * will be the same as - * echo 1 0 0x1000 > pciCfg - * - * And, - * echo 2 155 1 10 > pciCfg - * - * will be - * echo 2 0x9b 1 0xa > pciCfg + * /sys/kernel/debug/lpfc/fn<#>/iDiag */ /** @@ -1331,10 +1173,10 @@ static int lpfc_idiag_cmd_get(const char __user *buf, size_t nbytes, for (i = 0; i < LPFC_IDIAG_CMD_DATA_SIZE; i++) { step_str = strsep(&pbuf, "\t "); if (!step_str) - return 0; + return i; idiag_cmd->data[i] = simple_strtol(step_str, NULL, 0); } - return 0; + return i; } /** @@ -1403,7 +1245,7 @@ lpfc_idiag_release(struct inode *inode, struct file *file) * Description: * This routine frees the buffer that was allocated when the debugfs file * was opened. It also reset the fields in the idiag command struct in the - * case the command is not continuous browsing of the data structure. + * case of command for write operation. * * Returns: * This function returns zero. 
@@ -1413,18 +1255,20 @@ lpfc_idiag_cmd_release(struct inode *inode, struct file *file) { struct lpfc_debug *debug = file->private_data; - /* Read PCI config register, if not read all, clear command fields */ - if ((debug->op == LPFC_IDIAG_OP_RD) && - (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD)) - if ((idiag.cmd.data[1] == sizeof(uint8_t)) || - (idiag.cmd.data[1] == sizeof(uint16_t)) || - (idiag.cmd.data[1] == sizeof(uint32_t))) + if (debug->op == LPFC_IDIAG_OP_WR) { + switch (idiag.cmd.opcode) { + case LPFC_IDIAG_CMD_PCICFG_WR: + case LPFC_IDIAG_CMD_PCICFG_ST: + case LPFC_IDIAG_CMD_PCICFG_CL: + case LPFC_IDIAG_CMD_QUEACC_WR: + case LPFC_IDIAG_CMD_QUEACC_ST: + case LPFC_IDIAG_CMD_QUEACC_CL: memset(&idiag, 0, sizeof(idiag)); - - /* Write PCI config register, clear command fields */ - if ((debug->op == LPFC_IDIAG_OP_WR) && - (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)) - memset(&idiag, 0, sizeof(idiag)); + break; + default: + break; + } + } /* Free the buffers to the file operation */ kfree(debug->buffer); @@ -1504,7 +1348,7 @@ lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes, len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, "%03x: %08x\n", where, u32val); break; - case LPFC_PCI_CFG_SIZE: /* browse all */ + case LPFC_PCI_CFG_BROWSE: /* browse all */ goto pcicfg_browse; break; default: @@ -1586,16 +1430,21 @@ lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf, debug->op = LPFC_IDIAG_OP_WR; rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); - if (rc) + if (rc < 0) return rc; if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) { + /* Sanity check on PCI config read command line arguments */ + if (rc != LPFC_PCI_CFG_RD_CMD_ARG) + goto error_out; /* Read command from PCI config space, set up command fields */ where = idiag.cmd.data[0]; count = idiag.cmd.data[1]; - if (count == LPFC_PCI_CFG_SIZE) { - if (where != 0) + if (count == LPFC_PCI_CFG_BROWSE) { + if (where % sizeof(uint32_t)) goto error_out; + /* Starting offset to browse */ + idiag.offset.last_rd = where; } else if ((count != sizeof(uint8_t)) && (count != sizeof(uint16_t)) && (count != sizeof(uint32_t))) @@ -1621,6 +1470,9 @@ lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf, } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR || idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST || idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { + /* Sanity check on PCI config write command line arguments */ + if (rc != LPFC_PCI_CFG_WR_CMD_ARG) + goto error_out; /* Write command to PCI config space, read-modify-write */ where = idiag.cmd.data[0]; count = idiag.cmd.data[1]; @@ -1753,10 +1605,12 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "Slow-path EQ information:\n"); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tID [%02d], EQE-COUNT [%04d], " - "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", + "\tEQID[%02d], " + "QE-COUNT[%04d], QE-SIZE[%04d], " + "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", phba->sli4_hba.sp_eq->queue_id, phba->sli4_hba.sp_eq->entry_count, + phba->sli4_hba.sp_eq->entry_size, phba->sli4_hba.sp_eq->host_index, phba->sli4_hba.sp_eq->hba_index); @@ -1765,10 +1619,12 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, "Fast-path EQ information:\n"); for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) { len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tID [%02d], EQE-COUNT [%04d], " - "HOST-INDEX [%04x], PORT-INDEX 
[%04x]\n", + "\tEQID[%02d], " + "QE-COUNT[%04d], QE-SIZE[%04d], " + "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", phba->sli4_hba.fp_eq[fcp_qidx]->queue_id, phba->sli4_hba.fp_eq[fcp_qidx]->entry_count, + phba->sli4_hba.fp_eq[fcp_qidx]->entry_size, phba->sli4_hba.fp_eq[fcp_qidx]->host_index, phba->sli4_hba.fp_eq[fcp_qidx]->hba_index); } @@ -1776,89 +1632,101 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, /* Get mailbox complete queue information */ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "Mailbox CQ information:\n"); + "Slow-path MBX CQ information:\n"); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tAssociated EQ-ID [%02d]:\n", + "Associated EQID[%02d]:\n", phba->sli4_hba.mbx_cq->assoc_qid); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tID [%02d], CQE-COUNT [%04d], " - "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", + "\tCQID[%02d], " + "QE-COUNT[%04d], QE-SIZE[%04d], " + "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", phba->sli4_hba.mbx_cq->queue_id, phba->sli4_hba.mbx_cq->entry_count, + phba->sli4_hba.mbx_cq->entry_size, phba->sli4_hba.mbx_cq->host_index, phba->sli4_hba.mbx_cq->hba_index); /* Get slow-path complete queue information */ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "Slow-path CQ information:\n"); + "Slow-path ELS CQ information:\n"); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tAssociated EQ-ID [%02d]:\n", + "Associated EQID[%02d]:\n", phba->sli4_hba.els_cq->assoc_qid); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tID [%02d], CQE-COUNT [%04d], " - "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", + "\tCQID [%02d], " + "QE-COUNT[%04d], QE-SIZE[%04d], " + "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", phba->sli4_hba.els_cq->queue_id, phba->sli4_hba.els_cq->entry_count, + phba->sli4_hba.els_cq->entry_size, phba->sli4_hba.els_cq->host_index, phba->sli4_hba.els_cq->hba_index); /* Get fast-path complete queue information */ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "Fast-path CQ information:\n"); + "Fast-path FCP CQ information:\n"); for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) { len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tAssociated EQ-ID [%02d]:\n", + "Associated EQID[%02d]:\n", phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tID [%02d], EQE-COUNT [%04d], " - "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", - phba->sli4_hba.fcp_cq[fcp_qidx]->queue_id, - phba->sli4_hba.fcp_cq[fcp_qidx]->entry_count, - phba->sli4_hba.fcp_cq[fcp_qidx]->host_index, - phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index); + "\tCQID[%02d], " + "QE-COUNT[%04d], QE-SIZE[%04d], " + "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", + phba->sli4_hba.fcp_cq[fcp_qidx]->queue_id, + phba->sli4_hba.fcp_cq[fcp_qidx]->entry_count, + phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size, + phba->sli4_hba.fcp_cq[fcp_qidx]->host_index, + phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index); } len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); /* Get mailbox queue information */ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "Mailbox MQ information:\n"); + "Slow-path MBX MQ information:\n"); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tAssociated CQ-ID [%02d]:\n", + "Associated CQID[%02d]:\n", phba->sli4_hba.mbx_wq->assoc_qid); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tID [%02d], MQE-COUNT [%04d], " - "HOST-INDEX [%04x], 
PORT-INDEX [%04x]\n\n", + "\tWQID[%02d], " + "QE-COUNT[%04d], QE-SIZE[%04d], " + "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", phba->sli4_hba.mbx_wq->queue_id, phba->sli4_hba.mbx_wq->entry_count, + phba->sli4_hba.mbx_wq->entry_size, phba->sli4_hba.mbx_wq->host_index, phba->sli4_hba.mbx_wq->hba_index); /* Get slow-path work queue information */ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "Slow-path WQ information:\n"); + "Slow-path ELS WQ information:\n"); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tAssociated CQ-ID [%02d]:\n", + "Associated CQID[%02d]:\n", phba->sli4_hba.els_wq->assoc_qid); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tID [%02d], WQE-COUNT [%04d], " - "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", + "\tWQID[%02d], " + "QE-COUNT[%04d], QE-SIZE[%04d], " + "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", phba->sli4_hba.els_wq->queue_id, phba->sli4_hba.els_wq->entry_count, + phba->sli4_hba.els_wq->entry_size, phba->sli4_hba.els_wq->host_index, phba->sli4_hba.els_wq->hba_index); /* Get fast-path work queue information */ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "Fast-path WQ information:\n"); + "Fast-path FCP WQ information:\n"); for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) { len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tAssociated CQ-ID [%02d]:\n", + "Associated CQID[%02d]:\n", phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tID [%02d], WQE-COUNT [%04d], " - "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", + "\tWQID[%02d], " + "QE-COUNT[%04d], WQE-SIZE[%04d], " + "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", phba->sli4_hba.fcp_wq[fcp_qidx]->queue_id, phba->sli4_hba.fcp_wq[fcp_qidx]->entry_count, + phba->sli4_hba.fcp_wq[fcp_qidx]->entry_size, phba->sli4_hba.fcp_wq[fcp_qidx]->host_index, phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index); } @@ -1868,26 +1736,597 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "Slow-path RQ information:\n"); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\t\tAssociated CQ-ID [%02d]:\n", + "Associated CQID[%02d]:\n", phba->sli4_hba.hdr_rq->assoc_qid); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tID [%02d], RHQE-COUNT [%04d], " - "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", + "\tHQID[%02d], " + "QE-COUNT[%04d], QE-SIZE[%04d], " + "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", phba->sli4_hba.hdr_rq->queue_id, phba->sli4_hba.hdr_rq->entry_count, + phba->sli4_hba.hdr_rq->entry_size, phba->sli4_hba.hdr_rq->host_index, phba->sli4_hba.hdr_rq->hba_index); len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, - "\tID [%02d], RDQE-COUNT [%04d], " - "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", + "\tDQID[%02d], " + "QE-COUNT[%04d], QE-SIZE[%04d], " + "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", phba->sli4_hba.dat_rq->queue_id, phba->sli4_hba.dat_rq->entry_count, + phba->sli4_hba.dat_rq->entry_size, phba->sli4_hba.dat_rq->host_index, phba->sli4_hba.dat_rq->hba_index); return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); } +/** + * lpfc_idiag_que_param_check - queue access command parameter sanity check + * @q: The pointer to queue structure. + * @index: The index into a queue entry. + * @count: The number of queue entries to access. + * + * Description: + * The routine performs sanity check on device queue access method commands. 
+ * + * Returns: + * This function returns -EINVAL when fails the sanity check, otherwise, it + * returns 0. + **/ +static int +lpfc_idiag_que_param_check(struct lpfc_queue *q, int index, int count) +{ + /* Only support single entry read or browsing */ + if ((count != 1) && (count != LPFC_QUE_ACC_BROWSE)) + return -EINVAL; + if (index > q->entry_count - 1) + return -EINVAL; + return 0; +} + +/** + * lpfc_idiag_queacc_read_qe - read a single entry from the given queue index + * @pbuffer: The pointer to buffer to copy the read data into. + * @pque: The pointer to the queue to be read. + * @index: The index into the queue entry. + * + * Description: + * This routine reads out a single entry from the given queue's index location + * and copies it into the buffer provided. + * + * Returns: + * This function returns 0 when it fails, otherwise, it returns the length of + * the data read into the buffer provided. + **/ +static int +lpfc_idiag_queacc_read_qe(char *pbuffer, int len, struct lpfc_queue *pque, + uint32_t index) +{ + int offset, esize; + uint32_t *pentry; + + if (!pbuffer || !pque) + return 0; + + esize = pque->entry_size; + len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, + "QE-INDEX[%04d]:\n", index); + + offset = 0; + pentry = pque->qe[index].address; + while (esize > 0) { + len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, + "%08x ", *pentry); + pentry++; + offset += sizeof(uint32_t); + esize -= sizeof(uint32_t); + if (esize > 0 && !(offset % (4 * sizeof(uint32_t)))) + len += snprintf(pbuffer+len, + LPFC_QUE_ACC_BUF_SIZE-len, "\n"); + } + len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "\n"); + + return len; +} + +/** + * lpfc_idiag_queacc_read - idiag debugfs read port queue + * @file: The file pointer to read from. + * @buf: The buffer to copy the data to. + * @nbytes: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads data from the @phba device queue memory according to the + * idiag command, and copies to user @buf. Depending on the queue dump read + * command setup, it does either a single queue entry read or browing through + * all entries of the queue. + * + * Returns: + * This function returns the amount of data that was read (this could be less + * than @nbytes if the end of the file was reached) or a negative error value. 
+ **/ +static ssize_t +lpfc_idiag_queacc_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + uint32_t last_index, index, count; + struct lpfc_queue *pque = NULL; + char *pbuffer; + int len = 0; + + /* This is a user read operation */ + debug->op = LPFC_IDIAG_OP_RD; + + if (!debug->buffer) + debug->buffer = kmalloc(LPFC_QUE_ACC_BUF_SIZE, GFP_KERNEL); + if (!debug->buffer) + return 0; + pbuffer = debug->buffer; + + if (*ppos) + return 0; + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) { + index = idiag.cmd.data[2]; + count = idiag.cmd.data[3]; + pque = (struct lpfc_queue *)idiag.ptr_private; + } else + return 0; + + /* Browse the queue starting from index */ + if (count == LPFC_QUE_ACC_BROWSE) + goto que_browse; + + /* Read a single entry from the queue */ + len = lpfc_idiag_queacc_read_qe(pbuffer, len, pque, index); + + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); + +que_browse: + + /* Browse all entries from the queue */ + last_index = idiag.offset.last_rd; + index = last_index; + + while (len < LPFC_QUE_ACC_SIZE - pque->entry_size) { + len = lpfc_idiag_queacc_read_qe(pbuffer, len, pque, index); + index++; + if (index > pque->entry_count - 1) + break; + } + + /* Set up the offset for next portion of pci cfg read */ + if (index > pque->entry_count - 1) + index = 0; + idiag.offset.last_rd = index; + + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); +} + +/** + * lpfc_idiag_queacc_write - Syntax check and set up idiag queacc commands + * @file: The file pointer to read from. + * @buf: The buffer to copy the user data from. + * @nbytes: The number of bytes to get. + * @ppos: The position in the file to start reading from. + * + * This routine get the debugfs idiag command struct from user space and then + * perform the syntax check for port queue read (dump) or write (set) command + * accordingly. In the case of port queue read command, it sets up the command + * in the idiag command struct for the following debugfs read operation. In + * the case of port queue write operation, it executes the write operation + * into the port queue entry accordingly. + * + * It returns the @nbytges passing in from debugfs user space when successful. + * In case of error conditions, it returns proper error code back to the user + * space. 
+ **/ +static ssize_t +lpfc_idiag_queacc_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + uint32_t qidx, quetp, queid, index, count, offset, value; + uint32_t *pentry; + struct lpfc_queue *pque; + int rc; + + /* This is a user write operation */ + debug->op = LPFC_IDIAG_OP_WR; + + rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); + if (rc < 0) + return rc; + + /* Get and sanity check on command feilds */ + quetp = idiag.cmd.data[0]; + queid = idiag.cmd.data[1]; + index = idiag.cmd.data[2]; + count = idiag.cmd.data[3]; + offset = idiag.cmd.data[4]; + value = idiag.cmd.data[5]; + + /* Sanity check on command line arguments */ + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR || + idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST || + idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL) { + if (rc != LPFC_QUE_ACC_WR_CMD_ARG) + goto error_out; + if (count != 1) + goto error_out; + } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) { + if (rc != LPFC_QUE_ACC_RD_CMD_ARG) + goto error_out; + } else + goto error_out; + + switch (quetp) { + case LPFC_IDIAG_EQ: + /* Slow-path event queue */ + if (phba->sli4_hba.sp_eq->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + phba->sli4_hba.sp_eq, index, count); + if (rc) + goto error_out; + idiag.ptr_private = phba->sli4_hba.sp_eq; + goto pass_check; + } + /* Fast-path event queue */ + for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) { + if (phba->sli4_hba.fp_eq[qidx]->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + phba->sli4_hba.fp_eq[qidx], + index, count); + if (rc) + goto error_out; + idiag.ptr_private = phba->sli4_hba.fp_eq[qidx]; + goto pass_check; + } + } + goto error_out; + break; + case LPFC_IDIAG_CQ: + /* MBX complete queue */ + if (phba->sli4_hba.mbx_cq->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + phba->sli4_hba.mbx_cq, index, count); + if (rc) + goto error_out; + idiag.ptr_private = phba->sli4_hba.mbx_cq; + goto pass_check; + } + /* ELS complete queue */ + if (phba->sli4_hba.els_cq->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + phba->sli4_hba.els_cq, index, count); + if (rc) + goto error_out; + idiag.ptr_private = phba->sli4_hba.els_cq; + goto pass_check; + } + /* FCP complete queue */ + for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) { + if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + phba->sli4_hba.fcp_cq[qidx], + index, count); + if (rc) + goto error_out; + idiag.ptr_private = + phba->sli4_hba.fcp_cq[qidx]; + goto pass_check; + } + } + goto error_out; + break; + case LPFC_IDIAG_MQ: + /* MBX work queue */ + if (phba->sli4_hba.mbx_wq->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + phba->sli4_hba.mbx_wq, index, count); + if (rc) + goto error_out; + idiag.ptr_private = phba->sli4_hba.mbx_wq; + goto pass_check; + } + break; + case LPFC_IDIAG_WQ: + /* ELS work queue */ + if (phba->sli4_hba.els_wq->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + phba->sli4_hba.els_wq, index, count); + if (rc) + goto error_out; + idiag.ptr_private = phba->sli4_hba.els_wq; + goto pass_check; + } + /* FCP work queue */ + for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) { + if (phba->sli4_hba.fcp_wq[qidx]->queue_id == queid) { + /* Sanity check */ + rc = 
lpfc_idiag_que_param_check( + phba->sli4_hba.fcp_wq[qidx], + index, count); + if (rc) + goto error_out; + idiag.ptr_private = + phba->sli4_hba.fcp_wq[qidx]; + goto pass_check; + } + } + goto error_out; + break; + case LPFC_IDIAG_RQ: + /* HDR queue */ + if (phba->sli4_hba.hdr_rq->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + phba->sli4_hba.hdr_rq, index, count); + if (rc) + goto error_out; + idiag.ptr_private = phba->sli4_hba.hdr_rq; + goto pass_check; + } + /* DAT queue */ + if (phba->sli4_hba.dat_rq->queue_id == queid) { + /* Sanity check */ + rc = lpfc_idiag_que_param_check( + phba->sli4_hba.dat_rq, index, count); + if (rc) + goto error_out; + idiag.ptr_private = phba->sli4_hba.dat_rq; + goto pass_check; + } + goto error_out; + break; + default: + goto error_out; + break; + } + +pass_check: + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) { + if (count == LPFC_QUE_ACC_BROWSE) + idiag.offset.last_rd = index; + } + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR || + idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST || + idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL) { + /* Additional sanity checks on write operation */ + pque = (struct lpfc_queue *)idiag.ptr_private; + if (offset > pque->entry_size/sizeof(uint32_t) - 1) + goto error_out; + pentry = pque->qe[index].address; + pentry += offset; + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR) + *pentry = value; + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST) + *pentry |= value; + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL) + *pentry &= ~value; + } + return nbytes; + +error_out: + /* Clean out command structure on command error out */ + memset(&idiag, 0, sizeof(idiag)); + return -EINVAL; +} + +/** + * lpfc_idiag_drbacc_read_reg - idiag debugfs read a doorbell register + * @phba: The pointer to hba structure. + * @pbuffer: The pointer to the buffer to copy the data to. + * @len: The lenght of bytes to copied. + * @drbregid: The id to doorbell registers. + * + * Description: + * This routine reads a doorbell register and copies its content to the + * user buffer pointed to by @pbuffer. + * + * Returns: + * This function returns the amount of data that was copied into @pbuffer. + **/ +static int +lpfc_idiag_drbacc_read_reg(struct lpfc_hba *phba, char *pbuffer, + int len, uint32_t drbregid) +{ + + if (!pbuffer) + return 0; + + switch (drbregid) { + case LPFC_DRB_EQCQ: + len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len, + "EQCQ-DRB-REG: 0x%08x\n", + readl(phba->sli4_hba.EQCQDBregaddr)); + break; + case LPFC_DRB_MQ: + len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len, + "MQ-DRB-REG: 0x%08x\n", + readl(phba->sli4_hba.MQDBregaddr)); + break; + case LPFC_DRB_WQ: + len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len, + "WQ-DRB-REG: 0x%08x\n", + readl(phba->sli4_hba.WQDBregaddr)); + break; + case LPFC_DRB_RQ: + len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len, + "RQ-DRB-REG: 0x%08x\n", + readl(phba->sli4_hba.RQDBregaddr)); + break; + default: + break; + } + + return len; +} + +/** + * lpfc_idiag_drbacc_read - idiag debugfs read port doorbell + * @file: The file pointer to read from. + * @buf: The buffer to copy the data to. + * @nbytes: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads data from the @phba device doorbell register according + * to the idiag command, and copies to user @buf. 
Depending on the doorbell + * register read command setup, it does either a single doorbell register + * read or dump all doorbell registers. + * + * Returns: + * This function returns the amount of data that was read (this could be less + * than @nbytes if the end of the file was reached) or a negative error value. + **/ +static ssize_t +lpfc_idiag_drbacc_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + uint32_t drb_reg_id, i; + char *pbuffer; + int len = 0; + + /* This is a user read operation */ + debug->op = LPFC_IDIAG_OP_RD; + + if (!debug->buffer) + debug->buffer = kmalloc(LPFC_DRB_ACC_BUF_SIZE, GFP_KERNEL); + if (!debug->buffer) + return 0; + pbuffer = debug->buffer; + + if (*ppos) + return 0; + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD) + drb_reg_id = idiag.cmd.data[0]; + else + return 0; + + if (drb_reg_id == LPFC_DRB_ACC_ALL) + for (i = 1; i <= LPFC_DRB_MAX; i++) + len = lpfc_idiag_drbacc_read_reg(phba, + pbuffer, len, i); + else + len = lpfc_idiag_drbacc_read_reg(phba, + pbuffer, len, drb_reg_id); + + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); +} + +/** + * lpfc_idiag_drbacc_write - Syntax check and set up idiag drbacc commands + * @file: The file pointer to read from. + * @buf: The buffer to copy the user data from. + * @nbytes: The number of bytes to get. + * @ppos: The position in the file to start reading from. + * + * This routine get the debugfs idiag command struct from user space and then + * perform the syntax check for port doorbell register read (dump) or write + * (set) command accordingly. In the case of port queue read command, it sets + * up the command in the idiag command struct for the following debugfs read + * operation. In the case of port doorbell register write operation, it + * executes the write operation into the port doorbell register accordingly. + * + * It returns the @nbytges passing in from debugfs user space when successful. + * In case of error conditions, it returns proper error code back to the user + * space. 
+ **/ +static ssize_t +lpfc_idiag_drbacc_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + uint32_t drb_reg_id, value, reg_val; + void __iomem *drb_reg; + int rc; + + /* This is a user write operation */ + debug->op = LPFC_IDIAG_OP_WR; + + rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); + if (rc < 0) + return rc; + + /* Sanity check on command line arguments */ + drb_reg_id = idiag.cmd.data[0]; + value = idiag.cmd.data[1]; + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR || + idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST || + idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) { + if (rc != LPFC_DRB_ACC_WR_CMD_ARG) + goto error_out; + if (drb_reg_id > LPFC_DRB_MAX) + goto error_out; + } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD) { + if (rc != LPFC_DRB_ACC_RD_CMD_ARG) + goto error_out; + if ((drb_reg_id > LPFC_DRB_MAX) && + (drb_reg_id != LPFC_DRB_ACC_ALL)) + goto error_out; + } else + goto error_out; + + /* Perform the write access operation */ + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR || + idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST || + idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) { + switch (drb_reg_id) { + case LPFC_DRB_EQCQ: + drb_reg = phba->sli4_hba.EQCQDBregaddr; + break; + case LPFC_DRB_MQ: + drb_reg = phba->sli4_hba.MQDBregaddr; + break; + case LPFC_DRB_WQ: + drb_reg = phba->sli4_hba.WQDBregaddr; + break; + case LPFC_DRB_RQ: + drb_reg = phba->sli4_hba.RQDBregaddr; + break; + default: + goto error_out; + } + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR) + reg_val = value; + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST) { + reg_val = readl(drb_reg); + reg_val |= value; + } + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) { + reg_val = readl(drb_reg); + reg_val &= ~value; + } + writel(reg_val, drb_reg); + readl(drb_reg); /* flush */ + } + return nbytes; + +error_out: + /* Clean out command structure on command error out */ + memset(&idiag, 0, sizeof(idiag)); + return -EINVAL; +} + #undef lpfc_debugfs_op_disc_trc static const struct file_operations lpfc_debugfs_op_disc_trc = { .owner = THIS_MODULE, @@ -1986,6 +2425,26 @@ static const struct file_operations lpfc_idiag_op_queInfo = { .release = lpfc_idiag_release, }; +#undef lpfc_idiag_op_queacc +static const struct file_operations lpfc_idiag_op_queAcc = { + .owner = THIS_MODULE, + .open = lpfc_idiag_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_idiag_queacc_read, + .write = lpfc_idiag_queacc_write, + .release = lpfc_idiag_cmd_release, +}; + +#undef lpfc_idiag_op_drbacc +static const struct file_operations lpfc_idiag_op_drbAcc = { + .owner = THIS_MODULE, + .open = lpfc_idiag_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_idiag_drbacc_read, + .write = lpfc_idiag_drbacc_write, + .release = lpfc_idiag_cmd_release, +}; + #endif /** @@ -2261,6 +2720,32 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) } } + /* iDiag access PCI function queue */ + snprintf(name, sizeof(name), "queAcc"); + if (!phba->idiag_que_acc) { + phba->idiag_que_acc = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->idiag_root, phba, &lpfc_idiag_op_queAcc); + if (!phba->idiag_que_acc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "2926 Can't create idiag debugfs\n"); + goto debug_failed; + } + } + + /* iDiag access PCI function doorbell registers */ + snprintf(name, sizeof(name), "drbAcc"); + if (!phba->idiag_drb_acc) { + phba->idiag_drb_acc = + 
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->idiag_root, phba, &lpfc_idiag_op_drbAcc); + if (!phba->idiag_drb_acc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "2927 Can't create idiag debugfs\n"); + goto debug_failed; + } + } + debug_failed: return; #endif @@ -2339,6 +2824,16 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) * iDiag release */ if (phba->sli_rev == LPFC_SLI_REV4) { + if (phba->idiag_drb_acc) { + /* iDiag drbAcc */ + debugfs_remove(phba->idiag_drb_acc); + phba->idiag_drb_acc = NULL; + } + if (phba->idiag_que_acc) { + /* iDiag queAcc */ + debugfs_remove(phba->idiag_que_acc); + phba->idiag_que_acc = NULL; + } if (phba->idiag_que_info) { /* iDiag queInfo */ debugfs_remove(phba->idiag_que_info); diff --git a/trunk/drivers/scsi/lpfc/lpfc_debugfs.h b/trunk/drivers/scsi/lpfc/lpfc_debugfs.h index 91b9a9427cda..6525a5e62d27 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_debugfs.h +++ b/trunk/drivers/scsi/lpfc/lpfc_debugfs.h @@ -39,13 +39,42 @@ /* hbqinfo output buffer size */ #define LPFC_HBQINFO_SIZE 8192 -/* rdPciConf output buffer size */ +/* pciConf */ +#define LPFC_PCI_CFG_BROWSE 0xffff +#define LPFC_PCI_CFG_RD_CMD_ARG 2 +#define LPFC_PCI_CFG_WR_CMD_ARG 3 #define LPFC_PCI_CFG_SIZE 4096 #define LPFC_PCI_CFG_RD_BUF_SIZE (LPFC_PCI_CFG_SIZE/2) #define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4) -/* queue info output buffer size */ -#define LPFC_QUE_INFO_GET_BUF_SIZE 2048 +/* queue info */ +#define LPFC_QUE_INFO_GET_BUF_SIZE 4096 + +/* queue acc */ +#define LPFC_QUE_ACC_BROWSE 0xffff +#define LPFC_QUE_ACC_RD_CMD_ARG 4 +#define LPFC_QUE_ACC_WR_CMD_ARG 6 +#define LPFC_QUE_ACC_BUF_SIZE 4096 +#define LPFC_QUE_ACC_SIZE (LPFC_QUE_ACC_BUF_SIZE/2) + +#define LPFC_IDIAG_EQ 1 +#define LPFC_IDIAG_CQ 2 +#define LPFC_IDIAG_MQ 3 +#define LPFC_IDIAG_WQ 4 +#define LPFC_IDIAG_RQ 5 + +/* doorbell acc */ +#define LPFC_DRB_ACC_ALL 0xffff +#define LPFC_DRB_ACC_RD_CMD_ARG 1 +#define LPFC_DRB_ACC_WR_CMD_ARG 2 +#define LPFC_DRB_ACC_BUF_SIZE 256 + +#define LPFC_DRB_EQCQ 1 +#define LPFC_DRB_MQ 2 +#define LPFC_DRB_WQ 3 +#define LPFC_DRB_RQ 4 + +#define LPFC_DRB_MAX 4 #define SIZE_U8 sizeof(uint8_t) #define SIZE_U16 sizeof(uint16_t) @@ -73,13 +102,23 @@ struct lpfc_idiag_offset { uint32_t last_rd; }; -#define LPFC_IDIAG_CMD_DATA_SIZE 4 +#define LPFC_IDIAG_CMD_DATA_SIZE 8 struct lpfc_idiag_cmd { uint32_t opcode; #define LPFC_IDIAG_CMD_PCICFG_RD 0x00000001 #define LPFC_IDIAG_CMD_PCICFG_WR 0x00000002 #define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003 #define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004 + +#define LPFC_IDIAG_CMD_QUEACC_RD 0x00000011 +#define LPFC_IDIAG_CMD_QUEACC_WR 0x00000012 +#define LPFC_IDIAG_CMD_QUEACC_ST 0x00000013 +#define LPFC_IDIAG_CMD_QUEACC_CL 0x00000014 + +#define LPFC_IDIAG_CMD_DRBACC_RD 0x00000021 +#define LPFC_IDIAG_CMD_DRBACC_WR 0x00000022 +#define LPFC_IDIAG_CMD_DRBACC_ST 0x00000023 +#define LPFC_IDIAG_CMD_DRBACC_CL 0x00000024 uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE]; }; @@ -87,6 +126,7 @@ struct lpfc_idiag { uint32_t active; struct lpfc_idiag_cmd cmd; struct lpfc_idiag_offset offset; + void *ptr_private; }; #endif diff --git a/trunk/drivers/scsi/lpfc/lpfc_els.c b/trunk/drivers/scsi/lpfc/lpfc_els.c index d34b69f9cdb1..e2c452467c8b 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_els.c +++ b/trunk/drivers/scsi/lpfc/lpfc_els.c @@ -670,6 +670,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * Driver needs to re-reg VPI in order for f/w * to update the MAC address. 
*/ + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); lpfc_register_new_vport(phba, vport, ndlp); return 0; } @@ -869,8 +870,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, */ if ((phba->hba_flag & HBA_FIP_SUPPORT) && (phba->fcf.fcf_flag & FCF_DISCOVERY) && - (irsp->ulpStatus != IOSTAT_LOCAL_REJECT) && - (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) { + !((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && + (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) { lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, "2611 FLOGI failed on FCF (x%x), " "status:x%x/x%x, tmo:x%x, perform " @@ -1085,14 +1086,15 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (sp->cmn.fcphHigh < FC_PH3) sp->cmn.fcphHigh = FC_PH3; - if ((phba->sli_rev == LPFC_SLI_REV4) && - (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == - LPFC_SLI_INTF_IF_TYPE_0)) { - elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1); - elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1); - /* FLOGI needs to be 3 for WQE FCFI */ - /* Set the fcfi to the fcfi we registered with */ - elsiocb->iocb.ulpContext = phba->fcf.fcfi; + if (phba->sli_rev == LPFC_SLI_REV4) { + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_IF_TYPE_0) { + elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1); + elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1); + /* FLOGI needs to be 3 for WQE FCFI */ + /* Set the fcfi to the fcfi we registered with */ + elsiocb->iocb.ulpContext = phba->fcf.fcfi; + } } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { sp->cmn.request_multiple_Nport = 1; /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ @@ -4107,13 +4109,13 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport, pcmd += sizeof(uint32_t); rrq = (struct RRQ *)pcmd; rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); - rxid = be16_to_cpu(bf_get(rrq_rxid, rrq)); + rxid = bf_get(rrq_rxid, rrq); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" " x%x x%x\n", be32_to_cpu(bf_get(rrq_did, rrq)), - be16_to_cpu(bf_get(rrq_oxid, rrq)), + bf_get(rrq_oxid, rrq), rxid, iocb->iotag, iocb->iocb.ulpContext); @@ -4121,7 +4123,7 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport, "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) - xri = be16_to_cpu(bf_get(rrq_oxid, rrq)); + xri = bf_get(rrq_oxid, rrq); else xri = rxid; prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); @@ -7290,8 +7292,9 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_vport *vport = cmdiocb->vport; IOCB_t *irsp; struct lpfc_nodelist *ndlp; - ndlp = (struct lpfc_nodelist *)cmdiocb->context1; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + ndlp = (struct lpfc_nodelist *)cmdiocb->context1; irsp = &rspiocb->iocb; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "LOGO npiv cmpl: status:x%x/x%x did:x%x", @@ -7302,6 +7305,19 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Trigger the release of the ndlp after logo */ lpfc_nlp_put(ndlp); + + /* NPIV LOGO completes to NPort */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "2928 NPIV LOGO completes to NPort x%x " + "Data: x%x x%x x%x x%x\n", + ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], + irsp->ulpTimeout, vport->num_disc_nodes); + + if (irsp->ulpStatus == IOSTAT_SUCCESS) { + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_FABRIC; + spin_unlock_irq(shost->host_lock); + } 
} /** diff --git a/trunk/drivers/scsi/lpfc/lpfc_hbadisc.c b/trunk/drivers/scsi/lpfc/lpfc_hbadisc.c index 301498301a8f..7a35df5e2038 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/trunk/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -3569,6 +3569,10 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) "rport add: did:x%x flg:x%x type x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); + /* Don't add the remote port if unloading. */ + if (vport->load_flag & FC_UNLOADING) + return; + ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids); if (!rport || !get_device(&rport->dev)) { dev_printk(KERN_WARNING, &phba->pcidev->dev, diff --git a/trunk/drivers/scsi/lpfc/lpfc_hw4.h b/trunk/drivers/scsi/lpfc/lpfc_hw4.h index 8433ac0d9fb4..4dff668ebdad 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_hw4.h +++ b/trunk/drivers/scsi/lpfc/lpfc_hw4.h @@ -1059,6 +1059,11 @@ struct rq_context { #define lpfc_rq_context_rqe_size_SHIFT 8 /* Version 1 Only */ #define lpfc_rq_context_rqe_size_MASK 0x0000000F #define lpfc_rq_context_rqe_size_WORD word0 +#define LPFC_RQE_SIZE_8 2 +#define LPFC_RQE_SIZE_16 3 +#define LPFC_RQE_SIZE_32 4 +#define LPFC_RQE_SIZE_64 5 +#define LPFC_RQE_SIZE_128 6 #define lpfc_rq_context_page_size_SHIFT 0 /* Version 1 Only */ #define lpfc_rq_context_page_size_MASK 0x000000FF #define lpfc_rq_context_page_size_WORD word0 @@ -2108,6 +2113,8 @@ struct lpfc_mbx_pc_sli4_params { #define sgl_pp_align_WORD word12 uint32_t rsvd_13_63[51]; }; +#define SLI4_PAGE_ALIGN(addr) (((addr)+((SLI4_PAGE_SIZE)-1)) \ + &(~((SLI4_PAGE_SIZE)-1))) struct lpfc_sli4_parameters { uint32_t word0; @@ -2491,6 +2498,9 @@ struct wqe_common { #define wqe_reqtag_SHIFT 0 #define wqe_reqtag_MASK 0x0000FFFF #define wqe_reqtag_WORD word9 +#define wqe_temp_rpi_SHIFT 16 +#define wqe_temp_rpi_MASK 0x0000FFFF +#define wqe_temp_rpi_WORD word9 #define wqe_rcvoxid_SHIFT 16 #define wqe_rcvoxid_MASK 0x0000FFFF #define wqe_rcvoxid_WORD word9 @@ -2524,7 +2534,7 @@ struct wqe_common { #define wqe_wqes_WORD word10 /* Note that this field overlaps above fields */ #define wqe_wqid_SHIFT 1 -#define wqe_wqid_MASK 0x0000007f +#define wqe_wqid_MASK 0x00007fff #define wqe_wqid_WORD word10 #define wqe_pri_SHIFT 16 #define wqe_pri_MASK 0x00000007 @@ -2621,7 +2631,11 @@ struct xmit_els_rsp64_wqe { uint32_t rsvd4; struct wqe_did wqe_dest; struct wqe_common wqe_com; /* words 6-11 */ - uint32_t rsvd_12_15[4]; + uint32_t word12; +#define wqe_rsp_temp_rpi_SHIFT 0 +#define wqe_rsp_temp_rpi_MASK 0x0000FFFF +#define wqe_rsp_temp_rpi_WORD word12 + uint32_t rsvd_13_15[3]; }; struct xmit_bls_rsp64_wqe { diff --git a/trunk/drivers/scsi/lpfc/lpfc_init.c b/trunk/drivers/scsi/lpfc/lpfc_init.c index 505f88443b5c..7dda036a1af3 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_init.c +++ b/trunk/drivers/scsi/lpfc/lpfc_init.c @@ -3209,9 +3209,9 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba, phba->sli4_hba.link_state.logical_speed = bf_get(lpfc_acqe_logical_link_speed, acqe_link); lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "2900 Async FCoE Link event - Speed:%dGBit duplex:x%x " - "LA Type:x%x Port Type:%d Port Number:%d Logical " - 
"speed:%dMbps Fault:%d\n", + "2900 Async FC/FCoE Link event - Speed:%dGBit " + "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " + "Logical speed:%dMbps Fault:%d\n", phba->sli4_hba.link_state.speed, phba->sli4_hba.link_state.topology, phba->sli4_hba.link_state.status, @@ -4906,6 +4906,7 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) uint16_t rpi_limit, curr_rpi_range; struct lpfc_dmabuf *dmabuf; struct lpfc_rpi_hdr *rpi_hdr; + uint32_t rpi_count; rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + phba->sli4_hba.max_cfg_param.max_rpi - 1; @@ -4920,7 +4921,9 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) * and to allow the full max_rpi range per port. */ if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) - return NULL; + rpi_count = rpi_limit - curr_rpi_range; + else + rpi_count = LPFC_RPI_HDR_COUNT; /* * First allocate the protocol header region for the port. The @@ -4961,7 +4964,7 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) * The next_rpi stores the next module-64 rpi value to post * in any subsequent rpi memory region postings. */ - phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT; + phba->sli4_hba.next_rpi += rpi_count; spin_unlock_irq(&phba->hbalock); return rpi_hdr; @@ -7004,7 +7007,8 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) lpfc_sli4_bar0_register_memmap(phba, if_type); } - if (pci_resource_start(pdev, 2)) { + if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && + (pci_resource_start(pdev, 2))) { /* * Map SLI4 if type 0 HBA Control Register base to a kernel * virtual address and setup the registers. @@ -7021,7 +7025,8 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) lpfc_sli4_bar1_register_memmap(phba); } - if (pci_resource_start(pdev, 4)) { + if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && + (pci_resource_start(pdev, 4))) { /* * Map SLI4 if type 0 HBA Doorbell Register base to a kernel * virtual address and setup the registers. diff --git a/trunk/drivers/scsi/lpfc/lpfc_mbox.c b/trunk/drivers/scsi/lpfc/lpfc_mbox.c index fbab9734e9b4..e6ce9033f85e 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_mbox.c +++ b/trunk/drivers/scsi/lpfc/lpfc_mbox.c @@ -1736,7 +1736,7 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox, } /* Setup for the none-embedded mbox command */ - pcount = (PAGE_ALIGN(length))/SLI4_PAGE_SIZE; + pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE; pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ? 
LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount; /* Allocate record for keeping SGE virtual addresses */ diff --git a/trunk/drivers/scsi/lpfc/lpfc_scsi.c b/trunk/drivers/scsi/lpfc/lpfc_scsi.c index fe7cc84e773b..84e4481b2406 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_scsi.c +++ b/trunk/drivers/scsi/lpfc/lpfc_scsi.c @@ -3238,9 +3238,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) if (!lpfc_cmd) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "2873 SCSI Layer I/O Abort Request IO CMPL Status " - "x%x ID %d " - "LUN %d snum %#lx\n", ret, cmnd->device->id, - cmnd->device->lun, cmnd->serial_number); + "x%x ID %d LUN %d\n", + ret, cmnd->device->id, cmnd->device->lun); return SUCCESS; } @@ -3318,16 +3317,15 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, "0748 abort handler timed out waiting " "for abort to complete: ret %#x, ID %d, " - "LUN %d, snum %#lx\n", - ret, cmnd->device->id, cmnd->device->lun, - cmnd->serial_number); + "LUN %d\n", + ret, cmnd->device->id, cmnd->device->lun); } out: lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "0749 SCSI Layer I/O Abort Request Status x%x ID %d " - "LUN %d snum %#lx\n", ret, cmnd->device->id, - cmnd->device->lun, cmnd->serial_number); + "LUN %d\n", ret, cmnd->device->id, + cmnd->device->lun); return ret; } diff --git a/trunk/drivers/scsi/lpfc/lpfc_sli.c b/trunk/drivers/scsi/lpfc/lpfc_sli.c index dacabbe0a586..837d272cb2d6 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_sli.c +++ b/trunk/drivers/scsi/lpfc/lpfc_sli.c @@ -4769,8 +4769,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) else phba->hba_flag &= ~HBA_FIP_SUPPORT; - if (phba->sli_rev != LPFC_SLI_REV4 || - !(phba->hba_flag & HBA_FCOE_MODE)) { + if (phba->sli_rev != LPFC_SLI_REV4) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "0376 READ_REV Error. 
SLI Level %d " "FCoE enabled %d\n", @@ -5018,10 +5017,11 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) lpfc_reg_fcfi(phba, mboxq); mboxq->vport = phba->pport; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); - if (rc == MBX_SUCCESS) - rc = 0; - else + if (rc != MBX_SUCCESS) goto out_unset_queue; + rc = 0; + phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, + &mboxq->u.mqe.un.reg_fcfi); } /* * The port is ready, set the host's link state to LINK_DOWN @@ -6402,6 +6402,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, uint32_t els_id = LPFC_ELS_ID_DEFAULT; int numBdes, i; struct ulp_bde64 bde; + struct lpfc_nodelist *ndlp; fip = phba->hba_flag & HBA_FIP_SUPPORT; /* The fcp commands will set command type */ @@ -6447,6 +6448,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, switch (iocbq->iocb.ulpCommand) { case CMD_ELS_REQUEST64_CR: + ndlp = (struct lpfc_nodelist *)iocbq->context1; if (!iocbq->iocb.ulpLe) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2007 Only Limited Edition cmd Format" @@ -6472,6 +6474,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) >> LPFC_FIP_ELS_ID_SHIFT); } + bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, ndlp->nlp_rpi); bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); @@ -6604,6 +6607,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, command_type = OTHER_COMMAND; break; case CMD_XMIT_ELS_RSP64_CX: + ndlp = (struct lpfc_nodelist *)iocbq->context1; /* words0-2 BDE memcpy */ /* word3 iocb=iotag32 wqe=response_payload_len */ wqe->xmit_els_rsp.response_payload_len = xmit_len; @@ -6626,6 +6630,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_LENLOC_WORD3); bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); + bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, ndlp->nlp_rpi); command_type = OTHER_COMMAND; break; case CMD_CLOSE_XRI_CN: @@ -10522,8 +10527,8 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, bf_set(lpfc_mbox_hdr_version, &shdr->request, phba->sli4_hba.pc_sli4_params.cqv); if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { - bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, - (PAGE_SIZE/SLI4_PAGE_SIZE)); + /* FW only supports 1. 
Should be PAGE_SIZE/SLI4_PAGE_SIZE */ + bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1); bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, eq->queue_id); } else { @@ -10967,6 +10972,12 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, &rq_create->u.request.context, hrq->entry_count); rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; + bf_set(lpfc_rq_context_rqe_size, + &rq_create->u.request.context, + LPFC_RQE_SIZE_8); + bf_set(lpfc_rq_context_page_size, + &rq_create->u.request.context, + (PAGE_SIZE/SLI4_PAGE_SIZE)); } else { switch (hrq->entry_count) { default: @@ -11042,9 +11053,12 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, phba->sli4_hba.pc_sli4_params.rqv); if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { bf_set(lpfc_rq_context_rqe_count_1, - &rq_create->u.request.context, - hrq->entry_count); + &rq_create->u.request.context, hrq->entry_count); rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE; + bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, + LPFC_RQE_SIZE_8); + bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, + (PAGE_SIZE/SLI4_PAGE_SIZE)); } else { switch (drq->entry_count) { default: diff --git a/trunk/drivers/scsi/lpfc/lpfc_version.h b/trunk/drivers/scsi/lpfc/lpfc_version.h index 2404d1d65563..c03921b1232c 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_version.h +++ b/trunk/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.22" +#define LPFC_DRIVER_VERSION "8.3.23" #define LPFC_DRIVER_NAME "lpfc" #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" diff --git a/trunk/drivers/scsi/megaraid.c b/trunk/drivers/scsi/megaraid.c index f2684dd09ed0..5c1776406c96 100644 --- a/trunk/drivers/scsi/megaraid.c +++ b/trunk/drivers/scsi/megaraid.c @@ -1469,8 +1469,8 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status) if( scb->state & SCB_ABORT ) { printk(KERN_WARNING - "megaraid: aborted cmd %lx[%x] complete.\n", - scb->cmd->serial_number, scb->idx); + "megaraid: aborted cmd [%x] complete.\n", + scb->idx); scb->cmd->result = (DID_ABORT << 16); @@ -1488,8 +1488,8 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status) if( scb->state & SCB_RESET ) { printk(KERN_WARNING - "megaraid: reset cmd %lx[%x] complete.\n", - scb->cmd->serial_number, scb->idx); + "megaraid: reset cmd [%x] complete.\n", + scb->idx); scb->cmd->result = (DID_RESET << 16); @@ -1958,8 +1958,8 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor) struct list_head *pos, *next; scb_t *scb; - printk(KERN_WARNING "megaraid: %s-%lx cmd=%x \n", - (aor == SCB_ABORT)? "ABORTING":"RESET", cmd->serial_number, + printk(KERN_WARNING "megaraid: %s cmd=%x \n", + (aor == SCB_ABORT)? "ABORTING":"RESET", cmd->cmnd[0], cmd->device->channel, cmd->device->id, cmd->device->lun); @@ -1983,9 +1983,9 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor) if( scb->state & SCB_ISSUED ) { printk(KERN_WARNING - "megaraid: %s-%lx[%x], fw owner.\n", + "megaraid: %s[%x], fw owner.\n", (aor==SCB_ABORT) ? 
"ABORTING":"RESET", - cmd->serial_number, scb->idx); + scb->idx); return FALSE; } @@ -1996,9 +1996,9 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor) * list */ printk(KERN_WARNING - "megaraid: %s-%lx[%x], driver owner.\n", + "megaraid: %s-[%x], driver owner.\n", (aor==SCB_ABORT) ? "ABORTING":"RESET", - cmd->serial_number, scb->idx); + scb->idx); mega_free_scb(adapter, scb); diff --git a/trunk/drivers/scsi/megaraid/megaraid_mbox.c b/trunk/drivers/scsi/megaraid/megaraid_mbox.c index 1dba32870b4c..2e6619eff3ea 100644 --- a/trunk/drivers/scsi/megaraid/megaraid_mbox.c +++ b/trunk/drivers/scsi/megaraid/megaraid_mbox.c @@ -2315,8 +2315,8 @@ megaraid_mbox_dpc(unsigned long devp) // Was an abort issued for this command earlier if (scb->state & SCB_ABORT) { con_log(CL_ANN, (KERN_NOTICE - "megaraid: aborted cmd %lx[%x] completed\n", - scp->serial_number, scb->sno)); + "megaraid: aborted cmd [%x] completed\n", + scb->sno)); } /* @@ -2472,8 +2472,8 @@ megaraid_abort_handler(struct scsi_cmnd *scp) raid_dev = ADAP2RAIDDEV(adapter); con_log(CL_ANN, (KERN_WARNING - "megaraid: aborting-%ld cmd=%x \n", - scp->serial_number, scp->cmnd[0], SCP2CHANNEL(scp), + "megaraid: aborting cmd=%x \n", + scp->cmnd[0], SCP2CHANNEL(scp), SCP2TARGET(scp), SCP2LUN(scp))); // If FW has stopped responding, simply return failure @@ -2496,9 +2496,8 @@ megaraid_abort_handler(struct scsi_cmnd *scp) list_del_init(&scb->list); // from completed list con_log(CL_ANN, (KERN_WARNING - "megaraid: %ld:%d[%d:%d], abort from completed list\n", - scp->serial_number, scb->sno, - scb->dev_channel, scb->dev_target)); + "megaraid: %d[%d:%d], abort from completed list\n", + scb->sno, scb->dev_channel, scb->dev_target)); scp->result = (DID_ABORT << 16); scp->scsi_done(scp); @@ -2527,9 +2526,8 @@ megaraid_abort_handler(struct scsi_cmnd *scp) ASSERT(!(scb->state & SCB_ISSUED)); con_log(CL_ANN, (KERN_WARNING - "megaraid abort: %ld[%d:%d], driver owner\n", - scp->serial_number, scb->dev_channel, - scb->dev_target)); + "megaraid abort: [%d:%d], driver owner\n", + scb->dev_channel, scb->dev_target)); scp->result = (DID_ABORT << 16); scp->scsi_done(scp); @@ -2560,25 +2558,21 @@ megaraid_abort_handler(struct scsi_cmnd *scp) if (!(scb->state & SCB_ISSUED)) { con_log(CL_ANN, (KERN_WARNING - "megaraid abort: %ld%d[%d:%d], invalid state\n", - scp->serial_number, scb->sno, scb->dev_channel, - scb->dev_target)); + "megaraid abort: %d[%d:%d], invalid state\n", + scb->sno, scb->dev_channel, scb->dev_target)); BUG(); } else { con_log(CL_ANN, (KERN_WARNING - "megaraid abort: %ld:%d[%d:%d], fw owner\n", - scp->serial_number, scb->sno, scb->dev_channel, - scb->dev_target)); + "megaraid abort: %d[%d:%d], fw owner\n", + scb->sno, scb->dev_channel, scb->dev_target)); } } } spin_unlock_irq(&adapter->lock); if (!found) { - con_log(CL_ANN, (KERN_WARNING - "megaraid abort: scsi cmd:%ld, do now own\n", - scp->serial_number)); + con_log(CL_ANN, (KERN_WARNING "megaraid abort: do now own\n")); // FIXME: Should there be a callback for this command? 
return SUCCESS; @@ -2649,9 +2643,8 @@ megaraid_reset_handler(struct scsi_cmnd *scp) } else { if (scb->scp == scp) { // Found command con_log(CL_ANN, (KERN_WARNING - "megaraid: %ld:%d[%d:%d], reset from pending list\n", - scp->serial_number, scb->sno, - scb->dev_channel, scb->dev_target)); + "megaraid: %d[%d:%d], reset from pending list\n", + scb->sno, scb->dev_channel, scb->dev_target)); } else { con_log(CL_ANN, (KERN_WARNING "megaraid: IO packet with %d[%d:%d] being reset\n", diff --git a/trunk/drivers/scsi/megaraid/megaraid_sas_base.c b/trunk/drivers/scsi/megaraid/megaraid_sas_base.c index 66d4cea4df98..89c623ebadbc 100644 --- a/trunk/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/trunk/drivers/scsi/megaraid/megaraid_sas_base.c @@ -1751,10 +1751,9 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance) list_del_init(&reset_cmd->list); if (reset_cmd->scmd) { reset_cmd->scmd->result = DID_RESET << 16; - printk(KERN_NOTICE "%d:%p reset [%02x], %#lx\n", + printk(KERN_NOTICE "%d:%p reset [%02x]\n", reset_index, reset_cmd, - reset_cmd->scmd->cmnd[0], - reset_cmd->scmd->serial_number); + reset_cmd->scmd->cmnd[0]); reset_cmd->scmd->scsi_done(reset_cmd->scmd); megasas_return_cmd(instance, reset_cmd); @@ -1879,8 +1878,8 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd) instance = (struct megasas_instance *)scmd->device->host->hostdata; - scmd_printk(KERN_NOTICE, scmd, "megasas: RESET -%ld cmd=%x retries=%x\n", - scmd->serial_number, scmd->cmnd[0], scmd->retries); + scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n", + scmd->cmnd[0], scmd->retries); if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { printk(KERN_ERR "megasas: cannot recover from previous reset " @@ -2349,9 +2348,9 @@ megasas_issue_pending_cmds_again(struct megasas_instance *instance) cmd->frame_phys_addr , 0, instance->reg_set); } else if (cmd->scmd) { - printk(KERN_NOTICE "megasas: %p scsi cmd [%02x],%#lx" + printk(KERN_NOTICE "megasas: %p scsi cmd [%02x]" "detected on the internal queue, issue again.\n", - cmd, cmd->scmd->cmnd[0], cmd->scmd->serial_number); + cmd, cmd->scmd->cmnd[0]); atomic_inc(&instance->fw_outstanding); instance->instancet->fire_cmd(instance, diff --git a/trunk/drivers/scsi/mesh.c b/trunk/drivers/scsi/mesh.c index 197aa1b3f0f3..494474779532 100644 --- a/trunk/drivers/scsi/mesh.c +++ b/trunk/drivers/scsi/mesh.c @@ -415,8 +415,7 @@ static void mesh_start_cmd(struct mesh_state *ms, struct scsi_cmnd *cmd) #if 1 if (DEBUG_TARGET(cmd)) { int i; - printk(KERN_DEBUG "mesh_start: %p ser=%lu tgt=%d cmd=", - cmd, cmd->serial_number, id); + printk(KERN_DEBUG "mesh_start: %p tgt=%d cmd=", cmd, id); for (i = 0; i < cmd->cmd_len; ++i) printk(" %x", cmd->cmnd[i]); printk(" use_sg=%d buffer=%p bufflen=%u\n", diff --git a/trunk/drivers/scsi/mpt2sas/mpt2sas_base.c b/trunk/drivers/scsi/mpt2sas/mpt2sas_base.c index 3346357031e9..efa0255491c2 100644 --- a/trunk/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/trunk/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -522,7 +522,8 @@ _base_display_event_data(struct MPT2SAS_ADAPTER *ioc, desc = "Device Status Change"; break; case MPI2_EVENT_IR_OPERATION_STATUS: - desc = "IR Operation Status"; + if (!ioc->hide_ir_msg) + desc = "IR Operation Status"; break; case MPI2_EVENT_SAS_DISCOVERY: { @@ -553,16 +554,20 @@ _base_display_event_data(struct MPT2SAS_ADAPTER *ioc, desc = "SAS Enclosure Device Status Change"; break; case MPI2_EVENT_IR_VOLUME: - desc = "IR Volume"; + if (!ioc->hide_ir_msg) + desc = "IR Volume"; break; case MPI2_EVENT_IR_PHYSICAL_DISK: 
- desc = "IR Physical Disk"; + if (!ioc->hide_ir_msg) + desc = "IR Physical Disk"; break; case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: - desc = "IR Configuration Change List"; + if (!ioc->hide_ir_msg) + desc = "IR Configuration Change List"; break; case MPI2_EVENT_LOG_ENTRY_ADDED: - desc = "Log Entry Added"; + if (!ioc->hide_ir_msg) + desc = "Log Entry Added"; break; } @@ -616,7 +621,10 @@ _base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info) originator_str = "PL"; break; case 2: - originator_str = "IR"; + if (!ioc->hide_ir_msg) + originator_str = "IR"; + else + originator_str = "WarpDrive"; break; } @@ -1508,6 +1516,7 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid) } ioc->scsi_lookup[i].cb_idx = 0xFF; ioc->scsi_lookup[i].scmd = NULL; + ioc->scsi_lookup[i].direct_io = 0; list_add_tail(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list); spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); @@ -1844,10 +1853,12 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc) printk("), "); printk("Capabilities=("); - if (ioc->facts.IOCCapabilities & - MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) { - printk("Raid"); - i++; + if (!ioc->hide_ir_msg) { + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) { + printk("Raid"); + i++; + } } if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) { @@ -3680,6 +3691,7 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) u32 reply_address; u16 smid; struct _tr_list *delayed_tr, *delayed_tr_next; + u8 hide_flag; dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__)); @@ -3706,6 +3718,7 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) ioc->scsi_lookup[i].cb_idx = 0xFF; ioc->scsi_lookup[i].smid = smid; ioc->scsi_lookup[i].scmd = NULL; + ioc->scsi_lookup[i].direct_io = 0; list_add_tail(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list); } @@ -3766,6 +3779,15 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) if (sleep_flag == CAN_SLEEP) _base_static_config_pages(ioc); + if (ioc->wait_for_port_enable_to_complete && ioc->is_warpdrive) { + if (ioc->manu_pg10.OEMIdentifier == 0x80) { + hide_flag = (u8) (ioc->manu_pg10.OEMSpecificFlags0 & + MFG_PAGE10_HIDE_SSDS_MASK); + if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK) + ioc->mfg_pg10_hide_flag = hide_flag; + } + } + if (ioc->wait_for_port_enable_to_complete) { if (diag_buffer_enable != 0) mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable); diff --git a/trunk/drivers/scsi/mpt2sas/mpt2sas_base.h b/trunk/drivers/scsi/mpt2sas/mpt2sas_base.h index 500328245f61..2a3c05f6db8b 100644 --- a/trunk/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/trunk/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -69,11 +69,11 @@ #define MPT2SAS_DRIVER_NAME "mpt2sas" #define MPT2SAS_AUTHOR "LSI Corporation " #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" -#define MPT2SAS_DRIVER_VERSION "08.100.00.00" +#define MPT2SAS_DRIVER_VERSION "08.100.00.01" #define MPT2SAS_MAJOR_VERSION 08 #define MPT2SAS_MINOR_VERSION 100 #define MPT2SAS_BUILD_VERSION 00 -#define MPT2SAS_RELEASE_VERSION 00 +#define MPT2SAS_RELEASE_VERSION 01 /* * Set MPT2SAS_SG_DEPTH value based on user input. 
@@ -188,6 +188,16 @@ #define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID 0x0044 #define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID 0x0046 +/* + * WarpDrive Specific Log codes + */ + +#define MPT2_WARPDRIVE_LOGENTRY (0x8002) +#define MPT2_WARPDRIVE_LC_SSDT (0x41) +#define MPT2_WARPDRIVE_LC_SSDLW (0x43) +#define MPT2_WARPDRIVE_LC_SSDLF (0x44) +#define MPT2_WARPDRIVE_LC_BRMF (0x4D) + /* * per target private data */ @@ -199,6 +209,7 @@ * struct MPT2SAS_TARGET - starget private hostdata * @starget: starget object * @sas_address: target sas address + * @raid_device: raid_device pointer to access volume data * @handle: device handle * @num_luns: number luns * @flags: MPT_TARGET_FLAGS_XXX flags @@ -208,6 +219,7 @@ struct MPT2SAS_TARGET { struct scsi_target *starget; u64 sas_address; + struct _raid_device *raid_device; u16 handle; int num_luns; u32 flags; @@ -215,6 +227,7 @@ struct MPT2SAS_TARGET { u8 tm_busy; }; + /* * per device private data */ @@ -262,6 +275,12 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_10 { MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_10, Mpi2ManufacturingPage10_t, MPI2_POINTER pMpi2ManufacturingPage10_t; +#define MFG_PAGE10_HIDE_SSDS_MASK (0x00000003) +#define MFG_PAGE10_HIDE_ALL_DISKS (0x00) +#define MFG_PAGE10_EXPOSE_ALL_DISKS (0x01) +#define MFG_PAGE10_HIDE_IF_VOL_PRESENT (0x02) + + struct MPT2SAS_DEVICE { struct MPT2SAS_TARGET *sas_target; unsigned int lun; @@ -341,6 +360,7 @@ struct _sas_device { * @sdev: scsi device struct (volumes are single lun) * @wwid: unique identifier for the volume * @handle: device handle + * @block_size: Block size of the volume * @id: target id * @channel: target channel * @volume_type: the raid level @@ -348,20 +368,33 @@ struct _sas_device { * @num_pds: number of hidden raid components * @responding: used in _scsih_raid_device_mark_responding * @percent_complete: resync percent complete + * @direct_io_enabled: Whether direct io to PDs are allowed or not + * @stripe_exponent: X where 2powX is the stripe sz in blocks + * @max_lba: Maximum number of LBA in the volume + * @stripe_sz: Stripe Size of the volume + * @device_info: Device info of the volume member disk + * @pd_handle: Array of handles of the physical drives for direct I/O in le16 */ +#define MPT_MAX_WARPDRIVE_PDS 8 struct _raid_device { struct list_head list; struct scsi_target *starget; struct scsi_device *sdev; u64 wwid; u16 handle; + u16 block_sz; int id; int channel; u8 volume_type; - u32 device_info; u8 num_pds; u8 responding; u8 percent_complete; + u8 direct_io_enabled; + u8 stripe_exponent; + u64 max_lba; + u32 stripe_sz; + u32 device_info; + u16 pd_handle[MPT_MAX_WARPDRIVE_PDS]; }; /** @@ -470,6 +503,7 @@ struct chain_tracker { * @smid: system message id * @scmd: scsi request pointer * @cb_idx: callback index + * @direct_io: To indicate whether I/O is direct (WARPDRIVE) * @chain_list: list of chains associated to this IO * @tracker_list: list of free request (ioc->free_list) */ @@ -477,14 +511,14 @@ struct scsiio_tracker { u16 smid; struct scsi_cmnd *scmd; u8 cb_idx; + u8 direct_io; struct list_head chain_list; struct list_head tracker_list; }; /** - * struct request_tracker - misc mf request tracker + * struct request_tracker - firmware request tracker * @smid: system message id - * @scmd: scsi request pointer * @cb_idx: callback index * @tracker_list: list of free request (ioc->free_list) */ @@ -832,6 +866,11 @@ struct MPT2SAS_ADAPTER { u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT]; u32 ring_buffer_offset; u32 ring_buffer_sz; + u8 is_warpdrive; + u8 hide_ir_msg; + u8 mfg_pg10_hide_flag; + u8 
hide_drives; + }; typedef u8 (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, diff --git a/trunk/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/trunk/drivers/scsi/mpt2sas/mpt2sas_ctl.c index 1c6d2b405eef..437c2d94c45a 100644 --- a/trunk/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/trunk/drivers/scsi/mpt2sas/mpt2sas_ctl.c @@ -688,6 +688,13 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, goto out; } + /* Check for overflow and wraparound */ + if (karg.data_sge_offset * 4 > ioc->request_sz || + karg.data_sge_offset > (UINT_MAX / 4)) { + ret = -EINVAL; + goto out; + } + /* copy in request message frame from user */ if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, @@ -1034,7 +1041,10 @@ _ctl_getiocinfo(void __user *arg) __func__)); memset(&karg, 0 , sizeof(karg)); - karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2; + if (ioc->is_warpdrive) + karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2_SSS6200; + else + karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2; if (ioc->pfacts) karg.port_number = ioc->pfacts[0].PortNumber; pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision); @@ -1963,7 +1973,7 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state) Mpi2DiagBufferPostReply_t *mpi_reply; int rc, i; u8 buffer_type; - unsigned long timeleft; + unsigned long timeleft, request_size, copy_size; u16 smid; u16 ioc_status; u8 issue_reset = 0; @@ -1999,6 +2009,8 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state) return -ENOMEM; } + request_size = ioc->diag_buffer_sz[buffer_type]; + if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) { printk(MPT2SAS_ERR_FMT "%s: either the starting_offset " "or bytes_to_read are not 4 byte aligned\n", ioc->name, @@ -2006,13 +2018,23 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state) return -EINVAL; } + if (karg.starting_offset > request_size) + return -EINVAL; + diag_data = (void *)(request_data + karg.starting_offset); dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: diag_buffer(%p), " "offset(%d), sz(%d)\n", ioc->name, __func__, diag_data, karg.starting_offset, karg.bytes_to_read)); + /* Truncate data on requests that are too large */ + if ((diag_data + karg.bytes_to_read < diag_data) || + (diag_data + karg.bytes_to_read > request_data + request_size)) + copy_size = request_size - karg.starting_offset; + else + copy_size = karg.bytes_to_read; + if (copy_to_user((void __user *)uarg->diagnostic_data, - diag_data, karg.bytes_to_read)) { + diag_data, copy_size)) { printk(MPT2SAS_ERR_FMT "%s: Unable to write " "mpt_diag_read_buffer_t data @ %p\n", ioc->name, __func__, diag_data); diff --git a/trunk/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/trunk/drivers/scsi/mpt2sas/mpt2sas_ctl.h index 69916e46e04f..11ff1d5fb8f0 100644 --- a/trunk/drivers/scsi/mpt2sas/mpt2sas_ctl.h +++ b/trunk/drivers/scsi/mpt2sas/mpt2sas_ctl.h @@ -133,6 +133,7 @@ struct mpt2_ioctl_pci_info { #define MPT2_IOCTL_INTERFACE_FC_IP (0x02) #define MPT2_IOCTL_INTERFACE_SAS (0x03) #define MPT2_IOCTL_INTERFACE_SAS2 (0x04) +#define MPT2_IOCTL_INTERFACE_SAS2_SSS6200 (0x05) #define MPT2_IOCTL_VERSION_LENGTH (32) /** diff --git a/trunk/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/trunk/drivers/scsi/mpt2sas/mpt2sas_scsih.c index d2064a0533ae..f12e02358d6d 100644 --- a/trunk/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/trunk/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -233,6 +233,9 @@ static struct pci_device_id scsih_pci_table[] = { PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, 
MPI2_MFGPAGE_DEVID_SAS2308_3, PCI_ANY_ID, PCI_ANY_ID }, + /* SSS6200 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200, + PCI_ANY_ID, PCI_ANY_ID }, {0} /* Terminating entry */ }; MODULE_DEVICE_TABLE(pci, scsih_pci_table); @@ -1256,6 +1259,7 @@ _scsih_target_alloc(struct scsi_target *starget) sas_target_priv_data->handle = raid_device->handle; sas_target_priv_data->sas_address = raid_device->wwid; sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME; + sas_target_priv_data->raid_device = raid_device; raid_device->starget = starget; } spin_unlock_irqrestore(&ioc->raid_device_lock, flags); @@ -1455,7 +1459,10 @@ static int _scsih_is_raid(struct device *dev) { struct scsi_device *sdev = to_scsi_device(dev); + struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host); + if (ioc->is_warpdrive) + return 0; return (sdev->channel == RAID_CHANNEL) ? 1 : 0; } @@ -1480,7 +1487,7 @@ _scsih_get_resync(struct device *dev) sdev->channel); spin_unlock_irqrestore(&ioc->raid_device_lock, flags); - if (!raid_device) + if (!raid_device || ioc->is_warpdrive) goto out; if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, @@ -1640,6 +1647,212 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc, kfree(vol_pg0); } +/** + * _scsih_disable_ddio - Disable direct I/O for all the volumes + * @ioc: per adapter object + */ +static void +_scsih_disable_ddio(struct MPT2SAS_ADAPTER *ioc) +{ + Mpi2RaidVolPage1_t vol_pg1; + Mpi2ConfigReply_t mpi_reply; + struct _raid_device *raid_device; + u16 handle; + u16 ioc_status; + + handle = 0xFFFF; + while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply, + &vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + break; + handle = le16_to_cpu(vol_pg1.DevHandle); + raid_device = _scsih_raid_device_find_by_handle(ioc, handle); + if (raid_device) + raid_device->direct_io_enabled = 0; + } + return; +} + + +/** + * _scsih_get_num_volumes - Get number of volumes in the ioc + * @ioc: per adapter object + */ +static u8 +_scsih_get_num_volumes(struct MPT2SAS_ADAPTER *ioc) +{ + Mpi2RaidVolPage1_t vol_pg1; + Mpi2ConfigReply_t mpi_reply; + u16 handle; + u8 vol_cnt = 0; + u16 ioc_status; + + handle = 0xFFFF; + while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply, + &vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + break; + vol_cnt++; + handle = le16_to_cpu(vol_pg1.DevHandle); + } + return vol_cnt; +} + + +/** + * _scsih_init_warpdrive_properties - Set properties for warpdrive direct I/O. 
+ * @ioc: per adapter object + * @raid_device: the raid_device object + */ +static void +_scsih_init_warpdrive_properties(struct MPT2SAS_ADAPTER *ioc, + struct _raid_device *raid_device) +{ + Mpi2RaidVolPage0_t *vol_pg0; + Mpi2RaidPhysDiskPage0_t pd_pg0; + Mpi2ConfigReply_t mpi_reply; + u16 sz; + u8 num_pds, count; + u64 mb = 1024 * 1024; + u64 tb_2 = 2 * mb * mb; + u64 capacity; + u32 stripe_sz; + u8 i, stripe_exp; + + if (!ioc->is_warpdrive) + return; + + if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) { + printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled " + "globally as drives are exposed\n", ioc->name); + return; + } + if (_scsih_get_num_volumes(ioc) > 1) { + _scsih_disable_ddio(ioc); + printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled " + "globally as number of drives > 1\n", ioc->name); + return; + } + if ((mpt2sas_config_get_number_pds(ioc, raid_device->handle, + &num_pds)) || !num_pds) { + printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled " + "Failure in computing number of drives\n", ioc->name); + return; + } + + sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds * + sizeof(Mpi2RaidVol0PhysDisk_t)); + vol_pg0 = kzalloc(sz, GFP_KERNEL); + if (!vol_pg0) { + printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled " + "Memory allocation failure for RVPG0\n", ioc->name); + return; + } + + if ((mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0, + MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) { + printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled " + "Failure in retrieving RVPG0\n", ioc->name); + kfree(vol_pg0); + return; + } + + /* + * WARPDRIVE:If number of physical disks in a volume exceeds the max pds + * assumed for WARPDRIVE, disable direct I/O + */ + if (num_pds > MPT_MAX_WARPDRIVE_PDS) { + printk(MPT2SAS_WARN_FMT "WarpDrive : Direct IO is disabled " + "for the drive with handle(0x%04x): num_mem=%d, " + "max_mem_allowed=%d\n", ioc->name, raid_device->handle, + num_pds, MPT_MAX_WARPDRIVE_PDS); + kfree(vol_pg0); + return; + } + for (count = 0; count < num_pds; count++) { + if (mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply, + &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM, + vol_pg0->PhysDisk[count].PhysDiskNum) || + pd_pg0.DevHandle == MPT2SAS_INVALID_DEVICE_HANDLE) { + printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is " + "disabled for the drive with handle(0x%04x) member" + "handle retrieval failed for member number=%d\n", + ioc->name, raid_device->handle, + vol_pg0->PhysDisk[count].PhysDiskNum); + goto out_error; + } + raid_device->pd_handle[count] = le16_to_cpu(pd_pg0.DevHandle); + } + + /* + * Assumption for WD: Direct I/O is not supported if the volume is + * not RAID0, if the stripe size is not 64KB, if the block size is + * not 512 and if the volume size is >2TB + */ + if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0 || + le16_to_cpu(vol_pg0->BlockSize) != 512) { + printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled " + "for the drive with handle(0x%04x): type=%d, " + "s_sz=%uK, blk_size=%u\n", ioc->name, + raid_device->handle, raid_device->volume_type, + le32_to_cpu(vol_pg0->StripeSize)/2, + le16_to_cpu(vol_pg0->BlockSize)); + goto out_error; + } + + capacity = (u64) le16_to_cpu(vol_pg0->BlockSize) * + (le64_to_cpu(vol_pg0->MaxLBA) + 1); + + if (capacity > tb_2) { + printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled " + "for the drive with handle(0x%04x) since drive sz > 2TB\n", + ioc->name, raid_device->handle); + goto out_error; + } + + stripe_sz = 
le32_to_cpu(vol_pg0->StripeSize); + stripe_exp = 0; + for (i = 0; i < 32; i++) { + if (stripe_sz & 1) + break; + stripe_exp++; + stripe_sz >>= 1; + } + if (i == 32) { + printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled " + "for the drive with handle(0x%04x) invalid stripe sz %uK\n", + ioc->name, raid_device->handle, + le32_to_cpu(vol_pg0->StripeSize)/2); + goto out_error; + } + raid_device->stripe_exponent = stripe_exp; + raid_device->direct_io_enabled = 1; + + printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is Enabled for the drive" + " with handle(0x%04x)\n", ioc->name, raid_device->handle); + /* + * WARPDRIVE: Though the following fields are not used for direct IO, + * stored for future purpose: + */ + raid_device->max_lba = le64_to_cpu(vol_pg0->MaxLBA); + raid_device->stripe_sz = le32_to_cpu(vol_pg0->StripeSize); + raid_device->block_sz = le16_to_cpu(vol_pg0->BlockSize); + + + kfree(vol_pg0); + return; + +out_error: + raid_device->direct_io_enabled = 0; + for (count = 0; count < num_pds; count++) + raid_device->pd_handle[count] = 0; + kfree(vol_pg0); + return; +} /** * _scsih_enable_tlr - setting TLR flags @@ -1710,6 +1923,11 @@ _scsih_slave_configure(struct scsi_device *sdev) _scsih_get_volume_capabilities(ioc, raid_device); + /* + * WARPDRIVE: Initialize the required data for Direct IO + */ + _scsih_init_warpdrive_properties(ioc, raid_device); + /* RAID Queue Depth Support * IS volume = underlying qdepth of drive type, either * MPT2SAS_SAS_QUEUE_DEPTH or MPT2SAS_SATA_QUEUE_DEPTH @@ -1757,14 +1975,16 @@ _scsih_slave_configure(struct scsi_device *sdev) break; } - sdev_printk(KERN_INFO, sdev, "%s: " - "handle(0x%04x), wwid(0x%016llx), pd_count(%d), type(%s)\n", - r_level, raid_device->handle, - (unsigned long long)raid_device->wwid, - raid_device->num_pds, ds); + if (!ioc->hide_ir_msg) + sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " + "wwid(0x%016llx), pd_count(%d), type(%s)\n", + r_level, raid_device->handle, + (unsigned long long)raid_device->wwid, + raid_device->num_pds, ds); _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); /* raid transport support */ - _scsih_set_level(sdev, raid_device); + if (!ioc->is_warpdrive) + _scsih_set_level(sdev, raid_device); return 0; } @@ -2133,8 +2353,7 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel, switch (type) { case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK: scmd_lookup = _scsih_scsi_lookup_get(ioc, smid_task); - if (scmd_lookup && (scmd_lookup->serial_number == - scmd->serial_number)) + if (scmd_lookup) rc = FAILED; else rc = SUCCESS; @@ -2182,16 +2401,20 @@ _scsih_tm_display_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd) struct MPT2SAS_TARGET *priv_target = starget->hostdata; struct _sas_device *sas_device = NULL; unsigned long flags; + char *device_str = NULL; if (!priv_target) return; + if (ioc->hide_ir_msg) + device_str = "WarpDrive"; + else + device_str = "volume"; scsi_print_command(scmd); if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { - starget_printk(KERN_INFO, starget, "volume handle(0x%04x), " - "volume wwid(0x%016llx)\n", - priv_target->handle, - (unsigned long long)priv_target->sas_address); + starget_printk(KERN_INFO, starget, "%s handle(0x%04x), " + "%s wwid(0x%016llx)\n", device_str, priv_target->handle, + device_str, (unsigned long long)priv_target->sas_address); } else { spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, @@ -3130,6 +3353,9 @@ _scsih_check_ir_config_unhide_events(struct 
MPT2SAS_ADAPTER *ioc, a = 0; b = 0; + if (ioc->is_warpdrive) + return; + /* Volume Resets for Deleted or Removed */ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; for (i = 0; i < event_data->NumElements; i++, element++) { @@ -3346,6 +3572,105 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) SAM_STAT_CHECK_CONDITION; } +/** + * _scsih_scsi_direct_io_get - returns direct io flag + * @ioc: per adapter object + * @smid: system request message index + * + * Returns the direct_io flag stored for the given smid. + */ +static inline u8 +_scsih_scsi_direct_io_get(struct MPT2SAS_ADAPTER *ioc, u16 smid) +{ + return ioc->scsi_lookup[smid - 1].direct_io; +} + +/** + * _scsih_scsi_direct_io_set - sets direct io flag + * @ioc: per adapter object + * @smid: system request message index + * @direct_io: Zero or non-zero value to set in the direct_io flag + * + * Returns Nothing. + */ +static inline void +_scsih_scsi_direct_io_set(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 direct_io) +{ + ioc->scsi_lookup[smid - 1].direct_io = direct_io; +} + + +/** + * _scsih_setup_direct_io - setup MPI request for WARPDRIVE Direct I/O + * @ioc: per adapter object + * @scmd: pointer to scsi command object + * @raid_device: pointer to raid device data structure + * @mpi_request: pointer to the SCSI_IO request message frame + * @smid: system request message index + * + * Returns nothing + */ +static void +_scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, + struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request, + u16 smid) +{ + u32 v_lba, p_lba, stripe_off, stripe_unit, column, io_size; + u32 stripe_sz, stripe_exp; + u8 num_pds, *cdb_ptr, *tmp_ptr, *lba_ptr1, *lba_ptr2; + u8 cdb0 = scmd->cmnd[0]; + + /* + * Try Direct I/O to RAID member disks + */ + if (cdb0 == READ_16 || cdb0 == READ_10 || + cdb0 == WRITE_16 || cdb0 == WRITE_10) { + cdb_ptr = mpi_request->CDB.CDB32; + + if ((cdb0 < READ_16) || !(cdb_ptr[2] | cdb_ptr[3] | cdb_ptr[4] + | cdb_ptr[5])) { + io_size = scsi_bufflen(scmd) >> 9; + /* get virtual lba */ + lba_ptr1 = lba_ptr2 = (cdb0 < READ_16) ?
&cdb_ptr[2] : + &cdb_ptr[6]; + tmp_ptr = (u8 *)&v_lba + 3; + *tmp_ptr-- = *lba_ptr1++; + *tmp_ptr-- = *lba_ptr1++; + *tmp_ptr-- = *lba_ptr1++; + *tmp_ptr = *lba_ptr1; + + if (((u64)v_lba + (u64)io_size - 1) <= + (u32)raid_device->max_lba) { + stripe_sz = raid_device->stripe_sz; + stripe_exp = raid_device->stripe_exponent; + stripe_off = v_lba & (stripe_sz - 1); + + /* Check whether IO falls within a stripe */ + if ((stripe_off + io_size) <= stripe_sz) { + num_pds = raid_device->num_pds; + p_lba = v_lba >> stripe_exp; + stripe_unit = p_lba / num_pds; + column = p_lba % num_pds; + p_lba = (stripe_unit << stripe_exp) + + stripe_off; + mpi_request->DevHandle = + cpu_to_le16(raid_device-> + pd_handle[column]); + tmp_ptr = (u8 *)&p_lba + 3; + *lba_ptr2++ = *tmp_ptr--; + *lba_ptr2++ = *tmp_ptr--; + *lba_ptr2++ = *tmp_ptr--; + *lba_ptr2 = *tmp_ptr; + /* + * WD: To indicate this I/O is directI/O + */ + _scsih_scsi_direct_io_set(ioc, smid, 1); + } + } + } + } +} + /** * _scsih_qcmd - main scsi request entry point * @scmd: pointer to scsi command object @@ -3363,6 +3688,7 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); struct MPT2SAS_DEVICE *sas_device_priv_data; struct MPT2SAS_TARGET *sas_target_priv_data; + struct _raid_device *raid_device; Mpi2SCSIIORequest_t *mpi_request; u32 mpi_control; u16 smid; @@ -3424,8 +3750,10 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) } else mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; - /* Make sure Device is not raid volume */ - if (!_scsih_is_raid(&scmd->device->sdev_gendev) && + /* Make sure Device is not raid volume. + * We do not expose raid functionality to upper layer for warpdrive. + */ + if (!ioc->is_warpdrive && !_scsih_is_raid(&scmd->device->sdev_gendev) && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32) mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON; @@ -3473,9 +3801,14 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) } } + raid_device = sas_target_priv_data->raid_device; + if (raid_device && raid_device->direct_io_enabled) + _scsih_setup_direct_io(ioc, scmd, raid_device, mpi_request, + smid); + if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) mpt2sas_base_put_smid_scsi_io(ioc, smid, - sas_device_priv_data->sas_target->handle); + le16_to_cpu(mpi_request->DevHandle)); else mpt2sas_base_put_smid_default(ioc, smid); return 0; @@ -3540,10 +3873,16 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, unsigned long flags; struct scsi_target *starget = scmd->device->sdev_target; struct MPT2SAS_TARGET *priv_target = starget->hostdata; + char *device_str = NULL; if (!priv_target) return; + if (ioc->hide_ir_msg) + device_str = "WarpDrive"; + else + device_str = "volume"; + if (log_info == 0x31170000) return; @@ -3660,8 +3999,8 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, scsi_print_command(scmd); if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { - printk(MPT2SAS_WARN_FMT "\tvolume wwid(0x%016llx)\n", ioc->name, - (unsigned long long)priv_target->sas_address); + printk(MPT2SAS_WARN_FMT "\t%s wwid(0x%016llx)\n", ioc->name, + device_str, (unsigned long long)priv_target->sas_address); } else { spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, @@ -3840,6 +4179,20 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) scmd->result = DID_NO_CONNECT << 16; 
goto out; } + /* + * WARPDRIVE: If direct_io is set then it is directIO, + * the failed direct I/O should be redirected to volume + */ + if (_scsih_scsi_direct_io_get(ioc, smid)) { + _scsih_scsi_direct_io_set(ioc, smid, 0); + memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); + mpi_request->DevHandle = + cpu_to_le16(sas_device_priv_data->sas_target->handle); + mpt2sas_base_put_smid_scsi_io(ioc, smid, + sas_device_priv_data->sas_target->handle); + return 0; + } + /* turning off TLR */ scsi_state = mpi_reply->SCSIState; @@ -3848,7 +4201,10 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF; if (!sas_device_priv_data->tlr_snoop_check) { sas_device_priv_data->tlr_snoop_check++; - if (!_scsih_is_raid(&scmd->device->sdev_gendev) && + /* Make sure Device is not raid volume. + * We do not expose raid functionality to upper layer for warpdrive. + */ + if (!ioc->is_warpdrive && !_scsih_is_raid(&scmd->device->sdev_gendev) && sas_is_tlr_enabled(scmd->device) && response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) { sas_disable_tlr(scmd->device); @@ -4681,8 +5037,10 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, _scsih_ublock_io_device(ioc, sas_device_backup.handle); - mpt2sas_transport_port_remove(ioc, sas_device_backup.sas_address, - sas_device_backup.sas_address_parent); + if (!ioc->hide_drives) + mpt2sas_transport_port_remove(ioc, + sas_device_backup.sas_address, + sas_device_backup.sas_address_parent); printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr" "(0x%016llx)\n", ioc->name, sas_device_backup.handle, @@ -5413,6 +5771,7 @@ _scsih_sas_pd_hide(struct MPT2SAS_ADAPTER *ioc, &sas_device->volume_wwid); set_bit(handle, ioc->pd_handles); _scsih_reprobe_target(sas_device->starget, 1); + } /** @@ -5591,7 +5950,8 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc, Mpi2EventDataIrConfigChangeList_t *event_data = fw_event->event_data; #ifdef CONFIG_SCSI_MPT2SAS_LOGGING - if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) + if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) + && !ioc->hide_ir_msg) _scsih_sas_ir_config_change_event_debug(ioc, event_data); #endif @@ -5614,16 +5974,20 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc, le16_to_cpu(element->VolDevHandle)); break; case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: - _scsih_sas_pd_hide(ioc, element); + if (!ioc->is_warpdrive) + _scsih_sas_pd_hide(ioc, element); break; case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED: - _scsih_sas_pd_expose(ioc, element); + if (!ioc->is_warpdrive) + _scsih_sas_pd_expose(ioc, element); break; case MPI2_EVENT_IR_CHANGE_RC_HIDE: - _scsih_sas_pd_add(ioc, element); + if (!ioc->is_warpdrive) + _scsih_sas_pd_add(ioc, element); break; case MPI2_EVENT_IR_CHANGE_RC_UNHIDE: - _scsih_sas_pd_delete(ioc, element); + if (!ioc->is_warpdrive) + _scsih_sas_pd_delete(ioc, element); break; } } @@ -5654,9 +6018,10 @@ _scsih_sas_ir_volume_event(struct MPT2SAS_ADAPTER *ioc, handle = le16_to_cpu(event_data->VolDevHandle); state = le32_to_cpu(event_data->NewValue); - dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle(0x%04x), " - "old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle, - le32_to_cpu(event_data->PreviousValue), state)); + if (!ioc->hide_ir_msg) + dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle(0x%04x), " + "old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle, + le32_to_cpu(event_data->PreviousValue), state)); switch (state) { case MPI2_RAID_VOL_STATE_MISSING: @@ -5736,9 +6101,10 @@ 
_scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, handle = le16_to_cpu(event_data->PhysDiskDevHandle); state = le32_to_cpu(event_data->NewValue); - dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle(0x%04x), " - "old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle, - le32_to_cpu(event_data->PreviousValue), state)); + if (!ioc->hide_ir_msg) + dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle(0x%04x), " + "old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle, + le32_to_cpu(event_data->PreviousValue), state)); switch (state) { case MPI2_RAID_PD_STATE_ONLINE: @@ -5747,7 +6113,8 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, case MPI2_RAID_PD_STATE_OPTIMAL: case MPI2_RAID_PD_STATE_HOT_SPARE: - set_bit(handle, ioc->pd_handles); + if (!ioc->is_warpdrive) + set_bit(handle, ioc->pd_handles); spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = _scsih_sas_device_find_by_handle(ioc, handle); @@ -5851,7 +6218,8 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc, u16 handle; #ifdef CONFIG_SCSI_MPT2SAS_LOGGING - if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) + if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) + && !ioc->hide_ir_msg) _scsih_sas_ir_operation_status_event_debug(ioc, event_data); #endif @@ -5910,7 +6278,7 @@ static void _scsih_mark_responding_sas_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, u16 slot, u16 handle) { - struct MPT2SAS_TARGET *sas_target_priv_data; + struct MPT2SAS_TARGET *sas_target_priv_data = NULL; struct scsi_target *starget; struct _sas_device *sas_device; unsigned long flags; @@ -5918,7 +6286,7 @@ _scsih_mark_responding_sas_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, spin_lock_irqsave(&ioc->sas_device_lock, flags); list_for_each_entry(sas_device, &ioc->sas_device_list, list) { if (sas_device->sas_address == sas_address && - sas_device->slot == slot && sas_device->starget) { + sas_device->slot == slot) { sas_device->responding = 1; starget = sas_device->starget; if (starget && starget->hostdata) { @@ -5927,13 +6295,15 @@ _scsih_mark_responding_sas_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, sas_target_priv_data->deleted = 0; } else sas_target_priv_data = NULL; - starget_printk(KERN_INFO, sas_device->starget, - "handle(0x%04x), sas_addr(0x%016llx), enclosure " - "logical id(0x%016llx), slot(%d)\n", handle, - (unsigned long long)sas_device->sas_address, - (unsigned long long) - sas_device->enclosure_logical_id, - sas_device->slot); + if (starget) + starget_printk(KERN_INFO, starget, + "handle(0x%04x), sas_addr(0x%016llx), " + "enclosure logical id(0x%016llx), " + "slot(%d)\n", handle, + (unsigned long long)sas_device->sas_address, + (unsigned long long) + sas_device->enclosure_logical_id, + sas_device->slot); if (sas_device->handle == handle) goto out; printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n", @@ -6025,6 +6395,12 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid, starget_printk(KERN_INFO, raid_device->starget, "handle(0x%04x), wwid(0x%016llx)\n", handle, (unsigned long long)raid_device->wwid); + /* + * WARPDRIVE: The handles of the PDs might have changed + * across the host reset so re-initialize the + * required data for Direct IO + */ + _scsih_init_warpdrive_properties(ioc, raid_device); if (raid_device->handle == handle) goto out; printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n", @@ -6086,18 +6462,20 @@ _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc) } /* refresh the pd_handles */ - phys_disk_num = 0xFF; - 
memset(ioc->pd_handles, 0, ioc->pd_handles_sz); - while (!(mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply, - &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, - phys_disk_num))) { - ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & - MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) - break; - phys_disk_num = pd_pg0.PhysDiskNum; - handle = le16_to_cpu(pd_pg0.DevHandle); - set_bit(handle, ioc->pd_handles); + if (!ioc->is_warpdrive) { + phys_disk_num = 0xFF; + memset(ioc->pd_handles, 0, ioc->pd_handles_sz); + while (!(mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply, + &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, + phys_disk_num))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + break; + phys_disk_num = pd_pg0.PhysDiskNum; + handle = le16_to_cpu(pd_pg0.DevHandle); + set_bit(handle, ioc->pd_handles); + } } } @@ -6242,6 +6620,50 @@ _scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc) } } +/** + * _scsih_hide_unhide_sas_devices - add/remove device to/from OS + * @ioc: per adapter object + * + * Return nothing. + */ +static void +_scsih_hide_unhide_sas_devices(struct MPT2SAS_ADAPTER *ioc) +{ + struct _sas_device *sas_device, *sas_device_next; + + if (!ioc->is_warpdrive || ioc->mfg_pg10_hide_flag != + MFG_PAGE10_HIDE_IF_VOL_PRESENT) + return; + + if (ioc->hide_drives) { + if (_scsih_get_num_volumes(ioc)) + return; + ioc->hide_drives = 0; + list_for_each_entry_safe(sas_device, sas_device_next, + &ioc->sas_device_list, list) { + if (!mpt2sas_transport_port_add(ioc, sas_device->handle, + sas_device->sas_address_parent)) { + _scsih_sas_device_remove(ioc, sas_device); + } else if (!sas_device->starget) { + mpt2sas_transport_port_remove(ioc, + sas_device->sas_address, + sas_device->sas_address_parent); + _scsih_sas_device_remove(ioc, sas_device); + } + } + } else { + if (!_scsih_get_num_volumes(ioc)) + return; + ioc->hide_drives = 1; + list_for_each_entry_safe(sas_device, sas_device_next, + &ioc->sas_device_list, list) { + mpt2sas_transport_port_remove(ioc, + sas_device->sas_address, + sas_device->sas_address_parent); + } + } +} + /** * mpt2sas_scsih_reset_handler - reset callback handler (for scsih) * @ioc: per adapter object @@ -6326,6 +6748,7 @@ _firmware_event_work(struct work_struct *work) spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); _scsih_remove_unresponding_sas_devices(ioc); + _scsih_hide_unhide_sas_devices(ioc); return; } @@ -6425,6 +6848,53 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, (Mpi2EventDataIrVolume_t *) mpi_reply->EventData); break; + case MPI2_EVENT_LOG_ENTRY_ADDED: + { + Mpi2EventDataLogEntryAdded_t *log_entry; + u32 *log_code; + + if (!ioc->is_warpdrive) + break; + + log_entry = (Mpi2EventDataLogEntryAdded_t *) + mpi_reply->EventData; + log_code = (u32 *)log_entry->LogData; + + if (le16_to_cpu(log_entry->LogEntryQualifier) + != MPT2_WARPDRIVE_LOGENTRY) + break; + + switch (le32_to_cpu(*log_code)) { + case MPT2_WARPDRIVE_LC_SSDT: + printk(MPT2SAS_WARN_FMT "WarpDrive Warning: " + "IO Throttling has occurred in the WarpDrive " + "subsystem. Check WarpDrive documentation for " + "additional details.\n", ioc->name); + break; + case MPT2_WARPDRIVE_LC_SSDLW: + printk(MPT2SAS_WARN_FMT "WarpDrive Warning: " + "Program/Erase Cycles for the WarpDrive subsystem " + "in degraded range. 
Check WarpDrive documentation " + "for additional details.\n", ioc->name); + break; + case MPT2_WARPDRIVE_LC_SSDLF: + printk(MPT2SAS_ERR_FMT "WarpDrive Fatal Error: " + "There are no Program/Erase Cycles for the " + "WarpDrive subsystem. The storage device will be " + "in read-only mode. Check WarpDrive documentation " + "for additional details.\n", ioc->name); + break; + case MPT2_WARPDRIVE_LC_BRMF: + printk(MPT2SAS_ERR_FMT "WarpDrive Fatal Error: " + "The Backup Rail Monitor has failed on the " + "WarpDrive subsystem. Check WarpDrive " + "documentation for additional details.\n", + ioc->name); + break; + } + + break; + } case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: case MPI2_EVENT_IR_OPERATION_STATUS: case MPI2_EVENT_SAS_DISCOVERY: @@ -6583,7 +7053,8 @@ _scsih_ir_shutdown(struct MPT2SAS_ADAPTER *ioc) mpi_request->Function = MPI2_FUNCTION_RAID_ACTION; mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED; - printk(MPT2SAS_INFO_FMT "IR shutdown (sending)\n", ioc->name); + if (!ioc->hide_ir_msg) + printk(MPT2SAS_INFO_FMT "IR shutdown (sending)\n", ioc->name); init_completion(&ioc->scsih_cmds.done); mpt2sas_base_put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); @@ -6597,10 +7068,11 @@ _scsih_ir_shutdown(struct MPT2SAS_ADAPTER *ioc) if (ioc->scsih_cmds.status & MPT2_CMD_REPLY_VALID) { mpi_reply = ioc->scsih_cmds.reply; - printk(MPT2SAS_INFO_FMT "IR shutdown (complete): " - "ioc_status(0x%04x), loginfo(0x%08x)\n", - ioc->name, le16_to_cpu(mpi_reply->IOCStatus), - le32_to_cpu(mpi_reply->IOCLogInfo)); + if (!ioc->hide_ir_msg) + printk(MPT2SAS_INFO_FMT "IR shutdown (complete): " + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); } out: @@ -6759,6 +7231,9 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc) spin_lock_irqsave(&ioc->sas_device_lock, flags); list_move_tail(&sas_device->list, &ioc->sas_device_list); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + if (ioc->hide_drives) + return; if (!mpt2sas_transport_port_add(ioc, sas_device->handle, sas_device->sas_address_parent)) { _scsih_sas_device_remove(ioc, sas_device); @@ -6812,6 +7287,9 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc) list_move_tail(&sas_device->list, &ioc->sas_device_list); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (ioc->hide_drives) + continue; + if (!mpt2sas_transport_port_add(ioc, sas_device->handle, sas_device->sas_address_parent)) { _scsih_sas_device_remove(ioc, sas_device); @@ -6882,6 +7360,11 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) ioc->id = mpt_ids++; sprintf(ioc->name, "%s%d", MPT2SAS_DRIVER_NAME, ioc->id); ioc->pdev = pdev; + if (id->device == MPI2_MFGPAGE_DEVID_SSS6200) { + ioc->is_warpdrive = 1; + ioc->hide_ir_msg = 1; + } else + ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS; ioc->scsi_io_cb_idx = scsi_io_cb_idx; ioc->tm_cb_idx = tm_cb_idx; ioc->ctl_cb_idx = ctl_cb_idx; @@ -6947,6 +7430,20 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) } ioc->wait_for_port_enable_to_complete = 0; + if (ioc->is_warpdrive) { + if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) + ioc->hide_drives = 0; + else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS) + ioc->hide_drives = 1; + else { + if (_scsih_get_num_volumes(ioc)) + ioc->hide_drives = 1; + else + ioc->hide_drives = 0; + } + } else + ioc->hide_drives = 0; + _scsih_probe_devices(ioc); return 0; diff --git a/trunk/drivers/scsi/mvsas/Kconfig 
b/trunk/drivers/scsi/mvsas/Kconfig index 6de7af27e507..c82b012aba37 100644 --- a/trunk/drivers/scsi/mvsas/Kconfig +++ b/trunk/drivers/scsi/mvsas/Kconfig @@ -3,6 +3,7 @@ # # Copyright 2007 Red Hat, Inc. # Copyright 2008 Marvell. +# Copyright 2009-20011 Marvell. # # This file is licensed under GPLv2. # diff --git a/trunk/drivers/scsi/mvsas/Makefile b/trunk/drivers/scsi/mvsas/Makefile index ffbf759e46f1..87b231a5bd5e 100644 --- a/trunk/drivers/scsi/mvsas/Makefile +++ b/trunk/drivers/scsi/mvsas/Makefile @@ -3,6 +3,7 @@ # # Copyright 2007 Red Hat, Inc. # Copyright 2008 Marvell. +# Copyright 2009-2011 Marvell. # # This file is licensed under GPLv2. # diff --git a/trunk/drivers/scsi/mvsas/mv_64xx.c b/trunk/drivers/scsi/mvsas/mv_64xx.c index afc7f6f3a13e..13c960481391 100644 --- a/trunk/drivers/scsi/mvsas/mv_64xx.c +++ b/trunk/drivers/scsi/mvsas/mv_64xx.c @@ -3,6 +3,7 @@ * * Copyright 2007 Red Hat, Inc. * Copyright 2008 Marvell. + * Copyright 2009-2011 Marvell. * * This file is licensed under GPLv2. * diff --git a/trunk/drivers/scsi/mvsas/mv_64xx.h b/trunk/drivers/scsi/mvsas/mv_64xx.h index 42e947d9795e..545889bd9753 100644 --- a/trunk/drivers/scsi/mvsas/mv_64xx.h +++ b/trunk/drivers/scsi/mvsas/mv_64xx.h @@ -3,6 +3,7 @@ * * Copyright 2007 Red Hat, Inc. * Copyright 2008 Marvell. + * Copyright 2009-2011 Marvell. * * This file is licensed under GPLv2. * diff --git a/trunk/drivers/scsi/mvsas/mv_94xx.c b/trunk/drivers/scsi/mvsas/mv_94xx.c index eed4c5c72013..78162c3c36e6 100644 --- a/trunk/drivers/scsi/mvsas/mv_94xx.c +++ b/trunk/drivers/scsi/mvsas/mv_94xx.c @@ -3,6 +3,7 @@ * * Copyright 2007 Red Hat, Inc. * Copyright 2008 Marvell. + * Copyright 2009-2011 Marvell. * * This file is licensed under GPLv2. * diff --git a/trunk/drivers/scsi/mvsas/mv_94xx.h b/trunk/drivers/scsi/mvsas/mv_94xx.h index 23ed9b164669..8835befe2c0e 100644 --- a/trunk/drivers/scsi/mvsas/mv_94xx.h +++ b/trunk/drivers/scsi/mvsas/mv_94xx.h @@ -3,6 +3,7 @@ * * Copyright 2007 Red Hat, Inc. * Copyright 2008 Marvell. + * Copyright 2009-2011 Marvell. * * This file is licensed under GPLv2. * diff --git a/trunk/drivers/scsi/mvsas/mv_chips.h b/trunk/drivers/scsi/mvsas/mv_chips.h index a67e1c4172f9..1753a6fc42d0 100644 --- a/trunk/drivers/scsi/mvsas/mv_chips.h +++ b/trunk/drivers/scsi/mvsas/mv_chips.h @@ -3,6 +3,7 @@ * * Copyright 2007 Red Hat, Inc. * Copyright 2008 Marvell. + * Copyright 2009-2011 Marvell. * * This file is licensed under GPLv2. * diff --git a/trunk/drivers/scsi/mvsas/mv_defs.h b/trunk/drivers/scsi/mvsas/mv_defs.h index 1849da1f030d..bc00c940743c 100644 --- a/trunk/drivers/scsi/mvsas/mv_defs.h +++ b/trunk/drivers/scsi/mvsas/mv_defs.h @@ -3,6 +3,7 @@ * * Copyright 2007 Red Hat, Inc. * Copyright 2008 Marvell. + * Copyright 2009-2011 Marvell. * * This file is licensed under GPLv2. * @@ -34,6 +35,8 @@ enum chip_flavors { chip_6485, chip_9480, chip_9180, + chip_9445, + chip_9485, chip_1300, chip_1320 }; diff --git a/trunk/drivers/scsi/mvsas/mv_init.c b/trunk/drivers/scsi/mvsas/mv_init.c index 938d045e4180..90b636611cde 100644 --- a/trunk/drivers/scsi/mvsas/mv_init.c +++ b/trunk/drivers/scsi/mvsas/mv_init.c @@ -3,6 +3,7 @@ * * Copyright 2007 Red Hat, Inc. * Copyright 2008 Marvell. + * Copyright 2009-2011 Marvell. * * This file is licensed under GPLv2. * @@ -25,13 +26,24 @@ #include "mv_sas.h" +static int lldd_max_execute_num = 1; +module_param_named(collector, lldd_max_execute_num, int, S_IRUGO); +MODULE_PARM_DESC(collector, "\n" + "\tIf greater than one, tells the SAS Layer to run in Task Collector\n" + "\tMode. 
If 1 or 0, tells the SAS Layer to run in Direct Mode.\n" + "\tThe mvsas SAS LLDD supports both modes.\n" + "\tDefault: 1 (Direct Mode).\n"); + static struct scsi_transport_template *mvs_stt; +struct kmem_cache *mvs_task_list_cache; static const struct mvs_chip_info mvs_chips[] = { [chip_6320] = { 1, 2, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, [chip_6440] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, [chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, }, [chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, [chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, + [chip_9445] = { 1, 4, 0x800, 17, 64, 11, &mvs_94xx_dispatch, }, + [chip_9485] = { 2, 4, 0x800, 17, 64, 11, &mvs_94xx_dispatch, }, [chip_1300] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, [chip_1320] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, }; @@ -107,7 +119,6 @@ static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id) static void mvs_free(struct mvs_info *mvi) { - int i; struct mvs_wq *mwq; int slot_nr; @@ -119,12 +130,8 @@ static void mvs_free(struct mvs_info *mvi) else slot_nr = MVS_SLOTS; - for (i = 0; i < mvi->tags_num; i++) { - struct mvs_slot_info *slot = &mvi->slot_info[i]; - if (slot->buf) - dma_free_coherent(mvi->dev, MVS_SLOT_BUF_SZ, - slot->buf, slot->buf_dma); - } + if (mvi->dma_pool) + pci_pool_destroy(mvi->dma_pool); if (mvi->tx) dma_free_coherent(mvi->dev, @@ -213,6 +220,7 @@ static irqreturn_t mvs_interrupt(int irq, void *opaque) static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost) { int i = 0, slot_nr; + char pool_name[32]; if (mvi->flags & MVF_FLAG_SOC) slot_nr = MVS_SOC_SLOTS; @@ -272,18 +280,14 @@ static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost) if (!mvi->bulk_buffer) goto err_out; #endif - for (i = 0; i < slot_nr; i++) { - struct mvs_slot_info *slot = &mvi->slot_info[i]; - - slot->buf = dma_alloc_coherent(mvi->dev, MVS_SLOT_BUF_SZ, - &slot->buf_dma, GFP_KERNEL); - if (!slot->buf) { - printk(KERN_DEBUG"failed to allocate slot->buf.\n"); + sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id); + mvi->dma_pool = pci_pool_create(pool_name, mvi->pdev, MVS_SLOT_BUF_SZ, 16, 0); + if (!mvi->dma_pool) { + printk(KERN_DEBUG "failed to create dma pool %s.\n", pool_name); goto err_out; - } - memset(slot->buf, 0, MVS_SLOT_BUF_SZ); - ++mvi->tags_num; } + mvi->tags_num = slot_nr; + /* Initialize tags */ mvs_tag_init(mvi); return 0; @@ -484,7 +488,7 @@ static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost, sha->num_phys = nr_core * chip_info->n_phy; - sha->lldd_max_execute_num = 1; + sha->lldd_max_execute_num = lldd_max_execute_num; if (mvi->flags & MVF_FLAG_SOC) can_queue = MVS_SOC_CAN_QUEUE; @@ -670,6 +674,24 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = { { PCI_VDEVICE(TTI, 0x2740), chip_9480 }, { PCI_VDEVICE(TTI, 0x2744), chip_9480 }, { PCI_VDEVICE(TTI, 0x2760), chip_9480 }, + { + .vendor = 0x1b4b, + .device = 0x9445, + .subvendor = PCI_ANY_ID, + .subdevice = 0x9480, + .class = 0, + .class_mask = 0, + .driver_data = chip_9445, + }, + { + .vendor = 0x1b4b, + .device = 0x9485, + .subvendor = PCI_ANY_ID, + .subdevice = 0x9480, + .class = 0, + .class_mask = 0, + .driver_data = chip_9485, + }, { } /* terminate list */ }; @@ -690,6 +712,14 @@ static int __init mvs_init(void) if (!mvs_stt) return -ENOMEM; + mvs_task_list_cache = kmem_cache_create("mvs_task_list", sizeof(struct mvs_task_list), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!mvs_task_list_cache) { + rc = -ENOMEM; + mv_printk("%s: 
mvs_task_list_cache alloc failed! \n", __func__); + goto err_out; + } + rc = pci_register_driver(&mvs_pci_driver); if (rc) @@ -706,6 +736,7 @@ static void __exit mvs_exit(void) { pci_unregister_driver(&mvs_pci_driver); sas_release_transport(mvs_stt); + kmem_cache_destroy(mvs_task_list_cache); } module_init(mvs_init); diff --git a/trunk/drivers/scsi/mvsas/mv_sas.c b/trunk/drivers/scsi/mvsas/mv_sas.c index adedaa916ecb..0ef27425c447 100644 --- a/trunk/drivers/scsi/mvsas/mv_sas.c +++ b/trunk/drivers/scsi/mvsas/mv_sas.c @@ -3,6 +3,7 @@ * * Copyright 2007 Red Hat, Inc. * Copyright 2008 Marvell. + * Copyright 2009-2011 Marvell. * * This file is licensed under GPLv2. * @@ -862,178 +863,286 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, } #define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE))) -static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, - struct completion *completion,int is_tmf, - struct mvs_tmf_task *tmf) +static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf, + struct mvs_tmf_task *tmf, int *pass) { struct domain_device *dev = task->dev; - struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; - struct mvs_info *mvi = mvi_dev->mvi_info; + struct mvs_device *mvi_dev = dev->lldd_dev; struct mvs_task_exec_info tei; - struct sas_task *t = task; struct mvs_slot_info *slot; - u32 tag = 0xdeadbeef, rc, n_elem = 0; - u32 n = num, pass = 0; - unsigned long flags = 0, flags_libsas = 0; + u32 tag = 0xdeadbeef, n_elem = 0; + int rc = 0; if (!dev->port) { - struct task_status_struct *tsm = &t->task_status; + struct task_status_struct *tsm = &task->task_status; tsm->resp = SAS_TASK_UNDELIVERED; tsm->stat = SAS_PHY_DOWN; + /* + * libsas will use dev->port, should + * not call task_done for sata + */ if (dev->dev_type != SATA_DEV) - t->task_done(t); - return 0; + task->task_done(task); + return rc; } - spin_lock_irqsave(&mvi->lock, flags); - do { - dev = t->dev; - mvi_dev = dev->lldd_dev; - if (DEV_IS_GONE(mvi_dev)) { - if (mvi_dev) - mv_dprintk("device %d not ready.\n", - mvi_dev->device_id); - else - mv_dprintk("device %016llx not ready.\n", - SAS_ADDR(dev->sas_addr)); + if (DEV_IS_GONE(mvi_dev)) { + if (mvi_dev) + mv_dprintk("device %d not ready.\n", + mvi_dev->device_id); + else + mv_dprintk("device %016llx not ready.\n", + SAS_ADDR(dev->sas_addr)); rc = SAS_PHY_DOWN; - goto out_done; - } + return rc; + } + tei.port = dev->port->lldd_port; + if (tei.port && !tei.port->port_attached && !tmf) { + if (sas_protocol_ata(task->task_proto)) { + struct task_status_struct *ts = &task->task_status; + mv_dprintk("SATA/STP port %d does not attach" + "device.\n", dev->port->id); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PHY_DOWN; - if (dev->port->id >= mvi->chip->n_phy) - tei.port = &mvi->port[dev->port->id - mvi->chip->n_phy]; - else - tei.port = &mvi->port[dev->port->id]; - - if (tei.port && !tei.port->port_attached) { - if (sas_protocol_ata(t->task_proto)) { - struct task_status_struct *ts = &t->task_status; - - mv_dprintk("port %d does not" - "attached device.\n", dev->port->id); - ts->stat = SAS_PROTO_RESPONSE; - ts->stat = SAS_PHY_DOWN; - spin_unlock_irqrestore(dev->sata_dev.ap->lock, - flags_libsas); - spin_unlock_irqrestore(&mvi->lock, flags); - t->task_done(t); - spin_lock_irqsave(&mvi->lock, flags); - spin_lock_irqsave(dev->sata_dev.ap->lock, - flags_libsas); - if (n > 1) - t = list_entry(t->list.next, - struct sas_task, list); - continue; - } else { - struct task_status_struct *ts = &t->task_status; 
- ts->resp = SAS_TASK_UNDELIVERED; - ts->stat = SAS_PHY_DOWN; - t->task_done(t); - if (n > 1) - t = list_entry(t->list.next, - struct sas_task, list); - continue; - } - } + task->task_done(task); - if (!sas_protocol_ata(t->task_proto)) { - if (t->num_scatter) { - n_elem = dma_map_sg(mvi->dev, - t->scatter, - t->num_scatter, - t->data_dir); - if (!n_elem) { - rc = -ENOMEM; - goto err_out; - } - } } else { - n_elem = t->num_scatter; + struct task_status_struct *ts = &task->task_status; + mv_dprintk("SAS port %d does not attach" + "device.\n", dev->port->id); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + task->task_done(task); } + return rc; + } - rc = mvs_tag_alloc(mvi, &tag); - if (rc) - goto err_out; + if (!sas_protocol_ata(task->task_proto)) { + if (task->num_scatter) { + n_elem = dma_map_sg(mvi->dev, + task->scatter, + task->num_scatter, + task->data_dir); + if (!n_elem) { + rc = -ENOMEM; + goto prep_out; + } + } + } else { + n_elem = task->num_scatter; + } - slot = &mvi->slot_info[tag]; + rc = mvs_tag_alloc(mvi, &tag); + if (rc) + goto err_out; + slot = &mvi->slot_info[tag]; - t->lldd_task = NULL; - slot->n_elem = n_elem; - slot->slot_tag = tag; - memset(slot->buf, 0, MVS_SLOT_BUF_SZ); + task->lldd_task = NULL; + slot->n_elem = n_elem; + slot->slot_tag = tag; + + slot->buf = pci_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma); + if (!slot->buf) + goto err_out_tag; + memset(slot->buf, 0, MVS_SLOT_BUF_SZ); + + tei.task = task; + tei.hdr = &mvi->slot[tag]; + tei.tag = tag; + tei.n_elem = n_elem; + switch (task->task_proto) { + case SAS_PROTOCOL_SMP: + rc = mvs_task_prep_smp(mvi, &tei); + break; + case SAS_PROTOCOL_SSP: + rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf); + break; + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + rc = mvs_task_prep_ata(mvi, &tei); + break; + default: + dev_printk(KERN_ERR, mvi->dev, + "unknown sas_task proto: 0x%x\n", + task->task_proto); + rc = -EINVAL; + break; + } - tei.task = t; - tei.hdr = &mvi->slot[tag]; - tei.tag = tag; - tei.n_elem = n_elem; - switch (t->task_proto) { - case SAS_PROTOCOL_SMP: - rc = mvs_task_prep_smp(mvi, &tei); - break; - case SAS_PROTOCOL_SSP: - rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf); - break; - case SAS_PROTOCOL_SATA: - case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: - rc = mvs_task_prep_ata(mvi, &tei); - break; - default: - dev_printk(KERN_ERR, mvi->dev, - "unknown sas_task proto: 0x%x\n", - t->task_proto); - rc = -EINVAL; - break; - } + if (rc) { + mv_dprintk("rc is %x\n", rc); + goto err_out_slot_buf; + } + slot->task = task; + slot->port = tei.port; + task->lldd_task = slot; + list_add_tail(&slot->entry, &tei.port->list); + spin_lock(&task->task_state_lock); + task->task_state_flags |= SAS_TASK_AT_INITIATOR; + spin_unlock(&task->task_state_lock); - if (rc) { - mv_dprintk("rc is %x\n", rc); - goto err_out_tag; - } - slot->task = t; - slot->port = tei.port; - t->lldd_task = slot; - list_add_tail(&slot->entry, &tei.port->list); - /* TODO: select normal or high priority */ - spin_lock(&t->task_state_lock); - t->task_state_flags |= SAS_TASK_AT_INITIATOR; - spin_unlock(&t->task_state_lock); - - mvs_hba_memory_dump(mvi, tag, t->task_proto); - mvi_dev->running_req++; - ++pass; - mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); - if (n > 1) - t = list_entry(t->list.next, struct sas_task, list); - if (likely(pass)) - MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) & - (MVS_CHIP_SLOT_SZ - 1)); + mvs_hba_memory_dump(mvi, 
tag, task->task_proto); + mvi_dev->running_req++; + ++(*pass); + mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); - } while (--n); - rc = 0; - goto out_done; + return rc; +err_out_slot_buf: + pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma); err_out_tag: mvs_tag_free(mvi, tag); err_out: - dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc); - if (!sas_protocol_ata(t->task_proto)) + dev_printk(KERN_ERR, mvi->dev, "mvsas prep failed[%d]!\n", rc); + if (!sas_protocol_ata(task->task_proto)) if (n_elem) - dma_unmap_sg(mvi->dev, t->scatter, n_elem, - t->data_dir); -out_done: + dma_unmap_sg(mvi->dev, task->scatter, n_elem, + task->data_dir); +prep_out: + return rc; +} + +static struct mvs_task_list *mvs_task_alloc_list(int *num, gfp_t gfp_flags) +{ + struct mvs_task_list *first = NULL; + + for (; *num > 0; --*num) { + struct mvs_task_list *mvs_list = kmem_cache_zalloc(mvs_task_list_cache, gfp_flags); + + if (!mvs_list) + break; + + INIT_LIST_HEAD(&mvs_list->list); + if (!first) + first = mvs_list; + else + list_add_tail(&mvs_list->list, &first->list); + + } + + return first; +} + +static inline void mvs_task_free_list(struct mvs_task_list *mvs_list) +{ + LIST_HEAD(list); + struct list_head *pos, *a; + struct mvs_task_list *mlist = NULL; + + __list_add(&list, mvs_list->list.prev, &mvs_list->list); + + list_for_each_safe(pos, a, &list) { + list_del_init(pos); + mlist = list_entry(pos, struct mvs_task_list, list); + kmem_cache_free(mvs_task_list_cache, mlist); + } +} + +static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, + struct completion *completion, int is_tmf, + struct mvs_tmf_task *tmf) +{ + struct domain_device *dev = task->dev; + struct mvs_info *mvi = NULL; + u32 rc = 0; + u32 pass = 0; + unsigned long flags = 0; + + mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info; + + if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL)) + spin_unlock_irq(dev->sata_dev.ap->lock); + + spin_lock_irqsave(&mvi->lock, flags); + rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass); + if (rc) + dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc); + + if (likely(pass)) + MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) & + (MVS_CHIP_SLOT_SZ - 1)); spin_unlock_irqrestore(&mvi->lock, flags); + + if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL)) + spin_lock_irq(dev->sata_dev.ap->lock); + + return rc; +} + +static int mvs_collector_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, + struct completion *completion, int is_tmf, + struct mvs_tmf_task *tmf) +{ + struct domain_device *dev = task->dev; + struct mvs_prv_info *mpi = dev->port->ha->lldd_ha; + struct mvs_info *mvi = NULL; + struct sas_task *t = task; + struct mvs_task_list *mvs_list = NULL, *a; + LIST_HEAD(q); + int pass[2] = {0}; + u32 rc = 0; + u32 n = num; + unsigned long flags = 0; + + mvs_list = mvs_task_alloc_list(&n, gfp_flags); + if (n) { + printk(KERN_ERR "%s: mvs alloc list failed.\n", __func__); + rc = -ENOMEM; + goto free_list; + } + + __list_add(&q, mvs_list->list.prev, &mvs_list->list); + + list_for_each_entry(a, &q, list) { + a->task = t; + t = list_entry(t->list.next, struct sas_task, list); + } + + list_for_each_entry(a, &q , list) { + + t = a->task; + mvi = ((struct mvs_device *)t->dev->lldd_dev)->mvi_info; + + spin_lock_irqsave(&mvi->lock, flags); + rc = mvs_task_prep(t, mvi, is_tmf, tmf, &pass[mvi->id]); + if (rc) + dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc); + spin_unlock_irqrestore(&mvi->lock, flags); + } 
+ + if (likely(pass[0])) + MVS_CHIP_DISP->start_delivery(mpi->mvi[0], + (mpi->mvi[0]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); + + if (likely(pass[1])) + MVS_CHIP_DISP->start_delivery(mpi->mvi[1], + (mpi->mvi[1]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); + + list_del_init(&q); + +free_list: + if (mvs_list) + mvs_task_free_list(mvs_list); + return rc; } int mvs_queue_command(struct sas_task *task, const int num, gfp_t gfp_flags) { - return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL); + struct mvs_device *mvi_dev = task->dev->lldd_dev; + struct sas_ha_struct *sas = mvi_dev->mvi_info->sas; + + if (sas->lldd_max_execute_num < 2) + return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL); + else + return mvs_collector_task_exec(task, num, gfp_flags, NULL, 0, NULL); } static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) @@ -1067,6 +1176,11 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, /* do nothing */ break; } + + if (slot->buf) { + pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma); + slot->buf = NULL; + } list_del_init(&slot->entry); task->lldd_task = NULL; slot->task = NULL; @@ -1255,6 +1369,7 @@ static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock) spin_lock_irqsave(&mvi->lock, flags); port->port_attached = 1; phy->port = port; + sas_port->lldd_port = port; if (phy->phy_type & PORT_TYPE_SAS) { port->wide_port_phymap = sas_port->phy_mask; mv_printk("set wide port phy map %x\n", sas_port->phy_mask); diff --git a/trunk/drivers/scsi/mvsas/mv_sas.h b/trunk/drivers/scsi/mvsas/mv_sas.h index 77ddc7c1e5f2..1367d8b9350d 100644 --- a/trunk/drivers/scsi/mvsas/mv_sas.h +++ b/trunk/drivers/scsi/mvsas/mv_sas.h @@ -3,6 +3,7 @@ * * Copyright 2007 Red Hat, Inc. * Copyright 2008 Marvell. + * Copyright 2009-2011 Marvell. * * This file is licensed under GPLv2. 
* @@ -67,6 +68,7 @@ extern struct mvs_tgt_initiator mvs_tgt; extern struct mvs_info *tgt_mvi; extern const struct mvs_dispatch mvs_64xx_dispatch; extern const struct mvs_dispatch mvs_94xx_dispatch; +extern struct kmem_cache *mvs_task_list_cache; #define DEV_IS_EXPANDER(type) \ ((type == EDGE_DEV) || (type == FANOUT_DEV)) @@ -341,6 +343,7 @@ struct mvs_info { dma_addr_t bulk_buffer_dma; #define TRASH_BUCKET_SIZE 0x20000 #endif + void *dma_pool; struct mvs_slot_info slot_info[0]; }; @@ -367,6 +370,11 @@ struct mvs_task_exec_info { int n_elem; }; +struct mvs_task_list { + struct sas_task *task; + struct list_head list; +}; + /******************** function prototype *********************/ void mvs_get_sas_addr(void *buf, u32 buflen); diff --git a/trunk/drivers/scsi/ncr53c8xx.c b/trunk/drivers/scsi/ncr53c8xx.c index 835d8d66e696..4b3b4755945c 100644 --- a/trunk/drivers/scsi/ncr53c8xx.c +++ b/trunk/drivers/scsi/ncr53c8xx.c @@ -8147,7 +8147,7 @@ static int ncr53c8xx_abort(struct scsi_cmnd *cmd) unsigned long flags; struct scsi_cmnd *done_list; - printk("ncr53c8xx_abort: command pid %lu\n", cmd->serial_number); + printk("ncr53c8xx_abort\n"); NCR_LOCK_NCB(np, flags); diff --git a/trunk/drivers/scsi/pmcraid.c b/trunk/drivers/scsi/pmcraid.c index 96d5ad0c1e42..7f636b118287 100644 --- a/trunk/drivers/scsi/pmcraid.c +++ b/trunk/drivers/scsi/pmcraid.c @@ -3814,6 +3814,9 @@ static long pmcraid_ioctl_passthrough( rc = -EFAULT; goto out_free_buffer; } + } else if (request_size < 0) { + rc = -EINVAL; + goto out_free_buffer; } /* check if we have any additional command parameters */ diff --git a/trunk/drivers/scsi/qla1280.c b/trunk/drivers/scsi/qla1280.c index 8ba5744c267e..d838205ab169 100644 --- a/trunk/drivers/scsi/qla1280.c +++ b/trunk/drivers/scsi/qla1280.c @@ -4066,7 +4066,7 @@ __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd) } */ printk(" tag=%d, transfersize=0x%x \n", cmd->tag, cmd->transfersize); - printk(" Pid=%li, SP=0x%p\n", cmd->serial_number, CMD_SP(cmd)); + printk(" SP=0x%p\n", CMD_SP(cmd)); printk(" underflow size = 0x%x, direction=0x%x\n", cmd->underflow, cmd->sc_data_direction); } diff --git a/trunk/drivers/scsi/qla2xxx/qla_attr.c b/trunk/drivers/scsi/qla2xxx/qla_attr.c index d3e58d763b43..532313e0725e 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_attr.c +++ b/trunk/drivers/scsi/qla2xxx/qla_attr.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2010 QLogic Corporation + * Copyright (c) 2003-2011 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. 
*/ @@ -496,8 +496,8 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj, offset = 0; } - rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, addr, offset, - SFP_BLOCK_SIZE); + rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data, + addr, offset, SFP_BLOCK_SIZE, 0); if (rval != QLA_SUCCESS) { qla_printk(KERN_WARNING, ha, "Unable to read SFP data (%x/%x/%x).\n", rval, @@ -628,12 +628,12 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj, memcpy(ha->edc_data, &buf[8], len); - rval = qla2x00_write_edc(vha, dev, adr, ha->edc_data_dma, - ha->edc_data, len, opt); + rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data, + dev, adr, len, opt); if (rval != QLA_SUCCESS) { DEBUG2(qla_printk(KERN_INFO, ha, "Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n", - rval, dev, adr, opt, len, *buf)); + rval, dev, adr, opt, len, buf[8])); return 0; } @@ -685,8 +685,8 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj, return -EINVAL; memset(ha->edc_data, 0, len); - rval = qla2x00_read_edc(vha, dev, adr, ha->edc_data_dma, - ha->edc_data, len, opt); + rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data, + dev, adr, len, opt); if (rval != QLA_SUCCESS) { DEBUG2(qla_printk(KERN_INFO, ha, "Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n", @@ -1568,7 +1568,7 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) /* Now that the rport has been deleted, set the fcport state to FCS_DEVICE_DEAD */ - atomic_set(&fcport->state, FCS_DEVICE_DEAD); + qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD); /* * Transport has effectively 'deleted' the rport, clear @@ -1877,14 +1877,15 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) scsi_remove_host(vha->host); + /* Allow timer to run to drain queued items, when removing vp */ + qla24xx_deallocate_vp_id(vha); + if (vha->timer_active) { qla2x00_vp_stop_timer(vha); DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]" " = %p has stopped\n", vha->host_no, vha->vp_idx, vha)); } - qla24xx_deallocate_vp_id(vha); - /* No pending activities shall be there on the vha now */ DEBUG(msleep(random32()%10)); /* Just to see if something falls on * the net we have placed below */ diff --git a/trunk/drivers/scsi/qla2xxx/qla_bsg.c b/trunk/drivers/scsi/qla2xxx/qla_bsg.c index 903b0586ded3..8c10e2c4928e 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_bsg.c +++ b/trunk/drivers/scsi/qla2xxx/qla_bsg.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2010 QLogic Corporation + * Copyright (c) 2003-2011 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ diff --git a/trunk/drivers/scsi/qla2xxx/qla_bsg.h b/trunk/drivers/scsi/qla2xxx/qla_bsg.h index 074a999c7017..0f0f54e35f06 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_bsg.h +++ b/trunk/drivers/scsi/qla2xxx/qla_bsg.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2010 QLogic Corporation + * Copyright (c) 2003-2011 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ diff --git a/trunk/drivers/scsi/qla2xxx/qla_dbg.c b/trunk/drivers/scsi/qla2xxx/qla_dbg.c index 096141148257..c53719a9a747 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_dbg.c +++ b/trunk/drivers/scsi/qla2xxx/qla_dbg.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2010 QLogic Corporation + * Copyright (c) 2003-2011 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. 
*/ diff --git a/trunk/drivers/scsi/qla2xxx/qla_dbg.h b/trunk/drivers/scsi/qla2xxx/qla_dbg.h index b74e6b5743dc..930414541ec6 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_dbg.h +++ b/trunk/drivers/scsi/qla2xxx/qla_dbg.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2010 QLogic Corporation + * Copyright (c) 2003-2011 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ diff --git a/trunk/drivers/scsi/qla2xxx/qla_def.h b/trunk/drivers/scsi/qla2xxx/qla_def.h index ee20353c8550..cc5a79259d33 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_def.h +++ b/trunk/drivers/scsi/qla2xxx/qla_def.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2010 QLogic Corporation + * Copyright (c) 2003-2011 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -1717,6 +1717,14 @@ typedef struct fc_port { #define FCS_DEVICE_LOST 3 #define FCS_ONLINE 4 +static const char * const port_state_str[] = { + "Unknown", + "UNCONFIGURED", + "DEAD", + "LOST", + "ONLINE" +}; + /* * FC port flags. */ diff --git a/trunk/drivers/scsi/qla2xxx/qla_dfs.c b/trunk/drivers/scsi/qla2xxx/qla_dfs.c index 6271353e8c51..a5a4e1275bf2 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_dfs.c +++ b/trunk/drivers/scsi/qla2xxx/qla_dfs.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2010 QLogic Corporation + * Copyright (c) 2003-2011 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ diff --git a/trunk/drivers/scsi/qla2xxx/qla_fw.h b/trunk/drivers/scsi/qla2xxx/qla_fw.h index f5ba09c8a663..691783abfb69 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_fw.h +++ b/trunk/drivers/scsi/qla2xxx/qla_fw.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2010 QLogic Corporation + * Copyright (c) 2003-2011 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -416,8 +416,7 @@ struct cmd_type_6 { uint8_t vp_index; uint32_t fcp_data_dseg_address[2]; /* Data segment address. */ - uint16_t fcp_data_dseg_len; /* Data segment length. */ - uint16_t reserved_1; /* MUST be set to 0. */ + uint32_t fcp_data_dseg_len; /* Data segment length. */ }; #define COMMAND_TYPE_7 0x18 /* Command Type 7 entry */ diff --git a/trunk/drivers/scsi/qla2xxx/qla_gbl.h b/trunk/drivers/scsi/qla2xxx/qla_gbl.h index d48326ee3f61..0b381224ae4b 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_gbl.h +++ b/trunk/drivers/scsi/qla2xxx/qla_gbl.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2010 QLogic Corporation + * Copyright (c) 2003-2011 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. 
*/ @@ -39,6 +39,8 @@ extern int qla81xx_load_risc(scsi_qla_host_t *, uint32_t *); extern int qla2x00_perform_loop_resync(scsi_qla_host_t *); extern int qla2x00_loop_resync(scsi_qla_host_t *); +extern int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *); + extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *); extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *); @@ -100,6 +102,8 @@ extern int ql2xgffidenable; extern int ql2xenabledif; extern int ql2xenablehba_err_chk; extern int ql2xtargetreset; +extern int ql2xdontresethba; +extern unsigned int ql2xmaxlun; extern int qla2x00_loop_reset(scsi_qla_host_t *); extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); @@ -319,15 +323,12 @@ extern int qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *); extern int -qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t, uint16_t); - -extern int -qla2x00_read_edc(scsi_qla_host_t *, uint16_t, uint16_t, dma_addr_t, - uint8_t *, uint16_t, uint16_t); +qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *, + uint16_t, uint16_t, uint16_t, uint16_t); extern int -qla2x00_write_edc(scsi_qla_host_t *, uint16_t, uint16_t, dma_addr_t, - uint8_t *, uint16_t, uint16_t); +qla2x00_write_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *, + uint16_t, uint16_t, uint16_t, uint16_t); extern int qla2x00_set_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t *); @@ -549,7 +550,6 @@ extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32); extern int qla82xx_rd_32(struct qla_hw_data *, ulong); extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int); extern int qla82xx_wrmem(struct qla_hw_data *, u64, void *, int); -extern void qla82xx_rom_unlock(struct qla_hw_data *); /* ISP 8021 IDC */ extern void qla82xx_clear_drv_active(struct qla_hw_data *); diff --git a/trunk/drivers/scsi/qla2xxx/qla_gs.c b/trunk/drivers/scsi/qla2xxx/qla_gs.c index 74a91b6dfc68..8cd9066ad906 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_gs.c +++ b/trunk/drivers/scsi/qla2xxx/qla_gs.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2010 QLogic Corporation + * Copyright (c) 2003-2011 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ diff --git a/trunk/drivers/scsi/qla2xxx/qla_init.c b/trunk/drivers/scsi/qla2xxx/qla_init.c index 8575808dbae0..920b76bfbb93 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_init.c +++ b/trunk/drivers/scsi/qla2xxx/qla_init.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2010 QLogic Corporation + * Copyright (c) 2003-2011 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -35,8 +35,6 @@ static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *, static int qla2x00_restart_isp(scsi_qla_host_t *); -static int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *); - static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *); static int qla84xx_init_chip(scsi_qla_host_t *); static int qla25xx_init_queues(struct qla_hw_data *); @@ -385,8 +383,18 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport, switch (data[0]) { case MBS_COMMAND_COMPLETE: + /* + * Driver must validate login state - If PRLI not complete, + * force a relogin attempt via implicit LOGO, PLOGI, and PRLI + * requests. 
+ */ + rval = qla2x00_get_port_database(vha, fcport, 0); + if (rval != QLA_SUCCESS) { + qla2x00_post_async_logout_work(vha, fcport, NULL); + qla2x00_post_async_login_work(vha, fcport, NULL); + break; + } if (fcport->flags & FCF_FCP2_DEVICE) { - fcport->flags |= FCF_ASYNC_SENT; qla2x00_post_async_adisc_work(vha, fcport, data); break; } @@ -397,7 +405,7 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport, if (data[1] & QLA_LOGIO_LOGIN_RETRIED) set_bit(RELOGIN_NEEDED, &vha->dpc_flags); else - qla2x00_mark_device_lost(vha, fcport, 1, 1); + qla2x00_mark_device_lost(vha, fcport, 1, 0); break; case MBS_PORT_ID_USED: fcport->loop_id = data[1]; @@ -409,7 +417,7 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport, rval = qla2x00_find_new_loop_id(vha, fcport); if (rval != QLA_SUCCESS) { fcport->flags &= ~FCF_ASYNC_SENT; - qla2x00_mark_device_lost(vha, fcport, 1, 1); + qla2x00_mark_device_lost(vha, fcport, 1, 0); break; } qla2x00_post_async_login_work(vha, fcport, NULL); @@ -441,7 +449,7 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport, if (data[1] & QLA_LOGIO_LOGIN_RETRIED) set_bit(RELOGIN_NEEDED, &vha->dpc_flags); else - qla2x00_mark_device_lost(vha, fcport, 1, 1); + qla2x00_mark_device_lost(vha, fcport, 1, 0); return; } @@ -2536,7 +2544,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) fcport->vp_idx = vha->vp_idx; fcport->port_type = FCT_UNKNOWN; fcport->loop_id = FC_NO_LOOP_ID; - atomic_set(&fcport->state, FCS_UNCONFIGURED); + qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); fcport->supported_classes = FC_COS_UNSPECIFIED; return fcport; @@ -2722,7 +2730,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) "loop_id=0x%04x\n", vha->host_no, fcport->loop_id)); - atomic_set(&fcport->state, FCS_DEVICE_LOST); + qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); } } @@ -2934,7 +2942,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) qla2x00_iidma_fcport(vha, fcport); qla24xx_update_fcport_fcp_prio(vha, fcport); qla2x00_reg_remote_port(vha, fcport); - atomic_set(&fcport->state, FCS_ONLINE); + qla2x00_set_fcport_state(fcport, FCS_ONLINE); } /* @@ -3391,7 +3399,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, * Context: * Kernel context. */ -static int +int qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) { int rval; @@ -5202,7 +5210,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) } /* Reset Initialization control block */ - memset(icb, 0, sizeof(struct init_cb_81xx)); + memset(icb, 0, ha->init_cb_size); /* Copy 1st segment. 
*/ dptr1 = (uint8_t *)icb; @@ -5427,6 +5435,13 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) ha->isp_abort_cnt = 0; clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); + /* Update the firmware version */ + qla2x00_get_fw_version(vha, &ha->fw_major_version, + &ha->fw_minor_version, &ha->fw_subminor_version, + &ha->fw_attributes, &ha->fw_memory_size, + ha->mpi_version, &ha->mpi_capabilities, + ha->phy_version); + if (ha->fce) { ha->flags.fce_enabled = 1; memset(ha->fce, 0, @@ -5508,26 +5523,26 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha) * * Return: * non-zero (if found) - * 0 (if not found) + * -1 (if not found) * * Context: * Kernel context */ -uint8_t +static int qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) { int i, entries; uint8_t pid_match, wwn_match; - uint8_t priority; + int priority; uint32_t pid1, pid2; uint64_t wwn1, wwn2; struct qla_fcp_prio_entry *pri_entry; struct qla_hw_data *ha = vha->hw; if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled) - return 0; + return -1; - priority = 0; + priority = -1; entries = ha->fcp_prio_cfg->num_entries; pri_entry = &ha->fcp_prio_cfg->entry[0]; @@ -5610,7 +5625,7 @@ int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) { int ret; - uint8_t priority; + int priority; uint16_t mb[5]; if (fcport->port_type != FCT_TARGET || @@ -5618,6 +5633,9 @@ qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) return QLA_FUNCTION_FAILED; priority = qla24xx_get_fcp_prio(vha, fcport); + if (priority < 0) + return QLA_FUNCTION_FAILED; + ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb); if (ret == QLA_SUCCESS) fcport->fcp_prio = priority; diff --git a/trunk/drivers/scsi/qla2xxx/qla_inline.h b/trunk/drivers/scsi/qla2xxx/qla_inline.h index 48f97a92e33d..4c8167e11f69 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_inline.h +++ b/trunk/drivers/scsi/qla2xxx/qla_inline.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2010 QLogic Corporation + * Copyright (c) 2003-2011 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -83,3 +83,22 @@ qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp) } INIT_LIST_HEAD(&((struct crc_context *)sp->ctx)->dsd_list); } + +static inline void +qla2x00_set_fcport_state(fc_port_t *fcport, int state) +{ + int old_state; + + old_state = atomic_read(&fcport->state); + atomic_set(&fcport->state, state); + + /* Don't print state transitions during initial allocation of fcport */ + if (old_state && old_state != state) { + DEBUG(qla_printk(KERN_WARNING, fcport->vha->hw, + "scsi(%ld): FCPort state transitioned from %s to %s - " + "portid=%02x%02x%02x.\n", fcport->vha->host_no, + port_state_str[old_state], port_state_str[state], + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa)); + } +} diff --git a/trunk/drivers/scsi/qla2xxx/qla_iocb.c b/trunk/drivers/scsi/qla2xxx/qla_iocb.c index d78d5896fc33..7bac3cd109d6 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_iocb.c +++ b/trunk/drivers/scsi/qla2xxx/qla_iocb.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2010 QLogic Corporation + * Copyright (c) 2003-2011 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. 
*/ diff --git a/trunk/drivers/scsi/qla2xxx/qla_isr.c b/trunk/drivers/scsi/qla2xxx/qla_isr.c index 712518d05128..9c0f0e3389eb 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_isr.c +++ b/trunk/drivers/scsi/qla2xxx/qla_isr.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2010 QLogic Corporation + * Copyright (c) 2003-2011 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -843,7 +843,10 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha, qla_printk(KERN_WARNING, ha, "Invalid SCSI completion handle %d.\n", index); - set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + if (IS_QLA82XX(ha)) + set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); return; } @@ -861,7 +864,10 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha, qla_printk(KERN_WARNING, ha, "Invalid ISP SCSI completion handle\n"); - set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + if (IS_QLA82XX(ha)) + set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } } @@ -878,7 +884,10 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, if (index >= MAX_OUTSTANDING_COMMANDS) { qla_printk(KERN_WARNING, ha, "%s: Invalid completion handle (%x).\n", func, index); - set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + if (IS_QLA82XX(ha)) + set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); goto done; } sp = req->outstanding_cmds[index]; @@ -1564,7 +1573,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) "scsi(%ld): Invalid status handle (0x%x).\n", vha->host_no, sts->handle); - set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + if (IS_QLA82XX(ha)) + set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); return; } @@ -1794,12 +1806,13 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) if (logit) DEBUG2(qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) " - "oxid=0x%x cdb=%02x%02x%02x len=0x%x " + "portid=%02x%02x%02x oxid=0x%x cdb=%02x%02x%02x len=0x%x " "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no, cp->device->id, cp->device->lun, comp_status, scsi_status, - cp->result, ox_id, cp->cmnd[0], - cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len, - resid_len, fw_resid_len)); + cp->result, fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa, ox_id, cp->cmnd[0], cp->cmnd[1], + cp->cmnd[2], scsi_bufflen(cp), rsp_info_len, resid_len, + fw_resid_len)); if (rsp->status_srb == NULL) qla2x00_sp_compl(ha, sp); @@ -1908,13 +1921,17 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) qla2x00_sp_compl(ha, sp); } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type == - COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) { + COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7 + || pkt->entry_type == COMMAND_TYPE_6) { DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n", - vha->host_no)); + vha->host_no)); qla_printk(KERN_WARNING, ha, - "Error entry - invalid handle\n"); + "Error entry - invalid handle\n"); - set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + if (IS_QLA82XX(ha)) + set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } } diff --git a/trunk/drivers/scsi/qla2xxx/qla_mbx.c b/trunk/drivers/scsi/qla2xxx/qla_mbx.c index 
34893397ac84..c26f0acdfecc 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_mbx.c +++ b/trunk/drivers/scsi/qla2xxx/qla_mbx.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2010 QLogic Corporation + * Copyright (c) 2003-2011 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -1261,11 +1261,12 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt) /* Check for logged in state. */ if (pd24->current_login_state != PDS_PRLI_COMPLETE && pd24->last_login_state != PDS_PRLI_COMPLETE) { - DEBUG2(printk("%s(%ld): Unable to verify " - "login-state (%x/%x) for loop_id %x\n", - __func__, vha->host_no, - pd24->current_login_state, - pd24->last_login_state, fcport->loop_id)); + DEBUG2(qla_printk(KERN_WARNING, ha, + "scsi(%ld): Unable to verify login-state (%x/%x) " + " - portid=%02x%02x%02x.\n", vha->host_no, + pd24->current_login_state, pd24->last_login_state, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa)); rval = QLA_FUNCTION_FAILED; goto gpd_error_out; } @@ -1289,6 +1290,12 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt) /* Check for logged in state. */ if (pd->master_state != PD_STATE_PORT_LOGGED_IN && pd->slave_state != PD_STATE_PORT_LOGGED_IN) { + DEBUG2(qla_printk(KERN_WARNING, ha, + "scsi(%ld): Unable to verify login-state (%x/%x) " + " - portid=%02x%02x%02x.\n", vha->host_no, + pd->master_state, pd->slave_state, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa)); rval = QLA_FUNCTION_FAILED; goto gpd_error_out; } @@ -1883,7 +1890,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, lg->handle = MAKE_HANDLE(req->id, lg->handle); lg->nport_handle = cpu_to_le16(loop_id); lg->control_flags = - __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); + __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO| + LCF_FREE_NPORT); lg->port_id[0] = al_pa; lg->port_id[1] = area; lg->port_id[2] = domain; @@ -2362,7 +2370,7 @@ qla24xx_abort_command(srb_t *sp) abt->entry_count = 1; abt->handle = MAKE_HANDLE(req->id, abt->handle); abt->nport_handle = cpu_to_le16(fcport->loop_id); - abt->handle_to_abort = handle; + abt->handle_to_abort = MAKE_HANDLE(req->id, handle); abt->port_id[0] = fcport->d_id.b.al_pa; abt->port_id[1] = fcport->d_id.b.area; abt->port_id[2] = fcport->d_id.b.domain; @@ -2778,44 +2786,6 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd) return rval; } -int -qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint16_t addr, - uint16_t off, uint16_t count) -{ - int rval; - mbx_cmd_t mc; - mbx_cmd_t *mcp = &mc; - - if (!IS_FWI2_CAPABLE(vha->hw)) - return QLA_FUNCTION_FAILED; - - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); - - mcp->mb[0] = MBC_READ_SFP; - mcp->mb[1] = addr; - mcp->mb[2] = MSW(sfp_dma); - mcp->mb[3] = LSW(sfp_dma); - mcp->mb[6] = MSW(MSD(sfp_dma)); - mcp->mb[7] = LSW(MSD(sfp_dma)); - mcp->mb[8] = count; - mcp->mb[9] = off; - mcp->mb[10] = 0; - mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; - mcp->in_mb = MBX_0; - mcp->tov = MBX_TOV_SECONDS; - mcp->flags = 0; - rval = qla2x00_mailbox_command(vha, mcp); - - if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, - vha->host_no, rval, mcp->mb[0])); - } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); - } - - return rval; -} - int qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t *port_speed, 
uint16_t *mb) @@ -3581,15 +3551,22 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) } int -qla2x00_read_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr, - dma_addr_t sfp_dma, uint8_t *sfp, uint16_t len, uint16_t opt) +qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, + uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + if (!IS_FWI2_CAPABLE(ha)) + return QLA_FUNCTION_FAILED; DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + if (len == 1) + opt |= BIT_0; + mcp->mb[0] = MBC_READ_SFP; mcp->mb[1] = dev; mcp->mb[2] = MSW(sfp_dma); @@ -3597,17 +3574,16 @@ qla2x00_read_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr, mcp->mb[6] = MSW(MSD(sfp_dma)); mcp->mb[7] = LSW(MSD(sfp_dma)); mcp->mb[8] = len; - mcp->mb[9] = adr; + mcp->mb[9] = off; mcp->mb[10] = opt; mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; - mcp->in_mb = MBX_0; + mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (opt & BIT_0) - if (sfp) - *sfp = mcp->mb[8]; + *sfp = mcp->mb[1]; if (rval != QLA_SUCCESS) { DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, @@ -3620,18 +3596,24 @@ qla2x00_read_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr, } int -qla2x00_write_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr, - dma_addr_t sfp_dma, uint8_t *sfp, uint16_t len, uint16_t opt) +qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, + uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) { int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + if (!IS_FWI2_CAPABLE(ha)) + return QLA_FUNCTION_FAILED; DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + if (len == 1) + opt |= BIT_0; + if (opt & BIT_0) - if (sfp) - len = *sfp; + len = *sfp; mcp->mb[0] = MBC_WRITE_SFP; mcp->mb[1] = dev; @@ -3640,10 +3622,10 @@ qla2x00_write_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr, mcp->mb[6] = MSW(MSD(sfp_dma)); mcp->mb[7] = LSW(MSD(sfp_dma)); mcp->mb[8] = len; - mcp->mb[9] = adr; + mcp->mb[9] = off; mcp->mb[10] = opt; mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; - mcp->in_mb = MBX_0; + mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); @@ -4160,63 +4142,32 @@ int qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac) { int rval; - mbx_cmd_t mc; - mbx_cmd_t *mcp = &mc; + uint8_t byte; struct qla_hw_data *ha = vha->hw; - DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, ha->host_no)); + DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, vha->host_no)); - /* High bits. 
*/ - mcp->mb[0] = MBC_READ_SFP; - mcp->mb[1] = 0x98; - mcp->mb[2] = 0; - mcp->mb[3] = 0; - mcp->mb[6] = 0; - mcp->mb[7] = 0; - mcp->mb[8] = 1; - mcp->mb[9] = 0x01; - mcp->mb[10] = BIT_13|BIT_0; - mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; - mcp->in_mb = MBX_1|MBX_0; - mcp->tov = MBX_TOV_SECONDS; - mcp->flags = 0; - rval = qla2x00_mailbox_command(vha, mcp); + /* Integer part */ + rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0); if (rval != QLA_SUCCESS) { DEBUG2_3_11(printk(KERN_WARNING - "%s(%ld): failed=%x (%x).\n", __func__, - vha->host_no, rval, mcp->mb[0])); + "%s(%ld): failed=%x.\n", __func__, vha->host_no, rval)); ha->flags.thermal_supported = 0; goto fail; } - *temp = mcp->mb[1] & 0xFF; + *temp = byte; - /* Low bits. */ - mcp->mb[0] = MBC_READ_SFP; - mcp->mb[1] = 0x98; - mcp->mb[2] = 0; - mcp->mb[3] = 0; - mcp->mb[6] = 0; - mcp->mb[7] = 0; - mcp->mb[8] = 1; - mcp->mb[9] = 0x10; - mcp->mb[10] = BIT_13|BIT_0; - mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; - mcp->in_mb = MBX_1|MBX_0; - mcp->tov = MBX_TOV_SECONDS; - mcp->flags = 0; - rval = qla2x00_mailbox_command(vha, mcp); + /* Fraction part */ + rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1, BIT_13|BIT_0); if (rval != QLA_SUCCESS) { DEBUG2_3_11(printk(KERN_WARNING - "%s(%ld): failed=%x (%x).\n", __func__, - vha->host_no, rval, mcp->mb[0])); + "%s(%ld): failed=%x.\n", __func__, vha->host_no, rval)); ha->flags.thermal_supported = 0; goto fail; } - *frac = ((mcp->mb[1] & 0xFF) >> 6) * 25; + *frac = (byte >> 6) * 25; - if (rval == QLA_SUCCESS) - DEBUG11(printk(KERN_INFO - "%s(%ld): done.\n", __func__, ha->host_no)); + DEBUG11(printk(KERN_INFO "%s(%ld): done.\n", __func__, vha->host_no)); fail: return rval; } diff --git a/trunk/drivers/scsi/qla2xxx/qla_mid.c b/trunk/drivers/scsi/qla2xxx/qla_mid.c index 2b69392a71a1..5e343919acad 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_mid.c +++ b/trunk/drivers/scsi/qla2xxx/qla_mid.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2010 QLogic Corporation + * Copyright (c) 2003-2011 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -136,7 +136,7 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) vha->host_no, fcport->loop_id, fcport->vp_idx)); qla2x00_mark_device_lost(vha, fcport, 0, 0); - atomic_set(&fcport->state, FCS_UNCONFIGURED); + qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); } } @@ -456,7 +456,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport) else host->max_cmd_len = MAX_CMDSZ; host->max_channel = MAX_BUSES - 1; - host->max_lun = MAX_LUNS; + host->max_lun = ql2xmaxlun; host->unique_id = host->host_no; host->max_id = MAX_TARGETS_2200; host->transportt = qla2xxx_transport_vport_template; diff --git a/trunk/drivers/scsi/qla2xxx/qla_nx.c b/trunk/drivers/scsi/qla2xxx/qla_nx.c index 455fe134d31d..e1138bcc834c 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_nx.c +++ b/trunk/drivers/scsi/qla2xxx/qla_nx.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2010 QLogic Corporation + * Copyright (c) 2003-2011 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. 
*/ @@ -844,6 +844,12 @@ qla82xx_rom_lock(struct qla_hw_data *ha) return 0; } +static void +qla82xx_rom_unlock(struct qla_hw_data *ha) +{ + qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); +} + static int qla82xx_wait_rom_busy(struct qla_hw_data *ha) { @@ -924,7 +930,7 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) return -1; } ret = qla82xx_do_rom_fast_read(ha, addr, valp); - qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); + qla82xx_rom_unlock(ha); return ret; } @@ -1056,7 +1062,7 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr, ret = qla82xx_flash_wait_write_finish(ha); done_write: - qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); + qla82xx_rom_unlock(ha); return ret; } @@ -1081,12 +1087,26 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha) /* Halt all the indiviual PEGs and other blocks of the ISP */ qla82xx_rom_lock(ha); - /* mask all niu interrupts */ + /* disable all I2Q */ + qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0); + qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0); + qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0); + qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0); + qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0); + qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0); + + /* disable all niu interrupts */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff); /* disable xge rx/tx */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00); /* disable xg1 rx/tx */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00); + /* disable sideband mac */ + qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00); + /* disable ap0 mac */ + qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00); + /* disable ap1 mac */ + qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00); /* halt sre */ val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000); @@ -1101,6 +1121,7 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha) qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0); + qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0); /* halt pegs */ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1); @@ -1108,9 +1129,9 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha) qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1); + msleep(20); /* big hammer */ - msleep(1000); if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) /* don't reset CAM block on reset */ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff); @@ -1129,7 +1150,7 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha) qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val); msleep(20); - qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); + qla82xx_rom_unlock(ha); /* Read the signature value from the flash. 
* Offset 0: Contain signature (0xcafecafe) @@ -2395,9 +2416,13 @@ qla82xx_load_fw(scsi_qla_host_t *vha) if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) { qla_printk(KERN_ERR, ha, - "Firmware loaded successfully from flash\n"); + "Firmware loaded successfully from flash\n"); return QLA_SUCCESS; + } else { + qla_printk(KERN_ERR, ha, + "Firmware load from flash failed\n"); } + try_blob_fw: qla_printk(KERN_INFO, ha, "Attempting to load firmware from blob\n"); @@ -2548,11 +2573,11 @@ qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address; *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); - cmd_pkt->fcp_data_dseg_len = dsd_list_len; + *dsd_seg++ = cpu_to_le32(dsd_list_len); } else { *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); - *cur_dsd++ = dsd_list_len; + *cur_dsd++ = cpu_to_le32(dsd_list_len); } cur_dsd = (uint32_t *)next_dsd; while (avail_dsds) { @@ -2991,7 +3016,7 @@ qla82xx_unprotect_flash(struct qla_hw_data *ha) qla_printk(KERN_WARNING, ha, "Write disable failed\n"); done_unprotect: - qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); + qla82xx_rom_unlock(ha); return ret; } @@ -3020,7 +3045,7 @@ qla82xx_protect_flash(struct qla_hw_data *ha) if (qla82xx_write_disable_flash(ha) != 0) qla_printk(KERN_WARNING, ha, "Write disable failed\n"); done_protect: - qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); + qla82xx_rom_unlock(ha); return ret; } @@ -3048,7 +3073,7 @@ qla82xx_erase_sector(struct qla_hw_data *ha, int addr) } ret = qla82xx_flash_wait_write_finish(ha); done: - qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); + qla82xx_rom_unlock(ha); return ret; } @@ -3228,7 +3253,7 @@ void qla82xx_rom_lock_recovery(struct qla_hw_data *ha) * else died while holding it. * In either case, unlock. */ - qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); + qla82xx_rom_unlock(ha); } /* @@ -3528,15 +3553,18 @@ int qla82xx_device_state_handler(scsi_qla_host_t *vha) { uint32_t dev_state; + uint32_t old_dev_state; int rval = QLA_SUCCESS; unsigned long dev_init_timeout; struct qla_hw_data *ha = vha->hw; + int loopcount = 0; qla82xx_idc_lock(ha); if (!vha->flags.init_done) qla82xx_set_drv_active(vha); dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); + old_dev_state = dev_state; qla_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state, dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); @@ -3553,10 +3581,16 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha) break; } dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); - qla_printk(KERN_INFO, ha, - "2:Device state is 0x%x = %s\n", dev_state, - dev_state < MAX_STATES ? - qdev_state[dev_state] : "Unknown"); + if (old_dev_state != dev_state) { + loopcount = 0; + old_dev_state = dev_state; + } + if (loopcount < 5) { + qla_printk(KERN_INFO, ha, + "2:Device state is 0x%x = %s\n", dev_state, + dev_state < MAX_STATES ? 
+ qdev_state[dev_state] : "Unknown"); + } switch (dev_state) { case QLA82XX_DEV_READY: @@ -3570,6 +3604,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha) qla82xx_idc_lock(ha); break; case QLA82XX_DEV_NEED_RESET: + if (!ql2xdontresethba) qla82xx_need_reset_handler(vha); dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); @@ -3604,6 +3639,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha) msleep(1000); qla82xx_idc_lock(ha); } + loopcount++; } exit: qla82xx_idc_unlock(ha); @@ -3621,7 +3657,8 @@ void qla82xx_watchdog(scsi_qla_host_t *vha) if (dev_state == QLA82XX_DEV_NEED_RESET && !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { qla_printk(KERN_WARNING, ha, - "%s(): Adapter reset needed!\n", __func__); + "scsi(%ld) %s: Adapter reset needed!\n", + vha->host_no, __func__); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && @@ -3632,10 +3669,27 @@ void qla82xx_watchdog(scsi_qla_host_t *vha) set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } else { - qla82xx_check_fw_alive(vha); if (qla82xx_check_fw_alive(vha)) { halt_status = qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS1); + qla_printk(KERN_INFO, ha, + "scsi(%ld): %s, Dumping hw/fw registers:\n " + " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n " + " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n " + " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n " + " PEG_NET_4_PC: 0x%x\n", + vha->host_no, __func__, halt_status, + qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2), + qla82xx_rd_32(ha, + QLA82XX_CRB_PEG_NET_0 + 0x3c), + qla82xx_rd_32(ha, + QLA82XX_CRB_PEG_NET_1 + 0x3c), + qla82xx_rd_32(ha, + QLA82XX_CRB_PEG_NET_2 + 0x3c), + qla82xx_rd_32(ha, + QLA82XX_CRB_PEG_NET_3 + 0x3c), + qla82xx_rd_32(ha, + QLA82XX_CRB_PEG_NET_4 + 0x3c)); if (halt_status & HALT_STATUS_UNRECOVERABLE) { set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); @@ -3651,8 +3705,9 @@ void qla82xx_watchdog(scsi_qla_host_t *vha) if (ha->flags.mbox_busy) { ha->flags.mbox_int = 1; DEBUG2(qla_printk(KERN_ERR, ha, - "Due to fw hung, doing premature " - "completion of mbx command\n")); + "scsi(%ld) Due to fw hung, doing " + "premature completion of mbx " + "command\n", vha->host_no)); if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) complete(&ha->mbx_intr_comp); diff --git a/trunk/drivers/scsi/qla2xxx/qla_nx.h b/trunk/drivers/scsi/qla2xxx/qla_nx.h index ed5883f1778a..8a21832c6693 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_nx.h +++ b/trunk/drivers/scsi/qla2xxx/qla_nx.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2010 QLogic Corporation + * Copyright (c) 2003-2011 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ diff --git a/trunk/drivers/scsi/qla2xxx/qla_os.c b/trunk/drivers/scsi/qla2xxx/qla_os.c index aa7747529165..f461925a9dfc 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_os.c +++ b/trunk/drivers/scsi/qla2xxx/qla_os.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2010 QLogic Corporation + * Copyright (c) 2003-2011 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. 
*/ @@ -164,6 +164,20 @@ module_param(ql2xasynctmfenable, int, S_IRUGO); MODULE_PARM_DESC(ql2xasynctmfenable, "Enables issue of TM IOCBs asynchronously via IOCB mechanism" "Default is 0 - Issue TM IOCBs via mailbox mechanism."); + +int ql2xdontresethba; +module_param(ql2xdontresethba, int, S_IRUGO); +MODULE_PARM_DESC(ql2xdontresethba, + "Option to specify reset behaviour\n" + " 0 (Default) -- Reset on failure.\n" + " 1 -- Do not reset on failure.\n"); + +uint ql2xmaxlun = MAX_LUNS; +module_param(ql2xmaxlun, uint, S_IRUGO); +MODULE_PARM_DESC(ql2xmaxlun, + "Defines the maximum LU number to register with the SCSI " + "midlayer. Default is 65535."); + /* * SCSI host template entry points */ @@ -528,7 +542,7 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport, static int qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) { - scsi_qla_host_t *vha = shost_priv(cmd->device->host); + scsi_qla_host_t *vha = shost_priv(host); fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); struct qla_hw_data *ha = vha->hw; @@ -2128,7 +2142,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) else host->max_cmd_len = MAX_CMDSZ; host->max_channel = MAX_BUSES - 1; - host->max_lun = MAX_LUNS; + host->max_lun = ql2xmaxlun; host->transportt = qla2xxx_transport_template; sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC); @@ -2360,21 +2374,26 @@ qla2x00_remove_one(struct pci_dev *pdev) base_vha = pci_get_drvdata(pdev); ha = base_vha->hw; - spin_lock_irqsave(&ha->vport_slock, flags); - list_for_each_entry(vha, &ha->vp_list, list) { - atomic_inc(&vha->vref_count); + mutex_lock(&ha->vport_lock); + while (ha->cur_vport_count) { + struct Scsi_Host *scsi_host; - if (vha->fc_vport) { - spin_unlock_irqrestore(&ha->vport_slock, flags); + spin_lock_irqsave(&ha->vport_slock, flags); - fc_vport_terminate(vha->fc_vport); + BUG_ON(base_vha->list.next == &ha->vp_list); + /* This assumes first entry in ha->vp_list is always base vha */ + vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list); + scsi_host = scsi_host_get(vha->host); - spin_lock_irqsave(&ha->vport_slock, flags); - } + spin_unlock_irqrestore(&ha->vport_slock, flags); + mutex_unlock(&ha->vport_lock); + + fc_vport_terminate(vha->fc_vport); + scsi_host_put(vha->host); - atomic_dec(&vha->vref_count); + mutex_lock(&ha->vport_lock); } - spin_unlock_irqrestore(&ha->vport_slock, flags); + mutex_unlock(&ha->vport_lock); set_bit(UNLOADING, &base_vha->dpc_flags); @@ -2544,7 +2563,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, { if (atomic_read(&fcport->state) == FCS_ONLINE && vha->vp_idx == fcport->vp_idx) { - atomic_set(&fcport->state, FCS_DEVICE_LOST); + qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); qla2x00_schedule_rport_del(vha, fcport, defer); } /* @@ -2552,7 +2571,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, * port but do the retries. 
*/ if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD) - atomic_set(&fcport->state, FCS_DEVICE_LOST); + qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); if (!do_login) return; @@ -2607,7 +2626,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer) if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) continue; if (atomic_read(&fcport->state) == FCS_ONLINE) { - atomic_set(&fcport->state, FCS_DEVICE_LOST); + qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); if (defer) qla2x00_schedule_rport_del(vha, fcport, defer); else if (vha->vp_idx == fcport->vp_idx) @@ -3214,6 +3233,17 @@ void qla2x00_relogin(struct scsi_qla_host *vha) fcport->d_id.b.area, fcport->d_id.b.al_pa); + if (fcport->loop_id == FC_NO_LOOP_ID) { + fcport->loop_id = next_loopid = + ha->min_external_loopid; + status = qla2x00_find_new_loop_id( + vha, fcport); + if (status != QLA_SUCCESS) { + /* Ran out of IDs to use */ + break; + } + } + if (IS_ALOGIO_CAPABLE(ha)) { fcport->flags |= FCF_ASYNC_SENT; data[0] = 0; @@ -3604,7 +3634,8 @@ qla2x00_timer(scsi_qla_host_t *vha) if (!pci_channel_offline(ha->pdev)) pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); - if (IS_QLA82XX(ha)) { + /* Make sure qla82xx_watchdog is run only for physical port */ + if (!vha->vp_idx && IS_QLA82XX(ha)) { if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) start_dpc++; qla82xx_watchdog(vha); @@ -3612,7 +3643,8 @@ qla2x00_timer(scsi_qla_host_t *vha) /* Loop down handler. */ if (atomic_read(&vha->loop_down_timer) > 0 && - !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) + !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && + !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags)) && vha->flags.online) { if (atomic_read(&vha->loop_down_timer) == @@ -3648,7 +3680,11 @@ qla2x00_timer(scsi_qla_host_t *vha) if (!(sfcp->flags & FCF_FCP2_DEVICE)) continue; - set_bit(ISP_ABORT_NEEDED, + if (IS_QLA82XX(ha)) + set_bit(FCOE_CTX_RESET_NEEDED, + &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; } @@ -3667,7 +3703,12 @@ qla2x00_timer(scsi_qla_host_t *vha) qla_printk(KERN_WARNING, ha, "Loop down - aborting ISP.\n"); - set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + if (IS_QLA82XX(ha)) + set_bit(FCOE_CTX_RESET_NEEDED, + &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, + &vha->dpc_flags); } } DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n", @@ -3675,8 +3716,8 @@ qla2x00_timer(scsi_qla_host_t *vha) atomic_read(&vha->loop_down_timer))); } - /* Check if beacon LED needs to be blinked */ - if (ha->beacon_blink_led == 1) { + /* Check if beacon LED needs to be blinked for physical host only */ + if (!vha->vp_idx && (ha->beacon_blink_led == 1)) { set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags); start_dpc++; } diff --git a/trunk/drivers/scsi/qla2xxx/qla_settings.h b/trunk/drivers/scsi/qla2xxx/qla_settings.h index f0b2b9986a55..d70f03008981 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_settings.h +++ b/trunk/drivers/scsi/qla2xxx/qla_settings.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2010 QLogic Corporation + * Copyright (c) 2003-2011 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. 
*/ diff --git a/trunk/drivers/scsi/qla2xxx/qla_sup.c b/trunk/drivers/scsi/qla2xxx/qla_sup.c index 22070621206c..693647661ed1 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_sup.c +++ b/trunk/drivers/scsi/qla2xxx/qla_sup.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2010 QLogic Corporation + * Copyright (c) 2003-2011 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ diff --git a/trunk/drivers/scsi/qla2xxx/qla_version.h b/trunk/drivers/scsi/qla2xxx/qla_version.h index 3a260c3f055a..062c97bf62f5 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_version.h +++ b/trunk/drivers/scsi/qla2xxx/qla_version.h @@ -1,15 +1,15 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2010 QLogic Corporation + * Copyright (c) 2003-2011 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ /* * Driver version */ -#define QLA2XXX_VERSION "8.03.07.00" +#define QLA2XXX_VERSION "8.03.07.03-k" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 3 #define QLA_DRIVER_PATCH_VER 7 -#define QLA_DRIVER_BETA_VER 0 +#define QLA_DRIVER_BETA_VER 3 diff --git a/trunk/drivers/scsi/qla4xxx/ql4_os.c b/trunk/drivers/scsi/qla4xxx/ql4_os.c index 230ba097d28c..c22f2a764d9d 100644 --- a/trunk/drivers/scsi/qla4xxx/ql4_os.c +++ b/trunk/drivers/scsi/qla4xxx/ql4_os.c @@ -2068,15 +2068,14 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd) struct scsi_qla_host *ha = to_qla_host(cmd->device->host); unsigned int id = cmd->device->id; unsigned int lun = cmd->device->lun; - unsigned long serial = cmd->serial_number; unsigned long flags; struct srb *srb = NULL; int ret = SUCCESS; int wait = 0; ql4_printk(KERN_INFO, ha, - "scsi%ld:%d:%d: Abort command issued cmd=%p, pid=%ld\n", - ha->host_no, id, lun, cmd, serial); + "scsi%ld:%d:%d: Abort command issued cmd=%p\n", + ha->host_no, id, lun, cmd); spin_lock_irqsave(&ha->hardware_lock, flags); srb = (struct srb *) CMD_SP(cmd); diff --git a/trunk/drivers/scsi/qlogicpti.c b/trunk/drivers/scsi/qlogicpti.c index e2d45c91b8e8..9689d41c7888 100644 --- a/trunk/drivers/scsi/qlogicpti.c +++ b/trunk/drivers/scsi/qlogicpti.c @@ -1292,8 +1292,10 @@ static struct scsi_host_template qpti_template = { .use_clustering = ENABLE_CLUSTERING, }; +static const struct of_device_id qpti_match[]; static int __devinit qpti_sbus_probe(struct platform_device *op) { + const struct of_device_id *match; struct scsi_host_template *tpnt; struct device_node *dp = op->dev.of_node; struct Scsi_Host *host; @@ -1301,9 +1303,10 @@ static int __devinit qpti_sbus_probe(struct platform_device *op) static int nqptis; const char *fcode; - if (!op->dev.of_match) + match = of_match_device(qpti_match, &op->dev); + if (!match) return -EINVAL; - tpnt = op->dev.of_match->data; + tpnt = match->data; /* Sometimes Antares cards come up not completely * setup, and we get a report of a zero IRQ. diff --git a/trunk/drivers/scsi/scsi_error.c b/trunk/drivers/scsi/scsi_error.c index 633c2395a92a..abea2cf05c2e 100644 --- a/trunk/drivers/scsi/scsi_error.c +++ b/trunk/drivers/scsi/scsi_error.c @@ -321,6 +321,12 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) "changed. The Linux SCSI layer does not " "automatically adjust these parameters.\n"); + if (sshdr.asc == 0x38 && sshdr.ascq == 0x07) + scmd_printk(KERN_WARNING, scmd, + "Warning! Received an indication that the " + "LUN reached a thin provisioning soft " + "threshold.\n"); + /* * Pass the UA upwards for a determination in the completion * functions. 
diff --git a/trunk/drivers/scsi/scsi_lib.c b/trunk/drivers/scsi/scsi_lib.c index 6d5c7ff43f5b..ec1803a48723 100644 --- a/trunk/drivers/scsi/scsi_lib.c +++ b/trunk/drivers/scsi/scsi_lib.c @@ -74,8 +74,6 @@ struct kmem_cache *scsi_sdb_cache; */ #define SCSI_QUEUE_DELAY 3 -static void scsi_run_queue(struct request_queue *q); - /* * Function: scsi_unprep_request() * @@ -161,7 +159,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) blk_requeue_request(q, cmd->request); spin_unlock_irqrestore(q->queue_lock, flags); - scsi_run_queue(q); + kblockd_schedule_work(q, &device->requeue_work); return 0; } @@ -400,10 +398,15 @@ static inline int scsi_host_is_busy(struct Scsi_Host *shost) static void scsi_run_queue(struct request_queue *q) { struct scsi_device *sdev = q->queuedata; - struct Scsi_Host *shost = sdev->host; + struct Scsi_Host *shost; LIST_HEAD(starved_list); unsigned long flags; + /* if the device is dead, sdev will be NULL, so no queue to run */ + if (!sdev) + return; + + shost = sdev->host; if (scsi_target(sdev)->single_lun) scsi_single_lun_run(sdev); @@ -411,8 +414,6 @@ static void scsi_run_queue(struct request_queue *q) list_splice_init(&shost->starved_list, &starved_list); while (!list_empty(&starved_list)) { - int flagset; - /* * As long as shost is accepting commands and we have * starved queues, call blk_run_queue. scsi_request_fn @@ -436,18 +437,9 @@ static void scsi_run_queue(struct request_queue *q) } spin_unlock(shost->host_lock); - spin_lock(sdev->request_queue->queue_lock); - flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) && - !test_bit(QUEUE_FLAG_REENTER, - &sdev->request_queue->queue_flags); - if (flagset) - queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue); - __blk_run_queue(sdev->request_queue, false); - if (flagset) - queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue); + __blk_run_queue(sdev->request_queue); spin_unlock(sdev->request_queue->queue_lock); - spin_lock(shost->host_lock); } /* put any unprocessed entries back */ @@ -457,6 +449,16 @@ static void scsi_run_queue(struct request_queue *q) blk_run_queue(q); } +void scsi_requeue_run_queue(struct work_struct *work) +{ + struct scsi_device *sdev; + struct request_queue *q; + + sdev = container_of(work, struct scsi_device, requeue_work); + q = sdev->request_queue; + scsi_run_queue(q); +} + /* * Function: scsi_requeue_command() * diff --git a/trunk/drivers/scsi/scsi_proc.c b/trunk/drivers/scsi/scsi_proc.c index c99da926fdac..f46855cd853d 100644 --- a/trunk/drivers/scsi/scsi_proc.c +++ b/trunk/drivers/scsi/scsi_proc.c @@ -386,13 +386,59 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, * @s: output goes here * @p: not used */ -static int proc_scsi_show(struct seq_file *s, void *p) +static int always_match(struct device *dev, void *data) { - seq_printf(s, "Attached devices:\n"); - bus_for_each_dev(&scsi_bus_type, NULL, s, proc_print_scsidevice); - return 0; + return 1; +} + +static inline struct device *next_scsi_device(struct device *start) +{ + struct device *next = bus_find_device(&scsi_bus_type, start, NULL, + always_match); + put_device(start); + return next; } +static void *scsi_seq_start(struct seq_file *sfile, loff_t *pos) +{ + struct device *dev = NULL; + loff_t n = *pos; + + while ((dev = next_scsi_device(dev))) { + if (!n--) + break; + sfile->private++; + } + return dev; +} + +static void *scsi_seq_next(struct seq_file *sfile, void *v, loff_t *pos) +{ + (*pos)++; + sfile->private++; + return next_scsi_device(v); +} + +static void 
scsi_seq_stop(struct seq_file *sfile, void *v) +{ + put_device(v); +} + +static int scsi_seq_show(struct seq_file *sfile, void *dev) +{ + if (!sfile->private) + seq_puts(sfile, "Attached devices:\n"); + + return proc_print_scsidevice(dev, sfile); +} + +static const struct seq_operations scsi_seq_ops = { + .start = scsi_seq_start, + .next = scsi_seq_next, + .stop = scsi_seq_stop, + .show = scsi_seq_show +}; + /** * proc_scsi_open - glue function * @inode: not used @@ -406,7 +452,7 @@ static int proc_scsi_open(struct inode *inode, struct file *file) * We don't really need this for the write case but it doesn't * harm either. */ - return single_open(file, proc_scsi_show, NULL); + return seq_open(file, &scsi_seq_ops); } static const struct file_operations proc_scsi_operations = { @@ -415,7 +461,7 @@ static const struct file_operations proc_scsi_operations = { .read = seq_read, .write = proc_scsi_write, .llseek = seq_lseek, - .release = single_release, + .release = seq_release, }; /** diff --git a/trunk/drivers/scsi/scsi_scan.c b/trunk/drivers/scsi/scsi_scan.c index 087821fac8fe..58584dc0724a 100644 --- a/trunk/drivers/scsi/scsi_scan.c +++ b/trunk/drivers/scsi/scsi_scan.c @@ -242,6 +242,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, int display_failure_msg = 1, ret; struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); extern void scsi_evt_thread(struct work_struct *work); + extern void scsi_requeue_run_queue(struct work_struct *work); sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size, GFP_ATOMIC); @@ -264,6 +265,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, INIT_LIST_HEAD(&sdev->event_list); spin_lock_init(&sdev->list_lock); INIT_WORK(&sdev->event_work, scsi_evt_thread); + INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue); sdev->sdev_gendev.parent = get_device(&starget->dev); sdev->sdev_target = starget; diff --git a/trunk/drivers/scsi/scsi_sysfs.c b/trunk/drivers/scsi/scsi_sysfs.c index e44ff64233fd..e63912510fb9 100644 --- a/trunk/drivers/scsi/scsi_sysfs.c +++ b/trunk/drivers/scsi/scsi_sysfs.c @@ -322,14 +322,8 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work) kfree(evt); } - if (sdev->request_queue) { - sdev->request_queue->queuedata = NULL; - /* user context needed to free queue */ - scsi_free_queue(sdev->request_queue); - /* temporary expedient, try to catch use of queue lock - * after free of sdev */ - sdev->request_queue = NULL; - } + /* NULL queue means the device can't be used */ + sdev->request_queue = NULL; scsi_target_reap(scsi_target(sdev)); @@ -937,6 +931,12 @@ void __scsi_remove_device(struct scsi_device *sdev) if (sdev->host->hostt->slave_destroy) sdev->host->hostt->slave_destroy(sdev); transport_destroy_device(dev); + + /* cause the request function to reject all I/O requests */ + sdev->request_queue->queuedata = NULL; + + /* Freeing the queue signals to block that we're done */ + scsi_free_queue(sdev->request_queue); put_device(dev); } diff --git a/trunk/drivers/scsi/scsi_tgt_lib.c b/trunk/drivers/scsi/scsi_tgt_lib.c index 8bca8c25ba69..84a1fdf67864 100644 --- a/trunk/drivers/scsi/scsi_tgt_lib.c +++ b/trunk/drivers/scsi/scsi_tgt_lib.c @@ -275,10 +275,8 @@ void scsi_tgt_free_queue(struct Scsi_Host *shost) for (i = 0; i < ARRAY_SIZE(qdata->cmd_hash); i++) { list_for_each_entry_safe(tcmd, n, &qdata->cmd_hash[i], - hash_list) { - list_del(&tcmd->hash_list); - list_add(&tcmd->hash_list, &cmds); - } + hash_list) + list_move(&tcmd->hash_list, &cmds); } 
spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags); diff --git a/trunk/drivers/scsi/scsi_transport_fc.c b/trunk/drivers/scsi/scsi_transport_fc.c index fdf3fa639056..1b214910b714 100644 --- a/trunk/drivers/scsi/scsi_transport_fc.c +++ b/trunk/drivers/scsi/scsi_transport_fc.c @@ -422,8 +422,7 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev, snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name), "fc_wq_%d", shost->host_no); - fc_host->work_q = create_singlethread_workqueue( - fc_host->work_q_name); + fc_host->work_q = alloc_workqueue(fc_host->work_q_name, 0, 0); if (!fc_host->work_q) return -ENOMEM; @@ -431,8 +430,8 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev, snprintf(fc_host->devloss_work_q_name, sizeof(fc_host->devloss_work_q_name), "fc_dl_%d", shost->host_no); - fc_host->devloss_work_q = create_singlethread_workqueue( - fc_host->devloss_work_q_name); + fc_host->devloss_work_q = + alloc_workqueue(fc_host->devloss_work_q_name, 0, 0); if (!fc_host->devloss_work_q) { destroy_workqueue(fc_host->work_q); fc_host->work_q = NULL; @@ -2489,6 +2488,8 @@ fc_rport_final_delete(struct work_struct *work) unsigned long flags; int do_callback = 0; + fc_terminate_rport_io(rport); + /* * if a scan is pending, flush the SCSI Host work_q so that * that we can reclaim the rport scan work element. @@ -2496,8 +2497,6 @@ fc_rport_final_delete(struct work_struct *work) if (rport->flags & FC_RPORT_SCAN_PENDING) scsi_flush_work(shost); - fc_terminate_rport_io(rport); - /* * Cancel any outstanding timers. These should really exist * only when rmmod'ing the LLDD and we're asking for @@ -3816,28 +3815,17 @@ fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost, static void fc_bsg_goose_queue(struct fc_rport *rport) { - int flagset; - unsigned long flags; - if (!rport->rqst_q) return; + /* + * This get/put dance makes no sense + */ get_device(&rport->dev); - - spin_lock_irqsave(rport->rqst_q->queue_lock, flags); - flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) && - !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); - if (flagset) - queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q); - __blk_run_queue(rport->rqst_q, false); - if (flagset) - queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); - spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags); - + blk_run_queue_async(rport->rqst_q); put_device(&rport->dev); } - /** * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD * @q: rport request queue diff --git a/trunk/drivers/scsi/tmscsim.c b/trunk/drivers/scsi/tmscsim.c index a124a28f2ccb..a1baccce05f0 100644 --- a/trunk/drivers/scsi/tmscsim.c +++ b/trunk/drivers/scsi/tmscsim.c @@ -565,12 +565,12 @@ dc390_StartSCSI( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_sr pDCB->TagMask |= 1 << tag[1]; pSRB->TagNumber = tag[1]; DC390_write8(ScsiFifo, tag[1]); - DEBUG1(printk(KERN_INFO "DC390: Select w/DisCn for Cmd %li (SRB %p), block tag %02x\n", scmd->serial_number, pSRB, tag[1])); + DEBUG1(printk(KERN_INFO "DC390: Select w/DisCn for SRB %p, block tag %02x\n", pSRB, tag[1])); cmd = SEL_W_ATN3; } else { /* No TagQ */ //no_tag: - DEBUG1(printk(KERN_INFO "DC390: Select w%s/DisCn for Cmd %li (SRB %p), No TagQ\n", disc_allowed ? "" : "o", scmd->serial_number, pSRB)); + DEBUG1(printk(KERN_INFO "DC390: Select w%s/DisCn for SRB %p, No TagQ\n", disc_allowed ? 
"" : "o", pSRB)); } pSRB->SRBState = SRB_START_; @@ -620,8 +620,8 @@ dc390_StartSCSI( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_sr if (DC390_read8 (Scsi_Status) & INTERRUPT) { dc390_freetag (pDCB, pSRB); - DEBUG0(printk ("DC390: Interrupt during Start SCSI (pid %li, target %02i-%02i)\n", - scmd->serial_number, scmd->device->id, scmd->device->lun)); + DEBUG0(printk ("DC390: Interrupt during Start SCSI (target %02i-%02i)\n", + scmd->device->id, scmd->device->lun)); pSRB->SRBState = SRB_READY; //DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); pACB->SelLost++; @@ -1705,8 +1705,7 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* status = pSRB->TargetStatus; - DEBUG0(printk (" SRBdone (%02x,%08x), SRB %p, pid %li\n", status, pcmd->result,\ - pSRB, pcmd->serial_number)); + DEBUG0(printk (" SRBdone (%02x,%08x), SRB %p\n", status, pcmd->result, pSRB)); if(pSRB->SRBFlag & AUTO_REQSENSE) { /* Last command was a Request Sense */ pSRB->SRBFlag &= ~AUTO_REQSENSE; @@ -1727,7 +1726,7 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* } else { SET_RES_DRV(pcmd->result, DRIVER_SENSE); //pSRB->ScsiCmdLen = (u8) (pSRB->Segment1[0] >> 8); - DEBUG0 (printk ("DC390: RETRY pid %li (%02x), target %02i-%02i\n", pcmd->serial_number, pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun)); + DEBUG0 (printk ("DC390: RETRY (%02x), target %02i-%02i\n", pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun)); pSRB->TotalXferredLen = 0; SET_RES_DID(pcmd->result, DID_SOFT_ERROR); } @@ -1747,7 +1746,7 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* else if (status == SAM_STAT_TASK_SET_FULL) { scsi_track_queue_full(pcmd->device, pDCB->GoingSRBCnt - 1); - DEBUG0 (printk ("DC390: RETRY pid %li (%02x), target %02i-%02i\n", pcmd->serial_number, pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun)); + DEBUG0 (printk ("DC390: RETRY (%02x), target %02i-%02i\n", pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun)); pSRB->TotalXferredLen = 0; SET_RES_DID(pcmd->result, DID_SOFT_ERROR); } @@ -1801,7 +1800,7 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* /* Add to free list */ dc390_Free_insert (pACB, pSRB); - DEBUG0(printk (KERN_DEBUG "DC390: SRBdone: done pid %li\n", pcmd->serial_number)); + DEBUG0(printk (KERN_DEBUG "DC390: SRBdone: done\n")); pcmd->scsi_done (pcmd); return; @@ -1997,8 +1996,7 @@ static int DC390_abort(struct scsi_cmnd *cmd) struct dc390_acb *pACB = (struct dc390_acb*) cmd->device->host->hostdata; struct dc390_dcb *pDCB = (struct dc390_dcb*) cmd->device->hostdata; - scmd_printk(KERN_WARNING, cmd, - "DC390: Abort command (pid %li)\n", cmd->serial_number); + scmd_printk(KERN_WARNING, cmd, "DC390: Abort command\n"); /* abort() is too stupid for already sent commands at the moment. 
* If it's called we are in trouble anyway, so let's dump some info @@ -2006,7 +2004,7 @@ static int DC390_abort(struct scsi_cmnd *cmd) dc390_dumpinfo(pACB, pDCB, NULL); pDCB->DCBFlag |= ABORT_DEV_; - printk(KERN_INFO "DC390: Aborted pid %li\n", cmd->serial_number); + printk(KERN_INFO "DC390: Aborted.\n"); return FAILED; } diff --git a/trunk/drivers/scsi/u14-34f.c b/trunk/drivers/scsi/u14-34f.c index edfc5da8be4c..90e104d6b558 100644 --- a/trunk/drivers/scsi/u14-34f.c +++ b/trunk/drivers/scsi/u14-34f.c @@ -1256,8 +1256,8 @@ static int u14_34f_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct j = ((struct hostdata *) SCpnt->device->host->hostdata)->board_number; if (SCpnt->host_scribble) - panic("%s: qcomm, pid %ld, SCpnt %p already active.\n", - BN(j), SCpnt->serial_number, SCpnt); + panic("%s: qcomm, SCpnt %p already active.\n", + BN(j), SCpnt); /* i is the mailbox number, look for the first free mailbox starting from last_cp_used */ @@ -1286,9 +1286,9 @@ static int u14_34f_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct cpp->cpp_index = i; SCpnt->host_scribble = (unsigned char *) &cpp->cpp_index; - if (do_trace) printk("%s: qcomm, mbox %d, target %d.%d:%d, pid %ld.\n", + if (do_trace) printk("%s: qcomm, mbox %d, target %d.%d:%d.\n", BN(j), i, SCpnt->device->channel, SCpnt->device->id, - SCpnt->device->lun, SCpnt->serial_number); + SCpnt->device->lun); cpp->opcode = OP_SCSI; cpp->channel = SCpnt->device->channel; @@ -1315,7 +1315,7 @@ static int u14_34f_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct unmap_dma(i, j); SCpnt->host_scribble = NULL; scmd_printk(KERN_INFO, SCpnt, - "qcomm, pid %ld, adapter busy.\n", SCpnt->serial_number); + "qcomm, adapter busy.\n"); return 1; } @@ -1337,14 +1337,12 @@ static int u14_34f_eh_abort(struct scsi_cmnd *SCarg) { j = ((struct hostdata *) SCarg->device->host->hostdata)->board_number; if (SCarg->host_scribble == NULL) { - scmd_printk(KERN_INFO, SCarg, "abort, pid %ld inactive.\n", - SCarg->serial_number); + scmd_printk(KERN_INFO, SCarg, "abort, command inactive.\n"); return SUCCESS; } i = *(unsigned int *)SCarg->host_scribble; - scmd_printk(KERN_INFO, SCarg, "abort, mbox %d, pid %ld.\n", - i, SCarg->serial_number); + scmd_printk(KERN_INFO, SCarg, "abort, mbox %d.\n", i); if (i >= sh[j]->can_queue) panic("%s: abort, invalid SCarg->host_scribble.\n", BN(j)); @@ -1387,8 +1385,7 @@ static int u14_34f_eh_abort(struct scsi_cmnd *SCarg) { SCarg->result = DID_ABORT << 16; SCarg->host_scribble = NULL; HD(j)->cp_stat[i] = FREE; - printk("%s, abort, mbox %d ready, DID_ABORT, pid %ld done.\n", - BN(j), i, SCarg->serial_number); + printk("%s, abort, mbox %d ready, DID_ABORT, done.\n", BN(j), i); SCarg->scsi_done(SCarg); return SUCCESS; } @@ -1403,12 +1400,12 @@ static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) { struct scsi_cmnd *SCpnt; j = ((struct hostdata *) SCarg->device->host->hostdata)->board_number; - scmd_printk(KERN_INFO, SCarg, "reset, enter, pid %ld.\n", SCarg->serial_number); + scmd_printk(KERN_INFO, SCarg, "reset, enter.\n"); spin_lock_irq(sh[j]->host_lock); if (SCarg->host_scribble == NULL) - printk("%s: reset, pid %ld inactive.\n", BN(j), SCarg->serial_number); + printk("%s: reset, inactive.\n", BN(j)); if (HD(j)->in_reset) { printk("%s: reset, exit, already in reset.\n", BN(j)); @@ -1445,14 +1442,12 @@ static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) { if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) { HD(j)->cp_stat[i] = ABORTING; - printk("%s: reset, mbox %d aborting, pid 
%ld.\n", - BN(j), i, SCpnt->serial_number); + printk("%s: reset, mbox %d aborting.\n", BN(j), i); } else { HD(j)->cp_stat[i] = IN_RESET; - printk("%s: reset, mbox %d in reset, pid %ld.\n", - BN(j), i, SCpnt->serial_number); + printk("%s: reset, mbox %d in reset.\n", BN(j), i); } if (SCpnt->host_scribble == NULL) @@ -1500,8 +1495,7 @@ static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) { /* This mailbox is still waiting for its interrupt */ HD(j)->cp_stat[i] = LOCKED; - printk("%s, reset, mbox %d locked, DID_RESET, pid %ld done.\n", - BN(j), i, SCpnt->serial_number); + printk("%s, reset, mbox %d locked, DID_RESET, done.\n", BN(j), i); } else if (HD(j)->cp_stat[i] == ABORTING) { @@ -1513,8 +1507,7 @@ static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) { /* This mailbox was never queued to the adapter */ HD(j)->cp_stat[i] = FREE; - printk("%s, reset, mbox %d aborting, DID_RESET, pid %ld done.\n", - BN(j), i, SCpnt->serial_number); + printk("%s, reset, mbox %d aborting, DID_RESET, done.\n", BN(j), i); } else @@ -1528,7 +1521,7 @@ static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) { HD(j)->in_reset = FALSE; do_trace = FALSE; - if (arg_done) printk("%s: reset, exit, pid %ld done.\n", BN(j), SCarg->serial_number); + if (arg_done) printk("%s: reset, exit, done.\n", BN(j)); else printk("%s: reset, exit.\n", BN(j)); spin_unlock_irq(sh[j]->host_lock); @@ -1671,10 +1664,10 @@ static int reorder(unsigned int j, unsigned long cursec, if (link_statistics && (overlap || !(flushcount % link_statistics))) for (n = 0; n < n_ready; n++) { k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; - printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %u"\ + printk("%s %d.%d:%d mb %d fc %d nr %d sec %ld ns %u"\ " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target, - SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready, + SCpnt->lun, k, flushcount, n_ready, blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request), cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only), YESNO(overlap), cpp->xdir); @@ -1709,9 +1702,9 @@ static void flush_dev(struct scsi_device *dev, unsigned long cursec, unsigned in if (wait_on_busy(sh[j]->io_port, MAXLOOP)) { scmd_printk(KERN_INFO, SCpnt, - "%s, pid %ld, mbox %d, adapter" + "%s, mbox %d, adapter" " busy, will abort.\n", (ihdlr ? 
"ihdlr" : "qcomm"), - SCpnt->serial_number, k); + k); HD(j)->cp_stat[k] = ABORTING; continue; } @@ -1793,12 +1786,12 @@ static irqreturn_t ihdlr(unsigned int j) if (SCpnt == NULL) panic("%s: ihdlr, mbox %d, SCpnt == NULL.\n", BN(j), i); if (SCpnt->host_scribble == NULL) - panic("%s: ihdlr, mbox %d, pid %ld, SCpnt %p garbled.\n", BN(j), i, - SCpnt->serial_number, SCpnt); + panic("%s: ihdlr, mbox %d, SCpnt %p garbled.\n", BN(j), i, + SCpnt); if (*(unsigned int *)SCpnt->host_scribble != i) - panic("%s: ihdlr, mbox %d, pid %ld, index mismatch %d.\n", - BN(j), i, SCpnt->serial_number, *(unsigned int *)SCpnt->host_scribble); + panic("%s: ihdlr, mbox %d, index mismatch %d.\n", + BN(j), i, *(unsigned int *)SCpnt->host_scribble); sync_dma(i, j); @@ -1841,8 +1834,8 @@ static irqreturn_t ihdlr(unsigned int j) (!(tstatus == CHECK_CONDITION && HD(j)->iocount <= 1000 && (SCpnt->sense_buffer[2] & 0xf) == NOT_READY))) scmd_printk(KERN_INFO, SCpnt, - "ihdlr, pid %ld, target_status 0x%x, sense key 0x%x.\n", - SCpnt->serial_number, spp->target_status, + "ihdlr, target_status 0x%x, sense key 0x%x.\n", + spp->target_status, SCpnt->sense_buffer[2]); HD(j)->target_to[scmd_id(SCpnt)][scmd_channel(SCpnt)] = 0; @@ -1913,8 +1906,8 @@ static irqreturn_t ihdlr(unsigned int j) do_trace || msg_byte(spp->target_status)) #endif scmd_printk(KERN_INFO, SCpnt, "ihdlr, mbox %2d, err 0x%x:%x,"\ - " pid %ld, reg 0x%x, count %d.\n", - i, spp->adapter_status, spp->target_status, SCpnt->serial_number, + " reg 0x%x, count %d.\n", + i, spp->adapter_status, spp->target_status, reg, HD(j)->iocount); unmap_dma(i, j); diff --git a/trunk/drivers/scsi/wd33c93.c b/trunk/drivers/scsi/wd33c93.c index 4468ae3610f7..97ae716134d0 100644 --- a/trunk/drivers/scsi/wd33c93.c +++ b/trunk/drivers/scsi/wd33c93.c @@ -381,7 +381,7 @@ wd33c93_queuecommand_lck(struct scsi_cmnd *cmd, hostdata = (struct WD33C93_hostdata *) cmd->device->host->hostdata; DB(DB_QUEUE_COMMAND, - printk("Q-%d-%02x-%ld( ", cmd->device->id, cmd->cmnd[0], cmd->serial_number)) + printk("Q-%d-%02x( ", cmd->device->id, cmd->cmnd[0])) /* Set up a few fields in the scsi_cmnd structure for our own use: * - host_scribble is the pointer to the next cmd in the input queue @@ -462,7 +462,7 @@ wd33c93_queuecommand_lck(struct scsi_cmnd *cmd, wd33c93_execute(cmd->device->host); - DB(DB_QUEUE_COMMAND, printk(")Q-%ld ", cmd->serial_number)) + DB(DB_QUEUE_COMMAND, printk(")Q ")) spin_unlock_irq(&hostdata->lock); return 0; @@ -687,7 +687,7 @@ wd33c93_execute(struct Scsi_Host *instance) */ DB(DB_EXECUTE, - printk("%s%ld)EX-2 ", (cmd->SCp.phase) ? "d:" : "", cmd->serial_number)) + printk("%s)EX-2 ", (cmd->SCp.phase) ? 
"d:" : "")) } static void @@ -963,7 +963,7 @@ wd33c93_intr(struct Scsi_Host *instance) case CSR_XFER_DONE | PHS_COMMAND: case CSR_UNEXP | PHS_COMMAND: case CSR_SRV_REQ | PHS_COMMAND: - DB(DB_INTR, printk("CMND-%02x,%ld", cmd->cmnd[0], cmd->serial_number)) + DB(DB_INTR, printk("CMND-%02x", cmd->cmnd[0])) transfer_pio(regs, cmd->cmnd, cmd->cmd_len, DATA_OUT_DIR, hostdata); hostdata->state = S_CONNECTED; @@ -1007,7 +1007,7 @@ wd33c93_intr(struct Scsi_Host *instance) switch (msg) { case COMMAND_COMPLETE: - DB(DB_INTR, printk("CCMP-%ld", cmd->serial_number)) + DB(DB_INTR, printk("CCMP")) write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); hostdata->state = S_PRE_CMP_DISC; break; @@ -1174,7 +1174,7 @@ wd33c93_intr(struct Scsi_Host *instance) write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER); if (phs == 0x60) { - DB(DB_INTR, printk("SX-DONE-%ld", cmd->serial_number)) + DB(DB_INTR, printk("SX-DONE")) cmd->SCp.Message = COMMAND_COMPLETE; lun = read_wd33c93(regs, WD_TARGET_LUN); DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun)) @@ -1200,8 +1200,8 @@ wd33c93_intr(struct Scsi_Host *instance) wd33c93_execute(instance); } else { printk - ("%02x:%02x:%02x-%ld: Unknown SEL_XFER_DONE phase!!---", - asr, sr, phs, cmd->serial_number); + ("%02x:%02x:%02x: Unknown SEL_XFER_DONE phase!!---", + asr, sr, phs); spin_unlock_irqrestore(&hostdata->lock, flags); } break; @@ -1266,7 +1266,7 @@ wd33c93_intr(struct Scsi_Host *instance) spin_unlock_irqrestore(&hostdata->lock, flags); return; } - DB(DB_INTR, printk("UNEXP_DISC-%ld", cmd->serial_number)) + DB(DB_INTR, printk("UNEXP_DISC")) hostdata->connected = NULL; hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); hostdata->state = S_UNCONNECTED; @@ -1292,7 +1292,7 @@ wd33c93_intr(struct Scsi_Host *instance) */ write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER); - DB(DB_INTR, printk("DISC-%ld", cmd->serial_number)) + DB(DB_INTR, printk("DISC")) if (cmd == NULL) { printk(" - Already disconnected! "); hostdata->state = S_UNCONNECTED; @@ -1491,7 +1491,6 @@ wd33c93_intr(struct Scsi_Host *instance) } else hostdata->state = S_CONNECTED; - DB(DB_INTR, printk("-%ld", cmd->serial_number)) spin_unlock_irqrestore(&hostdata->lock, flags); break; @@ -1637,8 +1636,8 @@ wd33c93_abort(struct scsi_cmnd * cmd) cmd->host_scribble = NULL; cmd->result = DID_ABORT << 16; printk - ("scsi%d: Abort - removing command %ld from input_Q. ", - instance->host_no, cmd->serial_number); + ("scsi%d: Abort - removing command from input_Q. ", + instance->host_no); enable_irq(cmd->device->host->irq); cmd->scsi_done(cmd); return SUCCESS; @@ -1662,8 +1661,8 @@ wd33c93_abort(struct scsi_cmnd * cmd) uchar sr, asr; unsigned long timeout; - printk("scsi%d: Aborting connected command %ld - ", - instance->host_no, cmd->serial_number); + printk("scsi%d: Aborting connected command - ", + instance->host_no); printk("stopping DMA - "); if (hostdata->dma == D_DMA_RUNNING) { @@ -1729,8 +1728,8 @@ wd33c93_abort(struct scsi_cmnd * cmd) while (tmp) { if (tmp == cmd) { printk - ("scsi%d: Abort - command %ld found on disconnected_Q - ", - instance->host_no, cmd->serial_number); + ("scsi%d: Abort - command found on disconnected_Q - ", + instance->host_no); printk("Abort SNOOZE. 
"); enable_irq(cmd->device->host->irq); return FAILED; @@ -2180,8 +2179,8 @@ wd33c93_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off strcat(bp, "\nconnected: "); if (hd->connected) { cmd = (struct scsi_cmnd *) hd->connected; - sprintf(tbuf, " %ld-%d:%d(%02x)", - cmd->serial_number, cmd->device->id, cmd->device->lun, cmd->cmnd[0]); + sprintf(tbuf, " %d:%d(%02x)", + cmd->device->id, cmd->device->lun, cmd->cmnd[0]); strcat(bp, tbuf); } } @@ -2189,8 +2188,8 @@ wd33c93_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off strcat(bp, "\ninput_Q: "); cmd = (struct scsi_cmnd *) hd->input_Q; while (cmd) { - sprintf(tbuf, " %ld-%d:%d(%02x)", - cmd->serial_number, cmd->device->id, cmd->device->lun, cmd->cmnd[0]); + sprintf(tbuf, " %d:%d(%02x)", + cmd->device->id, cmd->device->lun, cmd->cmnd[0]); strcat(bp, tbuf); cmd = (struct scsi_cmnd *) cmd->host_scribble; } @@ -2199,8 +2198,8 @@ wd33c93_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off strcat(bp, "\ndisconnected_Q:"); cmd = (struct scsi_cmnd *) hd->disconnected_Q; while (cmd) { - sprintf(tbuf, " %ld-%d:%d(%02x)", - cmd->serial_number, cmd->device->id, cmd->device->lun, cmd->cmnd[0]); + sprintf(tbuf, " %d:%d(%02x)", + cmd->device->id, cmd->device->lun, cmd->cmnd[0]); strcat(bp, tbuf); cmd = (struct scsi_cmnd *) cmd->host_scribble; } diff --git a/trunk/drivers/spi/amba-pl022.c b/trunk/drivers/spi/amba-pl022.c index 5825370bad25..08de58e7f59f 100644 --- a/trunk/drivers/spi/amba-pl022.c +++ b/trunk/drivers/spi/amba-pl022.c @@ -1555,7 +1555,7 @@ static int stop_queue(struct pl022 *pl022) * A wait_queue on the pl022->busy could be used, but then the common * execution path (pump_messages) would be required to call wake_up or * friends on every SPI message. Do this instead */ - while (!list_empty(&pl022->queue) && pl022->busy && limit--) { + while ((!list_empty(&pl022->queue) || pl022->busy) && limit--) { spin_unlock_irqrestore(&pl022->queue_lock, flags); msleep(10); spin_lock_irqsave(&pl022->queue_lock, flags); diff --git a/trunk/drivers/spi/dw_spi.c b/trunk/drivers/spi/dw_spi.c index b1a4b9f503ae..871e337c917f 100644 --- a/trunk/drivers/spi/dw_spi.c +++ b/trunk/drivers/spi/dw_spi.c @@ -821,7 +821,7 @@ static int stop_queue(struct dw_spi *dws) spin_lock_irqsave(&dws->lock, flags); dws->run = QUEUE_STOPPED; - while (!list_empty(&dws->queue) && dws->busy && limit--) { + while ((!list_empty(&dws->queue) || dws->busy) && limit--) { spin_unlock_irqrestore(&dws->lock, flags); msleep(10); spin_lock_irqsave(&dws->lock, flags); diff --git a/trunk/drivers/spi/pxa2xx_spi.c b/trunk/drivers/spi/pxa2xx_spi.c index 9c74aad6be93..dc25bee8d33f 100644 --- a/trunk/drivers/spi/pxa2xx_spi.c +++ b/trunk/drivers/spi/pxa2xx_spi.c @@ -1493,7 +1493,7 @@ static int stop_queue(struct driver_data *drv_data) * execution path (pump_messages) would be required to call wake_up or * friends on every SPI message. 
Do this instead */ drv_data->run = QUEUE_STOPPED; - while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) { + while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) { spin_unlock_irqrestore(&drv_data->lock, flags); msleep(10); spin_lock_irqsave(&drv_data->lock, flags); diff --git a/trunk/drivers/spi/spi_bfin5xx.c b/trunk/drivers/spi/spi_bfin5xx.c index bdb7289a1d22..f706dba165cf 100644 --- a/trunk/drivers/spi/spi_bfin5xx.c +++ b/trunk/drivers/spi/spi_bfin5xx.c @@ -1284,7 +1284,7 @@ static inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data) * friends on every SPI message. Do this instead */ drv_data->running = false; - while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) { + while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) { spin_unlock_irqrestore(&drv_data->lock, flags); msleep(10); spin_lock_irqsave(&drv_data->lock, flags); diff --git a/trunk/drivers/ssb/pci.c b/trunk/drivers/ssb/pci.c index 6f34963b3c64..7ad48585c5e6 100644 --- a/trunk/drivers/ssb/pci.c +++ b/trunk/drivers/ssb/pci.c @@ -662,7 +662,6 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out, static int ssb_pci_sprom_get(struct ssb_bus *bus, struct ssb_sprom *sprom) { - const struct ssb_sprom *fallback; int err; u16 *buf; @@ -707,10 +706,17 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus, if (err) { /* All CRC attempts failed. * Maybe there is no SPROM on the device? - * If we have a fallback, use that. */ - fallback = ssb_get_fallback_sprom(); - if (fallback) { - memcpy(sprom, fallback, sizeof(*sprom)); + * Now we ask the arch code if there is some sprom + * available for this device in some other storage */ + err = ssb_fill_sprom_with_fallback(bus, sprom); + if (err) { + ssb_printk(KERN_WARNING PFX "WARNING: Using" + " fallback SPROM failed (err %d)\n", + err); + } else { + ssb_dprintk(KERN_DEBUG PFX "Using SPROM" + " revision %d provided by" + " platform.\n", sprom->revision); err = 0; goto out_free; } diff --git a/trunk/drivers/ssb/sprom.c b/trunk/drivers/ssb/sprom.c index 5f34d7a3e3a5..45ff0e3a3828 100644 --- a/trunk/drivers/ssb/sprom.c +++ b/trunk/drivers/ssb/sprom.c @@ -17,7 +17,7 @@ #include -static const struct ssb_sprom *fallback_sprom; +static int(*get_fallback_sprom)(struct ssb_bus *dev, struct ssb_sprom *out); static int sprom2hex(const u16 *sprom, char *buf, size_t buf_len, @@ -145,36 +145,43 @@ ssize_t ssb_attr_sprom_store(struct ssb_bus *bus, } /** - * ssb_arch_set_fallback_sprom - Set a fallback SPROM for use if no SPROM is found. + * ssb_arch_register_fallback_sprom - Registers a method providing a + * fallback SPROM if no SPROM is found. * - * @sprom: The SPROM data structure to register. + * @sprom_callback: The callback function. * - * With this function the architecture implementation may register a fallback - * SPROM data structure. The fallback is only used for PCI based SSB devices, - * where no valid SPROM can be found in the shadow registers. + * With this function the architecture implementation may register a + * callback handler which fills the SPROM data structure. The fallback is + * only used for PCI based SSB devices, where no valid SPROM can be found + * in the shadow registers. * - * This function is useful for weird architectures that have a half-assed SSB device - * hardwired to their PCI bus. + * This function is useful for weird architectures that have a half-assed + * SSB device hardwired to their PCI bus. * - * Note that it does only work with PCI attached SSB devices. 
PCMCIA devices currently - * don't use this fallback. - * Architectures must provide the SPROM for native SSB devices anyway, - * so the fallback also isn't used for native devices. + * Note that it does only work with PCI attached SSB devices. PCMCIA + * devices currently don't use this fallback. + * Architectures must provide the SPROM for native SSB devices anyway, so + * the fallback also isn't used for native devices. * - * This function is available for architecture code, only. So it is not exported. + * This function is available for architecture code, only. So it is not + * exported. */ -int ssb_arch_set_fallback_sprom(const struct ssb_sprom *sprom) +int ssb_arch_register_fallback_sprom(int (*sprom_callback)(struct ssb_bus *bus, + struct ssb_sprom *out)) { - if (fallback_sprom) + if (get_fallback_sprom) return -EEXIST; - fallback_sprom = sprom; + get_fallback_sprom = sprom_callback; return 0; } -const struct ssb_sprom *ssb_get_fallback_sprom(void) +int ssb_fill_sprom_with_fallback(struct ssb_bus *bus, struct ssb_sprom *out) { - return fallback_sprom; + if (!get_fallback_sprom) + return -ENOENT; + + return get_fallback_sprom(bus, out); } /* http://bcm-v4.sipsolutions.net/802.11/IsSpromAvailable */ diff --git a/trunk/drivers/ssb/ssb_private.h b/trunk/drivers/ssb/ssb_private.h index 0331139a726f..77653014db0b 100644 --- a/trunk/drivers/ssb/ssb_private.h +++ b/trunk/drivers/ssb/ssb_private.h @@ -171,7 +171,8 @@ ssize_t ssb_attr_sprom_store(struct ssb_bus *bus, const char *buf, size_t count, int (*sprom_check_crc)(const u16 *sprom, size_t size), int (*sprom_write)(struct ssb_bus *bus, const u16 *sprom)); -extern const struct ssb_sprom *ssb_get_fallback_sprom(void); +extern int ssb_fill_sprom_with_fallback(struct ssb_bus *bus, + struct ssb_sprom *out); /* core.c */ diff --git a/trunk/drivers/staging/Kconfig b/trunk/drivers/staging/Kconfig index dca4a0bb6ca9..e3786f161bc3 100644 --- a/trunk/drivers/staging/Kconfig +++ b/trunk/drivers/staging/Kconfig @@ -131,8 +131,6 @@ source "drivers/staging/wlags49_h2/Kconfig" source "drivers/staging/wlags49_h25/Kconfig" -source "drivers/staging/samsung-laptop/Kconfig" - source "drivers/staging/sm7xx/Kconfig" source "drivers/staging/dt3155v4l/Kconfig" diff --git a/trunk/drivers/staging/Makefile b/trunk/drivers/staging/Makefile index eb93012b6f59..f0d5c5315612 100644 --- a/trunk/drivers/staging/Makefile +++ b/trunk/drivers/staging/Makefile @@ -48,7 +48,6 @@ obj-$(CONFIG_XVMALLOC) += zram/ obj-$(CONFIG_ZCACHE) += zcache/ obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/ obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/ -obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop/ obj-$(CONFIG_FB_SM7XX) += sm7xx/ obj-$(CONFIG_VIDEO_DT3155) += dt3155v4l/ obj-$(CONFIG_CRYSTALHD) += crystalhd/ diff --git a/trunk/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c b/trunk/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c index eeb7dd43f9a8..830822f86e41 100644 --- a/trunk/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c +++ b/trunk/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c @@ -2288,7 +2288,3 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link, free_netdev(dev); return NULL; } - -EXPORT_SYMBOL(init_ft1000_card); -EXPORT_SYMBOL(stop_ft1000_card); -EXPORT_SYMBOL(flarion_ft1000_cnt); diff --git a/trunk/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c b/trunk/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c index 935608e72007..bdfb1aec58df 100644 --- a/trunk/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c +++ b/trunk/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c @@ 
-214,6 +214,3 @@ void ft1000CleanupProc(struct net_device *dev) remove_proc_entry(FT1000_PROC, init_net.proc_net); unregister_netdevice_notifier(&ft1000_netdev_notifier); } - -EXPORT_SYMBOL(ft1000InitProc); -EXPORT_SYMBOL(ft1000CleanupProc); diff --git a/trunk/drivers/staging/gma500/Kconfig b/trunk/drivers/staging/gma500/Kconfig index 5501eb9b3355..ce8bedaeaac2 100644 --- a/trunk/drivers/staging/gma500/Kconfig +++ b/trunk/drivers/staging/gma500/Kconfig @@ -1,6 +1,6 @@ config DRM_PSB tristate "Intel GMA500 KMS Framebuffer" - depends on DRM && PCI + depends on DRM && PCI && X86 select FB_CFB_COPYAREA select FB_CFB_FILLRECT select FB_CFB_IMAGEBLIT diff --git a/trunk/drivers/staging/intel_sst/intelmid_v1_control.c b/trunk/drivers/staging/intel_sst/intelmid_v1_control.c index 9cc15c1c18d4..1ea814218059 100644 --- a/trunk/drivers/staging/intel_sst/intelmid_v1_control.c +++ b/trunk/drivers/staging/intel_sst/intelmid_v1_control.c @@ -28,6 +28,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include #include #include #include diff --git a/trunk/drivers/staging/intel_sst/intelmid_v2_control.c b/trunk/drivers/staging/intel_sst/intelmid_v2_control.c index 26d815a67eb8..3c6b3abff3c3 100644 --- a/trunk/drivers/staging/intel_sst/intelmid_v2_control.c +++ b/trunk/drivers/staging/intel_sst/intelmid_v2_control.c @@ -29,6 +29,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include #include #include "intel_sst.h" #include "intelmid_snd_control.h" diff --git a/trunk/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/trunk/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c index b5d21f6497f9..22c04eabed41 100644 --- a/trunk/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c +++ b/trunk/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c @@ -12,6 +12,7 @@ */ #include #include +#include #include #include "olpc_dcon.h" diff --git a/trunk/drivers/staging/pohmelfs/inode.c b/trunk/drivers/staging/pohmelfs/inode.c index c93ef207b0b4..c0f0ac7c1cdb 100644 --- a/trunk/drivers/staging/pohmelfs/inode.c +++ b/trunk/drivers/staging/pohmelfs/inode.c @@ -29,6 +29,7 @@ #include #include #include +#include #include "netfs.h" diff --git a/trunk/drivers/staging/rt2860/common/cmm_data_pci.c b/trunk/drivers/staging/rt2860/common/cmm_data_pci.c index bef0bbd8cef7..f01a51c381f1 100644 --- a/trunk/drivers/staging/rt2860/common/cmm_data_pci.c +++ b/trunk/drivers/staging/rt2860/common/cmm_data_pci.c @@ -444,7 +444,7 @@ int RTMPCheckRxError(struct rt_rtmp_adapter *pAd, return (NDIS_STATUS_FAILURE); } } - /* Drop not U2M frames, can't's drop here because we will drop beacon in this case */ + /* Drop not U2M frames, can't drop here because we will drop beacon in this case */ /* I am kind of doubting the U2M bit operation */ /* if (pRxD->U2M == 0) */ /* return(NDIS_STATUS_FAILURE); */ diff --git a/trunk/drivers/staging/rt2860/common/cmm_data_usb.c b/trunk/drivers/staging/rt2860/common/cmm_data_usb.c index 5637857ae9eb..83a62faa7e57 100644 --- a/trunk/drivers/staging/rt2860/common/cmm_data_usb.c +++ b/trunk/drivers/staging/rt2860/common/cmm_data_usb.c @@ -860,7 +860,7 @@ int RTMPCheckRxError(struct rt_rtmp_adapter *pAd, DBGPRINT_RAW(RT_DEBUG_ERROR, ("received packet too long\n")); return NDIS_STATUS_FAILURE; } - /* Drop not U2M frames, can't's drop here because we will drop beacon in this case */ + /* Drop not U2M frames, can't drop here because we will drop beacon in this case */ /* I am kind of doubting the U2M bit operation */ /* if (pRxD->U2M == 0) */ /* return(NDIS_STATUS_FAILURE); */ diff --git a/trunk/drivers/staging/rts_pstor/debug.h 
b/trunk/drivers/staging/rts_pstor/debug.h index e1408b0e7ae4..ab305be96fb5 100644 --- a/trunk/drivers/staging/rts_pstor/debug.h +++ b/trunk/drivers/staging/rts_pstor/debug.h @@ -28,7 +28,7 @@ #define RTSX_STOR "rts_pstor: " -#if CONFIG_RTS_PSTOR_DEBUG +#ifdef CONFIG_RTS_PSTOR_DEBUG #define RTSX_DEBUGP(x...) printk(KERN_DEBUG RTSX_STOR x) #define RTSX_DEBUGPN(x...) printk(KERN_DEBUG x) #define RTSX_DEBUGPX(x...) printk(x) diff --git a/trunk/drivers/staging/rts_pstor/ms.c b/trunk/drivers/staging/rts_pstor/ms.c index 810e170894f5..d89795c6a3ac 100644 --- a/trunk/drivers/staging/rts_pstor/ms.c +++ b/trunk/drivers/staging/rts_pstor/ms.c @@ -23,6 +23,7 @@ #include #include #include +#include #include "rtsx.h" #include "rtsx_transport.h" diff --git a/trunk/drivers/staging/rts_pstor/rtsx_chip.c b/trunk/drivers/staging/rts_pstor/rtsx_chip.c index d2f1c715a684..4e60780ea804 100644 --- a/trunk/drivers/staging/rts_pstor/rtsx_chip.c +++ b/trunk/drivers/staging/rts_pstor/rtsx_chip.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "rtsx.h" #include "rtsx_transport.h" @@ -1311,11 +1312,11 @@ void rtsx_polling_func(struct rtsx_chip *chip) #ifdef SUPPORT_OCP if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) { - #if CONFIG_RTS_PSTOR_DEBUG +#ifdef CONFIG_RTS_PSTOR_DEBUG if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER | MS_OC_NOW | MS_OC_EVER)) { RTSX_DEBUGP("Over current, OCPSTAT is 0x%x\n", chip->ocp_stat); } - #endif +#endif if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) { if (chip->card_exist & SD_CARD) { diff --git a/trunk/drivers/staging/rts_pstor/rtsx_scsi.c b/trunk/drivers/staging/rts_pstor/rtsx_scsi.c index 20c2464a20f9..7de1fae443fc 100644 --- a/trunk/drivers/staging/rts_pstor/rtsx_scsi.c +++ b/trunk/drivers/staging/rts_pstor/rtsx_scsi.c @@ -23,6 +23,7 @@ #include #include #include +#include #include "rtsx.h" #include "rtsx_transport.h" diff --git a/trunk/drivers/staging/rts_pstor/sd.c b/trunk/drivers/staging/rts_pstor/sd.c index 8d066bd428c4..b1277a6c7a8b 100644 --- a/trunk/drivers/staging/rts_pstor/sd.c +++ b/trunk/drivers/staging/rts_pstor/sd.c @@ -909,7 +909,7 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir) RTSX_WRITE_REG(chip, SD_VPCLK0_CTL, PHASE_NOT_RESET, PHASE_NOT_RESET); RTSX_WRITE_REG(chip, CLK_CTL, CHANGE_CLK, 0); } else { -#if CONFIG_RTS_PSTOR_DEBUG +#ifdef CONFIG_RTS_PSTOR_DEBUG rtsx_read_register(chip, SD_VP_CTL, &val); RTSX_DEBUGP("SD_VP_CTL: 0x%x\n", val); rtsx_read_register(chip, SD_DCMPS_CTL, &val); @@ -958,7 +958,7 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir) return STATUS_SUCCESS; Fail: -#if CONFIG_RTS_PSTOR_DEBUG +#ifdef CONFIG_RTS_PSTOR_DEBUG rtsx_read_register(chip, SD_VP_CTL, &val); RTSX_DEBUGP("SD_VP_CTL: 0x%x\n", val); rtsx_read_register(chip, SD_DCMPS_CTL, &val); diff --git a/trunk/drivers/staging/rts_pstor/trace.h b/trunk/drivers/staging/rts_pstor/trace.h index 2c668bae6ff4..bc83b49a4eb4 100644 --- a/trunk/drivers/staging/rts_pstor/trace.h +++ b/trunk/drivers/staging/rts_pstor/trace.h @@ -82,7 +82,7 @@ do { \ #define TRACE_GOTO(chip, label) goto label #endif -#if CONFIG_RTS_PSTOR_DEBUG +#ifdef CONFIG_RTS_PSTOR_DEBUG static inline void rtsx_dump(u8 *buf, int buf_len) { int i; diff --git a/trunk/drivers/staging/rts_pstor/xd.c b/trunk/drivers/staging/rts_pstor/xd.c index 7bcd468b8f2c..9f3add1e8f59 100644 --- a/trunk/drivers/staging/rts_pstor/xd.c +++ b/trunk/drivers/staging/rts_pstor/xd.c @@ -23,6 +23,7 @@ #include #include #include +#include #include "rtsx.h" #include "rtsx_transport.h" diff --git 
a/trunk/drivers/staging/samsung-laptop/Kconfig b/trunk/drivers/staging/samsung-laptop/Kconfig deleted file mode 100644 index f27c60864c26..000000000000 --- a/trunk/drivers/staging/samsung-laptop/Kconfig +++ /dev/null @@ -1,10 +0,0 @@ -config SAMSUNG_LAPTOP - tristate "Samsung Laptop driver" - default n - depends on RFKILL && BACKLIGHT_CLASS_DEVICE && X86 - help - This module implements a driver for the N128 Samsung Laptop - providing control over the Wireless LED and the LCD backlight - - To compile this driver as a module, choose - M here: the module will be called samsung-laptop. diff --git a/trunk/drivers/staging/samsung-laptop/Makefile b/trunk/drivers/staging/samsung-laptop/Makefile deleted file mode 100644 index 3c6f42045211..000000000000 --- a/trunk/drivers/staging/samsung-laptop/Makefile +++ /dev/null @@ -1 +0,0 @@ -obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o diff --git a/trunk/drivers/staging/samsung-laptop/TODO b/trunk/drivers/staging/samsung-laptop/TODO deleted file mode 100644 index f7a6d589916e..000000000000 --- a/trunk/drivers/staging/samsung-laptop/TODO +++ /dev/null @@ -1,5 +0,0 @@ -TODO: - - review from other developers - - figure out ACPI video issues - -Please send patches to Greg Kroah-Hartman diff --git a/trunk/drivers/staging/samsung-laptop/samsung-laptop.c b/trunk/drivers/staging/samsung-laptop/samsung-laptop.c deleted file mode 100644 index 25294462b8b6..000000000000 --- a/trunk/drivers/staging/samsung-laptop/samsung-laptop.c +++ /dev/null @@ -1,843 +0,0 @@ -/* - * Samsung Laptop driver - * - * Copyright (C) 2009,2011 Greg Kroah-Hartman (gregkh@suse.de) - * Copyright (C) 2009,2011 Novell Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * This driver is needed because a number of Samsung laptops do not hook - * their control settings through ACPI. So we have to poke around in the - * BIOS to do things like brightness values, and "special" key controls. - */ - -/* - * We have 0 - 8 as valid brightness levels. The specs say that level 0 should - * be reserved by the BIOS (which really doesn't make much sense), we tell - * userspace that the value is 0 - 7 and then just tell the hardware 1 - 8 - */ -#define MAX_BRIGHT 0x07 - - -#define SABI_IFACE_MAIN 0x00 -#define SABI_IFACE_SUB 0x02 -#define SABI_IFACE_COMPLETE 0x04 -#define SABI_IFACE_DATA 0x05 - -/* Structure to get data back to the calling function */ -struct sabi_retval { - u8 retval[20]; -}; - -struct sabi_header_offsets { - u8 port; - u8 re_mem; - u8 iface_func; - u8 en_mem; - u8 data_offset; - u8 data_segment; -}; - -struct sabi_commands { - /* - * Brightness is 0 - 8, as described above. - * Value 0 is for the BIOS to use - */ - u8 get_brightness; - u8 set_brightness; - - /* - * first byte: - * 0x00 - wireless is off - * 0x01 - wireless is on - * second byte: - * 0x02 - 3G is off - * 0x03 - 3G is on - * TODO, verify 3G is correct, that doesn't seem right... 
- */ - u8 get_wireless_button; - u8 set_wireless_button; - - /* 0 is off, 1 is on */ - u8 get_backlight; - u8 set_backlight; - - /* - * 0x80 or 0x00 - no action - * 0x81 - recovery key pressed - */ - u8 get_recovery_mode; - u8 set_recovery_mode; - - /* - * on seclinux: 0 is low, 1 is high, - * on swsmi: 0 is normal, 1 is silent, 2 is turbo - */ - u8 get_performance_level; - u8 set_performance_level; - - /* - * Tell the BIOS that Linux is running on this machine. - * 81 is on, 80 is off - */ - u8 set_linux; -}; - -struct sabi_performance_level { - const char *name; - u8 value; -}; - -struct sabi_config { - const char *test_string; - u16 main_function; - const struct sabi_header_offsets header_offsets; - const struct sabi_commands commands; - const struct sabi_performance_level performance_levels[4]; - u8 min_brightness; - u8 max_brightness; -}; - -static const struct sabi_config sabi_configs[] = { - { - .test_string = "SECLINUX", - - .main_function = 0x4c49, - - .header_offsets = { - .port = 0x00, - .re_mem = 0x02, - .iface_func = 0x03, - .en_mem = 0x04, - .data_offset = 0x05, - .data_segment = 0x07, - }, - - .commands = { - .get_brightness = 0x00, - .set_brightness = 0x01, - - .get_wireless_button = 0x02, - .set_wireless_button = 0x03, - - .get_backlight = 0x04, - .set_backlight = 0x05, - - .get_recovery_mode = 0x06, - .set_recovery_mode = 0x07, - - .get_performance_level = 0x08, - .set_performance_level = 0x09, - - .set_linux = 0x0a, - }, - - .performance_levels = { - { - .name = "silent", - .value = 0, - }, - { - .name = "normal", - .value = 1, - }, - { }, - }, - .min_brightness = 1, - .max_brightness = 8, - }, - { - .test_string = "SwSmi@", - - .main_function = 0x5843, - - .header_offsets = { - .port = 0x00, - .re_mem = 0x04, - .iface_func = 0x02, - .en_mem = 0x03, - .data_offset = 0x05, - .data_segment = 0x07, - }, - - .commands = { - .get_brightness = 0x10, - .set_brightness = 0x11, - - .get_wireless_button = 0x12, - .set_wireless_button = 0x13, - - .get_backlight = 0x2d, - .set_backlight = 0x2e, - - .get_recovery_mode = 0xff, - .set_recovery_mode = 0xff, - - .get_performance_level = 0x31, - .set_performance_level = 0x32, - - .set_linux = 0xff, - }, - - .performance_levels = { - { - .name = "normal", - .value = 0, - }, - { - .name = "silent", - .value = 1, - }, - { - .name = "overclock", - .value = 2, - }, - { }, - }, - .min_brightness = 0, - .max_brightness = 8, - }, - { }, -}; - -static const struct sabi_config *sabi_config; - -static void __iomem *sabi; -static void __iomem *sabi_iface; -static void __iomem *f0000_segment; -static struct backlight_device *backlight_device; -static struct mutex sabi_mutex; -static struct platform_device *sdev; -static struct rfkill *rfk; - -static int force; -module_param(force, bool, 0); -MODULE_PARM_DESC(force, - "Disable the DMI check and forces the driver to be loaded"); - -static int debug; -module_param(debug, bool, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(debug, "Debug enabled or not"); - -static int sabi_get_command(u8 command, struct sabi_retval *sretval) -{ - int retval = 0; - u16 port = readw(sabi + sabi_config->header_offsets.port); - u8 complete, iface_data; - - mutex_lock(&sabi_mutex); - - /* enable memory to be able to write to it */ - outb(readb(sabi + sabi_config->header_offsets.en_mem), port); - - /* write out the command */ - writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN); - writew(command, sabi_iface + SABI_IFACE_SUB); - writeb(0, sabi_iface + SABI_IFACE_COMPLETE); - outb(readb(sabi + 
sabi_config->header_offsets.iface_func), port); - - /* write protect memory to make it safe */ - outb(readb(sabi + sabi_config->header_offsets.re_mem), port); - - /* see if the command actually succeeded */ - complete = readb(sabi_iface + SABI_IFACE_COMPLETE); - iface_data = readb(sabi_iface + SABI_IFACE_DATA); - if (complete != 0xaa || iface_data == 0xff) { - pr_warn("SABI get command 0x%02x failed with completion flag 0x%02x and data 0x%02x\n", - command, complete, iface_data); - retval = -EINVAL; - goto exit; - } - /* - * Save off the data into a structure so the caller use it. - * Right now we only want the first 4 bytes, - * There are commands that need more, but not for the ones we - * currently care about. - */ - sretval->retval[0] = readb(sabi_iface + SABI_IFACE_DATA); - sretval->retval[1] = readb(sabi_iface + SABI_IFACE_DATA + 1); - sretval->retval[2] = readb(sabi_iface + SABI_IFACE_DATA + 2); - sretval->retval[3] = readb(sabi_iface + SABI_IFACE_DATA + 3); - -exit: - mutex_unlock(&sabi_mutex); - return retval; - -} - -static int sabi_set_command(u8 command, u8 data) -{ - int retval = 0; - u16 port = readw(sabi + sabi_config->header_offsets.port); - u8 complete, iface_data; - - mutex_lock(&sabi_mutex); - - /* enable memory to be able to write to it */ - outb(readb(sabi + sabi_config->header_offsets.en_mem), port); - - /* write out the command */ - writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN); - writew(command, sabi_iface + SABI_IFACE_SUB); - writeb(0, sabi_iface + SABI_IFACE_COMPLETE); - writeb(data, sabi_iface + SABI_IFACE_DATA); - outb(readb(sabi + sabi_config->header_offsets.iface_func), port); - - /* write protect memory to make it safe */ - outb(readb(sabi + sabi_config->header_offsets.re_mem), port); - - /* see if the command actually succeeded */ - complete = readb(sabi_iface + SABI_IFACE_COMPLETE); - iface_data = readb(sabi_iface + SABI_IFACE_DATA); - if (complete != 0xaa || iface_data == 0xff) { - pr_warn("SABI set command 0x%02x failed with completion flag 0x%02x and data 0x%02x\n", - command, complete, iface_data); - retval = -EINVAL; - } - - mutex_unlock(&sabi_mutex); - return retval; -} - -static void test_backlight(void) -{ - struct sabi_retval sretval; - - sabi_get_command(sabi_config->commands.get_backlight, &sretval); - printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]); - - sabi_set_command(sabi_config->commands.set_backlight, 0); - printk(KERN_DEBUG "backlight should be off\n"); - - sabi_get_command(sabi_config->commands.get_backlight, &sretval); - printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]); - - msleep(1000); - - sabi_set_command(sabi_config->commands.set_backlight, 1); - printk(KERN_DEBUG "backlight should be on\n"); - - sabi_get_command(sabi_config->commands.get_backlight, &sretval); - printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]); -} - -static void test_wireless(void) -{ - struct sabi_retval sretval; - - sabi_get_command(sabi_config->commands.get_wireless_button, &sretval); - printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]); - - sabi_set_command(sabi_config->commands.set_wireless_button, 0); - printk(KERN_DEBUG "wireless led should be off\n"); - - sabi_get_command(sabi_config->commands.get_wireless_button, &sretval); - printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]); - - msleep(1000); - - sabi_set_command(sabi_config->commands.set_wireless_button, 1); - printk(KERN_DEBUG "wireless led should be on\n"); - - sabi_get_command(sabi_config->commands.get_wireless_button, 
&sretval); - printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]); -} - -static u8 read_brightness(void) -{ - struct sabi_retval sretval; - int user_brightness = 0; - int retval; - - retval = sabi_get_command(sabi_config->commands.get_brightness, - &sretval); - if (!retval) { - user_brightness = sretval.retval[0]; - if (user_brightness != 0) - user_brightness -= sabi_config->min_brightness; - } - return user_brightness; -} - -static void set_brightness(u8 user_brightness) -{ - u8 user_level = user_brightness - sabi_config->min_brightness; - - sabi_set_command(sabi_config->commands.set_brightness, user_level); -} - -static int get_brightness(struct backlight_device *bd) -{ - return (int)read_brightness(); -} - -static int update_status(struct backlight_device *bd) -{ - set_brightness(bd->props.brightness); - - if (bd->props.power == FB_BLANK_UNBLANK) - sabi_set_command(sabi_config->commands.set_backlight, 1); - else - sabi_set_command(sabi_config->commands.set_backlight, 0); - return 0; -} - -static const struct backlight_ops backlight_ops = { - .get_brightness = get_brightness, - .update_status = update_status, -}; - -static int rfkill_set(void *data, bool blocked) -{ - /* Do something with blocked...*/ - /* - * blocked == false is on - * blocked == true is off - */ - if (blocked) - sabi_set_command(sabi_config->commands.set_wireless_button, 0); - else - sabi_set_command(sabi_config->commands.set_wireless_button, 1); - - return 0; -} - -static struct rfkill_ops rfkill_ops = { - .set_block = rfkill_set, -}; - -static int init_wireless(struct platform_device *sdev) -{ - int retval; - - rfk = rfkill_alloc("samsung-wifi", &sdev->dev, RFKILL_TYPE_WLAN, - &rfkill_ops, NULL); - if (!rfk) - return -ENOMEM; - - retval = rfkill_register(rfk); - if (retval) { - rfkill_destroy(rfk); - return -ENODEV; - } - - return 0; -} - -static void destroy_wireless(void) -{ - rfkill_unregister(rfk); - rfkill_destroy(rfk); -} - -static ssize_t get_performance_level(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct sabi_retval sretval; - int retval; - int i; - - /* Read the state */ - retval = sabi_get_command(sabi_config->commands.get_performance_level, - &sretval); - if (retval) - return retval; - - /* The logic is backwards, yeah, lots of fun... 
*/ - for (i = 0; sabi_config->performance_levels[i].name; ++i) { - if (sretval.retval[0] == sabi_config->performance_levels[i].value) - return sprintf(buf, "%s\n", sabi_config->performance_levels[i].name); - } - return sprintf(buf, "%s\n", "unknown"); -} - -static ssize_t set_performance_level(struct device *dev, - struct device_attribute *attr, const char *buf, - size_t count) -{ - if (count >= 1) { - int i; - for (i = 0; sabi_config->performance_levels[i].name; ++i) { - const struct sabi_performance_level *level = - &sabi_config->performance_levels[i]; - if (!strncasecmp(level->name, buf, strlen(level->name))) { - sabi_set_command(sabi_config->commands.set_performance_level, - level->value); - break; - } - } - if (!sabi_config->performance_levels[i].name) - return -EINVAL; - } - return count; -} -static DEVICE_ATTR(performance_level, S_IWUSR | S_IRUGO, - get_performance_level, set_performance_level); - - -static int __init dmi_check_cb(const struct dmi_system_id *id) -{ - pr_info("found laptop model '%s'\n", - id->ident); - return 0; -} - -static struct dmi_system_id __initdata samsung_dmi_table[] = { - { - .ident = "N128", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "N128"), - DMI_MATCH(DMI_BOARD_NAME, "N128"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "N130", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "N130"), - DMI_MATCH(DMI_BOARD_NAME, "N130"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "X125", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "X125"), - DMI_MATCH(DMI_BOARD_NAME, "X125"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "X120/X170", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "X120/X170"), - DMI_MATCH(DMI_BOARD_NAME, "X120/X170"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "NC10", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "NC10"), - DMI_MATCH(DMI_BOARD_NAME, "NC10"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "NP-Q45", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "SQ45S70S"), - DMI_MATCH(DMI_BOARD_NAME, "SQ45S70S"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "X360", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "X360"), - DMI_MATCH(DMI_BOARD_NAME, "X360"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "R410 Plus", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "R410P"), - DMI_MATCH(DMI_BOARD_NAME, "R460"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "R518", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "R518"), - DMI_MATCH(DMI_BOARD_NAME, "R518"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "R519/R719", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "R519/R719"), - DMI_MATCH(DMI_BOARD_NAME, "R519/R719"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "N150/N210/N220/N230", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220/N230"), - DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220/N230"), - }, 
- .callback = dmi_check_cb, - }, - { - .ident = "N150P/N210P/N220P", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "N150P/N210P/N220P"), - DMI_MATCH(DMI_BOARD_NAME, "N150P/N210P/N220P"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "R530/R730", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "R530/R730"), - DMI_MATCH(DMI_BOARD_NAME, "R530/R730"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "NF110/NF210/NF310", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"), - DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "N145P/N250P/N260P", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"), - DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "R70/R71", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, - "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "R70/R71"), - DMI_MATCH(DMI_BOARD_NAME, "R70/R71"), - }, - .callback = dmi_check_cb, - }, - { - .ident = "P460", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "P460"), - DMI_MATCH(DMI_BOARD_NAME, "P460"), - }, - .callback = dmi_check_cb, - }, - { }, -}; -MODULE_DEVICE_TABLE(dmi, samsung_dmi_table); - -static int find_signature(void __iomem *memcheck, const char *testStr) -{ - int i = 0; - int loca; - - for (loca = 0; loca < 0xffff; loca++) { - char temp = readb(memcheck + loca); - - if (temp == testStr[i]) { - if (i == strlen(testStr)-1) - break; - ++i; - } else { - i = 0; - } - } - return loca; -} - -static int __init samsung_init(void) -{ - struct backlight_properties props; - struct sabi_retval sretval; - unsigned int ifaceP; - int i; - int loca; - int retval; - - mutex_init(&sabi_mutex); - - if (!force && !dmi_check_system(samsung_dmi_table)) - return -ENODEV; - - f0000_segment = ioremap_nocache(0xf0000, 0xffff); - if (!f0000_segment) { - pr_err("Can't map the segment at 0xf0000\n"); - return -EINVAL; - } - - /* Try to find one of the signatures in memory to find the header */ - for (i = 0; sabi_configs[i].test_string != 0; ++i) { - sabi_config = &sabi_configs[i]; - loca = find_signature(f0000_segment, sabi_config->test_string); - if (loca != 0xffff) - break; - } - - if (loca == 0xffff) { - pr_err("This computer does not support SABI\n"); - goto error_no_signature; - } - - /* point to the SMI port Number */ - loca += 1; - sabi = (f0000_segment + loca); - - if (debug) { - printk(KERN_DEBUG "This computer supports SABI==%x\n", - loca + 0xf0000 - 6); - printk(KERN_DEBUG "SABI header:\n"); - printk(KERN_DEBUG " SMI Port Number = 0x%04x\n", - readw(sabi + sabi_config->header_offsets.port)); - printk(KERN_DEBUG " SMI Interface Function = 0x%02x\n", - readb(sabi + sabi_config->header_offsets.iface_func)); - printk(KERN_DEBUG " SMI enable memory buffer = 0x%02x\n", - readb(sabi + sabi_config->header_offsets.en_mem)); - printk(KERN_DEBUG " SMI restore memory buffer = 0x%02x\n", - readb(sabi + sabi_config->header_offsets.re_mem)); - printk(KERN_DEBUG " SABI data offset = 0x%04x\n", - readw(sabi + sabi_config->header_offsets.data_offset)); - printk(KERN_DEBUG " SABI data segment = 0x%04x\n", - readw(sabi + sabi_config->header_offsets.data_segment)); - } - - /* Get a pointer to the SABI Interface */ - 
ifaceP = (readw(sabi + sabi_config->header_offsets.data_segment) & 0x0ffff) << 4; - ifaceP += readw(sabi + sabi_config->header_offsets.data_offset) & 0x0ffff; - sabi_iface = ioremap_nocache(ifaceP, 16); - if (!sabi_iface) { - pr_err("Can't remap %x\n", ifaceP); - goto exit; - } - if (debug) { - printk(KERN_DEBUG "ifaceP = 0x%08x\n", ifaceP); - printk(KERN_DEBUG "sabi_iface = %p\n", sabi_iface); - - test_backlight(); - test_wireless(); - - retval = sabi_get_command(sabi_config->commands.get_brightness, - &sretval); - printk(KERN_DEBUG "brightness = 0x%02x\n", sretval.retval[0]); - } - - /* Turn on "Linux" mode in the BIOS */ - if (sabi_config->commands.set_linux != 0xff) { - retval = sabi_set_command(sabi_config->commands.set_linux, - 0x81); - if (retval) { - pr_warn("Linux mode was not set!\n"); - goto error_no_platform; - } - } - - /* knock up a platform device to hang stuff off of */ - sdev = platform_device_register_simple("samsung", -1, NULL, 0); - if (IS_ERR(sdev)) - goto error_no_platform; - - /* create a backlight device to talk to this one */ - memset(&props, 0, sizeof(struct backlight_properties)); - props.type = BACKLIGHT_PLATFORM; - props.max_brightness = sabi_config->max_brightness; - backlight_device = backlight_device_register("samsung", &sdev->dev, - NULL, &backlight_ops, - &props); - if (IS_ERR(backlight_device)) - goto error_no_backlight; - - backlight_device->props.brightness = read_brightness(); - backlight_device->props.power = FB_BLANK_UNBLANK; - backlight_update_status(backlight_device); - - retval = init_wireless(sdev); - if (retval) - goto error_no_rfk; - - retval = device_create_file(&sdev->dev, &dev_attr_performance_level); - if (retval) - goto error_file_create; - -exit: - return 0; - -error_file_create: - destroy_wireless(); - -error_no_rfk: - backlight_device_unregister(backlight_device); - -error_no_backlight: - platform_device_unregister(sdev); - -error_no_platform: - iounmap(sabi_iface); - -error_no_signature: - iounmap(f0000_segment); - return -EINVAL; -} - -static void __exit samsung_exit(void) -{ - /* Turn off "Linux" mode in the BIOS */ - if (sabi_config->commands.set_linux != 0xff) - sabi_set_command(sabi_config->commands.set_linux, 0x80); - - device_remove_file(&sdev->dev, &dev_attr_performance_level); - backlight_device_unregister(backlight_device); - destroy_wireless(); - iounmap(sabi_iface); - iounmap(f0000_segment); - platform_device_unregister(sdev); -} - -module_init(samsung_init); -module_exit(samsung_exit); - -MODULE_AUTHOR("Greg Kroah-Hartman "); -MODULE_DESCRIPTION("Samsung Backlight driver"); -MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/staging/solo6x10/Kconfig b/trunk/drivers/staging/solo6x10/Kconfig index 2cf77c940860..03dcac4ea4d0 100644 --- a/trunk/drivers/staging/solo6x10/Kconfig +++ b/trunk/drivers/staging/solo6x10/Kconfig @@ -2,6 +2,7 @@ config SOLO6X10 tristate "Softlogic 6x10 MPEG codec cards" depends on PCI && VIDEO_DEV && SND && I2C select VIDEOBUF_DMA_SG + select SND_PCM ---help--- This driver supports the Softlogic based MPEG-4 and h.264 codec codec cards. 
diff --git a/trunk/drivers/staging/spectra/ffsport.c b/trunk/drivers/staging/spectra/ffsport.c index 20dae73d3b78..506547b603e1 100644 --- a/trunk/drivers/staging/spectra/ffsport.c +++ b/trunk/drivers/staging/spectra/ffsport.c @@ -653,7 +653,7 @@ static int SBD_setup_device(struct spectra_nand_dev *dev, int which) } dev->queue->queuedata = dev; - /* As Linux block layer does't support >4KB hardware sector, */ + /* As Linux block layer doesn't support >4KB hardware sector, */ /* Here we force report 512 byte hardware sector size to Kernel */ blk_queue_logical_block_size(dev->queue, 512); diff --git a/trunk/drivers/staging/tidspbridge/dynload/cload.c b/trunk/drivers/staging/tidspbridge/dynload/cload.c index 5cecd237e3f6..fe1ef0addb09 100644 --- a/trunk/drivers/staging/tidspbridge/dynload/cload.c +++ b/trunk/drivers/staging/tidspbridge/dynload/cload.c @@ -718,7 +718,7 @@ static void dload_symbols(struct dload_state *dlthis) * as a temporary for .dllview record construction. * Allocate storage for the whole table. Add 1 to the section count * in case a trampoline section is auto-generated as well as the - * size of the trampoline section name so DLLView does't get lost. + * size of the trampoline section name so DLLView doesn't get lost. */ siz = sym_count * sizeof(struct local_symbol); diff --git a/trunk/drivers/staging/tty/specialix.c b/trunk/drivers/staging/tty/specialix.c index cb24c6d999db..5c3598ec7456 100644 --- a/trunk/drivers/staging/tty/specialix.c +++ b/trunk/drivers/staging/tty/specialix.c @@ -978,7 +978,7 @@ static void sx_change_speed(struct specialix_board *bp, spin_lock_irqsave(&bp->lock, flags); sx_out(bp, CD186x_CAR, port_No(port)); - /* The Specialix board does't implement the RTS lines. + /* The Specialix board doesn't implement the RTS lines. They are used to set the IRQ level. Don't touch them. */ if (sx_crtscts(tty)) port->MSVR = MSVR_DTR | (sx_in(bp, CD186x_MSVR) & MSVR_RTS); diff --git a/trunk/drivers/staging/usbip/vhci_hcd.c b/trunk/drivers/staging/usbip/vhci_hcd.c index 0f02a4b12ae4..4f4f13321f40 100644 --- a/trunk/drivers/staging/usbip/vhci_hcd.c +++ b/trunk/drivers/staging/usbip/vhci_hcd.c @@ -876,8 +876,10 @@ static void vhci_shutdown_connection(struct usbip_device *ud) } /* kill threads related to this sdev, if v.c. 
exists */ - kthread_stop(vdev->ud.tcp_rx); - kthread_stop(vdev->ud.tcp_tx); + if (vdev->ud.tcp_rx) + kthread_stop(vdev->ud.tcp_rx); + if (vdev->ud.tcp_tx) + kthread_stop(vdev->ud.tcp_tx); usbip_uinfo("stop threads\n"); @@ -949,9 +951,6 @@ static void vhci_device_init(struct vhci_device *vdev) { memset(vdev, 0, sizeof(*vdev)); - vdev->ud.tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx"); - vdev->ud.tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx"); - vdev->ud.side = USBIP_VHCI; vdev->ud.status = VDEV_ST_NULL; /* vdev->ud.lock = SPIN_LOCK_UNLOCKED; */ @@ -1139,7 +1138,7 @@ static int vhci_hcd_probe(struct platform_device *pdev) usbip_uerr("create hcd failed\n"); return -ENOMEM; } - + hcd->has_tt = 1; /* this is private data for vhci_hcd */ the_controller = hcd_to_vhci(hcd); diff --git a/trunk/drivers/staging/usbip/vhci_sysfs.c b/trunk/drivers/staging/usbip/vhci_sysfs.c index 3f2459f30415..e2dadbd5ef1e 100644 --- a/trunk/drivers/staging/usbip/vhci_sysfs.c +++ b/trunk/drivers/staging/usbip/vhci_sysfs.c @@ -21,6 +21,7 @@ #include "vhci.h" #include +#include /* TODO: refine locking ?*/ @@ -220,13 +221,13 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr, vdev->ud.tcp_socket = socket; vdev->ud.status = VDEV_ST_NOTASSIGNED; - wake_up_process(vdev->ud.tcp_rx); - wake_up_process(vdev->ud.tcp_tx); - spin_unlock(&vdev->ud.lock); spin_unlock(&the_controller->lock); /* end the lock */ + vdev->ud.tcp_rx = kthread_run(vhci_rx_loop, &vdev->ud, "vhci_rx"); + vdev->ud.tcp_tx = kthread_run(vhci_tx_loop, &vdev->ud, "vhci_tx"); + rh_port_connect(rhport, speed); return count; diff --git a/trunk/drivers/staging/wlan-ng/cfg80211.c b/trunk/drivers/staging/wlan-ng/cfg80211.c index 6a71f52c59b1..76378397b763 100644 --- a/trunk/drivers/staging/wlan-ng/cfg80211.c +++ b/trunk/drivers/staging/wlan-ng/cfg80211.c @@ -273,7 +273,7 @@ int prism2_del_key(struct wiphy *wiphy, struct net_device *dev, } int prism2_set_default_key(struct wiphy *wiphy, struct net_device *dev, - u8 key_index) + u8 key_index, bool unicast, bool multicast) { wlandevice_t *wlandev = dev->ml_priv; diff --git a/trunk/drivers/target/Kconfig b/trunk/drivers/target/Kconfig index 9ef2dbbfa62b..5cb0f0ef6af0 100644 --- a/trunk/drivers/target/Kconfig +++ b/trunk/drivers/target/Kconfig @@ -30,5 +30,6 @@ config TCM_PSCSI passthrough access to Linux/SCSI device source "drivers/target/loopback/Kconfig" +source "drivers/target/tcm_fc/Kconfig" endif diff --git a/trunk/drivers/target/Makefile b/trunk/drivers/target/Makefile index 1178bbfc68fe..21df808a992c 100644 --- a/trunk/drivers/target/Makefile +++ b/trunk/drivers/target/Makefile @@ -24,3 +24,5 @@ obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o # Fabric modules obj-$(CONFIG_LOOPBACK_TARGET) += loopback/ + +obj-$(CONFIG_TCM_FC) += tcm_fc/ diff --git a/trunk/drivers/target/tcm_fc/Kconfig b/trunk/drivers/target/tcm_fc/Kconfig new file mode 100644 index 000000000000..40caf458e89e --- /dev/null +++ b/trunk/drivers/target/tcm_fc/Kconfig @@ -0,0 +1,5 @@ +config TCM_FC + tristate "TCM_FC fabric Plugin" + depends on LIBFC + help + Say Y here to enable the TCM FC plugin for accessing FC fabrics in TCM diff --git a/trunk/drivers/target/tcm_fc/Makefile b/trunk/drivers/target/tcm_fc/Makefile new file mode 100644 index 000000000000..7a5c2b64cf65 --- /dev/null +++ b/trunk/drivers/target/tcm_fc/Makefile @@ -0,0 +1,15 @@ +EXTRA_CFLAGS += -I$(srctree)/drivers/target/ \ + -I$(srctree)/drivers/scsi/ \ + -I$(srctree)/include/scsi/ \ + -I$(srctree)/drivers/target/tcm_fc/ + +tcm_fc-y += 
tfc_cmd.o \ + tfc_conf.o \ + tfc_io.o \ + tfc_sess.o + +obj-$(CONFIG_TCM_FC) += tcm_fc.o + +ifdef CONFIGFS_TCM_FC_DEBUG +EXTRA_CFLAGS += -DTCM_FC_DEBUG +endif diff --git a/trunk/drivers/target/tcm_fc/tcm_fc.h b/trunk/drivers/target/tcm_fc/tcm_fc.h new file mode 100644 index 000000000000..defff32b7880 --- /dev/null +++ b/trunk/drivers/target/tcm_fc/tcm_fc.h @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2010 Cisco Systems, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ +#ifndef __TCM_FC_H__ +#define __TCM_FC_H__ + +#define FT_VERSION "0.3" + +#define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */ +#define FT_TPG_NAMELEN 32 /* max length of TPG name */ +#define FT_LUN_NAMELEN 32 /* max length of LUN name */ + +/* + * Debug options. + */ +#define FT_DEBUG_CONF 0x01 /* configuration messages */ +#define FT_DEBUG_SESS 0x02 /* session messages */ +#define FT_DEBUG_TM 0x04 /* TM operations */ +#define FT_DEBUG_IO 0x08 /* I/O commands */ +#define FT_DEBUG_DATA 0x10 /* Data transfer */ + +extern unsigned int ft_debug_logging; /* debug options */ + +#define FT_DEBUG(mask, fmt, args...) \ + do { \ + if (ft_debug_logging & (mask)) \ + printk(KERN_INFO "tcm_fc: %s: " fmt, \ + __func__, ##args); \ + } while (0) + +#define FT_CONF_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_CONF, fmt, ##args) +#define FT_SESS_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_SESS, fmt, ##args) +#define FT_TM_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_TM, fmt, ##args) +#define FT_IO_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_IO, fmt, ##args) +#define FT_DATA_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_DATA, fmt, ##args) + +struct ft_transport_id { + __u8 format; + __u8 __resvd1[7]; + __u8 wwpn[8]; + __u8 __resvd2[8]; +} __attribute__((__packed__)); + +/* + * Session (remote port). + */ +struct ft_sess { + u32 port_id; /* for hash lookup use only */ + u32 params; + u16 max_frame; /* maximum frame size */ + u64 port_name; /* port name for transport ID */ + struct ft_tport *tport; + struct se_session *se_sess; + struct hlist_node hash; /* linkage in ft_sess_hash table */ + struct rcu_head rcu; + struct kref kref; /* ref for hash and outstanding I/Os */ +}; + +/* + * Hash table of sessions per local port. + * Hash lookup by remote port FC_ID. + */ +#define FT_SESS_HASH_BITS 6 +#define FT_SESS_HASH_SIZE (1 << FT_SESS_HASH_BITS) + +/* + * Per local port data. + * This is created only after a TPG exists that allows target function + * for the local port. If the TPG exists, this is allocated when + * we're notified that the local port has been created, or when + * the first PRLI provider callback is received. + */ +struct ft_tport { + struct fc_lport *lport; + struct ft_tpg *tpg; /* NULL if TPG deleted before tport */ + u32 sess_count; /* number of sessions in hash */ + struct rcu_head rcu; + struct hlist_head hash[FT_SESS_HASH_SIZE]; /* list of sessions */ +}; + +/* + * Node ID and authentication. 
+ */ +struct ft_node_auth { + u64 port_name; + u64 node_name; +}; + +/* + * Node ACL for FC remote port session. + */ +struct ft_node_acl { + struct ft_node_auth node_auth; + struct se_node_acl se_node_acl; +}; + +struct ft_lun { + u32 index; + char name[FT_LUN_NAMELEN]; +}; + +/* + * Target portal group (local port). + */ +struct ft_tpg { + u32 index; + struct ft_lport_acl *lport_acl; + struct ft_tport *tport; /* active tport or NULL */ + struct list_head list; /* linkage in ft_lport_acl tpg_list */ + struct list_head lun_list; /* head of LUNs */ + struct se_portal_group se_tpg; + struct task_struct *thread; /* processing thread */ + struct se_queue_obj qobj; /* queue for processing thread */ +}; + +struct ft_lport_acl { + u64 wwpn; + char name[FT_NAMELEN]; + struct list_head list; + struct list_head tpg_list; + struct se_wwn fc_lport_wwn; +}; + +enum ft_cmd_state { + FC_CMD_ST_NEW = 0, + FC_CMD_ST_REJ +}; + +/* + * Commands + */ +struct ft_cmd { + enum ft_cmd_state state; + u16 lun; /* LUN from request */ + struct ft_sess *sess; /* session held for cmd */ + struct fc_seq *seq; /* sequence in exchange mgr */ + struct se_cmd se_cmd; /* Local TCM I/O descriptor */ + struct fc_frame *req_frame; + unsigned char *cdb; /* pointer to CDB inside frame */ + u32 write_data_len; /* data received on writes */ + struct se_queue_req se_req; + /* Local sense buffer */ + unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER]; + u32 was_ddp_setup:1; /* Set only if ddp is setup */ + struct scatterlist *sg; /* Set only if DDP is setup */ + u32 sg_cnt; /* No. of item in scatterlist */ +}; + +extern struct list_head ft_lport_list; +extern struct mutex ft_lport_lock; +extern struct fc4_prov ft_prov; +extern struct target_fabric_configfs *ft_configfs; + +/* + * Fabric methods. + */ + +/* + * Session ops. + */ +void ft_sess_put(struct ft_sess *); +int ft_sess_shutdown(struct se_session *); +void ft_sess_close(struct se_session *); +void ft_sess_stop(struct se_session *, int, int); +int ft_sess_logged_in(struct se_session *); +u32 ft_sess_get_index(struct se_session *); +u32 ft_sess_get_port_name(struct se_session *, unsigned char *, u32); +void ft_sess_set_erl0(struct se_session *); + +void ft_lport_add(struct fc_lport *, void *); +void ft_lport_del(struct fc_lport *, void *); +int ft_lport_notify(struct notifier_block *, unsigned long, void *); + +/* + * IO methods. + */ +void ft_check_stop_free(struct se_cmd *); +void ft_release_cmd(struct se_cmd *); +int ft_queue_status(struct se_cmd *); +int ft_queue_data_in(struct se_cmd *); +int ft_write_pending(struct se_cmd *); +int ft_write_pending_status(struct se_cmd *); +u32 ft_get_task_tag(struct se_cmd *); +int ft_get_cmd_state(struct se_cmd *); +void ft_new_cmd_failure(struct se_cmd *); +int ft_queue_tm_resp(struct se_cmd *); +int ft_is_state_remove(struct se_cmd *); + +/* + * other internal functions. 
+ */ +int ft_thread(void *); +void ft_recv_req(struct ft_sess *, struct fc_frame *); +struct ft_tpg *ft_lport_find_tpg(struct fc_lport *); +struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *); + +void ft_recv_write_data(struct ft_cmd *, struct fc_frame *); +void ft_dump_cmd(struct ft_cmd *, const char *caller); + +ssize_t ft_format_wwn(char *, size_t, u64); + +#endif /* __TCM_FC_H__ */ diff --git a/trunk/drivers/target/tcm_fc/tfc_cmd.c b/trunk/drivers/target/tcm_fc/tfc_cmd.c new file mode 100644 index 000000000000..49e51778f733 --- /dev/null +++ b/trunk/drivers/target/tcm_fc/tfc_cmd.c @@ -0,0 +1,696 @@ +/* + * Copyright (c) 2010 Cisco Systems, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +/* XXX TBD some includes may be extraneous */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tcm_fc.h" + +/* + * Dump cmd state for debugging. + */ +void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) +{ + struct fc_exch *ep; + struct fc_seq *sp; + struct se_cmd *se_cmd; + struct se_mem *mem; + struct se_transport_task *task; + + if (!(ft_debug_logging & FT_DEBUG_IO)) + return; + + se_cmd = &cmd->se_cmd; + printk(KERN_INFO "%s: cmd %p state %d sess %p seq %p se_cmd %p\n", + caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd); + printk(KERN_INFO "%s: cmd %p cdb %p\n", + caller, cmd, cmd->cdb); + printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun); + + task = T_TASK(se_cmd); + printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n", + caller, cmd, task, task->t_tasks_se_num, + task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags); + if (task->t_mem_list) + list_for_each_entry(mem, task->t_mem_list, se_list) + printk(KERN_INFO "%s: cmd %p mem %p page %p " + "len 0x%x off 0x%x\n", + caller, cmd, mem, + mem->se_page, mem->se_len, mem->se_off); + sp = cmd->seq; + if (sp) { + ep = fc_seq_exch(sp); + printk(KERN_INFO "%s: cmd %p sid %x did %x " + "ox_id %x rx_id %x seq_id %x e_stat %x\n", + caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid, + sp->id, ep->esb_stat); + } + print_hex_dump(KERN_INFO, "ft_dump_cmd ", DUMP_PREFIX_NONE, + 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0); +} + +/* + * Get LUN from CDB. 
+ */ +static int ft_get_lun_for_cmd(struct ft_cmd *cmd, u8 *lunp) +{ + u64 lun; + + lun = lunp[1]; + switch (lunp[0] >> 6) { + case 0: + break; + case 1: + lun |= (lunp[0] & 0x3f) << 8; + break; + default: + return -1; + } + if (lun >= TRANSPORT_MAX_LUNS_PER_TPG) + return -1; + cmd->lun = lun; + return transport_get_lun_for_cmd(&cmd->se_cmd, NULL, lun); +} + +static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd) +{ + struct se_queue_obj *qobj; + unsigned long flags; + + qobj = &sess->tport->tpg->qobj; + spin_lock_irqsave(&qobj->cmd_queue_lock, flags); + list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list); + spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); + atomic_inc(&qobj->queue_cnt); + wake_up_interruptible(&qobj->thread_wq); +} + +static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj) +{ + unsigned long flags; + struct se_queue_req *qr; + + spin_lock_irqsave(&qobj->cmd_queue_lock, flags); + if (list_empty(&qobj->qobj_list)) { + spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); + return NULL; + } + qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list); + list_del(&qr->qr_list); + atomic_dec(&qobj->queue_cnt); + spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); + return container_of(qr, struct ft_cmd, se_req); +} + +static void ft_free_cmd(struct ft_cmd *cmd) +{ + struct fc_frame *fp; + struct fc_lport *lport; + + if (!cmd) + return; + fp = cmd->req_frame; + lport = fr_dev(fp); + if (fr_seq(fp)) + lport->tt.seq_release(fr_seq(fp)); + fc_frame_free(fp); + ft_sess_put(cmd->sess); /* undo get from lookup at recv */ + kfree(cmd); +} + +void ft_release_cmd(struct se_cmd *se_cmd) +{ + struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); + + ft_free_cmd(cmd); +} + +void ft_check_stop_free(struct se_cmd *se_cmd) +{ + transport_generic_free_cmd(se_cmd, 0, 1, 0); +} + +/* + * Send response. + */ +int ft_queue_status(struct se_cmd *se_cmd) +{ + struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); + struct fc_frame *fp; + struct fcp_resp_with_ext *fcp; + struct fc_lport *lport; + struct fc_exch *ep; + size_t len; + + ft_dump_cmd(cmd, __func__); + ep = fc_seq_exch(cmd->seq); + lport = ep->lp; + len = sizeof(*fcp) + se_cmd->scsi_sense_length; + fp = fc_frame_alloc(lport, len); + if (!fp) { + /* XXX shouldn't just drop it - requeue and retry? */ + return 0; + } + fcp = fc_frame_payload_get(fp, len); + memset(fcp, 0, len); + fcp->resp.fr_status = se_cmd->scsi_status; + + len = se_cmd->scsi_sense_length; + if (len) { + fcp->resp.fr_flags |= FCP_SNS_LEN_VAL; + fcp->ext.fr_sns_len = htonl(len); + memcpy((fcp + 1), se_cmd->sense_buffer, len); + } + + /* + * Test underflow and overflow with one mask. Usually both are off. + * Bidirectional commands are not handled yet. + */ + if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) { + if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) + fcp->resp.fr_flags |= FCP_RESID_OVER; + else + fcp->resp.fr_flags |= FCP_RESID_UNDER; + fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count); + } + + /* + * Send response. 
+ */ + cmd->seq = lport->tt.seq_start_next(cmd->seq); + fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP, + FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0); + + lport->tt.seq_send(lport, cmd->seq, fp); + lport->tt.exch_done(cmd->seq); + return 0; +} + +int ft_write_pending_status(struct se_cmd *se_cmd) +{ + struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); + + return cmd->write_data_len != se_cmd->data_length; +} + +/* + * Send TX_RDY (transfer ready). + */ +int ft_write_pending(struct se_cmd *se_cmd) +{ + struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); + struct fc_frame *fp; + struct fcp_txrdy *txrdy; + struct fc_lport *lport; + struct fc_exch *ep; + struct fc_frame_header *fh; + u32 f_ctl; + + ft_dump_cmd(cmd, __func__); + + ep = fc_seq_exch(cmd->seq); + lport = ep->lp; + fp = fc_frame_alloc(lport, sizeof(*txrdy)); + if (!fp) + return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; + + txrdy = fc_frame_payload_get(fp, sizeof(*txrdy)); + memset(txrdy, 0, sizeof(*txrdy)); + txrdy->ft_burst_len = htonl(se_cmd->data_length); + + cmd->seq = lport->tt.seq_start_next(cmd->seq); + fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP, + FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); + + fh = fc_frame_header_get(fp); + f_ctl = ntoh24(fh->fh_f_ctl); + + /* Only if it is 'Exchange Responder' */ + if (f_ctl & FC_FC_EX_CTX) { + /* Target is 'exchange responder' and sending XFER_READY + * to 'exchange initiator (initiator)' + */ + if ((ep->xid <= lport->lro_xid) && + (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) { + if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { + /* + * Map se_mem list to scatterlist, so that + * DDP can be setup. DDP setup function require + * scatterlist. se_mem_list is internal to + * TCM/LIO target + */ + transport_do_task_sg_chain(se_cmd); + cmd->sg = T_TASK(se_cmd)->t_tasks_sg_chained; + cmd->sg_cnt = + T_TASK(se_cmd)->t_tasks_sg_chained_no; + } + if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid, + cmd->sg, cmd->sg_cnt)) + cmd->was_ddp_setup = 1; + } + } + lport->tt.seq_send(lport, cmd->seq, fp); + return 0; +} + +u32 ft_get_task_tag(struct se_cmd *se_cmd) +{ + struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); + + return fc_seq_exch(cmd->seq)->rxid; +} + +int ft_get_cmd_state(struct se_cmd *se_cmd) +{ + struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); + + return cmd->state; +} + +int ft_is_state_remove(struct se_cmd *se_cmd) +{ + return 0; /* XXX TBD */ +} + +void ft_new_cmd_failure(struct se_cmd *se_cmd) +{ + /* XXX TBD */ + printk(KERN_INFO "%s: se_cmd %p\n", __func__, se_cmd); +} + +/* + * FC sequence response handler for follow-on sequences (data) and aborts. 
+ */ +static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg) +{ + struct ft_cmd *cmd = arg; + struct fc_frame_header *fh; + + if (IS_ERR(fp)) { + /* XXX need to find cmd if queued */ + cmd->se_cmd.t_state = TRANSPORT_REMOVE; + cmd->seq = NULL; + transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0); + return; + } + + fh = fc_frame_header_get(fp); + + switch (fh->fh_r_ctl) { + case FC_RCTL_DD_SOL_DATA: /* write data */ + ft_recv_write_data(cmd, fp); + break; + case FC_RCTL_DD_UNSOL_CTL: /* command */ + case FC_RCTL_DD_SOL_CTL: /* transfer ready */ + case FC_RCTL_DD_DATA_DESC: /* transfer ready */ + default: + printk(KERN_INFO "%s: unhandled frame r_ctl %x\n", + __func__, fh->fh_r_ctl); + fc_frame_free(fp); + transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0); + break; + } +} + +/* + * Send a FCP response including SCSI status and optional FCP rsp_code. + * status is SAM_STAT_GOOD (zero) iff code is valid. + * This is used in error cases, such as allocation failures. + */ +static void ft_send_resp_status(struct fc_lport *lport, + const struct fc_frame *rx_fp, + u32 status, enum fcp_resp_rsp_codes code) +{ + struct fc_frame *fp; + struct fc_seq *sp; + const struct fc_frame_header *fh; + size_t len; + struct fcp_resp_with_ext *fcp; + struct fcp_resp_rsp_info *info; + + fh = fc_frame_header_get(rx_fp); + FT_IO_DBG("FCP error response: did %x oxid %x status %x code %x\n", + ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code); + len = sizeof(*fcp); + if (status == SAM_STAT_GOOD) + len += sizeof(*info); + fp = fc_frame_alloc(lport, len); + if (!fp) + return; + fcp = fc_frame_payload_get(fp, len); + memset(fcp, 0, len); + fcp->resp.fr_status = status; + if (status == SAM_STAT_GOOD) { + fcp->ext.fr_rsp_len = htonl(sizeof(*info)); + fcp->resp.fr_flags |= FCP_RSP_LEN_VAL; + info = (struct fcp_resp_rsp_info *)(fcp + 1); + info->rsp_code = code; + } + + fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0); + sp = fr_seq(fp); + if (sp) + lport->tt.seq_send(lport, sp, fp); + else + lport->tt.frame_send(lport, fp); +} + +/* + * Send error or task management response. + * Always frees the cmd and associated state. + */ +static void ft_send_resp_code(struct ft_cmd *cmd, enum fcp_resp_rsp_codes code) +{ + ft_send_resp_status(cmd->sess->tport->lport, + cmd->req_frame, SAM_STAT_GOOD, code); + ft_free_cmd(cmd); +} + +/* + * Handle Task Management Request. + */ +static void ft_send_tm(struct ft_cmd *cmd) +{ + struct se_tmr_req *tmr; + struct fcp_cmnd *fcp; + u8 tm_func; + + fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp)); + + switch (fcp->fc_tm_flags) { + case FCP_TMF_LUN_RESET: + tm_func = TMR_LUN_RESET; + if (ft_get_lun_for_cmd(cmd, fcp->fc_lun) < 0) { + ft_dump_cmd(cmd, __func__); + transport_send_check_condition_and_sense(&cmd->se_cmd, + cmd->se_cmd.scsi_sense_reason, 0); + ft_sess_put(cmd->sess); + return; + } + break; + case FCP_TMF_TGT_RESET: + tm_func = TMR_TARGET_WARM_RESET; + break; + case FCP_TMF_CLR_TASK_SET: + tm_func = TMR_CLEAR_TASK_SET; + break; + case FCP_TMF_ABT_TASK_SET: + tm_func = TMR_ABORT_TASK_SET; + break; + case FCP_TMF_CLR_ACA: + tm_func = TMR_CLEAR_ACA; + break; + default: + /* + * FCP4r01 indicates having a combination of + * tm_flags set is invalid. 
+ */ + FT_TM_DBG("invalid FCP tm_flags %x\n", fcp->fc_tm_flags); + ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID); + return; + } + + FT_TM_DBG("alloc tm cmd fn %d\n", tm_func); + tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func); + if (!tmr) { + FT_TM_DBG("alloc failed\n"); + ft_send_resp_code(cmd, FCP_TMF_FAILED); + return; + } + cmd->se_cmd.se_tmr_req = tmr; + transport_generic_handle_tmr(&cmd->se_cmd); +} + +/* + * Send status from completed task management request. + */ +int ft_queue_tm_resp(struct se_cmd *se_cmd) +{ + struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); + struct se_tmr_req *tmr = se_cmd->se_tmr_req; + enum fcp_resp_rsp_codes code; + + switch (tmr->response) { + case TMR_FUNCTION_COMPLETE: + code = FCP_TMF_CMPL; + break; + case TMR_LUN_DOES_NOT_EXIST: + code = FCP_TMF_INVALID_LUN; + break; + case TMR_FUNCTION_REJECTED: + code = FCP_TMF_REJECTED; + break; + case TMR_TASK_DOES_NOT_EXIST: + case TMR_TASK_STILL_ALLEGIANT: + case TMR_TASK_FAILOVER_NOT_SUPPORTED: + case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED: + case TMR_FUNCTION_AUTHORIZATION_FAILED: + default: + code = FCP_TMF_FAILED; + break; + } + FT_TM_DBG("tmr fn %d resp %d fcp code %d\n", + tmr->function, tmr->response, code); + ft_send_resp_code(cmd, code); + return 0; +} + +/* + * Handle incoming FCP command. + */ +static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp) +{ + struct ft_cmd *cmd; + struct fc_lport *lport = sess->tport->lport; + + cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); + if (!cmd) + goto busy; + cmd->sess = sess; + cmd->seq = lport->tt.seq_assign(lport, fp); + if (!cmd->seq) { + kfree(cmd); + goto busy; + } + cmd->req_frame = fp; /* hold frame during cmd */ + ft_queue_cmd(sess, cmd); + return; + +busy: + FT_IO_DBG("cmd or seq allocation failure - sending BUSY\n"); + ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0); + fc_frame_free(fp); + ft_sess_put(sess); /* undo get from lookup */ +} + + +/* + * Handle incoming FCP frame. + * Caller has verified that the frame is type FCP. + */ +void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp) +{ + struct fc_frame_header *fh = fc_frame_header_get(fp); + + switch (fh->fh_r_ctl) { + case FC_RCTL_DD_UNSOL_CMD: /* command */ + ft_recv_cmd(sess, fp); + break; + case FC_RCTL_DD_SOL_DATA: /* write data */ + case FC_RCTL_DD_UNSOL_CTL: + case FC_RCTL_DD_SOL_CTL: + case FC_RCTL_DD_DATA_DESC: /* transfer ready */ + case FC_RCTL_ELS4_REQ: /* SRR, perhaps */ + default: + printk(KERN_INFO "%s: unhandled frame r_ctl %x\n", + __func__, fh->fh_r_ctl); + fc_frame_free(fp); + ft_sess_put(sess); /* undo get from lookup */ + break; + } +} + +/* + * Send new command to target. 
+ */ +static void ft_send_cmd(struct ft_cmd *cmd) +{ + struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame); + struct se_cmd *se_cmd; + struct fcp_cmnd *fcp; + int data_dir; + u32 data_len; + int task_attr; + int ret; + + fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp)); + if (!fcp) + goto err; + + if (fcp->fc_flags & FCP_CFL_LEN_MASK) + goto err; /* not handling longer CDBs yet */ + + if (fcp->fc_tm_flags) { + task_attr = FCP_PTA_SIMPLE; + data_dir = DMA_NONE; + data_len = 0; + } else { + switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) { + case 0: + data_dir = DMA_NONE; + break; + case FCP_CFL_RDDATA: + data_dir = DMA_FROM_DEVICE; + break; + case FCP_CFL_WRDATA: + data_dir = DMA_TO_DEVICE; + break; + case FCP_CFL_WRDATA | FCP_CFL_RDDATA: + goto err; /* TBD not supported by tcm_fc yet */ + } + + /* FCP_PTA_ maps 1:1 to TASK_ATTR_ */ + task_attr = fcp->fc_pri_ta & FCP_PTA_MASK; + data_len = ntohl(fcp->fc_dl); + cmd->cdb = fcp->fc_cdb; + } + + se_cmd = &cmd->se_cmd; + /* + * Initialize struct se_cmd descriptor from target_core_mod + * infrastructure + */ + transport_init_se_cmd(se_cmd, &ft_configfs->tf_ops, cmd->sess->se_sess, + data_len, data_dir, task_attr, + &cmd->ft_sense_buffer[0]); + /* + * Check for FCP task management flags + */ + if (fcp->fc_tm_flags) { + ft_send_tm(cmd); + return; + } + + fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd); + + ret = ft_get_lun_for_cmd(cmd, fcp->fc_lun); + if (ret < 0) { + ft_dump_cmd(cmd, __func__); + transport_send_check_condition_and_sense(&cmd->se_cmd, + cmd->se_cmd.scsi_sense_reason, 0); + return; + } + + ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb); + + FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret); + ft_dump_cmd(cmd, __func__); + + if (ret == -1) { + transport_send_check_condition_and_sense(se_cmd, + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); + transport_generic_free_cmd(se_cmd, 0, 1, 0); + return; + } + if (ret == -2) { + if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) + ft_queue_status(se_cmd); + else + transport_send_check_condition_and_sense(se_cmd, + se_cmd->scsi_sense_reason, 0); + transport_generic_free_cmd(se_cmd, 0, 1, 0); + return; + } + transport_generic_handle_cdb(se_cmd); + return; + +err: + ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID); + return; +} + +/* + * Handle request in the command thread. + */ +static void ft_exec_req(struct ft_cmd *cmd) +{ + FT_IO_DBG("cmd state %x\n", cmd->state); + switch (cmd->state) { + case FC_CMD_ST_NEW: + ft_send_cmd(cmd); + break; + default: + break; + } +} + +/* + * Processing thread. + * Currently one thread per tpg. + */ +int ft_thread(void *arg) +{ + struct ft_tpg *tpg = arg; + struct se_queue_obj *qobj = &tpg->qobj; + struct ft_cmd *cmd; + int ret; + + set_user_nice(current, -20); + + while (!kthread_should_stop()) { + ret = wait_event_interruptible(qobj->thread_wq, + atomic_read(&qobj->queue_cnt) || kthread_should_stop()); + if (ret < 0 || kthread_should_stop()) + goto out; + cmd = ft_dequeue_cmd(qobj); + if (cmd) + ft_exec_req(cmd); + } + +out: + return 0; +} diff --git a/trunk/drivers/target/tcm_fc/tfc_conf.c b/trunk/drivers/target/tcm_fc/tfc_conf.c new file mode 100644 index 000000000000..fcdbbffe88cc --- /dev/null +++ b/trunk/drivers/target/tcm_fc/tfc_conf.c @@ -0,0 +1,677 @@ +/******************************************************************************* + * Filename: tcm_fc.c + * + * This file contains the configfs implementation for TCM_fc fabric node. 
+ * Based on tcm_loop_configfs.c + * + * Copyright (c) 2010 Cisco Systems, Inc. + * Copyright (c) 2009,2010 Rising Tide, Inc. + * Copyright (c) 2009,2010 Linux-iSCSI.org + * + * Copyright (c) 2009,2010 Nicholas A. Bellinger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + ****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tcm_fc.h" + +struct target_fabric_configfs *ft_configfs; + +LIST_HEAD(ft_lport_list); +DEFINE_MUTEX(ft_lport_lock); + +unsigned int ft_debug_logging; +module_param_named(debug_logging, ft_debug_logging, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); + +/* + * Parse WWN. + * If strict, we require lower-case hex and colon separators to be sure + * the name is the same as what would be generated by ft_format_wwn() + * so the name and wwn are mapped one-to-one. + */ +static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict) +{ + const char *cp; + char c; + u32 nibble; + u32 byte = 0; + u32 pos = 0; + u32 err; + + *wwn = 0; + for (cp = name; cp < &name[FT_NAMELEN - 1]; cp++) { + c = *cp; + if (c == '\n' && cp[1] == '\0') + continue; + if (strict && pos++ == 2 && byte++ < 7) { + pos = 0; + if (c == ':') + continue; + err = 1; + goto fail; + } + if (c == '\0') { + err = 2; + if (strict && byte != 8) + goto fail; + return cp - name; + } + err = 3; + if (isdigit(c)) + nibble = c - '0'; + else if (isxdigit(c) && (islower(c) || !strict)) + nibble = tolower(c) - 'a' + 10; + else + goto fail; + *wwn = (*wwn << 4) | nibble; + } + err = 4; +fail: + FT_CONF_DBG("err %u len %zu pos %u byte %u\n", + err, cp - name, pos, byte); + return -1; +} + +ssize_t ft_format_wwn(char *buf, size_t len, u64 wwn) +{ + u8 b[8]; + + put_unaligned_be64(wwn, b); + return snprintf(buf, len, + "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x", + b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]); +} + +static ssize_t ft_wwn_show(void *arg, char *buf) +{ + u64 *wwn = arg; + ssize_t len; + + len = ft_format_wwn(buf, PAGE_SIZE - 2, *wwn); + buf[len++] = '\n'; + return len; +} + +static ssize_t ft_wwn_store(void *arg, const char *buf, size_t len) +{ + ssize_t ret; + u64 wwn; + + ret = ft_parse_wwn(buf, &wwn, 0); + if (ret > 0) + *(u64 *)arg = wwn; + return ret; +} + +/* + * ACL auth ops. 
+ */ + +static ssize_t ft_nacl_show_port_name( + struct se_node_acl *se_nacl, + char *page) +{ + struct ft_node_acl *acl = container_of(se_nacl, + struct ft_node_acl, se_node_acl); + + return ft_wwn_show(&acl->node_auth.port_name, page); +} + +static ssize_t ft_nacl_store_port_name( + struct se_node_acl *se_nacl, + const char *page, + size_t count) +{ + struct ft_node_acl *acl = container_of(se_nacl, + struct ft_node_acl, se_node_acl); + + return ft_wwn_store(&acl->node_auth.port_name, page, count); +} + +TF_NACL_BASE_ATTR(ft, port_name, S_IRUGO | S_IWUSR); + +static ssize_t ft_nacl_show_node_name( + struct se_node_acl *se_nacl, + char *page) +{ + struct ft_node_acl *acl = container_of(se_nacl, + struct ft_node_acl, se_node_acl); + + return ft_wwn_show(&acl->node_auth.node_name, page); +} + +static ssize_t ft_nacl_store_node_name( + struct se_node_acl *se_nacl, + const char *page, + size_t count) +{ + struct ft_node_acl *acl = container_of(se_nacl, + struct ft_node_acl, se_node_acl); + + return ft_wwn_store(&acl->node_auth.node_name, page, count); +} + +TF_NACL_BASE_ATTR(ft, node_name, S_IRUGO | S_IWUSR); + +static struct configfs_attribute *ft_nacl_base_attrs[] = { + &ft_nacl_port_name.attr, + &ft_nacl_node_name.attr, + NULL, +}; + +/* + * ACL ops. + */ + +/* + * Add ACL for an initiator. The ACL is named arbitrarily. + * The port_name and/or node_name are attributes. + */ +static struct se_node_acl *ft_add_acl( + struct se_portal_group *se_tpg, + struct config_group *group, + const char *name) +{ + struct ft_node_acl *acl; + struct ft_tpg *tpg; + u64 wwpn; + u32 q_depth; + + FT_CONF_DBG("add acl %s\n", name); + tpg = container_of(se_tpg, struct ft_tpg, se_tpg); + + if (ft_parse_wwn(name, &wwpn, 1) < 0) + return ERR_PTR(-EINVAL); + + acl = kzalloc(sizeof(struct ft_node_acl), GFP_KERNEL); + if (!(acl)) + return ERR_PTR(-ENOMEM); + acl->node_auth.port_name = wwpn; + + q_depth = 32; /* XXX bogus default - get from tpg? 
*/ + return core_tpg_add_initiator_node_acl(&tpg->se_tpg, + &acl->se_node_acl, name, q_depth); +} + +static void ft_del_acl(struct se_node_acl *se_acl) +{ + struct se_portal_group *se_tpg = se_acl->se_tpg; + struct ft_tpg *tpg; + struct ft_node_acl *acl = container_of(se_acl, + struct ft_node_acl, se_node_acl); + + FT_CONF_DBG("del acl %s\n", + config_item_name(&se_acl->acl_group.cg_item)); + + tpg = container_of(se_tpg, struct ft_tpg, se_tpg); + FT_CONF_DBG("del acl %p se_acl %p tpg %p se_tpg %p\n", + acl, se_acl, tpg, &tpg->se_tpg); + + core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1); + kfree(acl); +} + +struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata) +{ + struct ft_node_acl *found = NULL; + struct ft_node_acl *acl; + struct se_portal_group *se_tpg = &tpg->se_tpg; + struct se_node_acl *se_acl; + + spin_lock_bh(&se_tpg->acl_node_lock); + list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) { + acl = container_of(se_acl, struct ft_node_acl, se_node_acl); + FT_CONF_DBG("acl %p port_name %llx\n", + acl, (unsigned long long)acl->node_auth.port_name); + if (acl->node_auth.port_name == rdata->ids.port_name || + acl->node_auth.node_name == rdata->ids.node_name) { + FT_CONF_DBG("acl %p port_name %llx matched\n", acl, + (unsigned long long)rdata->ids.port_name); + found = acl; + /* XXX need to hold onto ACL */ + break; + } + } + spin_unlock_bh(&se_tpg->acl_node_lock); + return found; +} + +struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg) +{ + struct ft_node_acl *acl; + + acl = kzalloc(sizeof(*acl), GFP_KERNEL); + if (!(acl)) { + printk(KERN_ERR "Unable to allocate struct ft_node_acl\n"); + return NULL; + } + FT_CONF_DBG("acl %p\n", acl); + return &acl->se_node_acl; +} + +static void ft_tpg_release_fabric_acl(struct se_portal_group *se_tpg, + struct se_node_acl *se_acl) +{ + struct ft_node_acl *acl = container_of(se_acl, + struct ft_node_acl, se_node_acl); + + FT_CONF_DBG(KERN_INFO "acl %p\n", acl); + kfree(acl); +} + +/* + * local_port port_group (tpg) ops. + */ +static struct se_portal_group *ft_add_tpg( + struct se_wwn *wwn, + struct config_group *group, + const char *name) +{ + struct ft_lport_acl *lacl; + struct ft_tpg *tpg; + unsigned long index; + int ret; + + FT_CONF_DBG("tcm_fc: add tpg %s\n", name); + + /* + * Name must be "tpgt_" followed by the index. 
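 * Worked example (added for illustration, not part of the original patch):
 * a mkdir of "tpgt_1" under the lport directory reaches this handler with
 * name "tpgt_1"; strict_strtoul(name + 5, 10, &index) yields index == 1,
 * which becomes the TPG tag returned by ft_get_tag() and names the per-TPG
 * kthread "ft_tpg1".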
+ */ + if (strstr(name, "tpgt_") != name) + return NULL; + if (strict_strtoul(name + 5, 10, &index) || index > UINT_MAX) + return NULL; + + lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn); + tpg = kzalloc(sizeof(*tpg), GFP_KERNEL); + if (!tpg) + return NULL; + tpg->index = index; + tpg->lport_acl = lacl; + INIT_LIST_HEAD(&tpg->lun_list); + transport_init_queue_obj(&tpg->qobj); + + ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg, + (void *)tpg, TRANSPORT_TPG_TYPE_NORMAL); + if (ret < 0) { + kfree(tpg); + return NULL; + } + + tpg->thread = kthread_run(ft_thread, tpg, "ft_tpg%lu", index); + if (IS_ERR(tpg->thread)) { + kfree(tpg); + return NULL; + } + + mutex_lock(&ft_lport_lock); + list_add_tail(&tpg->list, &lacl->tpg_list); + mutex_unlock(&ft_lport_lock); + + return &tpg->se_tpg; +} + +static void ft_del_tpg(struct se_portal_group *se_tpg) +{ + struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg); + + FT_CONF_DBG("del tpg %s\n", + config_item_name(&tpg->se_tpg.tpg_group.cg_item)); + + kthread_stop(tpg->thread); + + /* Wait for sessions to be freed thru RCU, for BUG_ON below */ + synchronize_rcu(); + + mutex_lock(&ft_lport_lock); + list_del(&tpg->list); + if (tpg->tport) { + tpg->tport->tpg = NULL; + tpg->tport = NULL; + } + mutex_unlock(&ft_lport_lock); + + core_tpg_deregister(se_tpg); + kfree(tpg); +} + +/* + * Verify that an lport is configured to use the tcm_fc module, and return + * the target port group that should be used. + * + * The caller holds ft_lport_lock. + */ +struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport) +{ + struct ft_lport_acl *lacl; + struct ft_tpg *tpg; + + list_for_each_entry(lacl, &ft_lport_list, list) { + if (lacl->wwpn == lport->wwpn) { + list_for_each_entry(tpg, &lacl->tpg_list, list) + return tpg; /* XXX for now return first entry */ + return NULL; + } + } + return NULL; +} + +/* + * target config instance ops. + */ + +/* + * Add lport to allowed config. + * The name is the WWPN in lower-case ASCII, colon-separated bytes. 
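 * Example (added for illustration, not part of the original patch):
 * "20:00:00:11:22:33:44:55" parses via ft_parse_wwn(name, &wwpn, 1) to
 * wwpn == 0x2000001122334455.  Strict parsing insists on exactly eight
 * lower-case, colon-separated hex bytes so the directory name matches
 * ft_format_wwn() output and the name/wwn mapping stays one-to-one.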
+ */ +static struct se_wwn *ft_add_lport( + struct target_fabric_configfs *tf, + struct config_group *group, + const char *name) +{ + struct ft_lport_acl *lacl; + struct ft_lport_acl *old_lacl; + u64 wwpn; + + FT_CONF_DBG("add lport %s\n", name); + if (ft_parse_wwn(name, &wwpn, 1) < 0) + return NULL; + lacl = kzalloc(sizeof(*lacl), GFP_KERNEL); + if (!lacl) + return NULL; + lacl->wwpn = wwpn; + INIT_LIST_HEAD(&lacl->tpg_list); + + mutex_lock(&ft_lport_lock); + list_for_each_entry(old_lacl, &ft_lport_list, list) { + if (old_lacl->wwpn == wwpn) { + mutex_unlock(&ft_lport_lock); + kfree(lacl); + return NULL; + } + } + list_add_tail(&lacl->list, &ft_lport_list); + ft_format_wwn(lacl->name, sizeof(lacl->name), wwpn); + mutex_unlock(&ft_lport_lock); + + return &lacl->fc_lport_wwn; +} + +static void ft_del_lport(struct se_wwn *wwn) +{ + struct ft_lport_acl *lacl = container_of(wwn, + struct ft_lport_acl, fc_lport_wwn); + + FT_CONF_DBG("del lport %s\n", + config_item_name(&wwn->wwn_group.cg_item)); + mutex_lock(&ft_lport_lock); + list_del(&lacl->list); + mutex_unlock(&ft_lport_lock); + + kfree(lacl); +} + +static ssize_t ft_wwn_show_attr_version( + struct target_fabric_configfs *tf, + char *page) +{ + return sprintf(page, "TCM FC " FT_VERSION " on %s/%s on " + ""UTS_RELEASE"\n", utsname()->sysname, utsname()->machine); +} + +TF_WWN_ATTR_RO(ft, version); + +static struct configfs_attribute *ft_wwn_attrs[] = { + &ft_wwn_version.attr, + NULL, +}; + +static char *ft_get_fabric_name(void) +{ + return "fc"; +} + +static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg) +{ + struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr; + + return tpg->lport_acl->name; +} + +static u16 ft_get_tag(struct se_portal_group *se_tpg) +{ + struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr; + + /* + * This tag is used when forming SCSI Name identifier in EVPD=1 0x83 + * to represent the SCSI Target Port. 
+ */ + return tpg->index; +} + +static u32 ft_get_default_depth(struct se_portal_group *se_tpg) +{ + return 1; +} + +static int ft_check_false(struct se_portal_group *se_tpg) +{ + return 0; +} + +static void ft_set_default_node_attr(struct se_node_acl *se_nacl) +{ +} + +static u16 ft_get_fabric_sense_len(void) +{ + return 0; +} + +static u16 ft_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_len) +{ + return 0; +} + +static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg) +{ + struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr; + + return tpg->index; +} + +static u64 ft_pack_lun(unsigned int index) +{ + WARN_ON(index >= 256); + /* Caller wants this byte-swapped */ + return cpu_to_le64((index & 0xff) << 8); +} + +static struct target_core_fabric_ops ft_fabric_ops = { + .get_fabric_name = ft_get_fabric_name, + .get_fabric_proto_ident = fc_get_fabric_proto_ident, + .tpg_get_wwn = ft_get_fabric_wwn, + .tpg_get_tag = ft_get_tag, + .tpg_get_default_depth = ft_get_default_depth, + .tpg_get_pr_transport_id = fc_get_pr_transport_id, + .tpg_get_pr_transport_id_len = fc_get_pr_transport_id_len, + .tpg_parse_pr_out_transport_id = fc_parse_pr_out_transport_id, + .tpg_check_demo_mode = ft_check_false, + .tpg_check_demo_mode_cache = ft_check_false, + .tpg_check_demo_mode_write_protect = ft_check_false, + .tpg_check_prod_mode_write_protect = ft_check_false, + .tpg_alloc_fabric_acl = ft_tpg_alloc_fabric_acl, + .tpg_release_fabric_acl = ft_tpg_release_fabric_acl, + .tpg_get_inst_index = ft_tpg_get_inst_index, + .check_stop_free = ft_check_stop_free, + .release_cmd_to_pool = ft_release_cmd, + .release_cmd_direct = ft_release_cmd, + .shutdown_session = ft_sess_shutdown, + .close_session = ft_sess_close, + .stop_session = ft_sess_stop, + .fall_back_to_erl0 = ft_sess_set_erl0, + .sess_logged_in = ft_sess_logged_in, + .sess_get_index = ft_sess_get_index, + .sess_get_initiator_sid = NULL, + .write_pending = ft_write_pending, + .write_pending_status = ft_write_pending_status, + .set_default_node_attributes = ft_set_default_node_attr, + .get_task_tag = ft_get_task_tag, + .get_cmd_state = ft_get_cmd_state, + .new_cmd_failure = ft_new_cmd_failure, + .queue_data_in = ft_queue_data_in, + .queue_status = ft_queue_status, + .queue_tm_rsp = ft_queue_tm_resp, + .get_fabric_sense_len = ft_get_fabric_sense_len, + .set_fabric_sense_len = ft_set_fabric_sense_len, + .is_state_remove = ft_is_state_remove, + .pack_lun = ft_pack_lun, + /* + * Setup function pointers for generic logic in + * target_core_fabric_configfs.c + */ + .fabric_make_wwn = &ft_add_lport, + .fabric_drop_wwn = &ft_del_lport, + .fabric_make_tpg = &ft_add_tpg, + .fabric_drop_tpg = &ft_del_tpg, + .fabric_post_link = NULL, + .fabric_pre_unlink = NULL, + .fabric_make_np = NULL, + .fabric_drop_np = NULL, + .fabric_make_nodeacl = &ft_add_acl, + .fabric_drop_nodeacl = &ft_del_acl, +}; + +int ft_register_configfs(void) +{ + struct target_fabric_configfs *fabric; + int ret; + + /* + * Register the top level struct config_item_type with TCM core + */ + fabric = target_fabric_configfs_init(THIS_MODULE, "fc"); + if (!fabric) { + printk(KERN_INFO "%s: target_fabric_configfs_init() failed!\n", + __func__); + return -1; + } + fabric->tf_ops = ft_fabric_ops; + + /* Allowing support for task_sg_chaining */ + fabric->tf_ops.task_sg_chaining = 1; + + /* + * Setup default attribute lists for various fabric->tf_cit_tmpl + */ + TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = ft_wwn_attrs; + TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL; + 
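	/*
	 * Illustrative sketch (added, not part of the original patch) of the
	 * configfs tree these templates produce, with paths assumed from the
	 * "fc" fabric name registered above:
	 *
	 *   /sys/kernel/config/target/fc/
	 *     <lport wwpn>/                  ft_add_lport()
	 *       tpgt_<n>/                    ft_add_tpg()
	 *         acls/<initiator wwpn>/     ft_add_acl()
	 *           port_name, node_name     ft_nacl_base_attrs
	 *
	 * Only the WWN and nodeacl-base item types get custom attributes here;
	 * the remaining templates keep ct_attrs == NULL.
	 */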
TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = + ft_nacl_base_attrs; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + /* + * register the fabric for use within TCM + */ + ret = target_fabric_configfs_register(fabric); + if (ret < 0) { + FT_CONF_DBG("target_fabric_configfs_register() for" + " FC Target failed!\n"); + printk(KERN_INFO + "%s: target_fabric_configfs_register() failed!\n", + __func__); + target_fabric_configfs_free(fabric); + return -1; + } + + /* + * Setup our local pointer to *fabric. + */ + ft_configfs = fabric; + return 0; +} + +void ft_deregister_configfs(void) +{ + if (!ft_configfs) + return; + target_fabric_configfs_deregister(ft_configfs); + ft_configfs = NULL; +} + +static struct notifier_block ft_notifier = { + .notifier_call = ft_lport_notify +}; + +static int __init ft_init(void) +{ + if (ft_register_configfs()) + return -1; + if (fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov)) { + ft_deregister_configfs(); + return -1; + } + blocking_notifier_chain_register(&fc_lport_notifier_head, &ft_notifier); + fc_lport_iterate(ft_lport_add, NULL); + return 0; +} + +static void __exit ft_exit(void) +{ + blocking_notifier_chain_unregister(&fc_lport_notifier_head, + &ft_notifier); + fc_fc4_deregister_provider(FC_TYPE_FCP, &ft_prov); + fc_lport_iterate(ft_lport_del, NULL); + ft_deregister_configfs(); + synchronize_rcu(); +} + +#ifdef MODULE +MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION); +MODULE_LICENSE("GPL"); +module_init(ft_init); +module_exit(ft_exit); +#endif /* MODULE */ diff --git a/trunk/drivers/target/tcm_fc/tfc_io.c b/trunk/drivers/target/tcm_fc/tfc_io.c new file mode 100644 index 000000000000..4c3c0efbe13f --- /dev/null +++ b/trunk/drivers/target/tcm_fc/tfc_io.c @@ -0,0 +1,374 @@ +/* + * Copyright (c) 2010 Cisco Systems, Inc. + * + * Portions based on tcm_loop_fabric_scsi.c and libfc/fc_fcp.c + * + * Copyright (c) 2007 Intel Corporation. All rights reserved. + * Copyright (c) 2008 Red Hat, Inc. All rights reserved. + * Copyright (c) 2008 Mike Christie + * Copyright (c) 2009 Rising Tide, Inc. + * Copyright (c) 2009 Linux-iSCSI.org + * Copyright (c) 2009 Nicholas A. Bellinger + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +/* XXX TBD some includes may be extraneous */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tcm_fc.h" + +/* + * Deliver read data back to initiator. 
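 * Descriptive note (added, not part of the original patch): this walks the
 * command's se_mem list (or the linear t_task_buf when no list is attached),
 * slices the data into frames no larger than the session max_frame (or
 * lso_max when the lport does sequence offload), attaches pages as skb
 * fragments when the total transfer length is a multiple of four bytes and
 * copies into the frame payload otherwise, then sends each frame on the
 * exchange with seq_send().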
+ * XXX TBD handle resource problems later. + */ +int ft_queue_data_in(struct se_cmd *se_cmd) +{ + struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); + struct se_transport_task *task; + struct fc_frame *fp = NULL; + struct fc_exch *ep; + struct fc_lport *lport; + struct se_mem *mem; + size_t remaining; + u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF; + u32 mem_off; + u32 fh_off = 0; + u32 frame_off = 0; + size_t frame_len = 0; + size_t mem_len; + size_t tlen; + size_t off_in_page; + struct page *page; + int use_sg; + int error; + void *page_addr; + void *from; + void *to = NULL; + + ep = fc_seq_exch(cmd->seq); + lport = ep->lp; + cmd->seq = lport->tt.seq_start_next(cmd->seq); + + task = T_TASK(se_cmd); + BUG_ON(!task); + remaining = se_cmd->data_length; + + /* + * Setup to use first mem list entry if any. + */ + if (task->t_tasks_se_num) { + mem = list_first_entry(task->t_mem_list, + struct se_mem, se_list); + mem_len = mem->se_len; + mem_off = mem->se_off; + page = mem->se_page; + } else { + mem = NULL; + mem_len = remaining; + mem_off = 0; + page = NULL; + } + + /* no scatter/gather in skb for odd word length due to fc_seq_send() */ + use_sg = !(remaining % 4); + + while (remaining) { + if (!mem_len) { + BUG_ON(!mem); + mem = list_entry(mem->se_list.next, + struct se_mem, se_list); + mem_len = min((size_t)mem->se_len, remaining); + mem_off = mem->se_off; + page = mem->se_page; + } + if (!frame_len) { + /* + * If lport's has capability of Large Send Offload LSO) + * , then allow 'frame_len' to be as big as 'lso_max' + * if indicated transfer length is >= lport->lso_max + */ + frame_len = (lport->seq_offload) ? lport->lso_max : + cmd->sess->max_frame; + frame_len = min(frame_len, remaining); + fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len); + if (!fp) + return -ENOMEM; + to = fc_frame_payload_get(fp, 0); + fh_off = frame_off; + frame_off += frame_len; + /* + * Setup the frame's max payload which is used by base + * driver to indicate HW about max frame size, so that + * HW can do fragmentation appropriately based on + * "gso_max_size" of underline netdev. + */ + fr_max_payload(fp) = cmd->sess->max_frame; + } + tlen = min(mem_len, frame_len); + + if (use_sg) { + if (!mem) { + BUG_ON(!task->t_task_buf); + page_addr = task->t_task_buf + mem_off; + /* + * In this case, offset is 'offset_in_page' of + * (t_task_buf + mem_off) instead of 'mem_off'. 
+ */ + off_in_page = offset_in_page(page_addr); + page = virt_to_page(page_addr); + tlen = min(tlen, PAGE_SIZE - off_in_page); + } else + off_in_page = mem_off; + BUG_ON(!page); + get_page(page); + skb_fill_page_desc(fp_skb(fp), + skb_shinfo(fp_skb(fp))->nr_frags, + page, off_in_page, tlen); + fr_len(fp) += tlen; + fp_skb(fp)->data_len += tlen; + fp_skb(fp)->truesize += + PAGE_SIZE << compound_order(page); + } else if (mem) { + BUG_ON(!page); + from = kmap_atomic(page + (mem_off >> PAGE_SHIFT), + KM_SOFTIRQ0); + page_addr = from; + from += mem_off & ~PAGE_MASK; + tlen = min(tlen, (size_t)(PAGE_SIZE - + (mem_off & ~PAGE_MASK))); + memcpy(to, from, tlen); + kunmap_atomic(page_addr, KM_SOFTIRQ0); + to += tlen; + } else { + from = task->t_task_buf + mem_off; + memcpy(to, from, tlen); + to += tlen; + } + + mem_off += tlen; + mem_len -= tlen; + frame_len -= tlen; + remaining -= tlen; + + if (frame_len && + (skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN)) + continue; + if (!remaining) + f_ctl |= FC_FC_END_SEQ; + fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid, + FC_TYPE_FCP, f_ctl, fh_off); + error = lport->tt.seq_send(lport, cmd->seq, fp); + if (error) { + /* XXX For now, initiator will retry */ + if (printk_ratelimit()) + printk(KERN_ERR "%s: Failed to send frame %p, " + "xid <0x%x>, remaining <0x%x>, " + "lso_max <0x%x>\n", + __func__, fp, ep->xid, + remaining, lport->lso_max); + } + } + return ft_queue_status(se_cmd); +} + +/* + * Receive write data frame. + */ +void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) +{ + struct se_cmd *se_cmd = &cmd->se_cmd; + struct fc_seq *seq = cmd->seq; + struct fc_exch *ep; + struct fc_lport *lport; + struct se_transport_task *task; + struct fc_frame_header *fh; + struct se_mem *mem; + u32 mem_off; + u32 rel_off; + size_t frame_len; + size_t mem_len; + size_t tlen; + struct page *page; + void *page_addr; + void *from; + void *to; + u32 f_ctl; + void *buf; + + task = T_TASK(se_cmd); + BUG_ON(!task); + + fh = fc_frame_header_get(fp); + if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF)) + goto drop; + + /* + * Doesn't expect even single byte of payload. Payload + * is expected to be copied directly to user buffers + * due to DDP (Large Rx offload) feature, hence + * BUG_ON if BUF is non-NULL + */ + buf = fc_frame_payload_get(fp, 1); + if (cmd->was_ddp_setup && buf) { + printk(KERN_INFO "%s: When DDP was setup, not expected to" + "receive frame with payload, Payload shall be" + "copied directly to buffer instead of coming " + "via. legacy receive queues\n", __func__); + BUG_ON(buf); + } + + /* + * If ft_cmd indicated 'ddp_setup', in that case only the last frame + * should come with 'TSI bit being set'. If 'TSI bit is not set and if + * data frame appears here, means error condition. In both the cases + * release the DDP context (ddp_put) and in error case, as well + * initiate error recovery mechanism. 
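 * Clarification (added, not part of the original patch): "TSI" above refers
 * to the Sequence Initiative bit in the frame's F_CTL field (FC_FC_SEQ_INIT
 * below); the initiator sets it on the last data frame of the write burst,
 * which is why only that frame's header is expected on the legacy receive
 * path while DDP is active.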
+ */ + ep = fc_seq_exch(seq); + if (cmd->was_ddp_setup) { + BUG_ON(!ep); + lport = ep->lp; + BUG_ON(!lport); + } + if (cmd->was_ddp_setup && ep->xid != FC_XID_UNKNOWN) { + f_ctl = ntoh24(fh->fh_f_ctl); + /* + * If TSI bit set in f_ctl, means last write data frame is + * received successfully where payload is posted directly + * to user buffer and only the last frame's header is posted + * in legacy receive queue + */ + if (f_ctl & FC_FC_SEQ_INIT) { /* TSI bit set in FC frame */ + cmd->write_data_len = lport->tt.ddp_done(lport, + ep->xid); + goto last_frame; + } else { + /* + * Updating the write_data_len may be meaningless at + * this point, but just in case if required in future + * for debugging or any other purpose + */ + printk(KERN_ERR "%s: Received frame with TSI bit not" + " being SET, dropping the frame, " + "cmd->sg <%p>, cmd->sg_cnt <0x%x>\n", + __func__, cmd->sg, cmd->sg_cnt); + cmd->write_data_len = lport->tt.ddp_done(lport, + ep->xid); + lport->tt.seq_exch_abort(cmd->seq, 0); + goto drop; + } + } + + rel_off = ntohl(fh->fh_parm_offset); + frame_len = fr_len(fp); + if (frame_len <= sizeof(*fh)) + goto drop; + frame_len -= sizeof(*fh); + from = fc_frame_payload_get(fp, 0); + if (rel_off >= se_cmd->data_length) + goto drop; + if (frame_len + rel_off > se_cmd->data_length) + frame_len = se_cmd->data_length - rel_off; + + /* + * Setup to use first mem list entry if any. + */ + if (task->t_tasks_se_num) { + mem = list_first_entry(task->t_mem_list, + struct se_mem, se_list); + mem_len = mem->se_len; + mem_off = mem->se_off; + page = mem->se_page; + } else { + mem = NULL; + page = NULL; + mem_off = 0; + mem_len = frame_len; + } + + while (frame_len) { + if (!mem_len) { + BUG_ON(!mem); + mem = list_entry(mem->se_list.next, + struct se_mem, se_list); + mem_len = mem->se_len; + mem_off = mem->se_off; + page = mem->se_page; + } + if (rel_off >= mem_len) { + rel_off -= mem_len; + mem_len = 0; + continue; + } + mem_off += rel_off; + mem_len -= rel_off; + rel_off = 0; + + tlen = min(mem_len, frame_len); + + if (mem) { + to = kmap_atomic(page + (mem_off >> PAGE_SHIFT), + KM_SOFTIRQ0); + page_addr = to; + to += mem_off & ~PAGE_MASK; + tlen = min(tlen, (size_t)(PAGE_SIZE - + (mem_off & ~PAGE_MASK))); + memcpy(to, from, tlen); + kunmap_atomic(page_addr, KM_SOFTIRQ0); + } else { + to = task->t_task_buf + mem_off; + memcpy(to, from, tlen); + } + from += tlen; + frame_len -= tlen; + mem_off += tlen; + mem_len -= tlen; + cmd->write_data_len += tlen; + } +last_frame: + if (cmd->write_data_len == se_cmd->data_length) + transport_generic_handle_data(se_cmd); +drop: + fc_frame_free(fp); +} diff --git a/trunk/drivers/target/tcm_fc/tfc_sess.c b/trunk/drivers/target/tcm_fc/tfc_sess.c new file mode 100644 index 000000000000..a3bd57f2ea32 --- /dev/null +++ b/trunk/drivers/target/tcm_fc/tfc_sess.c @@ -0,0 +1,541 @@ +/* + * Copyright (c) 2010 Cisco Systems, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +/* XXX TBD some includes may be extraneous */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "tcm_fc.h" + +static void ft_sess_delete_all(struct ft_tport *); + +/* + * Lookup or allocate target local port. + * Caller holds ft_lport_lock. + */ +static struct ft_tport *ft_tport_create(struct fc_lport *lport) +{ + struct ft_tpg *tpg; + struct ft_tport *tport; + int i; + + tport = rcu_dereference(lport->prov[FC_TYPE_FCP]); + if (tport && tport->tpg) + return tport; + + tpg = ft_lport_find_tpg(lport); + if (!tpg) + return NULL; + + if (tport) { + tport->tpg = tpg; + return tport; + } + + tport = kzalloc(sizeof(*tport), GFP_KERNEL); + if (!tport) + return NULL; + + tport->lport = lport; + tport->tpg = tpg; + tpg->tport = tport; + for (i = 0; i < FT_SESS_HASH_SIZE; i++) + INIT_HLIST_HEAD(&tport->hash[i]); + + rcu_assign_pointer(lport->prov[FC_TYPE_FCP], tport); + return tport; +} + +/* + * Free tport via RCU. + */ +static void ft_tport_rcu_free(struct rcu_head *rcu) +{ + struct ft_tport *tport = container_of(rcu, struct ft_tport, rcu); + + kfree(tport); +} + +/* + * Delete a target local port. + * Caller holds ft_lport_lock. + */ +static void ft_tport_delete(struct ft_tport *tport) +{ + struct fc_lport *lport; + struct ft_tpg *tpg; + + ft_sess_delete_all(tport); + lport = tport->lport; + BUG_ON(tport != lport->prov[FC_TYPE_FCP]); + rcu_assign_pointer(lport->prov[FC_TYPE_FCP], NULL); + + tpg = tport->tpg; + if (tpg) { + tpg->tport = NULL; + tport->tpg = NULL; + } + call_rcu(&tport->rcu, ft_tport_rcu_free); +} + +/* + * Add local port. + * Called thru fc_lport_iterate(). + */ +void ft_lport_add(struct fc_lport *lport, void *arg) +{ + mutex_lock(&ft_lport_lock); + ft_tport_create(lport); + mutex_unlock(&ft_lport_lock); +} + +/* + * Delete local port. + * Called thru fc_lport_iterate(). + */ +void ft_lport_del(struct fc_lport *lport, void *arg) +{ + struct ft_tport *tport; + + mutex_lock(&ft_lport_lock); + tport = lport->prov[FC_TYPE_FCP]; + if (tport) + ft_tport_delete(tport); + mutex_unlock(&ft_lport_lock); +} + +/* + * Notification of local port change from libfc. + * Create or delete local port and associated tport. + */ +int ft_lport_notify(struct notifier_block *nb, unsigned long event, void *arg) +{ + struct fc_lport *lport = arg; + + switch (event) { + case FC_LPORT_EV_ADD: + ft_lport_add(lport, NULL); + break; + case FC_LPORT_EV_DEL: + ft_lport_del(lport, NULL); + break; + } + return NOTIFY_DONE; +} + +/* + * Hash function for FC_IDs. + */ +static u32 ft_sess_hash(u32 port_id) +{ + return hash_32(port_id, FT_SESS_HASH_BITS); +} + +/* + * Find session in local port. + * Sessions and hash lists are RCU-protected. + * A reference is taken which must be eventually freed. 
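 * Note (added for clarity, not part of the original patch): the lookup key
 * is the 24-bit FC_ID of the remote port, folded into one of
 * FT_SESS_HASH_SIZE buckets by ft_sess_hash()/hash_32(); the reference is
 * dropped again with ft_sess_put() once the request has been handled.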
+ */ +static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id) +{ + struct ft_tport *tport; + struct hlist_head *head; + struct hlist_node *pos; + struct ft_sess *sess; + + rcu_read_lock(); + tport = rcu_dereference(lport->prov[FC_TYPE_FCP]); + if (!tport) + goto out; + + head = &tport->hash[ft_sess_hash(port_id)]; + hlist_for_each_entry_rcu(sess, pos, head, hash) { + if (sess->port_id == port_id) { + kref_get(&sess->kref); + rcu_read_unlock(); + FT_SESS_DBG("port_id %x found %p\n", port_id, sess); + return sess; + } + } +out: + rcu_read_unlock(); + FT_SESS_DBG("port_id %x not found\n", port_id); + return NULL; +} + +/* + * Allocate session and enter it in the hash for the local port. + * Caller holds ft_lport_lock. + */ +static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id, + struct ft_node_acl *acl) +{ + struct ft_sess *sess; + struct hlist_head *head; + struct hlist_node *pos; + + head = &tport->hash[ft_sess_hash(port_id)]; + hlist_for_each_entry_rcu(sess, pos, head, hash) + if (sess->port_id == port_id) + return sess; + + sess = kzalloc(sizeof(*sess), GFP_KERNEL); + if (!sess) + return NULL; + + sess->se_sess = transport_init_session(); + if (!sess->se_sess) { + kfree(sess); + return NULL; + } + sess->se_sess->se_node_acl = &acl->se_node_acl; + sess->tport = tport; + sess->port_id = port_id; + kref_init(&sess->kref); /* ref for table entry */ + hlist_add_head_rcu(&sess->hash, head); + tport->sess_count++; + + FT_SESS_DBG("port_id %x sess %p\n", port_id, sess); + + transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl, + sess->se_sess, sess); + return sess; +} + +/* + * Unhash the session. + * Caller holds ft_lport_lock. + */ +static void ft_sess_unhash(struct ft_sess *sess) +{ + struct ft_tport *tport = sess->tport; + + hlist_del_rcu(&sess->hash); + BUG_ON(!tport->sess_count); + tport->sess_count--; + sess->port_id = -1; + sess->params = 0; +} + +/* + * Delete session from hash. + * Caller holds ft_lport_lock. + */ +static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id) +{ + struct hlist_head *head; + struct hlist_node *pos; + struct ft_sess *sess; + + head = &tport->hash[ft_sess_hash(port_id)]; + hlist_for_each_entry_rcu(sess, pos, head, hash) { + if (sess->port_id == port_id) { + ft_sess_unhash(sess); + return sess; + } + } + return NULL; +} + +/* + * Delete all sessions from tport. + * Caller holds ft_lport_lock. + */ +static void ft_sess_delete_all(struct ft_tport *tport) +{ + struct hlist_head *head; + struct hlist_node *pos; + struct ft_sess *sess; + + for (head = tport->hash; + head < &tport->hash[FT_SESS_HASH_SIZE]; head++) { + hlist_for_each_entry_rcu(sess, pos, head, hash) { + ft_sess_unhash(sess); + transport_deregister_session_configfs(sess->se_sess); + ft_sess_put(sess); /* release from table */ + } + } +} + +/* + * TCM ops for sessions. + */ + +/* + * Determine whether session is allowed to be shutdown in the current context. + * Returns non-zero if the session should be shutdown. + */ +int ft_sess_shutdown(struct se_session *se_sess) +{ + struct ft_sess *sess = se_sess->fabric_sess_ptr; + + FT_SESS_DBG("port_id %x\n", sess->port_id); + return 1; +} + +/* + * Remove session and send PRLO. + * This is called when the ACL is being deleted or queue depth is changing. 
+ */ +void ft_sess_close(struct se_session *se_sess) +{ + struct ft_sess *sess = se_sess->fabric_sess_ptr; + struct fc_lport *lport; + u32 port_id; + + mutex_lock(&ft_lport_lock); + lport = sess->tport->lport; + port_id = sess->port_id; + if (port_id == -1) { + mutex_lock(&ft_lport_lock); + return; + } + FT_SESS_DBG("port_id %x\n", port_id); + ft_sess_unhash(sess); + mutex_unlock(&ft_lport_lock); + transport_deregister_session_configfs(se_sess); + ft_sess_put(sess); + /* XXX Send LOGO or PRLO */ + synchronize_rcu(); /* let transport deregister happen */ +} + +void ft_sess_stop(struct se_session *se_sess, int sess_sleep, int conn_sleep) +{ + struct ft_sess *sess = se_sess->fabric_sess_ptr; + + FT_SESS_DBG("port_id %x\n", sess->port_id); +} + +int ft_sess_logged_in(struct se_session *se_sess) +{ + struct ft_sess *sess = se_sess->fabric_sess_ptr; + + return sess->port_id != -1; +} + +u32 ft_sess_get_index(struct se_session *se_sess) +{ + struct ft_sess *sess = se_sess->fabric_sess_ptr; + + return sess->port_id; /* XXX TBD probably not what is needed */ +} + +u32 ft_sess_get_port_name(struct se_session *se_sess, + unsigned char *buf, u32 len) +{ + struct ft_sess *sess = se_sess->fabric_sess_ptr; + + return ft_format_wwn(buf, len, sess->port_name); +} + +void ft_sess_set_erl0(struct se_session *se_sess) +{ + /* XXX TBD called when out of memory */ +} + +/* + * libfc ops involving sessions. + */ + +static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len, + const struct fc_els_spp *rspp, struct fc_els_spp *spp) +{ + struct ft_tport *tport; + struct ft_sess *sess; + struct ft_node_acl *acl; + u32 fcp_parm; + + tport = ft_tport_create(rdata->local_port); + if (!tport) + return 0; /* not a target for this local port */ + + acl = ft_acl_get(tport->tpg, rdata); + if (!acl) + return 0; + + if (!rspp) + goto fill; + + if (rspp->spp_flags & (FC_SPP_OPA_VAL | FC_SPP_RPA_VAL)) + return FC_SPP_RESP_NO_PA; + + /* + * If both target and initiator bits are off, the SPP is invalid. + */ + fcp_parm = ntohl(rspp->spp_params); + if (!(fcp_parm & (FCP_SPPF_INIT_FCN | FCP_SPPF_TARG_FCN))) + return FC_SPP_RESP_INVL; + + /* + * Create session (image pair) only if requested by + * EST_IMG_PAIR flag and if the requestor is an initiator. + */ + if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR) { + spp->spp_flags |= FC_SPP_EST_IMG_PAIR; + if (!(fcp_parm & FCP_SPPF_INIT_FCN)) + return FC_SPP_RESP_CONF; + sess = ft_sess_create(tport, rdata->ids.port_id, acl); + if (!sess) + return FC_SPP_RESP_RES; + if (!sess->params) + rdata->prli_count++; + sess->params = fcp_parm; + sess->port_name = rdata->ids.port_name; + sess->max_frame = rdata->maxframe_size; + + /* XXX TBD - clearing actions. unit attn, see 4.10 */ + } + + /* + * OR in our service parameters with other provider (initiator), if any. + * TBD XXX - indicate RETRY capability? + */ +fill: + fcp_parm = ntohl(spp->spp_params); + spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN); + return FC_SPP_RESP_ACK; +} + +/** + * tcm_fcp_prli() - Handle incoming or outgoing PRLI for the FCP target + * @rdata: remote port private + * @spp_len: service parameter page length + * @rspp: received service parameter page (NULL for outgoing PRLI) + * @spp: response service parameter page + * + * Returns spp response code. 
+ */ +static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len, + const struct fc_els_spp *rspp, struct fc_els_spp *spp) +{ + int ret; + + mutex_lock(&ft_lport_lock); + ret = ft_prli_locked(rdata, spp_len, rspp, spp); + mutex_unlock(&ft_lport_lock); + FT_SESS_DBG("port_id %x flags %x ret %x\n", + rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret); + return ret; +} + +static void ft_sess_rcu_free(struct rcu_head *rcu) +{ + struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu); + + transport_deregister_session(sess->se_sess); + kfree(sess); +} + +static void ft_sess_free(struct kref *kref) +{ + struct ft_sess *sess = container_of(kref, struct ft_sess, kref); + + call_rcu(&sess->rcu, ft_sess_rcu_free); +} + +void ft_sess_put(struct ft_sess *sess) +{ + int sess_held = atomic_read(&sess->kref.refcount); + + BUG_ON(!sess_held); + kref_put(&sess->kref, ft_sess_free); +} + +static void ft_prlo(struct fc_rport_priv *rdata) +{ + struct ft_sess *sess; + struct ft_tport *tport; + + mutex_lock(&ft_lport_lock); + tport = rcu_dereference(rdata->local_port->prov[FC_TYPE_FCP]); + if (!tport) { + mutex_unlock(&ft_lport_lock); + return; + } + sess = ft_sess_delete(tport, rdata->ids.port_id); + if (!sess) { + mutex_unlock(&ft_lport_lock); + return; + } + mutex_unlock(&ft_lport_lock); + transport_deregister_session_configfs(sess->se_sess); + ft_sess_put(sess); /* release from table */ + rdata->prli_count--; + /* XXX TBD - clearing actions. unit attn, see 4.10 */ +} + +/* + * Handle incoming FCP request. + * Caller has verified that the frame is type FCP. + */ +static void ft_recv(struct fc_lport *lport, struct fc_frame *fp) +{ + struct ft_sess *sess; + u32 sid = fc_frame_sid(fp); + + FT_SESS_DBG("sid %x\n", sid); + + sess = ft_sess_get(lport, sid); + if (!sess) { + FT_SESS_DBG("sid %x sess lookup failed\n", sid); + /* TBD XXX - if FCP_CMND, send PRLO */ + fc_frame_free(fp); + return; + } + ft_recv_req(sess, fp); /* must do ft_sess_put() */ +} + +/* + * Provider ops for libfc. + */ +struct fc4_prov ft_prov = { + .prli = ft_prli, + .prlo = ft_prlo, + .recv = ft_recv, + .module = THIS_MODULE, +}; diff --git a/trunk/drivers/tty/n_gsm.c b/trunk/drivers/tty/n_gsm.c index 47f8cdb207f1..74273e638c0d 100644 --- a/trunk/drivers/tty/n_gsm.c +++ b/trunk/drivers/tty/n_gsm.c @@ -1658,8 +1658,12 @@ static void gsm_queue(struct gsm_mux *gsm) if ((gsm->control & ~PF) == UI) gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->len); - /* generate final CRC with received FCS */ - gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->received_fcs); + if (gsm->encoding == 0){ + /* WARNING: gsm->received_fcs is used for gsm->encoding = 0 only. + In this case it contain the last piece of data + required to generate final CRC */ + gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->received_fcs); + } if (gsm->fcs != GOOD_FCS) { gsm->bad_fcs++; if (debug & 4) diff --git a/trunk/drivers/tty/serial/Kconfig b/trunk/drivers/tty/serial/Kconfig index 80484af781e1..b1f0f83b870d 100644 --- a/trunk/drivers/tty/serial/Kconfig +++ b/trunk/drivers/tty/serial/Kconfig @@ -1391,6 +1391,14 @@ config SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE help Support for Console on the NWP serial ports. +config SERIAL_LANTIQ + bool "Lantiq serial driver" + depends on LANTIQ + select SERIAL_CORE + select SERIAL_CORE_CONSOLE + help + Support for console and UART on Lantiq SoCs. 
+ config SERIAL_QE tristate "Freescale QUICC Engine serial port support" depends on QUICC_ENGINE diff --git a/trunk/drivers/tty/serial/Makefile b/trunk/drivers/tty/serial/Makefile index fee0690ef8e3..35276043d9d1 100644 --- a/trunk/drivers/tty/serial/Makefile +++ b/trunk/drivers/tty/serial/Makefile @@ -94,3 +94,4 @@ obj-$(CONFIG_SERIAL_IFX6X60) += ifx6x60.o obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o +obj-$(CONFIG_SERIAL_LANTIQ) += lantiq.o diff --git a/trunk/drivers/tty/serial/imx.c b/trunk/drivers/tty/serial/imx.c index cb36b0d4ef3c..62df72d9f0aa 100644 --- a/trunk/drivers/tty/serial/imx.c +++ b/trunk/drivers/tty/serial/imx.c @@ -382,12 +382,13 @@ static void imx_start_tx(struct uart_port *port) static irqreturn_t imx_rtsint(int irq, void *dev_id) { struct imx_port *sport = dev_id; - unsigned int val = readl(sport->port.membase + USR1) & USR1_RTSS; + unsigned int val; unsigned long flags; spin_lock_irqsave(&sport->port.lock, flags); writel(USR1_RTSD, sport->port.membase + USR1); + val = readl(sport->port.membase + USR1) & USR1_RTSS; uart_handle_cts_change(&sport->port, !!val); wake_up_interruptible(&sport->port.state->port.delta_msr_wait); diff --git a/trunk/drivers/tty/serial/lantiq.c b/trunk/drivers/tty/serial/lantiq.c new file mode 100644 index 000000000000..58cf279ed879 --- /dev/null +++ b/trunk/drivers/tty/serial/lantiq.c @@ -0,0 +1,756 @@ +/* + * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Copyright (C) 2004 Infineon IFAP DC COM CPE + * Copyright (C) 2007 Felix Fietkau + * Copyright (C) 2007 John Crispin + * Copyright (C) 2010 Thomas Langer, + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define PORT_LTQ_ASC 111 +#define MAXPORTS 2 +#define UART_DUMMY_UER_RX 1 +#define DRVNAME "ltq_asc" +#ifdef __BIG_ENDIAN +#define LTQ_ASC_TBUF (0x0020 + 3) +#define LTQ_ASC_RBUF (0x0024 + 3) +#else +#define LTQ_ASC_TBUF 0x0020 +#define LTQ_ASC_RBUF 0x0024 +#endif +#define LTQ_ASC_FSTAT 0x0048 +#define LTQ_ASC_WHBSTATE 0x0018 +#define LTQ_ASC_STATE 0x0014 +#define LTQ_ASC_IRNCR 0x00F8 +#define LTQ_ASC_CLC 0x0000 +#define LTQ_ASC_ID 0x0008 +#define LTQ_ASC_PISEL 0x0004 +#define LTQ_ASC_TXFCON 0x0044 +#define LTQ_ASC_RXFCON 0x0040 +#define LTQ_ASC_CON 0x0010 +#define LTQ_ASC_BG 0x0050 +#define LTQ_ASC_IRNREN 0x00F4 + +#define ASC_IRNREN_TX 0x1 +#define ASC_IRNREN_RX 0x2 +#define ASC_IRNREN_ERR 0x4 +#define ASC_IRNREN_TX_BUF 0x8 +#define ASC_IRNCR_TIR 0x1 +#define ASC_IRNCR_RIR 0x2 +#define ASC_IRNCR_EIR 0x4 + +#define ASCOPT_CSIZE 0x3 +#define TXFIFO_FL 1 +#define RXFIFO_FL 1 +#define ASCCLC_DISS 0x2 +#define ASCCLC_RMCMASK 0x0000FF00 +#define ASCCLC_RMCOFFSET 8 +#define ASCCON_M_8ASYNC 0x0 +#define ASCCON_M_7ASYNC 0x2 +#define ASCCON_ODD 0x00000020 +#define ASCCON_STP 0x00000080 +#define ASCCON_BRS 0x00000100 +#define ASCCON_FDE 0x00000200 +#define ASCCON_R 0x00008000 +#define ASCCON_FEN 0x00020000 +#define ASCCON_ROEN 0x00080000 +#define ASCCON_TOEN 0x00100000 +#define ASCSTATE_PE 0x00010000 +#define ASCSTATE_FE 0x00020000 +#define ASCSTATE_ROE 0x00080000 +#define ASCSTATE_ANY (ASCSTATE_ROE|ASCSTATE_PE|ASCSTATE_FE) +#define ASCWHBSTATE_CLRREN 0x00000001 +#define ASCWHBSTATE_SETREN 0x00000002 +#define ASCWHBSTATE_CLRPE 0x00000004 +#define ASCWHBSTATE_CLRFE 0x00000008 +#define ASCWHBSTATE_CLRROE 0x00000020 +#define ASCTXFCON_TXFEN 0x0001 +#define ASCTXFCON_TXFFLU 0x0002 +#define ASCTXFCON_TXFITLMASK 0x3F00 +#define ASCTXFCON_TXFITLOFF 8 +#define ASCRXFCON_RXFEN 0x0001 +#define ASCRXFCON_RXFFLU 0x0002 +#define ASCRXFCON_RXFITLMASK 0x3F00 +#define ASCRXFCON_RXFITLOFF 8 +#define ASCFSTAT_RXFFLMASK 0x003F +#define ASCFSTAT_TXFFLMASK 0x3F00 +#define ASCFSTAT_TXFREEMASK 0x3F000000 +#define ASCFSTAT_TXFREEOFF 24 + +static void lqasc_tx_chars(struct uart_port *port); +static struct ltq_uart_port *lqasc_port[MAXPORTS]; +static struct uart_driver lqasc_reg; +static DEFINE_SPINLOCK(ltq_asc_lock); + +struct ltq_uart_port { + struct uart_port port; + struct clk *clk; + unsigned int tx_irq; + unsigned int rx_irq; + unsigned int err_irq; +}; + +static inline struct +ltq_uart_port *to_ltq_uart_port(struct uart_port *port) +{ + return container_of(port, struct ltq_uart_port, port); +} + +static void +lqasc_stop_tx(struct uart_port *port) +{ + return; +} + +static void +lqasc_start_tx(struct uart_port *port) +{ + unsigned long flags; + spin_lock_irqsave(<q_asc_lock, flags); + lqasc_tx_chars(port); + spin_unlock_irqrestore(<q_asc_lock, flags); + return; +} + +static void +lqasc_stop_rx(struct uart_port *port) +{ + ltq_w32(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE); +} + +static void +lqasc_enable_ms(struct uart_port *port) +{ +} + +static int +lqasc_rx_chars(struct 
uart_port *port) +{ + struct tty_struct *tty = tty_port_tty_get(&port->state->port); + unsigned int ch = 0, rsr = 0, fifocnt; + + if (!tty) { + dev_dbg(port->dev, "%s:tty is busy now", __func__); + return -EBUSY; + } + fifocnt = + ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_RXFFLMASK; + while (fifocnt--) { + u8 flag = TTY_NORMAL; + ch = ltq_r8(port->membase + LTQ_ASC_RBUF); + rsr = (ltq_r32(port->membase + LTQ_ASC_STATE) + & ASCSTATE_ANY) | UART_DUMMY_UER_RX; + tty_flip_buffer_push(tty); + port->icount.rx++; + + /* + * Note that the error handling code is + * out of the main execution path + */ + if (rsr & ASCSTATE_ANY) { + if (rsr & ASCSTATE_PE) { + port->icount.parity++; + ltq_w32_mask(0, ASCWHBSTATE_CLRPE, + port->membase + LTQ_ASC_WHBSTATE); + } else if (rsr & ASCSTATE_FE) { + port->icount.frame++; + ltq_w32_mask(0, ASCWHBSTATE_CLRFE, + port->membase + LTQ_ASC_WHBSTATE); + } + if (rsr & ASCSTATE_ROE) { + port->icount.overrun++; + ltq_w32_mask(0, ASCWHBSTATE_CLRROE, + port->membase + LTQ_ASC_WHBSTATE); + } + + rsr &= port->read_status_mask; + + if (rsr & ASCSTATE_PE) + flag = TTY_PARITY; + else if (rsr & ASCSTATE_FE) + flag = TTY_FRAME; + } + + if ((rsr & port->ignore_status_mask) == 0) + tty_insert_flip_char(tty, ch, flag); + + if (rsr & ASCSTATE_ROE) + /* + * Overrun is special, since it's reported + * immediately, and doesn't affect the current + * character + */ + tty_insert_flip_char(tty, 0, TTY_OVERRUN); + } + if (ch != 0) + tty_flip_buffer_push(tty); + tty_kref_put(tty); + return 0; +} + +static void +lqasc_tx_chars(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + if (uart_tx_stopped(port)) { + lqasc_stop_tx(port); + return; + } + + while (((ltq_r32(port->membase + LTQ_ASC_FSTAT) & + ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF) != 0) { + if (port->x_char) { + ltq_w8(port->x_char, port->membase + LTQ_ASC_TBUF); + port->icount.tx++; + port->x_char = 0; + continue; + } + + if (uart_circ_empty(xmit)) + break; + + ltq_w8(port->state->xmit.buf[port->state->xmit.tail], + port->membase + LTQ_ASC_TBUF); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); +} + +static irqreturn_t +lqasc_tx_int(int irq, void *_port) +{ + unsigned long flags; + struct uart_port *port = (struct uart_port *)_port; + spin_lock_irqsave(<q_asc_lock, flags); + ltq_w32(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR); + spin_unlock_irqrestore(<q_asc_lock, flags); + lqasc_start_tx(port); + return IRQ_HANDLED; +} + +static irqreturn_t +lqasc_err_int(int irq, void *_port) +{ + unsigned long flags; + struct uart_port *port = (struct uart_port *)_port; + spin_lock_irqsave(<q_asc_lock, flags); + /* clear any pending interrupts */ + ltq_w32_mask(0, ASCWHBSTATE_CLRPE | ASCWHBSTATE_CLRFE | + ASCWHBSTATE_CLRROE, port->membase + LTQ_ASC_WHBSTATE); + spin_unlock_irqrestore(<q_asc_lock, flags); + return IRQ_HANDLED; +} + +static irqreturn_t +lqasc_rx_int(int irq, void *_port) +{ + unsigned long flags; + struct uart_port *port = (struct uart_port *)_port; + spin_lock_irqsave(<q_asc_lock, flags); + ltq_w32(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR); + lqasc_rx_chars(port); + spin_unlock_irqrestore(<q_asc_lock, flags); + return IRQ_HANDLED; +} + +static unsigned int +lqasc_tx_empty(struct uart_port *port) +{ + int status; + status = ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_TXFFLMASK; + return status ? 
0 : TIOCSER_TEMT; +} + +static unsigned int +lqasc_get_mctrl(struct uart_port *port) +{ + return TIOCM_CTS | TIOCM_CAR | TIOCM_DSR; +} + +static void +lqasc_set_mctrl(struct uart_port *port, u_int mctrl) +{ +} + +static void +lqasc_break_ctl(struct uart_port *port, int break_state) +{ +} + +static int +lqasc_startup(struct uart_port *port) +{ + struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); + int retval; + + port->uartclk = clk_get_rate(ltq_port->clk); + + ltq_w32_mask(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET), + port->membase + LTQ_ASC_CLC); + + ltq_w32(0, port->membase + LTQ_ASC_PISEL); + ltq_w32( + ((TXFIFO_FL << ASCTXFCON_TXFITLOFF) & ASCTXFCON_TXFITLMASK) | + ASCTXFCON_TXFEN | ASCTXFCON_TXFFLU, + port->membase + LTQ_ASC_TXFCON); + ltq_w32( + ((RXFIFO_FL << ASCRXFCON_RXFITLOFF) & ASCRXFCON_RXFITLMASK) + | ASCRXFCON_RXFEN | ASCRXFCON_RXFFLU, + port->membase + LTQ_ASC_RXFCON); + /* make sure other settings are written to hardware before + * setting enable bits + */ + wmb(); + ltq_w32_mask(0, ASCCON_M_8ASYNC | ASCCON_FEN | ASCCON_TOEN | + ASCCON_ROEN, port->membase + LTQ_ASC_CON); + + retval = request_irq(ltq_port->tx_irq, lqasc_tx_int, + IRQF_DISABLED, "asc_tx", port); + if (retval) { + pr_err("failed to request lqasc_tx_int\n"); + return retval; + } + + retval = request_irq(ltq_port->rx_irq, lqasc_rx_int, + IRQF_DISABLED, "asc_rx", port); + if (retval) { + pr_err("failed to request lqasc_rx_int\n"); + goto err1; + } + + retval = request_irq(ltq_port->err_irq, lqasc_err_int, + IRQF_DISABLED, "asc_err", port); + if (retval) { + pr_err("failed to request lqasc_err_int\n"); + goto err2; + } + + ltq_w32(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX, + port->membase + LTQ_ASC_IRNREN); + return 0; + +err2: + free_irq(ltq_port->rx_irq, port); +err1: + free_irq(ltq_port->tx_irq, port); + return retval; +} + +static void +lqasc_shutdown(struct uart_port *port) +{ + struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); + free_irq(ltq_port->tx_irq, port); + free_irq(ltq_port->rx_irq, port); + free_irq(ltq_port->err_irq, port); + + ltq_w32(0, port->membase + LTQ_ASC_CON); + ltq_w32_mask(ASCRXFCON_RXFEN, ASCRXFCON_RXFFLU, + port->membase + LTQ_ASC_RXFCON); + ltq_w32_mask(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU, + port->membase + LTQ_ASC_TXFCON); +} + +static void +lqasc_set_termios(struct uart_port *port, + struct ktermios *new, struct ktermios *old) +{ + unsigned int cflag; + unsigned int iflag; + unsigned int divisor; + unsigned int baud; + unsigned int con = 0; + unsigned long flags; + + cflag = new->c_cflag; + iflag = new->c_iflag; + + switch (cflag & CSIZE) { + case CS7: + con = ASCCON_M_7ASYNC; + break; + + case CS5: + case CS6: + default: + new->c_cflag &= ~ CSIZE; + new->c_cflag |= CS8; + con = ASCCON_M_8ASYNC; + break; + } + + cflag &= ~CMSPAR; /* Mark/Space parity is not supported */ + + if (cflag & CSTOPB) + con |= ASCCON_STP; + + if (cflag & PARENB) { + if (!(cflag & PARODD)) + con &= ~ASCCON_ODD; + else + con |= ASCCON_ODD; + } + + port->read_status_mask = ASCSTATE_ROE; + if (iflag & INPCK) + port->read_status_mask |= ASCSTATE_FE | ASCSTATE_PE; + + port->ignore_status_mask = 0; + if (iflag & IGNPAR) + port->ignore_status_mask |= ASCSTATE_FE | ASCSTATE_PE; + + if (iflag & IGNBRK) { + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). 
+ */ + if (iflag & IGNPAR) + port->ignore_status_mask |= ASCSTATE_ROE; + } + + if ((cflag & CREAD) == 0) + port->ignore_status_mask |= UART_DUMMY_UER_RX; + + /* set error signals - framing, parity and overrun, enable receiver */ + con |= ASCCON_FEN | ASCCON_TOEN | ASCCON_ROEN; + + spin_lock_irqsave(<q_asc_lock, flags); + + /* set up CON */ + ltq_w32_mask(0, con, port->membase + LTQ_ASC_CON); + + /* Set baud rate - take a divider of 2 into account */ + baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16); + divisor = uart_get_divisor(port, baud); + divisor = divisor / 2 - 1; + + /* disable the baudrate generator */ + ltq_w32_mask(ASCCON_R, 0, port->membase + LTQ_ASC_CON); + + /* make sure the fractional divider is off */ + ltq_w32_mask(ASCCON_FDE, 0, port->membase + LTQ_ASC_CON); + + /* set up to use divisor of 2 */ + ltq_w32_mask(ASCCON_BRS, 0, port->membase + LTQ_ASC_CON); + + /* now we can write the new baudrate into the register */ + ltq_w32(divisor, port->membase + LTQ_ASC_BG); + + /* turn the baudrate generator back on */ + ltq_w32_mask(0, ASCCON_R, port->membase + LTQ_ASC_CON); + + /* enable rx */ + ltq_w32(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE); + + spin_unlock_irqrestore(<q_asc_lock, flags); + + /* Don't rewrite B0 */ + if (tty_termios_baud_rate(new)) + tty_termios_encode_baud_rate(new, baud, baud); +} + +static const char* +lqasc_type(struct uart_port *port) +{ + if (port->type == PORT_LTQ_ASC) + return DRVNAME; + else + return NULL; +} + +static void +lqasc_release_port(struct uart_port *port) +{ + if (port->flags & UPF_IOREMAP) { + iounmap(port->membase); + port->membase = NULL; + } +} + +static int +lqasc_request_port(struct uart_port *port) +{ + struct platform_device *pdev = to_platform_device(port->dev); + struct resource *res; + int size; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "cannot obtain I/O memory region"); + return -ENODEV; + } + size = resource_size(res); + + res = devm_request_mem_region(&pdev->dev, res->start, + size, dev_name(&pdev->dev)); + if (!res) { + dev_err(&pdev->dev, "cannot request I/O memory region"); + return -EBUSY; + } + + if (port->flags & UPF_IOREMAP) { + port->membase = devm_ioremap_nocache(&pdev->dev, + port->mapbase, size); + if (port->membase == NULL) + return -ENOMEM; + } + return 0; +} + +static void +lqasc_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) { + port->type = PORT_LTQ_ASC; + lqasc_request_port(port); + } +} + +static int +lqasc_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + int ret = 0; + if (ser->type != PORT_UNKNOWN && ser->type != PORT_LTQ_ASC) + ret = -EINVAL; + if (ser->irq < 0 || ser->irq >= NR_IRQS) + ret = -EINVAL; + if (ser->baud_base < 9600) + ret = -EINVAL; + return ret; +} + +static struct uart_ops lqasc_pops = { + .tx_empty = lqasc_tx_empty, + .set_mctrl = lqasc_set_mctrl, + .get_mctrl = lqasc_get_mctrl, + .stop_tx = lqasc_stop_tx, + .start_tx = lqasc_start_tx, + .stop_rx = lqasc_stop_rx, + .enable_ms = lqasc_enable_ms, + .break_ctl = lqasc_break_ctl, + .startup = lqasc_startup, + .shutdown = lqasc_shutdown, + .set_termios = lqasc_set_termios, + .type = lqasc_type, + .release_port = lqasc_release_port, + .request_port = lqasc_request_port, + .config_port = lqasc_config_port, + .verify_port = lqasc_verify_port, +}; + +static void +lqasc_console_putchar(struct uart_port *port, int ch) +{ + int fifofree; + + if (!port->membase) + return; + + do { + fifofree = (ltq_r32(port->membase + 
LTQ_ASC_FSTAT) + & ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF; + } while (fifofree == 0); + ltq_w8(ch, port->membase + LTQ_ASC_TBUF); +} + + +static void +lqasc_console_write(struct console *co, const char *s, u_int count) +{ + struct ltq_uart_port *ltq_port; + struct uart_port *port; + unsigned long flags; + + if (co->index >= MAXPORTS) + return; + + ltq_port = lqasc_port[co->index]; + if (!ltq_port) + return; + + port = <q_port->port; + + spin_lock_irqsave(<q_asc_lock, flags); + uart_console_write(port, s, count, lqasc_console_putchar); + spin_unlock_irqrestore(<q_asc_lock, flags); +} + +static int __init +lqasc_console_setup(struct console *co, char *options) +{ + struct ltq_uart_port *ltq_port; + struct uart_port *port; + int baud = 115200; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (co->index >= MAXPORTS) + return -ENODEV; + + ltq_port = lqasc_port[co->index]; + if (!ltq_port) + return -ENODEV; + + port = <q_port->port; + + port->uartclk = clk_get_rate(ltq_port->clk); + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + return uart_set_options(port, co, baud, parity, bits, flow); +} + +static struct console lqasc_console = { + .name = "ttyLTQ", + .write = lqasc_console_write, + .device = uart_console_device, + .setup = lqasc_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &lqasc_reg, +}; + +static int __init +lqasc_console_init(void) +{ + register_console(&lqasc_console); + return 0; +} +console_initcall(lqasc_console_init); + +static struct uart_driver lqasc_reg = { + .owner = THIS_MODULE, + .driver_name = DRVNAME, + .dev_name = "ttyLTQ", + .major = 0, + .minor = 0, + .nr = MAXPORTS, + .cons = &lqasc_console, +}; + +static int __init +lqasc_probe(struct platform_device *pdev) +{ + struct ltq_uart_port *ltq_port; + struct uart_port *port; + struct resource *mmres, *irqres; + int tx_irq, rx_irq, err_irq; + struct clk *clk; + int ret; + + mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!mmres || !irqres) + return -ENODEV; + + if (pdev->id >= MAXPORTS) + return -EBUSY; + + if (lqasc_port[pdev->id] != NULL) + return -EBUSY; + + clk = clk_get(&pdev->dev, "fpi"); + if (IS_ERR(clk)) { + pr_err("failed to get fpi clk\n"); + return -ENOENT; + } + + tx_irq = platform_get_irq_byname(pdev, "tx"); + rx_irq = platform_get_irq_byname(pdev, "rx"); + err_irq = platform_get_irq_byname(pdev, "err"); + if ((tx_irq < 0) | (rx_irq < 0) | (err_irq < 0)) + return -ENODEV; + + ltq_port = kzalloc(sizeof(struct ltq_uart_port), GFP_KERNEL); + if (!ltq_port) + return -ENOMEM; + + port = <q_port->port; + + port->iotype = SERIAL_IO_MEM; + port->flags = ASYNC_BOOT_AUTOCONF | UPF_IOREMAP; + port->ops = &lqasc_pops; + port->fifosize = 16; + port->type = PORT_LTQ_ASC, + port->line = pdev->id; + port->dev = &pdev->dev; + + port->irq = tx_irq; /* unused, just to be backward-compatibe */ + port->mapbase = mmres->start; + + ltq_port->clk = clk; + + ltq_port->tx_irq = tx_irq; + ltq_port->rx_irq = rx_irq; + ltq_port->err_irq = err_irq; + + lqasc_port[pdev->id] = ltq_port; + platform_set_drvdata(pdev, ltq_port); + + ret = uart_add_one_port(&lqasc_reg, port); + + return ret; +} + +static struct platform_driver lqasc_driver = { + .driver = { + .name = DRVNAME, + .owner = THIS_MODULE, + }, +}; + +int __init +init_lqasc(void) +{ + int ret; + + ret = uart_register_driver(&lqasc_reg); + if (ret != 0) + return ret; + + ret = platform_driver_probe(&lqasc_driver, lqasc_probe); + if (ret != 0) + 
uart_unregister_driver(&lqasc_reg); + + return ret; +} + +module_init(init_lqasc); + +MODULE_DESCRIPTION("Lantiq serial port driver"); +MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/tty/serial/of_serial.c b/trunk/drivers/tty/serial/of_serial.c index 0e8eec516df4..c911b2419abb 100644 --- a/trunk/drivers/tty/serial/of_serial.c +++ b/trunk/drivers/tty/serial/of_serial.c @@ -80,14 +80,17 @@ static int __devinit of_platform_serial_setup(struct platform_device *ofdev, /* * Try to register a serial port */ +static struct of_device_id of_platform_serial_table[]; static int __devinit of_platform_serial_probe(struct platform_device *ofdev) { + const struct of_device_id *match; struct of_serial_info *info; struct uart_port port; int port_type; int ret; - if (!ofdev->dev.of_match) + match = of_match_device(of_platform_serial_table, &ofdev->dev); + if (!match) return -EINVAL; if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL)) @@ -97,7 +100,7 @@ static int __devinit of_platform_serial_probe(struct platform_device *ofdev) if (info == NULL) return -ENOMEM; - port_type = (unsigned long)ofdev->dev.of_match->data; + port_type = (unsigned long)match->data; ret = of_platform_serial_setup(ofdev, port_type, &port); if (ret) goto out; diff --git a/trunk/drivers/uio/uio.c b/trunk/drivers/uio/uio.c index 51fe1795d5a8..d2efe823c20d 100644 --- a/trunk/drivers/uio/uio.c +++ b/trunk/drivers/uio/uio.c @@ -381,7 +381,13 @@ static int uio_get_minor(struct uio_device *idev) retval = -ENOMEM; goto exit; } - idev->minor = id & MAX_ID_MASK; + if (id < UIO_MAX_DEVICES) { + idev->minor = id; + } else { + dev_err(idev->dev, "too many uio devices\n"); + retval = -EINVAL; + idr_remove(&uio_idr, id); + } exit: mutex_unlock(&minor_lock); return retval; @@ -587,14 +593,12 @@ static ssize_t uio_write(struct file *filep, const char __user *buf, static int uio_find_mem_index(struct vm_area_struct *vma) { - int mi; struct uio_device *idev = vma->vm_private_data; - for (mi = 0; mi < MAX_UIO_MAPS; mi++) { - if (idev->info->mem[mi].size == 0) + if (vma->vm_pgoff < MAX_UIO_MAPS) { + if (idev->info->mem[vma->vm_pgoff].size == 0) return -1; - if (vma->vm_pgoff == mi) - return mi; + return (int)vma->vm_pgoff; } return -1; } diff --git a/trunk/drivers/uio/uio_netx.c b/trunk/drivers/uio/uio_netx.c index 5ffdb483b015..a879fd5741f8 100644 --- a/trunk/drivers/uio/uio_netx.c +++ b/trunk/drivers/uio/uio_netx.c @@ -18,6 +18,9 @@ #define PCI_VENDOR_ID_HILSCHER 0x15CF #define PCI_DEVICE_ID_HILSCHER_NETX 0x0000 +#define PCI_DEVICE_ID_HILSCHER_NETPLC 0x0010 +#define PCI_SUBDEVICE_ID_NETPLC_RAM 0x0000 +#define PCI_SUBDEVICE_ID_NETPLC_FLASH 0x0001 #define PCI_SUBDEVICE_ID_NXSB_PCA 0x3235 #define PCI_SUBDEVICE_ID_NXPCA 0x3335 @@ -66,6 +69,10 @@ static int __devinit netx_pci_probe(struct pci_dev *dev, bar = 0; info->name = "netx"; break; + case PCI_DEVICE_ID_HILSCHER_NETPLC: + bar = 0; + info->name = "netplc"; + break; default: bar = 2; info->name = "netx_plx"; @@ -133,6 +140,18 @@ static struct pci_device_id netx_pci_ids[] = { .subvendor = 0, .subdevice = 0, }, + { + .vendor = PCI_VENDOR_ID_HILSCHER, + .device = PCI_DEVICE_ID_HILSCHER_NETPLC, + .subvendor = PCI_VENDOR_ID_HILSCHER, + .subdevice = PCI_SUBDEVICE_ID_NETPLC_RAM, + }, + { + .vendor = PCI_VENDOR_ID_HILSCHER, + .device = PCI_DEVICE_ID_HILSCHER_NETPLC, + .subvendor = PCI_VENDOR_ID_HILSCHER, + .subdevice = PCI_SUBDEVICE_ID_NETPLC_FLASH, + }, { .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_9030, diff --git a/trunk/drivers/uio/uio_pdrv_genirq.c 
b/trunk/drivers/uio/uio_pdrv_genirq.c index 7174d518b8a6..0f424af7f109 100644 --- a/trunk/drivers/uio/uio_pdrv_genirq.c +++ b/trunk/drivers/uio/uio_pdrv_genirq.c @@ -189,6 +189,10 @@ static int uio_pdrv_genirq_remove(struct platform_device *pdev) uio_unregister_device(priv->uioinfo); pm_runtime_disable(&pdev->dev); + + priv->uioinfo->handler = NULL; + priv->uioinfo->irqcontrol = NULL; + kfree(priv); return 0; } diff --git a/trunk/drivers/usb/Kconfig b/trunk/drivers/usb/Kconfig index 41b6e51188e4..006489d82dc3 100644 --- a/trunk/drivers/usb/Kconfig +++ b/trunk/drivers/usb/Kconfig @@ -66,6 +66,7 @@ config USB_ARCH_HAS_EHCI default y if ARCH_VT8500 default y if PLAT_SPEAR default y if ARCH_MSM + default y if MICROBLAZE default PCI # ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface. diff --git a/trunk/drivers/usb/core/devices.c b/trunk/drivers/usb/core/devices.c index a3d2e2399655..96fdfb815f89 100644 --- a/trunk/drivers/usb/core/devices.c +++ b/trunk/drivers/usb/core/devices.c @@ -221,7 +221,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end, break; case USB_ENDPOINT_XFER_INT: type = "Int."; - if (speed == USB_SPEED_HIGH) + if (speed == USB_SPEED_HIGH || speed == USB_SPEED_SUPER) interval = 1 << (desc->bInterval - 1); else interval = desc->bInterval; @@ -229,7 +229,8 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end, default: /* "can't happen" */ return start; } - interval *= (speed == USB_SPEED_HIGH) ? 125 : 1000; + interval *= (speed == USB_SPEED_HIGH || + speed == USB_SPEED_SUPER) ? 125 : 1000; if (interval % 1000) unit = 'u'; else { @@ -542,8 +543,9 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes, if (level == 0) { int max; - /* high speed reserves 80%, full/low reserves 90% */ - if (usbdev->speed == USB_SPEED_HIGH) + /* super/high speed reserves 80%, full/low reserves 90% */ + if (usbdev->speed == USB_SPEED_HIGH || + usbdev->speed == USB_SPEED_SUPER) max = 800; else max = FRAME_TIME_MAX_USECS_ALLOC; diff --git a/trunk/drivers/usb/core/hcd.c b/trunk/drivers/usb/core/hcd.c index 8eed05d23838..77a7faec8d78 100644 --- a/trunk/drivers/usb/core/hcd.c +++ b/trunk/drivers/usb/core/hcd.c @@ -1908,7 +1908,7 @@ void usb_free_streams(struct usb_interface *interface, /* Streams only apply to bulk endpoints. */ for (i = 0; i < num_eps; i++) - if (!usb_endpoint_xfer_bulk(&eps[i]->desc)) + if (!eps[i] || !usb_endpoint_xfer_bulk(&eps[i]->desc)) return; hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags); diff --git a/trunk/drivers/usb/core/hub.c b/trunk/drivers/usb/core/hub.c index 8fb754916c67..93720bdc9efd 100644 --- a/trunk/drivers/usb/core/hub.c +++ b/trunk/drivers/usb/core/hub.c @@ -2285,7 +2285,17 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) } /* see 7.1.7.6 */ - status = set_port_feature(hub->hdev, port1, USB_PORT_FEAT_SUSPEND); + /* Clear PORT_POWER if it's a USB3.0 device connected to USB 3.0 + * external hub. + * FIXME: this is a temporary workaround to make the system able + * to suspend/resume. 
+ */ + if ((hub->hdev->parent != NULL) && hub_is_superspeed(hub->hdev)) + status = clear_port_feature(hub->hdev, port1, + USB_PORT_FEAT_POWER); + else + status = set_port_feature(hub->hdev, port1, + USB_PORT_FEAT_SUSPEND); if (status) { dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n", port1, status); diff --git a/trunk/drivers/usb/gadget/f_audio.c b/trunk/drivers/usb/gadget/f_audio.c index 9abecfddb27d..0111f8a9cf7f 100644 --- a/trunk/drivers/usb/gadget/f_audio.c +++ b/trunk/drivers/usb/gadget/f_audio.c @@ -706,6 +706,7 @@ f_audio_unbind(struct usb_configuration *c, struct usb_function *f) struct f_audio *audio = func_to_audio(f); usb_free_descriptors(f->descriptors); + usb_free_descriptors(f->hs_descriptors); kfree(audio); } diff --git a/trunk/drivers/usb/gadget/f_eem.c b/trunk/drivers/usb/gadget/f_eem.c index 95dd4662d6a8..b3c304290150 100644 --- a/trunk/drivers/usb/gadget/f_eem.c +++ b/trunk/drivers/usb/gadget/f_eem.c @@ -314,6 +314,9 @@ eem_unbind(struct usb_configuration *c, struct usb_function *f) static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req) { + struct sk_buff *skb = (struct sk_buff *)req->context; + + dev_kfree_skb_any(skb); } /* @@ -428,10 +431,11 @@ static int eem_unwrap(struct gether *port, skb_trim(skb2, len); put_unaligned_le16(BIT(15) | BIT(11) | len, skb_push(skb2, 2)); - skb_copy_bits(skb, 0, req->buf, skb->len); - req->length = skb->len; + skb_copy_bits(skb2, 0, req->buf, skb2->len); + req->length = skb2->len; req->complete = eem_cmd_complete; req->zero = 1; + req->context = skb2; if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC)) DBG(cdev, "echo response queue fail\n"); break; diff --git a/trunk/drivers/usb/gadget/fsl_qe_udc.c b/trunk/drivers/usb/gadget/fsl_qe_udc.c index aee7e3c53c38..3a68e09309f7 100644 --- a/trunk/drivers/usb/gadget/fsl_qe_udc.c +++ b/trunk/drivers/usb/gadget/fsl_qe_udc.c @@ -1148,6 +1148,12 @@ static int qe_ep_tx(struct qe_ep *ep, struct qe_frame *frame) static int txcomplete(struct qe_ep *ep, unsigned char restart) { if (ep->tx_req != NULL) { + struct qe_req *req = ep->tx_req; + unsigned zlp = 0, last_len = 0; + + last_len = min_t(unsigned, req->req.length - ep->sent, + ep->ep.maxpacket); + if (!restart) { int asent = ep->last; ep->sent += asent; @@ -1156,9 +1162,18 @@ static int txcomplete(struct qe_ep *ep, unsigned char restart) ep->last = 0; } + /* zlp needed when req->re.zero is set */ + if (req->req.zero) { + if (last_len == 0 || + (req->req.length % ep->ep.maxpacket) != 0) + zlp = 0; + else + zlp = 1; + } else + zlp = 0; + /* a request already were transmitted completely */ - if ((ep->tx_req->req.length - ep->sent) <= 0) { - ep->tx_req->req.actual = (unsigned int)ep->sent; + if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) { done(ep, ep->tx_req, 0); ep->tx_req = NULL; ep->last = 0; @@ -1191,6 +1206,7 @@ static int qe_usb_senddata(struct qe_ep *ep, struct qe_frame *frame) buf = (u8 *)ep->tx_req->req.buf + ep->sent; if (buf && size) { ep->last = size; + ep->tx_req->req.actual += size; frame_set_data(frame, buf); frame_set_length(frame, size); frame_set_status(frame, FRAME_OK); @@ -2523,15 +2539,18 @@ static void qe_udc_release(struct device *dev) } /* Driver probe functions */ +static const struct of_device_id qe_udc_match[]; static int __devinit qe_udc_probe(struct platform_device *ofdev) { + const struct of_device_id *match; struct device_node *np = ofdev->dev.of_node; struct qe_ep *ep; unsigned int ret = 0; unsigned int i; const void *prop; - if (!ofdev->dev.of_match) + match = 
of_match_device(qe_udc_match, &ofdev->dev); + if (!match) return -EINVAL; prop = of_get_property(np, "mode", NULL); @@ -2545,7 +2564,7 @@ static int __devinit qe_udc_probe(struct platform_device *ofdev) return -ENOMEM; } - udc_controller->soc_type = (unsigned long)ofdev->dev.of_match->data; + udc_controller->soc_type = (unsigned long)match->data; udc_controller->usb_regs = of_iomap(np, 0); if (!udc_controller->usb_regs) { ret = -ENOMEM; diff --git a/trunk/drivers/usb/gadget/goku_udc.c b/trunk/drivers/usb/gadget/goku_udc.c index 48a760220baf..bf6e11c758d5 100644 --- a/trunk/drivers/usb/gadget/goku_udc.c +++ b/trunk/drivers/usb/gadget/goku_udc.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include diff --git a/trunk/drivers/usb/gadget/imx_udc.c b/trunk/drivers/usb/gadget/imx_udc.c index 5408186afc35..ade40066decf 100644 --- a/trunk/drivers/usb/gadget/imx_udc.c +++ b/trunk/drivers/usb/gadget/imx_udc.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include diff --git a/trunk/drivers/usb/gadget/inode.c b/trunk/drivers/usb/gadget/inode.c index 3ed73f49cf18..a01383f71f38 100644 --- a/trunk/drivers/usb/gadget/inode.c +++ b/trunk/drivers/usb/gadget/inode.c @@ -386,8 +386,10 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) /* halt any endpoint by doing a "wrong direction" i/o call */ if (usb_endpoint_dir_in(&data->desc)) { - if (usb_endpoint_xfer_isoc(&data->desc)) + if (usb_endpoint_xfer_isoc(&data->desc)) { + mutex_unlock(&data->lock); return -EINVAL; + } DBG (data->dev, "%s halt\n", data->name); spin_lock_irq (&data->dev->lock); if (likely (data->ep != NULL)) diff --git a/trunk/drivers/usb/gadget/omap_udc.c b/trunk/drivers/usb/gadget/omap_udc.c index cb5cd422f3f5..82fd24935332 100644 --- a/trunk/drivers/usb/gadget/omap_udc.c +++ b/trunk/drivers/usb/gadget/omap_udc.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include diff --git a/trunk/drivers/usb/gadget/pch_udc.c b/trunk/drivers/usb/gadget/pch_udc.c index 3e4b35e50c24..68dbcc3e4cc2 100644 --- a/trunk/drivers/usb/gadget/pch_udc.c +++ b/trunk/drivers/usb/gadget/pch_udc.c @@ -1608,7 +1608,7 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq, return -EINVAL; if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN)) return -ESHUTDOWN; - spin_lock_irqsave(&ep->dev->lock, iflags); + spin_lock_irqsave(&dev->lock, iflags); /* map the buffer for dma */ if (usbreq->length && ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) { @@ -1625,8 +1625,10 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq, DMA_FROM_DEVICE); } else { req->buf = kzalloc(usbreq->length, GFP_ATOMIC); - if (!req->buf) - return -ENOMEM; + if (!req->buf) { + retval = -ENOMEM; + goto probe_end; + } if (ep->in) { memcpy(req->buf, usbreq->buf, usbreq->length); req->dma = dma_map_single(&dev->pdev->dev, diff --git a/trunk/drivers/usb/gadget/pxa25x_udc.c b/trunk/drivers/usb/gadget/pxa25x_udc.c index 444b60aa15e9..365c02fc25fc 100644 --- a/trunk/drivers/usb/gadget/pxa25x_udc.c +++ b/trunk/drivers/usb/gadget/pxa25x_udc.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include diff --git a/trunk/drivers/usb/gadget/pxa27x_udc.c b/trunk/drivers/usb/gadget/pxa27x_udc.c index 78a39a41547d..57607696735c 100644 --- a/trunk/drivers/usb/gadget/pxa27x_udc.c +++ b/trunk/drivers/usb/gadget/pxa27x_udc.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include diff --git a/trunk/drivers/usb/gadget/r8a66597-udc.c 
b/trunk/drivers/usb/gadget/r8a66597-udc.c index 015118535f77..6dcc1f68fa60 100644 --- a/trunk/drivers/usb/gadget/r8a66597-udc.c +++ b/trunk/drivers/usb/gadget/r8a66597-udc.c @@ -1083,7 +1083,9 @@ static void irq_device_state(struct r8a66597 *r8a66597) if (dvsq == DS_DFLT) { /* bus reset */ + spin_unlock(&r8a66597->lock); r8a66597->driver->disconnect(&r8a66597->gadget); + spin_lock(&r8a66597->lock); r8a66597_update_usb_speed(r8a66597); } if (r8a66597->old_dvsq == DS_CNFG && dvsq != DS_CNFG) diff --git a/trunk/drivers/usb/host/ehci-omap.c b/trunk/drivers/usb/host/ehci-omap.c index 7e41a95c5ceb..627f3a678759 100644 --- a/trunk/drivers/usb/host/ehci-omap.c +++ b/trunk/drivers/usb/host/ehci-omap.c @@ -40,6 +40,7 @@ #include #include #include +#include /* EHCI Register Set */ #define EHCI_INSNREG04 (0xA0) @@ -118,6 +119,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) struct ehci_hcd *omap_ehci; int ret = -ENODEV; int irq; + int i; + char supply[7]; if (usb_disabled()) return -ENODEV; @@ -158,6 +161,23 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) hcd->rsrc_len = resource_size(res); hcd->regs = regs; + /* get ehci regulator and enable */ + for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) { + if (pdata->port_mode[i] != OMAP_EHCI_PORT_MODE_PHY) { + pdata->regulator[i] = NULL; + continue; + } + snprintf(supply, sizeof(supply), "hsusb%d", i); + pdata->regulator[i] = regulator_get(dev, supply); + if (IS_ERR(pdata->regulator[i])) { + pdata->regulator[i] = NULL; + dev_dbg(dev, + "failed to get ehci port%d regulator\n", i); + } else { + regulator_enable(pdata->regulator[i]); + } + } + ret = omap_usbhs_enable(dev); if (ret) { dev_err(dev, "failed to start usbhs with err %d\n", ret); diff --git a/trunk/drivers/usb/host/ehci-q.c b/trunk/drivers/usb/host/ehci-q.c index 98ded66e8d3f..42abd0f603bf 100644 --- a/trunk/drivers/usb/host/ehci-q.c +++ b/trunk/drivers/usb/host/ehci-q.c @@ -1247,24 +1247,27 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh) static void scan_async (struct ehci_hcd *ehci) { + bool stopped; struct ehci_qh *qh; enum ehci_timer_action action = TIMER_IO_WATCHDOG; ehci->stamp = ehci_readl(ehci, &ehci->regs->frame_index); timer_action_done (ehci, TIMER_ASYNC_SHRINK); rescan: + stopped = !HC_IS_RUNNING(ehci_to_hcd(ehci)->state); qh = ehci->async->qh_next.qh; if (likely (qh != NULL)) { do { /* clean any finished work for this qh */ - if (!list_empty (&qh->qtd_list) - && qh->stamp != ehci->stamp) { + if (!list_empty(&qh->qtd_list) && (stopped || + qh->stamp != ehci->stamp)) { int temp; /* unlinks could happen here; completion * reporting drops the lock. rescan using * the latest schedule, but don't rescan - * qhs we already finished (no looping). + * qhs we already finished (no looping) + * unless the controller is stopped. 
*/ qh = qh_get (qh); qh->stamp = ehci->stamp; @@ -1285,9 +1288,9 @@ static void scan_async (struct ehci_hcd *ehci) */ if (list_empty(&qh->qtd_list) && qh->qh_state == QH_STATE_LINKED) { - if (!ehci->reclaim - && ((ehci->stamp - qh->stamp) & 0x1fff) - >= (EHCI_SHRINK_FRAMES * 8)) + if (!ehci->reclaim && (stopped || + ((ehci->stamp - qh->stamp) & 0x1fff) + >= EHCI_SHRINK_FRAMES * 8)) start_unlink_async(ehci, qh); else action = TIMER_ASYNC_SHRINK; diff --git a/trunk/drivers/usb/host/isp1362-hcd.c b/trunk/drivers/usb/host/isp1362-hcd.c index f97570a847ca..9c37dad3e816 100644 --- a/trunk/drivers/usb/host/isp1362-hcd.c +++ b/trunk/drivers/usb/host/isp1362-hcd.c @@ -81,6 +81,7 @@ #include #include #include +#include #include #include diff --git a/trunk/drivers/usb/host/isp1760-hcd.c b/trunk/drivers/usb/host/isp1760-hcd.c index f50e84ac570a..7b2e69aa2e98 100644 --- a/trunk/drivers/usb/host/isp1760-hcd.c +++ b/trunk/drivers/usb/host/isp1760-hcd.c @@ -295,7 +295,7 @@ static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd) } dev_err(hcd->self.controller, - "%s: Can not allocate %lu bytes of memory\n" + "%s: Cannot allocate %zu bytes of memory\n" "Current memory map:\n", __func__, qtd->length); for (i = 0; i < BLOCKS; i++) { @@ -1633,6 +1633,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) ints[i].qh = NULL; ints[i].qtd = NULL; + urb->status = status; isp1760_urb_done(hcd, urb); if (qtd) pe(hcd, qh, qtd); diff --git a/trunk/drivers/usb/host/ohci-au1xxx.c b/trunk/drivers/usb/host/ohci-au1xxx.c index 17a6043c1fa0..958d985f2951 100644 --- a/trunk/drivers/usb/host/ohci-au1xxx.c +++ b/trunk/drivers/usb/host/ohci-au1xxx.c @@ -33,7 +33,7 @@ #ifdef __LITTLE_ENDIAN #define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C) -#elif __BIG_ENDIAN +#elif defined(__BIG_ENDIAN) #define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C | \ USBH_ENABLE_BE) #else diff --git a/trunk/drivers/usb/host/pci-quirks.c b/trunk/drivers/usb/host/pci-quirks.c index 1d586d4f7b56..9b166d70ae91 100644 --- a/trunk/drivers/usb/host/pci-quirks.c +++ b/trunk/drivers/usb/host/pci-quirks.c @@ -84,65 +84,92 @@ int usb_amd_find_chipset_info(void) { u8 rev = 0; unsigned long flags; + struct amd_chipset_info info; + int ret; spin_lock_irqsave(&amd_lock, flags); - amd_chipset.probe_count++; /* probe only once */ - if (amd_chipset.probe_count > 1) { + if (amd_chipset.probe_count > 0) { + amd_chipset.probe_count++; spin_unlock_irqrestore(&amd_lock, flags); return amd_chipset.probe_result; } + memset(&info, 0, sizeof(info)); + spin_unlock_irqrestore(&amd_lock, flags); - amd_chipset.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL); - if (amd_chipset.smbus_dev) { - rev = amd_chipset.smbus_dev->revision; + info.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL); + if (info.smbus_dev) { + rev = info.smbus_dev->revision; if (rev >= 0x40) - amd_chipset.sb_type = 1; + info.sb_type = 1; else if (rev >= 0x30 && rev <= 0x3b) - amd_chipset.sb_type = 3; + info.sb_type = 3; } else { - amd_chipset.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, - 0x780b, NULL); - if (!amd_chipset.smbus_dev) { - spin_unlock_irqrestore(&amd_lock, flags); - return 0; + info.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, + 0x780b, NULL); + if (!info.smbus_dev) { + ret = 0; + goto commit; } - rev = amd_chipset.smbus_dev->revision; + + rev = info.smbus_dev->revision; if (rev >= 0x11 && rev <= 0x18) - amd_chipset.sb_type = 2; + info.sb_type = 2; } - if (amd_chipset.sb_type == 0) { - if 
(amd_chipset.smbus_dev) { - pci_dev_put(amd_chipset.smbus_dev); - amd_chipset.smbus_dev = NULL; + if (info.sb_type == 0) { + if (info.smbus_dev) { + pci_dev_put(info.smbus_dev); + info.smbus_dev = NULL; } - spin_unlock_irqrestore(&amd_lock, flags); - return 0; + ret = 0; + goto commit; } - amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL); - if (amd_chipset.nb_dev) { - amd_chipset.nb_type = 1; + info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL); + if (info.nb_dev) { + info.nb_type = 1; } else { - amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, - 0x1510, NULL); - if (amd_chipset.nb_dev) { - amd_chipset.nb_type = 2; - } else { - amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, - 0x9600, NULL); - if (amd_chipset.nb_dev) - amd_chipset.nb_type = 3; + info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL); + if (info.nb_dev) { + info.nb_type = 2; + } else { + info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, + 0x9600, NULL); + if (info.nb_dev) + info.nb_type = 3; } } - amd_chipset.probe_result = 1; + ret = info.probe_result = 1; printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n"); - spin_unlock_irqrestore(&amd_lock, flags); - return amd_chipset.probe_result; +commit: + + spin_lock_irqsave(&amd_lock, flags); + if (amd_chipset.probe_count > 0) { + /* race - someone else was faster - drop devices */ + + /* Mark that we where here */ + amd_chipset.probe_count++; + ret = amd_chipset.probe_result; + + spin_unlock_irqrestore(&amd_lock, flags); + + if (info.nb_dev) + pci_dev_put(info.nb_dev); + if (info.smbus_dev) + pci_dev_put(info.smbus_dev); + + } else { + /* no race - commit the result */ + info.probe_count++; + amd_chipset = info; + spin_unlock_irqrestore(&amd_lock, flags); + } + + return ret; } EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info); @@ -284,6 +311,7 @@ EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable); void usb_amd_dev_put(void) { + struct pci_dev *nb, *smbus; unsigned long flags; spin_lock_irqsave(&amd_lock, flags); @@ -294,20 +322,23 @@ void usb_amd_dev_put(void) return; } - if (amd_chipset.nb_dev) { - pci_dev_put(amd_chipset.nb_dev); - amd_chipset.nb_dev = NULL; - } - if (amd_chipset.smbus_dev) { - pci_dev_put(amd_chipset.smbus_dev); - amd_chipset.smbus_dev = NULL; - } + /* save them to pci_dev_put outside of spinlock */ + nb = amd_chipset.nb_dev; + smbus = amd_chipset.smbus_dev; + + amd_chipset.nb_dev = NULL; + amd_chipset.smbus_dev = NULL; amd_chipset.nb_type = 0; amd_chipset.sb_type = 0; amd_chipset.isoc_reqs = 0; amd_chipset.probe_result = 0; spin_unlock_irqrestore(&amd_lock, flags); + + if (nb) + pci_dev_put(nb); + if (smbus) + pci_dev_put(smbus); } EXPORT_SYMBOL_GPL(usb_amd_dev_put); diff --git a/trunk/drivers/usb/host/sl811-hcd.c b/trunk/drivers/usb/host/sl811-hcd.c index 18b7099a8125..fafccc2fd331 100644 --- a/trunk/drivers/usb/host/sl811-hcd.c +++ b/trunk/drivers/usb/host/sl811-hcd.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include diff --git a/trunk/drivers/usb/host/xhci-hub.c b/trunk/drivers/usb/host/xhci-hub.c index a78f2ebd11b7..73f75d26436c 100644 --- a/trunk/drivers/usb/host/xhci-hub.c +++ b/trunk/drivers/usb/host/xhci-hub.c @@ -777,7 +777,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd) if (t1 != t2) xhci_writel(xhci, t2, port_array[port_index]); - if (DEV_HIGHSPEED(t1)) { + if (hcd->speed != HCD_USB3) { /* enable remote wake up for USB 2.0 */ u32 __iomem *addr; u32 tmp; @@ -866,6 +866,21 @@ int xhci_bus_resume(struct usb_hcd *hcd) temp |= PORT_LINK_STROBE | XDEV_U0; xhci_writel(xhci, temp, 
port_array[port_index]); } + /* wait for the port to enter U0 and report port link + * state change. + */ + spin_unlock_irqrestore(&xhci->lock, flags); + msleep(20); + spin_lock_irqsave(&xhci->lock, flags); + + /* Clear PLC */ + temp = xhci_readl(xhci, port_array[port_index]); + if (temp & PORT_PLC) { + temp = xhci_port_state_to_neutral(temp); + temp |= PORT_PLC; + xhci_writel(xhci, temp, port_array[port_index]); + } + slot_id = xhci_find_slot_id_by_port(hcd, xhci, port_index + 1); if (slot_id) @@ -873,7 +888,7 @@ int xhci_bus_resume(struct usb_hcd *hcd) } else xhci_writel(xhci, temp, port_array[port_index]); - if (DEV_HIGHSPEED(temp)) { + if (hcd->speed != HCD_USB3) { /* disable remote wake up for USB 2.0 */ u32 __iomem *addr; u32 tmp; diff --git a/trunk/drivers/usb/host/xhci-mem.c b/trunk/drivers/usb/host/xhci-mem.c index a003e79aacdc..627f3438028c 100644 --- a/trunk/drivers/usb/host/xhci-mem.c +++ b/trunk/drivers/usb/host/xhci-mem.c @@ -846,7 +846,7 @@ static u32 xhci_find_real_port_number(struct xhci_hcd *xhci, * Skip ports that don't have known speeds, or have duplicate * Extended Capabilities port speed entries. */ - if (port_speed == 0 || port_speed == -1) + if (port_speed == 0 || port_speed == DUPLICATE_ENTRY) continue; /* @@ -974,6 +974,47 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud return 0; } +/* + * Convert interval expressed as 2^(bInterval - 1) == interval into + * straight exponent value 2^n == interval. + * + */ +static unsigned int xhci_parse_exponent_interval(struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + unsigned int interval; + + interval = clamp_val(ep->desc.bInterval, 1, 16) - 1; + if (interval != ep->desc.bInterval - 1) + dev_warn(&udev->dev, + "ep %#x - rounding interval to %d microframes\n", + ep->desc.bEndpointAddress, + 1 << interval); + + return interval; +} + +/* + * Convert bInterval expressed in frames (in 1-255 range) to exponent of + * microframes, rounded down to nearest power of 2. + */ +static unsigned int xhci_parse_frame_interval(struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + unsigned int interval; + + interval = fls(8 * ep->desc.bInterval) - 1; + interval = clamp_val(interval, 3, 10); + if ((1 << interval) != 8 * ep->desc.bInterval) + dev_warn(&udev->dev, + "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n", + ep->desc.bEndpointAddress, + 1 << interval, + 8 * ep->desc.bInterval); + + return interval; +} + /* Return the polling or NAK interval. * * The polling interval is expressed in "microframes". If xHCI's Interval field @@ -982,7 +1023,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval * is set to 0. 
*/ -static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev, +static unsigned int xhci_get_endpoint_interval(struct usb_device *udev, struct usb_host_endpoint *ep) { unsigned int interval = 0; @@ -991,45 +1032,38 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev, case USB_SPEED_HIGH: /* Max NAK rate */ if (usb_endpoint_xfer_control(&ep->desc) || - usb_endpoint_xfer_bulk(&ep->desc)) + usb_endpoint_xfer_bulk(&ep->desc)) { interval = ep->desc.bInterval; + break; + } /* Fall through - SS and HS isoc/int have same decoding */ + case USB_SPEED_SUPER: if (usb_endpoint_xfer_int(&ep->desc) || - usb_endpoint_xfer_isoc(&ep->desc)) { - if (ep->desc.bInterval == 0) - interval = 0; - else - interval = ep->desc.bInterval - 1; - if (interval > 15) - interval = 15; - if (interval != ep->desc.bInterval + 1) - dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n", - ep->desc.bEndpointAddress, 1 << interval); + usb_endpoint_xfer_isoc(&ep->desc)) { + interval = xhci_parse_exponent_interval(udev, ep); } break; - /* Convert bInterval (in 1-255 frames) to microframes and round down to - * nearest power of 2. - */ + case USB_SPEED_FULL: + if (usb_endpoint_xfer_int(&ep->desc)) { + interval = xhci_parse_exponent_interval(udev, ep); + break; + } + /* + * Fall through for isochronous endpoint interval decoding + * since it uses the same rules as low speed interrupt + * endpoints. + */ + case USB_SPEED_LOW: if (usb_endpoint_xfer_int(&ep->desc) || - usb_endpoint_xfer_isoc(&ep->desc)) { - interval = fls(8*ep->desc.bInterval) - 1; - if (interval > 10) - interval = 10; - if (interval < 3) - interval = 3; - if ((1 << interval) != 8*ep->desc.bInterval) - dev_warn(&udev->dev, - "ep %#x - rounding interval" - " to %d microframes, " - "ep desc says %d microframes\n", - ep->desc.bEndpointAddress, - 1 << interval, - 8*ep->desc.bInterval); + usb_endpoint_xfer_isoc(&ep->desc)) { + + interval = xhci_parse_frame_interval(udev, ep); } break; + default: BUG(); } @@ -1041,7 +1075,7 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev, * transaction opportunities per microframe", but that goes in the Max Burst * endpoint context field. */ -static inline u32 xhci_get_endpoint_mult(struct usb_device *udev, +static u32 xhci_get_endpoint_mult(struct usb_device *udev, struct usb_host_endpoint *ep) { if (udev->speed != USB_SPEED_SUPER || @@ -1050,7 +1084,7 @@ static inline u32 xhci_get_endpoint_mult(struct usb_device *udev, return ep->ss_ep_comp.bmAttributes; } -static inline u32 xhci_get_endpoint_type(struct usb_device *udev, +static u32 xhci_get_endpoint_type(struct usb_device *udev, struct usb_host_endpoint *ep) { int in; @@ -1084,7 +1118,7 @@ static inline u32 xhci_get_endpoint_type(struct usb_device *udev, * Basically, this is the maxpacket size, multiplied by the burst size * and mult size. */ -static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci, +static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci, struct usb_device *udev, struct usb_host_endpoint *ep) { @@ -1727,12 +1761,12 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, * found a similar duplicate. */ if (xhci->port_array[i] != major_revision && - xhci->port_array[i] != (u8) -1) { + xhci->port_array[i] != DUPLICATE_ENTRY) { if (xhci->port_array[i] == 0x03) xhci->num_usb3_ports--; else xhci->num_usb2_ports--; - xhci->port_array[i] = (u8) -1; + xhci->port_array[i] = DUPLICATE_ENTRY; } /* FIXME: Should we disable the port? 
*/ continue; @@ -1831,7 +1865,7 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) for (i = 0; i < num_ports; i++) { if (xhci->port_array[i] == 0x03 || xhci->port_array[i] == 0 || - xhci->port_array[i] == -1) + xhci->port_array[i] == DUPLICATE_ENTRY) continue; xhci->usb2_ports[port_index] = diff --git a/trunk/drivers/usb/host/xhci-pci.c b/trunk/drivers/usb/host/xhci-pci.c index ceea9f33491c..a10494c2f3c7 100644 --- a/trunk/drivers/usb/host/xhci-pci.c +++ b/trunk/drivers/usb/host/xhci-pci.c @@ -114,6 +114,10 @@ static int xhci_pci_setup(struct usb_hcd *hcd) if (pdev->vendor == PCI_VENDOR_ID_NEC) xhci->quirks |= XHCI_NEC_HOST; + /* AMD PLL quirk */ + if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) + xhci->quirks |= XHCI_AMD_PLL_FIX; + /* Make sure the HC is halted. */ retval = xhci_halt(xhci); if (retval) diff --git a/trunk/drivers/usb/host/xhci-ring.c b/trunk/drivers/usb/host/xhci-ring.c index cfc1ad92473f..7437386a9a50 100644 --- a/trunk/drivers/usb/host/xhci-ring.c +++ b/trunk/drivers/usb/host/xhci-ring.c @@ -93,7 +93,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, /* Does this link TRB point to the first segment in a ring, * or was the previous TRB the last TRB on the last segment in the ERST? */ -static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring, +static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring, struct xhci_segment *seg, union xhci_trb *trb) { if (ring == xhci->event_ring) @@ -107,7 +107,7 @@ static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring * segment? I.e. would the updated event TRB pointer step off the end of the * event seg? */ -static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, +static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, struct xhci_segment *seg, union xhci_trb *trb) { if (ring == xhci->event_ring) @@ -116,7 +116,7 @@ static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK); } -static inline int enqueue_is_link_trb(struct xhci_ring *ring) +static int enqueue_is_link_trb(struct xhci_ring *ring) { struct xhci_link_trb *link = &ring->enqueue->link; return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)); @@ -592,7 +592,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, ep->ep_state |= SET_DEQ_PENDING; } -static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci, +static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci, struct xhci_virt_ep *ep) { ep->ep_state &= ~EP_HALT_PENDING; @@ -619,6 +619,13 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, /* Only giveback urb when this is the last td in urb */ if (urb_priv->td_cnt == urb_priv->length) { + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { + xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; + if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { + if (xhci->quirks & XHCI_AMD_PLL_FIX) + usb_amd_quirk_pll_enable(); + } + } usb_hcd_unlink_urb_from_ep(hcd, urb); xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb); @@ -1209,7 +1216,7 @@ static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd, * Skip ports that don't have known speeds, or have duplicate * Extended Capabilities port speed entries. 
*/ - if (port_speed == 0 || port_speed == -1) + if (port_speed == 0 || port_speed == DUPLICATE_ENTRY) continue; /* @@ -1235,6 +1242,7 @@ static void handle_port_status(struct xhci_hcd *xhci, u8 major_revision; struct xhci_bus_state *bus_state; u32 __iomem **port_array; + bool bogus_port_status = false; /* Port status change events always have a successful completion code */ if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) { @@ -1247,6 +1255,7 @@ static void handle_port_status(struct xhci_hcd *xhci, max_ports = HCS_MAX_PORTS(xhci->hcs_params1); if ((port_id <= 0) || (port_id > max_ports)) { xhci_warn(xhci, "Invalid port id %d\n", port_id); + bogus_port_status = true; goto cleanup; } @@ -1258,12 +1267,14 @@ static void handle_port_status(struct xhci_hcd *xhci, xhci_warn(xhci, "Event for port %u not in " "Extended Capabilities, ignoring.\n", port_id); + bogus_port_status = true; goto cleanup; } - if (major_revision == (u8) -1) { + if (major_revision == DUPLICATE_ENTRY) { xhci_warn(xhci, "Event for port %u duplicated in" "Extended Capabilities, ignoring.\n", port_id); + bogus_port_status = true; goto cleanup; } @@ -1335,6 +1346,13 @@ static void handle_port_status(struct xhci_hcd *xhci, /* Update event ring dequeue pointer before dropping the lock */ inc_deq(xhci, xhci->event_ring, true); + /* Don't make the USB core poll the roothub if we got a bad port status + * change event. Besides, at that point we can't tell which roothub + * (USB 2.0 or USB 3.0) to kick. + */ + if (bogus_port_status) + return; + spin_unlock(&xhci->lock); /* Pass this up to the core */ usb_hcd_poll_rh_status(hcd); @@ -1554,8 +1572,17 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, urb_priv->td_cnt++; /* Giveback the urb when all the tds are completed */ - if (urb_priv->td_cnt == urb_priv->length) + if (urb_priv->td_cnt == urb_priv->length) { ret = 1; + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { + xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; + if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs + == 0) { + if (xhci->quirks & XHCI_AMD_PLL_FIX) + usb_amd_quirk_pll_enable(); + } + } + } } return ret; @@ -1675,71 +1702,52 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, struct urb_priv *urb_priv; int idx; int len = 0; - int skip_td = 0; union xhci_trb *cur_trb; struct xhci_segment *cur_seg; + struct usb_iso_packet_descriptor *frame; u32 trb_comp_code; + bool skip_td = false; ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); trb_comp_code = GET_COMP_CODE(event->transfer_len); urb_priv = td->urb->hcpriv; idx = urb_priv->td_cnt; + frame = &td->urb->iso_frame_desc[idx]; - if (ep->skip) { - /* The transfer is partly done */ - *status = -EXDEV; - td->urb->iso_frame_desc[idx].status = -EXDEV; - } else { - /* handle completion code */ - switch (trb_comp_code) { - case COMP_SUCCESS: - td->urb->iso_frame_desc[idx].status = 0; - xhci_dbg(xhci, "Successful isoc transfer!\n"); - break; - case COMP_SHORT_TX: - if (td->urb->transfer_flags & URB_SHORT_NOT_OK) - td->urb->iso_frame_desc[idx].status = - -EREMOTEIO; - else - td->urb->iso_frame_desc[idx].status = 0; - break; - case COMP_BW_OVER: - td->urb->iso_frame_desc[idx].status = -ECOMM; - skip_td = 1; - break; - case COMP_BUFF_OVER: - case COMP_BABBLE: - td->urb->iso_frame_desc[idx].status = -EOVERFLOW; - skip_td = 1; - break; - case COMP_STALL: - td->urb->iso_frame_desc[idx].status = -EPROTO; - skip_td = 1; - break; - case COMP_STOP: - case COMP_STOP_INVAL: - break; - default: - td->urb->iso_frame_desc[idx].status = -1; - 
break; - } - } - - /* calc actual length */ - if (ep->skip) { - td->urb->iso_frame_desc[idx].actual_length = 0; - /* Update ring dequeue pointer */ - while (ep_ring->dequeue != td->last_trb) - inc_deq(xhci, ep_ring, false); - inc_deq(xhci, ep_ring, false); - return finish_td(xhci, td, event_trb, event, ep, status, true); + /* handle completion code */ + switch (trb_comp_code) { + case COMP_SUCCESS: + frame->status = 0; + xhci_dbg(xhci, "Successful isoc transfer!\n"); + break; + case COMP_SHORT_TX: + frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ? + -EREMOTEIO : 0; + break; + case COMP_BW_OVER: + frame->status = -ECOMM; + skip_td = true; + break; + case COMP_BUFF_OVER: + case COMP_BABBLE: + frame->status = -EOVERFLOW; + skip_td = true; + break; + case COMP_STALL: + frame->status = -EPROTO; + skip_td = true; + break; + case COMP_STOP: + case COMP_STOP_INVAL: + break; + default: + frame->status = -1; + break; } - if (trb_comp_code == COMP_SUCCESS || skip_td == 1) { - td->urb->iso_frame_desc[idx].actual_length = - td->urb->iso_frame_desc[idx].length; - td->urb->actual_length += - td->urb->iso_frame_desc[idx].length; + if (trb_comp_code == COMP_SUCCESS || skip_td) { + frame->actual_length = frame->length; + td->urb->actual_length += frame->length; } else { for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg; cur_trb != event_trb; @@ -1755,7 +1763,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, TRB_LEN(event->transfer_len); if (trb_comp_code != COMP_STOP_INVAL) { - td->urb->iso_frame_desc[idx].actual_length = len; + frame->actual_length = len; td->urb->actual_length += len; } } @@ -1766,6 +1774,35 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, return finish_td(xhci, td, event_trb, event, ep, status, false); } +static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, + struct xhci_transfer_event *event, + struct xhci_virt_ep *ep, int *status) +{ + struct xhci_ring *ep_ring; + struct urb_priv *urb_priv; + struct usb_iso_packet_descriptor *frame; + int idx; + + ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); + urb_priv = td->urb->hcpriv; + idx = urb_priv->td_cnt; + frame = &td->urb->iso_frame_desc[idx]; + + /* The transfer is partly done */ + *status = -EXDEV; + frame->status = -EXDEV; + + /* calc actual length */ + frame->actual_length = 0; + + /* Update ring dequeue pointer */ + while (ep_ring->dequeue != td->last_trb) + inc_deq(xhci, ep_ring, false); + inc_deq(xhci, ep_ring, false); + + return finish_td(xhci, td, NULL, event, ep, status, true); +} + /* * Process bulk and interrupt tds, update urb status and actual_length. */ @@ -2024,36 +2061,42 @@ static int handle_tx_event(struct xhci_hcd *xhci, } td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); + /* Is this a TRB in the currently executing TD? */ event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, td->last_trb, event_dma); - if (event_seg && ep->skip) { + if (!event_seg) { + if (!ep->skip || + !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { + /* HC is busted, give up! */ + xhci_err(xhci, + "ERROR Transfer event TRB DMA ptr not " + "part of current TD\n"); + return -ESHUTDOWN; + } + + ret = skip_isoc_td(xhci, td, event, ep, &status); + goto cleanup; + } + + if (ep->skip) { xhci_dbg(xhci, "Found td. Clear skip flag.\n"); ep->skip = false; } - if (!event_seg && - (!ep->skip || !usb_endpoint_xfer_isoc(&td->urb->ep->desc))) { - /* HC is busted, give up! 
*/ - xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not " - "part of current TD\n"); - return -ESHUTDOWN; - } - if (event_seg) { - event_trb = &event_seg->trbs[(event_dma - - event_seg->dma) / sizeof(*event_trb)]; - /* - * No-op TRB should not trigger interrupts. - * If event_trb is a no-op TRB, it means the - * corresponding TD has been cancelled. Just ignore - * the TD. - */ - if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK) - == TRB_TYPE(TRB_TR_NOOP)) { - xhci_dbg(xhci, "event_trb is a no-op TRB. " - "Skip it\n"); - goto cleanup; - } + event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / + sizeof(*event_trb)]; + /* + * No-op TRB should not trigger interrupts. + * If event_trb is a no-op TRB, it means the + * corresponding TD has been cancelled. Just ignore + * the TD. + */ + if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK) + == TRB_TYPE(TRB_TR_NOOP)) { + xhci_dbg(xhci, + "event_trb is a no-op TRB. Skip it\n"); + goto cleanup; } /* Now update the urb's actual_length and give back to @@ -3126,6 +3169,12 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, } } + if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { + if (xhci->quirks & XHCI_AMD_PLL_FIX) + usb_amd_quirk_pll_disable(); + } + xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++; + giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, start_cycle, start_trb); return 0; diff --git a/trunk/drivers/usb/host/xhci.c b/trunk/drivers/usb/host/xhci.c index 196e0181b2ed..81b976e45880 100644 --- a/trunk/drivers/usb/host/xhci.c +++ b/trunk/drivers/usb/host/xhci.c @@ -550,6 +550,9 @@ void xhci_stop(struct usb_hcd *hcd) del_timer_sync(&xhci->event_ring_timer); #endif + if (xhci->quirks & XHCI_AMD_PLL_FIX) + usb_amd_dev_put(); + xhci_dbg(xhci, "// Disabling event ring interrupts\n"); temp = xhci_readl(xhci, &xhci->op_regs->status); xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status); @@ -771,7 +774,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) /* If restore operation fails, re-initialize the HC during resume */ if ((temp & STS_SRE) || hibernated) { - usb_root_hub_lost_power(hcd->self.root_hub); + /* Let the USB core know _both_ roothubs lost power. */ + usb_root_hub_lost_power(xhci->main_hcd->self.root_hub); + usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub); xhci_dbg(xhci, "Stop HCD\n"); xhci_halt(xhci); @@ -2386,10 +2391,18 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) /* Everything but endpoint 0 is disabled, so free or cache the rings. 
*/ last_freed_endpoint = 1; for (i = 1; i < 31; ++i) { - if (!virt_dev->eps[i].ring) - continue; - xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); - last_freed_endpoint = i; + struct xhci_virt_ep *ep = &virt_dev->eps[i]; + + if (ep->ep_state & EP_HAS_STREAMS) { + xhci_free_stream_info(xhci, ep->stream_info); + ep->stream_info = NULL; + ep->ep_state &= ~EP_HAS_STREAMS; + } + + if (ep->ring) { + xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); + last_freed_endpoint = i; + } } xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); diff --git a/trunk/drivers/usb/host/xhci.h b/trunk/drivers/usb/host/xhci.h index 07e263063e37..ba1be6b7cc6d 100644 --- a/trunk/drivers/usb/host/xhci.h +++ b/trunk/drivers/usb/host/xhci.h @@ -30,6 +30,7 @@ /* Code sharing between pci-quirks and xhci hcd */ #include "xhci-ext-caps.h" +#include "pci-quirks.h" /* xHCI PCI Configuration Registers */ #define XHCI_SBRN_OFFSET (0x60) @@ -232,7 +233,7 @@ struct xhci_op_regs { * notification type that matches a bit set in this bit field. */ #define DEV_NOTE_MASK (0xffff) -#define ENABLE_DEV_NOTE(x) (1 << x) +#define ENABLE_DEV_NOTE(x) (1 << (x)) /* Most of the device notification types should only be used for debug. * SW does need to pay attention to function wake notifications. */ @@ -348,6 +349,9 @@ struct xhci_op_regs { /* Initiate a warm port reset - complete when PORT_WRC is '1' */ #define PORT_WR (1 << 31) +/* We mark duplicate entries with -1 */ +#define DUPLICATE_ENTRY ((u8)(-1)) + /* Port Power Management Status and Control - port_power_base bitmasks */ /* Inactivity timer value for transitions into U1, in microseconds. * Timeout can be up to 127us. 0xFF means an infinite timeout. @@ -601,11 +605,11 @@ struct xhci_ep_ctx { #define EP_STATE_STOPPED 3 #define EP_STATE_ERROR 4 /* Mult - Max number of burtst within an interval, in EP companion desc. */ -#define EP_MULT(p) ((p & 0x3) << 8) +#define EP_MULT(p) (((p) & 0x3) << 8) /* bits 10:14 are Max Primary Streams */ /* bit 15 is Linear Stream Array */ /* Interval - period between requests to an endpoint - 125u increments. */ -#define EP_INTERVAL(p) ((p & 0xff) << 16) +#define EP_INTERVAL(p) (((p) & 0xff) << 16) #define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff)) #define EP_MAXPSTREAMS_MASK (0x1f << 10) #define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) @@ -1276,6 +1280,7 @@ struct xhci_hcd { #define XHCI_LINK_TRB_QUIRK (1 << 0) #define XHCI_RESET_EP_QUIRK (1 << 1) #define XHCI_NEC_HOST (1 << 2) +#define XHCI_AMD_PLL_FIX (1 << 3) /* There are two roothubs to keep track of bus suspend info for */ struct xhci_bus_state bus_state[2]; /* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */ diff --git a/trunk/drivers/usb/musb/Kconfig b/trunk/drivers/usb/musb/Kconfig index 4cbb7e4b368d..74073b363c30 100644 --- a/trunk/drivers/usb/musb/Kconfig +++ b/trunk/drivers/usb/musb/Kconfig @@ -14,7 +14,7 @@ config USB_MUSB_HDRC select TWL4030_USB if MACH_OMAP_3430SDP select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA select USB_OTG_UTILS - tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' + bool 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' help Say Y here if your system has a dual role high speed USB controller based on the Mentor Graphics silicon IP. Then @@ -30,8 +30,8 @@ config USB_MUSB_HDRC If you do not know what this is, please say N. - To compile this driver as a module, choose M here; the - module will be called "musb-hdrc". 
+# To compile this driver as a module, choose M here; the +# module will be called "musb-hdrc". choice prompt "Platform Glue Layer" diff --git a/trunk/drivers/usb/musb/blackfin.c b/trunk/drivers/usb/musb/blackfin.c index 52312e8af213..8e2a1ff8a35a 100644 --- a/trunk/drivers/usb/musb/blackfin.c +++ b/trunk/drivers/usb/musb/blackfin.c @@ -21,6 +21,7 @@ #include #include "musb_core.h" +#include "musbhsdma.h" #include "blackfin.h" struct bfin_glue { @@ -332,6 +333,27 @@ static int bfin_musb_set_mode(struct musb *musb, u8 musb_mode) return -EIO; } +static int bfin_musb_adjust_channel_params(struct dma_channel *channel, + u16 packet_sz, u8 *mode, + dma_addr_t *dma_addr, u32 *len) +{ + struct musb_dma_channel *musb_channel = channel->private_data; + + /* + * Anomaly 05000450 might cause data corruption when using DMA + * MODE 1 transmits with short packet. So to work around this, + * we truncate all MODE 1 transfers down to a multiple of the + * max packet size, and then do the last short packet transfer + * (if there is any) using MODE 0. + */ + if (ANOMALY_05000450) { + if (musb_channel->transmit && *mode == 1) + *len = *len - (*len % packet_sz); + } + + return 0; +} + static void bfin_musb_reg_init(struct musb *musb) { if (ANOMALY_05000346) { @@ -430,6 +452,8 @@ static const struct musb_platform_ops bfin_ops = { .vbus_status = bfin_musb_vbus_status, .set_vbus = bfin_musb_set_vbus, + + .adjust_channel_params = bfin_musb_adjust_channel_params, }; static u64 bfin_dmamask = DMA_BIT_MASK(32); diff --git a/trunk/drivers/usb/musb/cppi_dma.c b/trunk/drivers/usb/musb/cppi_dma.c index de55a3c3259a..ab434fbd8c35 100644 --- a/trunk/drivers/usb/musb/cppi_dma.c +++ b/trunk/drivers/usb/musb/cppi_dma.c @@ -597,12 +597,12 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx) length = min(n_bds * maxpacket, length); } - DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n", + DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u\n", tx->index, maxpacket, rndis ? "rndis" : "transparent", n_bds, - addr, length); + (unsigned long long)addr, length); cppi_rndis_update(tx, 0, musb->ctrl_base, rndis); @@ -820,7 +820,7 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket) length = min(n_bds * maxpacket, length); DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) " - "dma 0x%x len %u %u/%u\n", + "dma 0x%llx len %u %u/%u\n", rx->index, maxpacket, onepacket ? (is_rndis ? 
"rndis" : "onepacket") @@ -829,7 +829,8 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket) musb_readl(tibase, DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) & 0xffff, - addr, length, rx->channel.actual_len, rx->buf_len); + (unsigned long long)addr, length, + rx->channel.actual_len, rx->buf_len); /* only queue one segment at a time, since the hardware prevents * correct queue shutdown after unexpected short packets @@ -1039,9 +1040,9 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch) if (!completed && (bd->hw_options & CPPI_OWN_SET)) break; - DBG(5, "C/RXBD %08x: nxt %08x buf %08x " + DBG(5, "C/RXBD %llx: nxt %08x buf %08x " "off.len %08x opt.len %08x (%d)\n", - bd->dma, bd->hw_next, bd->hw_bufp, + (unsigned long long)bd->dma, bd->hw_next, bd->hw_bufp, bd->hw_off_len, bd->hw_options, rx->channel.actual_len); @@ -1111,11 +1112,12 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch) musb_ep_select(cppi->mregs, rx->index + 1); csr = musb_readw(regs, MUSB_RXCSR); if (csr & MUSB_RXCSR_DMAENAB) { - DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n", + DBG(4, "list%d %p/%p, last %llx%s, csr %04x\n", rx->index, rx->head, rx->tail, rx->last_processed - ? rx->last_processed->dma + ? (unsigned long long) + rx->last_processed->dma : 0, completed ? ", completed" : "", csr); @@ -1167,8 +1169,11 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id) tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG); rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG); - if (!tx && !rx) + if (!tx && !rx) { + if (cppi->irq) + spin_unlock_irqrestore(&musb->lock, flags); return IRQ_NONE; + } DBG(4, "CPPI IRQ Tx%x Rx%x\n", tx, rx); @@ -1199,7 +1204,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id) */ if (NULL == bd) { DBG(1, "null BD\n"); - tx_ram->tx_complete = 0; + musb_writel(&tx_ram->tx_complete, 0, 0); continue; } @@ -1452,7 +1457,7 @@ static int cppi_channel_abort(struct dma_channel *channel) * compare mode by writing 1 to the tx_complete register. 
*/ cppi_reset_tx(tx_ram, 1); - cppi_ch->head = 0; + cppi_ch->head = NULL; musb_writel(&tx_ram->tx_complete, 0, 1); cppi_dump_tx(5, cppi_ch, " (done teardown)"); diff --git a/trunk/drivers/usb/musb/musb_core.c b/trunk/drivers/usb/musb/musb_core.c index 630ae7f3cd4c..f10ff00ca09e 100644 --- a/trunk/drivers/usb/musb/musb_core.c +++ b/trunk/drivers/usb/musb/musb_core.c @@ -1030,6 +1030,7 @@ static void musb_shutdown(struct platform_device *pdev) struct musb *musb = dev_to_musb(&pdev->dev); unsigned long flags; + pm_runtime_get_sync(musb->controller); spin_lock_irqsave(&musb->lock, flags); musb_platform_disable(musb); musb_generic_disable(musb); @@ -1040,6 +1041,7 @@ static void musb_shutdown(struct platform_device *pdev) musb_writeb(musb->mregs, MUSB_DEVCTL, 0); musb_platform_exit(musb); + pm_runtime_put(musb->controller); /* FIXME power down */ } diff --git a/trunk/drivers/usb/musb/musb_core.h b/trunk/drivers/usb/musb/musb_core.h index 4bd9e2145ee4..0e053b587960 100644 --- a/trunk/drivers/usb/musb/musb_core.h +++ b/trunk/drivers/usb/musb/musb_core.h @@ -261,6 +261,7 @@ enum musb_g_ep0_state { * @try_ilde: tries to idle the IP * @vbus_status: returns vbus status if possible * @set_vbus: forces vbus status + * @channel_program: pre check for standard dma channel_program func */ struct musb_platform_ops { int (*init)(struct musb *musb); @@ -274,6 +275,10 @@ struct musb_platform_ops { int (*vbus_status)(struct musb *musb); void (*set_vbus)(struct musb *musb, int on); + + int (*adjust_channel_params)(struct dma_channel *channel, + u16 packet_sz, u8 *mode, + dma_addr_t *dma_addr, u32 *len); }; /* diff --git a/trunk/drivers/usb/musb/musb_gadget.c b/trunk/drivers/usb/musb/musb_gadget.c index 98519c5d8b5c..f47c20197c61 100644 --- a/trunk/drivers/usb/musb/musb_gadget.c +++ b/trunk/drivers/usb/musb/musb_gadget.c @@ -535,7 +535,7 @@ void musb_g_tx(struct musb *musb, u8 epnum) is_dma = 1; csr |= MUSB_TXCSR_P_WZC_BITS; csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | - MUSB_TXCSR_TXPKTRDY); + MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET); musb_writew(epio, MUSB_TXCSR, csr); /* Ensure writebuffer is empty. */ csr = musb_readw(epio, MUSB_TXCSR); @@ -1296,7 +1296,7 @@ static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request) } /* if the hardware doesn't have the request, easy ... */ - if (musb_ep->req_list.next != &request->list || musb_ep->busy) + if (musb_ep->req_list.next != &req->list || musb_ep->busy) musb_g_giveback(musb_ep, request, -ECONNRESET); /* ... else abort the dma transfer ... 
*/ @@ -1887,11 +1887,9 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver, otg_set_vbus(musb->xceiv, 1); hcd->self.uses_pio_for_control = 1; - - if (musb->xceiv->last_event == USB_EVENT_NONE) - pm_runtime_put(musb->controller); - } + if (musb->xceiv->last_event == USB_EVENT_NONE) + pm_runtime_put(musb->controller); return 0; diff --git a/trunk/drivers/usb/musb/musbhsdma.c b/trunk/drivers/usb/musb/musbhsdma.c index 0144a2d481fd..d281792db05c 100644 --- a/trunk/drivers/usb/musb/musbhsdma.c +++ b/trunk/drivers/usb/musb/musbhsdma.c @@ -169,6 +169,14 @@ static int dma_channel_program(struct dma_channel *channel, BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN || channel->status == MUSB_DMA_STATUS_BUSY); + /* Let targets check/tweak the arguments */ + if (musb->ops->adjust_channel_params) { + int ret = musb->ops->adjust_channel_params(channel, + packet_sz, &mode, &dma_addr, &len); + if (ret) + return ret; + } + /* * The DMA engine in RTL1.8 and above cannot handle * DMA addresses that are not aligned to a 4 byte boundary. diff --git a/trunk/drivers/usb/musb/omap2430.c b/trunk/drivers/usb/musb/omap2430.c index 25cb8b0003b1..e9e60b6e0583 100644 --- a/trunk/drivers/usb/musb/omap2430.c +++ b/trunk/drivers/usb/musb/omap2430.c @@ -259,9 +259,10 @@ static int musb_otg_notifications(struct notifier_block *nb, case USB_EVENT_VBUS: DBG(4, "VBUS Connect\n"); +#ifdef CONFIG_USB_GADGET_MUSB_HDRC if (musb->gadget_driver) pm_runtime_get_sync(musb->controller); - +#endif otg_init(musb->xceiv); break; @@ -269,7 +270,7 @@ static int musb_otg_notifications(struct notifier_block *nb, DBG(4, "VBUS Disconnect\n"); #ifdef CONFIG_USB_GADGET_MUSB_HDRC - if (is_otg_enabled(musb)) + if (is_otg_enabled(musb) || is_peripheral_enabled(musb)) if (musb->gadget_driver) #endif { diff --git a/trunk/drivers/usb/musb/ux500.c b/trunk/drivers/usb/musb/ux500.c index d6384e4aeef9..f7e04bf34a13 100644 --- a/trunk/drivers/usb/musb/ux500.c +++ b/trunk/drivers/usb/musb/ux500.c @@ -93,6 +93,8 @@ static int __init ux500_probe(struct platform_device *pdev) } musb->dev.parent = &pdev->dev; + musb->dev.dma_mask = pdev->dev.dma_mask; + musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask; glue->dev = &pdev->dev; glue->musb = musb; diff --git a/trunk/drivers/usb/serial/ftdi_sio.c b/trunk/drivers/usb/serial/ftdi_sio.c index a973c7a29d6e..4de6ef0ae52a 100644 --- a/trunk/drivers/usb/serial/ftdi_sio.c +++ b/trunk/drivers/usb/serial/ftdi_sio.c @@ -151,6 +151,8 @@ static struct ftdi_sio_quirk ftdi_stmclite_quirk = { * /sys/bus/usb/ftdi_sio/new_id, then send patch/report! 
  */
 static struct usb_device_id id_table_combined [] = {
+	{ USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
@@ -525,6 +527,7 @@ static struct usb_device_id id_table_combined [] = {
 	{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_8_PID) },
 	{ USB_DEVICE(IDTECH_VID, IDTECH_IDT1221U_PID) },
 	{ USB_DEVICE(OCT_VID, OCT_US101_PID) },
+	{ USB_DEVICE(OCT_VID, OCT_DK201_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_HE_TIRA1_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_HE_TIRA1_quirk },
 	{ USB_DEVICE(FTDI_VID, FTDI_USB_UIRT_PID),
@@ -787,6 +790,8 @@ static struct usb_device_id id_table_combined [] = {
 	{ USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	{ USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) },
+	{ USB_DEVICE(FTDI_VID, HAMEG_HO720_PID) },
+	{ USB_DEVICE(FTDI_VID, HAMEG_HO730_PID) },
 	{ USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) },
 	{ USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) },
 	{ USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) },
diff --git a/trunk/drivers/usb/serial/ftdi_sio_ids.h b/trunk/drivers/usb/serial/ftdi_sio_ids.h
index c543e55bafba..efffc23723bd 100644
--- a/trunk/drivers/usb/serial/ftdi_sio_ids.h
+++ b/trunk/drivers/usb/serial/ftdi_sio_ids.h
@@ -300,6 +300,8 @@
  * Hameg HO820 and HO870 interface (using VID 0x0403)
  */
 #define HAMEG_HO820_PID		0xed74
+#define HAMEG_HO730_PID		0xed73
+#define HAMEG_HO720_PID		0xed72
 #define HAMEG_HO870_PID		0xed71
 
 /*
@@ -572,6 +574,7 @@
 /* Note: OCT US101 is also rebadged as Dick Smith Electronics (NZ) XH6381 */
 /* Also rebadged as Dick Smith Electronics (Aus) XH6451 */
 /* Also rebadged as SIIG Inc. model US2308 hardware version 1 */
+#define OCT_DK201_PID		0x0103	/* OCT DK201 USB docking station */
 #define OCT_US101_PID		0x0421	/* OCT US101 USB to RS-232 */
 
 /*
@@ -1141,3 +1144,12 @@
 #define QIHARDWARE_VID			0x20B7
 #define MILKYMISTONE_JTAGSERIAL_PID	0x0713
 
+/*
+ * CTI GmbH RS485 Converter http://www.cti-lean.com/
+ */
+/* USB-485-Mini*/
+#define FTDI_CTI_MINI_PID	0xF608
+/* USB-Nano-485*/
+#define FTDI_CTI_NANO_PID	0xF60B
+
+
diff --git a/trunk/drivers/usb/serial/option.c b/trunk/drivers/usb/serial/option.c
index 75c7f456eed5..d77ff0435896 100644
--- a/trunk/drivers/usb/serial/option.c
+++ b/trunk/drivers/usb/serial/option.c
@@ -407,6 +407,10 @@ static void option_instat_callback(struct urb *urb);
 /* ONDA MT825UP HSDPA 14.2 modem */
 #define ONDA_MT825UP			0x000b
 
+/* Samsung products */
+#define SAMSUNG_VENDOR_ID		0x04e8
+#define SAMSUNG_PRODUCT_GT_B3730	0x6889
+
 /* some devices interfaces need special handling due to a number of reasons */
 enum option_blacklist_reason {
 	OPTION_BLACKLIST_NONE = 0,
@@ -968,6 +972,7 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
 	{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
 	{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
+	{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730/GT-B3710 LTE USB modem.*/
 	{ } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/trunk/drivers/usb/serial/qcserial.c b/trunk/drivers/usb/serial/qcserial.c
index 8858201eb1d3..54a9dab1f33b 100644
--- a/trunk/drivers/usb/serial/qcserial.c
+++ b/trunk/drivers/usb/serial/qcserial.c
@@ -111,7 +111,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
 	ifnum = intf->desc.bInterfaceNumber;
 	dbg("This Interface = %d", ifnum);
 
-	data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private),
+	data = kzalloc(sizeof(struct usb_wwan_intf_private),
 					 GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
@@ -134,8 +134,10 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
 		    usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) {
 			dbg("QDL port found");
 
-			if (serial->interface->num_altsetting == 1)
-				return 0;
+			if (serial->interface->num_altsetting == 1) {
+				retval = 0; /* Success */
+				break;
+			}
 
 			retval = usb_set_interface(serial->dev, ifnum, 1);
 			if (retval < 0) {
@@ -145,7 +147,6 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
 				retval = -ENODEV;
 				kfree(data);
 			}
-			return retval;
 
 		}
 		break;
@@ -166,6 +167,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
 					"Could not set interface, error %d\n",
 					retval);
 				retval = -ENODEV;
+				kfree(data);
 			}
 		} else if (ifnum == 2) {
 			dbg("Modem port found");
@@ -177,7 +179,6 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
 				retval = -ENODEV;
 				kfree(data);
 			}
-			return retval;
 		} else if (ifnum==3) {
 			/*
 			 * NMEA (serial line 9600 8N1)
@@ -191,6 +192,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
 					"Could not set interface, error %d\n",
 					retval);
 				retval = -ENODEV;
+				kfree(data);
 			}
 		}
 		break;
@@ -199,12 +201,27 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
 		dev_err(&serial->dev->dev,
 			"unknown number of interfaces: %d\n", nintf);
 		kfree(data);
-		return -ENODEV;
+		retval = -ENODEV;
 	}
 
+	/* Set serial->private if not returning -ENODEV */
+	if (retval != -ENODEV)
+		usb_set_serial_data(serial, data);
 	return retval;
 }
 
+static void qc_release(struct usb_serial *serial)
+{
+	struct usb_wwan_intf_private *priv = usb_get_serial_data(serial);
+
+	dbg("%s", __func__);
+
+	/* Call usb_wwan release & free the private data allocated in qcprobe */
+	usb_wwan_release(serial);
+	usb_set_serial_data(serial, NULL);
+	kfree(priv);
+}
+
 static struct usb_serial_driver qcdevice = {
 	.driver = {
 		.owner     = THIS_MODULE,
@@ -222,7 +239,7 @@ static struct usb_serial_driver qcdevice = {
 	.chars_in_buffer = usb_wwan_chars_in_buffer,
 	.attach		 = usb_wwan_startup,
 	.disconnect	 = usb_wwan_disconnect,
-	.release	 = usb_wwan_release,
+	.release	 = qc_release,
 #ifdef CONFIG_PM
 	.suspend	 = usb_wwan_suspend,
 	.resume		 = usb_wwan_resume,
diff --git a/trunk/drivers/usb/storage/isd200.c b/trunk/drivers/usb/storage/isd200.c
index 09e52ba47ddf..ffc4193e9505 100644
--- a/trunk/drivers/usb/storage/isd200.c
+++ b/trunk/drivers/usb/storage/isd200.c
@@ -499,7 +499,6 @@ static int isd200_action( struct us_data *us, int action,
 	memset(&ata, 0, sizeof(ata));
 	srb->cmnd = info->cmnd;
 	srb->device = &srb_dev;
-	++srb->serial_number;
 
 	ata.generic.SignatureByte0 = info->ConfigData.ATAMajorCommand;
 	ata.generic.SignatureByte1 = info->ConfigData.ATAMinorCommand;
diff --git a/trunk/drivers/vhost/vhost.c b/trunk/drivers/vhost/vhost.c
index 2ab291241635..7aa4eea930f1 100644
--- a/trunk/drivers/vhost/vhost.c
+++ b/trunk/drivers/vhost/vhost.c
@@ -4,7 +4,7 @@
  * Author: Michael S. Tsirkin
  *
  * Inspiration, some code, and most witty comments come from
- * Documentation/lguest/lguest.c, by Rusty Russell
+ * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
diff --git a/trunk/drivers/video/acornfb.c b/trunk/drivers/video/acornfb.c
index 82acb8dc4aa1..6183a57eb69d 100644
--- a/trunk/drivers/video/acornfb.c
+++ b/trunk/drivers/video/acornfb.c
@@ -66,7 +66,7 @@
  * have.  Allow 1% either way on the nominal for TVs.
  */
 #define NR_MONTYPES	6
-static struct fb_monspecs monspecs[NR_MONTYPES] __initdata = {
+static struct fb_monspecs monspecs[NR_MONTYPES] __devinitdata = {
 	{	/* TV */
 		.hfmin	= 15469,
 		.hfmax	= 15781,
@@ -873,7 +873,7 @@ static struct fb_ops acornfb_ops = {
 /*
  * Everything after here is initialisation!!!
  */
-static struct fb_videomode modedb[] __initdata = {
+static struct fb_videomode modedb[] __devinitdata = {
 	{	/* 320x256 @ 50Hz */
 		NULL, 50,  320,  256, 125000,  92,  62,  35, 19,  38, 2,
 		FB_SYNC_COMP_HIGH_ACT,
@@ -925,8 +925,7 @@ static struct fb_videomode modedb[] __initdata = {
 	}
 };
 
-static struct fb_videomode __initdata
-acornfb_default_mode = {
+static struct fb_videomode acornfb_default_mode __devinitdata = {
 	.name =		NULL,
 	.refresh =	60,
 	.xres =		640,
@@ -942,7 +941,7 @@ acornfb_default_mode = {
 	.vmode =	FB_VMODE_NONINTERLACED
 };
 
-static void __init acornfb_init_fbinfo(void)
+static void __devinit acornfb_init_fbinfo(void)
 {
 	static int first = 1;
 
@@ -1018,8 +1017,7 @@ static void __init acornfb_init_fbinfo(void)
 * size can optionally be followed by 'M' or 'K' for
 * MB or KB respectively.
 */
-static void __init
-acornfb_parse_mon(char *opt)
+static void __devinit acornfb_parse_mon(char *opt)
 {
 	char *p = opt;
 
@@ -1066,8 +1064,7 @@ acornfb_parse_mon(char *opt)
 	current_par.montype = -1;
 }
 
-static void __init
-acornfb_parse_montype(char *opt)
+static void __devinit acornfb_parse_montype(char *opt)
 {
 	current_par.montype = -2;
 
@@ -1108,8 +1105,7 @@ acornfb_parse_montype(char *opt)
 	}
 }
 
-static void __init
-acornfb_parse_dram(char *opt)
+static void __devinit acornfb_parse_dram(char *opt)
 {
 	unsigned int size;
 
@@ -1134,15 +1130,14 @@ acornfb_parse_dram(char *opt)
 static struct options {
 	char *name;
 	void (*parse)(char *opt);
-} opt_table[] __initdata = {
+} opt_table[] __devinitdata = {
 	{ "mon",     acornfb_parse_mon },
 	{ "montype", acornfb_parse_montype },
 	{ "dram",    acornfb_parse_dram },
 	{ NULL, NULL }
 };
 
-int __init
-acornfb_setup(char *options)
+static int __devinit acornfb_setup(char *options)
 {
 	struct options *optp;
 	char *opt;
@@ -1179,8 +1174,7 @@ acornfb_setup(char *options)
 * Detect type of monitor connected
 * For now, we just assume SVGA
 */
-static int __init
-acornfb_detect_monitortype(void)
+static int __devinit acornfb_detect_monitortype(void)
 {
 	return 4;
 }
diff --git a/trunk/drivers/video/atafb.c b/trunk/drivers/video/atafb.c
index 5b2b5ef4edba..64e41f5448c4 100644
--- a/trunk/drivers/video/atafb.c
+++ b/trunk/drivers/video/atafb.c
@@ -3117,7 +3117,7 @@ int __init atafb_init(void)
 		atafb_ops.fb_setcolreg = &falcon_setcolreg;
 		error = request_irq(IRQ_AUTO_4, falcon_vbl_switcher,
 				    IRQ_TYPE_PRIO,
-				    "framebuffer/modeswitch",
+				    "framebuffer:modeswitch",
 				    falcon_vbl_switcher);
 		if (error)
 			return error;
diff --git a/trunk/drivers/video/fbmem.c b/trunk/drivers/video/fbmem.c
index e0c2284924b6..5aac00eb1830 100644
--- a/trunk/drivers/video/fbmem.c
+++ b/trunk/drivers/video/fbmem.c
@@ -42,9 +42,34 @@
 
 #define FBPIXMAPSIZE	(1024 * 8)
 
+static DEFINE_MUTEX(registration_lock);
 struct fb_info *registered_fb[FB_MAX] __read_mostly;
 int num_registered_fb __read_mostly;
 
+static struct fb_info *get_fb_info(unsigned int idx)
+{
+	struct fb_info *fb_info;
+
+	if (idx >= FB_MAX)
+		return ERR_PTR(-ENODEV);
+
+	mutex_lock(&registration_lock);
+	fb_info = registered_fb[idx];
+	if (fb_info)
+		atomic_inc(&fb_info->count);
+	mutex_unlock(&registration_lock);
+
+	return fb_info;
+}
+
+static void put_fb_info(struct fb_info *fb_info)
+{
+	if (!atomic_dec_and_test(&fb_info->count))
+		return;
+	if (fb_info->fbops->fb_destroy)
+		fb_info->fbops->fb_destroy(fb_info);
+}
+
 int lock_fb_info(struct fb_info *info)
 {
 	mutex_lock(&info->lock);
@@ -647,6 +672,7 @@ int fb_show_logo(struct fb_info *info, int rotate) { return 0; }
 
 static void *fb_seq_start(struct seq_file *m, loff_t *pos)
 {
+	mutex_lock(&registration_lock);
 	return (*pos < FB_MAX) ? pos : NULL;
 }
 
@@ -658,6 +684,7 @@ static void *fb_seq_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void fb_seq_stop(struct seq_file *m, void *v)
 {
+	mutex_unlock(&registration_lock);
 }
 
 static int fb_seq_show(struct seq_file *m, void *v)
@@ -690,13 +717,30 @@ static const struct file_operations fb_proc_fops = {
 	.release	= seq_release,
 };
 
-static ssize_t
-fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+/*
+ * We hold a reference to the fb_info in file->private_data,
+ * but if the current registered fb has changed, we don't
+ * actually want to use it.
+ *
+ * So look up the fb_info using the inode minor number,
+ * and just verify it against the reference we have.
+ */
+static struct fb_info *file_fb_info(struct file *file)
 {
-	unsigned long p = *ppos;
 	struct inode *inode = file->f_path.dentry->d_inode;
 	int fbidx = iminor(inode);
 	struct fb_info *info = registered_fb[fbidx];
+
+	if (info != file->private_data)
+		info = NULL;
+	return info;
+}
+
+static ssize_t
+fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+	unsigned long p = *ppos;
+	struct fb_info *info = file_fb_info(file);
 	u8 *buffer, *dst;
 	u8 __iomem *src;
 	int c, cnt = 0, err = 0;
@@ -761,9 +805,7 @@ static ssize_t
 fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
 {
 	unsigned long p = *ppos;
-	struct inode *inode = file->f_path.dentry->d_inode;
-	int fbidx = iminor(inode);
-	struct fb_info *info = registered_fb[fbidx];
+	struct fb_info *info = file_fb_info(file);
 	u8 *buffer, *src;
 	u8 __iomem *dst;
 	int c, cnt = 0, err = 0;
@@ -1141,10 +1183,10 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
 
 static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-	struct inode *inode = file->f_path.dentry->d_inode;
-	int fbidx = iminor(inode);
-	struct fb_info *info = registered_fb[fbidx];
+	struct fb_info *info = file_fb_info(file);
 
+	if (!info)
+		return -ENODEV;
 	return do_fb_ioctl(info, cmd, arg);
 }
 
@@ -1265,12 +1307,13 @@ static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd,
 static long fb_compat_ioctl(struct file *file, unsigned int cmd,
 			    unsigned long arg)
 {
-	struct inode *inode = file->f_path.dentry->d_inode;
-	int fbidx = iminor(inode);
-	struct fb_info *info = registered_fb[fbidx];
-	struct fb_ops *fb = info->fbops;
+	struct fb_info *info = file_fb_info(file);
+	struct fb_ops *fb;
 	long ret = -ENOIOCTLCMD;
 
+	if (!info)
+		return -ENODEV;
+	fb = info->fbops;
 	switch(cmd) {
 	case FBIOGET_VSCREENINFO:
 	case FBIOPUT_VSCREENINFO:
@@ -1303,16 +1346,18 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd,
 static int
 fb_mmap(struct file *file, struct vm_area_struct * vma)
 {
-	int fbidx = iminor(file->f_path.dentry->d_inode);
-	struct fb_info *info = registered_fb[fbidx];
-	struct fb_ops *fb = info->fbops;
+	struct fb_info *info = file_fb_info(file);
+	struct fb_ops *fb;
 	unsigned long off;
 	unsigned long start;
 	u32 len;
 
+	if (!info)
+		return -ENODEV;
 	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
 		return -EINVAL;
 	off = vma->vm_pgoff << PAGE_SHIFT;
+	fb = info->fbops;
 	if (!fb)
 		return -ENODEV;
 	mutex_lock(&info->mm_lock);
@@ -1361,14 +1406,16 @@ __releases(&info->lock)
 	struct fb_info *info;
 	int res = 0;
 
-	if (fbidx >= FB_MAX)
-		return -ENODEV;
-	info = registered_fb[fbidx];
-	if (!info)
+	info = get_fb_info(fbidx);
+	if (!info) {
 		request_module("fb%d", fbidx);
-	info = registered_fb[fbidx];
-	if (!info)
-		return -ENODEV;
+		info = get_fb_info(fbidx);
+		if (!info)
+			return -ENODEV;
+	}
+	if (IS_ERR(info))
+		return PTR_ERR(info);
+
 	mutex_lock(&info->lock);
 	if (!try_module_get(info->fbops->owner)) {
 		res = -ENODEV;
@@ -1386,6 +1433,8 @@ __releases(&info->lock)
 #endif
 out:
 	mutex_unlock(&info->lock);
+	if (res)
+		put_fb_info(info);
 	return res;
 }
 
@@ -1401,6 +1450,7 @@ __releases(&info->lock)
 		info->fbops->fb_release(info,1);
 	module_put(info->fbops->owner);
 	mutex_unlock(&info->lock);
+	put_fb_info(info);
 	return 0;
 }
 
@@ -1487,8 +1537,10 @@ static bool fb_do_apertures_overlap(struct apertures_struct *gena,
 	return false;
 }
 
+static int do_unregister_framebuffer(struct fb_info *fb_info);
+
 #define VGA_FB_PHYS 0xA0000
-void remove_conflicting_framebuffers(struct apertures_struct *a,
+static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
 				     const char *name, bool primary)
 {
 	int i;
@@ -1510,43 +1562,32 @@ void remove_conflicting_framebuffers(struct apertures_struct *a,
 			printk(KERN_INFO "fb: conflicting fb hw usage "
 			       "%s vs %s - removing generic driver\n",
 			       name, registered_fb[i]->fix.id);
-			unregister_framebuffer(registered_fb[i]);
+			do_unregister_framebuffer(registered_fb[i]);
 		}
 	}
 }
-EXPORT_SYMBOL(remove_conflicting_framebuffers);
 
-/**
- *	register_framebuffer - registers a frame buffer device
- *	@fb_info: frame buffer info structure
- *
- *	Registers a frame buffer device @fb_info.
- *
- *	Returns negative errno on error, or zero for success.
- *
- */
-
-int
-register_framebuffer(struct fb_info *fb_info)
+static int do_register_framebuffer(struct fb_info *fb_info)
 {
 	int i;
 	struct fb_event event;
 	struct fb_videomode mode;
 
-	if (num_registered_fb == FB_MAX)
-		return -ENXIO;
-
 	if (fb_check_foreignness(fb_info))
 		return -ENOSYS;
 
-	remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id,
+	do_remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id,
 					 fb_is_primary_device(fb_info));
 
+	if (num_registered_fb == FB_MAX)
+		return -ENXIO;
+
 	num_registered_fb++;
 	for (i = 0 ; i < FB_MAX; i++)
 		if (!registered_fb[i])
 			break;
 	fb_info->node = i;
+	atomic_set(&fb_info->count, 1);
 	mutex_init(&fb_info->lock);
 	mutex_init(&fb_info->mm_lock);
@@ -1592,36 +1633,14 @@ register_framebuffer(struct fb_info *fb_info)
 	return 0;
 }
 
-
-/**
- *	unregister_framebuffer - releases a frame buffer device
- *	@fb_info: frame buffer info structure
- *
- *	Unregisters a frame buffer device @fb_info.
- *
- *	Returns negative errno on error, or zero for success.
- *
- *	This function will also notify the framebuffer console
- *	to release the driver.
- *
- *	This is meant to be called within a driver's module_exit()
- *	function. If this is called outside module_exit(), ensure
- *	that the driver implements fb_open() and fb_release() to
- *	check that no processes are using the device.
- */
-
-int
-unregister_framebuffer(struct fb_info *fb_info)
+static int do_unregister_framebuffer(struct fb_info *fb_info)
 {
 	struct fb_event event;
 	int i, ret = 0;
 
 	i = fb_info->node;
-	if (!registered_fb[i]) {
-		ret = -EINVAL;
-		goto done;
-	}
-
+	if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
+		return -EINVAL;
 
 	if (!lock_fb_info(fb_info))
 		return -ENODEV;
@@ -1629,16 +1648,14 @@ unregister_framebuffer(struct fb_info *fb_info)
 	ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
 	unlock_fb_info(fb_info);
 
-	if (ret) {
-		ret = -EINVAL;
-		goto done;
-	}
+	if (ret)
+		return -EINVAL;
 
 	if (fb_info->pixmap.addr &&
 	    (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
 		kfree(fb_info->pixmap.addr);
 	fb_destroy_modelist(&fb_info->modelist);
-	registered_fb[i]=NULL;
+	registered_fb[i] = NULL;
 	num_registered_fb--;
 	fb_cleanup_device(fb_info);
 	device_destroy(fb_class, MKDEV(FB_MAJOR, i));
@@ -1646,9 +1663,65 @@ unregister_framebuffer(struct fb_info *fb_info)
 	fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
 
 	/* this may free fb info */
-	if (fb_info->fbops->fb_destroy)
-		fb_info->fbops->fb_destroy(fb_info);
-done:
+	put_fb_info(fb_info);
+	return 0;
+}
+
+void remove_conflicting_framebuffers(struct apertures_struct *a,
+				     const char *name, bool primary)
+{
+	mutex_lock(&registration_lock);
+	do_remove_conflicting_framebuffers(a, name, primary);
+	mutex_unlock(&registration_lock);
+}
+EXPORT_SYMBOL(remove_conflicting_framebuffers);
+
+/**
+ *	register_framebuffer - registers a frame buffer device
+ *	@fb_info: frame buffer info structure
+ *
+ *	Registers a frame buffer device @fb_info.
+ *
+ *	Returns negative errno on error, or zero for success.
+ *
+ */
+int
+register_framebuffer(struct fb_info *fb_info)
+{
+	int ret;
+
+	mutex_lock(&registration_lock);
+	ret = do_register_framebuffer(fb_info);
+	mutex_unlock(&registration_lock);
+
+	return ret;
+}
+
+/**
+ *	unregister_framebuffer - releases a frame buffer device
+ *	@fb_info: frame buffer info structure
+ *
+ *	Unregisters a frame buffer device @fb_info.
+ *
+ *	Returns negative errno on error, or zero for success.
+ *
+ *	This function will also notify the framebuffer console
+ *	to release the driver.
+ *
+ *	This is meant to be called within a driver's module_exit()
+ *	function. If this is called outside module_exit(), ensure
+ *	that the driver implements fb_open() and fb_release() to
+ *	check that no processes are using the device.
+ */
+int
+unregister_framebuffer(struct fb_info *fb_info)
+{
+	int ret;
+
+	mutex_lock(&registration_lock);
+	ret = do_unregister_framebuffer(fb_info);
+	mutex_unlock(&registration_lock);
+
 	return ret;
 }
diff --git a/trunk/drivers/video/pxafb.c b/trunk/drivers/video/pxafb.c
index a2e5b5100ab4..0f4e8c942f9e 100644
--- a/trunk/drivers/video/pxafb.c
+++ b/trunk/drivers/video/pxafb.c
@@ -1648,7 +1648,9 @@ pxafb_freq_transition(struct notifier_block *nb, unsigned long val, void *data)
 
 	switch (val) {
 	case CPUFREQ_PRECHANGE:
-		if (!fbi->overlay[0].usage && !fbi->overlay[1].usage)
+#ifdef CONFIG_FB_PXA_OVERLAY
+		if (!(fbi->overlay[0].usage || fbi->overlay[1].usage))
+#endif
 			set_ctrlr_state(fbi, C_DISABLE_CLKCHANGE);
 		break;
 
diff --git a/trunk/drivers/video/udlfb.c b/trunk/drivers/video/udlfb.c
index 68041d9dc260..695066b5b2e6 100644
--- a/trunk/drivers/video/udlfb.c
+++ b/trunk/drivers/video/udlfb.c
@@ -27,6 +27,7 @@
 #include
 #include
 #include
+#include
 #include
 #include