diff --git a/[refs] b/[refs]
index 15152c959ad0..b32f8d74c0ac 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: efc3dbc37412c027e363736b4f4c74ee5e8ecffc
+refs/heads/master: 701b259f446be2f3625fb852bceb93afe76e206d
diff --git a/trunk/Documentation/DocBook/device-drivers.tmpl b/trunk/Documentation/DocBook/device-drivers.tmpl
index b638e50cf8f6..2f7fd4360848 100644
--- a/trunk/Documentation/DocBook/device-drivers.tmpl
+++ b/trunk/Documentation/DocBook/device-drivers.tmpl
@@ -50,7 +50,9 @@
Delaying, scheduling, and timer routines
!Iinclude/linux/sched.h
-!Ekernel/sched.c
+!Ekernel/sched/core.c
+!Ikernel/sched/cpupri.c
+!Ikernel/sched/fair.c
!Iinclude/linux/completion.h
!Ekernel/timer.c
@@ -216,7 +218,6 @@ X!Isound/sound_firmware.c
16x50 UART Driver
-!Iinclude/linux/serial_core.h
!Edrivers/tty/serial/serial_core.c
!Edrivers/tty/serial/8250.c
diff --git a/trunk/Documentation/DocBook/deviceiobook.tmpl b/trunk/Documentation/DocBook/deviceiobook.tmpl
index c1ed6a49e598..54199a0dcf9a 100644
--- a/trunk/Documentation/DocBook/deviceiobook.tmpl
+++ b/trunk/Documentation/DocBook/deviceiobook.tmpl
@@ -317,7 +317,7 @@ CPU B: spin_unlock_irqrestore(&dev_lock, flags)
Public Functions Provided
!Iarch/x86/include/asm/io.h
-!Elib/iomap.c
+!Elib/pci_iomap.c
diff --git a/trunk/Documentation/DocBook/media/dvb/dvbproperty.xml b/trunk/Documentation/DocBook/media/dvb/dvbproperty.xml
index ffee1fbbc001..c7a4ca517859 100644
--- a/trunk/Documentation/DocBook/media/dvb/dvbproperty.xml
+++ b/trunk/Documentation/DocBook/media/dvb/dvbproperty.xml
@@ -163,14 +163,16 @@ get/set up to 64 properties. The actual meaning of each property is described on
DTV_FREQUENCY
- Central frequency of the channel, in HZ.
+ Central frequency of the channel.
Notes:
- 1)For ISDB-T, the channels are usually transmitted with an offset of 143kHz.
+ 1)For satellite delivery systems, it is measured in kHz.
+ For the other ones, it is measured in Hz.
+ 2)For ISDB-T, the channels are usually transmitted with an offset of 143kHz.
E.g. a valid frequency could be 474143 kHz. The stepping is bound to the bandwidth of
the channel which is 6MHz.
- 2)As in ISDB-Tsb the channel consists of only one or three segments the
+ 3)As in ISDB-Tsb the channel consists of only one or three segments the
frequency step is 429kHz, 3*429 respectively. As for ISDB-T the
central frequency of the channel is expected.
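
As a rough illustration of the unit handling above (a sketch only, assuming
the DVBv5 property API from <linux/dvb/frontend.h> and an already opened
frontend file descriptor), an application could set DTV_FREQUENCY like this:

        #include <sys/ioctl.h>
        #include <linux/dvb/frontend.h>

        /* freq is in kHz for satellite delivery systems, in Hz otherwise */
        static int set_dtv_frequency(int fd, __u32 freq)
        {
                struct dtv_property prop = {
                        .cmd = DTV_FREQUENCY,
                        .u.data = freq,
                };
                struct dtv_properties props = {
                        .num = 1,
                        .props = &prop,
                };

                return ioctl(fd, FE_SET_PROPERTY, &props);
        }
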
@@ -735,14 +737,10 @@ typedef enum fe_hierarchy {
DTV_TUNE
DTV_CLEAR
DTV_FREQUENCY
- DTV_MODULATION
DTV_BANDWIDTH_HZ
DTV_INVERSION
- DTV_CODE_RATE_HP
- DTV_CODE_RATE_LP
DTV_GUARD_INTERVAL
DTV_TRANSMISSION_MODE
- DTV_HIERARCHY
DTV_ISDBT_LAYER_ENABLED
DTV_ISDBT_PARTIAL_RECEPTION
DTV_ISDBT_SOUND_BROADCASTING
diff --git a/trunk/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml b/trunk/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
index 6f1f9a629dc3..b17a7aac6997 100644
--- a/trunk/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
+++ b/trunk/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
@@ -183,7 +183,12 @@ applications must set the array to zero.
__u32
ctrl_class
The control class to which all controls belong, see
-.
+. Drivers that use a kernel framework for handling
+controls will also accept a value of 0 here, meaning that the controls can
+belong to any control class. Whether drivers support this can be tested by setting
+ctrl_class to 0 and calling VIDIOC_TRY_EXT_CTRLS
+with a count of 0. If that succeeds, then the driver
+supports this feature.
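+
+A rough sketch of that probe (assuming <linux/videodev2.h> and an open
+device file descriptor; error handling omitted):
+
+        #include <string.h>
+        #include <sys/ioctl.h>
+        #include <linux/videodev2.h>
+
+        static int supports_any_ctrl_class(int fd)
+        {
+                struct v4l2_ext_controls ctrls;
+
+                memset(&ctrls, 0, sizeof(ctrls));
+                ctrls.ctrl_class = 0;   /* any control class */
+                ctrls.count = 0;        /* no controls, probe only */
+                return ioctl(fd, VIDIOC_TRY_EXT_CTRLS, &ctrls) == 0;
+        }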
__u32
@@ -194,10 +199,13 @@ also be zero.
__u32
error_idx
- Set by the driver in case of an error. It is the
-index of the control causing the error or equal to 'count' when the
-error is not associated with a particular control. Undefined when the
-ioctl returns 0 (success).
+ Set by the driver in case of an error. If it is equal
+to count, then no actual changes were made to
+controls. In other words, the error was not associated with setting a particular
+control. If it is another value, then only the controls up to error_idx-1
+were modified and control error_idx is the one that
+caused the error. The error_idx value is undefined
+if the ioctl returned 0 (success).
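+
+A sketch of how an application might act on that (assuming the usual
+<linux/videodev2.h> definitions and an already filled-in v4l2_ext_controls):
+
+        #include <stdio.h>
+        #include <sys/ioctl.h>
+        #include <linux/videodev2.h>
+
+        static int apply_controls(int fd, struct v4l2_ext_controls *ctrls)
+        {
+                if (ioctl(fd, VIDIOC_S_EXT_CTRLS, ctrls) == 0)
+                        return 0;
+                if (ctrls->error_idx == ctrls->count)
+                        fprintf(stderr, "failed before changing any control\n");
+                else
+                        fprintf(stderr, "control %u failed, only the controls before it were modified\n",
+                                ctrls->error_idx);
+                return -1;
+        }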
__u32
diff --git a/trunk/Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml b/trunk/Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml
index 93817f337033..7c63815e7afd 100644
--- a/trunk/Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml
+++ b/trunk/Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml
@@ -364,15 +364,20 @@ capability and it is cleared otherwise.
V4L2_FBUF_FLAG_OVERLAY
0x0002
- The frame buffer is an overlay surface the same
-size as the capture. [?]
-
-
- The purpose of
-V4L2_FBUF_FLAG_OVERLAY was never quite clear.
-Most drivers seem to ignore this flag. For compatibility with the
-bttv driver applications should set the
-V4L2_FBUF_FLAG_OVERLAY flag.
+ If this flag is set for a video capture device, then the
+driver will set the initial overlay size to cover the full framebuffer size,
+otherwise the existing overlay size (as set by &VIDIOC-S-FMT;) will be used.
+
+Only one video capture driver (bttv) supports this flag. The use of this flag
+for capture devices is deprecated. There is no way to detect which drivers
+support this flag, so the only reliable method of setting the overlay size is
+through &VIDIOC-S-FMT;.
+
+If this flag is set for a video output device, then the video output overlay
+window is relative to the top-left corner of the framebuffer and restricted
+to the size of the framebuffer. If it is cleared, then the video output
+overlay window is relative to the video output display.
+
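+A minimal sketch of setting the flag on a video output device (assuming an
+open file descriptor and sufficient privileges for VIDIOC_S_FBUF):
+
+        #include <sys/ioctl.h>
+        #include <linux/videodev2.h>
+
+        static int overlay_relative_to_fb(int fd)
+        {
+                struct v4l2_framebuffer fb;
+
+                if (ioctl(fd, VIDIOC_G_FBUF, &fb) < 0)
+                        return -1;
+                fb.flags |= V4L2_FBUF_FLAG_OVERLAY;
+                return ioctl(fd, VIDIOC_S_FBUF, &fb);
+        }
+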
V4L2_FBUF_FLAG_CHROMAKEY
diff --git a/trunk/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml b/trunk/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml
index 16431813bebd..66e9a5257861 100644
--- a/trunk/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml
+++ b/trunk/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml
@@ -98,8 +98,11 @@ the &v4l2-output; modulator field and the
&v4l2-tuner-type;
type
The tuner type. This is the same value as in the
-&v4l2-tuner; type field. The field is not
-applicable to modulators, &ie; ignored by drivers.
+&v4l2-tuner; type field. The type must be set
+to V4L2_TUNER_RADIO for /dev/radioX
+device nodes, and to V4L2_TUNER_ANALOG_TV
+for all others. The field is not applicable to modulators, &ie; ignored
+by drivers.
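+
+As a sketch (assuming <linux/videodev2.h>, and that is_radio says whether the
+file descriptor refers to a /dev/radioX node), an application would fill the
+structure like this before VIDIOC_S_FREQUENCY:
+
+        #include <string.h>
+        #include <sys/ioctl.h>
+        #include <linux/videodev2.h>
+
+        static int set_freq(int fd, int is_radio, __u32 freq)
+        {
+                struct v4l2_frequency f;
+
+                memset(&f, 0, sizeof(f));
+                f.tuner = 0;
+                f.type = is_radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+                /* units of 62.5 kHz, or 62.5 Hz with V4L2_TUNER_CAP_LOW */
+                f.frequency = freq;
+                return ioctl(fd, VIDIOC_S_FREQUENCY, &f);
+        }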
__u32
diff --git a/trunk/Documentation/DocBook/media/v4l/vidioc-g-input.xml b/trunk/Documentation/DocBook/media/v4l/vidioc-g-input.xml
index 08ae82f131f2..1d43065090dd 100644
--- a/trunk/Documentation/DocBook/media/v4l/vidioc-g-input.xml
+++ b/trunk/Documentation/DocBook/media/v4l/vidioc-g-input.xml
@@ -61,8 +61,8 @@ desired input in an integer and call the
VIDIOC_S_INPUT ioctl with a pointer to this
integer. Side effects are possible. For example inputs may support
different video standards, so the driver may implicitly switch the
-current standard. It is good practice to select an input before
-querying or negotiating any other parameters.
+current standard. Because of these possible side effects, applications
+must select an input before querying or negotiating any other parameters.
Information about video inputs is available using the
&VIDIOC-ENUMINPUT; ioctl.
diff --git a/trunk/Documentation/DocBook/media/v4l/vidioc-g-output.xml b/trunk/Documentation/DocBook/media/v4l/vidioc-g-output.xml
index fd45f1c13ccf..4533068ecb8a 100644
--- a/trunk/Documentation/DocBook/media/v4l/vidioc-g-output.xml
+++ b/trunk/Documentation/DocBook/media/v4l/vidioc-g-output.xml
@@ -61,8 +61,9 @@ desired output in an integer and call the
VIDIOC_S_OUTPUT ioctl with a pointer to this integer.
Side effects are possible. For example outputs may support different
video standards, so the driver may implicitly switch the current
-standard. It is good practice to select an output before querying or
-negotiating any other parameters.
+standard. Because of these possible side effects, applications
+must select an output before querying or negotiating any other parameters.
Information about video outputs is available using the
&VIDIOC-ENUMOUTPUT; ioctl.
diff --git a/trunk/Documentation/acpi/apei/einj.txt b/trunk/Documentation/acpi/apei/einj.txt
index 5cc699ba5453..e7cc36397217 100644
--- a/trunk/Documentation/acpi/apei/einj.txt
+++ b/trunk/Documentation/acpi/apei/einj.txt
@@ -47,20 +47,53 @@ directory apei/einj. The following files are provided.
- param1
This file is used to set the first error parameter value. Effect of
- parameter depends on error_type specified. For memory error, this is
- physical memory address. Only available if param_extension module
- parameter is specified.
+ parameter depends on error_type specified.
- param2
This file is used to set the second error parameter value. Effect of
- parameter depends on error_type specified. For memory error, this is
- physical memory address mask. Only available if param_extension
- module parameter is specified.
+ parameter depends on error_type specified.
+
+BIOS versions based on the ACPI 4.0 specification have limited options
+to control where the errors are injected. Your BIOS may support an
+extension (enabled with the param_extension=1 module parameter, or
+boot command line einj.param_extension=1). This allows the address
+and mask for memory injections to be specified by the param1 and
+param2 files in apei/einj.
+
+BIOS versions using the ACPI 5.0 specification have more control over
+the target of the injection. For processor related errors (type 0x1,
+0x2 and 0x4) the APICID of the target should be provided using the
+param1 file in apei/einj. For memory errors (type 0x8, 0x10 and 0x20)
+the address is set using param1 with a mask in param2 (0x0 is equivalent
+to all ones). For PCI express errors (type 0x40, 0x80 and 0x100) the
+segment, bus, device and function are specified using param1:
+
+  31     24 23     16 15     11 10      8  7        0
+ +---------------------------------------------------+
+ | segment |   bus   |  device | function | reserved |
+ +---------------------------------------------------+
+
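+A small helper (just a sketch of the bit layout above) that packs these
+fields into the value to write to param1; the result can then be echoed
+in hex into apei/einj/param1:
+
+        #include <stdint.h>
+
+        static uint64_t einj_pcie_param1(unsigned seg, unsigned bus,
+                                         unsigned dev, unsigned fn)
+        {
+                return ((uint64_t)(seg & 0xff) << 24) |
+                       ((bus & 0xff) << 16) |
+                       ((dev & 0x1f) << 11) |
+                       ((fn  & 0x07) <<  8);
+        }
+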
+An ACPI 5.0 BIOS may also allow vendor specific errors to be injected.
+In this case a file named vendor will contain identifying information
+from the BIOS that hopefully will allow an application wishing to use
+the vendor specific extension to tell that it is running on a BIOS
+that supports it. All vendor extensions have the 0x80000000 bit set in
+error_type. A file vendor_flags controls the interpretation of param1
+and param2 (1 = PROCESSOR, 2 = MEMORY, 4 = PCI). See your BIOS vendor
+documentation for details (and expect changes to this API if vendors'
+creativity in using this feature expands beyond our expectations).
+
+Example:
+# cd /sys/kernel/debug/apei/einj
+# cat available_error_type # See which errors can be injected
+0x00000002 Processor Uncorrectable non-fatal
+0x00000008 Memory Correctable
+0x00000010 Memory Uncorrectable non-fatal
+# echo 0x12345000 > param1 # Set memory address for injection
+# echo 0xfffffffffffff000 > param2 # Mask - anywhere in this page
+# echo 0x8 > error_type # Choose correctable memory error
+# echo 1 > error_inject # Inject now
-Injecting parameter support is a BIOS version specific extension, that
-is, it only works on some BIOS version. If you want to use it, please
-make sure your BIOS version has the proper support and specify
-"param_extension=y" in module parameter.
For more information about EINJ, please refer to ACPI specification
-version 4.0, section 17.5.
+version 4.0, section 17.5 and ACPI 5.0, section 18.6.
diff --git a/trunk/Documentation/devicetree/bindings/i2c/omap-i2c.txt b/trunk/Documentation/devicetree/bindings/i2c/omap-i2c.txt
new file mode 100644
index 000000000000..56564aa4b444
--- /dev/null
+++ b/trunk/Documentation/devicetree/bindings/i2c/omap-i2c.txt
@@ -0,0 +1,30 @@
+I2C for OMAP platforms
+
+Required properties :
+- compatible : Must be "ti,omap3-i2c" or "ti,omap4-i2c"
+- ti,hwmods : Must be "i2c<n>", n being the instance number (1-based)
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+Recommended properties :
+- clock-frequency : Desired I2C bus clock frequency in Hz. Otherwise
+ the default 100 kHz frequency will be used.
+
+Optional properties:
+- Child nodes conforming to i2c bus binding
+
+Note: The current implementation will fetch the base address, irq and dma
+from the omap hwmod database during device registration.
+The plan is to migrate the hwmod database contents into the device tree
+blob so that all the required data will come from the device tree dts
+file.
+
+Examples :
+
+i2c1: i2c@0 {
+ compatible = "ti,omap3-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ti,hwmods = "i2c1";
+ clock-frequency = <400000>;
+};
diff --git a/trunk/Documentation/feature-removal-schedule.txt b/trunk/Documentation/feature-removal-schedule.txt
index d725c0dfe032..1bea46a54b1c 100644
--- a/trunk/Documentation/feature-removal-schedule.txt
+++ b/trunk/Documentation/feature-removal-schedule.txt
@@ -439,17 +439,6 @@ Who: Jean Delvare
----------------------------
-What: For VIDIOC_S_FREQUENCY the type field must match the device node's type.
- If not, return -EINVAL.
-When: 3.2
-Why: It makes no sense to switch the tuner to radio mode by calling
- VIDIOC_S_FREQUENCY on a video node, or to switch the tuner to tv mode by
- calling VIDIOC_S_FREQUENCY on a radio node. This is the first step of a
- move to more consistent handling of tv and radio tuners.
-Who: Hans Verkuil
-
-----------------------------
-
What: Opening a radio device node will no longer automatically switch the
tuner mode from tv to radio.
When: 3.3
diff --git a/trunk/Documentation/ioctl/ioctl-number.txt b/trunk/Documentation/ioctl/ioctl-number.txt
index 54078ed96b37..4840334ea97b 100644
--- a/trunk/Documentation/ioctl/ioctl-number.txt
+++ b/trunk/Documentation/ioctl/ioctl-number.txt
@@ -149,6 +149,7 @@ Code Seq#(hex) Include File Comments
'M' 01-03 drivers/scsi/megaraid/megaraid_sas.h
'M' 00-0F drivers/video/fsl-diu-fb.h conflict!
'N' 00-1F drivers/usb/scanner.h
+'N' 40-7F drivers/block/nvme.c
'O' 00-06 mtd/ubi-user.h UBI
'P' all linux/soundcard.h conflict!
'P' 60-6F sound/sscape_ioctl.h conflict!
diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt
index b29f3c416296..033d4e69b43b 100644
--- a/trunk/Documentation/kernel-parameters.txt
+++ b/trunk/Documentation/kernel-parameters.txt
@@ -1059,6 +1059,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
By default, super page will be supported if Intel IOMMU
has the capability. With this option, super page will
not be supported.
+
+ intel_idle.max_cstate= [KNL,HW,ACPI,X86]
+ 0 disables intel_idle and falls back on acpi_idle.
+ 1 to 6 specify maximum depth of C-state.
+
intremap= [X86-64, Intel-IOMMU]
on enable Interrupt Remapping (default)
off disable Interrupt Remapping
diff --git a/trunk/Documentation/power/basic-pm-debugging.txt b/trunk/Documentation/power/basic-pm-debugging.txt
index 40a4c65f380a..262acf56fa79 100644
--- a/trunk/Documentation/power/basic-pm-debugging.txt
+++ b/trunk/Documentation/power/basic-pm-debugging.txt
@@ -15,7 +15,7 @@ test at least a couple of times in a row for confidence. [This is necessary,
because some problems only show up on a second attempt at suspending and
resuming the system.] Moreover, hibernating in the "reboot" and "shutdown"
modes causes the PM core to skip some platform-related callbacks which on ACPI
-systems might be necessary to make hibernation work. Thus, if you machine fails
+systems might be necessary to make hibernation work. Thus, if your machine fails
to hibernate or resume in the "reboot" mode, you should try the "platform" mode:
# echo platform > /sys/power/disk
diff --git a/trunk/Documentation/power/freezing-of-tasks.txt b/trunk/Documentation/power/freezing-of-tasks.txt
index 6ccb68f68da6..ebd7490ef1df 100644
--- a/trunk/Documentation/power/freezing-of-tasks.txt
+++ b/trunk/Documentation/power/freezing-of-tasks.txt
@@ -120,10 +120,10 @@ So in practice, the 'at all' may become a 'why freeze kernel threads?' and
freezing user threads I don't find really objectionable."
Still, there are kernel threads that may want to be freezable. For example, if
-a kernel that belongs to a device driver accesses the device directly, it in
-principle needs to know when the device is suspended, so that it doesn't try to
-access it at that time. However, if the kernel thread is freezable, it will be
-frozen before the driver's .suspend() callback is executed and it will be
+a kernel thread that belongs to a device driver accesses the device directly, it
+in principle needs to know when the device is suspended, so that it doesn't try
+to access it at that time. However, if the kernel thread is freezable, it will
+be frozen before the driver's .suspend() callback is executed and it will be
thawed after the driver's .resume() callback has run, so it won't be accessing
the device while it's suspended.
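
As a sketch of what such a thread looks like (assuming the standard kthread
and freezer APIs; names are illustrative only):

        #include <linux/kthread.h>
        #include <linux/sched.h>
        #include <linux/freezer.h>

        static int my_device_thread(void *data)
        {
                set_freezable();        /* opt in to the freezer */

                while (!kthread_should_stop()) {
                        try_to_freeze();        /* parks here across suspend/resume */
                        /* safe to touch the hardware only while unfrozen */
                        schedule_timeout_interruptible(HZ);
                }
                return 0;
        }
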
diff --git a/trunk/Documentation/scsi/ChangeLog.megaraid_sas b/trunk/Documentation/scsi/ChangeLog.megaraid_sas
index 64adb98b181c..57566bacb4c5 100644
--- a/trunk/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/trunk/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,13 @@
+Release Date : Fri. Jan 6, 2012 17:00:00 PST 2010 -
+ (emaild-id:megaraidlinux@lsi.com)
+ Adam Radford
+Current Version : 00.00.06.14-rc1
+Old Version : 00.00.06.12-rc1
+ 1. Fix reglockFlags for degraded raid5/6 for MR 9360/9380.
+ 2. Mask off flags in ioctl path to prevent memory scribble with older
+ MegaCLI versions.
+ 3. Remove poll_mode_io module parameter, sysfs node, and associated code.
+-------------------------------------------------------------------------------
Release Date : Wed. Oct 5, 2011 17:00:00 PST 2010 -
(emaild-id:megaraidlinux@lsi.com)
Adam Radford
diff --git a/trunk/Documentation/scsi/LICENSE.qla4xxx b/trunk/Documentation/scsi/LICENSE.qla4xxx
index 494980e40491..ab899591ecb7 100644
--- a/trunk/Documentation/scsi/LICENSE.qla4xxx
+++ b/trunk/Documentation/scsi/LICENSE.qla4xxx
@@ -1,32 +1,11 @@
Copyright (c) 2003-2011 QLogic Corporation
-QLogic Linux iSCSI HBA Driver
+QLogic Linux iSCSI Driver
This program includes a device driver for Linux 3.x.
You may modify and redistribute the device driver code under the
GNU General Public License (a copy of which is attached hereto as
Exhibit A) published by the Free Software Foundation (version 2).
-REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,
-THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS'' AND ANY
-EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
-PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
-BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
-TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT
-CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR
-OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT,
-TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN
-ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN
-COMBINATION WITH THIS PROGRAM.
-
EXHIBIT A
diff --git a/trunk/Documentation/target/tcm_mod_builder.py b/trunk/Documentation/target/tcm_mod_builder.py
index 7ef9b843d529..6e21b8b52638 100755
--- a/trunk/Documentation/target/tcm_mod_builder.py
+++ b/trunk/Documentation/target/tcm_mod_builder.py
@@ -230,14 +230,9 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "#include \n"
buf += "#include \n\n"
buf += "#include \n"
- buf += "#include \n"
- buf += "#include \n"
+ buf += "#include \n"
buf += "#include \n"
- buf += "#include \n"
- buf += "#include \n"
- buf += "#include \n"
buf += "#include \n"
- buf += "#include \n"
buf += "#include \n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
@@ -260,7 +255,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
- buf += " if (!(se_nacl_new))\n"
+ buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
@@ -308,7 +303,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
- buf += " if (!(tpg)) {\n"
+ buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
@@ -344,7 +339,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
- buf += " if (!(" + fabric_mod_port + ")) {\n"
+ buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
@@ -352,7 +347,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
- buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "__NAMELEN, wwpn); */\n\n"
+ buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
@@ -391,8 +386,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
- buf += " .release_cmd_to_pool = " + fabric_mod_name + "_release_cmd,\n"
- buf += " .release_cmd_direct = " + fabric_mod_name + "_release_cmd,\n"
+ buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
@@ -405,14 +399,12 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
- buf += " .new_cmd_failure = " + fabric_mod_name + "_new_cmd_failure,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
- buf += " .pack_lun = " + fabric_mod_name + "_pack_lun,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
@@ -439,9 +431,9 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
- buf += " if (!(fabric)) {\n"
+ buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
- buf += " return -ENOMEM;\n"
+ buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
@@ -475,9 +467,9 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
- buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n"
+ buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
- buf += " if (!(" + fabric_mod_name + "_fabric_configfs))\n"
+ buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
@@ -492,17 +484,15 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
- buf += "static void " + fabric_mod_name + "_exit(void)\n"
+ buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
- buf += "#ifdef MODULE\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
- buf += "#endif\n"
ret = p.write(buf)
if ret:
@@ -514,7 +504,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
def tcm_mod_scan_fabric_ops(tcm_dir):
- fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h"
+ fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
@@ -579,11 +569,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "#include \n"
buf += "#include \n\n"
buf += "#include \n"
- buf += "#include \n"
- buf += "#include \n"
- buf += "#include \n"
- buf += "#include \n"
- buf += "#include \n"
+ buf += "#include \n"
buf += "#include \n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
@@ -788,7 +774,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
- buf += " if (!(nacl)) {\n"
+ buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
@@ -815,7 +801,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
- if re.search('release_cmd_to_pool', fo):
+ if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
@@ -899,13 +885,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
- if re.search('new_cmd_failure\)\(', fo):
- buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n"
- buf += "{\n"
- buf += " return;\n"
- buf += "}\n\n"
- bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n"
-
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
@@ -948,15 +927,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
- if re.search('pack_lun\)\(', fo):
- buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n"
- buf += "{\n"
- buf += " WARN_ON(lun >= 256);\n"
- buf += " /* Caller wants this byte-swapped */\n"
- buf += " return cpu_to_le64((lun & 0xff) << 8);\n"
- buf += "}\n\n"
- bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n"
-
ret = p.write(buf)
if ret:
diff --git a/trunk/Documentation/video4linux/v4l2-controls.txt b/trunk/Documentation/video4linux/v4l2-controls.txt
index 26aa0573933e..e2492a9d1027 100644
--- a/trunk/Documentation/video4linux/v4l2-controls.txt
+++ b/trunk/Documentation/video4linux/v4l2-controls.txt
@@ -666,27 +666,6 @@ a control of this type whenever the first control belonging to a new control
class is added.
-Differences from the Spec
-=========================
-
-There are a few places where the framework acts slightly differently from the
-V4L2 Specification. Those differences are described in this section. We will
-have to see whether we need to adjust the spec or not.
-
-1) It is no longer required to have all controls contained in a
-v4l2_ext_control array be from the same control class. The framework will be
-able to handle any type of control in the array. You need to set ctrl_class
-to 0 in order to enable this. If ctrl_class is non-zero, then it will still
-check that all controls belong to that control class.
-
-If you set ctrl_class to 0 and count to 0, then it will only return an error
-if there are no controls at all.
-
-2) Clarified the way error_idx works. For get and set it will be equal to
-count if nothing was done yet. If it is less than count then only the controls
-up to error_idx-1 were successfully applied.
-
-
Proposals for Extensions
========================
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS
index fa3f5e6a608b..93c68d5f1cf4 100644
--- a/trunk/MAINTAINERS
+++ b/trunk/MAINTAINERS
@@ -2246,6 +2246,17 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/teigland/dlm.git
S: Supported
F: fs/dlm/
+DMA BUFFER SHARING FRAMEWORK
+M: Sumit Semwal
+S: Maintained
+L: linux-media@vger.kernel.org
+L: dri-devel@lists.freedesktop.org
+L: linaro-mm-sig@lists.linaro.org
+F: drivers/base/dma-buf*
+F: include/linux/dma-buf*
+F: Documentation/dma-buf-sharing.txt
+T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
+
DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
M: Vinod Koul
M: Dan Williams
@@ -7200,7 +7211,7 @@ S: Maintained
F: drivers/net/vmxnet3/
VMware PVSCSI driver
-M: Alok Kataria
+M: Arvind Kumar
M: VMware PV-Drivers
L: linux-scsi@vger.kernel.org
S: Maintained
diff --git a/trunk/Makefile b/trunk/Makefile
index 156ac69c961e..71e6ed21dd15 100644
--- a/trunk/Makefile
+++ b/trunk/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
-PATCHLEVEL = 2
+PATCHLEVEL = 3
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
NAME = Saber-toothed Squirrel
# *DOCUMENTATION*
diff --git a/trunk/arch/arm/Makefile b/trunk/arch/arm/Makefile
index 40319d91bb7f..1683bfb9166f 100644
--- a/trunk/arch/arm/Makefile
+++ b/trunk/arch/arm/Makefile
@@ -160,7 +160,6 @@ machine-$(CONFIG_ARCH_MSM) := msm
machine-$(CONFIG_ARCH_MV78XX0) := mv78xx0
machine-$(CONFIG_ARCH_IMX_V4_V5) := imx
machine-$(CONFIG_ARCH_IMX_V6_V7) := imx
-machine-$(CONFIG_ARCH_MX5) := mx5
machine-$(CONFIG_ARCH_MXS) := mxs
machine-$(CONFIG_ARCH_NETX) := netx
machine-$(CONFIG_ARCH_NOMADIK) := nomadik
diff --git a/trunk/arch/arm/configs/mx5_defconfig b/trunk/arch/arm/configs/imx_v6_v7_defconfig
similarity index 80%
rename from trunk/arch/arm/configs/mx5_defconfig
rename to trunk/arch/arm/configs/imx_v6_v7_defconfig
index d0d8dfece37e..3a4fb2e5fc68 100644
--- a/trunk/arch/arm/configs/mx5_defconfig
+++ b/trunk/arch/arm/configs/imx_v6_v7_defconfig
@@ -3,6 +3,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_KERNEL_LZO=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=18
+CONFIG_CGROUPS=y
CONFIG_RELAY=y
CONFIG_EXPERT=y
# CONFIG_SLUB_DEBUG is not set
@@ -14,20 +15,31 @@ CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_ARCH_MXC=y
-CONFIG_ARCH_MX5=y
-CONFIG_MACH_MX51_BABBAGE=y
+CONFIG_MACH_MX31LILLY=y
+CONFIG_MACH_MX31LITE=y
+CONFIG_MACH_PCM037=y
+CONFIG_MACH_PCM037_EET=y
+CONFIG_MACH_MX31_3DS=y
+CONFIG_MACH_MX31MOBOARD=y
+CONFIG_MACH_QONG=y
+CONFIG_MACH_ARMADILLO5X0=y
+CONFIG_MACH_KZM_ARM11_01=y
+CONFIG_MACH_PCM043=y
+CONFIG_MACH_MX35_3DS=y
+CONFIG_MACH_EUKREA_CPUIMX35=y
+CONFIG_MACH_VPR200=y
+CONFIG_MACH_IMX51_DT=y
CONFIG_MACH_MX51_3DS=y
CONFIG_MACH_EUKREA_CPUIMX51=y
CONFIG_MACH_EUKREA_CPUIMX51SD=y
CONFIG_MACH_MX51_EFIKAMX=y
CONFIG_MACH_MX51_EFIKASB=y
-CONFIG_MACH_MX53_EVK=y
-CONFIG_MACH_MX53_SMD=y
-CONFIG_MACH_MX53_LOCO=y
-CONFIG_MACH_MX53_ARD=y
+CONFIG_MACH_IMX53_DT=y
+CONFIG_SOC_IMX6Q=y
CONFIG_MXC_PWM=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SMP=y
CONFIG_VMSPLIT_2G=y
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_AEABI=y
@@ -49,7 +61,7 @@ CONFIG_IP_PNP_DHCP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-# CONFIG_IPV6 is not set
+CONFIG_IPV6=y
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -68,24 +80,20 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_ATA=y
CONFIG_PATA_IMX=y
CONFIG_NETDEVICES=y
-CONFIG_MII=m
-CONFIG_MARVELL_PHY=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_QSEMI_PHY=y
-CONFIG_LXT_PHY=y
-CONFIG_CICADA_PHY=y
-CONFIG_VITESSE_PHY=y
-CONFIG_SMSC_PHY=y
-CONFIG_BROADCOM_PHY=y
-CONFIG_ICPLUS_PHY=y
-CONFIG_REALTEK_PHY=y
-CONFIG_NATIONAL_PHY=y
-CONFIG_STE10XP=y
-CONFIG_LSI_ET1011C_PHY=y
-CONFIG_MICREL_PHY=y
-CONFIG_NET_ETHERNET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+CONFIG_FEC=y
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_SMC91X=y
+CONFIG_SMC911X=y
+CONFIG_SMSC911X=y
+# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_WLAN is not set
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
@@ -124,7 +132,6 @@ CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_MXC=y
CONFIG_USB_STORAGE=y
CONFIG_MMC=y
-CONFIG_MMC_BLOCK=m
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_ESDHC_IMX=y
@@ -133,6 +140,8 @@ CONFIG_LEDS_CLASS=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_INTF_DEV_UIE_EMUL=y
CONFIG_RTC_MXC=y
+CONFIG_DMADEVICES=y
+CONFIG_IMX_SDMA=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
diff --git a/trunk/arch/arm/configs/mx3_defconfig b/trunk/arch/arm/configs/mx3_defconfig
deleted file mode 100644
index cb0717fbb03d..000000000000
--- a/trunk/arch/arm/configs/mx3_defconfig
+++ /dev/null
@@ -1,144 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EXPERT=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_MXC=y
-CONFIG_MACH_MX31ADS_WM1133_EV1=y
-CONFIG_MACH_MX31LILLY=y
-CONFIG_MACH_MX31LITE=y
-CONFIG_MACH_PCM037=y
-CONFIG_MACH_PCM037_EET=y
-CONFIG_MACH_MX31_3DS=y
-CONFIG_MACH_MX31MOBOARD=y
-CONFIG_MACH_QONG=y
-CONFIG_MACH_ARMADILLO5X0=y
-CONFIG_MACH_KZM_ARM11_01=y
-CONFIG_MACH_PCM043=y
-CONFIG_MACH_MX35_3DS=y
-CONFIG_MACH_EUKREA_CPUIMX35=y
-CONFIG_MXC_IRQ_PRIOR=y
-CONFIG_MXC_PWM=y
-CONFIG_ARM_ERRATA_411920=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_PREEMPT=y
-CONFIG_AEABI=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="noinitrd console=ttymxc0,115200 root=/dev/mtdblock2 rw ip=off"
-CONFIG_VFP=y
-CONFIG_PM_DEBUG=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_FW_LOADER=m
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_MXC=y
-CONFIG_MTD_UBI=y
-# CONFIG_BLK_DEV is not set
-CONFIG_MISC_DEVICES=y
-CONFIG_EEPROM_AT24=y
-CONFIG_NETDEVICES=y
-CONFIG_SMSC_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_SMSC911X=y
-CONFIG_DNET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_IMX=y
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_SERIAL_8250=m
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_SERIAL_IMX=y
-CONFIG_SERIAL_IMX_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_IMX=y
-CONFIG_SPI=y
-CONFIG_W1=y
-CONFIG_W1_MASTER_MXC=y
-CONFIG_W1_SLAVE_THERM=y
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_IMX2_WDT=y
-CONFIG_MFD_WM8350_I2C=y
-CONFIG_REGULATOR=y
-CONFIG_REGULATOR_WM8350=y
-CONFIG_MEDIA_SUPPORT=y
-CONFIG_VIDEO_DEV=y
-# CONFIG_RC_CORE is not set
-# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
-CONFIG_SOC_CAMERA=y
-CONFIG_SOC_CAMERA_MT9M001=y
-CONFIG_SOC_CAMERA_MT9M111=y
-CONFIG_SOC_CAMERA_MT9T031=y
-CONFIG_SOC_CAMERA_MT9V022=y
-CONFIG_SOC_CAMERA_TW9910=y
-CONFIG_SOC_CAMERA_OV772X=y
-CONFIG_VIDEO_MX3=y
-# CONFIG_RADIO_ADAPTERS is not set
-CONFIG_FB=y
-CONFIG_SOUND=y
-CONFIG_SND=y
-# CONFIG_SND_ARM is not set
-# CONFIG_SND_SPI is not set
-CONFIG_SND_SOC=y
-CONFIG_SND_IMX_SOC=y
-CONFIG_SND_MXC_SOC_WM1133_EV1=y
-CONFIG_SND_SOC_PHYCORE_AC97=y
-CONFIG_SND_SOC_EUKREA_TLV320=y
-CONFIG_USB=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_MXC=y
-CONFIG_USB_GADGET=m
-CONFIG_USB_FSL_USB2=m
-CONFIG_USB_G_SERIAL=m
-CONFIG_USB_ULPI=y
-CONFIG_MMC=y
-CONFIG_MMC_MXC=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_MXC=y
-CONFIG_DMADEVICES=y
-# CONFIG_DNOTIFY is not set
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_UBIFS_FS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/trunk/arch/arm/mach-imx/Kconfig b/trunk/arch/arm/mach-imx/Kconfig
index 0e6de366c648..09f357bcecde 100644
--- a/trunk/arch/arm/mach-imx/Kconfig
+++ b/trunk/arch/arm/mach-imx/Kconfig
@@ -22,6 +22,18 @@ config ARCH_MX25
config MACH_MX27
bool
+config ARCH_MX5
+ bool
+
+config ARCH_MX50
+ bool
+
+config ARCH_MX51
+ bool
+
+config ARCH_MX53
+ bool
+
config SOC_IMX1
bool
select ARCH_MX1
@@ -73,6 +85,32 @@ config SOC_IMX35
select MXC_AVIC
select SMP_ON_UP if SMP
+config SOC_IMX5
+ select CPU_V7
+ select ARM_L1_CACHE_SHIFT_6
+ select MXC_TZIC
+ select ARCH_MXC_IOMUX_V3
+ select ARCH_MXC_AUDMUX_V2
+ select ARCH_HAS_CPUFREQ
+ select ARCH_MX5
+ bool
+
+config SOC_IMX50
+ bool
+ select SOC_IMX5
+ select ARCH_MX50
+
+config SOC_IMX51
+ bool
+ select SOC_IMX5
+ select ARCH_MX5
+ select ARCH_MX51
+
+config SOC_IMX53
+ bool
+ select SOC_IMX5
+ select ARCH_MX5
+ select ARCH_MX53
if ARCH_IMX_V4_V5
@@ -592,6 +630,207 @@ config MACH_VPR200
Include support for VPR200 platform. This includes specific
configurations for the board and its peripherals.
+comment "i.MX5 platforms:"
+
+config MACH_MX50_RDP
+ bool "Support MX50 reference design platform"
+ depends on BROKEN
+ select SOC_IMX50
+ select IMX_HAVE_PLATFORM_IMX_I2C
+ select IMX_HAVE_PLATFORM_IMX_UART
+ select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+ select IMX_HAVE_PLATFORM_SPI_IMX
+ help
+ Include support for MX50 reference design platform (RDP) board. This
+ includes specific configurations for the board and its peripherals.
+
+comment "i.MX51 machines:"
+
+config MACH_IMX51_DT
+ bool "Support i.MX51 platforms from device tree"
+ select SOC_IMX51
+ select USE_OF
+ select MACH_MX51_BABBAGE
+ help
+ Include support for Freescale i.MX51 based platforms
+ using the device tree for discovery
+
+config MACH_MX51_BABBAGE
+ bool "Support MX51 BABBAGE platforms"
+ select SOC_IMX51
+ select IMX_HAVE_PLATFORM_FSL_USB2_UDC
+ select IMX_HAVE_PLATFORM_IMX2_WDT
+ select IMX_HAVE_PLATFORM_IMX_I2C
+ select IMX_HAVE_PLATFORM_IMX_UART
+ select IMX_HAVE_PLATFORM_MXC_EHCI
+ select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+ select IMX_HAVE_PLATFORM_SPI_IMX
+ help
+ Include support for MX51 Babbage platform, also known as MX51EVK in
+ u-boot. This includes specific configurations for the board and its
+ peripherals.
+
+config MACH_MX51_3DS
+ bool "Support MX51PDK (3DS)"
+ select SOC_IMX51
+ select IMX_HAVE_PLATFORM_IMX2_WDT
+ select IMX_HAVE_PLATFORM_IMX_KEYPAD
+ select IMX_HAVE_PLATFORM_IMX_UART
+ select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+ select IMX_HAVE_PLATFORM_SPI_IMX
+ select MXC_DEBUG_BOARD
+ help
+ Include support for MX51PDK (3DS) platform. This includes specific
+ configurations for the board and its peripherals.
+
+config MACH_EUKREA_CPUIMX51
+ bool "Support Eukrea CPUIMX51 module"
+ select SOC_IMX51
+ select IMX_HAVE_PLATFORM_FSL_USB2_UDC
+ select IMX_HAVE_PLATFORM_IMX_I2C
+ select IMX_HAVE_PLATFORM_IMX_UART
+ select IMX_HAVE_PLATFORM_MXC_EHCI
+ select IMX_HAVE_PLATFORM_MXC_NAND
+ select IMX_HAVE_PLATFORM_SPI_IMX
+ help
+ Include support for Eukrea CPUIMX51 platform. This includes
+ specific configurations for the module and its peripherals.
+
+choice
+ prompt "Baseboard"
+ depends on MACH_EUKREA_CPUIMX51
+ default MACH_EUKREA_MBIMX51_BASEBOARD
+
+config MACH_EUKREA_MBIMX51_BASEBOARD
+ prompt "Eukrea MBIMX51 development board"
+ bool
+ select IMX_HAVE_PLATFORM_IMX_KEYPAD
+ select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+ select LEDS_GPIO_REGISTER
+ help
+ This adds board specific devices that can be found on Eukrea's
+ MBIMX51 evaluation board.
+
+endchoice
+
+config MACH_EUKREA_CPUIMX51SD
+ bool "Support Eukrea CPUIMX51SD module"
+ select SOC_IMX51
+ select IMX_HAVE_PLATFORM_FSL_USB2_UDC
+ select IMX_HAVE_PLATFORM_IMX_I2C
+ select IMX_HAVE_PLATFORM_IMX_UART
+ select IMX_HAVE_PLATFORM_MXC_EHCI
+ select IMX_HAVE_PLATFORM_MXC_NAND
+ select IMX_HAVE_PLATFORM_SPI_IMX
+ help
+ Include support for Eukrea CPUIMX51SD platform. This includes
+ specific configurations for the module and its peripherals.
+
+choice
+ prompt "Baseboard"
+ depends on MACH_EUKREA_CPUIMX51SD
+ default MACH_EUKREA_MBIMXSD51_BASEBOARD
+
+config MACH_EUKREA_MBIMXSD51_BASEBOARD
+ prompt "Eukrea MBIMXSD development board"
+ bool
+ select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+ select LEDS_GPIO_REGISTER
+ help
+ This adds board specific devices that can be found on Eukrea's
+ MBIMXSD evaluation board.
+
+endchoice
+
+config MX51_EFIKA_COMMON
+ bool
+ select SOC_IMX51
+ select IMX_HAVE_PLATFORM_IMX_UART
+ select IMX_HAVE_PLATFORM_MXC_EHCI
+ select IMX_HAVE_PLATFORM_PATA_IMX
+ select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+ select IMX_HAVE_PLATFORM_SPI_IMX
+ select MXC_ULPI if USB_ULPI
+
+config MACH_MX51_EFIKAMX
+ bool "Support MX51 Genesi Efika MX nettop"
+ select LEDS_GPIO_REGISTER
+ select MX51_EFIKA_COMMON
+ help
+ Include support for Genesi Efika MX nettop. This includes specific
+ configurations for the board and its peripherals.
+
+config MACH_MX51_EFIKASB
+ bool "Support MX51 Genesi Efika Smartbook"
+ select LEDS_GPIO_REGISTER
+ select MX51_EFIKA_COMMON
+ help
+ Include support for Genesi Efika Smartbook. This includes specific
+ configurations for the board and its peripherals.
+
+comment "i.MX53 machines:"
+
+config MACH_IMX53_DT
+ bool "Support i.MX53 platforms from device tree"
+ select SOC_IMX53
+ select USE_OF
+ select MACH_MX53_ARD
+ select MACH_MX53_EVK
+ select MACH_MX53_LOCO
+ select MACH_MX53_SMD
+ help
+ Include support for Freescale i.MX53 based platforms
+ using the device tree for discovery
+
+config MACH_MX53_EVK
+ bool "Support MX53 EVK platforms"
+ select SOC_IMX53
+ select IMX_HAVE_PLATFORM_IMX2_WDT
+ select IMX_HAVE_PLATFORM_IMX_UART
+ select IMX_HAVE_PLATFORM_IMX_I2C
+ select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+ select IMX_HAVE_PLATFORM_SPI_IMX
+ select LEDS_GPIO_REGISTER
+ help
+ Include support for MX53 EVK platform. This includes specific
+ configurations for the board and its peripherals.
+
+config MACH_MX53_SMD
+ bool "Support MX53 SMD platforms"
+ select SOC_IMX53
+ select IMX_HAVE_PLATFORM_IMX2_WDT
+ select IMX_HAVE_PLATFORM_IMX_I2C
+ select IMX_HAVE_PLATFORM_IMX_UART
+ select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+ help
+ Include support for MX53 SMD platform. This includes specific
+ configurations for the board and its peripherals.
+
+config MACH_MX53_LOCO
+ bool "Support MX53 LOCO platforms"
+ select SOC_IMX53
+ select IMX_HAVE_PLATFORM_IMX2_WDT
+ select IMX_HAVE_PLATFORM_IMX_I2C
+ select IMX_HAVE_PLATFORM_IMX_UART
+ select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+ select IMX_HAVE_PLATFORM_GPIO_KEYS
+ select LEDS_GPIO_REGISTER
+ help
+ Include support for MX53 LOCO platform. This includes specific
+ configurations for the board and its peripherals.
+
+config MACH_MX53_ARD
+ bool "Support MX53 ARD platforms"
+ select SOC_IMX53
+ select IMX_HAVE_PLATFORM_IMX2_WDT
+ select IMX_HAVE_PLATFORM_IMX_I2C
+ select IMX_HAVE_PLATFORM_IMX_UART
+ select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+ select IMX_HAVE_PLATFORM_GPIO_KEYS
+ help
+ Include support for MX53 ARD platform. This includes specific
+ configurations for the board and its peripherals.
+
comment "i.MX6 family:"
config SOC_IMX6Q
diff --git a/trunk/arch/arm/mach-imx/Makefile b/trunk/arch/arm/mach-imx/Makefile
index f5920c24f7d7..55db9c488f2b 100644
--- a/trunk/arch/arm/mach-imx/Makefile
+++ b/trunk/arch/arm/mach-imx/Makefile
@@ -11,6 +11,8 @@ obj-$(CONFIG_SOC_IMX27) += clock-imx27.o mm-imx27.o ehci-imx27.o
obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clock-imx31.o iomux-imx31.o ehci-imx31.o
obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clock-imx35.o ehci-imx35.o
+obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clock-mx51-mx53.o ehci-imx5.o pm-imx5.o cpu_op-mx51.o
+
# Support for CMOS sensor interface
obj-$(CONFIG_MX1_VIDEO) += mx1-camera-fiq.o mx1-camera-fiq-ksym.o
@@ -75,3 +77,22 @@ obj-$(CONFIG_SOC_IMX6Q) += clock-imx6q.o mach-imx6q.o
ifeq ($(CONFIG_PM),y)
obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o
endif
+
+# i.MX5 based machines
+obj-$(CONFIG_MACH_MX51_BABBAGE) += mach-mx51_babbage.o
+obj-$(CONFIG_MACH_MX51_3DS) += mach-mx51_3ds.o
+obj-$(CONFIG_MACH_MX53_EVK) += mach-mx53_evk.o
+obj-$(CONFIG_MACH_MX53_SMD) += mach-mx53_smd.o
+obj-$(CONFIG_MACH_MX53_LOCO) += mach-mx53_loco.o
+obj-$(CONFIG_MACH_MX53_ARD) += mach-mx53_ard.o
+obj-$(CONFIG_MACH_EUKREA_CPUIMX51) += mach-cpuimx51.o
+obj-$(CONFIG_MACH_EUKREA_MBIMX51_BASEBOARD) += eukrea_mbimx51-baseboard.o
+obj-$(CONFIG_MACH_EUKREA_CPUIMX51SD) += mach-cpuimx51sd.o
+obj-$(CONFIG_MACH_EUKREA_MBIMXSD51_BASEBOARD) += eukrea_mbimxsd-baseboard.o
+obj-$(CONFIG_MX51_EFIKA_COMMON) += mx51_efika.o
+obj-$(CONFIG_MACH_MX51_EFIKAMX) += mach-mx51_efikamx.o
+obj-$(CONFIG_MACH_MX51_EFIKASB) += mach-mx51_efikasb.o
+obj-$(CONFIG_MACH_MX50_RDP) += mach-mx50_rdp.o
+
+obj-$(CONFIG_MACH_IMX51_DT) += imx51-dt.o
+obj-$(CONFIG_MACH_IMX53_DT) += imx53-dt.o
diff --git a/trunk/arch/arm/mach-imx/Makefile.boot b/trunk/arch/arm/mach-imx/Makefile.boot
index 5f4d06af4912..6dfdbcc83afd 100644
--- a/trunk/arch/arm/mach-imx/Makefile.boot
+++ b/trunk/arch/arm/mach-imx/Makefile.boot
@@ -22,6 +22,18 @@ zreladdr-$(CONFIG_SOC_IMX35) += 0x80008000
params_phys-$(CONFIG_SOC_IMX35) := 0x80000100
initrd_phys-$(CONFIG_SOC_IMX35) := 0x80800000
+zreladdr-$(CONFIG_SOC_IMX50) += 0x70008000
+params_phys-$(CONFIG_SOC_IMX50) := 0x70000100
+initrd_phys-$(CONFIG_SOC_IMX50) := 0x70800000
+
+zreladdr-$(CONFIG_SOC_IMX51) += 0x90008000
+params_phys-$(CONFIG_SOC_IMX51) := 0x90000100
+initrd_phys-$(CONFIG_SOC_IMX51) := 0x90800000
+
+zreladdr-$(CONFIG_SOC_IMX53) += 0x70008000
+params_phys-$(CONFIG_SOC_IMX53) := 0x70000100
+initrd_phys-$(CONFIG_SOC_IMX53) := 0x70800000
+
zreladdr-$(CONFIG_SOC_IMX6Q) += 0x10008000
params_phys-$(CONFIG_SOC_IMX6Q) := 0x10000100
initrd_phys-$(CONFIG_SOC_IMX6Q) := 0x10800000
diff --git a/trunk/arch/arm/mach-mx5/clock-mx51-mx53.c b/trunk/arch/arm/mach-imx/clock-mx51-mx53.c
similarity index 99%
rename from trunk/arch/arm/mach-mx5/clock-mx51-mx53.c
rename to trunk/arch/arm/mach-imx/clock-mx51-mx53.c
index 4cb276977190..08470504a088 100644
--- a/trunk/arch/arm/mach-mx5/clock-mx51-mx53.c
+++ b/trunk/arch/arm/mach-imx/clock-mx51-mx53.c
@@ -23,7 +23,7 @@
#include
#include
-#include "crm_regs.h"
+#include "crm-regs-imx5.h"
/* External clock values passed-in by the board code */
static unsigned long external_high_reference, external_low_reference;
diff --git a/trunk/arch/arm/mach-mx5/cpu.c b/trunk/arch/arm/mach-imx/cpu-imx5.c
similarity index 100%
rename from trunk/arch/arm/mach-mx5/cpu.c
rename to trunk/arch/arm/mach-imx/cpu-imx5.c
diff --git a/trunk/arch/arm/mach-mx5/cpu_op-mx51.c b/trunk/arch/arm/mach-imx/cpu_op-mx51.c
similarity index 100%
rename from trunk/arch/arm/mach-mx5/cpu_op-mx51.c
rename to trunk/arch/arm/mach-imx/cpu_op-mx51.c
diff --git a/trunk/arch/arm/mach-mx5/cpu_op-mx51.h b/trunk/arch/arm/mach-imx/cpu_op-mx51.h
similarity index 100%
rename from trunk/arch/arm/mach-mx5/cpu_op-mx51.h
rename to trunk/arch/arm/mach-imx/cpu_op-mx51.h
diff --git a/trunk/arch/arm/mach-mx5/crm_regs.h b/trunk/arch/arm/mach-imx/crm-regs-imx5.h
similarity index 100%
rename from trunk/arch/arm/mach-mx5/crm_regs.h
rename to trunk/arch/arm/mach-imx/crm-regs-imx5.h
diff --git a/trunk/arch/arm/mach-mx5/devices-imx50.h b/trunk/arch/arm/mach-imx/devices-imx50.h
similarity index 100%
rename from trunk/arch/arm/mach-mx5/devices-imx50.h
rename to trunk/arch/arm/mach-imx/devices-imx50.h
diff --git a/trunk/arch/arm/mach-mx5/devices-imx51.h b/trunk/arch/arm/mach-imx/devices-imx51.h
similarity index 100%
rename from trunk/arch/arm/mach-mx5/devices-imx51.h
rename to trunk/arch/arm/mach-imx/devices-imx51.h
diff --git a/trunk/arch/arm/mach-mx5/devices-imx53.h b/trunk/arch/arm/mach-imx/devices-imx53.h
similarity index 100%
rename from trunk/arch/arm/mach-mx5/devices-imx53.h
rename to trunk/arch/arm/mach-imx/devices-imx53.h
diff --git a/trunk/arch/arm/mach-mx5/efika.h b/trunk/arch/arm/mach-imx/efika.h
similarity index 100%
rename from trunk/arch/arm/mach-mx5/efika.h
rename to trunk/arch/arm/mach-imx/efika.h
diff --git a/trunk/arch/arm/mach-mx5/ehci.c b/trunk/arch/arm/mach-imx/ehci-imx5.c
similarity index 100%
rename from trunk/arch/arm/mach-mx5/ehci.c
rename to trunk/arch/arm/mach-imx/ehci-imx5.c
diff --git a/trunk/arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c b/trunk/arch/arm/mach-imx/eukrea_mbimx51-baseboard.c
similarity index 100%
rename from trunk/arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c
rename to trunk/arch/arm/mach-imx/eukrea_mbimx51-baseboard.c
diff --git a/trunk/arch/arm/mach-mx5/eukrea_mbimxsd-baseboard.c b/trunk/arch/arm/mach-imx/eukrea_mbimxsd-baseboard.c
similarity index 100%
rename from trunk/arch/arm/mach-mx5/eukrea_mbimxsd-baseboard.c
rename to trunk/arch/arm/mach-imx/eukrea_mbimxsd-baseboard.c
diff --git a/trunk/arch/arm/mach-mx5/imx51-dt.c b/trunk/arch/arm/mach-imx/imx51-dt.c
similarity index 100%
rename from trunk/arch/arm/mach-mx5/imx51-dt.c
rename to trunk/arch/arm/mach-imx/imx51-dt.c
diff --git a/trunk/arch/arm/mach-mx5/imx53-dt.c b/trunk/arch/arm/mach-imx/imx53-dt.c
similarity index 100%
rename from trunk/arch/arm/mach-mx5/imx53-dt.c
rename to trunk/arch/arm/mach-imx/imx53-dt.c
diff --git a/trunk/arch/arm/mach-mx5/board-cpuimx51.c b/trunk/arch/arm/mach-imx/mach-cpuimx51.c
similarity index 100%
rename from trunk/arch/arm/mach-mx5/board-cpuimx51.c
rename to trunk/arch/arm/mach-imx/mach-cpuimx51.c
diff --git a/trunk/arch/arm/mach-mx5/board-cpuimx51sd.c b/trunk/arch/arm/mach-imx/mach-cpuimx51sd.c
similarity index 100%
rename from trunk/arch/arm/mach-mx5/board-cpuimx51sd.c
rename to trunk/arch/arm/mach-imx/mach-cpuimx51sd.c
diff --git a/trunk/arch/arm/mach-mx5/board-mx50_rdp.c b/trunk/arch/arm/mach-imx/mach-mx50_rdp.c
similarity index 100%
rename from trunk/arch/arm/mach-mx5/board-mx50_rdp.c
rename to trunk/arch/arm/mach-imx/mach-mx50_rdp.c
diff --git a/trunk/arch/arm/mach-mx5/board-mx51_3ds.c b/trunk/arch/arm/mach-imx/mach-mx51_3ds.c
similarity index 100%
rename from trunk/arch/arm/mach-mx5/board-mx51_3ds.c
rename to trunk/arch/arm/mach-imx/mach-mx51_3ds.c
diff --git a/trunk/arch/arm/mach-mx5/board-mx51_babbage.c b/trunk/arch/arm/mach-imx/mach-mx51_babbage.c
similarity index 100%
rename from trunk/arch/arm/mach-mx5/board-mx51_babbage.c
rename to trunk/arch/arm/mach-imx/mach-mx51_babbage.c
diff --git a/trunk/arch/arm/mach-mx5/board-mx51_efikamx.c b/trunk/arch/arm/mach-imx/mach-mx51_efikamx.c
similarity index 100%
rename from trunk/arch/arm/mach-mx5/board-mx51_efikamx.c
rename to trunk/arch/arm/mach-imx/mach-mx51_efikamx.c
diff --git a/trunk/arch/arm/mach-mx5/board-mx51_efikasb.c b/trunk/arch/arm/mach-imx/mach-mx51_efikasb.c
similarity index 100%
rename from trunk/arch/arm/mach-mx5/board-mx51_efikasb.c
rename to trunk/arch/arm/mach-imx/mach-mx51_efikasb.c
diff --git a/trunk/arch/arm/mach-mx5/board-mx53_ard.c b/trunk/arch/arm/mach-imx/mach-mx53_ard.c
similarity index 99%
rename from trunk/arch/arm/mach-mx5/board-mx53_ard.c
rename to trunk/arch/arm/mach-imx/mach-mx53_ard.c
index 5f224f1c3eb6..08dfb7628d2d 100644
--- a/trunk/arch/arm/mach-mx5/board-mx53_ard.c
+++ b/trunk/arch/arm/mach-imx/mach-mx53_ard.c
@@ -32,7 +32,6 @@
#include
#include
-#include "crm_regs.h"
#include "devices-imx53.h"
#define ARD_ETHERNET_INT_B IMX_GPIO_NR(2, 31)
diff --git a/trunk/arch/arm/mach-mx5/board-mx53_evk.c b/trunk/arch/arm/mach-imx/mach-mx53_evk.c
similarity index 99%
rename from trunk/arch/arm/mach-mx5/board-mx53_evk.c
rename to trunk/arch/arm/mach-imx/mach-mx53_evk.c
index d6ce137896d6..5a72188b9cdb 100644
--- a/trunk/arch/arm/mach-mx5/board-mx53_evk.c
+++ b/trunk/arch/arm/mach-imx/mach-mx53_evk.c
@@ -37,7 +37,6 @@
#define EVK_ECSPI1_CS1 IMX_GPIO_NR(3, 19)
#define MX53EVK_LED IMX_GPIO_NR(7, 7)
-#include "crm_regs.h"
#include "devices-imx53.h"
static iomux_v3_cfg_t mx53_evk_pads[] = {
diff --git a/trunk/arch/arm/mach-mx5/board-mx53_loco.c b/trunk/arch/arm/mach-imx/mach-mx53_loco.c
similarity index 99%
rename from trunk/arch/arm/mach-mx5/board-mx53_loco.c
rename to trunk/arch/arm/mach-imx/mach-mx53_loco.c
index fd8b524e1c58..37f67cac15a4 100644
--- a/trunk/arch/arm/mach-mx5/board-mx53_loco.c
+++ b/trunk/arch/arm/mach-imx/mach-mx53_loco.c
@@ -32,7 +32,6 @@
#include
#include
-#include "crm_regs.h"
#include "devices-imx53.h"
#define MX53_LOCO_POWER IMX_GPIO_NR(1, 8)
diff --git a/trunk/arch/arm/mach-mx5/board-mx53_smd.c b/trunk/arch/arm/mach-imx/mach-mx53_smd.c
similarity index 99%
rename from trunk/arch/arm/mach-mx5/board-mx53_smd.c
rename to trunk/arch/arm/mach-imx/mach-mx53_smd.c
index 22c53c9b18aa..8e972c5c3e13 100644
--- a/trunk/arch/arm/mach-mx5/board-mx53_smd.c
+++ b/trunk/arch/arm/mach-imx/mach-mx53_smd.c
@@ -31,7 +31,6 @@
#include
#include
-#include "crm_regs.h"
#include "devices-imx53.h"
#define SMD_FEC_PHY_RST IMX_GPIO_NR(7, 6)
diff --git a/trunk/arch/arm/mach-mx5/mm.c b/trunk/arch/arm/mach-imx/mm-imx5.c
similarity index 100%
rename from trunk/arch/arm/mach-mx5/mm.c
rename to trunk/arch/arm/mach-imx/mm-imx5.c
diff --git a/trunk/arch/arm/mach-mx5/mx51_efika.c b/trunk/arch/arm/mach-imx/mx51_efika.c
similarity index 100%
rename from trunk/arch/arm/mach-mx5/mx51_efika.c
rename to trunk/arch/arm/mach-imx/mx51_efika.c
diff --git a/trunk/arch/arm/mach-mx5/system.c b/trunk/arch/arm/mach-imx/pm-imx5.c
similarity index 58%
rename from trunk/arch/arm/mach-mx5/system.c
rename to trunk/arch/arm/mach-imx/pm-imx5.c
index 5eebfaad1226..6dc093448057 100644
--- a/trunk/arch/arm/mach-mx5/system.c
+++ b/trunk/arch/arm/mach-imx/pm-imx5.c
@@ -1,8 +1,6 @@
/*
- * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
- */
-
-/*
+ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
@@ -10,14 +8,22 @@
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
-#include
+#include
+#include
#include
-#include
+#include
+#include
+#include
#include
-#include "crm_regs.h"
+#include
+#include "crm-regs-imx5.h"
+
+static struct clk *gpc_dvfs_clk;
-/* set cpu low power mode before WFI instruction. This function is called
- * mx5 because it can be used for mx50, mx51, and mx53.*/
+/*
+ * set cpu low power mode before WFI instruction. This function is called
+ * mx5 because it can be used for mx50, mx51, and mx53.
+ */
void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode)
{
u32 plat_lpc, arm_srpgcr, ccm_clpcr;
@@ -80,3 +86,68 @@ void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode)
__raw_writel(empgc1, MXC_SRPG_EMPGC1_SRPGCR);
}
}
+
+static int mx5_suspend_prepare(void)
+{
+ return clk_enable(gpc_dvfs_clk);
+}
+
+static int mx5_suspend_enter(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_MEM:
+ mx5_cpu_lp_set(STOP_POWER_OFF);
+ break;
+ case PM_SUSPEND_STANDBY:
+ mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (state == PM_SUSPEND_MEM) {
+ local_flush_tlb_all();
+ flush_cache_all();
+
+ /*clear the EMPGC0/1 bits */
+ __raw_writel(0, MXC_SRPG_EMPGC0_SRPGCR);
+ __raw_writel(0, MXC_SRPG_EMPGC1_SRPGCR);
+ }
+ cpu_do_idle();
+ return 0;
+}
+
+static void mx5_suspend_finish(void)
+{
+ clk_disable(gpc_dvfs_clk);
+}
+
+static int mx5_pm_valid(suspend_state_t state)
+{
+ return (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX);
+}
+
+static const struct platform_suspend_ops mx5_suspend_ops = {
+ .valid = mx5_pm_valid,
+ .prepare = mx5_suspend_prepare,
+ .enter = mx5_suspend_enter,
+ .finish = mx5_suspend_finish,
+};
+
+static int __init mx5_pm_init(void)
+{
+ if (!cpu_is_mx51() && !cpu_is_mx53())
+ return 0;
+
+ if (gpc_dvfs_clk == NULL)
+ gpc_dvfs_clk = clk_get(NULL, "gpc_dvfs");
+
+ if (!IS_ERR(gpc_dvfs_clk)) {
+ if (cpu_is_mx51())
+ suspend_set_ops(&mx5_suspend_ops);
+ } else
+ return -EPERM;
+
+ return 0;
+}
+device_initcall(mx5_pm_init);
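
The new pm-imx5.c above follows the standard pattern for hooking a platform into the generic suspend framework: register a struct platform_suspend_ops whose prepare/enter/finish callbacks bracket each suspend cycle. Below is a minimal self-contained sketch of that pattern; the example_* names are hypothetical and the bodies are placeholders, not the i.MX5 code itself.

#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/init.h>

/* Hypothetical callbacks mirroring the shape of the mx5_suspend_* hooks. */
static int example_suspend_prepare(void)
{
	/* grab any clocks or regulators needed while entering suspend */
	return 0;
}

static int example_suspend_enter(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_MEM:		/* deepest state */
	case PM_SUSPEND_STANDBY:	/* lighter state */
		break;
	default:
		return -EINVAL;
	}
	/* program the SoC low-power mode and idle the CPU here */
	return 0;
}

static void example_suspend_finish(void)
{
	/* release whatever prepare() acquired */
}

static int example_pm_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM || state == PM_SUSPEND_STANDBY;
}

static const struct platform_suspend_ops example_suspend_ops = {
	.valid   = example_pm_valid,
	.prepare = example_suspend_prepare,
	.enter   = example_suspend_enter,
	.finish  = example_suspend_finish,
};

static int __init example_pm_init(void)
{
	suspend_set_ops(&example_suspend_ops);
	return 0;
}
device_initcall(example_pm_init);
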
diff --git a/trunk/arch/arm/mach-mx5/Kconfig b/trunk/arch/arm/mach-mx5/Kconfig
deleted file mode 100644
index af0c212e3c7b..000000000000
--- a/trunk/arch/arm/mach-mx5/Kconfig
+++ /dev/null
@@ -1,244 +0,0 @@
-if ARCH_MX5
-
-# ARCH_MX5/50/53 are left to mark places where prevent multi-soc in single
-# image. So for most time, SOC_IMX50/51/53 should be used.
-
-config ARCH_MX51
- bool
-
-config ARCH_MX50
- bool
-
-config ARCH_MX53
- bool
-
-config SOC_IMX50
- bool
- select CPU_V7
- select ARM_L1_CACHE_SHIFT_6
- select MXC_TZIC
- select ARCH_MXC_IOMUX_V3
- select ARCH_MXC_AUDMUX_V2
- select ARCH_HAS_CPUFREQ
- select ARCH_MX50
-
-config SOC_IMX51
- bool
- select CPU_V7
- select ARM_L1_CACHE_SHIFT_6
- select MXC_TZIC
- select ARCH_MXC_IOMUX_V3
- select ARCH_MXC_AUDMUX_V2
- select ARCH_HAS_CPUFREQ
- select ARCH_MX51
-
-config SOC_IMX53
- bool
- select CPU_V7
- select ARM_L1_CACHE_SHIFT_6
- select MXC_TZIC
- select ARCH_MXC_IOMUX_V3
- select ARCH_MX53
-
-#comment "i.MX50 machines:"
-
-config MACH_MX50_RDP
- bool "Support MX50 reference design platform"
- depends on BROKEN
- select SOC_IMX50
- select IMX_HAVE_PLATFORM_IMX_I2C
- select IMX_HAVE_PLATFORM_IMX_UART
- select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
- select IMX_HAVE_PLATFORM_SPI_IMX
- help
- Include support for MX50 reference design platform (RDP) board. This
- includes specific configurations for the board and its peripherals.
-
-comment "i.MX51 machines:"
-
-config MACH_IMX51_DT
- bool "Support i.MX51 platforms from device tree"
- select SOC_IMX51
- select USE_OF
- select MACH_MX51_BABBAGE
- help
- Include support for Freescale i.MX51 based platforms
- using the device tree for discovery
-
-config MACH_MX51_BABBAGE
- bool "Support MX51 BABBAGE platforms"
- select SOC_IMX51
- select IMX_HAVE_PLATFORM_FSL_USB2_UDC
- select IMX_HAVE_PLATFORM_IMX2_WDT
- select IMX_HAVE_PLATFORM_IMX_I2C
- select IMX_HAVE_PLATFORM_IMX_UART
- select IMX_HAVE_PLATFORM_MXC_EHCI
- select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
- select IMX_HAVE_PLATFORM_SPI_IMX
- help
- Include support for MX51 Babbage platform, also known as MX51EVK in
- u-boot. This includes specific configurations for the board and its
- peripherals.
-
-config MACH_MX51_3DS
- bool "Support MX51PDK (3DS)"
- select SOC_IMX51
- select IMX_HAVE_PLATFORM_IMX2_WDT
- select IMX_HAVE_PLATFORM_IMX_KEYPAD
- select IMX_HAVE_PLATFORM_IMX_UART
- select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
- select IMX_HAVE_PLATFORM_SPI_IMX
- select MXC_DEBUG_BOARD
- help
- Include support for MX51PDK (3DS) platform. This includes specific
- configurations for the board and its peripherals.
-
-config MACH_EUKREA_CPUIMX51
- bool "Support Eukrea CPUIMX51 module"
- select SOC_IMX51
- select IMX_HAVE_PLATFORM_FSL_USB2_UDC
- select IMX_HAVE_PLATFORM_IMX_I2C
- select IMX_HAVE_PLATFORM_IMX_UART
- select IMX_HAVE_PLATFORM_MXC_EHCI
- select IMX_HAVE_PLATFORM_MXC_NAND
- select IMX_HAVE_PLATFORM_SPI_IMX
- help
- Include support for Eukrea CPUIMX51 platform. This includes
- specific configurations for the module and its peripherals.
-
-choice
- prompt "Baseboard"
- depends on MACH_EUKREA_CPUIMX51
- default MACH_EUKREA_MBIMX51_BASEBOARD
-
-config MACH_EUKREA_MBIMX51_BASEBOARD
- prompt "Eukrea MBIMX51 development board"
- bool
- select IMX_HAVE_PLATFORM_IMX_KEYPAD
- select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
- select LEDS_GPIO_REGISTER
- help
- This adds board specific devices that can be found on Eukrea's
- MBIMX51 evaluation board.
-
-endchoice
-
-config MACH_EUKREA_CPUIMX51SD
- bool "Support Eukrea CPUIMX51SD module"
- select SOC_IMX51
- select IMX_HAVE_PLATFORM_FSL_USB2_UDC
- select IMX_HAVE_PLATFORM_IMX_I2C
- select IMX_HAVE_PLATFORM_IMX_UART
- select IMX_HAVE_PLATFORM_MXC_EHCI
- select IMX_HAVE_PLATFORM_MXC_NAND
- select IMX_HAVE_PLATFORM_SPI_IMX
- help
- Include support for Eukrea CPUIMX51SD platform. This includes
- specific configurations for the module and its peripherals.
-
-choice
- prompt "Baseboard"
- depends on MACH_EUKREA_CPUIMX51SD
- default MACH_EUKREA_MBIMXSD51_BASEBOARD
-
-config MACH_EUKREA_MBIMXSD51_BASEBOARD
- prompt "Eukrea MBIMXSD development board"
- bool
- select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
- select LEDS_GPIO_REGISTER
- help
- This adds board specific devices that can be found on Eukrea's
- MBIMXSD evaluation board.
-
-endchoice
-
-config MX51_EFIKA_COMMON
- bool
- select SOC_IMX51
- select IMX_HAVE_PLATFORM_IMX_UART
- select IMX_HAVE_PLATFORM_MXC_EHCI
- select IMX_HAVE_PLATFORM_PATA_IMX
- select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
- select IMX_HAVE_PLATFORM_SPI_IMX
- select MXC_ULPI if USB_ULPI
-
-config MACH_MX51_EFIKAMX
- bool "Support MX51 Genesi Efika MX nettop"
- select LEDS_GPIO_REGISTER
- select MX51_EFIKA_COMMON
- help
- Include support for Genesi Efika MX nettop. This includes specific
- configurations for the board and its peripherals.
-
-config MACH_MX51_EFIKASB
- bool "Support MX51 Genesi Efika Smartbook"
- select LEDS_GPIO_REGISTER
- select MX51_EFIKA_COMMON
- help
- Include support for Genesi Efika Smartbook. This includes specific
- configurations for the board and its peripherals.
-
-comment "i.MX53 machines:"
-
-config MACH_IMX53_DT
- bool "Support i.MX53 platforms from device tree"
- select SOC_IMX53
- select USE_OF
- select MACH_MX53_ARD
- select MACH_MX53_EVK
- select MACH_MX53_LOCO
- select MACH_MX53_SMD
- help
- Include support for Freescale i.MX53 based platforms
- using the device tree for discovery
-
-config MACH_MX53_EVK
- bool "Support MX53 EVK platforms"
- select SOC_IMX53
- select IMX_HAVE_PLATFORM_IMX2_WDT
- select IMX_HAVE_PLATFORM_IMX_UART
- select IMX_HAVE_PLATFORM_IMX_I2C
- select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
- select IMX_HAVE_PLATFORM_SPI_IMX
- select LEDS_GPIO_REGISTER
- help
- Include support for MX53 EVK platform. This includes specific
- configurations for the board and its peripherals.
-
-config MACH_MX53_SMD
- bool "Support MX53 SMD platforms"
- select SOC_IMX53
- select IMX_HAVE_PLATFORM_IMX2_WDT
- select IMX_HAVE_PLATFORM_IMX_I2C
- select IMX_HAVE_PLATFORM_IMX_UART
- select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
- help
- Include support for MX53 SMD platform. This includes specific
- configurations for the board and its peripherals.
-
-config MACH_MX53_LOCO
- bool "Support MX53 LOCO platforms"
- select SOC_IMX53
- select IMX_HAVE_PLATFORM_IMX2_WDT
- select IMX_HAVE_PLATFORM_IMX_I2C
- select IMX_HAVE_PLATFORM_IMX_UART
- select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
- select IMX_HAVE_PLATFORM_GPIO_KEYS
- select LEDS_GPIO_REGISTER
- help
- Include support for MX53 LOCO platform. This includes specific
- configurations for the board and its peripherals.
-
-config MACH_MX53_ARD
- bool "Support MX53 ARD platforms"
- select SOC_IMX53
- select IMX_HAVE_PLATFORM_IMX2_WDT
- select IMX_HAVE_PLATFORM_IMX_I2C
- select IMX_HAVE_PLATFORM_IMX_UART
- select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
- select IMX_HAVE_PLATFORM_GPIO_KEYS
- help
- Include support for MX53 ARD platform. This includes specific
- configurations for the board and its peripherals.
-
-endif
diff --git a/trunk/arch/arm/mach-mx5/Makefile b/trunk/arch/arm/mach-mx5/Makefile
deleted file mode 100644
index 0fc60807fa2b..000000000000
--- a/trunk/arch/arm/mach-mx5/Makefile
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-# Object file lists.
-obj-y := cpu.o mm.o clock-mx51-mx53.o ehci.o system.o
-
-obj-$(CONFIG_PM) += pm-imx5.o
-obj-$(CONFIG_CPU_FREQ_IMX) += cpu_op-mx51.o
-obj-$(CONFIG_MACH_MX51_BABBAGE) += board-mx51_babbage.o
-obj-$(CONFIG_MACH_MX51_3DS) += board-mx51_3ds.o
-obj-$(CONFIG_MACH_MX53_EVK) += board-mx53_evk.o
-obj-$(CONFIG_MACH_MX53_SMD) += board-mx53_smd.o
-obj-$(CONFIG_MACH_MX53_LOCO) += board-mx53_loco.o
-obj-$(CONFIG_MACH_MX53_ARD) += board-mx53_ard.o
-obj-$(CONFIG_MACH_EUKREA_CPUIMX51) += board-cpuimx51.o
-obj-$(CONFIG_MACH_EUKREA_MBIMX51_BASEBOARD) += eukrea_mbimx51-baseboard.o
-obj-$(CONFIG_MACH_EUKREA_CPUIMX51SD) += board-cpuimx51sd.o
-obj-$(CONFIG_MACH_EUKREA_MBIMXSD51_BASEBOARD) += eukrea_mbimxsd-baseboard.o
-obj-$(CONFIG_MX51_EFIKA_COMMON) += mx51_efika.o
-obj-$(CONFIG_MACH_MX51_EFIKAMX) += board-mx51_efikamx.o
-obj-$(CONFIG_MACH_MX51_EFIKASB) += board-mx51_efikasb.o
-obj-$(CONFIG_MACH_MX50_RDP) += board-mx50_rdp.o
-
-obj-$(CONFIG_MACH_IMX51_DT) += imx51-dt.o
-obj-$(CONFIG_MACH_IMX53_DT) += imx53-dt.o
diff --git a/trunk/arch/arm/mach-mx5/Makefile.boot b/trunk/arch/arm/mach-mx5/Makefile.boot
deleted file mode 100644
index ca207ca305ec..000000000000
--- a/trunk/arch/arm/mach-mx5/Makefile.boot
+++ /dev/null
@@ -1,9 +0,0 @@
- zreladdr-$(CONFIG_ARCH_MX50) += 0x70008000
-params_phys-$(CONFIG_ARCH_MX50) := 0x70000100
-initrd_phys-$(CONFIG_ARCH_MX50) := 0x70800000
- zreladdr-$(CONFIG_ARCH_MX51) += 0x90008000
-params_phys-$(CONFIG_ARCH_MX51) := 0x90000100
-initrd_phys-$(CONFIG_ARCH_MX51) := 0x90800000
- zreladdr-$(CONFIG_ARCH_MX53) += 0x70008000
-params_phys-$(CONFIG_ARCH_MX53) := 0x70000100
-initrd_phys-$(CONFIG_ARCH_MX53) := 0x70800000
diff --git a/trunk/arch/arm/mach-mx5/pm-imx5.c b/trunk/arch/arm/mach-mx5/pm-imx5.c
deleted file mode 100644
index 98052fc852c7..000000000000
--- a/trunk/arch/arm/mach-mx5/pm-imx5.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include "crm_regs.h"
-
-static struct clk *gpc_dvfs_clk;
-
-static int mx5_suspend_prepare(void)
-{
- return clk_enable(gpc_dvfs_clk);
-}
-
-static int mx5_suspend_enter(suspend_state_t state)
-{
- switch (state) {
- case PM_SUSPEND_MEM:
- mx5_cpu_lp_set(STOP_POWER_OFF);
- break;
- case PM_SUSPEND_STANDBY:
- mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
- break;
- default:
- return -EINVAL;
- }
-
- if (state == PM_SUSPEND_MEM) {
- local_flush_tlb_all();
- flush_cache_all();
-
- /*clear the EMPGC0/1 bits */
- __raw_writel(0, MXC_SRPG_EMPGC0_SRPGCR);
- __raw_writel(0, MXC_SRPG_EMPGC1_SRPGCR);
- }
- cpu_do_idle();
- return 0;
-}
-
-static void mx5_suspend_finish(void)
-{
- clk_disable(gpc_dvfs_clk);
-}
-
-static int mx5_pm_valid(suspend_state_t state)
-{
- return (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX);
-}
-
-static const struct platform_suspend_ops mx5_suspend_ops = {
- .valid = mx5_pm_valid,
- .prepare = mx5_suspend_prepare,
- .enter = mx5_suspend_enter,
- .finish = mx5_suspend_finish,
-};
-
-static int __init mx5_pm_init(void)
-{
- if (gpc_dvfs_clk == NULL)
- gpc_dvfs_clk = clk_get(NULL, "gpc_dvfs");
-
- if (!IS_ERR(gpc_dvfs_clk)) {
- if (cpu_is_mx51())
- suspend_set_ops(&mx5_suspend_ops);
- } else
- return -EPERM;
-
- return 0;
-}
-device_initcall(mx5_pm_init);
diff --git a/trunk/arch/arm/plat-mxc/Kconfig b/trunk/arch/arm/plat-mxc/Kconfig
index b30708e28c1d..dcebb1230f7f 100644
--- a/trunk/arch/arm/plat-mxc/Kconfig
+++ b/trunk/arch/arm/plat-mxc/Kconfig
@@ -17,26 +17,17 @@ config ARCH_IMX_V4_V5
and ARMv5 SoCs
config ARCH_IMX_V6_V7
- bool "i.MX3, i.MX6"
+ bool "i.MX3, i.MX5, i.MX6"
select AUTO_ZRELADDR if !ZBOOT_ROM
select ARM_PATCH_PHYS_VIRT
select MIGHT_HAVE_CACHE_L2X0
help
- This enables support for systems based on the Freescale i.MX3 and i.MX6
- family.
-
-config ARCH_MX5
- bool "i.MX50, i.MX51, i.MX53"
- select AUTO_ZRELADDR if !ZBOOT_ROM
- select ARM_PATCH_PHYS_VIRT
- help
- This enables support for machines using Freescale's i.MX50 and i.MX53
- processors.
+ This enables support for systems based on the Freescale i.MX3, i.MX5
+ and i.MX6 family.
endchoice
source "arch/arm/mach-imx/Kconfig"
-source "arch/arm/mach-mx5/Kconfig"
endmenu
diff --git a/trunk/arch/ia64/kernel/acpi.c b/trunk/arch/ia64/kernel/acpi.c
index bfb4d01e0e51..5207035dc061 100644
--- a/trunk/arch/ia64/kernel/acpi.c
+++ b/trunk/arch/ia64/kernel/acpi.c
@@ -429,22 +429,24 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
static struct acpi_table_slit __initdata *slit_table;
cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
-static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
+static int __init
+get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
{
int pxm;
pxm = pa->proximity_domain_lo;
- if (ia64_platform_is("sn2"))
+ if (ia64_platform_is("sn2") || acpi_srat_revision >= 2)
pxm += pa->proximity_domain_hi[0] << 8;
return pxm;
}
-static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
+static int __init
+get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
{
int pxm;
pxm = ma->proximity_domain;
- if (!ia64_platform_is("sn2"))
+ if (!ia64_platform_is("sn2") && acpi_srat_revision <= 1)
pxm &= 0xff;
return pxm;
diff --git a/trunk/arch/s390/include/asm/kexec.h b/trunk/arch/s390/include/asm/kexec.h
index cf4e47b0948c..3f30dac804ea 100644
--- a/trunk/arch/s390/include/asm/kexec.h
+++ b/trunk/arch/s390/include/asm/kexec.h
@@ -42,6 +42,24 @@
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_S390
+/*
+ * Size for s390x ELF notes per CPU
+ *
+ * Seven notes plus zero note at the end: prstatus, fpregset, timer,
+ * tod_cmp, tod_reg, control regs, and prefix
+ */
+#define KEXEC_NOTE_BYTES \
+ (ALIGN(sizeof(struct elf_note), 4) * 8 + \
+ ALIGN(sizeof("CORE"), 4) * 7 + \
+ ALIGN(sizeof(struct elf_prstatus), 4) + \
+ ALIGN(sizeof(elf_fpregset_t), 4) + \
+ ALIGN(sizeof(u64), 4) + \
+ ALIGN(sizeof(u64), 4) + \
+ ALIGN(sizeof(u32), 4) + \
+ ALIGN(sizeof(u64) * 16, 4) + \
+ ALIGN(sizeof(u32), 4) \
+ )
+
/* Provide a dummy definition to avoid build failures. */
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs) { }
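
The KEXEC_NOTE_BYTES definition above sums, per CPU, seven ELF notes plus a terminating empty note, where each note is an aligned 12-byte header, an aligned "CORE" name, and an aligned payload. A small user-space sketch of the same bookkeeping follows; the payload sizes are illustrative placeholders, not the real s390 register-set sizes.

#include <stdio.h>
#include <stddef.h>

#define ALIGN4(x) (((x) + 3UL) & ~3UL)

/* One note = 12-byte elf_note header + aligned name + aligned payload. */
static size_t note_size(size_t name_len, size_t payload_len)
{
	return ALIGN4(12) + ALIGN4(name_len) + ALIGN4(payload_len);
}

int main(void)
{
	/* placeholder payload sizes for prstatus, fpregset, timers, etc. */
	size_t payloads[] = { 352, 200, 8, 8, 4, 128, 4 };
	size_t total = ALIGN4(12);	/* terminating empty note: header only */
	size_t i;

	for (i = 0; i < sizeof(payloads) / sizeof(payloads[0]); i++)
		total += note_size(sizeof("CORE"), payloads[i]);

	printf("per-cpu note buffer: %zu bytes\n", total);
	return 0;
}
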
diff --git a/trunk/arch/score/kernel/entry.S b/trunk/arch/score/kernel/entry.S
index 577abba3fac6..83bb96079c43 100644
--- a/trunk/arch/score/kernel/entry.S
+++ b/trunk/arch/score/kernel/entry.S
@@ -408,7 +408,7 @@ ENTRY(handle_sys)
sw r9, [r0, PT_EPC]
cmpi.c r27, __NR_syscalls # check syscall number
- bgtu illegal_syscall
+ bgeu illegal_syscall
slli r8, r27, 2 # get syscall routine
la r11, sys_call_table
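
The one-character fix above (bgtu to bgeu) closes a classic off-by-one in a table bounds check: valid syscall numbers run from 0 to __NR_syscalls - 1, so a number equal to __NR_syscalls must also branch to illegal_syscall. The equivalent test in C is sketched below; the count is an arbitrary illustration, not the real score value.

#include <stdbool.h>

#define __NR_syscalls 312	/* illustrative count; the table has entries 0..N-1 */

static bool syscall_nr_is_illegal(unsigned int nr)
{
	/* the old code effectively used 'nr > __NR_syscalls' and let the
	 * one-past-the-end index through; '>=' closes that hole */
	return nr >= __NR_syscalls;
}
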
diff --git a/trunk/arch/x86/.gitignore b/trunk/arch/x86/.gitignore
index 028079065af6..7cab8c08e6d1 100644
--- a/trunk/arch/x86/.gitignore
+++ b/trunk/arch/x86/.gitignore
@@ -1,3 +1,4 @@
boot/compressed/vmlinux
tools/test_get_len
+tools/insn_sanity
diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig
index 6c14ecd851d0..864cc6e6ac8e 100644
--- a/trunk/arch/x86/Kconfig
+++ b/trunk/arch/x86/Kconfig
@@ -125,16 +125,6 @@ config HAVE_LATENCYTOP_SUPPORT
config MMU
def_bool y
-config ZONE_DMA
- bool "DMA memory allocation support" if EXPERT
- default y
- help
- DMA memory allocation support allows devices with less than 32-bit
- addressing to allocate within the first 16MB of address space.
- Disable if no such devices will be used.
-
- If unsure, say Y.
-
config SBUS
bool
@@ -255,6 +245,16 @@ source "kernel/Kconfig.freezer"
menu "Processor type and features"
+config ZONE_DMA
+ bool "DMA memory allocation support" if EXPERT
+ default y
+ help
+ DMA memory allocation support allows devices with less than 32-bit
+ addressing to allocate within the first 16MB of address space.
+ Disable if no such devices will be used.
+
+ If unsure, say Y.
+
source "kernel/time/Kconfig"
config SMP
diff --git a/trunk/arch/x86/include/asm/unistd.h b/trunk/arch/x86/include/asm/unistd.h
index b4a3db7ce140..21f77b89e47a 100644
--- a/trunk/arch/x86/include/asm/unistd.h
+++ b/trunk/arch/x86/include/asm/unistd.h
@@ -7,6 +7,7 @@
# include
# define __ARCH_WANT_IPC_PARSE_VERSION
# define __ARCH_WANT_STAT64
+# define __ARCH_WANT_SYS_IPC
# define __ARCH_WANT_SYS_OLD_MMAP
# define __ARCH_WANT_SYS_OLD_SELECT
diff --git a/trunk/arch/x86/include/asm/uv/uv_bau.h b/trunk/arch/x86/include/asm/uv/uv_bau.h
index 8e862aaf0d90..becf47b81735 100644
--- a/trunk/arch/x86/include/asm/uv/uv_bau.h
+++ b/trunk/arch/x86/include/asm/uv/uv_bau.h
@@ -65,7 +65,7 @@
* UV2: Bit 19 selects between
* (0): 10 microsecond timebase and
* (1): 80 microseconds
- * we're using 655us, similar to UV1: 65 units of 10us
+ * we're using 560us, similar to UV1: 65 units of 10us
*/
#define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD (9UL)
#define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (15UL)
@@ -167,6 +167,7 @@
#define FLUSH_RETRY_TIMEOUT 2
#define FLUSH_GIVEUP 3
#define FLUSH_COMPLETE 4
+#define FLUSH_RETRY_BUSYBUG 5
/*
* tuning the action when the numalink network is extremely delayed
@@ -235,10 +236,10 @@ struct bau_msg_payload {
/*
- * Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
+ * UV1 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
* see table 4.2.3.0.1 in broadcast_assist spec.
*/
-struct bau_msg_header {
+struct uv1_bau_msg_header {
unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
/* bits 5:0 */
unsigned int base_dest_nasid:15; /* nasid of the first bit */
@@ -317,20 +318,88 @@ struct bau_msg_header {
/* bits 127:107 */
};
+/*
+ * UV2 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
+ * see figure 9-2 of harp_sys.pdf
+ */
+struct uv2_bau_msg_header {
+ unsigned int base_dest_nasid:15; /* nasid of the first bit */
+ /* bits 14:0 */ /* in uvhub map */
+ unsigned int dest_subnodeid:5; /* must be 0x10, for the LB */
+ /* bits 19:15 */
+ unsigned int rsvd_1:1; /* must be zero */
+ /* bit 20 */
+ /* Address bits 59:21 */
+ /* bits 25:2 of address (44:21) are payload */
+ /* these next 24 bits become bytes 12-14 of msg */
+ /* bits 28:21 land in byte 12 */
+ unsigned int replied_to:1; /* sent as 0 by the source to
+ byte 12 */
+ /* bit 21 */
+ unsigned int msg_type:3; /* software type of the
+ message */
+ /* bits 24:22 */
+ unsigned int canceled:1; /* message canceled, resource
+ is to be freed*/
+ /* bit 25 */
+ unsigned int payload_1:3; /* not currently used */
+ /* bits 28:26 */
+
+ /* bits 36:29 land in byte 13 */
+ unsigned int payload_2a:3; /* not currently used */
+ unsigned int payload_2b:5; /* not currently used */
+ /* bits 36:29 */
+
+ /* bits 44:37 land in byte 14 */
+ unsigned int payload_3:8; /* not currently used */
+ /* bits 44:37 */
+
+ unsigned int rsvd_2:7; /* reserved */
+ /* bits 51:45 */
+ unsigned int swack_flag:1; /* software acknowledge flag */
+ /* bit 52 */
+ unsigned int rsvd_3a:3; /* must be zero */
+ unsigned int rsvd_3b:8; /* must be zero */
+ unsigned int rsvd_3c:8; /* must be zero */
+ unsigned int rsvd_3d:3; /* must be zero */
+ /* bits 74:53 */
+ unsigned int fairness:3; /* usually zero */
+ /* bits 77:75 */
+
+ unsigned int sequence:16; /* message sequence number */
+ /* bits 93:78 Suppl_A */
+ unsigned int chaining:1; /* next descriptor is part of
+ this activation*/
+ /* bit 94 */
+ unsigned int multilevel:1; /* multi-level multicast
+ format */
+ /* bit 95 */
+ unsigned int rsvd_4:24; /* ordered / source node /
+ source subnode / aging
+ must be zero */
+ /* bits 119:96 */
+ unsigned int command:8; /* message type */
+ /* bits 127:120 */
+};
+
/*
* The activation descriptor:
* The format of the message to send, plus all accompanying control
* Should be 64 bytes
*/
struct bau_desc {
- struct pnmask distribution;
+ struct pnmask distribution;
/*
* message template, consisting of header and payload:
*/
- struct bau_msg_header header;
- struct bau_msg_payload payload;
+ union bau_msg_header {
+ struct uv1_bau_msg_header uv1_hdr;
+ struct uv2_bau_msg_header uv2_hdr;
+ } header;
+
+ struct bau_msg_payload payload;
};
-/*
+/* UV1:
* -payload-- ---------header------
* bytes 0-11 bits 41-56 bits 58-81
* A B (2) C (3)
@@ -340,6 +409,16 @@ struct bau_desc {
* bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
* ------------payload queue-----------
*/
+/* UV2:
+ * -payload-- ---------header------
+ * bytes 0-11 bits 70-78 bits 21-44
+ * A B (2) C (3)
+ *
+ * A/B/C are moved to:
+ * A C B
+ * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
+ * ------------payload queue-----------
+ */
/*
* The payload queue on the destination side is an array of these.
@@ -385,7 +464,6 @@ struct bau_pq_entry {
struct msg_desc {
struct bau_pq_entry *msg;
int msg_slot;
- int swack_slot;
struct bau_pq_entry *queue_first;
struct bau_pq_entry *queue_last;
};
@@ -405,6 +483,7 @@ struct ptc_stats {
requests */
unsigned long s_stimeout; /* source side timeouts */
unsigned long s_dtimeout; /* destination side timeouts */
+ unsigned long s_strongnacks; /* number of strong nack's */
unsigned long s_time; /* time spent in sending side */
unsigned long s_retriesok; /* successful retries */
unsigned long s_ntargcpu; /* total number of cpu's
@@ -439,6 +518,9 @@ struct ptc_stats {
unsigned long s_retry_messages; /* retry broadcasts */
unsigned long s_bau_reenabled; /* for bau enable/disable */
unsigned long s_bau_disabled; /* for bau enable/disable */
+ unsigned long s_uv2_wars; /* uv2 workaround, perm. busy */
+ unsigned long s_uv2_wars_hw; /* uv2 workaround, hiwater */
+ unsigned long s_uv2_war_waits; /* uv2 workaround, long waits */
/* destination statistics */
unsigned long d_alltlb; /* times all tlb's on this
cpu were flushed */
@@ -511,9 +593,12 @@ struct bau_control {
short osnode;
short uvhub_cpu;
short uvhub;
+ short uvhub_version;
short cpus_in_socket;
short cpus_in_uvhub;
short partition_base_pnode;
+ short using_desc; /* an index, like uvhub_cpu */
+ unsigned int inuse_map;
unsigned short message_number;
unsigned short uvhub_quiesce;
short socket_acknowledge_count[DEST_Q_SIZE];
@@ -531,6 +616,7 @@ struct bau_control {
int cong_response_us;
int cong_reps;
int cong_period;
+ unsigned long clocks_per_100_usec;
cycles_t period_time;
long period_requests;
struct hub_and_pnode *thp;
@@ -591,6 +677,11 @@ static inline void write_mmr_sw_ack(unsigned long mr)
uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
}
+static inline void write_gmmr_sw_ack(int pnode, unsigned long mr)
+{
+ write_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
+}
+
static inline unsigned long read_mmr_sw_ack(void)
{
return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
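
Most of the tlb_uv.c changes later in this patch follow from the header change above: struct bau_desc now carries a union of the UV1 and UV2 message headers, and per-cpu state (uvhub_version) selects which member to fill. A trimmed-down sketch of that dispatch is shown below, with single-field stand-ins for the real bitfield structs.

#include <stdint.h>

struct uv1_hdr_example { uint32_t msg_type; };
struct uv2_hdr_example { uint32_t msg_type; };

struct desc_example {
	union {
		struct uv1_hdr_example uv1_hdr;
		struct uv2_hdr_example uv2_hdr;
	} header;
};

/* Pick the header layout according to the recorded hub version. */
static void set_msg_type(struct desc_example *d, int uvhub_version, uint32_t type)
{
	if (uvhub_version == 1)
		d->header.uv1_hdr.msg_type = type;
	else
		d->header.uv2_hdr.msg_type = type;
}
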
diff --git a/trunk/arch/x86/kernel/e820.c b/trunk/arch/x86/kernel/e820.c
index 174d938d576b..62d61e9976eb 100644
--- a/trunk/arch/x86/kernel/e820.c
+++ b/trunk/arch/x86/kernel/e820.c
@@ -703,7 +703,7 @@ void __init e820_mark_nosave_regions(unsigned long limit_pfn)
}
#endif
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_ACPI
/**
* Mark ACPI NVS memory region, so that we can save/restore it during
* hibernation and the subsequent resume.
@@ -716,7 +716,7 @@ static int __init e820_mark_nvs_memory(void)
struct e820entry *ei = &e820.map[i];
if (ei->type == E820_NVS)
- suspend_nvs_register(ei->addr, ei->size);
+ acpi_nvs_register(ei->addr, ei->size);
}
return 0;
diff --git a/trunk/arch/x86/kernel/tsc.c b/trunk/arch/x86/kernel/tsc.c
index c0dd5b603749..a62c201c97ec 100644
--- a/trunk/arch/x86/kernel/tsc.c
+++ b/trunk/arch/x86/kernel/tsc.c
@@ -290,14 +290,15 @@ static inline int pit_verify_msb(unsigned char val)
static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
{
int count;
- u64 tsc = 0;
+ u64 tsc = 0, prev_tsc = 0;
for (count = 0; count < 50000; count++) {
if (!pit_verify_msb(val))
break;
+ prev_tsc = tsc;
tsc = get_cycles();
}
- *deltap = get_cycles() - tsc;
+ *deltap = get_cycles() - prev_tsc;
*tscp = tsc;
/*
@@ -311,9 +312,9 @@ static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *de
* How many MSB values do we want to see? We aim for
* a maximum error rate of 500ppm (in practice the
* real error is much smaller), but refuse to spend
- * more than 25ms on it.
+ * more than 50ms on it.
*/
-#define MAX_QUICK_PIT_MS 25
+#define MAX_QUICK_PIT_MS 50
#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
static unsigned long quick_pit_calibrate(void)
@@ -383,15 +384,12 @@ static unsigned long quick_pit_calibrate(void)
*
* As a result, we can depend on there not being
* any odd delays anywhere, and the TSC reads are
- * reliable (within the error). We also adjust the
- * delta to the middle of the error bars, just
- * because it looks nicer.
+ * reliable (within the error).
*
* kHz = ticks / time-in-seconds / 1000;
* kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
* kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
*/
- delta += (long)(d2 - d1)/2;
delta *= PIT_TICK_RATE;
do_div(delta, i*256*1000);
printk("Fast TSC calibration using PIT\n");
diff --git a/trunk/arch/x86/lib/x86-opcode-map.txt b/trunk/arch/x86/lib/x86-opcode-map.txt
index 5b83c51c12e0..819137904428 100644
--- a/trunk/arch/x86/lib/x86-opcode-map.txt
+++ b/trunk/arch/x86/lib/x86-opcode-map.txt
@@ -219,7 +219,9 @@ ab: STOS/W/D/Q Yv,rAX
ac: LODS/B AL,Xb
ad: LODS/W/D/Q rAX,Xv
ae: SCAS/B AL,Yb
-af: SCAS/W/D/Q rAX,Xv
+# Note: The May 2011 Intel manual shows Xv for the second parameter of the
+# next instruction but Yv is correct
+af: SCAS/W/D/Q rAX,Yv
# 0xb0 - 0xbf
b0: MOV AL/R8L,Ib
b1: MOV CL/R9L,Ib
@@ -729,8 +731,8 @@ de: VAESDEC Vdq,Hdq,Wdq (66),(v1)
df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1)
f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2)
f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2)
-f3: ANDN Gy,By,Ey (v)
-f4: Grp17 (1A)
+f2: ANDN Gy,By,Ey (v)
+f3: Grp17 (1A)
f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
f6: MULX By,Gy,rDX,Ey (F2),(v)
f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
diff --git a/trunk/arch/x86/mm/srat.c b/trunk/arch/x86/mm/srat.c
index fd61b3fb7341..1c1c4f46a7c1 100644
--- a/trunk/arch/x86/mm/srat.c
+++ b/trunk/arch/x86/mm/srat.c
@@ -109,6 +109,8 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
return;
pxm = pa->proximity_domain_lo;
+ if (acpi_srat_revision >= 2)
+ pxm |= *((unsigned int*)pa->proximity_domain_hi) << 8;
node = setup_node(pxm);
if (node < 0) {
printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
@@ -160,6 +162,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
start = ma->base_address;
end = start + ma->length;
pxm = ma->proximity_domain;
+ if (acpi_srat_revision <= 1)
+ pxm &= 0xff;
node = setup_node(pxm);
if (node < 0) {
printk(KERN_ERR "SRAT: Too many proximity domains.\n");
diff --git a/trunk/arch/x86/platform/uv/tlb_uv.c b/trunk/arch/x86/platform/uv/tlb_uv.c
index 5b552198f774..9be4cff00a2d 100644
--- a/trunk/arch/x86/platform/uv/tlb_uv.c
+++ b/trunk/arch/x86/platform/uv/tlb_uv.c
@@ -157,13 +157,14 @@ static int __init uvhub_to_first_apicid(int uvhub)
* clear of the Timeout bit (as well) will free the resource. No reply will
* be sent (the hardware will only do one reply per message).
*/
-static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp)
+static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
+ int do_acknowledge)
{
unsigned long dw;
struct bau_pq_entry *msg;
msg = mdp->msg;
- if (!msg->canceled) {
+ if (!msg->canceled && do_acknowledge) {
dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
write_mmr_sw_ack(dw);
}
@@ -212,8 +213,8 @@ static void bau_process_retry_msg(struct msg_desc *mdp,
if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
unsigned long mr;
/*
- * is the resource timed out?
- * make everyone ignore the cancelled message.
+ * Is the resource timed out?
+ * Make everyone ignore the cancelled message.
*/
msg2->canceled = 1;
stat->d_canceled++;
@@ -231,8 +232,8 @@ static void bau_process_retry_msg(struct msg_desc *mdp,
* Do all the things a cpu should do for a TLB shootdown message.
* Other cpu's may come here at the same time for this message.
*/
-static void bau_process_message(struct msg_desc *mdp,
- struct bau_control *bcp)
+static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
+ int do_acknowledge)
{
short socket_ack_count = 0;
short *sp;
@@ -284,8 +285,9 @@ static void bau_process_message(struct msg_desc *mdp,
if (msg_ack_count == bcp->cpus_in_uvhub) {
/*
* All cpus in uvhub saw it; reply
+ * (unless we are in the UV2 workaround)
*/
- reply_to_message(mdp, bcp);
+ reply_to_message(mdp, bcp, do_acknowledge);
}
}
@@ -491,27 +493,138 @@ static int uv1_wait_completion(struct bau_desc *bau_desc,
/*
* UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register.
*/
-static unsigned long uv2_read_status(unsigned long offset, int rshft, int cpu)
+static unsigned long uv2_read_status(unsigned long offset, int rshft, int desc)
{
unsigned long descriptor_status;
unsigned long descriptor_status2;
descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK);
- descriptor_status2 = (read_mmr_uv2_status() >> cpu) & 0x1UL;
+ descriptor_status2 = (read_mmr_uv2_status() >> desc) & 0x1UL;
descriptor_status = (descriptor_status << 1) | descriptor_status2;
return descriptor_status;
}
+/*
+ * Return whether the status of the descriptor that is normally used for this
+ * cpu (the one indexed by its hub-relative cpu number) is busy.
+ * The status of the original 32 descriptors is always reflected in the 64
+ * bits of UVH_LB_BAU_SB_ACTIVATION_STATUS_0.
+ * The bit provided by the activation_status_2 register is irrelevant to
+ * the status if it is only being tested for busy or not busy.
+ */
+int normal_busy(struct bau_control *bcp)
+{
+ int cpu = bcp->uvhub_cpu;
+ int mmr_offset;
+ int right_shift;
+
+ mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
+ right_shift = cpu * UV_ACT_STATUS_SIZE;
+ return (((((read_lmmr(mmr_offset) >> right_shift) &
+ UV_ACT_STATUS_MASK)) << 1) == UV2H_DESC_BUSY);
+}
+
+/*
+ * Entered when a bau descriptor has gone into a permanent busy wait because
+ * of a hardware bug.
+ * Workaround the bug.
+ */
+int handle_uv2_busy(struct bau_control *bcp)
+{
+ int busy_one = bcp->using_desc;
+ int normal = bcp->uvhub_cpu;
+ int selected = -1;
+ int i;
+ unsigned long descriptor_status;
+ unsigned long status;
+ int mmr_offset;
+ struct bau_desc *bau_desc_old;
+ struct bau_desc *bau_desc_new;
+ struct bau_control *hmaster = bcp->uvhub_master;
+ struct ptc_stats *stat = bcp->statp;
+ cycles_t ttm;
+
+ stat->s_uv2_wars++;
+ spin_lock(&hmaster->uvhub_lock);
+ /* try for the original first */
+ if (busy_one != normal) {
+ if (!normal_busy(bcp))
+ selected = normal;
+ }
+ if (selected < 0) {
+ /* can't use the normal, select an alternate */
+ mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
+ descriptor_status = read_lmmr(mmr_offset);
+
+ /* scan available descriptors 32-63 */
+ for (i = 0; i < UV_CPUS_PER_AS; i++) {
+ if ((hmaster->inuse_map & (1 << i)) == 0) {
+ status = ((descriptor_status >>
+ (i * UV_ACT_STATUS_SIZE)) &
+ UV_ACT_STATUS_MASK) << 1;
+ if (status != UV2H_DESC_BUSY) {
+ selected = i + UV_CPUS_PER_AS;
+ break;
+ }
+ }
+ }
+ }
+
+ if (busy_one != normal)
+ /* mark the busy alternate as not in-use */
+ hmaster->inuse_map &= ~(1 << (busy_one - UV_CPUS_PER_AS));
+
+ if (selected >= 0) {
+ /* switch to the selected descriptor */
+ if (selected != normal) {
+ /* set the selected alternate as in-use */
+ hmaster->inuse_map |=
+ (1 << (selected - UV_CPUS_PER_AS));
+ if (selected > stat->s_uv2_wars_hw)
+ stat->s_uv2_wars_hw = selected;
+ }
+ bau_desc_old = bcp->descriptor_base;
+ bau_desc_old += (ITEMS_PER_DESC * busy_one);
+ bcp->using_desc = selected;
+ bau_desc_new = bcp->descriptor_base;
+ bau_desc_new += (ITEMS_PER_DESC * selected);
+ *bau_desc_new = *bau_desc_old;
+ } else {
+ /*
+ * All are busy. Wait for the normal one for this cpu to
+ * free up.
+ */
+ stat->s_uv2_war_waits++;
+ spin_unlock(&hmaster->uvhub_lock);
+ ttm = get_cycles();
+ do {
+ cpu_relax();
+ } while (normal_busy(bcp));
+ spin_lock(&hmaster->uvhub_lock);
+ /* switch to the original descriptor */
+ bcp->using_desc = normal;
+ bau_desc_old = bcp->descriptor_base;
+ bau_desc_old += (ITEMS_PER_DESC * bcp->using_desc);
+ bcp->using_desc = (ITEMS_PER_DESC * normal);
+ bau_desc_new = bcp->descriptor_base;
+ bau_desc_new += (ITEMS_PER_DESC * normal);
+ *bau_desc_new = *bau_desc_old; /* copy the entire descriptor */
+ }
+ spin_unlock(&hmaster->uvhub_lock);
+ return FLUSH_RETRY_BUSYBUG;
+}
+
static int uv2_wait_completion(struct bau_desc *bau_desc,
unsigned long mmr_offset, int right_shift,
struct bau_control *bcp, long try)
{
unsigned long descriptor_stat;
cycles_t ttm;
- int cpu = bcp->uvhub_cpu;
+ int desc = bcp->using_desc;
+ long busy_reps = 0;
struct ptc_stats *stat = bcp->statp;
- descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
+ descriptor_stat = uv2_read_status(mmr_offset, right_shift, desc);
/* spin on the status MMR, waiting for it to go idle */
while (descriptor_stat != UV2H_DESC_IDLE) {
@@ -522,32 +635,35 @@ static int uv2_wait_completion(struct bau_desc *bau_desc,
* our message and its state will stay IDLE.
*/
if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) ||
- (descriptor_stat == UV2H_DESC_DEST_STRONG_NACK) ||
(descriptor_stat == UV2H_DESC_DEST_PUT_ERR)) {
stat->s_stimeout++;
return FLUSH_GIVEUP;
+ } else if (descriptor_stat == UV2H_DESC_DEST_STRONG_NACK) {
+ stat->s_strongnacks++;
+ bcp->conseccompletes = 0;
+ return FLUSH_GIVEUP;
} else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
stat->s_dtimeout++;
- ttm = get_cycles();
- /*
- * Our retries may be blocked by all destination
- * swack resources being consumed, and a timeout
- * pending. In that case hardware returns the
- * ERROR that looks like a destination timeout.
- */
- if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
- bcp->conseccompletes = 0;
- return FLUSH_RETRY_PLUGGED;
- }
bcp->conseccompletes = 0;
return FLUSH_RETRY_TIMEOUT;
} else {
+ busy_reps++;
+ if (busy_reps > 1000000) {
+ /* not to hammer on the clock */
+ busy_reps = 0;
+ ttm = get_cycles();
+ if ((ttm - bcp->send_message) >
+ (bcp->clocks_per_100_usec)) {
+ return handle_uv2_busy(bcp);
+ }
+ }
/*
* descriptor_stat is still BUSY
*/
cpu_relax();
}
- descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
+ descriptor_stat = uv2_read_status(mmr_offset, right_shift,
+ desc);
}
bcp->conseccompletes++;
return FLUSH_COMPLETE;
@@ -563,17 +679,17 @@ static int wait_completion(struct bau_desc *bau_desc,
{
int right_shift;
unsigned long mmr_offset;
- int cpu = bcp->uvhub_cpu;
+ int desc = bcp->using_desc;
- if (cpu < UV_CPUS_PER_AS) {
+ if (desc < UV_CPUS_PER_AS) {
mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
- right_shift = cpu * UV_ACT_STATUS_SIZE;
+ right_shift = desc * UV_ACT_STATUS_SIZE;
} else {
mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
- right_shift = ((cpu - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
+ right_shift = ((desc - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
}
- if (is_uv1_hub())
+ if (bcp->uvhub_version == 1)
return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
bcp, try);
else
@@ -752,19 +868,22 @@ static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
* Returns 1 if it gives up entirely and the original cpu mask is to be
* returned to the kernel.
*/
-int uv_flush_send_and_wait(struct bau_desc *bau_desc,
- struct cpumask *flush_mask, struct bau_control *bcp)
+int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp)
{
int seq_number = 0;
int completion_stat = 0;
+ int uv1 = 0;
long try = 0;
unsigned long index;
cycles_t time1;
cycles_t time2;
struct ptc_stats *stat = bcp->statp;
struct bau_control *hmaster = bcp->uvhub_master;
+ struct uv1_bau_msg_header *uv1_hdr = NULL;
+ struct uv2_bau_msg_header *uv2_hdr = NULL;
+ struct bau_desc *bau_desc;
- if (is_uv1_hub())
+ if (bcp->uvhub_version == 1)
uv1_throttle(hmaster, stat);
while (hmaster->uvhub_quiesce)
@@ -772,22 +891,39 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
time1 = get_cycles();
do {
- if (try == 0) {
- bau_desc->header.msg_type = MSG_REGULAR;
+ bau_desc = bcp->descriptor_base;
+ bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
+ if (bcp->uvhub_version == 1) {
+ uv1 = 1;
+ uv1_hdr = &bau_desc->header.uv1_hdr;
+ } else
+ uv2_hdr = &bau_desc->header.uv2_hdr;
+ if ((try == 0) || (completion_stat == FLUSH_RETRY_BUSYBUG)) {
+ if (uv1)
+ uv1_hdr->msg_type = MSG_REGULAR;
+ else
+ uv2_hdr->msg_type = MSG_REGULAR;
seq_number = bcp->message_number++;
} else {
- bau_desc->header.msg_type = MSG_RETRY;
+ if (uv1)
+ uv1_hdr->msg_type = MSG_RETRY;
+ else
+ uv2_hdr->msg_type = MSG_RETRY;
stat->s_retry_messages++;
}
- bau_desc->header.sequence = seq_number;
- index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
+ if (uv1)
+ uv1_hdr->sequence = seq_number;
+ else
+ uv2_hdr->sequence = seq_number;
+ index = (1UL << AS_PUSH_SHIFT) | bcp->using_desc;
bcp->send_message = get_cycles();
write_mmr_activation(index);
try++;
completion_stat = wait_completion(bau_desc, bcp, try);
+ /* UV2: wait_completion() may change the bcp->using_desc */
handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
@@ -798,6 +934,7 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
}
cpu_relax();
} while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
+ (completion_stat == FLUSH_RETRY_BUSYBUG) ||
(completion_stat == FLUSH_RETRY_TIMEOUT));
time2 = get_cycles();
@@ -812,6 +949,7 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
record_send_stats(time1, time2, bcp, stat, completion_stat, try);
if (completion_stat == FLUSH_GIVEUP)
+ /* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
return 1;
return 0;
}
@@ -967,7 +1105,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
stat->s_ntargself++;
bau_desc = bcp->descriptor_base;
- bau_desc += ITEMS_PER_DESC * bcp->uvhub_cpu;
+ bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
return NULL;
@@ -980,12 +1118,85 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
* uv_flush_send_and_wait returns 0 if all cpu's were messaged,
* or 1 if it gave up and the original cpumask should be returned.
*/
- if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
+ if (!uv_flush_send_and_wait(flush_mask, bcp))
return NULL;
else
return cpumask;
}
+/*
+ * Search the message queue for any 'other' message with the same software
+ * acknowledge resource bit vector.
+ */
+struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
+ struct bau_control *bcp, unsigned char swack_vec)
+{
+ struct bau_pq_entry *msg_next = msg + 1;
+
+ if (msg_next > bcp->queue_last)
+ msg_next = bcp->queue_first;
+ while ((msg_next->swack_vec != 0) && (msg_next != msg)) {
+ if (msg_next->swack_vec == swack_vec)
+ return msg_next;
+ msg_next++;
+ if (msg_next > bcp->queue_last)
+ msg_next = bcp->queue_first;
+ }
+ return NULL;
+}
+
+/*
+ * UV2 needs to work around a bug in which an arriving message has not
+ * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
+ * Such a message must be ignored.
+ */
+void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
+{
+ unsigned long mmr_image;
+ unsigned char swack_vec;
+ struct bau_pq_entry *msg = mdp->msg;
+ struct bau_pq_entry *other_msg;
+
+ mmr_image = read_mmr_sw_ack();
+ swack_vec = msg->swack_vec;
+
+ if ((swack_vec & mmr_image) == 0) {
+ /*
+ * This message was assigned a swack resource, but no
+	 * reserved acknowledgment is pending.
+ * The bug has prevented this message from setting the MMR.
+ * And no other message has used the same sw_ack resource.
+ * Do the requested shootdown but do not reply to the msg.
+ * (the 0 means make no acknowledge)
+ */
+ bau_process_message(mdp, bcp, 0);
+ return;
+ }
+
+ /*
+ * Some message has set the MMR 'pending' bit; it might have been
+ * another message. Look for that message.
+ */
+ other_msg = find_another_by_swack(msg, bcp, msg->swack_vec);
+ if (other_msg) {
+ /* There is another. Do not ack the current one. */
+ bau_process_message(mdp, bcp, 0);
+ /*
+ * Let the natural processing of that message acknowledge
+ * it. Don't get the processing of sw_ack's out of order.
+ */
+ return;
+ }
+
+ /*
+ * There is no other message using this sw_ack, so it is safe to
+ * acknowledge it.
+ */
+ bau_process_message(mdp, bcp, 1);
+
+ return;
+}
+
/*
* The BAU message interrupt comes here. (registered by set_intr_gate)
* See entry_64.S
@@ -1009,6 +1220,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
struct ptc_stats *stat;
struct msg_desc msgdesc;
+ ack_APIC_irq();
time_start = get_cycles();
bcp = &per_cpu(bau_control, smp_processor_id());
@@ -1022,9 +1234,11 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
count++;
msgdesc.msg_slot = msg - msgdesc.queue_first;
- msgdesc.swack_slot = ffs(msg->swack_vec) - 1;
msgdesc.msg = msg;
- bau_process_message(&msgdesc, bcp);
+ if (bcp->uvhub_version == 2)
+ process_uv2_message(&msgdesc, bcp);
+ else
+ bau_process_message(&msgdesc, bcp, 1);
msg++;
if (msg > msgdesc.queue_last)
@@ -1036,8 +1250,6 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
stat->d_nomsg++;
else if (count > 1)
stat->d_multmsg++;
-
- ack_APIC_irq();
}
/*
@@ -1083,7 +1295,7 @@ static void __init enable_timeouts(void)
*/
mmr_image |= (1L << SOFTACK_MSHIFT);
if (is_uv2_hub()) {
- mmr_image |= (1L << UV2_LEG_SHFT);
+ mmr_image &= ~(1L << UV2_LEG_SHFT);
mmr_image |= (1L << UV2_EXT_SHFT);
}
write_mmr_misc_control(pnode, mmr_image);
@@ -1136,13 +1348,13 @@ static int ptc_seq_show(struct seq_file *file, void *data)
seq_printf(file,
"remotehub numuvhubs numuvhubs16 numuvhubs8 ");
seq_printf(file,
- "numuvhubs4 numuvhubs2 numuvhubs1 dto retries rok ");
+ "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries rok ");
seq_printf(file,
"resetp resett giveup sto bz throt swack recv rtime ");
seq_printf(file,
"all one mult none retry canc nocan reset rcan ");
seq_printf(file,
- "disable enable\n");
+ "disable enable wars warshw warwaits\n");
}
if (cpu < num_possible_cpus() && cpu_online(cpu)) {
stat = &per_cpu(ptcstats, cpu);
@@ -1154,10 +1366,10 @@ static int ptc_seq_show(struct seq_file *file, void *data)
stat->s_ntargremotes, stat->s_ntargcpu,
stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
stat->s_ntarguvhub, stat->s_ntarguvhub16);
- seq_printf(file, "%ld %ld %ld %ld %ld ",
+ seq_printf(file, "%ld %ld %ld %ld %ld %ld ",
stat->s_ntarguvhub8, stat->s_ntarguvhub4,
stat->s_ntarguvhub2, stat->s_ntarguvhub1,
- stat->s_dtimeout);
+ stat->s_dtimeout, stat->s_strongnacks);
seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
stat->s_retry_messages, stat->s_retriesok,
stat->s_resets_plug, stat->s_resets_timeout,
@@ -1173,8 +1385,10 @@ static int ptc_seq_show(struct seq_file *file, void *data)
stat->d_nomsg, stat->d_retries, stat->d_canceled,
stat->d_nocanceled, stat->d_resets,
stat->d_rcanceled);
- seq_printf(file, "%ld %ld\n",
- stat->s_bau_disabled, stat->s_bau_reenabled);
+ seq_printf(file, "%ld %ld %ld %ld %ld\n",
+ stat->s_bau_disabled, stat->s_bau_reenabled,
+ stat->s_uv2_wars, stat->s_uv2_wars_hw,
+ stat->s_uv2_war_waits);
}
return 0;
}
@@ -1432,12 +1646,15 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
{
int i;
int cpu;
+ int uv1 = 0;
unsigned long gpa;
unsigned long m;
unsigned long n;
size_t dsize;
struct bau_desc *bau_desc;
struct bau_desc *bd2;
+ struct uv1_bau_msg_header *uv1_hdr;
+ struct uv2_bau_msg_header *uv2_hdr;
struct bau_control *bcp;
/*
@@ -1451,6 +1668,8 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
gpa = uv_gpa(bau_desc);
n = uv_gpa_to_gnode(gpa);
m = uv_gpa_to_offset(gpa);
+ if (is_uv1_hub())
+ uv1 = 1;
/* the 14-bit pnode */
write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
@@ -1461,21 +1680,33 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
*/
for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
memset(bd2, 0, sizeof(struct bau_desc));
- bd2->header.swack_flag = 1;
- /*
- * The base_dest_nasid set in the message header is the nasid
- * of the first uvhub in the partition. The bit map will
- * indicate destination pnode numbers relative to that base.
- * They may not be consecutive if nasid striding is being used.
- */
- bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
- bd2->header.dest_subnodeid = UV_LB_SUBNODEID;
- bd2->header.command = UV_NET_ENDPOINT_INTD;
- bd2->header.int_both = 1;
- /*
- * all others need to be set to zero:
- * fairness chaining multilevel count replied_to
- */
+ if (uv1) {
+ uv1_hdr = &bd2->header.uv1_hdr;
+ uv1_hdr->swack_flag = 1;
+ /*
+ * The base_dest_nasid set in the message header
+ * is the nasid of the first uvhub in the partition.
+ * The bit map will indicate destination pnode numbers
+ * relative to that base. They may not be consecutive
+ * if nasid striding is being used.
+ */
+ uv1_hdr->base_dest_nasid =
+ UV_PNODE_TO_NASID(base_pnode);
+ uv1_hdr->dest_subnodeid = UV_LB_SUBNODEID;
+ uv1_hdr->command = UV_NET_ENDPOINT_INTD;
+ uv1_hdr->int_both = 1;
+ /*
+ * all others need to be set to zero:
+ * fairness chaining multilevel count replied_to
+ */
+ } else {
+ uv2_hdr = &bd2->header.uv2_hdr;
+ uv2_hdr->swack_flag = 1;
+ uv2_hdr->base_dest_nasid =
+ UV_PNODE_TO_NASID(base_pnode);
+ uv2_hdr->dest_subnodeid = UV_LB_SUBNODEID;
+ uv2_hdr->command = UV_NET_ENDPOINT_INTD;
+ }
}
for_each_present_cpu(cpu) {
if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
@@ -1531,6 +1762,7 @@ static void pq_init(int node, int pnode)
write_mmr_payload_first(pnode, pn_first);
write_mmr_payload_tail(pnode, first);
write_mmr_payload_last(pnode, last);
+ write_gmmr_sw_ack(pnode, 0xffffUL);
/* in effect, all msg_type's are set to MSG_NOOP */
memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
@@ -1584,14 +1816,14 @@ static int calculate_destination_timeout(void)
ts_ns = base * mult1 * mult2;
ret = ts_ns / 1000;
} else {
- /* 4 bits 0/1 for 10/80us, 3 bits of multiplier */
- mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
+ /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
+ mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
- mult1 = 80;
+ base = 80;
else
- mult1 = 10;
- base = mmr_image & UV2_ACK_MASK;
+ base = 10;
+ mult1 = mmr_image & UV2_ACK_MASK;
ret = mult1 * base;
}
return ret;
@@ -1618,6 +1850,7 @@ static void __init init_per_cpu_tunables(void)
bcp->cong_response_us = congested_respns_us;
bcp->cong_reps = congested_reps;
bcp->cong_period = congested_period;
+ bcp->clocks_per_100_usec = usec_2_cycles(100);
}
}
@@ -1728,8 +1961,17 @@ static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
bcp->cpus_in_socket = sdp->num_cpus;
bcp->socket_master = *smasterp;
bcp->uvhub = bdp->uvhub;
+ if (is_uv1_hub())
+ bcp->uvhub_version = 1;
+ else if (is_uv2_hub())
+ bcp->uvhub_version = 2;
+ else {
+ printk(KERN_EMERG "uvhub version not 1 or 2\n");
+ return 1;
+ }
bcp->uvhub_master = *hmasterp;
bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
+ bcp->using_desc = bcp->uvhub_cpu;
if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
printk(KERN_EMERG "%d cpus per uvhub invalid\n",
bcp->uvhub_cpu);
@@ -1845,6 +2087,8 @@ static int __init uv_bau_init(void)
uv_base_pnode = uv_blade_to_pnode(uvhub);
}
+ enable_timeouts();
+
if (init_per_cpu(nuvhubs, uv_base_pnode)) {
nobau = 1;
return 0;
@@ -1855,7 +2099,6 @@ static int __init uv_bau_init(void)
if (uv_blade_nr_possible_cpus(uvhub))
init_uvhub(uvhub, vector, uv_base_pnode);
- enable_timeouts();
alloc_intr_gate(vector, uv_bau_message_intr1);
for_each_possible_blade(uvhub) {
@@ -1867,7 +2110,8 @@ static int __init uv_bau_init(void)
val = 1L << 63;
write_gmmr_activation(pnode, val);
mmr = 1; /* should be 1 to broadcast to both sockets */
- write_mmr_data_broadcast(pnode, mmr);
+ if (!is_uv1_hub())
+ write_mmr_data_broadcast(pnode, mmr);
}
}
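
A recurring building block in the UV2 paths added above is uv2_read_status(): the 2-bit descriptor status from ACTIVATION_STATUS_0/1 is widened with one extra bit from ACTIVATION_STATUS_2 to form a 3-bit value such as UV2H_DESC_IDLE or UV2H_DESC_BUSY. The sketch below keeps only the bit arithmetic, with the MMR reads replaced by plain parameters.

#include <stdint.h>

#define UV_ACT_STATUS_SIZE	2
#define UV_ACT_STATUS_MASK	0x3

static uint64_t example_uv2_status(uint64_t status_mmr, uint64_t status2_mmr, int desc)
{
	/* two bits per descriptor in the main status register ... */
	uint64_t stat  = (status_mmr >> (desc * UV_ACT_STATUS_SIZE)) & UV_ACT_STATUS_MASK;
	/* ... plus one bit per descriptor in the second status register */
	uint64_t stat2 = (status2_mmr >> desc) & 0x1ULL;

	return (stat << 1) | stat2;	/* 3-bit combined status */
}
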
diff --git a/trunk/arch/x86/um/shared/sysdep/ptrace.h b/trunk/arch/x86/um/shared/sysdep/ptrace.h
index 5ef9344a8b24..2bbe1ec2d96a 100644
--- a/trunk/arch/x86/um/shared/sysdep/ptrace.h
+++ b/trunk/arch/x86/um/shared/sysdep/ptrace.h
@@ -1,3 +1,6 @@
+#ifndef __SYSDEP_X86_PTRACE_H
+#define __SYSDEP_X86_PTRACE_H
+
#ifdef __i386__
#include "ptrace_32.h"
#else
@@ -8,3 +11,5 @@ static inline long regs_return_value(struct uml_pt_regs *regs)
{
return UPT_SYSCALL_RET(regs);
}
+
+#endif /* __SYSDEP_X86_PTRACE_H */
diff --git a/trunk/drivers/acpi/Makefile b/trunk/drivers/acpi/Makefile
index ecb26b4f29a0..c07f44f05f9d 100644
--- a/trunk/drivers/acpi/Makefile
+++ b/trunk/drivers/acpi/Makefile
@@ -20,11 +20,12 @@ obj-y += acpi.o \
# All the builtin files are in the "acpi." module_param namespace.
acpi-y += osl.o utils.o reboot.o
acpi-y += atomicio.o
+acpi-y += nvs.o
# sleep related files
acpi-y += wakeup.o
acpi-y += sleep.o
-acpi-$(CONFIG_ACPI_SLEEP) += proc.o nvs.o
+acpi-$(CONFIG_ACPI_SLEEP) += proc.o
#
diff --git a/trunk/drivers/acpi/acpica/Makefile b/trunk/drivers/acpi/acpica/Makefile
index 301bd2d388ad..0ca208b6dcf0 100644
--- a/trunk/drivers/acpi/acpica/Makefile
+++ b/trunk/drivers/acpi/acpica/Makefile
@@ -8,41 +8,151 @@ ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
# use acpi.o to put all files here into acpi.o modparam namespace
obj-y += acpi.o
-acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
- dsmethod.o dsobject.o dsutils.o dswload.o dswstate.o \
- dsinit.o dsargs.o dscontrol.o dswload2.o
+acpi-y := \
+ dsargs.o \
+ dscontrol.o \
+ dsfield.o \
+ dsinit.o \
+ dsmethod.o \
+ dsmthdat.o \
+ dsobject.o \
+ dsopcode.o \
+ dsutils.o \
+ dswexec.o \
+ dswload.o \
+ dswload2.o \
+ dswscope.o \
+ dswstate.o
-acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
- evmisc.o evrgnini.o evxface.o evxfregn.o \
- evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o evglock.o
+acpi-y += \
+ evevent.o \
+ evgpe.o \
+ evgpeblk.o \
+ evgpeinit.o \
+ evgpeutil.o \
+ evglock.o \
+ evmisc.o \
+ evregion.o \
+ evrgnini.o \
+ evsci.o \
+ evxface.o \
+ evxfevnt.o \
+ evxfgpe.o \
+ evxfregn.o
-acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
- exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
- excreate.o exmisc.o exoparg2.o exregion.o exstore.o exutils.o \
- exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o exdebug.o
+acpi-y += \
+ exconfig.o \
+ exconvrt.o \
+ excreate.o \
+ exdebug.o \
+ exdump.o \
+ exfield.o \
+ exfldio.o \
+ exmutex.o \
+ exnames.o \
+ exoparg1.o \
+ exoparg2.o \
+ exoparg3.o \
+ exoparg6.o \
+ exprep.o \
+ exmisc.o \
+ exregion.o \
+ exresnte.o \
+ exresolv.o \
+ exresop.o \
+ exstore.o \
+ exstoren.o \
+ exstorob.o \
+ exsystem.o \
+ exutils.o
-acpi-y += hwacpi.o hwgpe.o hwregs.o hwsleep.o hwxface.o hwvalid.o hwpci.o
+acpi-y += \
+ hwacpi.o \
+ hwgpe.o \
+ hwpci.o \
+ hwregs.o \
+ hwsleep.o \
+ hwvalid.o \
+ hwxface.o
acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o
-acpi-y += nsaccess.o nsload.o nssearch.o nsxfeval.o \
- nsalloc.o nseval.o nsnames.o nsutils.o nsxfname.o \
- nsdump.o nsinit.o nsobject.o nswalk.o nsxfobj.o \
- nsparse.o nspredef.o nsrepair.o nsrepair2.o
+acpi-y += \
+ nsaccess.o \
+ nsalloc.o \
+ nsdump.o \
+ nseval.o \
+ nsinit.o \
+ nsload.o \
+ nsnames.o \
+ nsobject.o \
+ nsparse.o \
+ nspredef.o \
+ nsrepair.o \
+ nsrepair2.o \
+ nssearch.o \
+ nsutils.o \
+ nswalk.o \
+ nsxfeval.o \
+ nsxfname.o \
+ nsxfobj.o
acpi-$(ACPI_FUTURE_USAGE) += nsdumpdv.o
-acpi-y += psargs.o psparse.o psloop.o pstree.o pswalk.o \
- psopcode.o psscope.o psutils.o psxface.o
+acpi-y += \
+ psargs.o \
+ psloop.o \
+ psopcode.o \
+ psparse.o \
+ psscope.o \
+ pstree.o \
+ psutils.o \
+ pswalk.o \
+ psxface.o
-acpi-y += rsaddr.o rscreate.o rsinfo.o rsio.o rslist.o rsmisc.o rsxface.o \
- rscalc.o rsirq.o rsmemory.o rsutils.o
+acpi-y += \
+ rsaddr.o \
+ rscalc.o \
+ rscreate.o \
+ rsinfo.o \
+ rsio.o \
+ rsirq.o \
+ rslist.o \
+ rsmemory.o \
+ rsmisc.o \
+ rsserial.o \
+ rsutils.o \
+ rsxface.o
acpi-$(ACPI_FUTURE_USAGE) += rsdump.o
-acpi-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
+acpi-y += \
+ tbfadt.o \
+ tbfind.o \
+ tbinstal.o \
+ tbutils.o \
+ tbxface.o \
+ tbxfroot.o
-acpi-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
- utcopy.o utdelete.o utglobal.o utmath.o utobject.o \
- utstate.o utmutex.o utobject.o utresrc.o utlock.o utids.o \
- utosi.o utxferror.o utdecode.o
+acpi-y += \
+ utaddress.o \
+ utalloc.o \
+ utcopy.o \
+ utdebug.o \
+ utdecode.o \
+ utdelete.o \
+ uteval.o \
+ utglobal.o \
+ utids.o \
+ utinit.o \
+ utlock.o \
+ utmath.o \
+ utmisc.o \
+ utmutex.o \
+ utobject.o \
+ utosi.o \
+ utresrc.o \
+ utstate.o \
+ utxface.o \
+ utxferror.o \
+ utxfmutex.o
diff --git a/trunk/drivers/acpi/acpica/accommon.h b/trunk/drivers/acpi/acpica/accommon.h
index e0ba17f0a7c8..a44bd424f9f4 100644
--- a/trunk/drivers/acpi/acpica/accommon.h
+++ b/trunk/drivers/acpi/acpica/accommon.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/acconfig.h b/trunk/drivers/acpi/acpica/acconfig.h
index f895a244ca7e..1f30af613e87 100644
--- a/trunk/drivers/acpi/acpica/acconfig.h
+++ b/trunk/drivers/acpi/acpica/acconfig.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -123,6 +123,10 @@
#define ACPI_MAX_SLEEP 2000 /* Two seconds */
+/* Address Range lists are per-space_id (Memory and I/O only) */
+
+#define ACPI_ADDRESS_RANGE_MAX 2
+
/******************************************************************************
*
* ACPI Specification constants (Do not change unless the specification changes)
@@ -202,9 +206,10 @@
#define ACPI_RSDP_CHECKSUM_LENGTH 20
#define ACPI_RSDP_XCHECKSUM_LENGTH 36
-/* SMBus and IPMI bidirectional buffer size */
+/* SMBus, GSBus and IPMI bidirectional buffer size */
#define ACPI_SMBUS_BUFFER_SIZE 34
+#define ACPI_GSBUS_BUFFER_SIZE 34
#define ACPI_IPMI_BUFFER_SIZE 66
/* _sx_d and _sx_w control methods */
diff --git a/trunk/drivers/acpi/acpica/acdebug.h b/trunk/drivers/acpi/acpica/acdebug.h
index eb0b1f8dee6d..deaa81979561 100644
--- a/trunk/drivers/acpi/acpica/acdebug.h
+++ b/trunk/drivers/acpi/acpica/acdebug.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/acdispat.h b/trunk/drivers/acpi/acpica/acdispat.h
index 2d1b7ffa377a..5935ba6707e2 100644
--- a/trunk/drivers/acpi/acpica/acdispat.h
+++ b/trunk/drivers/acpi/acpica/acdispat.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/acevents.h b/trunk/drivers/acpi/acpica/acevents.h
index bea3b4899183..c53caa521a30 100644
--- a/trunk/drivers/acpi/acpica/acevents.h
+++ b/trunk/drivers/acpi/acpica/acevents.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -162,6 +162,7 @@ acpi_status acpi_ev_initialize_op_regions(void);
acpi_status
acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ union acpi_operand_object *field_obj,
u32 function,
u32 region_offset, u32 bit_width, u64 *value);
diff --git a/trunk/drivers/acpi/acpica/acglobal.h b/trunk/drivers/acpi/acpica/acglobal.h
index e6652d716e45..2853f7673f3b 100644
--- a/trunk/drivers/acpi/acpica/acglobal.h
+++ b/trunk/drivers/acpi/acpica/acglobal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -140,8 +140,19 @@ u32 acpi_gbl_trace_flags;
acpi_name acpi_gbl_trace_method_name;
u8 acpi_gbl_system_awake_and_running;
+/*
+ * ACPI 5.0 introduces the concept of a "reduced hardware platform", meaning
+ * that the ACPI hardware is no longer required. A flag in the FADT indicates
+ * a reduced HW machine, and that flag is duplicated here for convenience.
+ */
+u8 acpi_gbl_reduced_hardware;
+
#endif
+/* Do not disassemble buffers to resource descriptors */
+
+ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_no_resource_disassembly, FALSE);
+
/*****************************************************************************
*
* Debug support
@@ -207,7 +218,7 @@ ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock;
/*****************************************************************************
*
- * Mutual exlusion within ACPICA subsystem
+ * Mutual exclusion within ACPICA subsystem
*
****************************************************************************/
@@ -295,6 +306,8 @@ ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present;
ACPI_EXTERN u8 acpi_gbl_events_initialized;
ACPI_EXTERN u8 acpi_gbl_osi_data;
ACPI_EXTERN struct acpi_interface_info *acpi_gbl_supported_interfaces;
+ACPI_EXTERN struct acpi_address_range
+ *acpi_gbl_address_range_list[ACPI_ADDRESS_RANGE_MAX];
#ifndef DEFINE_ACPI_GLOBALS
diff --git a/trunk/drivers/acpi/acpica/achware.h b/trunk/drivers/acpi/acpica/achware.h
index e7213beaafc7..677793e938f5 100644
--- a/trunk/drivers/acpi/acpica/achware.h
+++ b/trunk/drivers/acpi/acpica/achware.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/acinterp.h b/trunk/drivers/acpi/acpica/acinterp.h
index 3731e1c34b83..eb308635da72 100644
--- a/trunk/drivers/acpi/acpica/acinterp.h
+++ b/trunk/drivers/acpi/acpica/acinterp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -468,6 +468,8 @@ void acpi_ex_eisa_id_to_string(char *dest, u64 compressed_id);
void acpi_ex_integer_to_string(char *dest, u64 value);
+u8 acpi_is_valid_space_id(u8 space_id);
+
/*
* exregion - default op_region handlers
*/
diff --git a/trunk/drivers/acpi/acpica/aclocal.h b/trunk/drivers/acpi/acpica/aclocal.h
index 5552125d8340..3f24068837d5 100644
--- a/trunk/drivers/acpi/acpica/aclocal.h
+++ b/trunk/drivers/acpi/acpica/aclocal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -53,7 +53,7 @@ typedef u32 acpi_mutex_handle;
/* Total number of aml opcodes defined */
-#define AML_NUM_OPCODES 0x7F
+#define AML_NUM_OPCODES 0x81
/* Forward declarations */
@@ -249,12 +249,16 @@ struct acpi_create_field_info {
struct acpi_namespace_node *field_node;
struct acpi_namespace_node *register_node;
struct acpi_namespace_node *data_register_node;
+ struct acpi_namespace_node *connection_node;
+ u8 *resource_buffer;
u32 bank_value;
u32 field_bit_position;
u32 field_bit_length;
+ u16 resource_length;
u8 field_flags;
u8 attribute;
u8 field_type;
+ u8 access_length;
};
typedef
@@ -315,7 +319,8 @@ struct acpi_name_info {
/*
* Used for ACPI_PTYPE1_FIXED, ACPI_PTYPE1_VAR, ACPI_PTYPE2,
- * ACPI_PTYPE2_MIN, ACPI_PTYPE2_PKG_COUNT, ACPI_PTYPE2_COUNT
+ * ACPI_PTYPE2_MIN, ACPI_PTYPE2_PKG_COUNT, ACPI_PTYPE2_COUNT,
+ * ACPI_PTYPE2_FIX_VAR
*/
struct acpi_package_info {
u8 type;
@@ -625,6 +630,15 @@ union acpi_generic_state {
typedef acpi_status(*ACPI_EXECUTE_OP) (struct acpi_walk_state * walk_state);
+/* Address Range info block */
+
+struct acpi_address_range {
+ struct acpi_address_range *next;
+ struct acpi_namespace_node *region_node;
+ acpi_physical_address start_address;
+ acpi_physical_address end_address;
+};
+
/*****************************************************************************
*
* Parser typedefs and structs
@@ -951,7 +965,7 @@ struct acpi_port_info {
#define ACPI_RESOURCE_NAME_END_DEPENDENT 0x38
#define ACPI_RESOURCE_NAME_IO 0x40
#define ACPI_RESOURCE_NAME_FIXED_IO 0x48
-#define ACPI_RESOURCE_NAME_RESERVED_S1 0x50
+#define ACPI_RESOURCE_NAME_FIXED_DMA 0x50
#define ACPI_RESOURCE_NAME_RESERVED_S2 0x58
#define ACPI_RESOURCE_NAME_RESERVED_S3 0x60
#define ACPI_RESOURCE_NAME_RESERVED_S4 0x68
@@ -973,7 +987,9 @@ struct acpi_port_info {
#define ACPI_RESOURCE_NAME_EXTENDED_IRQ 0x89
#define ACPI_RESOURCE_NAME_ADDRESS64 0x8A
#define ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 0x8B
-#define ACPI_RESOURCE_NAME_LARGE_MAX 0x8B
+#define ACPI_RESOURCE_NAME_GPIO 0x8C
+#define ACPI_RESOURCE_NAME_SERIAL_BUS 0x8E
+#define ACPI_RESOURCE_NAME_LARGE_MAX 0x8E
/*****************************************************************************
*
diff --git a/trunk/drivers/acpi/acpica/acmacros.h b/trunk/drivers/acpi/acpica/acmacros.h
index b7491ee1fba6..ef338a96f5b2 100644
--- a/trunk/drivers/acpi/acpica/acmacros.h
+++ b/trunk/drivers/acpi/acpica/acmacros.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/acnamesp.h b/trunk/drivers/acpi/acpica/acnamesp.h
index 79a598c67fe3..2c9e0f049523 100644
--- a/trunk/drivers/acpi/acpica/acnamesp.h
+++ b/trunk/drivers/acpi/acpica/acnamesp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/acobject.h b/trunk/drivers/acpi/acpica/acobject.h
index 1055769f2f01..c065078ca83b 100644
--- a/trunk/drivers/acpi/acpica/acobject.h
+++ b/trunk/drivers/acpi/acpica/acobject.h
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -254,6 +254,7 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
u32 base_byte_offset; /* Byte offset within containing object */\
u32 value; /* Value to store into the Bank or Index register */\
u8 start_field_bit_offset;/* Bit offset within first field datum (0-63) */\
+ u8 access_length; /* For serial regions/fields */
struct acpi_object_field_common { /* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */
@@ -261,7 +262,9 @@ struct acpi_object_field_common { /* COMMON FIELD (for BUFFER, REGION, BANK, and
};
struct acpi_object_region_field {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Containing op_region object */
+ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length;
+ union acpi_operand_object *region_obj; /* Containing op_region object */
+ u8 *resource_buffer; /* resource_template for serial regions/fields */
};
struct acpi_object_bank_field {
@@ -358,6 +361,7 @@ typedef enum {
*/
struct acpi_object_extra {
ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *method_REG; /* _REG method for this region (if any) */
+ struct acpi_namespace_node *scope_node;
void *region_context; /* Region-specific data */
u8 *aml_start;
u32 aml_length;
diff --git a/trunk/drivers/acpi/acpica/acopcode.h b/trunk/drivers/acpi/acpica/acopcode.h
index bb2ccfad7376..9440d053fbb3 100644
--- a/trunk/drivers/acpi/acpica/acopcode.h
+++ b/trunk/drivers/acpi/acpica/acopcode.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -93,6 +93,7 @@
#define ARGP_CONCAT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
#define ARGP_CONCAT_RES_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
#define ARGP_COND_REF_OF_OP ARGP_LIST2 (ARGP_SUPERNAME, ARGP_SUPERNAME)
+#define ARGP_CONNECTFIELD_OP ARGP_LIST1 (ARGP_NAMESTRING)
#define ARGP_CONTINUE_OP ARG_NONE
#define ARGP_COPY_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_SIMPLENAME)
#define ARGP_CREATE_BIT_FIELD_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_NAME)
@@ -164,6 +165,7 @@
#define ARGP_RETURN_OP ARGP_LIST1 (ARGP_TERMARG)
#define ARGP_REVISION_OP ARG_NONE
#define ARGP_SCOPE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_NAME, ARGP_TERMLIST)
+#define ARGP_SERIALFIELD_OP ARGP_LIST1 (ARGP_NAMESTRING)
#define ARGP_SHIFT_LEFT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
#define ARGP_SHIFT_RIGHT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
#define ARGP_SIGNAL_OP ARGP_LIST1 (ARGP_SUPERNAME)
@@ -223,6 +225,7 @@
#define ARGI_CONCAT_OP ARGI_LIST3 (ARGI_COMPUTEDATA,ARGI_COMPUTEDATA, ARGI_TARGETREF)
#define ARGI_CONCAT_RES_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_BUFFER, ARGI_TARGETREF)
#define ARGI_COND_REF_OF_OP ARGI_LIST2 (ARGI_OBJECT_REF, ARGI_TARGETREF)
+#define ARGI_CONNECTFIELD_OP ARGI_INVALID_OPCODE
#define ARGI_CONTINUE_OP ARGI_INVALID_OPCODE
#define ARGI_COPY_OP ARGI_LIST2 (ARGI_ANYTYPE, ARGI_SIMPLE_TARGET)
#define ARGI_CREATE_BIT_FIELD_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_REFERENCE)
@@ -294,6 +297,7 @@
#define ARGI_RETURN_OP ARGI_INVALID_OPCODE
#define ARGI_REVISION_OP ARG_NONE
#define ARGI_SCOPE_OP ARGI_INVALID_OPCODE
+#define ARGI_SERIALFIELD_OP ARGI_INVALID_OPCODE
#define ARGI_SHIFT_LEFT_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
#define ARGI_SHIFT_RIGHT_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
#define ARGI_SIGNAL_OP ARGI_LIST1 (ARGI_EVENT)
diff --git a/trunk/drivers/acpi/acpica/acparser.h b/trunk/drivers/acpi/acpica/acparser.h
index 5ea1e06afa20..b725d780d34d 100644
--- a/trunk/drivers/acpi/acpica/acparser.h
+++ b/trunk/drivers/acpi/acpica/acparser.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/acpredef.h b/trunk/drivers/acpi/acpica/acpredef.h
index c445cca490ea..bbb34c9be4e8 100644
--- a/trunk/drivers/acpi/acpica/acpredef.h
+++ b/trunk/drivers/acpi/acpica/acpredef.h
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -94,6 +94,14 @@
* ACPI_PTYPE2_REV_FIXED: Revision at start, each subpackage is Fixed-length
* (Used for _ART, _FPS)
*
+ * ACPI_PTYPE2_FIX_VAR: Each subpackage consists of some fixed-length elements
+ * followed by an optional element
+ * object type
+ * count
+ * object type
+ * count = 0 (optional)
+ * (Used for _DLM)
+ *
*****************************************************************************/
enum acpi_return_package_types {
@@ -105,7 +113,8 @@ enum acpi_return_package_types {
ACPI_PTYPE2_PKG_COUNT = 6,
ACPI_PTYPE2_FIXED = 7,
ACPI_PTYPE2_MIN = 8,
- ACPI_PTYPE2_REV_FIXED = 9
+ ACPI_PTYPE2_REV_FIXED = 9,
+ ACPI_PTYPE2_FIX_VAR = 10
};
#ifdef ACPI_CREATE_PREDEFINED_TABLE
@@ -154,6 +163,7 @@ static const union acpi_predefined_info predefined_names[] =
{{"_AC8", 0, ACPI_RTYPE_INTEGER}},
{{"_AC9", 0, ACPI_RTYPE_INTEGER}},
{{"_ADR", 0, ACPI_RTYPE_INTEGER}},
+ {{"_AEI", 0, ACPI_RTYPE_BUFFER}},
{{"_AL0", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
@@ -229,6 +239,13 @@ static const union acpi_predefined_info predefined_names[] =
{{"_CID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Strs) */
{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING, 0,0}, 0,0}},
+ {{"_CLS", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (3 Int) */
+ {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}},
+
+ {{"_CPC", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Bufs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER, 0, 0}, 0,
+ 0}},
+
{{"_CRS", 0, ACPI_RTYPE_BUFFER}},
{{"_CRT", 0, ACPI_RTYPE_INTEGER}},
{{"_CSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n-1 Int) */
@@ -237,12 +254,21 @@ static const union acpi_predefined_info predefined_names[] =
{{"_CST", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n Pkg (1 Buf/3 Int) */
{{{ACPI_PTYPE2_PKG_COUNT,ACPI_RTYPE_BUFFER, 1, ACPI_RTYPE_INTEGER}, 3,0}},
+ {{"_CWS", 1, ACPI_RTYPE_INTEGER}},
{{"_DCK", 1, ACPI_RTYPE_INTEGER}},
{{"_DCS", 0, ACPI_RTYPE_INTEGER}},
{{"_DDC", 1, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER}},
{{"_DDN", 0, ACPI_RTYPE_STRING}},
+ {{"_DEP", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
+
{{"_DGS", 0, ACPI_RTYPE_INTEGER}},
{{"_DIS", 0, 0}},
+
+ {{"_DLM", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (1 Ref, 0/1 Optional Buf/Ref) */
+ {{{ACPI_PTYPE2_FIX_VAR, ACPI_RTYPE_REFERENCE, 1,
+ ACPI_RTYPE_REFERENCE | ACPI_RTYPE_BUFFER}, 0, 0}},
+
{{"_DMA", 0, ACPI_RTYPE_BUFFER}},
{{"_DOD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints) */
{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0,0}, 0,0}},
@@ -262,6 +288,7 @@ static const union acpi_predefined_info predefined_names[] =
{{"_EJ3", 1, 0}},
{{"_EJ4", 1, 0}},
{{"_EJD", 0, ACPI_RTYPE_STRING}},
+ {{"_EVT", 1, 0}},
{{"_FDE", 0, ACPI_RTYPE_BUFFER}},
{{"_FDI", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (16 Int) */
{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16,0}, 0,0}},
@@ -281,14 +308,17 @@ static const union acpi_predefined_info predefined_names[] =
{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}},
{{"_GAI", 0, ACPI_RTYPE_INTEGER}},
+ {{"_GCP", 0, ACPI_RTYPE_INTEGER}},
{{"_GHL", 0, ACPI_RTYPE_INTEGER}},
{{"_GLK", 0, ACPI_RTYPE_INTEGER}},
{{"_GPD", 0, ACPI_RTYPE_INTEGER}},
{{"_GPE", 0, ACPI_RTYPE_INTEGER}}, /* _GPE method, not _GPE scope */
+ {{"_GRT", 0, ACPI_RTYPE_BUFFER}},
{{"_GSB", 0, ACPI_RTYPE_INTEGER}},
{{"_GTF", 0, ACPI_RTYPE_BUFFER}},
{{"_GTM", 0, ACPI_RTYPE_BUFFER}},
{{"_GTS", 1, 0}},
+ {{"_GWS", 1, ACPI_RTYPE_INTEGER}},
{{"_HID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING}},
{{"_HOT", 0, ACPI_RTYPE_INTEGER}},
{{"_HPP", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (4 Int) */
@@ -303,6 +333,7 @@ static const union acpi_predefined_info predefined_names[] =
{{"_HPX", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (var Ints) */
{{{ACPI_PTYPE2_MIN, ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
+ {{"_HRV", 0, ACPI_RTYPE_INTEGER}},
{{"_IFT", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */
{{"_INI", 0, 0}},
{{"_IRC", 0, 0}},
@@ -361,6 +392,9 @@ static const union acpi_predefined_info predefined_names[] =
{{"_PR3", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
+ {{"_PRE", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
+
{{"_PRL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
@@ -391,6 +425,7 @@ static const union acpi_predefined_info predefined_names[] =
{{"_PSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (5 Int) with count */
{{{ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER,0,0}, 0,0}},
+ {{"_PSE", 1, 0}},
{{"_PSL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
@@ -457,6 +492,7 @@ static const union acpi_predefined_info predefined_names[] =
{{"_SLI", 0, ACPI_RTYPE_BUFFER}},
{{"_SPD", 1, ACPI_RTYPE_INTEGER}},
{{"_SRS", 1, 0}},
+ {{"_SRT", 1, ACPI_RTYPE_INTEGER}},
{{"_SRV", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */
{{"_SST", 1, 0}},
{{"_STA", 0, ACPI_RTYPE_INTEGER}},
@@ -464,6 +500,7 @@ static const union acpi_predefined_info predefined_names[] =
{{"_STP", 2, ACPI_RTYPE_INTEGER}},
{{"_STR", 0, ACPI_RTYPE_BUFFER}},
{{"_STV", 2, ACPI_RTYPE_INTEGER}},
+ {{"_SUB", 0, ACPI_RTYPE_STRING}},
{{"_SUN", 0, ACPI_RTYPE_INTEGER}},
{{"_SWS", 0, ACPI_RTYPE_INTEGER}},
{{"_TC1", 0, ACPI_RTYPE_INTEGER}},
diff --git a/trunk/drivers/acpi/acpica/acresrc.h b/trunk/drivers/acpi/acpica/acresrc.h
index f08b55b7f3a0..0347d0993497 100644
--- a/trunk/drivers/acpi/acpica/acresrc.h
+++ b/trunk/drivers/acpi/acpica/acresrc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -73,28 +73,40 @@ typedef const struct acpi_rsconvert_info {
/* Resource conversion opcodes */
-#define ACPI_RSC_INITGET 0
-#define ACPI_RSC_INITSET 1
-#define ACPI_RSC_FLAGINIT 2
-#define ACPI_RSC_1BITFLAG 3
-#define ACPI_RSC_2BITFLAG 4
-#define ACPI_RSC_COUNT 5
-#define ACPI_RSC_COUNT16 6
-#define ACPI_RSC_LENGTH 7
-#define ACPI_RSC_MOVE8 8
-#define ACPI_RSC_MOVE16 9
-#define ACPI_RSC_MOVE32 10
-#define ACPI_RSC_MOVE64 11
-#define ACPI_RSC_SET8 12
-#define ACPI_RSC_DATA8 13
-#define ACPI_RSC_ADDRESS 14
-#define ACPI_RSC_SOURCE 15
-#define ACPI_RSC_SOURCEX 16
-#define ACPI_RSC_BITMASK 17
-#define ACPI_RSC_BITMASK16 18
-#define ACPI_RSC_EXIT_NE 19
-#define ACPI_RSC_EXIT_LE 20
-#define ACPI_RSC_EXIT_EQ 21
+typedef enum {
+ ACPI_RSC_INITGET = 0,
+ ACPI_RSC_INITSET,
+ ACPI_RSC_FLAGINIT,
+ ACPI_RSC_1BITFLAG,
+ ACPI_RSC_2BITFLAG,
+ ACPI_RSC_3BITFLAG,
+ ACPI_RSC_ADDRESS,
+ ACPI_RSC_BITMASK,
+ ACPI_RSC_BITMASK16,
+ ACPI_RSC_COUNT,
+ ACPI_RSC_COUNT16,
+ ACPI_RSC_COUNT_GPIO_PIN,
+ ACPI_RSC_COUNT_GPIO_RES,
+ ACPI_RSC_COUNT_GPIO_VEN,
+ ACPI_RSC_COUNT_SERIAL_RES,
+ ACPI_RSC_COUNT_SERIAL_VEN,
+ ACPI_RSC_DATA8,
+ ACPI_RSC_EXIT_EQ,
+ ACPI_RSC_EXIT_LE,
+ ACPI_RSC_EXIT_NE,
+ ACPI_RSC_LENGTH,
+ ACPI_RSC_MOVE_GPIO_PIN,
+ ACPI_RSC_MOVE_GPIO_RES,
+ ACPI_RSC_MOVE_SERIAL_RES,
+ ACPI_RSC_MOVE_SERIAL_VEN,
+ ACPI_RSC_MOVE8,
+ ACPI_RSC_MOVE16,
+ ACPI_RSC_MOVE32,
+ ACPI_RSC_MOVE64,
+ ACPI_RSC_SET8,
+ ACPI_RSC_SOURCE,
+ ACPI_RSC_SOURCEX
+} ACPI_RSCONVERT_OPCODES;
/* Resource Conversion sub-opcodes */
@@ -106,6 +118,9 @@ typedef const struct acpi_rsconvert_info {
#define ACPI_RS_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_resource,f)
#define AML_OFFSET(f) (u8) ACPI_OFFSET (union aml_resource,f)
+/*
+ * Individual entry for the resource dump tables
+ */
typedef const struct acpi_rsdump_info {
u8 opcode;
u8 offset;
@@ -116,20 +131,25 @@ typedef const struct acpi_rsdump_info {
/* Values for the Opcode field above */
-#define ACPI_RSD_TITLE 0
-#define ACPI_RSD_LITERAL 1
-#define ACPI_RSD_STRING 2
-#define ACPI_RSD_UINT8 3
-#define ACPI_RSD_UINT16 4
-#define ACPI_RSD_UINT32 5
-#define ACPI_RSD_UINT64 6
-#define ACPI_RSD_1BITFLAG 7
-#define ACPI_RSD_2BITFLAG 8
-#define ACPI_RSD_SHORTLIST 9
-#define ACPI_RSD_LONGLIST 10
-#define ACPI_RSD_DWORDLIST 11
-#define ACPI_RSD_ADDRESS 12
-#define ACPI_RSD_SOURCE 13
+typedef enum {
+ ACPI_RSD_TITLE = 0,
+ ACPI_RSD_1BITFLAG,
+ ACPI_RSD_2BITFLAG,
+ ACPI_RSD_3BITFLAG,
+ ACPI_RSD_ADDRESS,
+ ACPI_RSD_DWORDLIST,
+ ACPI_RSD_LITERAL,
+ ACPI_RSD_LONGLIST,
+ ACPI_RSD_SHORTLIST,
+ ACPI_RSD_SHORTLISTX,
+ ACPI_RSD_SOURCE,
+ ACPI_RSD_STRING,
+ ACPI_RSD_UINT8,
+ ACPI_RSD_UINT16,
+ ACPI_RSD_UINT32,
+ ACPI_RSD_UINT64,
+ ACPI_RSD_WORDLIST
+} ACPI_RSDUMP_OPCODES;
/* restore default alignment */
@@ -138,13 +158,18 @@ typedef const struct acpi_rsdump_info {
/* Resource tables indexed by internal resource type */
extern const u8 acpi_gbl_aml_resource_sizes[];
+extern const u8 acpi_gbl_aml_resource_serial_bus_sizes[];
extern struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[];
/* Resource tables indexed by raw AML resource descriptor type */
extern const u8 acpi_gbl_resource_struct_sizes[];
+extern const u8 acpi_gbl_resource_struct_serial_bus_sizes[];
extern struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[];
+extern struct acpi_rsconvert_info
+ *acpi_gbl_convert_resource_serial_bus_dispatch[];
+
struct acpi_vendor_walk_info {
struct acpi_vendor_uuid *uuid;
struct acpi_buffer *buffer;
@@ -190,6 +215,10 @@ acpi_status
acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
struct acpi_buffer *ret_buffer);
+acpi_status
+acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer);
+
/*
* rscalc
*/
@@ -293,6 +322,11 @@ extern struct acpi_rsconvert_info acpi_rs_convert_address16[];
extern struct acpi_rsconvert_info acpi_rs_convert_ext_irq[];
extern struct acpi_rsconvert_info acpi_rs_convert_address64[];
extern struct acpi_rsconvert_info acpi_rs_convert_ext_address64[];
+extern struct acpi_rsconvert_info acpi_rs_convert_gpio[];
+extern struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[];
+extern struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[];
+extern struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[];
+extern struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[];
/* These resources require separate get/set tables */
@@ -310,6 +344,7 @@ extern struct acpi_rsconvert_info acpi_rs_set_vendor[];
* rsinfo
*/
extern struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[];
+extern struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[];
/*
* rsdump
@@ -331,6 +366,12 @@ extern struct acpi_rsdump_info acpi_rs_dump_address64[];
extern struct acpi_rsdump_info acpi_rs_dump_ext_address64[];
extern struct acpi_rsdump_info acpi_rs_dump_ext_irq[];
extern struct acpi_rsdump_info acpi_rs_dump_generic_reg[];
+extern struct acpi_rsdump_info acpi_rs_dump_gpio[];
+extern struct acpi_rsdump_info acpi_rs_dump_fixed_dma[];
+extern struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[];
+extern struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[];
+extern struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[];
+extern struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[];
#endif
#endif /* __ACRESRC_H__ */
diff --git a/trunk/drivers/acpi/acpica/acstruct.h b/trunk/drivers/acpi/acpica/acstruct.h
index 1623b245dde2..0404df605bc1 100644
--- a/trunk/drivers/acpi/acpica/acstruct.h
+++ b/trunk/drivers/acpi/acpica/acstruct.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/actables.h b/trunk/drivers/acpi/acpica/actables.h
index 967f08124eba..d5bec304c823 100644
--- a/trunk/drivers/acpi/acpica/actables.h
+++ b/trunk/drivers/acpi/acpica/actables.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/acutils.h b/trunk/drivers/acpi/acpica/acutils.h
index 99c140d8e348..925ccf22101b 100644
--- a/trunk/drivers/acpi/acpica/acutils.h
+++ b/trunk/drivers/acpi/acpica/acutils.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -45,6 +45,7 @@
#define _ACUTILS_H
extern const u8 acpi_gbl_resource_aml_sizes[];
+extern const u8 acpi_gbl_resource_aml_serial_bus_sizes[];
/* Strings used by the disassembler and debugger resource dump routines */
@@ -578,6 +579,24 @@ acpi_ut_create_list(char *list_name,
#endif /* ACPI_DBG_TRACK_ALLOCATIONS */
+/*
+ * utaddress - address range check
+ */
+acpi_status
+acpi_ut_add_address_range(acpi_adr_space_type space_id,
+ acpi_physical_address address,
+ u32 length, struct acpi_namespace_node *region_node);
+
+void
+acpi_ut_remove_address_range(acpi_adr_space_type space_id,
+ struct acpi_namespace_node *region_node);
+
+u32
+acpi_ut_check_address_range(acpi_adr_space_type space_id,
+ acpi_physical_address address, u32 length, u8 warn);
+
+void acpi_ut_delete_address_lists(void);
+
/*
* utxferror - various error/warning output functions
*/
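/*
 * A minimal standalone sketch (not ACPICA code) of the bookkeeping behind
 * the new utaddress interface declared above: each address space keeps a
 * linked list of operation-region ranges so later declarations can be
 * checked for overlaps. The struct mirrors acpi_address_range and
 * acpi_gbl_address_range_list; the simplified types, names, and main()
 * are illustrative assumptions only.
 */
#include <stdio.h>
#include <stdlib.h>

#define ADDRESS_RANGE_MAX 2	/* SystemMemory and SystemIO, as in ACPI_ADDRESS_RANGE_MAX */

struct address_range {
	struct address_range *next;
	unsigned long long start;
	unsigned long long end;
};

static struct address_range *range_list[ADDRESS_RANGE_MAX];

/* Record a region's range; conflicts are only reported when checked later */
static int add_range(unsigned space_id, unsigned long long start, unsigned length)
{
	struct address_range *range;

	if (space_id >= ADDRESS_RANGE_MAX || !length)
		return -1;

	range = malloc(sizeof(*range));
	if (!range)
		return -1;

	range->start = start;
	range->end = start + length - 1;
	range->next = range_list[space_id];
	range_list[space_id] = range;
	return 0;
}

/* Count recorded ranges that overlap [start, start + length) */
static unsigned check_range(unsigned space_id, unsigned long long start, unsigned length)
{
	unsigned long long end = start + length - 1;
	unsigned overlaps = 0;
	struct address_range *range;

	if (space_id >= ADDRESS_RANGE_MAX || !length)
		return 0;

	for (range = range_list[space_id]; range; range = range->next)
		if (start <= range->end && end >= range->start)
			overlaps++;
	return overlaps;
}

int main(void)
{
	add_range(0, 0x1000, 0x100);	/* e.g. a SystemMemory operation region */
	printf("overlapping ranges: %u\n", check_range(0, 0x10F0, 0x20));	/* prints 1 */
	return 0;
}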
diff --git a/trunk/drivers/acpi/acpica/amlcode.h b/trunk/drivers/acpi/acpica/amlcode.h
index 1077f17859ed..905280fec0fa 100644
--- a/trunk/drivers/acpi/acpica/amlcode.h
+++ b/trunk/drivers/acpi/acpica/amlcode.h
@@ -7,7 +7,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -188,6 +188,14 @@
#define AML_LLESSEQUAL_OP (u16) 0x9294
#define AML_LNOTEQUAL_OP (u16) 0x9293
+/*
+ * Opcodes for "Field" operators
+ */
+#define AML_FIELD_OFFSET_OP (u8) 0x00
+#define AML_FIELD_ACCESS_OP (u8) 0x01
+#define AML_FIELD_CONNECTION_OP (u8) 0x02 /* ACPI 5.0 */
+#define AML_FIELD_EXT_ACCESS_OP (u8) 0x03 /* ACPI 5.0 */
+
/*
* Internal opcodes
* Use only "Unknown" AML opcodes, don't attempt to use
@@ -202,6 +210,8 @@
#define AML_INT_METHODCALL_OP (u16) 0x0035
#define AML_INT_RETURN_VALUE_OP (u16) 0x0036
#define AML_INT_EVAL_SUBTREE_OP (u16) 0x0037
+#define AML_INT_CONNECTION_OP (u16) 0x0038
+#define AML_INT_EXTACCESSFIELD_OP (u16) 0x0039
#define ARG_NONE 0x0
@@ -456,13 +466,16 @@ typedef enum {
* access_as keyword
*/
typedef enum {
- AML_FIELD_ATTRIB_SMB_QUICK = 0x02,
- AML_FIELD_ATTRIB_SMB_SEND_RCV = 0x04,
- AML_FIELD_ATTRIB_SMB_BYTE = 0x06,
- AML_FIELD_ATTRIB_SMB_WORD = 0x08,
- AML_FIELD_ATTRIB_SMB_BLOCK = 0x0A,
- AML_FIELD_ATTRIB_SMB_WORD_CALL = 0x0C,
- AML_FIELD_ATTRIB_SMB_BLOCK_CALL = 0x0D
+ AML_FIELD_ATTRIB_QUICK = 0x02,
+ AML_FIELD_ATTRIB_SEND_RCV = 0x04,
+ AML_FIELD_ATTRIB_BYTE = 0x06,
+ AML_FIELD_ATTRIB_WORD = 0x08,
+ AML_FIELD_ATTRIB_BLOCK = 0x0A,
+ AML_FIELD_ATTRIB_MULTIBYTE = 0x0B,
+ AML_FIELD_ATTRIB_WORD_CALL = 0x0C,
+ AML_FIELD_ATTRIB_BLOCK_CALL = 0x0D,
+ AML_FIELD_ATTRIB_RAW_BYTES = 0x0E,
+ AML_FIELD_ATTRIB_RAW_PROCESS = 0x0F
} AML_ACCESS_ATTRIBUTE;
/* Bit fields in the AML method_flags byte */
diff --git a/trunk/drivers/acpi/acpica/amlresrc.h b/trunk/drivers/acpi/acpica/amlresrc.h
index 59122cde247c..7b2128f274e7 100644
--- a/trunk/drivers/acpi/acpica/amlresrc.h
+++ b/trunk/drivers/acpi/acpica/amlresrc.h
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -58,29 +58,48 @@
#define ACPI_RESTAG_TYPESPECIFICATTRIBUTES "_ATT"
#define ACPI_RESTAG_BASEADDRESS "_BAS"
#define ACPI_RESTAG_BUSMASTER "_BM_" /* Master(1), Slave(0) */
+#define ACPI_RESTAG_DEBOUNCETIME "_DBT"
#define ACPI_RESTAG_DECODE "_DEC"
+#define ACPI_RESTAG_DEVICEPOLARITY "_DPL"
#define ACPI_RESTAG_DMA "_DMA"
#define ACPI_RESTAG_DMATYPE "_TYP" /* Compatible(0), A(1), B(2), F(3) */
+#define ACPI_RESTAG_DRIVESTRENGTH "_DRS"
+#define ACPI_RESTAG_ENDIANNESS "_END"
+#define ACPI_RESTAG_FLOWCONTROL "_FLC"
#define ACPI_RESTAG_GRANULARITY "_GRA"
#define ACPI_RESTAG_INTERRUPT "_INT"
#define ACPI_RESTAG_INTERRUPTLEVEL "_LL_" /* active_lo(1), active_hi(0) */
#define ACPI_RESTAG_INTERRUPTSHARE "_SHR" /* Shareable(1), no_share(0) */
#define ACPI_RESTAG_INTERRUPTTYPE "_HE_" /* Edge(1), Level(0) */
+#define ACPI_RESTAG_IORESTRICTION "_IOR"
#define ACPI_RESTAG_LENGTH "_LEN"
+#define ACPI_RESTAG_LINE "_LIN"
#define ACPI_RESTAG_MEMATTRIBUTES "_MTP" /* Memory(0), Reserved(1), ACPI(2), NVS(3) */
#define ACPI_RESTAG_MEMTYPE "_MEM" /* non_cache(0), Cacheable(1) Cache+combine(2), Cache+prefetch(3) */
#define ACPI_RESTAG_MAXADDR "_MAX"
#define ACPI_RESTAG_MINADDR "_MIN"
#define ACPI_RESTAG_MAXTYPE "_MAF"
#define ACPI_RESTAG_MINTYPE "_MIF"
+#define ACPI_RESTAG_MODE "_MOD"
+#define ACPI_RESTAG_PARITY "_PAR"
+#define ACPI_RESTAG_PHASE "_PHA"
+#define ACPI_RESTAG_PIN "_PIN"
+#define ACPI_RESTAG_PINCONFIG "_PPI"
+#define ACPI_RESTAG_POLARITY "_POL"
#define ACPI_RESTAG_REGISTERBITOFFSET "_RBO"
#define ACPI_RESTAG_REGISTERBITWIDTH "_RBW"
#define ACPI_RESTAG_RANGETYPE "_RNG"
#define ACPI_RESTAG_READWRITETYPE "_RW_" /* read_only(0), Writeable (1) */
+#define ACPI_RESTAG_LENGTH_RX "_RXL"
+#define ACPI_RESTAG_LENGTH_TX "_TXL"
+#define ACPI_RESTAG_SLAVEMODE "_SLV"
+#define ACPI_RESTAG_SPEED "_SPE"
+#define ACPI_RESTAG_STOPBITS "_STB"
#define ACPI_RESTAG_TRANSLATION "_TRA"
#define ACPI_RESTAG_TRANSTYPE "_TRS" /* Sparse(1), Dense(0) */
#define ACPI_RESTAG_TYPE "_TTP" /* Translation(1), Static (0) */
#define ACPI_RESTAG_XFERTYPE "_SIZ" /* 8(0), 8_and16(1), 16(2) */
+#define ACPI_RESTAG_VENDORDATA "_VEN"
/* Default sizes for "small" resource descriptors */
@@ -90,6 +109,7 @@
#define ASL_RDESC_END_DEPEND_SIZE 0x00
#define ASL_RDESC_IO_SIZE 0x07
#define ASL_RDESC_FIXED_IO_SIZE 0x03
+#define ASL_RDESC_FIXED_DMA_SIZE 0x05
#define ASL_RDESC_END_TAG_SIZE 0x01
struct asl_resource_node {
@@ -164,6 +184,12 @@ struct aml_resource_end_tag {
AML_RESOURCE_SMALL_HEADER_COMMON u8 checksum;
};
+struct aml_resource_fixed_dma {
+ AML_RESOURCE_SMALL_HEADER_COMMON u16 request_lines;
+ u16 channels;
+ u8 width;
+};
+
/*
* LARGE descriptors
*/
@@ -263,6 +289,110 @@ struct aml_resource_generic_register {
u64 address;
};
+/* Common descriptor for gpio_int and gpio_io (ACPI 5.0) */
+
+struct aml_resource_gpio {
+ AML_RESOURCE_LARGE_HEADER_COMMON u8 revision_id;
+ u8 connection_type;
+ u16 flags;
+ u16 int_flags;
+ u8 pin_config;
+ u16 drive_strength;
+ u16 debounce_timeout;
+ u16 pin_table_offset;
+ u8 res_source_index;
+ u16 res_source_offset;
+ u16 vendor_offset;
+ u16 vendor_length;
+ /*
+ * Optional fields follow immediately:
+ * 1) PIN list (Words)
+ * 2) Resource Source String
+ * 3) Vendor Data bytes
+ */
+};
+
+#define AML_RESOURCE_GPIO_REVISION 1 /* ACPI 5.0 */
+
+/* Values for connection_type above */
+
+#define AML_RESOURCE_GPIO_TYPE_INT 0
+#define AML_RESOURCE_GPIO_TYPE_IO 1
+#define AML_RESOURCE_MAX_GPIOTYPE 1
+
+/* Common preamble for all serial descriptors (ACPI 5.0) */
+
+#define AML_RESOURCE_SERIAL_COMMON \
+ u8 revision_id; \
+ u8 res_source_index; \
+ u8 type; \
+ u8 flags; \
+ u16 type_specific_flags; \
+ u8 type_revision_id; \
+ u16 type_data_length; \
+
+/* Values for the type field above */
+
+#define AML_RESOURCE_I2C_SERIALBUSTYPE 1
+#define AML_RESOURCE_SPI_SERIALBUSTYPE 2
+#define AML_RESOURCE_UART_SERIALBUSTYPE 3
+#define AML_RESOURCE_MAX_SERIALBUSTYPE 3
+#define AML_RESOURCE_VENDOR_SERIALBUSTYPE 192 /* Vendor defined is 0xC0-0xFF (NOT SUPPORTED) */
+
+struct aml_resource_common_serialbus {
+AML_RESOURCE_LARGE_HEADER_COMMON AML_RESOURCE_SERIAL_COMMON};
+
+struct aml_resource_i2c_serialbus {
+ AML_RESOURCE_LARGE_HEADER_COMMON
+ AML_RESOURCE_SERIAL_COMMON u32 connection_speed;
+ u16 slave_address;
+ /*
+ * Optional fields follow immediately:
+ * 1) Vendor Data bytes
+ * 2) Resource Source String
+ */
+};
+
+#define AML_RESOURCE_I2C_REVISION 1 /* ACPI 5.0 */
+#define AML_RESOURCE_I2C_TYPE_REVISION 1 /* ACPI 5.0 */
+#define AML_RESOURCE_I2C_MIN_DATA_LEN 6
+
+struct aml_resource_spi_serialbus {
+ AML_RESOURCE_LARGE_HEADER_COMMON
+ AML_RESOURCE_SERIAL_COMMON u32 connection_speed;
+ u8 data_bit_length;
+ u8 clock_phase;
+ u8 clock_polarity;
+ u16 device_selection;
+ /*
+ * Optional fields follow immediately:
+ * 1) Vendor Data bytes
+ * 2) Resource Source String
+ */
+};
+
+#define AML_RESOURCE_SPI_REVISION 1 /* ACPI 5.0 */
+#define AML_RESOURCE_SPI_TYPE_REVISION 1 /* ACPI 5.0 */
+#define AML_RESOURCE_SPI_MIN_DATA_LEN 9
+
+struct aml_resource_uart_serialbus {
+ AML_RESOURCE_LARGE_HEADER_COMMON
+ AML_RESOURCE_SERIAL_COMMON u32 default_baud_rate;
+ u16 rx_fifo_size;
+ u16 tx_fifo_size;
+ u8 parity;
+ u8 lines_enabled;
+ /*
+ * Optional fields follow immediately:
+ * 1) Vendor Data bytes
+ * 2) Resource Source String
+ */
+};
+
+#define AML_RESOURCE_UART_REVISION 1 /* ACPI 5.0 */
+#define AML_RESOURCE_UART_TYPE_REVISION 1 /* ACPI 5.0 */
+#define AML_RESOURCE_UART_MIN_DATA_LEN 10
+
/* restore default alignment */
#pragma pack()
@@ -284,6 +414,7 @@ union aml_resource {
struct aml_resource_end_dependent end_dpf;
struct aml_resource_io io;
struct aml_resource_fixed_io fixed_io;
+ struct aml_resource_fixed_dma fixed_dma;
struct aml_resource_vendor_small vendor_small;
struct aml_resource_end_tag end_tag;
@@ -299,6 +430,11 @@ union aml_resource {
struct aml_resource_address64 address64;
struct aml_resource_extended_address64 ext_address64;
struct aml_resource_extended_irq extended_irq;
+ struct aml_resource_gpio gpio;
+ struct aml_resource_i2c_serialbus i2c_serial_bus;
+ struct aml_resource_spi_serialbus spi_serial_bus;
+ struct aml_resource_uart_serialbus uart_serial_bus;
+ struct aml_resource_common_serialbus common_serial_bus;
/* Utility overlays */
diff --git a/trunk/drivers/acpi/acpica/dsargs.c b/trunk/drivers/acpi/acpica/dsargs.c
index 8c7b99728aa2..80eb1900297f 100644
--- a/trunk/drivers/acpi/acpica/dsargs.c
+++ b/trunk/drivers/acpi/acpica/dsargs.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -250,6 +250,13 @@ acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc)
status = acpi_ds_execute_arguments(node, node->parent,
extra_desc->extra.aml_length,
extra_desc->extra.aml_start);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_ut_add_address_range(obj_desc->region.space_id,
+ obj_desc->region.address,
+ obj_desc->region.length, node);
return_ACPI_STATUS(status);
}
@@ -384,8 +391,15 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
/* Execute the argument AML */
- status = acpi_ds_execute_arguments(node, node->parent,
+ status = acpi_ds_execute_arguments(node, extra_desc->extra.scope_node,
extra_desc->extra.aml_length,
extra_desc->extra.aml_start);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_ut_add_address_range(obj_desc->region.space_id,
+ obj_desc->region.address,
+ obj_desc->region.length, node);
return_ACPI_STATUS(status);
}
diff --git a/trunk/drivers/acpi/acpica/dscontrol.c b/trunk/drivers/acpi/acpica/dscontrol.c
index 26c49fff58da..effe4ca1133f 100644
--- a/trunk/drivers/acpi/acpica/dscontrol.c
+++ b/trunk/drivers/acpi/acpica/dscontrol.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/dsfield.c b/trunk/drivers/acpi/acpica/dsfield.c
index 34be60c0e448..cd243cf2cab2 100644
--- a/trunk/drivers/acpi/acpica/dsfield.c
+++ b/trunk/drivers/acpi/acpica/dsfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -221,6 +221,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
{
acpi_status status;
u64 position;
+ union acpi_parse_object *child;
ACPI_FUNCTION_TRACE_PTR(ds_get_field_names, info);
@@ -232,10 +233,11 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
while (arg) {
/*
- * Three types of field elements are handled:
- * 1) Offset - specifies a bit offset
- * 2) access_as - changes the access mode
- * 3) Name - Enters a new named field into the namespace
+ * Four types of field elements are handled:
+ * 1) Name - Enters a new named field into the namespace
+ * 2) Offset - specifies a bit offset
+ * 3) access_as - changes the access mode/attributes
+ * 4) Connection - Associate a resource template with the field
*/
switch (arg->common.aml_opcode) {
case AML_INT_RESERVEDFIELD_OP:
@@ -253,21 +255,70 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
break;
case AML_INT_ACCESSFIELD_OP:
-
+ case AML_INT_EXTACCESSFIELD_OP:
/*
- * Get a new access_type and access_attribute -- to be used for all
- * field units that follow, until field end or another access_as
- * keyword.
+ * Get new access_type, access_attribute, and access_length fields
+ * -- to be used for all field units that follow, until the
+ * end-of-field or another access_as keyword is encountered.
+ * NOTE. These three bytes are encoded in the integer value
+ * of the parseop for convenience.
*
* In field_flags, preserve the flag bits other than the
- * ACCESS_TYPE bits
+ * ACCESS_TYPE bits.
*/
+
+ /* access_type (byte_acc, word_acc, etc.) */
+
info->field_flags = (u8)
((info->
field_flags & ~(AML_FIELD_ACCESS_TYPE_MASK)) |
- ((u8) ((u32) arg->common.value.integer >> 8)));
+ ((u8)((u32)(arg->common.value.integer & 0x07))));
+
+ /* access_attribute (attrib_quick, attrib_byte, etc.) */
+
+ info->attribute =
+ (u8)((arg->common.value.integer >> 8) & 0xFF);
+
+ /* access_length (for serial/buffer protocols) */
+
+ info->access_length =
+ (u8)((arg->common.value.integer >> 16) & 0xFF);
+ break;
+
+ case AML_INT_CONNECTION_OP:
+ /*
+ * Clear any previous connection. New connection is used for all
+ * fields that follow, similar to access_as
+ */
+ info->resource_buffer = NULL;
+ info->connection_node = NULL;
- info->attribute = (u8) (arg->common.value.integer);
+ /*
+ * A Connection() is either an actual resource descriptor (buffer)
+ * or a named reference to a resource template
+ */
+ child = arg->common.value.arg;
+ if (child->common.aml_opcode == AML_INT_BYTELIST_OP) {
+ info->resource_buffer = child->named.data;
+ info->resource_length =
+ (u16)child->named.value.integer;
+ } else {
+ /* Lookup the Connection() namepath, it should already exist */
+
+ status = acpi_ns_lookup(walk_state->scope_info,
+ child->common.value.
+ name, ACPI_TYPE_ANY,
+ ACPI_IMODE_EXECUTE,
+ ACPI_NS_DONT_OPEN_SCOPE,
+ walk_state,
+ &info->connection_node);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR_NAMESPACE(child->common.
+ value.name,
+ status);
+ return_ACPI_STATUS(status);
+ }
+ }
break;
case AML_INT_NAMEDFIELD_OP:
@@ -374,6 +425,8 @@ acpi_ds_create_field(union acpi_parse_object *op,
}
}
+ ACPI_MEMSET(&info, 0, sizeof(struct acpi_create_field_info));
+
/* Second arg is the field flags */
arg = arg->common.next;
@@ -386,7 +439,6 @@ acpi_ds_create_field(union acpi_parse_object *op,
info.region_node = region_node;
status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
-
return_ACPI_STATUS(status);
}
@@ -474,8 +526,8 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
*/
while (arg) {
/*
- * Ignore OFFSET and ACCESSAS terms here; we are only interested in the
- * field names in order to enter them into the namespace.
+ * Ignore OFFSET/ACCESSAS/CONNECTION terms here; we are only interested
+ * in the field names in order to enter them into the namespace.
*/
if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) {
status = acpi_ns_lookup(walk_state->scope_info,
@@ -651,6 +703,5 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
info.region_node = region_node;
status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
-
return_ACPI_STATUS(status);
}
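/*
 * Sketch of the byte packing that ds_get_field_names() above decodes for
 * AccessField/ExtAccessField parse ops: the low 3 bits of the parse-op
 * integer carry the access type, bits 8-15 the access attribute, and
 * bits 16-23 the access length. Standalone illustration with assumed
 * example values, not ACPICA code.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t packed = ((uint64_t)16 << 16) |	/* access_length: 16 bytes */
			  ((uint64_t)0x0B << 8) |	/* attribute: AML_FIELD_ATTRIB_MULTIBYTE */
			  0x02;				/* access_type: WordAcc */

	uint8_t access_type   = (uint8_t)(packed & 0x07);
	uint8_t attribute     = (uint8_t)((packed >> 8) & 0xFF);
	uint8_t access_length = (uint8_t)((packed >> 16) & 0xFF);

	printf("type=%u attribute=0x%02X length=%u\n",
	       access_type, attribute, access_length);
	return 0;
}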
diff --git a/trunk/drivers/acpi/acpica/dsinit.c b/trunk/drivers/acpi/acpica/dsinit.c
index a7718bf2b9a1..9e5ac7f780a7 100644
--- a/trunk/drivers/acpi/acpica/dsinit.c
+++ b/trunk/drivers/acpi/acpica/dsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/dsmethod.c b/trunk/drivers/acpi/acpica/dsmethod.c
index 5d797751e205..00f5dab5bcc0 100644
--- a/trunk/drivers/acpi/acpica/dsmethod.c
+++ b/trunk/drivers/acpi/acpica/dsmethod.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/dsmthdat.c b/trunk/drivers/acpi/acpica/dsmthdat.c
index 905ce29a92e1..b40bd507be5d 100644
--- a/trunk/drivers/acpi/acpica/dsmthdat.c
+++ b/trunk/drivers/acpi/acpica/dsmthdat.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/dsobject.c b/trunk/drivers/acpi/acpica/dsobject.c
index f42e17e5c252..d7045ca3e32a 100644
--- a/trunk/drivers/acpi/acpica/dsobject.c
+++ b/trunk/drivers/acpi/acpica/dsobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/dsopcode.c b/trunk/drivers/acpi/acpica/dsopcode.c
index c627a288e027..e5eff7585102 100644
--- a/trunk/drivers/acpi/acpica/dsopcode.c
+++ b/trunk/drivers/acpi/acpica/dsopcode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/dsutils.c b/trunk/drivers/acpi/acpica/dsutils.c
index 2c477ce172fa..1abcda31037f 100644
--- a/trunk/drivers/acpi/acpica/dsutils.c
+++ b/trunk/drivers/acpi/acpica/dsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/dswexec.c b/trunk/drivers/acpi/acpica/dswexec.c
index fe40e4c6554f..642f3c053e87 100644
--- a/trunk/drivers/acpi/acpica/dswexec.c
+++ b/trunk/drivers/acpi/acpica/dswexec.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/dswload.c b/trunk/drivers/acpi/acpica/dswload.c
index 324acec1179a..552aa3a50c84 100644
--- a/trunk/drivers/acpi/acpica/dswload.c
+++ b/trunk/drivers/acpi/acpica/dswload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/dswload2.c b/trunk/drivers/acpi/acpica/dswload2.c
index 976318138c56..ae7147724763 100644
--- a/trunk/drivers/acpi/acpica/dswload2.c
+++ b/trunk/drivers/acpi/acpica/dswload2.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/dswscope.c b/trunk/drivers/acpi/acpica/dswscope.c
index 76a661fc1e09..9e9490a9cbf0 100644
--- a/trunk/drivers/acpi/acpica/dswscope.c
+++ b/trunk/drivers/acpi/acpica/dswscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/dswstate.c b/trunk/drivers/acpi/acpica/dswstate.c
index a6c374ef9914..c9c2ac13e7cc 100644
--- a/trunk/drivers/acpi/acpica/dswstate.c
+++ b/trunk/drivers/acpi/acpica/dswstate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/evevent.c b/trunk/drivers/acpi/acpica/evevent.c
index d458b041e651..6729ebe2f1e6 100644
--- a/trunk/drivers/acpi/acpica/evevent.c
+++ b/trunk/drivers/acpi/acpica/evevent.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -71,6 +71,12 @@ acpi_status acpi_ev_initialize_events(void)
ACPI_FUNCTION_TRACE(ev_initialize_events);
+ /* If Hardware Reduced flag is set, there are no fixed events */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/*
* Initialize the Fixed and General Purpose Events. This is done prior to
* enabling SCIs to prevent interrupts from occurring before the handlers
@@ -111,6 +117,12 @@ acpi_status acpi_ev_install_xrupt_handlers(void)
ACPI_FUNCTION_TRACE(ev_install_xrupt_handlers);
+ /* If Hardware Reduced flag is set, there is no ACPI h/w */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Install the SCI handler */
status = acpi_ev_install_sci_handler();
diff --git a/trunk/drivers/acpi/acpica/evglock.c b/trunk/drivers/acpi/acpica/evglock.c
index 56a562a1e5d7..5e5683cb1f0d 100644
--- a/trunk/drivers/acpi/acpica/evglock.c
+++ b/trunk/drivers/acpi/acpica/evglock.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,12 @@ acpi_status acpi_ev_init_global_lock_handler(void)
ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
+ /* If Hardware Reduced flag is set, there is no global lock */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Attempt installation of the global lock handler */
status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
diff --git a/trunk/drivers/acpi/acpica/evgpe.c b/trunk/drivers/acpi/acpica/evgpe.c
index 65c79add3b19..9e88cb6fb25e 100644
--- a/trunk/drivers/acpi/acpica/evgpe.c
+++ b/trunk/drivers/acpi/acpica/evgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/evgpeblk.c b/trunk/drivers/acpi/acpica/evgpeblk.c
index ca2c41a53311..be75339cd5dd 100644
--- a/trunk/drivers/acpi/acpica/evgpeblk.c
+++ b/trunk/drivers/acpi/acpica/evgpeblk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/evgpeinit.c b/trunk/drivers/acpi/acpica/evgpeinit.c
index ce9aa9f9a972..adf7494da9db 100644
--- a/trunk/drivers/acpi/acpica/evgpeinit.c
+++ b/trunk/drivers/acpi/acpica/evgpeinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/evgpeutil.c b/trunk/drivers/acpi/acpica/evgpeutil.c
index 80a81d0c4a80..25073932aa10 100644
--- a/trunk/drivers/acpi/acpica/evgpeutil.c
+++ b/trunk/drivers/acpi/acpica/evgpeutil.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/evmisc.c b/trunk/drivers/acpi/acpica/evmisc.c
index d0b331844427..84966f416463 100644
--- a/trunk/drivers/acpi/acpica/evmisc.c
+++ b/trunk/drivers/acpi/acpica/evmisc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/evregion.c b/trunk/drivers/acpi/acpica/evregion.c
index f0edf5c43c03..1b0180a1b798 100644
--- a/trunk/drivers/acpi/acpica/evregion.c
+++ b/trunk/drivers/acpi/acpica/evregion.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -329,6 +329,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
* FUNCTION: acpi_ev_address_space_dispatch
*
* PARAMETERS: region_obj - Internal region object
+ * field_obj - Corresponding field. Can be NULL.
* Function - Read or Write operation
* region_offset - Where in the region to read or write
* bit_width - Field width in bits (8, 16, 32, or 64)
@@ -344,6 +345,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
acpi_status
acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ union acpi_operand_object *field_obj,
u32 function,
u32 region_offset, u32 bit_width, u64 *value)
{
@@ -353,6 +355,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
union acpi_operand_object *handler_desc;
union acpi_operand_object *region_obj2;
void *region_context = NULL;
+ struct acpi_connection_info *context;
ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
@@ -375,6 +378,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
return_ACPI_STATUS(AE_NOT_EXIST);
}
+ context = handler_desc->address_space.context;
+
/*
* It may be the case that the region has never been initialized.
* Some types of regions require special init code
@@ -404,8 +409,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
acpi_ex_exit_interpreter();
status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
- handler_desc->address_space.context,
- &region_context);
+ context, &region_context);
/* Re-enter the interpreter */
@@ -455,6 +459,25 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
acpi_ut_get_region_name(region_obj->region.
space_id)));
+ /*
+ * Special handling for generic_serial_bus and general_purpose_io:
+ * There are three extra parameters that must be passed to the
+ * handler via the context:
+ * 1) Connection buffer, a resource template from Connection() op.
+ * 2) Length of the above buffer.
+ * 3) Actual access length from the access_as() op.
+ */
+ if (((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) ||
+ (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO)) &&
+ context && field_obj) {
+
+ /* Get the Connection (resource_template) buffer */
+
+ context->connection = field_obj->field.resource_buffer;
+ context->length = field_obj->field.resource_length;
+ context->access_length = field_obj->field.access_length;
+ }
+
if (!(handler_desc->address_space.handler_flags &
ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
/*
@@ -469,7 +492,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
status = handler(function,
(region_obj->region.address + region_offset),
- bit_width, value, handler_desc->address_space.context,
+ bit_width, value, context,
region_obj2->extra.region_context);
if (ACPI_FAILURE(status)) {
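/*
 * Sketch of what a GenericSerialBus/GeneralPurposeIo handler sees after
 * the dispatch change above: the Connection() resource template, its
 * length, and the AccessAs() access length arrive through the handler
 * context. The struct below stands in for acpi_connection_info; the
 * handler signature and example values are simplified assumptions.
 */
#include <stdio.h>
#include <stdint.h>

struct connection_info {		/* stand-in for struct acpi_connection_info */
	uint8_t *connection;		/* resource template from Connection() */
	uint16_t length;		/* length of that buffer */
	uint8_t access_length;		/* from AccessAs(..., AttribBytes(n)) */
};

static int serial_region_handler(uint32_t function, uint64_t address,
				 uint32_t bit_width, uint64_t *value,
				 struct connection_info *ctx)
{
	(void)function; (void)address; (void)bit_width; (void)value;

	if (!ctx || !ctx->connection)
		return -1;	/* no Connection() was associated with the field */

	printf("serial access: %u-byte resource template, access length %u\n",
	       ctx->length, ctx->access_length);
	return 0;
}

int main(void)
{
	/* Placeholder template bytes; 0x8E matches ACPI_RESOURCE_NAME_SERIAL_BUS */
	uint8_t template_buf[6] = { 0x8E, 0x00, 0x00, 0x01, 0x00, 0x00 };
	struct connection_info ctx = { template_buf, sizeof(template_buf), 16 };
	uint64_t value = 0;

	return serial_region_handler(0 /* read */, 0x10, 8, &value, &ctx);
}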
diff --git a/trunk/drivers/acpi/acpica/evrgnini.c b/trunk/drivers/acpi/acpica/evrgnini.c
index 55a5d35ef34a..819c17f5897a 100644
--- a/trunk/drivers/acpi/acpica/evrgnini.c
+++ b/trunk/drivers/acpi/acpica/evrgnini.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/evsci.c b/trunk/drivers/acpi/acpica/evsci.c
index 2ebd40e1a3ef..26065c612e76 100644
--- a/trunk/drivers/acpi/acpica/evsci.c
+++ b/trunk/drivers/acpi/acpica/evsci.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/evxface.c b/trunk/drivers/acpi/acpica/evxface.c
index f4f523bf5939..61944e89565a 100644
--- a/trunk/drivers/acpi/acpica/evxface.c
+++ b/trunk/drivers/acpi/acpica/evxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/evxfevnt.c b/trunk/drivers/acpi/acpica/evxfevnt.c
index 20516e599476..1768bbec1002 100644
--- a/trunk/drivers/acpi/acpica/evxfevnt.c
+++ b/trunk/drivers/acpi/acpica/evxfevnt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/evxfgpe.c b/trunk/drivers/acpi/acpica/evxfgpe.c
index f06a3ee356ba..33388fd69df4 100644
--- a/trunk/drivers/acpi/acpica/evxfgpe.c
+++ b/trunk/drivers/acpi/acpica/evxfgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/evxfregn.c b/trunk/drivers/acpi/acpica/evxfregn.c
index aee887e3ca5c..6019208cd4b6 100644
--- a/trunk/drivers/acpi/acpica/evxfregn.c
+++ b/trunk/drivers/acpi/acpica/evxfregn.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exconfig.c b/trunk/drivers/acpi/acpica/exconfig.c
index 745a42b401f5..c86d44e41bc8 100644
--- a/trunk/drivers/acpi/acpica/exconfig.c
+++ b/trunk/drivers/acpi/acpica/exconfig.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -297,9 +297,9 @@ acpi_ex_region_read(union acpi_operand_object *obj_desc, u32 length, u8 *buffer)
/* Bytewise reads */
for (i = 0; i < length; i++) {
- status = acpi_ev_address_space_dispatch(obj_desc, ACPI_READ,
- region_offset, 8,
- &value);
+ status =
+ acpi_ev_address_space_dispatch(obj_desc, NULL, ACPI_READ,
+ region_offset, 8, &value);
if (ACPI_FAILURE(status)) {
return status;
}
diff --git a/trunk/drivers/acpi/acpica/exconvrt.c b/trunk/drivers/acpi/acpica/exconvrt.c
index 74162a11817d..e385436bd424 100644
--- a/trunk/drivers/acpi/acpica/exconvrt.c
+++ b/trunk/drivers/acpi/acpica/exconvrt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/excreate.c b/trunk/drivers/acpi/acpica/excreate.c
index 110711afada8..3f5bc998c1cb 100644
--- a/trunk/drivers/acpi/acpica/excreate.c
+++ b/trunk/drivers/acpi/acpica/excreate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -267,7 +267,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
*
* PARAMETERS: aml_start - Pointer to the region declaration AML
* aml_length - Max length of the declaration AML
- * region_space - space_iD for the region
+ * space_id - Address space ID for the region
* walk_state - Current state
*
* RETURN: Status
@@ -279,7 +279,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
acpi_status
acpi_ex_create_region(u8 * aml_start,
u32 aml_length,
- u8 region_space, struct acpi_walk_state *walk_state)
+ u8 space_id, struct acpi_walk_state *walk_state)
{
acpi_status status;
union acpi_operand_object *obj_desc;
@@ -304,16 +304,19 @@ acpi_ex_create_region(u8 * aml_start,
* Space ID must be one of the predefined IDs, or in the user-defined
* range
*/
- if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
- (region_space < ACPI_USER_REGION_BEGIN) &&
- (region_space != ACPI_ADR_SPACE_DATA_TABLE)) {
- ACPI_ERROR((AE_INFO, "Invalid AddressSpace type 0x%X",
- region_space));
- return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
+ if (!acpi_is_valid_space_id(space_id)) {
+ /*
+ * Print an error message, but continue. We don't want to abort
+ * a table load for this exception. Instead, if the region is
+ * actually used at runtime, abort the executing method.
+ */
+ ACPI_ERROR((AE_INFO,
+ "Invalid/unknown Address Space ID: 0x%2.2X",
+ space_id));
}
ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (0x%X)\n",
- acpi_ut_get_region_name(region_space), region_space));
+ acpi_ut_get_region_name(space_id), space_id));
/* Create the region descriptor */
@@ -330,10 +333,16 @@ acpi_ex_create_region(u8 * aml_start,
region_obj2 = obj_desc->common.next_object;
region_obj2->extra.aml_start = aml_start;
region_obj2->extra.aml_length = aml_length;
+ if (walk_state->scope_info) {
+ region_obj2->extra.scope_node =
+ walk_state->scope_info->scope.node;
+ } else {
+ region_obj2->extra.scope_node = node;
+ }
/* Init the region from the operands */
- obj_desc->region.space_id = region_space;
+ obj_desc->region.space_id = space_id;
obj_desc->region.address = 0;
obj_desc->region.length = 0;
obj_desc->region.node = node;
diff --git a/trunk/drivers/acpi/acpica/exdebug.c b/trunk/drivers/acpi/acpica/exdebug.c
index c7a2f1edd282..e211e9c19215 100644
--- a/trunk/drivers/acpi/acpica/exdebug.c
+++ b/trunk/drivers/acpi/acpica/exdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exdump.c b/trunk/drivers/acpi/acpica/exdump.c
index 61b8c0e8b74d..2a6ac0a3bc1e 100644
--- a/trunk/drivers/acpi/acpica/exdump.c
+++ b/trunk/drivers/acpi/acpica/exdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -192,10 +192,13 @@ static struct acpi_exdump_info acpi_ex_dump_buffer_field[3] = {
"Buffer Object"}
};
-static struct acpi_exdump_info acpi_ex_dump_region_field[3] = {
+static struct acpi_exdump_info acpi_ex_dump_region_field[5] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region_field), NULL},
{ACPI_EXD_FIELD, 0, NULL},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.region_obj), "Region Object"}
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(field.access_length), "AccessLength"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.region_obj), "Region Object"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.resource_buffer),
+ "ResourceBuffer"}
};
static struct acpi_exdump_info acpi_ex_dump_bank_field[5] = {
diff --git a/trunk/drivers/acpi/acpica/exfield.c b/trunk/drivers/acpi/acpica/exfield.c
index 0bde2230c028..dc092f5b35d6 100644
--- a/trunk/drivers/acpi/acpica/exfield.c
+++ b/trunk/drivers/acpi/acpica/exfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -100,18 +100,25 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
(obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_SMBUS
|| obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_GSBUS
+ || obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_IPMI)) {
/*
- * This is an SMBus or IPMI read. We must create a buffer to hold
+ * This is an SMBus, GSBus or IPMI read. We must create a buffer to hold
* the data and then directly access the region handler.
*
- * Note: Smbus protocol value is passed in upper 16-bits of Function
+ * Note: SMBus and GSBus protocol value is passed in upper 16-bits of Function
*/
if (obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_SMBUS) {
length = ACPI_SMBUS_BUFFER_SIZE;
function =
ACPI_READ | (obj_desc->field.attribute << 16);
+ } else if (obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_GSBUS) {
+ length = ACPI_GSBUS_BUFFER_SIZE;
+ function =
+ ACPI_READ | (obj_desc->field.attribute << 16);
} else { /* IPMI */
length = ACPI_IPMI_BUFFER_SIZE;
@@ -248,21 +255,23 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
(obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_SMBUS
|| obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_GSBUS
+ || obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_IPMI)) {
/*
- * This is an SMBus or IPMI write. We will bypass the entire field
+ * This is an SMBus, GSBus or IPMI write. We will bypass the entire field
* mechanism and handoff the buffer directly to the handler. For
* these address spaces, the buffer is bi-directional; on a write,
* return data is returned in the same buffer.
*
* Source must be a buffer of sufficient size:
- * ACPI_SMBUS_BUFFER_SIZE or ACPI_IPMI_BUFFER_SIZE.
+ * ACPI_SMBUS_BUFFER_SIZE, ACPI_GSBUS_BUFFER_SIZE, or ACPI_IPMI_BUFFER_SIZE.
*
- * Note: SMBus protocol type is passed in upper 16-bits of Function
+ * Note: SMBus and GSBus protocol type is passed in upper 16-bits of Function
*/
if (source_desc->common.type != ACPI_TYPE_BUFFER) {
ACPI_ERROR((AE_INFO,
- "SMBus or IPMI write requires Buffer, found type %s",
+ "SMBus/IPMI/GenericSerialBus write requires Buffer, found type %s",
acpi_ut_get_object_type_name(source_desc)));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
@@ -273,6 +282,11 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
length = ACPI_SMBUS_BUFFER_SIZE;
function =
ACPI_WRITE | (obj_desc->field.attribute << 16);
+ } else if (obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_GSBUS) {
+ length = ACPI_GSBUS_BUFFER_SIZE;
+ function =
+ ACPI_WRITE | (obj_desc->field.attribute << 16);
} else { /* IPMI */
length = ACPI_IPMI_BUFFER_SIZE;
@@ -281,7 +295,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
if (source_desc->buffer.length < length) {
ACPI_ERROR((AE_INFO,
- "SMBus or IPMI write requires Buffer of length %u, found length %u",
+ "SMBus/IPMI/GenericSerialBus write requires Buffer of length %u, found length %u",
length, source_desc->buffer.length));
return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
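
Both hunks above pack the protocol attribute into the upper 16 bits of the Function value that reaches the region handler, with ACPI_READ or ACPI_WRITE in the low bits. A small illustrative helper pair restating that encoding; the sketch_* names are assumptions, not part of the patch.

#include <acpi/acpi.h>	/* for ACPI_READ/ACPI_WRITE; assumed build context */

/* Mirrors "ACPI_READ | (obj_desc->field.attribute << 16)" in the hunks above */
static inline u32 sketch_encode_function(u32 read_or_write, u8 attribute)
{
	return read_or_write | ((u32)attribute << 16);
}

/* Decode a serial-bus handler might apply on entry */
static inline void sketch_decode_function(u32 function, u32 *rw, u8 *attribute)
{
	*rw = function & 0xFFFF;	/* ACPI_READ (0) or ACPI_WRITE (1) */
	*attribute = (u8)(function >> 16);
}
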
diff --git a/trunk/drivers/acpi/acpica/exfldio.c b/trunk/drivers/acpi/acpica/exfldio.c
index f915a7f3f921..149de45fdadd 100644
--- a/trunk/drivers/acpi/acpica/exfldio.c
+++ b/trunk/drivers/acpi/acpica/exfldio.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -86,6 +86,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
{
acpi_status status = AE_OK;
union acpi_operand_object *rgn_desc;
+ u8 space_id;
ACPI_FUNCTION_TRACE_U32(ex_setup_region, field_datum_byte_offset);
@@ -101,6 +102,17 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
+ space_id = rgn_desc->region.space_id;
+
+ /* Validate the Space ID */
+
+ if (!acpi_is_valid_space_id(space_id)) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid/unknown Address Space ID: 0x%2.2X",
+ space_id));
+ return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
+ }
+
/*
* If the Region Address and Length have not been previously evaluated,
* evaluate them now and save the results.
@@ -119,11 +131,12 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
}
/*
- * Exit now for SMBus or IPMI address space, it has a non-linear
+ * Exit now for SMBus, GSBus or IPMI address space, it has a non-linear
* address space and the request cannot be directly validated
*/
- if (rgn_desc->region.space_id == ACPI_ADR_SPACE_SMBUS ||
- rgn_desc->region.space_id == ACPI_ADR_SPACE_IPMI) {
+ if (space_id == ACPI_ADR_SPACE_SMBUS ||
+ space_id == ACPI_ADR_SPACE_GSBUS ||
+ space_id == ACPI_ADR_SPACE_IPMI) {
/* SMBus or IPMI has a non-linear address space */
@@ -271,11 +284,12 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
/* Invoke the appropriate address_space/op_region handler */
- status =
- acpi_ev_address_space_dispatch(rgn_desc, function, region_offset,
- ACPI_MUL_8(obj_desc->common_field.
- access_byte_width),
- value);
+ status = acpi_ev_address_space_dispatch(rgn_desc, obj_desc,
+ function, region_offset,
+ ACPI_MUL_8(obj_desc->
+ common_field.
+ access_byte_width),
+ value);
if (ACPI_FAILURE(status)) {
if (status == AE_NOT_IMPLEMENTED) {
@@ -316,6 +330,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
static u8
acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value)
{
+ ACPI_FUNCTION_NAME(ex_register_overflow);
if (obj_desc->common_field.bit_length >= ACPI_INTEGER_BIT_SIZE) {
/*
@@ -330,6 +345,11 @@ acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value)
* The Value is larger than the maximum value that can fit into
* the register.
*/
+ ACPI_ERROR((AE_INFO,
+ "Index value 0x%8.8X%8.8X overflows field width 0x%X",
+ ACPI_FORMAT_UINT64(value),
+ obj_desc->common_field.bit_length));
+
return (TRUE);
}
diff --git a/trunk/drivers/acpi/acpica/exmisc.c b/trunk/drivers/acpi/acpica/exmisc.c
index 703d88ed0b3d..0a0893310348 100644
--- a/trunk/drivers/acpi/acpica/exmisc.c
+++ b/trunk/drivers/acpi/acpica/exmisc.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exmutex.c b/trunk/drivers/acpi/acpica/exmutex.c
index be1c56ead653..60933e9dc3c0 100644
--- a/trunk/drivers/acpi/acpica/exmutex.c
+++ b/trunk/drivers/acpi/acpica/exmutex.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exnames.c b/trunk/drivers/acpi/acpica/exnames.c
index 49ec049c157e..fcc75fa27d32 100644
--- a/trunk/drivers/acpi/acpica/exnames.c
+++ b/trunk/drivers/acpi/acpica/exnames.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exoparg1.c b/trunk/drivers/acpi/acpica/exoparg1.c
index 236ead14b7f7..9ba8c73cea16 100644
--- a/trunk/drivers/acpi/acpica/exoparg1.c
+++ b/trunk/drivers/acpi/acpica/exoparg1.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exoparg2.c b/trunk/drivers/acpi/acpica/exoparg2.c
index 2571b4a310f4..879e8a277b94 100644
--- a/trunk/drivers/acpi/acpica/exoparg2.c
+++ b/trunk/drivers/acpi/acpica/exoparg2.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exoparg3.c b/trunk/drivers/acpi/acpica/exoparg3.c
index 1b48d9d28c9a..71fcc65c9ffa 100644
--- a/trunk/drivers/acpi/acpica/exoparg3.c
+++ b/trunk/drivers/acpi/acpica/exoparg3.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exoparg6.c b/trunk/drivers/acpi/acpica/exoparg6.c
index f4a2787e8e92..0786b8659061 100644
--- a/trunk/drivers/acpi/acpica/exoparg6.c
+++ b/trunk/drivers/acpi/acpica/exoparg6.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exprep.c b/trunk/drivers/acpi/acpica/exprep.c
index cc95e2000406..30157f5a12d7 100644
--- a/trunk/drivers/acpi/acpica/exprep.c
+++ b/trunk/drivers/acpi/acpica/exprep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -47,6 +47,7 @@
#include "acinterp.h"
#include "amlcode.h"
#include "acnamesp.h"
+#include "acdispat.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exprep")
@@ -455,6 +456,30 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
obj_desc->field.region_obj =
acpi_ns_get_attached_object(info->region_node);
+ /* Fields specific to generic_serial_bus fields */
+
+ obj_desc->field.access_length = info->access_length;
+
+ if (info->connection_node) {
+ second_desc = info->connection_node->object;
+ if (!(second_desc->common.flags & AOPOBJ_DATA_VALID)) {
+ status =
+ acpi_ds_get_buffer_arguments(second_desc);
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_delete_object_desc(obj_desc);
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ obj_desc->field.resource_buffer =
+ second_desc->buffer.pointer;
+ obj_desc->field.resource_length =
+ (u16)second_desc->buffer.length;
+ } else if (info->resource_buffer) {
+ obj_desc->field.resource_buffer = info->resource_buffer;
+ obj_desc->field.resource_length = info->resource_length;
+ }
+
/* Allow full data read from EC address space */
if ((obj_desc->field.region_obj->region.space_id ==
diff --git a/trunk/drivers/acpi/acpica/exregion.c b/trunk/drivers/acpi/acpica/exregion.c
index f0d5e14f1f2c..12d51df6d3bf 100644
--- a/trunk/drivers/acpi/acpica/exregion.c
+++ b/trunk/drivers/acpi/acpica/exregion.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exresnte.c b/trunk/drivers/acpi/acpica/exresnte.c
index 55997e46948b..fa50e77e64a8 100644
--- a/trunk/drivers/acpi/acpica/exresnte.c
+++ b/trunk/drivers/acpi/acpica/exresnte.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exresolv.c b/trunk/drivers/acpi/acpica/exresolv.c
index db502cd7d934..6e335dc34528 100644
--- a/trunk/drivers/acpi/acpica/exresolv.c
+++ b/trunk/drivers/acpi/acpica/exresolv.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exresop.c b/trunk/drivers/acpi/acpica/exresop.c
index e3bb00ccdff5..a67b1d925ddd 100644
--- a/trunk/drivers/acpi/acpica/exresop.c
+++ b/trunk/drivers/acpi/acpica/exresop.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exstore.c b/trunk/drivers/acpi/acpica/exstore.c
index c0c8842dd344..c6cf843cc4c9 100644
--- a/trunk/drivers/acpi/acpica/exstore.c
+++ b/trunk/drivers/acpi/acpica/exstore.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exstoren.c b/trunk/drivers/acpi/acpica/exstoren.c
index a979017d56b8..b35bed52e061 100644
--- a/trunk/drivers/acpi/acpica/exstoren.c
+++ b/trunk/drivers/acpi/acpica/exstoren.c
@@ -7,7 +7,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exstorob.c b/trunk/drivers/acpi/acpica/exstorob.c
index dc665cc554de..65a45d8335c8 100644
--- a/trunk/drivers/acpi/acpica/exstorob.c
+++ b/trunk/drivers/acpi/acpica/exstorob.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exsystem.c b/trunk/drivers/acpi/acpica/exsystem.c
index df66e7b686be..191a12945226 100644
--- a/trunk/drivers/acpi/acpica/exsystem.c
+++ b/trunk/drivers/acpi/acpica/exsystem.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exutils.c b/trunk/drivers/acpi/acpica/exutils.c
index 8ad93146dd32..eb6798ba8b59 100644
--- a/trunk/drivers/acpi/acpica/exutils.c
+++ b/trunk/drivers/acpi/acpica/exutils.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -435,4 +435,29 @@ void acpi_ex_integer_to_string(char *out_string, u64 value)
}
}
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_is_valid_space_id
+ *
+ * PARAMETERS: space_id - ID to be validated
+ *
+ * RETURN: TRUE if valid/supported ID.
+ *
+ * DESCRIPTION: Validate an operation region space_id.
+ *
+ ******************************************************************************/
+
+u8 acpi_is_valid_space_id(u8 space_id)
+{
+
+ if ((space_id >= ACPI_NUM_PREDEFINED_REGIONS) &&
+ (space_id < ACPI_USER_REGION_BEGIN) &&
+ (space_id != ACPI_ADR_SPACE_DATA_TABLE) &&
+ (space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+ return (FALSE);
+ }
+
+ return (TRUE);
+}
+
#endif
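
acpi_is_valid_space_id() accepts the predefined address spaces, the user-defined range starting at ACPI_USER_REGION_BEGIN, and the data_table and fixed_hardware IDs; everything else is rejected. A hedged illustration of the outcomes, assuming the ACPICA-internal prototype is visible in the build context; the example IDs 0x85 and 0x30 and the function name are illustrative only.

#include <acpi/acpi.h>	/* ACPI types; acpi_is_valid_space_id() itself is ACPICA-internal */

static u8 space_id_examples_sketch(void)
{
	u8 predefined = acpi_is_valid_space_id(ACPI_ADR_SPACE_SYSTEM_MEMORY);	/* TRUE */
	u8 gsbus = acpi_is_valid_space_id(ACPI_ADR_SPACE_GSBUS);	/* TRUE (ACPI 5.0) */
	u8 user = acpi_is_valid_space_id(0x85);		/* TRUE: user-defined range */
	u8 unknown = acpi_is_valid_space_id(0x30);	/* FALSE: rejected */

	return predefined && gsbus && user && !unknown;	/* TRUE for this set */
}
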
diff --git a/trunk/drivers/acpi/acpica/hwacpi.c b/trunk/drivers/acpi/acpica/hwacpi.c
index fc380d3d45ab..d21ec5f0b3a9 100644
--- a/trunk/drivers/acpi/acpica/hwacpi.c
+++ b/trunk/drivers/acpi/acpica/hwacpi.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/hwgpe.c b/trunk/drivers/acpi/acpica/hwgpe.c
index f610d88a66be..1a6894afef79 100644
--- a/trunk/drivers/acpi/acpica/hwgpe.c
+++ b/trunk/drivers/acpi/acpica/hwgpe.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/hwpci.c b/trunk/drivers/acpi/acpica/hwpci.c
index 050fd227951b..1455ddcdc32c 100644
--- a/trunk/drivers/acpi/acpica/hwpci.c
+++ b/trunk/drivers/acpi/acpica/hwpci.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/hwregs.c b/trunk/drivers/acpi/acpica/hwregs.c
index cc70f3fdcdd1..4ea4eeb51bfd 100644
--- a/trunk/drivers/acpi/acpica/hwregs.c
+++ b/trunk/drivers/acpi/acpica/hwregs.c
@@ -7,7 +7,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/hwsleep.c b/trunk/drivers/acpi/acpica/hwsleep.c
index d52da3073650..3c4a922a9fc2 100644
--- a/trunk/drivers/acpi/acpica/hwsleep.c
+++ b/trunk/drivers/acpi/acpica/hwsleep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/hwtimer.c b/trunk/drivers/acpi/acpica/hwtimer.c
index 50d21c40b5c1..d4973d9da9f1 100644
--- a/trunk/drivers/acpi/acpica/hwtimer.c
+++ b/trunk/drivers/acpi/acpica/hwtimer.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/hwvalid.c b/trunk/drivers/acpi/acpica/hwvalid.c
index 5f1605874655..6e5c43a60bb7 100644
--- a/trunk/drivers/acpi/acpica/hwvalid.c
+++ b/trunk/drivers/acpi/acpica/hwvalid.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -134,6 +134,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
/* Supported widths are 8/16/32 */
if ((bit_width != 8) && (bit_width != 16) && (bit_width != 32)) {
+ ACPI_ERROR((AE_INFO,
+ "Bad BitWidth parameter: %8.8X", bit_width));
return AE_BAD_PARAMETER;
}
diff --git a/trunk/drivers/acpi/acpica/hwxface.c b/trunk/drivers/acpi/acpica/hwxface.c
index d707756228c2..9d38eb6c0d0b 100644
--- a/trunk/drivers/acpi/acpica/hwxface.c
+++ b/trunk/drivers/acpi/acpica/hwxface.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsaccess.c b/trunk/drivers/acpi/acpica/nsaccess.c
index d93172fd15a8..61623f3f6826 100644
--- a/trunk/drivers/acpi/acpica/nsaccess.c
+++ b/trunk/drivers/acpi/acpica/nsaccess.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsalloc.c b/trunk/drivers/acpi/acpica/nsalloc.c
index 1d0ef15d158f..7c3d3ceb98b3 100644
--- a/trunk/drivers/acpi/acpica/nsalloc.c
+++ b/trunk/drivers/acpi/acpica/nsalloc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsdump.c b/trunk/drivers/acpi/acpica/nsdump.c
index b683cc2ff9d3..b7f2b3be79ac 100644
--- a/trunk/drivers/acpi/acpica/nsdump.c
+++ b/trunk/drivers/acpi/acpica/nsdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsdumpdv.c b/trunk/drivers/acpi/acpica/nsdumpdv.c
index 2ed294b7a4db..30ea5bc53a78 100644
--- a/trunk/drivers/acpi/acpica/nsdumpdv.c
+++ b/trunk/drivers/acpi/acpica/nsdumpdv.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nseval.c b/trunk/drivers/acpi/acpica/nseval.c
index c1bd02b1a058..f375cb82e321 100644
--- a/trunk/drivers/acpi/acpica/nseval.c
+++ b/trunk/drivers/acpi/acpica/nseval.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsinit.c b/trunk/drivers/acpi/acpica/nsinit.c
index fd7c6380e294..9d84ec2f0211 100644
--- a/trunk/drivers/acpi/acpica/nsinit.c
+++ b/trunk/drivers/acpi/acpica/nsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsload.c b/trunk/drivers/acpi/acpica/nsload.c
index 5f7dc691c183..5cbf15ffe7d8 100644
--- a/trunk/drivers/acpi/acpica/nsload.c
+++ b/trunk/drivers/acpi/acpica/nsload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsnames.c b/trunk/drivers/acpi/acpica/nsnames.c
index d5fa520c3de5..b20e7c8c3ffb 100644
--- a/trunk/drivers/acpi/acpica/nsnames.c
+++ b/trunk/drivers/acpi/acpica/nsnames.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsobject.c b/trunk/drivers/acpi/acpica/nsobject.c
index 3bb8bf105ea2..dd77a3ce6e50 100644
--- a/trunk/drivers/acpi/acpica/nsobject.c
+++ b/trunk/drivers/acpi/acpica/nsobject.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsparse.c b/trunk/drivers/acpi/acpica/nsparse.c
index b3234fa795b8..ec7ba2d3463c 100644
--- a/trunk/drivers/acpi/acpica/nsparse.c
+++ b/trunk/drivers/acpi/acpica/nsparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nspredef.c b/trunk/drivers/acpi/acpica/nspredef.c
index c845c8089f39..bbe46a447d34 100644
--- a/trunk/drivers/acpi/acpica/nspredef.c
+++ b/trunk/drivers/acpi/acpica/nspredef.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -620,6 +620,7 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
case ACPI_PTYPE2_FIXED:
case ACPI_PTYPE2_MIN:
case ACPI_PTYPE2_COUNT:
+ case ACPI_PTYPE2_FIX_VAR:
/*
* These types all return a single Package that consists of a
@@ -759,6 +760,34 @@ acpi_ns_check_package_list(struct acpi_predefined_data *data,
}
break;
+ case ACPI_PTYPE2_FIX_VAR:
+ /*
+ * Each subpackage has a fixed number of elements and an
+ * optional element
+ */
+ expected_count =
+ package->ret_info.count1 + package->ret_info.count2;
+ if (sub_package->package.count < expected_count) {
+ goto package_too_small;
+ }
+
+ status =
+ acpi_ns_check_package_elements(data, sub_elements,
+ package->ret_info.
+ object_type1,
+ package->ret_info.
+ count1,
+ package->ret_info.
+ object_type2,
+ sub_package->package.
+ count -
+ package->ret_info.
+ count1, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
case ACPI_PTYPE2_FIXED:
/* Each sub-package has a fixed length */
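
The ACPI_PTYPE2_FIX_VAR case added above validates subpackages that carry count1 elements of object_type1 followed by a variable tail of object_type2, and requires at least count1 + count2 elements overall. A compact, hedged restatement of that length rule; the helper name and parameters are illustrative.

#include <acpi/acpi.h>	/* assumed build context for u8/u32 and TRUE/FALSE */

/* Minimum-size rule applied per subpackage by the FIX_VAR case above */
static u8 fix_var_subpackage_is_large_enough(u32 sub_package_count,
					     u32 count1, u32 count2)
{
	return (sub_package_count >= count1 + count2) ? TRUE : FALSE;
}
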
diff --git a/trunk/drivers/acpi/acpica/nsrepair.c b/trunk/drivers/acpi/acpica/nsrepair.c
index ac7b854b0bd7..9c35d20eb52b 100644
--- a/trunk/drivers/acpi/acpica/nsrepair.c
+++ b/trunk/drivers/acpi/acpica/nsrepair.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -634,6 +634,7 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
case ACPI_PTYPE2_FIXED:
case ACPI_PTYPE2_MIN:
case ACPI_PTYPE2_REV_FIXED:
+ case ACPI_PTYPE2_FIX_VAR:
break;
default:
diff --git a/trunk/drivers/acpi/acpica/nsrepair2.c b/trunk/drivers/acpi/acpica/nsrepair2.c
index 024c4f263f87..726bc8e687f7 100644
--- a/trunk/drivers/acpi/acpica/nsrepair2.c
+++ b/trunk/drivers/acpi/acpica/nsrepair2.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -467,11 +467,12 @@ acpi_ns_repair_HID(struct acpi_predefined_data *data,
}
/*
- * Copy and uppercase the string. From the ACPI specification:
+ * Copy and uppercase the string. From the ACPI 5.0 specification:
*
* A valid PNP ID must be of the form "AAA####" where A is an uppercase
* letter and # is a hex digit. A valid ACPI ID must be of the form
- * "ACPI####" where # is a hex digit.
+ * "NNNN####" where N is an uppercase letter or decimal digit, and
+ * # is a hex digit.
*/
for (dest = new_string->string.pointer; *source; dest++, source++) {
*dest = (char)ACPI_TOUPPER(*source);
diff --git a/trunk/drivers/acpi/acpica/nssearch.c b/trunk/drivers/acpi/acpica/nssearch.c
index 28b0d7a62b99..507043d66114 100644
--- a/trunk/drivers/acpi/acpica/nssearch.c
+++ b/trunk/drivers/acpi/acpica/nssearch.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsutils.c b/trunk/drivers/acpi/acpica/nsutils.c
index cb1b104a69a2..a535b7afda5c 100644
--- a/trunk/drivers/acpi/acpica/nsutils.c
+++ b/trunk/drivers/acpi/acpica/nsutils.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nswalk.c b/trunk/drivers/acpi/acpica/nswalk.c
index 345f0c3c6ad2..f69895a54895 100644
--- a/trunk/drivers/acpi/acpica/nswalk.c
+++ b/trunk/drivers/acpi/acpica/nswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsxfeval.c b/trunk/drivers/acpi/acpica/nsxfeval.c
index e7f016d1b226..71d15f61807b 100644
--- a/trunk/drivers/acpi/acpica/nsxfeval.c
+++ b/trunk/drivers/acpi/acpica/nsxfeval.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsxfname.c b/trunk/drivers/acpi/acpica/nsxfname.c
index 83bf93024303..af401c9c4dfc 100644
--- a/trunk/drivers/acpi/acpica/nsxfname.c
+++ b/trunk/drivers/acpi/acpica/nsxfname.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsxfobj.c b/trunk/drivers/acpi/acpica/nsxfobj.c
index 57e6d825ed84..880a605cee20 100644
--- a/trunk/drivers/acpi/acpica/nsxfobj.c
+++ b/trunk/drivers/acpi/acpica/nsxfobj.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/psargs.c b/trunk/drivers/acpi/acpica/psargs.c
index e1fad0ee0136..5ac36aba507c 100644
--- a/trunk/drivers/acpi/acpica/psargs.c
+++ b/trunk/drivers/acpi/acpica/psargs.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -484,34 +484,54 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
*parser_state)
{
- u32 aml_offset = (u32)
- ACPI_PTR_DIFF(parser_state->aml,
- parser_state->aml_start);
+ u32 aml_offset;
union acpi_parse_object *field;
+ union acpi_parse_object *arg = NULL;
u16 opcode;
u32 name;
+ u8 access_type;
+ u8 access_attribute;
+ u8 access_length;
+ u32 pkg_length;
+ u8 *pkg_end;
+ u32 buffer_length;
ACPI_FUNCTION_TRACE(ps_get_next_field);
+ aml_offset =
+ (u32)ACPI_PTR_DIFF(parser_state->aml, parser_state->aml_start);
+
/* Determine field type */
switch (ACPI_GET8(parser_state->aml)) {
- default:
+ case AML_FIELD_OFFSET_OP:
- opcode = AML_INT_NAMEDFIELD_OP;
+ opcode = AML_INT_RESERVEDFIELD_OP;
+ parser_state->aml++;
break;
- case 0x00:
+ case AML_FIELD_ACCESS_OP:
- opcode = AML_INT_RESERVEDFIELD_OP;
+ opcode = AML_INT_ACCESSFIELD_OP;
parser_state->aml++;
break;
- case 0x01:
+ case AML_FIELD_CONNECTION_OP:
- opcode = AML_INT_ACCESSFIELD_OP;
+ opcode = AML_INT_CONNECTION_OP;
+ parser_state->aml++;
+ break;
+
+ case AML_FIELD_EXT_ACCESS_OP:
+
+ opcode = AML_INT_EXTACCESSFIELD_OP;
parser_state->aml++;
break;
+
+ default:
+
+ opcode = AML_INT_NAMEDFIELD_OP;
+ break;
}
/* Allocate a new field op */
@@ -549,16 +569,111 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
break;
case AML_INT_ACCESSFIELD_OP:
+ case AML_INT_EXTACCESSFIELD_OP:
/*
* Get access_type and access_attrib and merge into the field Op
- * access_type is first operand, access_attribute is second
+ * access_type is first operand, access_attribute is second. Stuff
+ * these bytes into the node integer value for convenience.
*/
- field->common.value.integer =
- (((u32) ACPI_GET8(parser_state->aml) << 8));
+
+ /* Get the two bytes (Type/Attribute) */
+
+ access_type = ACPI_GET8(parser_state->aml);
parser_state->aml++;
- field->common.value.integer |= ACPI_GET8(parser_state->aml);
+ access_attribute = ACPI_GET8(parser_state->aml);
parser_state->aml++;
+
+ field->common.value.integer = (u8)access_type;
+ field->common.value.integer |= (u16)(access_attribute << 8);
+
+ /* This opcode has a third byte, access_length */
+
+ if (opcode == AML_INT_EXTACCESSFIELD_OP) {
+ access_length = ACPI_GET8(parser_state->aml);
+ parser_state->aml++;
+
+ field->common.value.integer |=
+ (u32)(access_length << 16);
+ }
+ break;
+
+ case AML_INT_CONNECTION_OP:
+
+ /*
+ * Argument for Connection operator can be either a Buffer
+ * (resource descriptor), or a name_string.
+ */
+ if (ACPI_GET8(parser_state->aml) == AML_BUFFER_OP) {
+ parser_state->aml++;
+
+ pkg_end = parser_state->aml;
+ pkg_length =
+ acpi_ps_get_next_package_length(parser_state);
+ pkg_end += pkg_length;
+
+ if (parser_state->aml < pkg_end) {
+
+ /* Non-empty list */
+
+ arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
+ if (!arg) {
+ return_PTR(NULL);
+ }
+
+ /* Get the actual buffer length argument */
+
+ opcode = ACPI_GET8(parser_state->aml);
+ parser_state->aml++;
+
+ switch (opcode) {
+ case AML_BYTE_OP: /* AML_BYTEDATA_ARG */
+ buffer_length =
+ ACPI_GET8(parser_state->aml);
+ parser_state->aml += 1;
+ break;
+
+ case AML_WORD_OP: /* AML_WORDDATA_ARG */
+ buffer_length =
+ ACPI_GET16(parser_state->aml);
+ parser_state->aml += 2;
+ break;
+
+ case AML_DWORD_OP: /* AML_DWORDATA_ARG */
+ buffer_length =
+ ACPI_GET32(parser_state->aml);
+ parser_state->aml += 4;
+ break;
+
+ default:
+ buffer_length = 0;
+ break;
+ }
+
+ /* Fill in bytelist data */
+
+ arg->named.value.size = buffer_length;
+ arg->named.data = parser_state->aml;
+ }
+
+ /* Skip to End of byte data */
+
+ parser_state->aml = pkg_end;
+ } else {
+ arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
+ if (!arg) {
+ return_PTR(NULL);
+ }
+
+ /* Get the Namestring argument */
+
+ arg->common.value.name =
+ acpi_ps_get_next_namestring(parser_state);
+ }
+
+ /* Link the buffer/namestring to parent (CONNECTION_OP) */
+
+ acpi_ps_append_arg(field, arg);
break;
default:
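
For AccessField and the new ACPI 5.0 ExtAccessField, the parser above packs AccessType into bits 0-7 of the op's integer value, AccessAttribute into bits 8-15 and, for the extended form only, AccessLength into bits 16-23. A hedged helper pair making that layout explicit; the sketch_* names are assumptions, not part of the patch.

#include <acpi/acpi.h>	/* assumed build context for u8/u32 */

/* Mirrors the packing performed in acpi_ps_get_next_field() above */
static u32 sketch_pack_access_field(u8 type, u8 attribute, u8 length)
{
	return (u32)type | ((u32)attribute << 8) | ((u32)length << 16);
}

/* Unpacking a consumer of the field op could apply later */
static void sketch_unpack_access_field(u32 packed, u8 *type, u8 *attribute,
				       u8 *length)
{
	*type = (u8)packed;
	*attribute = (u8)(packed >> 8);
	*length = (u8)(packed >> 16);
}
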
diff --git a/trunk/drivers/acpi/acpica/psloop.c b/trunk/drivers/acpi/acpica/psloop.c
index 01dd70d1de51..9547ad8a620b 100644
--- a/trunk/drivers/acpi/acpica/psloop.c
+++ b/trunk/drivers/acpi/acpica/psloop.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/psopcode.c b/trunk/drivers/acpi/acpica/psopcode.c
index bed08de7528c..a0226fdcf75c 100644
--- a/trunk/drivers/acpi/acpica/psopcode.c
+++ b/trunk/drivers/acpi/acpica/psopcode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -638,7 +638,16 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
/* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R,
- AML_FLAGS_EXEC_0A_0T_1R)
+ AML_FLAGS_EXEC_0A_0T_1R),
+
+/* ACPI 5.0 opcodes */
+
+/* 7F */ ACPI_OP("-ConnectField-", ARGP_CONNECTFIELD_OP,
+ ARGI_CONNECTFIELD_OP, ACPI_TYPE_ANY,
+ AML_CLASS_INTERNAL, AML_TYPE_BOGUS, AML_HAS_ARGS),
+/* 80 */ ACPI_OP("-ExtAccessField-", ARGP_CONNECTFIELD_OP,
+ ARGI_CONNECTFIELD_OP, ACPI_TYPE_ANY,
+ AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0)
/*! [End] no source code translation !*/
};
@@ -657,7 +666,7 @@ static const u8 acpi_gbl_short_op_index[256] = {
/* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX,
/* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, 0x7D,
-/* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x38 */ 0x7F, 0x80, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x40 */ _UNK, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
/* 0x48 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
/* 0x50 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
diff --git a/trunk/drivers/acpi/acpica/psparse.c b/trunk/drivers/acpi/acpica/psparse.c
index 9bb0cbd37b5e..2ff9c35a1968 100644
--- a/trunk/drivers/acpi/acpica/psparse.c
+++ b/trunk/drivers/acpi/acpica/psparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/psscope.c b/trunk/drivers/acpi/acpica/psscope.c
index a5faa1323a02..c872aa4b926e 100644
--- a/trunk/drivers/acpi/acpica/psscope.c
+++ b/trunk/drivers/acpi/acpica/psscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/pstree.c b/trunk/drivers/acpi/acpica/pstree.c
index f1464c03aa42..2b03cdbbe1c0 100644
--- a/trunk/drivers/acpi/acpica/pstree.c
+++ b/trunk/drivers/acpi/acpica/pstree.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -74,6 +74,12 @@ union acpi_parse_object *acpi_ps_get_arg(union acpi_parse_object *op, u32 argn)
ACPI_FUNCTION_ENTRY();
+/*
+ if (Op->Common.aml_opcode == AML_INT_CONNECTION_OP)
+ {
+ return (Op->Common.Value.Arg);
+ }
+*/
/* Get the info structure for this opcode */
op_info = acpi_ps_get_opcode_info(op->common.aml_opcode);
diff --git a/trunk/drivers/acpi/acpica/psutils.c b/trunk/drivers/acpi/acpica/psutils.c
index 7eda78503422..13bb131ae125 100644
--- a/trunk/drivers/acpi/acpica/psutils.c
+++ b/trunk/drivers/acpi/acpica/psutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/pswalk.c b/trunk/drivers/acpi/acpica/pswalk.c
index 3312d6368bf1..ab96cf47896d 100644
--- a/trunk/drivers/acpi/acpica/pswalk.c
+++ b/trunk/drivers/acpi/acpica/pswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/psxface.c b/trunk/drivers/acpi/acpica/psxface.c
index 8086805d4494..9d98c5ff66a5 100644
--- a/trunk/drivers/acpi/acpica/psxface.c
+++ b/trunk/drivers/acpi/acpica/psxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/rsaddr.c b/trunk/drivers/acpi/acpica/rsaddr.c
index 9e66f9078426..a0305652394f 100644
--- a/trunk/drivers/acpi/acpica/rsaddr.c
+++ b/trunk/drivers/acpi/acpica/rsaddr.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/rscalc.c b/trunk/drivers/acpi/acpica/rscalc.c
index 3a8a89ec2ca4..3c6df4b7eb2d 100644
--- a/trunk/drivers/acpi/acpica/rscalc.c
+++ b/trunk/drivers/acpi/acpica/rscalc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -313,6 +313,38 @@ acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed)
resource_source));
break;
+ case ACPI_RESOURCE_TYPE_GPIO:
+
+ total_size =
+ (acpi_rs_length) (total_size +
+ (resource->data.gpio.
+ pin_table_length * 2) +
+ resource->data.gpio.
+ resource_source.string_length +
+ resource->data.gpio.
+ vendor_length);
+
+ break;
+
+ case ACPI_RESOURCE_TYPE_SERIAL_BUS:
+
+ total_size =
+ acpi_gbl_aml_resource_serial_bus_sizes[resource->
+ data.
+ common_serial_bus.
+ type];
+
+ total_size = (acpi_rs_length) (total_size +
+ resource->data.
+ i2c_serial_bus.
+ resource_source.
+ string_length +
+ resource->data.
+ i2c_serial_bus.
+ vendor_length);
+
+ break;
+
default:
break;
}
@@ -362,10 +394,11 @@ acpi_rs_get_list_length(u8 * aml_buffer,
u32 extra_struct_bytes;
u8 resource_index;
u8 minimum_aml_resource_length;
+ union aml_resource *aml_resource;
ACPI_FUNCTION_TRACE(rs_get_list_length);
- *size_needed = 0;
+ *size_needed = ACPI_RS_SIZE_MIN; /* Minimum size is one end_tag */
end_aml = aml_buffer + aml_buffer_length;
/* Walk the list of AML resource descriptors */
@@ -376,9 +409,15 @@ acpi_rs_get_list_length(u8 * aml_buffer,
status = acpi_ut_validate_resource(aml_buffer, &resource_index);
if (ACPI_FAILURE(status)) {
+ /*
+ * Exit on failure. Cannot continue because the descriptor length
+ * may be bogus also.
+ */
return_ACPI_STATUS(status);
}
+ aml_resource = (void *)aml_buffer;
+
/* Get the resource length and base (minimum) AML size */
resource_length = acpi_ut_get_resource_length(aml_buffer);
@@ -422,10 +461,8 @@ acpi_rs_get_list_length(u8 * aml_buffer,
case ACPI_RESOURCE_NAME_END_TAG:
/*
- * End Tag:
- * This is the normal exit, add size of end_tag
+ * End Tag: This is the normal exit
*/
- *size_needed += ACPI_RS_SIZE_MIN;
return_ACPI_STATUS(AE_OK);
case ACPI_RESOURCE_NAME_ADDRESS32:
@@ -457,6 +494,33 @@ acpi_rs_get_list_length(u8 * aml_buffer,
minimum_aml_resource_length);
break;
+ case ACPI_RESOURCE_NAME_GPIO:
+
+ /* Vendor data is optional */
+
+ if (aml_resource->gpio.vendor_length) {
+ extra_struct_bytes +=
+ aml_resource->gpio.vendor_offset -
+ aml_resource->gpio.pin_table_offset +
+ aml_resource->gpio.vendor_length;
+ } else {
+ extra_struct_bytes +=
+ aml_resource->large_header.resource_length +
+ sizeof(struct aml_resource_large_header) -
+ aml_resource->gpio.pin_table_offset;
+ }
+ break;
+
+ case ACPI_RESOURCE_NAME_SERIAL_BUS:
+
+ minimum_aml_resource_length =
+ acpi_gbl_resource_aml_serial_bus_sizes
+ [aml_resource->common_serial_bus.type];
+ extra_struct_bytes +=
+ aml_resource->common_serial_bus.resource_length -
+ minimum_aml_resource_length;
+ break;
+
default:
break;
}
@@ -467,9 +531,18 @@ acpi_rs_get_list_length(u8 * aml_buffer,
* Important: Round the size up for the appropriate alignment. This
* is a requirement on IA64.
*/
- buffer_size = acpi_gbl_resource_struct_sizes[resource_index] +
- extra_struct_bytes;
- buffer_size = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size);
+ if (acpi_ut_get_resource_type(aml_buffer) ==
+ ACPI_RESOURCE_NAME_SERIAL_BUS) {
+ buffer_size =
+ acpi_gbl_resource_struct_serial_bus_sizes
+ [aml_resource->common_serial_bus.type] +
+ extra_struct_bytes;
+ } else {
+ buffer_size =
+ acpi_gbl_resource_struct_sizes[resource_index] +
+ extra_struct_bytes;
+ }
+ buffer_size = (u32)ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size);
*size_needed += buffer_size;
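
For the new serial-bus descriptors the converted structure size is taken from a per-bus-type table and then grown by the variable tail (resource source string plus vendor data) before the native-word round-up, as computed above. A hedged restatement of that sizing; the function name and parameters are illustrative, and acpi_gbl_resource_struct_serial_bus_sizes is only visible inside the ACPICA resource manager.

#include <acpi/acpi.h>	/* assumed build context; internal globals are not exported here */

static u32 sketch_serial_bus_struct_size(u8 bus_type, u32 aml_resource_length,
					 u32 minimum_aml_length)
{
	/* Variable tail: vendor data and resource source string */
	u32 extra = aml_resource_length - minimum_aml_length;
	u32 size = acpi_gbl_resource_struct_serial_bus_sizes[bus_type] + extra;

	return (u32)ACPI_ROUND_UP_TO_NATIVE_WORD(size);
}
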
diff --git a/trunk/drivers/acpi/acpica/rscreate.c b/trunk/drivers/acpi/acpica/rscreate.c
index 4ce6e1147e80..46d6eb38ae66 100644
--- a/trunk/drivers/acpi/acpica/rscreate.c
+++ b/trunk/drivers/acpi/acpica/rscreate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -49,6 +49,70 @@
#define _COMPONENT ACPI_RESOURCES
ACPI_MODULE_NAME("rscreate")
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_buffer_to_resource
+ *
+ * PARAMETERS: aml_buffer - Pointer to the resource byte stream
+ * aml_buffer_length - Length of the aml_buffer
+ * resource_ptr - Where the converted resource is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Convert a raw AML buffer to a resource list
+ *
+ ******************************************************************************/
+acpi_status
+acpi_buffer_to_resource(u8 *aml_buffer,
+ u16 aml_buffer_length,
+ struct acpi_resource **resource_ptr)
+{
+ acpi_status status;
+ acpi_size list_size_needed;
+ void *resource;
+ void *current_resource_ptr;
+
+ /*
+ * Note: we allow AE_AML_NO_RESOURCE_END_TAG, since an end tag
+ * is not required here.
+ */
+
+ /* Get the required length for the converted resource */
+
+ status = acpi_rs_get_list_length(aml_buffer, aml_buffer_length,
+ &list_size_needed);
+ if (status == AE_AML_NO_RESOURCE_END_TAG) {
+ status = AE_OK;
+ }
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Allocate a buffer for the converted resource */
+
+ resource = ACPI_ALLOCATE_ZEROED(list_size_needed);
+ current_resource_ptr = resource;
+ if (!resource) {
+ return (AE_NO_MEMORY);
+ }
+
+ /* Perform the AML-to-Resource conversion */
+
+ status = acpi_ut_walk_aml_resources(aml_buffer, aml_buffer_length,
+ acpi_rs_convert_aml_to_resources,
+ &current_resource_ptr);
+ if (status == AE_AML_NO_RESOURCE_END_TAG) {
+ status = AE_OK;
+ }
+ if (ACPI_FAILURE(status)) {
+ ACPI_FREE(resource);
+ } else {
+ *resource_ptr = resource;
+ }
+
+ return (status);
+}
+
/*******************************************************************************
*
* FUNCTION: acpi_rs_create_resource_list
@@ -66,9 +130,10 @@ ACPI_MODULE_NAME("rscreate")
* of device resources.
*
******************************************************************************/
+
acpi_status
acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
- struct acpi_buffer *output_buffer)
+ struct acpi_buffer * output_buffer)
{
acpi_status status;
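
acpi_buffer_to_resource() introduced above converts a raw AML resource byte stream, such as the Connection() buffer handed to a GenericSerialBus handler, into a list of struct acpi_resource. A minimal hedged usage sketch; the wrapper name and its aml_buffer/aml_buffer_length parameters stand for caller-provided inputs and are not defined by this patch.

#include <acpi/acpi.h>	/* assumed kernel build context */

static acpi_status
sketch_convert_connection_buffer(u8 *aml_buffer, u16 aml_buffer_length)
{
	struct acpi_resource *resource;
	acpi_status status;

	status = acpi_buffer_to_resource(aml_buffer, aml_buffer_length, &resource);
	if (ACPI_FAILURE(status))
		return status;

	/* ... walk the list, e.g. looking for ACPI_RESOURCE_TYPE_SERIAL_BUS ... */

	ACPI_FREE(resource);	/* caller of acpi_buffer_to_resource owns the list */
	return AE_OK;
}
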
diff --git a/trunk/drivers/acpi/acpica/rsdump.c b/trunk/drivers/acpi/acpica/rsdump.c
index 33db7520c74b..b4c581132393 100644
--- a/trunk/drivers/acpi/acpica/rsdump.c
+++ b/trunk/drivers/acpi/acpica/rsdump.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -61,11 +61,13 @@ static void acpi_rs_out_integer64(char *title, u64 value);
static void acpi_rs_out_title(char *title);
-static void acpi_rs_dump_byte_list(u16 length, u8 * data);
+static void acpi_rs_dump_byte_list(u16 length, u8 *data);
-static void acpi_rs_dump_dword_list(u8 length, u32 * data);
+static void acpi_rs_dump_word_list(u16 length, u16 *data);
-static void acpi_rs_dump_short_byte_list(u8 length, u8 * data);
+static void acpi_rs_dump_dword_list(u8 length, u32 *data);
+
+static void acpi_rs_dump_short_byte_list(u8 length, u8 *data);
static void
acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source);
@@ -309,6 +311,125 @@ struct acpi_rsdump_info acpi_rs_dump_generic_reg[6] = {
{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(generic_reg.address), "Address", NULL}
};
+struct acpi_rsdump_info acpi_rs_dump_gpio[16] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_gpio), "GPIO", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.revision_id), "RevisionId", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.connection_type),
+ "ConnectionType", acpi_gbl_ct_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.producer_consumer),
+ "ProducerConsumer", acpi_gbl_consume_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.pin_config), "PinConfig",
+ acpi_gbl_ppc_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.sharable), "Sharable",
+ acpi_gbl_shr_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.io_restriction),
+ "IoRestriction", acpi_gbl_ior_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.triggering), "Triggering",
+ acpi_gbl_he_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.polarity), "Polarity",
+ acpi_gbl_ll_decode},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.drive_strength), "DriveStrength",
+ NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.debounce_timeout),
+ "DebounceTimeout", NULL},
+ {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(gpio.resource_source),
+ "ResourceSource", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.pin_table_length),
+ "PinTableLength", NULL},
+ {ACPI_RSD_WORDLIST, ACPI_RSD_OFFSET(gpio.pin_table), "PinTable", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.vendor_length), "VendorLength",
+ NULL},
+ {ACPI_RSD_SHORTLISTX, ACPI_RSD_OFFSET(gpio.vendor_data), "VendorData",
+ NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_fixed_dma[4] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_dma),
+ "FixedDma", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.request_lines),
+ "RequestLines", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.channels), "Channels",
+ NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(fixed_dma.width), "TransferWidth",
+ acpi_gbl_dts_decode},
+};
+
+#define ACPI_RS_DUMP_COMMON_SERIAL_BUS \
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.revision_id), "RevisionId", NULL}, \
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type), "Type", acpi_gbl_sbt_decode}, \
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.producer_consumer), "ProducerConsumer", acpi_gbl_consume_decode}, \
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.slave_mode), "SlaveMode", acpi_gbl_sm_decode}, \
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type_revision_id), "TypeRevisionId", NULL}, \
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.type_data_length), "TypeDataLength", NULL}, \
+ {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET (common_serial_bus.resource_source), "ResourceSource", NULL}, \
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.vendor_length), "VendorLength", NULL}, \
+ {ACPI_RSD_SHORTLISTX,ACPI_RSD_OFFSET (common_serial_bus.vendor_data), "VendorData", NULL},
+
+struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[10] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_common_serial_bus),
+ "Common Serial Bus", NULL},
+ ACPI_RS_DUMP_COMMON_SERIAL_BUS
+};
+
+struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[13] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_i2c_serial_bus),
+ "I2C Serial Bus", NULL},
+ ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
+ ACPI_RSD_OFFSET(i2c_serial_bus.
+ access_mode),
+ "AccessMode", acpi_gbl_am_decode},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(i2c_serial_bus.connection_speed),
+ "ConnectionSpeed", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(i2c_serial_bus.slave_address),
+ "SlaveAddress", NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[17] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_spi_serial_bus),
+ "Spi Serial Bus", NULL},
+ ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
+ ACPI_RSD_OFFSET(spi_serial_bus.
+ wire_mode), "WireMode",
+ acpi_gbl_wm_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(spi_serial_bus.device_polarity),
+ "DevicePolarity", acpi_gbl_dp_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.data_bit_length),
+ "DataBitLength", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_phase),
+ "ClockPhase", acpi_gbl_cph_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_polarity),
+ "ClockPolarity", acpi_gbl_cpo_decode},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(spi_serial_bus.device_selection),
+ "DeviceSelection", NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(spi_serial_bus.connection_speed),
+ "ConnectionSpeed", NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[19] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_uart_serial_bus),
+ "Uart Serial Bus", NULL},
+ ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_2BITFLAG,
+ ACPI_RSD_OFFSET(uart_serial_bus.
+ flow_control),
+ "FlowControl", acpi_gbl_fc_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.stop_bits),
+ "StopBits", acpi_gbl_sb_decode},
+ {ACPI_RSD_3BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.data_bits),
+ "DataBits", acpi_gbl_bpb_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.endian), "Endian",
+ acpi_gbl_ed_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.parity), "Parity",
+ acpi_gbl_pt_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.lines_enabled),
+ "LinesEnabled", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.rx_fifo_size),
+ "RxFifoSize", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.tx_fifo_size),
+ "TxFifoSize", NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(uart_serial_bus.default_baud_rate),
+ "ConnectionSpeed", NULL},
+};
+
/*
* Tables used for common address descriptor flag fields
*/
@@ -413,7 +534,14 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
/* Data items, 8/16/32/64 bit */
case ACPI_RSD_UINT8:
- acpi_rs_out_integer8(name, ACPI_GET8(target));
+ if (table->pointer) {
+ acpi_rs_out_string(name, ACPI_CAST_PTR(char,
+ table->
+ pointer
+ [*target]));
+ } else {
+ acpi_rs_out_integer8(name, ACPI_GET8(target));
+ }
break;
case ACPI_RSD_UINT16:
@@ -444,6 +572,13 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
0x03]));
break;
+ case ACPI_RSD_3BITFLAG:
+ acpi_rs_out_string(name, ACPI_CAST_PTR(char,
+ table->
+ pointer[*target &
+ 0x07]));
+ break;
+
case ACPI_RSD_SHORTLIST:
/*
* Short byte list (single line output) for DMA and IRQ resources
@@ -456,6 +591,20 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
}
break;
+ case ACPI_RSD_SHORTLISTX:
+ /*
+ * Short byte list (single line output) for GPIO vendor data
+ * Note: The list length is obtained from the previous table entry
+ */
+ if (previous_target) {
+ acpi_rs_out_title(name);
+ acpi_rs_dump_short_byte_list(*previous_target,
+ *
+ (ACPI_CAST_INDIRECT_PTR
+ (u8, target)));
+ }
+ break;
+
case ACPI_RSD_LONGLIST:
/*
* Long byte list for Vendor resource data
@@ -480,6 +629,18 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
}
break;
+ case ACPI_RSD_WORDLIST:
+ /*
+ * Word list for GPIO Pin Table
+ * Note: The list length is obtained from the previous table entry
+ */
+ if (previous_target) {
+ acpi_rs_dump_word_list(*previous_target,
+ *(ACPI_CAST_INDIRECT_PTR
+ (u16, target)));
+ }
+ break;
+
case ACPI_RSD_ADDRESS:
/*
* Common flags for all Address resources
@@ -627,14 +788,20 @@ void acpi_rs_dump_resource_list(struct acpi_resource *resource_list)
/* Dump the resource descriptor */
- acpi_rs_dump_descriptor(&resource_list->data,
- acpi_gbl_dump_resource_dispatch[type]);
+ if (type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+ acpi_rs_dump_descriptor(&resource_list->data,
+ acpi_gbl_dump_serial_bus_dispatch
+ [resource_list->data.
+ common_serial_bus.type]);
+ } else {
+ acpi_rs_dump_descriptor(&resource_list->data,
+ acpi_gbl_dump_resource_dispatch
+ [type]);
+ }
/* Point to the next resource structure */
- resource_list =
- ACPI_ADD_PTR(struct acpi_resource, resource_list,
- resource_list->length);
+ resource_list = ACPI_NEXT_RESOURCE(resource_list);
/* Exit when END_TAG descriptor is reached */
@@ -768,4 +935,13 @@ static void acpi_rs_dump_dword_list(u8 length, u32 * data)
}
}
+static void acpi_rs_dump_word_list(u16 length, u16 *data)
+{
+ u16 i;
+
+ for (i = 0; i < length; i++) {
+ acpi_os_printf("%25s%2.2X : %4.4X\n", "Word", i, data[i]);
+ }
+}
+
#endif
diff --git a/trunk/drivers/acpi/acpica/rsinfo.c b/trunk/drivers/acpi/acpica/rsinfo.c
index f9ea60872aa4..a9fa5158200b 100644
--- a/trunk/drivers/acpi/acpica/rsinfo.c
+++ b/trunk/drivers/acpi/acpica/rsinfo.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -76,7 +76,10 @@ struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[] = {
acpi_rs_convert_address64, /* 0x0D, ACPI_RESOURCE_TYPE_ADDRESS64 */
acpi_rs_convert_ext_address64, /* 0x0E, ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
acpi_rs_convert_ext_irq, /* 0x0F, ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
- acpi_rs_convert_generic_reg /* 0x10, ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+ acpi_rs_convert_generic_reg, /* 0x10, ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+ acpi_rs_convert_gpio, /* 0x11, ACPI_RESOURCE_TYPE_GPIO */
+ acpi_rs_convert_fixed_dma, /* 0x12, ACPI_RESOURCE_TYPE_FIXED_DMA */
+ NULL, /* 0x13, ACPI_RESOURCE_TYPE_SERIAL_BUS - Use subtype table below */
};
/* Dispatch tables for AML-to-resource (Get Resource) conversion functions */
@@ -94,7 +97,7 @@ struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[] = {
acpi_rs_convert_end_dpf, /* 0x07, ACPI_RESOURCE_NAME_END_DEPENDENT */
acpi_rs_convert_io, /* 0x08, ACPI_RESOURCE_NAME_IO */
acpi_rs_convert_fixed_io, /* 0x09, ACPI_RESOURCE_NAME_FIXED_IO */
- NULL, /* 0x0A, Reserved */
+ acpi_rs_convert_fixed_dma, /* 0x0A, ACPI_RESOURCE_NAME_FIXED_DMA */
NULL, /* 0x0B, Reserved */
NULL, /* 0x0C, Reserved */
NULL, /* 0x0D, Reserved */
@@ -114,7 +117,19 @@ struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[] = {
acpi_rs_convert_address16, /* 0x08, ACPI_RESOURCE_NAME_ADDRESS16 */
acpi_rs_convert_ext_irq, /* 0x09, ACPI_RESOURCE_NAME_EXTENDED_IRQ */
acpi_rs_convert_address64, /* 0x0A, ACPI_RESOURCE_NAME_ADDRESS64 */
- acpi_rs_convert_ext_address64 /* 0x0B, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 */
+ acpi_rs_convert_ext_address64, /* 0x0B, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 */
+ acpi_rs_convert_gpio, /* 0x0C, ACPI_RESOURCE_NAME_GPIO */
+ NULL, /* 0x0D, Reserved */
+ NULL, /* 0x0E, ACPI_RESOURCE_NAME_SERIAL_BUS - Use subtype table below */
+};
+
+/* Subtype table for serial_bus -- I2C, SPI, and UART */
+
+struct acpi_rsconvert_info *acpi_gbl_convert_resource_serial_bus_dispatch[] = {
+ NULL,
+ acpi_rs_convert_i2c_serial_bus,
+ acpi_rs_convert_spi_serial_bus,
+ acpi_rs_convert_uart_serial_bus,
};
#ifdef ACPI_FUTURE_USAGE
@@ -140,6 +155,16 @@ struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[] = {
acpi_rs_dump_ext_address64, /* ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
acpi_rs_dump_ext_irq, /* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
acpi_rs_dump_generic_reg, /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+ acpi_rs_dump_gpio, /* ACPI_RESOURCE_TYPE_GPIO */
+ acpi_rs_dump_fixed_dma, /* ACPI_RESOURCE_TYPE_FIXED_DMA */
+ NULL, /* ACPI_RESOURCE_TYPE_SERIAL_BUS */
+};
+
+struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[] = {
+ NULL,
+ acpi_rs_dump_i2c_serial_bus, /* AML_RESOURCE_I2C_BUS_TYPE */
+ acpi_rs_dump_spi_serial_bus, /* AML_RESOURCE_SPI_BUS_TYPE */
+ acpi_rs_dump_uart_serial_bus, /* AML_RESOURCE_UART_BUS_TYPE */
};
#endif
@@ -166,7 +191,10 @@ const u8 acpi_gbl_aml_resource_sizes[] = {
sizeof(struct aml_resource_address64), /* ACPI_RESOURCE_TYPE_ADDRESS64 */
sizeof(struct aml_resource_extended_address64), /*ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
sizeof(struct aml_resource_extended_irq), /* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
- sizeof(struct aml_resource_generic_register) /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+ sizeof(struct aml_resource_generic_register), /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+ sizeof(struct aml_resource_gpio), /* ACPI_RESOURCE_TYPE_GPIO */
+ sizeof(struct aml_resource_fixed_dma), /* ACPI_RESOURCE_TYPE_FIXED_DMA */
+ sizeof(struct aml_resource_common_serialbus), /* ACPI_RESOURCE_TYPE_SERIAL_BUS */
};
const u8 acpi_gbl_resource_struct_sizes[] = {
@@ -182,7 +210,7 @@ const u8 acpi_gbl_resource_struct_sizes[] = {
ACPI_RS_SIZE_MIN,
ACPI_RS_SIZE(struct acpi_resource_io),
ACPI_RS_SIZE(struct acpi_resource_fixed_io),
- 0,
+ ACPI_RS_SIZE(struct acpi_resource_fixed_dma),
0,
0,
0,
@@ -202,5 +230,21 @@ const u8 acpi_gbl_resource_struct_sizes[] = {
ACPI_RS_SIZE(struct acpi_resource_address16),
ACPI_RS_SIZE(struct acpi_resource_extended_irq),
ACPI_RS_SIZE(struct acpi_resource_address64),
- ACPI_RS_SIZE(struct acpi_resource_extended_address64)
+ ACPI_RS_SIZE(struct acpi_resource_extended_address64),
+ ACPI_RS_SIZE(struct acpi_resource_gpio),
+ ACPI_RS_SIZE(struct acpi_resource_common_serialbus)
+};
+
+const u8 acpi_gbl_aml_resource_serial_bus_sizes[] = {
+ 0,
+ sizeof(struct aml_resource_i2c_serialbus),
+ sizeof(struct aml_resource_spi_serialbus),
+ sizeof(struct aml_resource_uart_serialbus),
+};
+
+const u8 acpi_gbl_resource_struct_serial_bus_sizes[] = {
+ 0,
+ ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus),
+ ACPI_RS_SIZE(struct acpi_resource_spi_serialbus),
+ ACPI_RS_SIZE(struct acpi_resource_uart_serialbus),
};
diff --git a/trunk/drivers/acpi/acpica/rsio.c b/trunk/drivers/acpi/acpica/rsio.c
index 0c7efef008be..f6a081057a22 100644
--- a/trunk/drivers/acpi/acpica/rsio.c
+++ b/trunk/drivers/acpi/acpica/rsio.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/rsirq.c b/trunk/drivers/acpi/acpica/rsirq.c
index 50b8ad211167..e23a9ec248cb 100644
--- a/trunk/drivers/acpi/acpica/rsirq.c
+++ b/trunk/drivers/acpi/acpica/rsirq.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -264,3 +264,34 @@ struct acpi_rsconvert_info acpi_rs_convert_dma[6] = {
AML_OFFSET(dma.dma_channel_mask),
ACPI_RS_OFFSET(data.dma.channel_count)}
};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_fixed_dma
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[4] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_FIXED_DMA,
+ ACPI_RS_SIZE(struct acpi_resource_fixed_dma),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_fixed_dma)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_FIXED_DMA,
+ sizeof(struct aml_resource_fixed_dma),
+ 0},
+
+ /*
+ * These fields are contiguous in both the source and destination:
+ * request_lines
+ * Channels
+ */
+
+ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.fixed_dma.request_lines),
+ AML_OFFSET(fixed_dma.request_lines),
+ 2},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.fixed_dma.width),
+ AML_OFFSET(fixed_dma.width),
+ 1},
+
+};
diff --git a/trunk/drivers/acpi/acpica/rslist.c b/trunk/drivers/acpi/acpica/rslist.c
index 1bfcef736c50..9be129f5d6f4 100644
--- a/trunk/drivers/acpi/acpica/rslist.c
+++ b/trunk/drivers/acpi/acpica/rslist.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,8 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
struct acpi_resource **resource_ptr =
ACPI_CAST_INDIRECT_PTR(struct acpi_resource, context);
struct acpi_resource *resource;
+ union aml_resource *aml_resource;
+ struct acpi_rsconvert_info *conversion_table;
acpi_status status;
ACPI_FUNCTION_TRACE(rs_convert_aml_to_resources);
@@ -84,14 +86,37 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
"Misaligned resource pointer %p", resource));
}
+ /* Get the appropriate conversion info table */
+
+ aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
+ if (acpi_ut_get_resource_type(aml) == ACPI_RESOURCE_NAME_SERIAL_BUS) {
+ if (aml_resource->common_serial_bus.type >
+ AML_RESOURCE_MAX_SERIALBUSTYPE) {
+ conversion_table = NULL;
+ } else {
+ /* This is an I2C, SPI, or UART serial_bus descriptor */
+
+ conversion_table =
+ acpi_gbl_convert_resource_serial_bus_dispatch
+ [aml_resource->common_serial_bus.type];
+ }
+ } else {
+ conversion_table =
+ acpi_gbl_get_resource_dispatch[resource_index];
+ }
+
+ if (!conversion_table) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid/unsupported resource descriptor: Type 0x%2.2X",
+ resource_index));
+ return (AE_AML_INVALID_RESOURCE_TYPE);
+ }
+
/* Convert the AML byte stream resource to a local resource struct */
status =
- acpi_rs_convert_aml_to_resource(resource,
- ACPI_CAST_PTR(union aml_resource,
- aml),
- acpi_gbl_get_resource_dispatch
- [resource_index]);
+ acpi_rs_convert_aml_to_resource(resource, aml_resource,
+ conversion_table);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not convert AML resource (Type 0x%X)",
@@ -106,7 +131,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
/* Point to the next structure in the output buffer */
- *resource_ptr = ACPI_ADD_PTR(void, resource, resource->length);
+ *resource_ptr = ACPI_NEXT_RESOURCE(resource);
return_ACPI_STATUS(AE_OK);
}
@@ -135,6 +160,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
{
u8 *aml = output_buffer;
u8 *end_aml = output_buffer + aml_size_needed;
+ struct acpi_rsconvert_info *conversion_table;
acpi_status status;
ACPI_FUNCTION_TRACE(rs_convert_resources_to_aml);
@@ -154,11 +180,34 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
/* Perform the conversion */
- status = acpi_rs_convert_resource_to_aml(resource, ACPI_CAST_PTR(union
- aml_resource,
- aml),
- acpi_gbl_set_resource_dispatch
- [resource->type]);
+ if (resource->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+ if (resource->data.common_serial_bus.type >
+ AML_RESOURCE_MAX_SERIALBUSTYPE) {
+ conversion_table = NULL;
+ } else {
+ /* This is an I2C, SPI, or UART serial_bus descriptor */
+
+ conversion_table =
+ acpi_gbl_convert_resource_serial_bus_dispatch
+ [resource->data.common_serial_bus.type];
+ }
+ } else {
+ conversion_table =
+ acpi_gbl_set_resource_dispatch[resource->type];
+ }
+
+ if (!conversion_table) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid/unsupported resource descriptor: Type 0x%2.2X",
+ resource->type));
+ return (AE_AML_INVALID_RESOURCE_TYPE);
+ }
+
+ status = acpi_rs_convert_resource_to_aml(resource,
+ ACPI_CAST_PTR(union
+ aml_resource,
+ aml),
+ conversion_table);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not convert resource (type 0x%X) to AML",
@@ -192,9 +241,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
/* Point to the next input resource descriptor */
- resource =
- ACPI_ADD_PTR(struct acpi_resource, resource,
- resource->length);
+ resource = ACPI_NEXT_RESOURCE(resource);
}
/* Completed buffer, but did not find an end_tag resource descriptor */
diff --git a/trunk/drivers/acpi/acpica/rsmemory.c b/trunk/drivers/acpi/acpica/rsmemory.c
index 7cc6d8625f1e..4fd611ad02b4 100644
--- a/trunk/drivers/acpi/acpica/rsmemory.c
+++ b/trunk/drivers/acpi/acpica/rsmemory.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/rsmisc.c b/trunk/drivers/acpi/acpica/rsmisc.c
index 410264b22a29..8073b371cc7c 100644
--- a/trunk/drivers/acpi/acpica/rsmisc.c
+++ b/trunk/drivers/acpi/acpica/rsmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -83,6 +83,10 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
ACPI_FUNCTION_TRACE(rs_convert_aml_to_resource);
+ if (!info) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
if (((acpi_size) resource) & 0x3) {
/* Each internal resource struct is expected to be 32-bit aligned */
@@ -101,7 +105,6 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
* table length (# of table entries)
*/
count = INIT_TABLE_LENGTH(info);
-
while (count) {
/*
* Source is the external AML byte stream buffer,
@@ -145,6 +148,14 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
((ACPI_GET8(source) >> info->value) & 0x03);
break;
+ case ACPI_RSC_3BITFLAG:
+ /*
+ * Mask and shift the flag bits
+ */
+ ACPI_SET8(destination) = (u8)
+ ((ACPI_GET8(source) >> info->value) & 0x07);
+ break;
+
case ACPI_RSC_COUNT:
item_count = ACPI_GET8(source);
@@ -163,6 +174,69 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
(info->value * (item_count - 1));
break;
+ case ACPI_RSC_COUNT_GPIO_PIN:
+
+ target = ACPI_ADD_PTR(void, aml, info->value);
+ item_count = ACPI_GET16(target) - ACPI_GET16(source);
+
+ resource->length = resource->length + item_count;
+ item_count = item_count / 2;
+ ACPI_SET16(destination) = item_count;
+ break;
+
+ case ACPI_RSC_COUNT_GPIO_VEN:
+
+ item_count = ACPI_GET8(source);
+ ACPI_SET8(destination) = (u8)item_count;
+
+ resource->length = resource->length +
+ (info->value * item_count);
+ break;
+
+ case ACPI_RSC_COUNT_GPIO_RES:
+
+ /*
+ * Vendor data is optional (length/offset may both be zero)
+ * Examine vendor data length field first
+ */
+ target = ACPI_ADD_PTR(void, aml, (info->value + 2));
+ if (ACPI_GET16(target)) {
+
+ /* Use vendor offset to get resource source length */
+
+ target = ACPI_ADD_PTR(void, aml, info->value);
+ item_count =
+ ACPI_GET16(target) - ACPI_GET16(source);
+ } else {
+ /* No vendor data to worry about */
+
+ item_count = aml->large_header.resource_length +
+ sizeof(struct aml_resource_large_header) -
+ ACPI_GET16(source);
+ }
+
+ resource->length = resource->length + item_count;
+ ACPI_SET16(destination) = item_count;
+ break;
+
+ case ACPI_RSC_COUNT_SERIAL_VEN:
+
+ item_count = ACPI_GET16(source) - info->value;
+
+ resource->length = resource->length + item_count;
+ ACPI_SET16(destination) = item_count;
+ break;
+
+ case ACPI_RSC_COUNT_SERIAL_RES:
+
+ item_count = (aml_resource_length +
+ sizeof(struct aml_resource_large_header))
+ - ACPI_GET16(source) - info->value;
+
+ resource->length = resource->length + item_count;
+ ACPI_SET16(destination) = item_count;
+ break;
+
case ACPI_RSC_LENGTH:
resource->length = resource->length + info->value;
@@ -183,6 +257,72 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
info->opcode);
break;
+ case ACPI_RSC_MOVE_GPIO_PIN:
+
+ /* Generate and set the PIN data pointer */
+
+ target = (char *)ACPI_ADD_PTR(void, resource,
+ (resource->length -
+ item_count * 2));
+ *(u16 **)destination = ACPI_CAST_PTR(u16, target);
+
+ /* Copy the PIN data */
+
+ source = ACPI_ADD_PTR(void, aml, ACPI_GET16(source));
+ acpi_rs_move_data(target, source, item_count,
+ info->opcode);
+ break;
+
+ case ACPI_RSC_MOVE_GPIO_RES:
+
+ /* Generate and set the resource_source string pointer */
+
+ target = (char *)ACPI_ADD_PTR(void, resource,
+ (resource->length -
+ item_count));
+ *(u8 **)destination = ACPI_CAST_PTR(u8, target);
+
+ /* Copy the resource_source string */
+
+ source = ACPI_ADD_PTR(void, aml, ACPI_GET16(source));
+ acpi_rs_move_data(target, source, item_count,
+ info->opcode);
+ break;
+
+ case ACPI_RSC_MOVE_SERIAL_VEN:
+
+ /* Generate and set the Vendor Data pointer */
+
+ target = (char *)ACPI_ADD_PTR(void, resource,
+ (resource->length -
+ item_count));
+ *(u8 **)destination = ACPI_CAST_PTR(u8, target);
+
+ /* Copy the Vendor Data */
+
+ source = ACPI_ADD_PTR(void, aml, info->value);
+ acpi_rs_move_data(target, source, item_count,
+ info->opcode);
+ break;
+
+ case ACPI_RSC_MOVE_SERIAL_RES:
+
+ /* Generate and set the resource_source string pointer */
+
+ target = (char *)ACPI_ADD_PTR(void, resource,
+ (resource->length -
+ item_count));
+ *(u8 **)destination = ACPI_CAST_PTR(u8, target);
+
+ /* Copy the resource_source string */
+
+ source =
+ ACPI_ADD_PTR(void, aml,
+ (ACPI_GET16(source) + info->value));
+ acpi_rs_move_data(target, source, item_count,
+ info->opcode);
+ break;
+
case ACPI_RSC_SET8:
ACPI_MEMSET(destination, info->aml_offset, info->value);
@@ -219,13 +359,18 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
* Optional resource_source (Index and String). This is the more
* complicated case used by the Interrupt() macro
*/
- target =
- ACPI_ADD_PTR(char, resource,
- info->aml_offset + (item_count * 4));
+ target = ACPI_ADD_PTR(char, resource,
+ info->aml_offset +
+ (item_count * 4));
resource->length +=
acpi_rs_get_resource_source(aml_resource_length,
- (acpi_rs_length) (((item_count - 1) * sizeof(u32)) + info->value), destination, aml, target);
+ (acpi_rs_length)
+ (((item_count -
+ 1) * sizeof(u32)) +
+ info->value),
+ destination, aml,
+ target);
break;
case ACPI_RSC_BITMASK:
@@ -327,6 +472,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
{
void *source = NULL;
void *destination;
+ char *target;
acpi_rsdesc_size aml_length = 0;
u8 count;
u16 temp16 = 0;
@@ -334,6 +480,10 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
ACPI_FUNCTION_TRACE(rs_convert_resource_to_aml);
+ if (!info) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
/*
* First table entry must be ACPI_RSC_INITxxx and must contain the
* table length (# of table entries)
@@ -383,6 +533,14 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
((ACPI_GET8(source) & 0x03) << info->value);
break;
+ case ACPI_RSC_3BITFLAG:
+ /*
+ * Mask and shift the flag bits
+ */
+ ACPI_SET8(destination) |= (u8)
+ ((ACPI_GET8(source) & 0x07) << info->value);
+ break;
+
case ACPI_RSC_COUNT:
item_count = ACPI_GET8(source);
@@ -400,6 +558,63 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
acpi_rs_set_resource_length(aml_length, aml);
break;
+ case ACPI_RSC_COUNT_GPIO_PIN:
+
+ item_count = ACPI_GET16(source);
+ ACPI_SET16(destination) = (u16)aml_length;
+
+ aml_length = (u16)(aml_length + item_count * 2);
+ target = ACPI_ADD_PTR(void, aml, info->value);
+ ACPI_SET16(target) = (u16)aml_length;
+ acpi_rs_set_resource_length(aml_length, aml);
+ break;
+
+ case ACPI_RSC_COUNT_GPIO_VEN:
+
+ item_count = ACPI_GET16(source);
+ ACPI_SET16(destination) = (u16)item_count;
+
+ aml_length =
+ (u16)(aml_length + (info->value * item_count));
+ acpi_rs_set_resource_length(aml_length, aml);
+ break;
+
+ case ACPI_RSC_COUNT_GPIO_RES:
+
+ /* Set resource source string length */
+
+ item_count = ACPI_GET16(source);
+ ACPI_SET16(destination) = (u16)aml_length;
+
+ /* Compute offset for the Vendor Data */
+
+ aml_length = (u16)(aml_length + item_count);
+ target = ACPI_ADD_PTR(void, aml, info->value);
+
+ /* Set vendor offset only if there is vendor data */
+
+ if (resource->data.gpio.vendor_length) {
+ ACPI_SET16(target) = (u16)aml_length;
+ }
+
+ acpi_rs_set_resource_length(aml_length, aml);
+ break;
+
+ case ACPI_RSC_COUNT_SERIAL_VEN:
+
+ item_count = ACPI_GET16(source);
+ ACPI_SET16(destination) = item_count + info->value;
+ aml_length = (u16)(aml_length + item_count);
+ acpi_rs_set_resource_length(aml_length, aml);
+ break;
+
+ case ACPI_RSC_COUNT_SERIAL_RES:
+
+ item_count = ACPI_GET16(source);
+ aml_length = (u16)(aml_length + item_count);
+ acpi_rs_set_resource_length(aml_length, aml);
+ break;
+
case ACPI_RSC_LENGTH:
acpi_rs_set_resource_length(info->value, aml);
@@ -417,6 +632,48 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
info->opcode);
break;
+ case ACPI_RSC_MOVE_GPIO_PIN:
+
+ destination = (char *)ACPI_ADD_PTR(void, aml,
+ ACPI_GET16
+ (destination));
+ source = *(u16 **)source;
+ acpi_rs_move_data(destination, source, item_count,
+ info->opcode);
+ break;
+
+ case ACPI_RSC_MOVE_GPIO_RES:
+
+ /* Used for both resource_source string and vendor_data */
+
+ destination = (char *)ACPI_ADD_PTR(void, aml,
+ ACPI_GET16
+ (destination));
+ source = *(u8 **)source;
+ acpi_rs_move_data(destination, source, item_count,
+ info->opcode);
+ break;
+
+ case ACPI_RSC_MOVE_SERIAL_VEN:
+
+ destination = (char *)ACPI_ADD_PTR(void, aml,
+ (aml_length -
+ item_count));
+ source = *(u8 **)source;
+ acpi_rs_move_data(destination, source, item_count,
+ info->opcode);
+ break;
+
+ case ACPI_RSC_MOVE_SERIAL_RES:
+
+ destination = (char *)ACPI_ADD_PTR(void, aml,
+ (aml_length -
+ item_count));
+ source = *(u8 **)source;
+ acpi_rs_move_data(destination, source, item_count,
+ info->opcode);
+ break;
+
case ACPI_RSC_ADDRESS:
/* Set the Resource Type, General Flags, and Type-Specific Flags */
diff --git a/trunk/drivers/acpi/acpica/rsserial.c b/trunk/drivers/acpi/acpica/rsserial.c
new file mode 100644
index 000000000000..9aa5e689b444
--- /dev/null
+++ b/trunk/drivers/acpi/acpica/rsserial.c
@@ -0,0 +1,441 @@
+/*******************************************************************************
+ *
+ * Module Name: rsserial - GPIO/serial_bus resource descriptors
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acresrc.h"
+
+#define _COMPONENT ACPI_RESOURCES
+ACPI_MODULE_NAME("rsserial")
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_gpio
+ *
+ ******************************************************************************/
+struct acpi_rsconvert_info acpi_rs_convert_gpio[17] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_GPIO,
+ ACPI_RS_SIZE(struct acpi_resource_gpio),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_gpio)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_GPIO,
+ sizeof(struct aml_resource_gpio),
+ 0},
+
+ /*
+ * These fields are contiguous in both the source and destination:
+ * revision_id
+ * connection_type
+ */
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.revision_id),
+ AML_OFFSET(gpio.revision_id),
+ 2},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.producer_consumer),
+ AML_OFFSET(gpio.flags),
+ 0},
+
+ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.sharable),
+ AML_OFFSET(gpio.int_flags),
+ 3},
+
+ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.io_restriction),
+ AML_OFFSET(gpio.int_flags),
+ 0},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.triggering),
+ AML_OFFSET(gpio.int_flags),
+ 0},
+
+ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.polarity),
+ AML_OFFSET(gpio.int_flags),
+ 1},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.pin_config),
+ AML_OFFSET(gpio.pin_config),
+ 1},
+
+ /*
+ * These fields are contiguous in both the source and destination:
+ * drive_strength
+ * debounce_timeout
+ */
+ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.gpio.drive_strength),
+ AML_OFFSET(gpio.drive_strength),
+ 2},
+
+ /* Pin Table */
+
+ {ACPI_RSC_COUNT_GPIO_PIN, ACPI_RS_OFFSET(data.gpio.pin_table_length),
+ AML_OFFSET(gpio.pin_table_offset),
+ AML_OFFSET(gpio.res_source_offset)},
+
+ {ACPI_RSC_MOVE_GPIO_PIN, ACPI_RS_OFFSET(data.gpio.pin_table),
+ AML_OFFSET(gpio.pin_table_offset),
+ 0},
+
+ /* Resource Source */
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.resource_source.index),
+ AML_OFFSET(gpio.res_source_index),
+ 1},
+
+ {ACPI_RSC_COUNT_GPIO_RES,
+ ACPI_RS_OFFSET(data.gpio.resource_source.string_length),
+ AML_OFFSET(gpio.res_source_offset),
+ AML_OFFSET(gpio.vendor_offset)},
+
+ {ACPI_RSC_MOVE_GPIO_RES,
+ ACPI_RS_OFFSET(data.gpio.resource_source.string_ptr),
+ AML_OFFSET(gpio.res_source_offset),
+ 0},
+
+ /* Vendor Data */
+
+ {ACPI_RSC_COUNT_GPIO_VEN, ACPI_RS_OFFSET(data.gpio.vendor_length),
+ AML_OFFSET(gpio.vendor_length),
+ 1},
+
+ {ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.gpio.vendor_data),
+ AML_OFFSET(gpio.vendor_offset),
+ 0},
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_i2c_serial_bus
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[16] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
+ ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_i2c_serial_bus)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
+ sizeof(struct aml_resource_i2c_serialbus),
+ 0},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
+ AML_OFFSET(common_serial_bus.revision_id),
+ 1},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type),
+ AML_OFFSET(common_serial_bus.type),
+ 1},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode),
+ AML_OFFSET(common_serial_bus.flags),
+ 0},
+
+ {ACPI_RSC_1BITFLAG,
+ ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer),
+ AML_OFFSET(common_serial_bus.flags),
+ 1},
+
+ {ACPI_RSC_MOVE8,
+ ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
+ AML_OFFSET(common_serial_bus.type_revision_id),
+ 1},
+
+ {ACPI_RSC_MOVE16,
+ ACPI_RS_OFFSET(data.common_serial_bus.type_data_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ 1},
+
+ /* Vendor data */
+
+ {ACPI_RSC_COUNT_SERIAL_VEN,
+ ACPI_RS_OFFSET(data.common_serial_bus.vendor_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ AML_RESOURCE_I2C_MIN_DATA_LEN},
+
+ {ACPI_RSC_MOVE_SERIAL_VEN,
+ ACPI_RS_OFFSET(data.common_serial_bus.vendor_data),
+ 0,
+ sizeof(struct aml_resource_i2c_serialbus)},
+
+ /* Resource Source */
+
+ {ACPI_RSC_MOVE8,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index),
+ AML_OFFSET(common_serial_bus.res_source_index),
+ 1},
+
+ {ACPI_RSC_COUNT_SERIAL_RES,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ sizeof(struct aml_resource_common_serialbus)},
+
+ {ACPI_RSC_MOVE_SERIAL_RES,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ sizeof(struct aml_resource_common_serialbus)},
+
+ /* I2C bus type specific */
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.i2c_serial_bus.access_mode),
+ AML_OFFSET(i2c_serial_bus.type_specific_flags),
+ 0},
+
+ {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.i2c_serial_bus.connection_speed),
+ AML_OFFSET(i2c_serial_bus.connection_speed),
+ 1},
+
+ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.i2c_serial_bus.slave_address),
+ AML_OFFSET(i2c_serial_bus.slave_address),
+ 1},
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_spi_serial_bus
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[20] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
+ ACPI_RS_SIZE(struct acpi_resource_spi_serialbus),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_spi_serial_bus)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
+ sizeof(struct aml_resource_spi_serialbus),
+ 0},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
+ AML_OFFSET(common_serial_bus.revision_id),
+ 1},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type),
+ AML_OFFSET(common_serial_bus.type),
+ 1},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode),
+ AML_OFFSET(common_serial_bus.flags),
+ 0},
+
+ {ACPI_RSC_1BITFLAG,
+ ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer),
+ AML_OFFSET(common_serial_bus.flags),
+ 1},
+
+ {ACPI_RSC_MOVE8,
+ ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
+ AML_OFFSET(common_serial_bus.type_revision_id),
+ 1},
+
+ {ACPI_RSC_MOVE16,
+ ACPI_RS_OFFSET(data.common_serial_bus.type_data_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ 1},
+
+ /* Vendor data */
+
+ {ACPI_RSC_COUNT_SERIAL_VEN,
+ ACPI_RS_OFFSET(data.common_serial_bus.vendor_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ AML_RESOURCE_SPI_MIN_DATA_LEN},
+
+ {ACPI_RSC_MOVE_SERIAL_VEN,
+ ACPI_RS_OFFSET(data.common_serial_bus.vendor_data),
+ 0,
+ sizeof(struct aml_resource_spi_serialbus)},
+
+ /* Resource Source */
+
+ {ACPI_RSC_MOVE8,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index),
+ AML_OFFSET(common_serial_bus.res_source_index),
+ 1},
+
+ {ACPI_RSC_COUNT_SERIAL_RES,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ sizeof(struct aml_resource_common_serialbus)},
+
+ {ACPI_RSC_MOVE_SERIAL_RES,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ sizeof(struct aml_resource_common_serialbus)},
+
+ /* Spi bus type specific */
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.spi_serial_bus.wire_mode),
+ AML_OFFSET(spi_serial_bus.type_specific_flags),
+ 0},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.spi_serial_bus.device_polarity),
+ AML_OFFSET(spi_serial_bus.type_specific_flags),
+ 1},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.data_bit_length),
+ AML_OFFSET(spi_serial_bus.data_bit_length),
+ 1},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.clock_phase),
+ AML_OFFSET(spi_serial_bus.clock_phase),
+ 1},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.clock_polarity),
+ AML_OFFSET(spi_serial_bus.clock_polarity),
+ 1},
+
+ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.spi_serial_bus.device_selection),
+ AML_OFFSET(spi_serial_bus.device_selection),
+ 1},
+
+ {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.spi_serial_bus.connection_speed),
+ AML_OFFSET(spi_serial_bus.connection_speed),
+ 1},
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_uart_serial_bus
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[22] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
+ ACPI_RS_SIZE(struct acpi_resource_uart_serialbus),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_uart_serial_bus)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
+ sizeof(struct aml_resource_uart_serialbus),
+ 0},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
+ AML_OFFSET(common_serial_bus.revision_id),
+ 1},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type),
+ AML_OFFSET(common_serial_bus.type),
+ 1},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode),
+ AML_OFFSET(common_serial_bus.flags),
+ 0},
+
+ {ACPI_RSC_1BITFLAG,
+ ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer),
+ AML_OFFSET(common_serial_bus.flags),
+ 1},
+
+ {ACPI_RSC_MOVE8,
+ ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
+ AML_OFFSET(common_serial_bus.type_revision_id),
+ 1},
+
+ {ACPI_RSC_MOVE16,
+ ACPI_RS_OFFSET(data.common_serial_bus.type_data_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ 1},
+
+ /* Vendor data */
+
+ {ACPI_RSC_COUNT_SERIAL_VEN,
+ ACPI_RS_OFFSET(data.common_serial_bus.vendor_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ AML_RESOURCE_UART_MIN_DATA_LEN},
+
+ {ACPI_RSC_MOVE_SERIAL_VEN,
+ ACPI_RS_OFFSET(data.common_serial_bus.vendor_data),
+ 0,
+ sizeof(struct aml_resource_uart_serialbus)},
+
+ /* Resource Source */
+
+ {ACPI_RSC_MOVE8,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index),
+ AML_OFFSET(common_serial_bus.res_source_index),
+ 1},
+
+ {ACPI_RSC_COUNT_SERIAL_RES,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ sizeof(struct aml_resource_common_serialbus)},
+
+ {ACPI_RSC_MOVE_SERIAL_RES,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ sizeof(struct aml_resource_common_serialbus)},
+
+ /* Uart bus type specific */
+
+ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.flow_control),
+ AML_OFFSET(uart_serial_bus.type_specific_flags),
+ 0},
+
+ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.stop_bits),
+ AML_OFFSET(uart_serial_bus.type_specific_flags),
+ 2},
+
+ {ACPI_RSC_3BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.data_bits),
+ AML_OFFSET(uart_serial_bus.type_specific_flags),
+ 4},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.endian),
+ AML_OFFSET(uart_serial_bus.type_specific_flags),
+ 7},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.uart_serial_bus.parity),
+ AML_OFFSET(uart_serial_bus.parity),
+ 1},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.uart_serial_bus.lines_enabled),
+ AML_OFFSET(uart_serial_bus.lines_enabled),
+ 1},
+
+ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.uart_serial_bus.rx_fifo_size),
+ AML_OFFSET(uart_serial_bus.rx_fifo_size),
+ 1},
+
+ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.uart_serial_bus.tx_fifo_size),
+ AML_OFFSET(uart_serial_bus.tx_fifo_size),
+ 1},
+
+ {ACPI_RSC_MOVE32,
+ ACPI_RS_OFFSET(data.uart_serial_bus.default_baud_rate),
+ AML_OFFSET(uart_serial_bus.default_baud_rate),
+ 1},
+};
diff --git a/trunk/drivers/acpi/acpica/rsutils.c b/trunk/drivers/acpi/acpica/rsutils.c
index 231811e56939..433a375deb93 100644
--- a/trunk/drivers/acpi/acpica/rsutils.c
+++ b/trunk/drivers/acpi/acpica/rsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -144,6 +144,9 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
* since there are no alignment or endian issues
*/
case ACPI_RSC_MOVE8:
+ case ACPI_RSC_MOVE_GPIO_RES:
+ case ACPI_RSC_MOVE_SERIAL_VEN:
+ case ACPI_RSC_MOVE_SERIAL_RES:
ACPI_MEMCPY(destination, source, item_count);
return;
@@ -153,6 +156,7 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
* misaligned memory transfers
*/
case ACPI_RSC_MOVE16:
+ case ACPI_RSC_MOVE_GPIO_PIN:
ACPI_MOVE_16_TO_16(&ACPI_CAST_PTR(u16, destination)[i],
&ACPI_CAST_PTR(u16, source)[i]);
break;
@@ -588,6 +592,56 @@ acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
}
#endif /* ACPI_FUTURE_USAGE */
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_rs_get_aei_method_data
+ *
+ * PARAMETERS: Node - Device node
+ * ret_buffer - Pointer to a buffer structure for the
+ * results
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function is called to get the _AEI value of an object
+ * contained in an object specified by the handle passed in
+ *
+ * If the function fails an appropriate status will be returned
+ * and the contents of the caller's buffer are undefined.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer)
+{
+ union acpi_operand_object *obj_desc;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(rs_get_aei_method_data);
+
+ /* Parameters guaranteed valid by caller */
+
+ /* Execute the method, no parameters */
+
+ status = acpi_ut_evaluate_object(node, METHOD_NAME__AEI,
+ ACPI_BTYPE_BUFFER, &obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Make the call to create a resource linked list from the
+ * byte stream buffer that comes back from the _CRS method
+ * execution.
+ */
+ status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
+
+ /* On exit, we must delete the object returned by evaluate_object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
+
/*******************************************************************************
*
* FUNCTION: acpi_rs_get_method_data
diff --git a/trunk/drivers/acpi/acpica/rsxface.c b/trunk/drivers/acpi/acpica/rsxface.c
index fe86b37b16ce..f58c098c7aeb 100644
--- a/trunk/drivers/acpi/acpica/rsxface.c
+++ b/trunk/drivers/acpi/acpica/rsxface.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -307,6 +307,46 @@ acpi_set_current_resources(acpi_handle device_handle,
ACPI_EXPORT_SYMBOL(acpi_set_current_resources)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_event_resources
+ *
+ * PARAMETERS: device_handle - Handle to the device object for the
+ * device we are getting resources
+ * ret_buffer - Pointer to a buffer that will receive the
+ * event resources for the device
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function is called to get the event resources for a
+ * specific device. The caller must first acquire a handle for
+ * the desired device. The resource data is returned in the
+ * buffer pointed to by the ret_buffer variable. Uses the
+ * _AEI method.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_event_resources(acpi_handle device_handle,
+ struct acpi_buffer *ret_buffer)
+{
+ acpi_status status;
+ struct acpi_namespace_node *node;
+
+ ACPI_FUNCTION_TRACE(acpi_get_event_resources);
+
+ /* Validate parameters then dispatch to internal routine */
+
+ status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_rs_get_aei_method_data(node, ret_buffer);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_event_resources)
+
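For illustration, a hedged usage sketch of the new acpi_get_event_resources export: dev_handle is a hypothetical device handle obtained elsewhere, and the buffer uses ACPI_ALLOCATE_LOCAL_BUFFER, so the caller frees buffer.pointer when done.

	struct acpi_buffer buffer = { ACPI_ALLOCATE_LOCAL_BUFFER, NULL };
	acpi_status status;

	/* Evaluate _AEI and retrieve the GPIO event resources */
	status = acpi_get_event_resources(dev_handle, &buffer);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* buffer.pointer now holds a struct acpi_resource list */
	ACPI_FREE(buffer.pointer);
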
/******************************************************************************
*
* FUNCTION: acpi_resource_to_address64
@@ -486,8 +526,9 @@ acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
*
* PARAMETERS: device_handle - Handle to the device object for the
* device we are querying
- * Name - Method name of the resources we want
- * (METHOD_NAME__CRS or METHOD_NAME__PRS)
+ * Name - Method name of the resources we want.
+ * (METHOD_NAME__CRS, METHOD_NAME__PRS, or
+ * METHOD_NAME__AEI)
* user_function - Called for each resource
* Context - Passed to user_function
*
@@ -514,11 +555,12 @@ acpi_walk_resources(acpi_handle device_handle,
if (!device_handle || !user_function || !name ||
(!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
- !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS))) {
+ !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS) &&
+ !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI))) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /* Get the _CRS or _PRS resource list */
+ /* Get the _CRS/_PRS/_AEI resource list */
buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
status = acpi_rs_get_method_data(device_handle, name, &buffer);
diff --git a/trunk/drivers/acpi/acpica/tbfadt.c b/trunk/drivers/acpi/acpica/tbfadt.c
index 6f5588e62c0a..c5d870406f41 100644
--- a/trunk/drivers/acpi/acpica/tbfadt.c
+++ b/trunk/drivers/acpi/acpica/tbfadt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -63,14 +63,15 @@ static void acpi_tb_setup_fadt_registers(void);
typedef struct acpi_fadt_info {
char *name;
- u8 address64;
- u8 address32;
- u8 length;
+ u16 address64;
+ u16 address32;
+ u16 length;
u8 default_length;
u8 type;
} acpi_fadt_info;
+#define ACPI_FADT_OPTIONAL 0
#define ACPI_FADT_REQUIRED 1
#define ACPI_FADT_SEPARATE_LENGTH 2
@@ -87,7 +88,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
ACPI_FADT_OFFSET(pm1b_event_block),
ACPI_FADT_OFFSET(pm1_event_length),
ACPI_PM1_REGISTER_WIDTH * 2, /* Enable + Status register */
- 0},
+ ACPI_FADT_OPTIONAL},
{"Pm1aControlBlock",
ACPI_FADT_OFFSET(xpm1a_control_block),
@@ -101,7 +102,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
ACPI_FADT_OFFSET(pm1b_control_block),
ACPI_FADT_OFFSET(pm1_control_length),
ACPI_PM1_REGISTER_WIDTH,
- 0},
+ ACPI_FADT_OPTIONAL},
{"Pm2ControlBlock",
ACPI_FADT_OFFSET(xpm2_control_block),
@@ -139,7 +140,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
typedef struct acpi_fadt_pm_info {
struct acpi_generic_address *target;
- u8 source;
+ u16 source;
u8 register_num;
} acpi_fadt_pm_info;
@@ -253,8 +254,13 @@ void acpi_tb_parse_fadt(u32 table_index)
acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt,
ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT);
- acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xfacs,
- ACPI_SIG_FACS, ACPI_TABLE_INDEX_FACS);
+ /* If Hardware Reduced flag is set, there is no FACS */
+
+ if (!acpi_gbl_reduced_hardware) {
+ acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.
+ Xfacs, ACPI_SIG_FACS,
+ ACPI_TABLE_INDEX_FACS);
+ }
}
/*******************************************************************************
@@ -277,12 +283,12 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
{
/*
* Check if the FADT is larger than the largest table that we expect
- * (the ACPI 2.0/3.0 version). If so, truncate the table, and issue
+ * (the ACPI 5.0 version). If so, truncate the table, and issue
* a warning.
*/
if (length > sizeof(struct acpi_table_fadt)) {
ACPI_WARNING((AE_INFO,
- "FADT (revision %u) is longer than ACPI 2.0 version, "
+ "FADT (revision %u) is longer than ACPI 5.0 version, "
"truncating length %u to %u",
table->revision, length,
(u32)sizeof(struct acpi_table_fadt)));
@@ -297,6 +303,13 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
ACPI_MEMCPY(&acpi_gbl_FADT, table,
ACPI_MIN(length, sizeof(struct acpi_table_fadt)));
+ /* Take a copy of the Hardware Reduced flag */
+
+ acpi_gbl_reduced_hardware = FALSE;
+ if (acpi_gbl_FADT.flags & ACPI_FADT_HW_REDUCED) {
+ acpi_gbl_reduced_hardware = TRUE;
+ }
+
/* Convert the local copy of the FADT to the common internal format */
acpi_tb_convert_fadt();
@@ -502,6 +515,12 @@ static void acpi_tb_validate_fadt(void)
acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt;
}
+ /* If Hardware Reduced flag is set, we are all done */
+
+ if (acpi_gbl_reduced_hardware) {
+ return;
+ }
+
/* Examine all of the 64-bit extended address fields (X fields) */
for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
diff --git a/trunk/drivers/acpi/acpica/tbfind.c b/trunk/drivers/acpi/acpica/tbfind.c
index a55cb2bb5abb..4903e36ea75a 100644
--- a/trunk/drivers/acpi/acpica/tbfind.c
+++ b/trunk/drivers/acpi/acpica/tbfind.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/tbinstal.c b/trunk/drivers/acpi/acpica/tbinstal.c
index 62365f6075dd..1aecf7baa4e0 100644
--- a/trunk/drivers/acpi/acpica/tbinstal.c
+++ b/trunk/drivers/acpi/acpica/tbinstal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/tbutils.c b/trunk/drivers/acpi/acpica/tbutils.c
index 0f2d395feaba..09ca39e14337 100644
--- a/trunk/drivers/acpi/acpica/tbutils.c
+++ b/trunk/drivers/acpi/acpica/tbutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -135,6 +135,13 @@ acpi_status acpi_tb_initialize_facs(void)
{
acpi_status status;
+ /* If Hardware Reduced flag is set, there is no FACS */
+
+ if (acpi_gbl_reduced_hardware) {
+ acpi_gbl_FACS = NULL;
+ return (AE_OK);
+ }
+
status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
ACPI_CAST_INDIRECT_PTR(struct
acpi_table_header,
diff --git a/trunk/drivers/acpi/acpica/tbxface.c b/trunk/drivers/acpi/acpica/tbxface.c
index e7d13f5d3f2d..abcc6412c244 100644
--- a/trunk/drivers/acpi/acpica/tbxface.c
+++ b/trunk/drivers/acpi/acpica/tbxface.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/tbxfroot.c b/trunk/drivers/acpi/acpica/tbxfroot.c
index 7eb6c6cc1edf..4258f647ca3d 100644
--- a/trunk/drivers/acpi/acpica/tbxfroot.c
+++ b/trunk/drivers/acpi/acpica/tbxfroot.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utaddress.c b/trunk/drivers/acpi/acpica/utaddress.c
new file mode 100644
index 000000000000..67932aebe6dd
--- /dev/null
+++ b/trunk/drivers/acpi/acpica/utaddress.c
@@ -0,0 +1,294 @@
+/******************************************************************************
+ *
+ * Module Name: utaddress - op_region address range check
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utaddress")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_add_address_range
+ *
+ * PARAMETERS: space_id - Address space ID
+ * Address - op_region start address
+ * Length - op_region length
+ * region_node - op_region namespace node
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Add the Operation Region address range to the global list.
+ * The only supported Space IDs are Memory and I/O. Called when
+ * the op_region address/length operands are fully evaluated.
+ *
+ * MUTEX: Locks the namespace
+ *
+ * NOTE: Because this interface is only called when an op_region argument
+ * list is evaluated, there cannot be any duplicate region_nodes.
+ * Duplicate Address/Length values are allowed, however, so that multiple
+ * address conflicts can be detected.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ut_add_address_range(acpi_adr_space_type space_id,
+ acpi_physical_address address,
+ u32 length, struct acpi_namespace_node *region_node)
+{
+ struct acpi_address_range *range_info;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ut_add_address_range);
+
+ if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
+ (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Allocate/init a new info block, add it to the appropriate list */
+
+ range_info = ACPI_ALLOCATE(sizeof(struct acpi_address_range));
+ if (!range_info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ range_info->start_address = address;
+ range_info->end_address = (address + length - 1);
+ range_info->region_node = region_node;
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ ACPI_FREE(range_info);
+ return_ACPI_STATUS(status);
+ }
+
+ range_info->next = acpi_gbl_address_range_list[space_id];
+ acpi_gbl_address_range_list[space_id] = range_info;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "\nAdded [%4.4s] address range: 0x%p-0x%p\n",
+ acpi_ut_get_node_name(range_info->region_node),
+ ACPI_CAST_PTR(void, address),
+ ACPI_CAST_PTR(void, range_info->end_address)));
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_remove_address_range
+ *
+ * PARAMETERS: space_id - Address space ID
+ * region_node - op_region namespace node
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Remove the Operation Region from the global list. The only
+ * supported Space IDs are Memory and I/O. Called when an
+ * op_region is deleted.
+ *
+ * MUTEX: Assumes the namespace is locked
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_remove_address_range(acpi_adr_space_type space_id,
+ struct acpi_namespace_node *region_node)
+{
+ struct acpi_address_range *range_info;
+ struct acpi_address_range *prev;
+
+ ACPI_FUNCTION_TRACE(ut_remove_address_range);
+
+ if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
+ (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
+ return_VOID;
+ }
+
+ /* Get the appropriate list head and check the list */
+
+ range_info = prev = acpi_gbl_address_range_list[space_id];
+ while (range_info) {
+ if (range_info->region_node == region_node) {
+ if (range_info == prev) { /* Found at list head */
+ acpi_gbl_address_range_list[space_id] =
+ range_info->next;
+ } else {
+ prev->next = range_info->next;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "\nRemoved [%4.4s] address range: 0x%p-0x%p\n",
+ acpi_ut_get_node_name(range_info->
+ region_node),
+ ACPI_CAST_PTR(void,
+ range_info->
+ start_address),
+ ACPI_CAST_PTR(void,
+ range_info->
+ end_address)));
+
+ ACPI_FREE(range_info);
+ return_VOID;
+ }
+
+ prev = range_info;
+ range_info = range_info->next;
+ }
+
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_check_address_range
+ *
+ * PARAMETERS: space_id - Address space ID
+ * Address - Start address
+ * Length - Length of address range
+ * Warn - TRUE if warning on overlap desired
+ *
+ * RETURN: Count of the number of conflicts detected. Zero is always
+ * returned for Space IDs other than Memory or I/O.
+ *
+ * DESCRIPTION: Check if the input address range overlaps any of the
+ * ASL operation region address ranges. The only supported
+ * Space IDs are Memory and I/O.
+ *
+ * MUTEX: Assumes the namespace is locked.
+ *
+ ******************************************************************************/
+
+u32
+acpi_ut_check_address_range(acpi_adr_space_type space_id,
+ acpi_physical_address address, u32 length, u8 warn)
+{
+ struct acpi_address_range *range_info;
+ acpi_physical_address end_address;
+ char *pathname;
+ u32 overlap_count = 0;
+
+ ACPI_FUNCTION_TRACE(ut_check_address_range);
+
+ if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
+ (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
+ return_UINT32(0);
+ }
+
+ range_info = acpi_gbl_address_range_list[space_id];
+ end_address = address + length - 1;
+
+ /* Check entire list for all possible conflicts */
+
+ while (range_info) {
+ /*
+ * Check if the requested Address/Length overlaps this address_range.
+ * Four cases to consider:
+ *
+ * 1) Input address/length is contained completely in the address range
+ * 2) Input address/length overlaps range at the range start
+ * 3) Input address/length overlaps range at the range end
+ * 4) Input address/length completely encompasses the range
+ */
+ if ((address <= range_info->end_address) &&
+ (end_address >= range_info->start_address)) {
+
+ /* Found an address range overlap */
+
+ overlap_count++;
+ if (warn) { /* Optional warning message */
+ pathname =
+ acpi_ns_get_external_pathname(range_info->
+ region_node);
+
+ ACPI_WARNING((AE_INFO,
+ "0x%p-0x%p %s conflicts with Region %s %d",
+ ACPI_CAST_PTR(void, address),
+ ACPI_CAST_PTR(void, end_address),
+ acpi_ut_get_region_name(space_id),
+ pathname, overlap_count));
+ ACPI_FREE(pathname);
+ }
+ }
+
+ range_info = range_info->next;
+ }
+
+ return_UINT32(overlap_count);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_delete_address_lists
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Delete all global address range lists (called during
+ * subsystem shutdown).
+ *
+ ******************************************************************************/
+
+void acpi_ut_delete_address_lists(void)
+{
+ struct acpi_address_range *next;
+ struct acpi_address_range *range_info;
+ int i;
+
+ /* Delete all elements in all address range lists */
+
+ for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) {
+ next = acpi_gbl_address_range_list[i];
+
+ while (next) {
+ range_info = next;
+ next = range_info->next;
+ ACPI_FREE(range_info);
+ }
+
+ acpi_gbl_address_range_list[i] = NULL;
+ }
+}
diff --git a/trunk/drivers/acpi/acpica/utalloc.c b/trunk/drivers/acpi/acpica/utalloc.c
index 0a697351cf69..9982d2ea66fb 100644
--- a/trunk/drivers/acpi/acpica/utalloc.c
+++ b/trunk/drivers/acpi/acpica/utalloc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utcopy.c b/trunk/drivers/acpi/acpica/utcopy.c
index aded299a2fa8..3317c0a406ee 100644
--- a/trunk/drivers/acpi/acpica/utcopy.c
+++ b/trunk/drivers/acpi/acpica/utcopy.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utdebug.c b/trunk/drivers/acpi/acpica/utdebug.c
index a1f8d7509e66..a0998a886318 100644
--- a/trunk/drivers/acpi/acpica/utdebug.c
+++ b/trunk/drivers/acpi/acpica/utdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utdecode.c b/trunk/drivers/acpi/acpica/utdecode.c
index 8b087e2d64f4..d42ede5260c7 100644
--- a/trunk/drivers/acpi/acpica/utdecode.c
+++ b/trunk/drivers/acpi/acpica/utdecode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -171,7 +171,9 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
"SMBus",
"SystemCMOS",
"PCIBARTarget",
- "IPMI"
+ "IPMI",
+ "GeneralPurposeIo",
+ "GenericSerialBus"
};
char *acpi_ut_get_region_name(u8 space_id)
diff --git a/trunk/drivers/acpi/acpica/utdelete.c b/trunk/drivers/acpi/acpica/utdelete.c
index 31f5a7832ef1..2a6c3e183697 100644
--- a/trunk/drivers/acpi/acpica/utdelete.c
+++ b/trunk/drivers/acpi/acpica/utdelete.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -215,11 +215,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"***** Region %p\n", object));
- /* Invalidate the region address/length via the host OS */
-
- acpi_os_invalidate_address(object->region.space_id,
- object->region.address,
- (acpi_size) object->region.length);
+ /*
+ * Update the address_range list. Note that only permanent regions
+ * (i.e., regions not created within a control method) are in this list.
+ */
+ if (!(object->region.node->flags & ANOBJ_TEMPORARY)) {
+ acpi_ut_remove_address_range(object->region.space_id,
+ object->region.node);
+ }
second_desc = acpi_ns_get_secondary_object(object);
if (second_desc) {
diff --git a/trunk/drivers/acpi/acpica/uteval.c b/trunk/drivers/acpi/acpica/uteval.c
index 18f73c9d10bc..479f32b33415 100644
--- a/trunk/drivers/acpi/acpica/uteval.c
+++ b/trunk/drivers/acpi/acpica/uteval.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utglobal.c b/trunk/drivers/acpi/acpica/utglobal.c
index ffba0a39c3e8..4153584cf526 100644
--- a/trunk/drivers/acpi/acpica/utglobal.c
+++ b/trunk/drivers/acpi/acpica/utglobal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -264,6 +264,12 @@ acpi_status acpi_ut_init_globals(void)
return_ACPI_STATUS(status);
}
+ /* Address Range lists */
+
+ for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) {
+ acpi_gbl_address_range_list[i] = NULL;
+ }
+
/* Mutex locked flags */
for (i = 0; i < ACPI_NUM_MUTEX; i++) {
diff --git a/trunk/drivers/acpi/acpica/utids.c b/trunk/drivers/acpi/acpica/utids.c
index b679ea693545..c92eb1d93785 100644
--- a/trunk/drivers/acpi/acpica/utids.c
+++ b/trunk/drivers/acpi/acpica/utids.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utinit.c b/trunk/drivers/acpi/acpica/utinit.c
index 191b6828cce9..8359c0c5dc98 100644
--- a/trunk/drivers/acpi/acpica/utinit.c
+++ b/trunk/drivers/acpi/acpica/utinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -92,6 +92,7 @@ static void acpi_ut_terminate(void)
gpe_xrupt_info = next_gpe_xrupt_info;
}
+ acpi_ut_delete_address_lists();
return_VOID;
}
diff --git a/trunk/drivers/acpi/acpica/utlock.c b/trunk/drivers/acpi/acpica/utlock.c
index f6bb75c6faf5..155fd786d0f2 100644
--- a/trunk/drivers/acpi/acpica/utlock.c
+++ b/trunk/drivers/acpi/acpica/utlock.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utmath.c b/trunk/drivers/acpi/acpica/utmath.c
index ce481da9bb45..2491a552b0e6 100644
--- a/trunk/drivers/acpi/acpica/utmath.c
+++ b/trunk/drivers/acpi/acpica/utmath.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utmisc.c b/trunk/drivers/acpi/acpica/utmisc.c
index c33a852d4f42..86f19db74e05 100644
--- a/trunk/drivers/acpi/acpica/utmisc.c
+++ b/trunk/drivers/acpi/acpica/utmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utmutex.c b/trunk/drivers/acpi/acpica/utmutex.c
index 7d797e2baecd..43174df33121 100644
--- a/trunk/drivers/acpi/acpica/utmutex.c
+++ b/trunk/drivers/acpi/acpica/utmutex.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -293,14 +293,10 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
{
- acpi_thread_id this_thread_id;
-
ACPI_FUNCTION_NAME(ut_release_mutex);
- this_thread_id = acpi_os_get_thread_id();
-
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %u releasing Mutex [%s]\n",
- (u32)this_thread_id,
+ (u32)acpi_os_get_thread_id(),
acpi_ut_get_mutex_name(mutex_id)));
if (mutex_id > ACPI_MAX_MUTEX) {
@@ -329,7 +325,8 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
* the ACPI subsystem code.
*/
for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
- if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
+ if (acpi_gbl_mutex_info[i].thread_id ==
+ acpi_os_get_thread_id()) {
if (i == mutex_id) {
continue;
}
diff --git a/trunk/drivers/acpi/acpica/utobject.c b/trunk/drivers/acpi/acpica/utobject.c
index 188340a017b4..b112744fc9ae 100644
--- a/trunk/drivers/acpi/acpica/utobject.c
+++ b/trunk/drivers/acpi/acpica/utobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utosi.c b/trunk/drivers/acpi/acpica/utosi.c
index 1fb10cb8f11d..2360cf70c18c 100644
--- a/trunk/drivers/acpi/acpica/utosi.c
+++ b/trunk/drivers/acpi/acpica/utosi.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utresrc.c b/trunk/drivers/acpi/acpica/utresrc.c
index 6ffd3a8bdaa5..9d441ea70305 100644
--- a/trunk/drivers/acpi/acpica/utresrc.c
+++ b/trunk/drivers/acpi/acpica/utresrc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -43,7 +43,7 @@
#include <acpi/acpi.h>
#include "accommon.h"
-#include "amlresrc.h"
+#include "acresrc.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utresrc")
@@ -154,6 +154,138 @@ const char *acpi_gbl_typ_decode[] = {
"TypeF"
};
+const char *acpi_gbl_ppc_decode[] = {
+ "PullDefault",
+ "PullUp",
+ "PullDown",
+ "PullNone"
+};
+
+const char *acpi_gbl_ior_decode[] = {
+ "IoRestrictionNone",
+ "IoRestrictionInputOnly",
+ "IoRestrictionOutputOnly",
+ "IoRestrictionNoneAndPreserve"
+};
+
+const char *acpi_gbl_dts_decode[] = {
+ "Width8bit",
+ "Width16bit",
+ "Width32bit",
+ "Width64bit",
+ "Width128bit",
+ "Width256bit",
+};
+
+/* GPIO connection type */
+
+const char *acpi_gbl_ct_decode[] = {
+ "Interrupt",
+ "I/O"
+};
+
+/* Serial bus type */
+
+const char *acpi_gbl_sbt_decode[] = {
+ "/* UNKNOWN serial bus type */",
+ "I2C",
+ "SPI",
+ "UART"
+};
+
+/* I2C serial bus access mode */
+
+const char *acpi_gbl_am_decode[] = {
+ "AddressingMode7Bit",
+ "AddressingMode10Bit"
+};
+
+/* I2C serial bus slave mode */
+
+const char *acpi_gbl_sm_decode[] = {
+ "ControllerInitiated",
+ "DeviceInitiated"
+};
+
+/* SPI serial bus wire mode */
+
+const char *acpi_gbl_wm_decode[] = {
+ "FourWireMode",
+ "ThreeWireMode"
+};
+
+/* SPI serial clock phase */
+
+const char *acpi_gbl_cph_decode[] = {
+ "ClockPhaseFirst",
+ "ClockPhaseSecond"
+};
+
+/* SPI serial bus clock polarity */
+
+const char *acpi_gbl_cpo_decode[] = {
+ "ClockPolarityLow",
+ "ClockPolarityHigh"
+};
+
+/* SPI serial bus device polarity */
+
+const char *acpi_gbl_dp_decode[] = {
+ "PolarityLow",
+ "PolarityHigh"
+};
+
+/* UART serial bus endian */
+
+const char *acpi_gbl_ed_decode[] = {
+ "LittleEndian",
+ "BigEndian"
+};
+
+/* UART serial bus bits per byte */
+
+const char *acpi_gbl_bpb_decode[] = {
+ "DataBitsFive",
+ "DataBitsSix",
+ "DataBitsSeven",
+ "DataBitsEight",
+ "DataBitsNine",
+ "/* UNKNOWN Bits per byte */",
+ "/* UNKNOWN Bits per byte */",
+ "/* UNKNOWN Bits per byte */"
+};
+
+/* UART serial bus stop bits */
+
+const char *acpi_gbl_sb_decode[] = {
+ "StopBitsNone",
+ "StopBitsOne",
+ "StopBitsOnePlusHalf",
+ "StopBitsTwo"
+};
+
+/* UART serial bus flow control */
+
+const char *acpi_gbl_fc_decode[] = {
+ "FlowControlNone",
+ "FlowControlHardware",
+ "FlowControlXON",
+ "/* UNKNOWN flow control keyword */"
+};
+
+/* UART serial bus parity type */
+
+const char *acpi_gbl_pt_decode[] = {
+ "ParityTypeNone",
+ "ParityTypeEven",
+ "ParityTypeOdd",
+ "ParityTypeMark",
+ "ParityTypeSpace",
+ "/* UNKNOWN parity keyword */",
+ "/* UNKNOWN parity keyword */",
+ "/* UNKNOWN parity keyword */"
+};
+
#endif
/*
@@ -173,7 +305,7 @@ const u8 acpi_gbl_resource_aml_sizes[] = {
ACPI_AML_SIZE_SMALL(struct aml_resource_end_dependent),
ACPI_AML_SIZE_SMALL(struct aml_resource_io),
ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_io),
- 0,
+ ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_dma),
0,
0,
0,
@@ -193,7 +325,17 @@ const u8 acpi_gbl_resource_aml_sizes[] = {
ACPI_AML_SIZE_LARGE(struct aml_resource_address16),
ACPI_AML_SIZE_LARGE(struct aml_resource_extended_irq),
ACPI_AML_SIZE_LARGE(struct aml_resource_address64),
- ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64)
+ ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64),
+ ACPI_AML_SIZE_LARGE(struct aml_resource_gpio),
+ 0,
+ ACPI_AML_SIZE_LARGE(struct aml_resource_common_serialbus),
+};
+
+const u8 acpi_gbl_resource_aml_serial_bus_sizes[] = {
+ 0,
+ ACPI_AML_SIZE_LARGE(struct aml_resource_i2c_serialbus),
+ ACPI_AML_SIZE_LARGE(struct aml_resource_spi_serialbus),
+ ACPI_AML_SIZE_LARGE(struct aml_resource_uart_serialbus),
};
/*
@@ -209,35 +351,49 @@ static const u8 acpi_gbl_resource_types[] = {
0,
0,
0,
- ACPI_SMALL_VARIABLE_LENGTH,
- ACPI_FIXED_LENGTH,
- ACPI_SMALL_VARIABLE_LENGTH,
- ACPI_FIXED_LENGTH,
- ACPI_FIXED_LENGTH,
- ACPI_FIXED_LENGTH,
- 0,
+ ACPI_SMALL_VARIABLE_LENGTH, /* 04 IRQ */
+ ACPI_FIXED_LENGTH, /* 05 DMA */
+ ACPI_SMALL_VARIABLE_LENGTH, /* 06 start_dependent_functions */
+ ACPI_FIXED_LENGTH, /* 07 end_dependent_functions */
+ ACPI_FIXED_LENGTH, /* 08 IO */
+ ACPI_FIXED_LENGTH, /* 09 fixed_iO */
+ ACPI_FIXED_LENGTH, /* 0_a fixed_dMA */
0,
0,
0,
- ACPI_VARIABLE_LENGTH,
- ACPI_FIXED_LENGTH,
+ ACPI_VARIABLE_LENGTH, /* 0_e vendor_short */
+ ACPI_FIXED_LENGTH, /* 0_f end_tag */
/* Large descriptors */
0,
- ACPI_FIXED_LENGTH,
- ACPI_FIXED_LENGTH,
+ ACPI_FIXED_LENGTH, /* 01 Memory24 */
+ ACPI_FIXED_LENGTH, /* 02 generic_register */
0,
- ACPI_VARIABLE_LENGTH,
- ACPI_FIXED_LENGTH,
- ACPI_FIXED_LENGTH,
- ACPI_VARIABLE_LENGTH,
- ACPI_VARIABLE_LENGTH,
- ACPI_VARIABLE_LENGTH,
- ACPI_VARIABLE_LENGTH,
- ACPI_FIXED_LENGTH
+ ACPI_VARIABLE_LENGTH, /* 04 vendor_long */
+ ACPI_FIXED_LENGTH, /* 05 Memory32 */
+ ACPI_FIXED_LENGTH, /* 06 memory32_fixed */
+ ACPI_VARIABLE_LENGTH, /* 07 Dword* address */
+ ACPI_VARIABLE_LENGTH, /* 08 Word* address */
+ ACPI_VARIABLE_LENGTH, /* 09 extended_iRQ */
+ ACPI_VARIABLE_LENGTH, /* 0_a Qword* address */
+ ACPI_FIXED_LENGTH, /* 0_b Extended* address */
+ ACPI_VARIABLE_LENGTH, /* 0_c Gpio* */
+ 0,
+ ACPI_VARIABLE_LENGTH /* 0_e *serial_bus */
};
+/*
+ * For the i_aSL compiler/disassembler, we don't want any error messages
+ * because the disassembler uses the resource validation code to determine
+ * if Buffer objects are actually Resource Templates.
+ */
+#ifdef ACPI_ASL_COMPILER
+#define ACPI_RESOURCE_ERROR(plist)
+#else
+#define ACPI_RESOURCE_ERROR(plist) ACPI_ERROR(plist)
+#endif
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_walk_aml_resources
@@ -265,6 +421,7 @@ acpi_ut_walk_aml_resources(u8 * aml,
u8 resource_index;
u32 length;
u32 offset = 0;
+ u8 end_tag[2] = { 0x79, 0x00 };
ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
@@ -286,6 +443,10 @@ acpi_ut_walk_aml_resources(u8 * aml,
status = acpi_ut_validate_resource(aml, &resource_index);
if (ACPI_FAILURE(status)) {
+ /*
+ * Exit on failure. Cannot continue because the descriptor length
+ * may be bogus also.
+ */
return_ACPI_STATUS(status);
}
@@ -300,7 +461,7 @@ acpi_ut_walk_aml_resources(u8 * aml,
user_function(aml, length, offset, resource_index,
context);
if (ACPI_FAILURE(status)) {
- return (status);
+ return_ACPI_STATUS(status);
}
}
@@ -333,7 +494,19 @@ acpi_ut_walk_aml_resources(u8 * aml,
/* Did not find an end_tag descriptor */
- return (AE_AML_NO_RESOURCE_END_TAG);
+ if (user_function) {
+
+ /* Insert an end_tag anyway. acpi_rs_get_list_length always leaves room */
+
+ (void)acpi_ut_validate_resource(end_tag, &resource_index);
+ status =
+ user_function(end_tag, 2, offset, resource_index, context);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
/*******************************************************************************
@@ -354,6 +527,7 @@ acpi_ut_walk_aml_resources(u8 * aml,
acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
{
+ union aml_resource *aml_resource;
u8 resource_type;
u8 resource_index;
acpi_rs_length resource_length;
@@ -375,7 +549,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
/* Verify the large resource type (name) against the max */
if (resource_type > ACPI_RESOURCE_NAME_LARGE_MAX) {
- return (AE_AML_INVALID_RESOURCE_TYPE);
+ goto invalid_resource;
}
/*
@@ -392,15 +566,17 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
((resource_type & ACPI_RESOURCE_NAME_SMALL_MASK) >> 3);
}
- /* Check validity of the resource type, zero indicates name is invalid */
-
+ /*
+ * Check validity of the resource type, via acpi_gbl_resource_types. Zero
+ * indicates an invalid resource.
+ */
if (!acpi_gbl_resource_types[resource_index]) {
- return (AE_AML_INVALID_RESOURCE_TYPE);
+ goto invalid_resource;
}
/*
- * 2) Validate the resource_length field. This ensures that the length
- * is at least reasonable, and guarantees that it is non-zero.
+ * Validate the resource_length field. This ensures that the length
+ * is at least reasonable, and guarantees that it is non-zero.
*/
resource_length = acpi_ut_get_resource_length(aml);
minimum_resource_length = acpi_gbl_resource_aml_sizes[resource_index];
@@ -413,7 +589,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
/* Fixed length resource, length must match exactly */
if (resource_length != minimum_resource_length) {
- return (AE_AML_BAD_RESOURCE_LENGTH);
+ goto bad_resource_length;
}
break;
@@ -422,7 +598,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
/* Variable length resource, length must be at least the minimum */
if (resource_length < minimum_resource_length) {
- return (AE_AML_BAD_RESOURCE_LENGTH);
+ goto bad_resource_length;
}
break;
@@ -432,7 +608,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
if ((resource_length > minimum_resource_length) ||
(resource_length < (minimum_resource_length - 1))) {
- return (AE_AML_BAD_RESOURCE_LENGTH);
+ goto bad_resource_length;
}
break;
@@ -440,7 +616,23 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
/* Shouldn't happen (because of validation earlier), but be sure */
- return (AE_AML_INVALID_RESOURCE_TYPE);
+ goto invalid_resource;
+ }
+
+ aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
+ if (resource_type == ACPI_RESOURCE_NAME_SERIAL_BUS) {
+
+ /* Validate the bus_type field */
+
+ if ((aml_resource->common_serial_bus.type == 0) ||
+ (aml_resource->common_serial_bus.type >
+ AML_RESOURCE_MAX_SERIALBUSTYPE)) {
+ ACPI_RESOURCE_ERROR((AE_INFO,
+ "Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X",
+ aml_resource->common_serial_bus.
+ type));
+ return (AE_AML_INVALID_RESOURCE_TYPE);
+ }
}
/* Optionally return the resource table index */
@@ -450,6 +642,22 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
}
return (AE_OK);
+
+ invalid_resource:
+
+ ACPI_RESOURCE_ERROR((AE_INFO,
+ "Invalid/unsupported resource descriptor: Type 0x%2.2X",
+ resource_type));
+ return (AE_AML_INVALID_RESOURCE_TYPE);
+
+ bad_resource_length:
+
+ ACPI_RESOURCE_ERROR((AE_INFO,
+ "Invalid resource descriptor length: Type "
+ "0x%2.2X, Length 0x%4.4X, MinLength 0x%4.4X",
+ resource_type, resource_length,
+ minimum_resource_length));
+ return (AE_AML_BAD_RESOURCE_LENGTH);
}
/*******************************************************************************
diff --git a/trunk/drivers/acpi/acpica/utstate.c b/trunk/drivers/acpi/acpica/utstate.c
index 30c21e1a9360..4267477c2797 100644
--- a/trunk/drivers/acpi/acpica/utstate.c
+++ b/trunk/drivers/acpi/acpica/utstate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utxface.c b/trunk/drivers/acpi/acpica/utxface.c
index 420ebfe08c72..644e8c8ebc4b 100644
--- a/trunk/drivers/acpi/acpica/utxface.c
+++ b/trunk/drivers/acpi/acpica/utxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -48,6 +48,7 @@
#include "acnamesp.h"
#include "acdebug.h"
#include "actables.h"
+#include "acinterp.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utxface")
@@ -640,4 +641,41 @@ acpi_status acpi_install_interface_handler(acpi_interface_handler handler)
}
ACPI_EXPORT_SYMBOL(acpi_install_interface_handler)
+
+/*****************************************************************************
+ *
+ * FUNCTION: acpi_check_address_range
+ *
+ * PARAMETERS: space_id - Address space ID
+ * Address - Start address
+ * Length - Length
+ * Warn - TRUE if warning on overlap desired
+ *
+ * RETURN: Count of the number of conflicts detected.
+ *
+ * DESCRIPTION: Check if the input address range overlaps any of the
+ * ASL operation region address ranges.
+ *
+ ****************************************************************************/
+u32
+acpi_check_address_range(acpi_adr_space_type space_id,
+ acpi_physical_address address,
+ acpi_size length, u8 warn)
+{
+ u32 overlaps;
+ acpi_status status;
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return (0);
+ }
+
+ overlaps = acpi_ut_check_address_range(space_id, address,
+ (u32)length, warn);
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return (overlaps);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_check_address_range)
#endif /* !ACPI_ASL_COMPILER */
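As a rough illustration of the new public interface (not part of this patch), a platform driver could consult acpi_check_address_range() before claiming an MMIO window; the driver function name and the idea of an advisory pre-check are assumptions for this sketch:

    #include <linux/acpi.h>
    #include <linux/printk.h>

    /* Hypothetical caller: warn if a window overlaps an ASL OperationRegion */
    static int example_claim_mmio(acpi_physical_address base, acpi_size len)
    {
            u32 conflicts;

            /* Last argument (warn = 1) requests a warning for each overlap */
            conflicts = acpi_check_address_range(ACPI_ADR_SPACE_SYSTEM_MEMORY,
                                                 base, len, 1);
            if (conflicts)
                    pr_warn("example: %u OpRegion conflict(s) at 0x%llx\n",
                            conflicts, (unsigned long long)base);

            /* The check is advisory; the caller decides whether to proceed */
            return 0;
    }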
diff --git a/trunk/drivers/acpi/acpica/utxferror.c b/trunk/drivers/acpi/acpica/utxferror.c
index 8d0245ec4315..52b568af1819 100644
--- a/trunk/drivers/acpi/acpica/utxferror.c
+++ b/trunk/drivers/acpi/acpica/utxferror.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utxfmutex.c b/trunk/drivers/acpi/acpica/utxfmutex.c
new file mode 100644
index 000000000000..1427d191d15a
--- /dev/null
+++ b/trunk/drivers/acpi/acpica/utxfmutex.c
@@ -0,0 +1,187 @@
+/*******************************************************************************
+ *
+ * Module Name: utxfmutex - external AML mutex access functions
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utxfmutex")
+
+/* Local prototypes */
+static acpi_status
+acpi_ut_get_mutex_object(acpi_handle handle,
+ acpi_string pathname,
+ union acpi_operand_object **ret_obj);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_mutex_object
+ *
+ * PARAMETERS: Handle - Mutex or prefix handle (optional)
+ * Pathname - Mutex pathname (optional)
+ * ret_obj - Where the mutex object is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Get an AML mutex object. The mutex node is pointed to by
+ * Handle:Pathname. Either Handle or Pathname can be NULL, but
+ * not both.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_get_mutex_object(acpi_handle handle,
+ acpi_string pathname,
+ union acpi_operand_object **ret_obj)
+{
+ struct acpi_namespace_node *mutex_node;
+ union acpi_operand_object *mutex_obj;
+ acpi_status status;
+
+ /* Parameter validation */
+
+ if (!ret_obj || (!handle && !pathname)) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /* Get the namespace node for the mutex */
+
+ mutex_node = handle;
+ if (pathname != NULL) {
+ status = acpi_get_handle(handle, pathname,
+ ACPI_CAST_PTR(acpi_handle,
+ &mutex_node));
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+
+ /* Ensure that we actually have a Mutex object */
+
+ if (!mutex_node || (mutex_node->type != ACPI_TYPE_MUTEX)) {
+ return (AE_TYPE);
+ }
+
+ /* Get the low-level mutex object */
+
+ mutex_obj = acpi_ns_get_attached_object(mutex_node);
+ if (!mutex_obj) {
+ return (AE_NULL_OBJECT);
+ }
+
+ *ret_obj = mutex_obj;
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_acquire_mutex
+ *
+ * PARAMETERS: Handle - Mutex or prefix handle (optional)
+ * Pathname - Mutex pathname (optional)
+ * Timeout - Max time to wait for the lock (millisec)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Acquire an AML mutex. This is a device driver interface to
+ * AML mutex objects, and allows for transaction locking between
+ * drivers and AML code. The mutex node is pointed to by
+ * Handle:Pathname. Either Handle or Pathname can be NULL, but
+ * not both.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_acquire_mutex(acpi_handle handle, acpi_string pathname, u16 timeout)
+{
+ acpi_status status;
+ union acpi_operand_object *mutex_obj;
+
+ /* Get the low-level mutex associated with Handle:Pathname */
+
+ status = acpi_ut_get_mutex_object(handle, pathname, &mutex_obj);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Acquire the OS mutex */
+
+ status = acpi_os_acquire_mutex(mutex_obj->mutex.os_mutex, timeout);
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_release_mutex
+ *
+ * PARAMETERS: Handle - Mutex or prefix handle (optional)
+ * Pathname - Mutex pathname (optional)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Release an AML mutex. This is a device driver interface to
+ * AML mutex objects, and allows for transaction locking between
+ * drivers and AML code. The mutex node is pointed to by
+ * Handle:Pathname. Either Handle or Pathname can be NULL, but
+ * not both.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_release_mutex(acpi_handle handle, acpi_string pathname)
+{
+ acpi_status status;
+ union acpi_operand_object *mutex_obj;
+
+ /* Get the low-level mutex associated with Handle:Pathname */
+
+ status = acpi_ut_get_mutex_object(handle, pathname, &mutex_obj);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Release the OS mutex */
+
+ acpi_os_release_mutex(mutex_obj->mutex.os_mutex);
+ return (AE_OK);
+}
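As a rough illustration (not part of this patch), a driver could use the new interfaces to serialize a hardware transaction against AML; the mutex pathname "\_SB.PCI0.MUT0" and the 500 ms timeout below are assumptions:

    #include <linux/acpi.h>

    static int example_locked_transaction(void)
    {
            acpi_status status;

            /* Acquire the named ASL Mutex, waiting up to 500 ms */
            status = acpi_acquire_mutex(NULL, "\\_SB.PCI0.MUT0", 500);
            if (ACPI_FAILURE(status))
                    return -ENODEV;

            /* ... access the hardware that AML also manipulates ... */

            acpi_release_mutex(NULL, "\\_SB.PCI0.MUT0");
            return 0;
    }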
diff --git a/trunk/drivers/acpi/apei/apei-base.c b/trunk/drivers/acpi/apei/apei-base.c
index 61540360d5ce..e45350cb6ac8 100644
--- a/trunk/drivers/acpi/apei/apei-base.c
+++ b/trunk/drivers/acpi/apei/apei-base.c
@@ -34,13 +34,13 @@
#include
#include
#include
+#include
#include
#include
#include
#include
#include
#include
-#include
#include "apei-internal.h"
@@ -70,7 +70,7 @@ int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
{
int rc;
- rc = acpi_atomic_read(val, &entry->register_region);
+ rc = apei_read(val, &entry->register_region);
if (rc)
return rc;
*val >>= entry->register_region.bit_offset;
@@ -116,13 +116,13 @@ int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
val <<= entry->register_region.bit_offset;
if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
u64 valr = 0;
- rc = acpi_atomic_read(&valr, &entry->register_region);
+ rc = apei_read(&valr, &entry->register_region);
if (rc)
return rc;
valr &= ~(entry->mask << entry->register_region.bit_offset);
val |= valr;
}
- rc = acpi_atomic_write(val, &entry->register_region);
+ rc = apei_write(val, &entry->register_region);
return rc;
}
@@ -243,7 +243,7 @@ static int pre_map_gar_callback(struct apei_exec_context *ctx,
u8 ins = entry->instruction;
if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
- return acpi_pre_map_gar(&entry->register_region);
+ return acpi_os_map_generic_address(&entry->register_region);
return 0;
}
@@ -276,7 +276,7 @@ static int post_unmap_gar_callback(struct apei_exec_context *ctx,
u8 ins = entry->instruction;
if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
- acpi_post_unmap_gar(&entry->register_region);
+ acpi_os_unmap_generic_address(&entry->register_region);
return 0;
}
@@ -421,6 +421,17 @@ static int apei_resources_merge(struct apei_resources *resources1,
return 0;
}
+int apei_resources_add(struct apei_resources *resources,
+ unsigned long start, unsigned long size,
+ bool iomem)
+{
+ if (iomem)
+ return apei_res_add(&resources->iomem, start, size);
+ else
+ return apei_res_add(&resources->ioport, start, size);
+}
+EXPORT_SYMBOL_GPL(apei_resources_add);
+
/*
* EINJ has two groups of GARs (EINJ table entry and trigger table
* entry), so common resources are subtracted from the trigger table
@@ -438,8 +449,19 @@ int apei_resources_sub(struct apei_resources *resources1,
}
EXPORT_SYMBOL_GPL(apei_resources_sub);
+static int apei_get_nvs_callback(__u64 start, __u64 size, void *data)
+{
+ struct apei_resources *resources = data;
+ return apei_res_add(&resources->iomem, start, size);
+}
+
+static int apei_get_nvs_resources(struct apei_resources *resources)
+{
+ return acpi_nvs_for_each_region(apei_get_nvs_callback, resources);
+}
+
/*
- * IO memory/port rersource management mechanism is used to check
+ * IO memory/port resource management mechanism is used to check
* whether memory/port area used by GARs conflicts with normal memory
* or IO memory/port of devices.
*/
@@ -448,21 +470,35 @@ int apei_resources_request(struct apei_resources *resources,
{
struct apei_res *res, *res_bak = NULL;
struct resource *r;
+ struct apei_resources nvs_resources;
int rc;
rc = apei_resources_sub(resources, &apei_resources_all);
if (rc)
return rc;
+ /*
+ * Some firmware uses the ACPI NVS region, which has been marked as
+ * busy, so exclude it from APEI resources to avoid a false
+ * conflict.
+ */
+ apei_resources_init(&nvs_resources);
+ rc = apei_get_nvs_resources(&nvs_resources);
+ if (rc)
+ goto res_fini;
+ rc = apei_resources_sub(resources, &nvs_resources);
+ if (rc)
+ goto res_fini;
+
rc = -EINVAL;
list_for_each_entry(res, &resources->iomem, list) {
r = request_mem_region(res->start, res->end - res->start,
desc);
if (!r) {
pr_err(APEI_PFX
- "Can not request iomem region <%016llx-%016llx> for GARs.\n",
+ "Can not request [mem %#010llx-%#010llx] for %s registers\n",
(unsigned long long)res->start,
- (unsigned long long)res->end);
+ (unsigned long long)res->end - 1, desc);
res_bak = res;
goto err_unmap_iomem;
}
@@ -472,9 +508,9 @@ int apei_resources_request(struct apei_resources *resources,
r = request_region(res->start, res->end - res->start, desc);
if (!r) {
pr_err(APEI_PFX
- "Can not request ioport region <%016llx-%016llx> for GARs.\n",
+ "Can not request [io %#06llx-%#06llx] for %s registers\n",
(unsigned long long)res->start,
- (unsigned long long)res->end);
+ (unsigned long long)res->end - 1, desc);
res_bak = res;
goto err_unmap_ioport;
}
@@ -500,6 +536,8 @@ int apei_resources_request(struct apei_resources *resources,
break;
release_mem_region(res->start, res->end - res->start);
}
+res_fini:
+ apei_resources_fini(&nvs_resources);
return rc;
}
EXPORT_SYMBOL_GPL(apei_resources_request);
@@ -553,6 +591,96 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
return 0;
}
+/* read GAR in interrupt (including NMI) or process context */
+int apei_read(u64 *val, struct acpi_generic_address *reg)
+{
+ int rc;
+ u64 address;
+ u32 tmp, width = reg->bit_width;
+ acpi_status status;
+
+ rc = apei_check_gar(reg, &address);
+ if (rc)
+ return rc;
+
+ if (width == 64)
+ width = 32; /* Break into two 32-bit transfers */
+
+ *val = 0;
+ switch(reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ status = acpi_os_read_memory((acpi_physical_address)
+ address, &tmp, width);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ *val = tmp;
+
+ if (reg->bit_width == 64) {
+ /* Read the top 32 bits */
+ status = acpi_os_read_memory((acpi_physical_address)
+ (address + 4), &tmp, 32);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ *val |= ((u64)tmp << 32);
+ }
+ break;
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ status = acpi_os_read_port(address, (u32 *)val, reg->bit_width);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_read);
+
+/* write GAR in interrupt (including NMI) or process context */
+int apei_write(u64 val, struct acpi_generic_address *reg)
+{
+ int rc;
+ u64 address;
+ u32 width = reg->bit_width;
+ acpi_status status;
+
+ rc = apei_check_gar(reg, &address);
+ if (rc)
+ return rc;
+
+ if (width == 64)
+ width = 32; /* Break into two 32-bit transfers */
+
+ switch (reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ status = acpi_os_write_memory((acpi_physical_address)
+ address, ACPI_LODWORD(val),
+ width);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ if (reg->bit_width == 64) {
+ status = acpi_os_write_memory((acpi_physical_address)
+ (address + 4),
+ ACPI_HIDWORD(val), 32);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ }
+ break;
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ status = acpi_os_write_port(address, val, reg->bit_width);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_write);
+
static int collect_res_callback(struct apei_exec_context *ctx,
struct acpi_whea_header *entry,
void *data)
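As a rough sketch (not part of this patch) of the intended calling convention for the new helpers, which replace acpi_atomic_read()/acpi_atomic_write() and may be called from interrupt or NMI context; the function and variable names here are assumptions:

    /* Hypothetical caller: fetch a 64-bit value through a GAS entry */
    static int example_read_gas(struct acpi_generic_address *gas, u64 *val)
    {
            int rc;

            /*
             * apei_read() handles SystemMemory and SystemIO spaces; 64-bit
             * memory reads are split into two 32-bit accesses internally.
             */
            rc = apei_read(val, gas);
            if (rc)
                    return rc;      /* -EIO on access failure, -EINVAL otherwise */

            return 0;
    }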
diff --git a/trunk/drivers/acpi/apei/apei-internal.h b/trunk/drivers/acpi/apei/apei-internal.h
index f57050e7a5e7..cca240a33038 100644
--- a/trunk/drivers/acpi/apei/apei-internal.h
+++ b/trunk/drivers/acpi/apei/apei-internal.h
@@ -68,6 +68,9 @@ static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 actio
/* IP has been set in instruction function */
#define APEI_EXEC_SET_IP 1
+int apei_read(u64 *val, struct acpi_generic_address *reg);
+int apei_write(u64 val, struct acpi_generic_address *reg);
+
int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val);
int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val);
int apei_exec_read_register(struct apei_exec_context *ctx,
@@ -95,6 +98,9 @@ static inline void apei_resources_init(struct apei_resources *resources)
}
void apei_resources_fini(struct apei_resources *resources);
+int apei_resources_add(struct apei_resources *resources,
+ unsigned long start, unsigned long size,
+ bool iomem);
int apei_resources_sub(struct apei_resources *resources1,
struct apei_resources *resources2);
int apei_resources_request(struct apei_resources *resources,
diff --git a/trunk/drivers/acpi/apei/einj.c b/trunk/drivers/acpi/apei/einj.c
index 589b96c38704..5b898d4dda99 100644
--- a/trunk/drivers/acpi/apei/einj.c
+++ b/trunk/drivers/acpi/apei/einj.c
@@ -42,6 +42,42 @@
/* Firmware should respond within 1 milliseconds */
#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
+/*
+ * ACPI version 5 provides a SET_ERROR_TYPE_WITH_ADDRESS action.
+ */
+static int acpi5;
+
+struct set_error_type_with_address {
+ u32 type;
+ u32 vendor_extension;
+ u32 flags;
+ u32 apicid;
+ u64 memory_address;
+ u64 memory_address_range;
+ u32 pcie_sbdf;
+};
+enum {
+ SETWA_FLAGS_APICID = 1,
+ SETWA_FLAGS_MEM = 2,
+ SETWA_FLAGS_PCIE_SBDF = 4,
+};
+
+/*
+ * Vendor extensions for platform-specific operations
+ */
+struct vendor_error_type_extension {
+ u32 length;
+ u32 pcie_sbdf;
+ u16 vendor_id;
+ u16 device_id;
+ u8 rev_id;
+ u8 reserved[3];
+};
+
+static u32 vendor_flags;
+static struct debugfs_blob_wrapper vendor_blob;
+static char vendor_dev[64];
+
/*
* Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
* EINJ table through an unpublished extension. Use with caution as
@@ -103,7 +139,14 @@ static struct apei_exec_ins_type einj_ins_type[] = {
*/
static DEFINE_MUTEX(einj_mutex);
-static struct einj_parameter *einj_param;
+static void *einj_param;
+
+#ifndef readq
+static inline __u64 readq(volatile void __iomem *addr)
+{
+ return ((__u64)readl(addr+4) << 32) + readl(addr);
+}
+#endif
#ifndef writeq
static inline void writeq(__u64 val, volatile void __iomem *addr)
@@ -158,10 +201,31 @@ static int einj_timedout(u64 *t)
return 0;
}
-static u64 einj_get_parameter_address(void)
+static void check_vendor_extension(u64 paddr,
+ struct set_error_type_with_address *v5param)
+{
+ int offset = readl(&v5param->vendor_extension);
+ struct vendor_error_type_extension *v;
+ u32 sbdf;
+
+ if (!offset)
+ return;
+ v = ioremap(paddr + offset, sizeof(*v));
+ if (!v)
+ return;
+ sbdf = readl(&v->pcie_sbdf);
+ sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n",
+ sbdf >> 24, (sbdf >> 16) & 0xff,
+ (sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7,
+ readw(&v->vendor_id), readw(&v->device_id),
+ readb(&v->rev_id));
+ iounmap(v);
+}
+
+static void *einj_get_parameter_address(void)
{
int i;
- u64 paddr = 0;
+ u64 paddrv4 = 0, paddrv5 = 0;
struct acpi_whea_header *entry;
entry = EINJ_TAB_ENTRY(einj_tab);
@@ -170,12 +234,40 @@ static u64 einj_get_parameter_address(void)
entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
entry->register_region.space_id ==
ACPI_ADR_SPACE_SYSTEM_MEMORY)
- memcpy(&paddr, &entry->register_region.address,
- sizeof(paddr));
+ memcpy(&paddrv4, &entry->register_region.address,
+ sizeof(paddrv4));
+ if (entry->action == ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS &&
+ entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
+ entry->register_region.space_id ==
+ ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ memcpy(&paddrv5, &entry->register_region.address,
+ sizeof(paddrv5));
entry++;
}
+ if (paddrv5) {
+ struct set_error_type_with_address *v5param;
+
+ v5param = ioremap(paddrv5, sizeof(*v5param));
+ if (v5param) {
+ acpi5 = 1;
+ check_vendor_extension(paddrv5, v5param);
+ return v5param;
+ }
+ }
+ if (paddrv4) {
+ struct einj_parameter *v4param;
+
+ v4param = ioremap(paddrv4, sizeof(*v4param));
+ if (!v4param)
+ return 0;
+ if (readq(&v4param->reserved1) || readq(&v4param->reserved2)) {
+ iounmap(v4param);
+ return 0;
+ }
+ return v4param;
+ }
- return paddr;
+ return 0;
}
/* do sanity check to trigger table */
@@ -194,8 +286,29 @@ static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab)
return 0;
}
+static struct acpi_generic_address *einj_get_trigger_parameter_region(
+ struct acpi_einj_trigger *trigger_tab, u64 param1, u64 param2)
+{
+ int i;
+ struct acpi_whea_header *entry;
+
+ entry = (struct acpi_whea_header *)
+ ((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
+ for (i = 0; i < trigger_tab->entry_count; i++) {
+ if (entry->action == ACPI_EINJ_TRIGGER_ERROR &&
+ entry->instruction == ACPI_EINJ_WRITE_REGISTER_VALUE &&
+ entry->register_region.space_id ==
+ ACPI_ADR_SPACE_SYSTEM_MEMORY &&
+ (entry->register_region.address & param2) == (param1 & param2))
+ return &entry->register_region;
+ entry++;
+ }
+
+ return NULL;
+}
/* Execute instructions in trigger error action table */
-static int __einj_error_trigger(u64 trigger_paddr)
+static int __einj_error_trigger(u64 trigger_paddr, u32 type,
+ u64 param1, u64 param2)
{
struct acpi_einj_trigger *trigger_tab = NULL;
struct apei_exec_context trigger_ctx;
@@ -204,14 +317,16 @@ static int __einj_error_trigger(u64 trigger_paddr)
struct resource *r;
u32 table_size;
int rc = -EIO;
+ struct acpi_generic_address *trigger_param_region = NULL;
r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
"APEI EINJ Trigger Table");
if (!r) {
pr_err(EINJ_PFX
- "Can not request iomem region <%016llx-%016llx> for Trigger table.\n",
+ "Can not request [mem %#010llx-%#010llx] for Trigger table\n",
(unsigned long long)trigger_paddr,
- (unsigned long long)trigger_paddr+sizeof(*trigger_tab));
+ (unsigned long long)trigger_paddr +
+ sizeof(*trigger_tab) - 1);
goto out;
}
trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
@@ -232,9 +347,9 @@ static int __einj_error_trigger(u64 trigger_paddr)
"APEI EINJ Trigger Table");
if (!r) {
pr_err(EINJ_PFX
-"Can not request iomem region <%016llx-%016llx> for Trigger Table Entry.\n",
- (unsigned long long)trigger_paddr+sizeof(*trigger_tab),
- (unsigned long long)trigger_paddr + table_size);
+"Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n",
+ (unsigned long long)trigger_paddr + sizeof(*trigger_tab),
+ (unsigned long long)trigger_paddr + table_size - 1);
goto out_rel_header;
}
iounmap(trigger_tab);
@@ -255,6 +370,30 @@ static int __einj_error_trigger(u64 trigger_paddr)
rc = apei_resources_sub(&trigger_resources, &einj_resources);
if (rc)
goto out_fini;
+ /*
+ * Some firmware will access the target address specified in
+ * param1 to trigger the error when injecting a memory error.
+ * This causes a resource conflict with regular memory, so
+ * remove it from the trigger table resources.
+ */
+ if (param_extension && (type & 0x0038) && param2) {
+ struct apei_resources addr_resources;
+ apei_resources_init(&addr_resources);
+ trigger_param_region = einj_get_trigger_parameter_region(
+ trigger_tab, param1, param2);
+ if (trigger_param_region) {
+ rc = apei_resources_add(&addr_resources,
+ trigger_param_region->address,
+ trigger_param_region->bit_width/8, true);
+ if (rc)
+ goto out_fini;
+ rc = apei_resources_sub(&trigger_resources,
+ &addr_resources);
+ }
+ apei_resources_fini(&addr_resources);
+ if (rc)
+ goto out_fini;
+ }
rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger");
if (rc)
goto out_fini;
@@ -293,12 +432,56 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, type);
- rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
- if (rc)
- return rc;
- if (einj_param) {
- writeq(param1, &einj_param->param1);
- writeq(param2, &einj_param->param2);
+ if (acpi5) {
+ struct set_error_type_with_address *v5param = einj_param;
+
+ writel(type, &v5param->type);
+ if (type & 0x80000000) {
+ switch (vendor_flags) {
+ case SETWA_FLAGS_APICID:
+ writel(param1, &v5param->apicid);
+ break;
+ case SETWA_FLAGS_MEM:
+ writeq(param1, &v5param->memory_address);
+ writeq(param2, &v5param->memory_address_range);
+ break;
+ case SETWA_FLAGS_PCIE_SBDF:
+ writel(param1, &v5param->pcie_sbdf);
+ break;
+ }
+ writel(vendor_flags, &v5param->flags);
+ } else {
+ switch (type) {
+ case ACPI_EINJ_PROCESSOR_CORRECTABLE:
+ case ACPI_EINJ_PROCESSOR_UNCORRECTABLE:
+ case ACPI_EINJ_PROCESSOR_FATAL:
+ writel(param1, &v5param->apicid);
+ writel(SETWA_FLAGS_APICID, &v5param->flags);
+ break;
+ case ACPI_EINJ_MEMORY_CORRECTABLE:
+ case ACPI_EINJ_MEMORY_UNCORRECTABLE:
+ case ACPI_EINJ_MEMORY_FATAL:
+ writeq(param1, &v5param->memory_address);
+ writeq(param2, &v5param->memory_address_range);
+ writel(SETWA_FLAGS_MEM, &v5param->flags);
+ break;
+ case ACPI_EINJ_PCIX_CORRECTABLE:
+ case ACPI_EINJ_PCIX_UNCORRECTABLE:
+ case ACPI_EINJ_PCIX_FATAL:
+ writel(param1, &v5param->pcie_sbdf);
+ writel(SETWA_FLAGS_PCIE_SBDF, &v5param->flags);
+ break;
+ }
+ }
+ } else {
+ rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
+ if (rc)
+ return rc;
+ if (einj_param) {
+ struct einj_parameter *v4param = einj_param;
+ writeq(param1, &v4param->param1);
+ writeq(param2, &v4param->param2);
+ }
}
rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
if (rc)
@@ -324,7 +507,7 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
if (rc)
return rc;
trigger_paddr = apei_exec_ctx_get_output(&ctx);
- rc = __einj_error_trigger(trigger_paddr);
+ rc = __einj_error_trigger(trigger_paddr, type, param1, param2);
if (rc)
return rc;
rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION);
@@ -408,15 +591,25 @@ static int error_type_set(void *data, u64 val)
{
int rc;
u32 available_error_type = 0;
+ u32 tval, vendor;
+
+ /*
+ * Vendor-defined types have the 0x80000000 bit set and
+ * are not enumerated by ACPI_EINJ_GET_ERROR_TYPE.
+ */
+ vendor = val & 0x80000000;
+ tval = val & 0x7fffffff;
/* Only one error type can be specified */
- if (val & (val - 1))
- return -EINVAL;
- rc = einj_get_available_error_type(&available_error_type);
- if (rc)
- return rc;
- if (!(val & available_error_type))
+ if (tval & (tval - 1))
return -EINVAL;
+ if (!vendor) {
+ rc = einj_get_available_error_type(&available_error_type);
+ if (rc)
+ return rc;
+ if (!(val & available_error_type))
+ return -EINVAL;
+ }
error_type = val;
return 0;
@@ -455,7 +648,6 @@ static int einj_check_table(struct acpi_table_einj *einj_tab)
static int __init einj_init(void)
{
int rc;
- u64 param_paddr;
acpi_status status;
struct dentry *fentry;
struct apei_exec_context ctx;
@@ -465,10 +657,9 @@ static int __init einj_init(void)
status = acpi_get_table(ACPI_SIG_EINJ, 0,
(struct acpi_table_header **)&einj_tab);
- if (status == AE_NOT_FOUND) {
- pr_info(EINJ_PFX "Table is not found!\n");
+ if (status == AE_NOT_FOUND)
return -ENODEV;
- } else if (ACPI_FAILURE(status)) {
+ else if (ACPI_FAILURE(status)) {
const char *msg = acpi_format_exception(status);
pr_err(EINJ_PFX "Failed to get table, %s\n", msg);
return -EINVAL;
@@ -509,23 +700,30 @@ static int __init einj_init(void)
rc = apei_exec_pre_map_gars(&ctx);
if (rc)
goto err_release;
- if (param_extension) {
- param_paddr = einj_get_parameter_address();
- if (param_paddr) {
- einj_param = ioremap(param_paddr, sizeof(*einj_param));
- rc = -ENOMEM;
- if (!einj_param)
- goto err_unmap;
- fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
- einj_debug_dir, &error_param1);
- if (!fentry)
- goto err_unmap;
- fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
- einj_debug_dir, &error_param2);
- if (!fentry)
- goto err_unmap;
- } else
- pr_warn(EINJ_PFX "Parameter extension is not supported.\n");
+
+ einj_param = einj_get_parameter_address();
+ if ((param_extension || acpi5) && einj_param) {
+ fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param1);
+ if (!fentry)
+ goto err_unmap;
+ fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param2);
+ if (!fentry)
+ goto err_unmap;
+ }
+
+ if (vendor_dev[0]) {
+ vendor_blob.data = vendor_dev;
+ vendor_blob.size = strlen(vendor_dev);
+ fentry = debugfs_create_blob("vendor", S_IRUSR,
+ einj_debug_dir, &vendor_blob);
+ if (!fentry)
+ goto err_unmap;
+ fentry = debugfs_create_x32("vendor_flags", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &vendor_flags);
+ if (!fentry)
+ goto err_unmap;
}
pr_info(EINJ_PFX "Error INJection is initialized.\n");
diff --git a/trunk/drivers/acpi/apei/erst.c b/trunk/drivers/acpi/apei/erst.c
index 6a9e3bad13f4..eb9fab5b96e4 100644
--- a/trunk/drivers/acpi/apei/erst.c
+++ b/trunk/drivers/acpi/apei/erst.c
@@ -1127,10 +1127,9 @@ static int __init erst_init(void)
status = acpi_get_table(ACPI_SIG_ERST, 0,
(struct acpi_table_header **)&erst_tab);
- if (status == AE_NOT_FOUND) {
- pr_info(ERST_PFX "Table is not found!\n");
+ if (status == AE_NOT_FOUND)
goto err;
- } else if (ACPI_FAILURE(status)) {
+ else if (ACPI_FAILURE(status)) {
const char *msg = acpi_format_exception(status);
pr_err(ERST_PFX "Failed to get table, %s\n", msg);
rc = -EINVAL;
diff --git a/trunk/drivers/acpi/apei/ghes.c b/trunk/drivers/acpi/apei/ghes.c
index ebaf037a787b..9b3cac0abecc 100644
--- a/trunk/drivers/acpi/apei/ghes.c
+++ b/trunk/drivers/acpi/apei/ghes.c
@@ -33,6 +33,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -45,8 +46,9 @@
#include
#include
#include
+#include
+#include
#include
-#include
#include
#include
#include
@@ -299,7 +301,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
if (!ghes)
return ERR_PTR(-ENOMEM);
ghes->generic = generic;
- rc = acpi_pre_map_gar(&generic->error_status_address);
+ rc = acpi_os_map_generic_address(&generic->error_status_address);
if (rc)
goto err_free;
error_block_length = generic->error_block_length;
@@ -319,7 +321,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
return ghes;
err_unmap:
- acpi_post_unmap_gar(&generic->error_status_address);
+ acpi_os_unmap_generic_address(&generic->error_status_address);
err_free:
kfree(ghes);
return ERR_PTR(rc);
@@ -328,7 +330,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
static void ghes_fini(struct ghes *ghes)
{
kfree(ghes->estatus);
- acpi_post_unmap_gar(&ghes->generic->error_status_address);
+ acpi_os_unmap_generic_address(&ghes->generic->error_status_address);
}
enum {
@@ -399,7 +401,7 @@ static int ghes_read_estatus(struct ghes *ghes, int silent)
u32 len;
int rc;
- rc = acpi_atomic_read(&buf_paddr, &g->error_status_address);
+ rc = apei_read(&buf_paddr, &g->error_status_address);
if (rc) {
if (!silent && printk_ratelimit())
pr_warning(FW_WARN GHES_PFX
@@ -476,6 +478,27 @@ static void ghes_do_proc(const struct acpi_hest_generic_status *estatus)
}
#endif
}
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+ else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
+ CPER_SEC_PCIE)) {
+ struct cper_sec_pcie *pcie_err;
+ pcie_err = (struct cper_sec_pcie *)(gdata+1);
+ if (sev == GHES_SEV_RECOVERABLE &&
+ sec_sev == GHES_SEV_RECOVERABLE &&
+ pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
+ pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
+ unsigned int devfn;
+ int aer_severity;
+ devfn = PCI_DEVFN(pcie_err->device_id.device,
+ pcie_err->device_id.function);
+ aer_severity = cper_severity_to_aer(sev);
+ aer_recover_queue(pcie_err->device_id.segment,
+ pcie_err->device_id.bus,
+ devfn, aer_severity);
+ }
+
+ }
+#endif
}
}
@@ -483,16 +506,22 @@ static void __ghes_print_estatus(const char *pfx,
const struct acpi_hest_generic *generic,
const struct acpi_hest_generic_status *estatus)
{
+ static atomic_t seqno;
+ unsigned int curr_seqno;
+ char pfx_seq[64];
+
if (pfx == NULL) {
if (ghes_severity(estatus->error_severity) <=
GHES_SEV_CORRECTED)
- pfx = KERN_WARNING HW_ERR;
+ pfx = KERN_WARNING;
else
- pfx = KERN_ERR HW_ERR;
+ pfx = KERN_ERR;
}
+ curr_seqno = atomic_inc_return(&seqno);
+ snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
- pfx, generic->header.source_id);
- apei_estatus_print(pfx, estatus);
+ pfx_seq, generic->header.source_id);
+ apei_estatus_print(pfx_seq, estatus);
}
static int ghes_print_estatus(const char *pfx,
@@ -711,26 +740,34 @@ static int ghes_notify_sci(struct notifier_block *this,
return ret;
}
+static struct llist_node *llist_nodes_reverse(struct llist_node *llnode)
+{
+ struct llist_node *next, *tail = NULL;
+
+ while (llnode) {
+ next = llnode->next;
+ llnode->next = tail;
+ tail = llnode;
+ llnode = next;
+ }
+
+ return tail;
+}
+
static void ghes_proc_in_irq(struct irq_work *irq_work)
{
- struct llist_node *llnode, *next, *tail = NULL;
+ struct llist_node *llnode, *next;
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic *generic;
struct acpi_hest_generic_status *estatus;
u32 len, node_len;
+ llnode = llist_del_all(&ghes_estatus_llist);
/*
* Because the time order of estatus in list is reversed,
* revert it back to proper order.
*/
- llnode = llist_del_all(&ghes_estatus_llist);
- while (llnode) {
- next = llnode->next;
- llnode->next = tail;
- tail = llnode;
- llnode = next;
- }
- llnode = tail;
+ llnode = llist_nodes_reverse(llnode);
while (llnode) {
next = llnode->next;
estatus_node = llist_entry(llnode, struct ghes_estatus_node,
@@ -750,6 +787,32 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
}
}
+static void ghes_print_queued_estatus(void)
+{
+ struct llist_node *llnode;
+ struct ghes_estatus_node *estatus_node;
+ struct acpi_hest_generic *generic;
+ struct acpi_hest_generic_status *estatus;
+ u32 len, node_len;
+
+ llnode = llist_del_all(&ghes_estatus_llist);
+ /*
+ * Because the time order of estatus in list is reversed,
+ * revert it back to proper order.
+ */
+ llnode = llist_nodes_reverse(llnode);
+ while (llnode) {
+ estatus_node = llist_entry(llnode, struct ghes_estatus_node,
+ llnode);
+ estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
+ len = apei_estatus_len(estatus);
+ node_len = GHES_ESTATUS_NODE_LEN(len);
+ generic = estatus_node->generic;
+ ghes_print_estatus(NULL, generic, estatus);
+ llnode = llnode->next;
+ }
+}
+
static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
{
struct ghes *ghes, *ghes_global = NULL;
@@ -775,7 +838,8 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
if (sev_global >= GHES_SEV_PANIC) {
oops_begin();
- __ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global->generic,
+ ghes_print_queued_estatus();
+ __ghes_print_estatus(KERN_EMERG, ghes_global->generic,
ghes_global->estatus);
/* reboot to log the error! */
if (panic_timeout == 0)
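In the new PCIe section handler above, the device and function numbers carried in the CPER record are packed back into a devfn byte with PCI_DEVFN() before being handed to aer_recover_queue(). The packing uses a 5-bit slot and a 3-bit function number; a small standalone illustration:

#include <stdint.h>
#include <stdio.h>

/* Same packing as the kernel's PCI_DEVFN()/PCI_SLOT()/PCI_FUNC() macros. */
#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)		((devfn) & 0x07)

int main(void)
{
	unsigned int devfn = PCI_DEVFN(0x1f, 3);	/* device 31, function 3 */

	printf("devfn=0x%02x slot=0x%02x func=%u\n", devfn,
	       (unsigned int)PCI_SLOT(devfn), (unsigned int)PCI_FUNC(devfn));
	/* prints: devfn=0xfb slot=0x1f func=3 */
	return 0;
}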
diff --git a/trunk/drivers/acpi/apei/hest.c b/trunk/drivers/acpi/apei/hest.c
index ee7fddc4665c..7f00cf38098f 100644
--- a/trunk/drivers/acpi/apei/hest.c
+++ b/trunk/drivers/acpi/apei/hest.c
@@ -221,10 +221,9 @@ void __init acpi_hest_init(void)
status = acpi_get_table(ACPI_SIG_HEST, 0,
(struct acpi_table_header **)&hest_tab);
- if (status == AE_NOT_FOUND) {
- pr_info(HEST_PFX "Table not found.\n");
+ if (status == AE_NOT_FOUND)
goto err;
- } else if (ACPI_FAILURE(status)) {
+ else if (ACPI_FAILURE(status)) {
const char *msg = acpi_format_exception(status);
pr_err(HEST_PFX "Failed to get table, %s\n", msg);
rc = -EINVAL;
diff --git a/trunk/drivers/acpi/atomicio.c b/trunk/drivers/acpi/atomicio.c
index cfc0cc10af39..d4a5b3d3657b 100644
--- a/trunk/drivers/acpi/atomicio.c
+++ b/trunk/drivers/acpi/atomicio.c
@@ -32,6 +32,8 @@
#include
#include
#include
+#include
+#include
#include
#define ACPI_PFX "ACPI: "
@@ -97,6 +99,37 @@ static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
return NULL;
}
+#ifndef CONFIG_IA64
+#define should_use_kmap(pfn) page_is_ram(pfn)
+#else
+/* ioremap will take care of cache attributes */
+#define should_use_kmap(pfn) 0
+#endif
+
+static void __iomem *acpi_map(phys_addr_t pg_off, unsigned long pg_sz)
+{
+ unsigned long pfn;
+
+ pfn = pg_off >> PAGE_SHIFT;
+ if (should_use_kmap(pfn)) {
+ if (pg_sz > PAGE_SIZE)
+ return NULL;
+ return (void __iomem __force *)kmap(pfn_to_page(pfn));
+ } else
+ return ioremap(pg_off, pg_sz);
+}
+
+static void acpi_unmap(phys_addr_t pg_off, void __iomem *vaddr)
+{
+ unsigned long pfn;
+
+ pfn = pg_off >> PAGE_SHIFT;
+ if (page_is_ram(pfn))
+ kunmap(pfn_to_page(pfn));
+ else
+ iounmap(vaddr);
+}
+
/*
* Used to pre-map the specified IO memory area. First try to find
* whether the area is already pre-mapped, if it is, increase the
@@ -119,7 +152,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
pg_off = paddr & PAGE_MASK;
pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
- vaddr = ioremap(pg_off, pg_sz);
+ vaddr = acpi_map(pg_off, pg_sz);
if (!vaddr)
return NULL;
map = kmalloc(sizeof(*map), GFP_KERNEL);
@@ -135,7 +168,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
vaddr = __acpi_try_ioremap(paddr, size);
if (vaddr) {
spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
- iounmap(map->vaddr);
+ acpi_unmap(pg_off, map->vaddr);
kfree(map);
return vaddr;
}
@@ -144,7 +177,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
return map->vaddr + (paddr - map->paddr);
err_unmap:
- iounmap(vaddr);
+ acpi_unmap(pg_off, vaddr);
return NULL;
}
@@ -177,7 +210,7 @@ static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
return;
synchronize_rcu();
- iounmap(map->vaddr);
+ acpi_unmap(map->paddr, map->vaddr);
kfree(map);
}
@@ -260,6 +293,21 @@ int acpi_post_unmap_gar(struct acpi_generic_address *reg)
}
EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
+#ifdef readq
+static inline u64 read64(const volatile void __iomem *addr)
+{
+ return readq(addr);
+}
+#else
+static inline u64 read64(const volatile void __iomem *addr)
+{
+ u64 l, h;
+ l = readl(addr);
+ h = readl(addr+4);
+ return l | (h << 32);
+}
+#endif
+
/*
* Can be used in atomic (including NMI) or process context. RCU read
* lock can only be released after the IO memory area accessing.
@@ -280,11 +328,9 @@ static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
case 32:
*val = readl(addr);
break;
-#ifdef readq
case 64:
- *val = readq(addr);
+ *val = read64(addr);
break;
-#endif
default:
return -EINVAL;
}
@@ -293,6 +339,19 @@ static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
return 0;
}
+#ifdef writeq
+static inline void write64(u64 val, volatile void __iomem *addr)
+{
+ writeq(val, addr);
+}
+#else
+static inline void write64(u64 val, volatile void __iomem *addr)
+{
+ writel(val, addr);
+ writel(val>>32, addr+4);
+}
+#endif
+
static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
{
void __iomem *addr;
@@ -309,11 +368,9 @@ static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
case 32:
writel(val, addr);
break;
-#ifdef writeq
case 64:
- writeq(val, addr);
+ write64(val, addr);
break;
-#endif
default:
return -EINVAL;
}
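The read64()/write64() helpers introduced above let the 64-bit register cases work on architectures that lack readq()/writeq() by issuing two 32-bit accesses, with the low word at the lower address; the split write is of course no longer a single bus transaction. A user-space sketch of the same composition, with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lo = 0xdeadbeef;			/* readl(addr)     */
	uint32_t hi = 0x01234567;			/* readl(addr + 4) */
	uint64_t v  = (uint64_t)lo | ((uint64_t)hi << 32);

	printf("%#018llx\n", (unsigned long long)v);	/* 0x01234567deadbeef */
	return 0;
}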
diff --git a/trunk/drivers/acpi/numa.c b/trunk/drivers/acpi/numa.c
index 3b5c3189fd99..e56f3be7b07d 100644
--- a/trunk/drivers/acpi/numa.c
+++ b/trunk/drivers/acpi/numa.c
@@ -45,6 +45,8 @@ static int pxm_to_node_map[MAX_PXM_DOMAINS]
static int node_to_pxm_map[MAX_NUMNODES]
= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
+unsigned char acpi_srat_revision __initdata;
+
int pxm_to_node(int pxm)
{
if (pxm < 0)
@@ -255,9 +257,13 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
static int __init acpi_parse_srat(struct acpi_table_header *table)
{
+ struct acpi_table_srat *srat;
if (!table)
return -EINVAL;
+ srat = (struct acpi_table_srat *)table;
+ acpi_srat_revision = srat->header.revision;
+
/* Real work done in acpi_table_parse_srat below. */
return 0;
diff --git a/trunk/drivers/acpi/nvs.c b/trunk/drivers/acpi/nvs.c
index 096787b43c96..7a2035fa8c71 100644
--- a/trunk/drivers/acpi/nvs.c
+++ b/trunk/drivers/acpi/nvs.c
@@ -15,6 +15,56 @@
#include
#include
+/* ACPI NVS regions, APEI may use it */
+
+struct nvs_region {
+ __u64 phys_start;
+ __u64 size;
+ struct list_head node;
+};
+
+static LIST_HEAD(nvs_region_list);
+
+#ifdef CONFIG_ACPI_SLEEP
+static int suspend_nvs_register(unsigned long start, unsigned long size);
+#else
+static inline int suspend_nvs_register(unsigned long a, unsigned long b)
+{
+ return 0;
+}
+#endif
+
+int acpi_nvs_register(__u64 start, __u64 size)
+{
+ struct nvs_region *region;
+
+ region = kmalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+ region->phys_start = start;
+ region->size = size;
+ list_add_tail(&region->node, &nvs_region_list);
+
+ return suspend_nvs_register(start, size);
+}
+
+int acpi_nvs_for_each_region(int (*func)(__u64 start, __u64 size, void *data),
+ void *data)
+{
+ int rc;
+ struct nvs_region *region;
+
+ list_for_each_entry(region, &nvs_region_list, node) {
+ rc = func(region->phys_start, region->size, data);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+
+#ifdef CONFIG_ACPI_SLEEP
/*
* Platforms, like ACPI, may want us to save some memory used by them during
* suspend and to restore the contents of this memory during the subsequent
@@ -41,7 +91,7 @@ static LIST_HEAD(nvs_list);
* things so that the data from page-aligned addresses in this region will
* be copied into separate RAM pages.
*/
-int suspend_nvs_register(unsigned long start, unsigned long size)
+static int suspend_nvs_register(unsigned long start, unsigned long size)
{
struct nvs_page *entry, *next;
@@ -159,3 +209,4 @@ void suspend_nvs_restore(void)
if (entry->data)
memcpy(entry->kaddr, entry->data, entry->size);
}
+#endif
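acpi_nvs_register() above only records each NVS region on a list; consumers such as APEI can later walk that list with acpi_nvs_for_each_region(), whose callback stops the walk by returning non-zero. A sketch of a caller (the callback and its printout are invented here, not part of the patch):

#include <linux/acpi.h>
#include <linux/printk.h>

static int example_nvs_region(__u64 start, __u64 size, void *data)
{
	u64 *total = data;

	*total += size;
	pr_info("NVS region: %#llx, %llu bytes\n",
		(unsigned long long)start, (unsigned long long)size);
	return 0;				/* non-zero would stop the walk */
}

static void example_dump_nvs(void)
{
	u64 total = 0;

	acpi_nvs_for_each_region(example_nvs_region, &total);
	pr_info("NVS total: %llu bytes\n", (unsigned long long)total);
}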
diff --git a/trunk/drivers/acpi/osl.c b/trunk/drivers/acpi/osl.c
index f31c5c5f1b7e..fcc12d842bcc 100644
--- a/trunk/drivers/acpi/osl.c
+++ b/trunk/drivers/acpi/osl.c
@@ -83,19 +83,6 @@ static struct workqueue_struct *kacpi_notify_wq;
struct workqueue_struct *kacpi_hotplug_wq;
EXPORT_SYMBOL(kacpi_hotplug_wq);
-struct acpi_res_list {
- resource_size_t start;
- resource_size_t end;
- acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
- char name[5]; /* only can have a length of 4 chars, make use of this
- one instead of res->name, no need to kalloc then */
- struct list_head resource_list;
- int count;
-};
-
-static LIST_HEAD(resource_list_head);
-static DEFINE_SPINLOCK(acpi_res_lock);
-
/*
* This list of permanent mappings is for memory that may be accessed from
* interrupt context, where we can't do the ioremap().
@@ -166,17 +153,21 @@ static u32 acpi_osi_handler(acpi_string interface, u32 supported)
return supported;
}
-static void __init acpi_request_region (struct acpi_generic_address *addr,
+static void __init acpi_request_region (struct acpi_generic_address *gas,
unsigned int length, char *desc)
{
- if (!addr->address || !length)
+ u64 addr;
+
+ /* Handle possible alignment issues */
+ memcpy(&addr, &gas->address, sizeof(addr));
+ if (!addr || !length)
return;
/* Resources are never freed */
- if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
- request_region(addr->address, length, desc);
- else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
- request_mem_region(addr->address, length, desc);
+ if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+ request_region(addr, length, desc);
+ else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ request_mem_region(addr, length, desc);
}
static int __init acpi_reserve_resources(void)
@@ -427,35 +418,42 @@ void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
__acpi_unmap_table(virt, size);
}
-static int acpi_os_map_generic_address(struct acpi_generic_address *addr)
+int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
+ u64 addr;
void __iomem *virt;
- if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
return 0;
- if (!addr->address || !addr->bit_width)
+ /* Handle possible alignment issues */
+ memcpy(&addr, &gas->address, sizeof(addr));
+ if (!addr || !gas->bit_width)
return -EINVAL;
- virt = acpi_os_map_memory(addr->address, addr->bit_width / 8);
+ virt = acpi_os_map_memory(addr, gas->bit_width / 8);
if (!virt)
return -EIO;
return 0;
}
+EXPORT_SYMBOL(acpi_os_map_generic_address);
-static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
+void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
+ u64 addr;
struct acpi_ioremap *map;
- if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
return;
- if (!addr->address || !addr->bit_width)
+ /* Handle possible alignment issues */
+ memcpy(&addr, &gas->address, sizeof(addr));
+ if (!addr || !gas->bit_width)
return;
mutex_lock(&acpi_ioremap_lock);
- map = acpi_map_lookup(addr->address, addr->bit_width / 8);
+ map = acpi_map_lookup(addr, gas->bit_width / 8);
if (!map) {
mutex_unlock(&acpi_ioremap_lock);
return;
@@ -465,6 +463,7 @@ static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
acpi_os_map_cleanup(map);
}
+EXPORT_SYMBOL(acpi_os_unmap_generic_address);
#ifdef ACPI_FUTURE_USAGE
acpi_status
@@ -1278,44 +1277,28 @@ __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
* drivers */
int acpi_check_resource_conflict(const struct resource *res)
{
- struct acpi_res_list *res_list_elem;
- int ioport = 0, clash = 0;
+ acpi_adr_space_type space_id;
+ acpi_size length;
+ u8 warn = 0;
+ int clash = 0;
if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
return 0;
if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
return 0;
- ioport = res->flags & IORESOURCE_IO;
-
- spin_lock(&acpi_res_lock);
- list_for_each_entry(res_list_elem, &resource_list_head,
- resource_list) {
- if (ioport && (res_list_elem->resource_type
- != ACPI_ADR_SPACE_SYSTEM_IO))
- continue;
- if (!ioport && (res_list_elem->resource_type
- != ACPI_ADR_SPACE_SYSTEM_MEMORY))
- continue;
+ if (res->flags & IORESOURCE_IO)
+ space_id = ACPI_ADR_SPACE_SYSTEM_IO;
+ else
+ space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
- if (res->end < res_list_elem->start
- || res_list_elem->end < res->start)
- continue;
- clash = 1;
- break;
- }
- spin_unlock(&acpi_res_lock);
+ length = res->end - res->start + 1;
+ if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
+ warn = 1;
+ clash = acpi_check_address_range(space_id, res->start, length, warn);
if (clash) {
if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
- printk(KERN_WARNING "ACPI: resource %s %pR"
- " conflicts with ACPI region %s "
- "[%s 0x%zx-0x%zx]\n",
- res->name, res, res_list_elem->name,
- (res_list_elem->resource_type ==
- ACPI_ADR_SPACE_SYSTEM_IO) ? "io" : "mem",
- (size_t) res_list_elem->start,
- (size_t) res_list_elem->end);
if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
printk(KERN_NOTICE "ACPI: This conflict may"
" cause random problems and system"
@@ -1467,155 +1450,6 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
kmem_cache_free(cache, object);
return (AE_OK);
}
-
-static inline int acpi_res_list_add(struct acpi_res_list *res)
-{
- struct acpi_res_list *res_list_elem;
-
- list_for_each_entry(res_list_elem, &resource_list_head,
- resource_list) {
-
- if (res->resource_type == res_list_elem->resource_type &&
- res->start == res_list_elem->start &&
- res->end == res_list_elem->end) {
-
- /*
- * The Region(addr,len) already exist in the list,
- * just increase the count
- */
-
- res_list_elem->count++;
- return 0;
- }
- }
-
- res->count = 1;
- list_add(&res->resource_list, &resource_list_head);
- return 1;
-}
-
-static inline void acpi_res_list_del(struct acpi_res_list *res)
-{
- struct acpi_res_list *res_list_elem;
-
- list_for_each_entry(res_list_elem, &resource_list_head,
- resource_list) {
-
- if (res->resource_type == res_list_elem->resource_type &&
- res->start == res_list_elem->start &&
- res->end == res_list_elem->end) {
-
- /*
- * If the res count is decreased to 0,
- * remove and free it
- */
-
- if (--res_list_elem->count == 0) {
- list_del(&res_list_elem->resource_list);
- kfree(res_list_elem);
- }
- return;
- }
- }
-}
-
-acpi_status
-acpi_os_invalidate_address(
- u8 space_id,
- acpi_physical_address address,
- acpi_size length)
-{
- struct acpi_res_list res;
-
- switch (space_id) {
- case ACPI_ADR_SPACE_SYSTEM_IO:
- case ACPI_ADR_SPACE_SYSTEM_MEMORY:
- /* Only interference checks against SystemIO and SystemMemory
- are needed */
- res.start = address;
- res.end = address + length - 1;
- res.resource_type = space_id;
- spin_lock(&acpi_res_lock);
- acpi_res_list_del(&res);
- spin_unlock(&acpi_res_lock);
- break;
- case ACPI_ADR_SPACE_PCI_CONFIG:
- case ACPI_ADR_SPACE_EC:
- case ACPI_ADR_SPACE_SMBUS:
- case ACPI_ADR_SPACE_CMOS:
- case ACPI_ADR_SPACE_PCI_BAR_TARGET:
- case ACPI_ADR_SPACE_DATA_TABLE:
- case ACPI_ADR_SPACE_FIXED_HARDWARE:
- break;
- }
- return AE_OK;
-}
-
-/******************************************************************************
- *
- * FUNCTION: acpi_os_validate_address
- *
- * PARAMETERS: space_id - ACPI space ID
- * address - Physical address
- * length - Address length
- *
- * RETURN: AE_OK if address/length is valid for the space_id. Otherwise,
- * should return AE_AML_ILLEGAL_ADDRESS.
- *
- * DESCRIPTION: Validate a system address via the host OS. Used to validate
- * the addresses accessed by AML operation regions.
- *
- *****************************************************************************/
-
-acpi_status
-acpi_os_validate_address (
- u8 space_id,
- acpi_physical_address address,
- acpi_size length,
- char *name)
-{
- struct acpi_res_list *res;
- int added;
- if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
- return AE_OK;
-
- switch (space_id) {
- case ACPI_ADR_SPACE_SYSTEM_IO:
- case ACPI_ADR_SPACE_SYSTEM_MEMORY:
- /* Only interference checks against SystemIO and SystemMemory
- are needed */
- res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
- if (!res)
- return AE_OK;
- /* ACPI names are fixed to 4 bytes, still better use strlcpy */
- strlcpy(res->name, name, 5);
- res->start = address;
- res->end = address + length - 1;
- res->resource_type = space_id;
- spin_lock(&acpi_res_lock);
- added = acpi_res_list_add(res);
- spin_unlock(&acpi_res_lock);
- pr_debug("%s %s resource: start: 0x%llx, end: 0x%llx, "
- "name: %s\n", added ? "Added" : "Already exist",
- (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
- ? "SystemIO" : "System Memory",
- (unsigned long long)res->start,
- (unsigned long long)res->end,
- res->name);
- if (!added)
- kfree(res);
- break;
- case ACPI_ADR_SPACE_PCI_CONFIG:
- case ACPI_ADR_SPACE_EC:
- case ACPI_ADR_SPACE_SMBUS:
- case ACPI_ADR_SPACE_CMOS:
- case ACPI_ADR_SPACE_PCI_BAR_TARGET:
- case ACPI_ADR_SPACE_DATA_TABLE:
- case ACPI_ADR_SPACE_FIXED_HARDWARE:
- break;
- }
- return AE_OK;
-}
#endif
acpi_status __init acpi_os_initialize(void)
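The repeated "Handle possible alignment issues" memcpy() above exists because struct acpi_generic_address is a packed structure, so its 64-bit address member may land on a 4-byte boundary and dereferencing it directly can trap on strict-alignment architectures. Copying the bytes into an aligned local is always safe. A minimal sketch with an analogous packed layout:

#include <stdint.h>
#include <string.h>

/* Layout analogous to the packed ACPI GAS: the 64-bit member is misaligned. */
struct gas_like {
	uint8_t  space_id;
	uint8_t  bit_width;
	uint8_t  bit_offset;
	uint8_t  access_width;
	uint64_t address;
} __attribute__((packed));

static uint64_t gas_address(const struct gas_like *gas)
{
	uint64_t addr;

	memcpy(&addr, &gas->address, sizeof(addr));	/* alignment-safe read */
	return addr;
}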
diff --git a/trunk/drivers/acpi/processor_core.c b/trunk/drivers/acpi/processor_core.c
index 3a0428e8435c..c850de4c9a14 100644
--- a/trunk/drivers/acpi/processor_core.c
+++ b/trunk/drivers/acpi/processor_core.c
@@ -173,8 +173,30 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
apic_id = map_mat_entry(handle, type, acpi_id);
if (apic_id == -1)
apic_id = map_madt_entry(type, acpi_id);
- if (apic_id == -1)
- return apic_id;
+ if (apic_id == -1) {
+ /*
+ * On UP processor, there is no _MAT or MADT table.
+ * So above apic_id is always set to -1.
+ *
+ * BIOS may define multiple CPU handles even for UP processor.
+ * For example,
+ *
+ * Scope (_PR)
+ * {
+ * Processor (CPU0, 0x00, 0x00000410, 0x06) {}
+ * Processor (CPU1, 0x01, 0x00000410, 0x06) {}
+ * Processor (CPU2, 0x02, 0x00000410, 0x06) {}
+ * Processor (CPU3, 0x03, 0x00000410, 0x06) {}
+ * }
+ *
+ * Ignores apic_id and always return 0 for CPU0's handle.
+ * Return -1 for other CPU's handle.
+ */
+ if (acpi_id == 0)
+ return acpi_id;
+ else
+ return apic_id;
+ }
#ifdef CONFIG_SMP
for_each_possible_cpu(i) {
diff --git a/trunk/drivers/acpi/processor_driver.c b/trunk/drivers/acpi/processor_driver.c
index 20a68ca386de..0034ede38710 100644
--- a/trunk/drivers/acpi/processor_driver.c
+++ b/trunk/drivers/acpi/processor_driver.c
@@ -82,7 +82,7 @@ MODULE_LICENSE("GPL");
static int acpi_processor_add(struct acpi_device *device);
static int acpi_processor_remove(struct acpi_device *device, int type);
static void acpi_processor_notify(struct acpi_device *device, u32 event);
-static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
+static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr);
static int acpi_processor_handle_eject(struct acpi_processor *pr);
@@ -324,10 +324,8 @@ static int acpi_processor_get_info(struct acpi_device *device)
* they are physically not present.
*/
if (pr->id == -1) {
- if (ACPI_FAILURE
- (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
+ if (ACPI_FAILURE(acpi_processor_hotadd_init(pr)))
return -ENODEV;
- }
}
/*
* On some boxes several processors use the same processor bus id.
@@ -539,6 +537,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
thermal_cooling_device_unregister(pr->cdev);
err_power_exit:
acpi_processor_power_exit(pr, device);
+ sysfs_remove_link(&device->dev.kobj, "sysdev");
err_free_cpumask:
free_cpumask_var(pr->throttling.shared_cpu_map);
@@ -720,18 +719,19 @@ processor_walk_namespace_cb(acpi_handle handle,
return (AE_OK);
}
-static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
+static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
{
+ acpi_handle handle = pr->handle;
if (!is_processor_present(handle)) {
return AE_ERROR;
}
- if (acpi_map_lsapic(handle, p_cpu))
+ if (acpi_map_lsapic(handle, &pr->id))
return AE_ERROR;
- if (arch_register_cpu(*p_cpu)) {
- acpi_unmap_lsapic(*p_cpu);
+ if (arch_register_cpu(pr->id)) {
+ acpi_unmap_lsapic(pr->id);
return AE_ERROR;
}
@@ -748,7 +748,7 @@ static int acpi_processor_handle_eject(struct acpi_processor *pr)
return (0);
}
#else
-static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
+static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
{
return AE_ERROR;
}
@@ -827,8 +827,6 @@ static void __exit acpi_processor_exit(void)
acpi_bus_unregister_driver(&acpi_processor_driver);
- cpuidle_unregister_driver(&acpi_idle_driver);
-
return;
}
diff --git a/trunk/drivers/base/bus.c b/trunk/drivers/base/bus.c
index 99dc5921e1dd..40fb12288ce2 100644
--- a/trunk/drivers/base/bus.c
+++ b/trunk/drivers/base/bus.c
@@ -915,9 +915,10 @@ static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store);
/**
* __bus_register - register a driver-core subsystem
- * @bus: bus.
+ * @bus: bus to register
+ * @key: lockdep class key
*
- * Once we have that, we registered the bus with the kobject
+ * Once we have that, we register the bus with the kobject
* infrastructure, then register the children subsystems it has:
* the devices and drivers that belong to the subsystem.
*/
@@ -1220,8 +1221,8 @@ static void system_root_device_release(struct device *dev)
}
/**
* subsys_system_register - register a subsystem at /sys/devices/system/
- * @subsys - system subsystem
- * @groups - default attributes for the root device
+ * @subsys: system subsystem
+ * @groups: default attributes for the root device
*
* All 'system' subsystems have a /sys/devices/system/ root device
* with the name of the subsystem. The root device can carry subsystem-
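The kernel-doc fixes above matter because scripts/kernel-doc only recognises parameter lines written as "@name: description"; the "@name - description" form may be dropped or warned about. A short illustrative template (names invented):

/**
 * example_register - register an example object
 * @obj: object to register
 * @key: lockdep class key
 *
 * Each parameter gets its own "@name:" line, followed by a blank line
 * and the body of the description.
 */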
diff --git a/trunk/drivers/base/firmware_class.c b/trunk/drivers/base/firmware_class.c
index 26ab358dac62..6c9387d646ec 100644
--- a/trunk/drivers/base/firmware_class.c
+++ b/trunk/drivers/base/firmware_class.c
@@ -525,8 +525,7 @@ static int _request_firmware(const struct firmware **firmware_p,
if (!firmware) {
dev_err(device, "%s: kmalloc(struct firmware) failed\n",
__func__);
- retval = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
if (fw_get_builtin_firmware(firmware, name)) {
diff --git a/trunk/drivers/block/Kconfig b/trunk/drivers/block/Kconfig
index a30aa103f95b..4e4c8a4a5fd3 100644
--- a/trunk/drivers/block/Kconfig
+++ b/trunk/drivers/block/Kconfig
@@ -317,6 +317,17 @@ config BLK_DEV_NBD
If unsure, say N.
+config BLK_DEV_NVME
+ tristate "NVM Express block device"
+ depends on PCI
+ ---help---
+ The NVM Express driver is for solid state drives directly
+ connected to the PCI or PCI Express bus. If you know you
+ don't have one of these, it is safe to answer N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called nvme.
+
config BLK_DEV_OSD
tristate "OSD object-as-blkdev support"
depends on SCSI_OSD_ULD
diff --git a/trunk/drivers/block/Makefile b/trunk/drivers/block/Makefile
index ad7b74a44ef3..5b795059f8fb 100644
--- a/trunk/drivers/block/Makefile
+++ b/trunk/drivers/block/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_XILINX_SYSACE) += xsysace.o
obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_MG_DISK) += mg_disk.o
obj-$(CONFIG_SUNVDC) += sunvdc.o
+obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
obj-$(CONFIG_BLK_DEV_OSD) += osdblk.o
obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
diff --git a/trunk/drivers/block/nvme.c b/trunk/drivers/block/nvme.c
new file mode 100644
index 000000000000..c1dc4d86c221
--- /dev/null
+++ b/trunk/drivers/block/nvme.c
@@ -0,0 +1,1739 @@
+/*
+ * NVM Express device driver
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define NVME_Q_DEPTH 1024
+#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
+#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
+#define NVME_MINORS 64
+#define NVME_IO_TIMEOUT (5 * HZ)
+#define ADMIN_TIMEOUT (60 * HZ)
+
+static int nvme_major;
+module_param(nvme_major, int, 0);
+
+static int use_threaded_interrupts;
+module_param(use_threaded_interrupts, int, 0);
+
+static DEFINE_SPINLOCK(dev_list_lock);
+static LIST_HEAD(dev_list);
+static struct task_struct *nvme_thread;
+
+/*
+ * Represents an NVM Express device. Each nvme_dev is a PCI function.
+ */
+struct nvme_dev {
+ struct list_head node;
+ struct nvme_queue **queues;
+ u32 __iomem *dbs;
+ struct pci_dev *pci_dev;
+ struct dma_pool *prp_page_pool;
+ struct dma_pool *prp_small_pool;
+ int instance;
+ int queue_count;
+ int db_stride;
+ u32 ctrl_config;
+ struct msix_entry *entry;
+ struct nvme_bar __iomem *bar;
+ struct list_head namespaces;
+ char serial[20];
+ char model[40];
+ char firmware_rev[8];
+};
+
+/*
+ * An NVM Express namespace is equivalent to a SCSI LUN
+ */
+struct nvme_ns {
+ struct list_head list;
+
+ struct nvme_dev *dev;
+ struct request_queue *queue;
+ struct gendisk *disk;
+
+ int ns_id;
+ int lba_shift;
+};
+
+/*
+ * An NVM Express queue. Each device has at least two (one for admin
+ * commands and one for I/O commands).
+ */
+struct nvme_queue {
+ struct device *q_dmadev;
+ struct nvme_dev *dev;
+ spinlock_t q_lock;
+ struct nvme_command *sq_cmds;
+ volatile struct nvme_completion *cqes;
+ dma_addr_t sq_dma_addr;
+ dma_addr_t cq_dma_addr;
+ wait_queue_head_t sq_full;
+ wait_queue_t sq_cong_wait;
+ struct bio_list sq_cong;
+ u32 __iomem *q_db;
+ u16 q_depth;
+ u16 cq_vector;
+ u16 sq_head;
+ u16 sq_tail;
+ u16 cq_head;
+ u16 cq_phase;
+ unsigned long cmdid_data[];
+};
+
+/*
+ * Check we didn't inadvertently grow the command struct
+ */
+static inline void _nvme_check_size(void)
+{
+ BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
+ BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
+}
+
+typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
+ struct nvme_completion *);
+
+struct nvme_cmd_info {
+ nvme_completion_fn fn;
+ void *ctx;
+ unsigned long timeout;
+};
+
+static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
+{
+ return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
+}
+
+/**
+ * alloc_cmdid() - Allocate a Command ID
+ * @nvmeq: The queue that will be used for this command
+ * @ctx: A pointer that will be passed to the handler
+ * @handler: The function to call on completion
+ *
+ * Allocate a Command ID for a queue. The data passed in will
+ * be passed to the completion handler. This is implemented by using
+ * the bottom two bits of the ctx pointer to store the handler ID.
+ * Passing in a pointer that's not 4-byte aligned will cause a BUG.
+ * We can change this if it becomes a problem.
+ *
+ * May be called with local interrupts disabled and the q_lock held,
+ * or with interrupts enabled and no locks held.
+ */
+static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
+ nvme_completion_fn handler, unsigned timeout)
+{
+ int depth = nvmeq->q_depth - 1;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+ int cmdid;
+
+ do {
+ cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
+ if (cmdid >= depth)
+ return -EBUSY;
+ } while (test_and_set_bit(cmdid, nvmeq->cmdid_data));
+
+ info[cmdid].fn = handler;
+ info[cmdid].ctx = ctx;
+ info[cmdid].timeout = jiffies + timeout;
+ return cmdid;
+}
+
+static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
+ nvme_completion_fn handler, unsigned timeout)
+{
+ int cmdid;
+ wait_event_killable(nvmeq->sq_full,
+ (cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
+ return (cmdid < 0) ? -EINTR : cmdid;
+}
+
+/* Special values must be less than 0x1000 */
+#define CMD_CTX_BASE ((void *)POISON_POINTER_DELTA)
+#define CMD_CTX_CANCELLED (0x30C + CMD_CTX_BASE)
+#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE)
+#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE)
+#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE)
+
+static void special_completion(struct nvme_dev *dev, void *ctx,
+ struct nvme_completion *cqe)
+{
+ if (ctx == CMD_CTX_CANCELLED)
+ return;
+ if (ctx == CMD_CTX_FLUSH)
+ return;
+ if (ctx == CMD_CTX_COMPLETED) {
+ dev_warn(&dev->pci_dev->dev,
+ "completed id %d twice on queue %d\n",
+ cqe->command_id, le16_to_cpup(&cqe->sq_id));
+ return;
+ }
+ if (ctx == CMD_CTX_INVALID) {
+ dev_warn(&dev->pci_dev->dev,
+ "invalid id %d completed on queue %d\n",
+ cqe->command_id, le16_to_cpup(&cqe->sq_id));
+ return;
+ }
+
+ dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
+}
+
+/*
+ * Called with local interrupts disabled and the q_lock held. May not sleep.
+ */
+static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
+ nvme_completion_fn *fn)
+{
+ void *ctx;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+
+ if (cmdid >= nvmeq->q_depth) {
+ *fn = special_completion;
+ return CMD_CTX_INVALID;
+ }
+ *fn = info[cmdid].fn;
+ ctx = info[cmdid].ctx;
+ info[cmdid].fn = special_completion;
+ info[cmdid].ctx = CMD_CTX_COMPLETED;
+ clear_bit(cmdid, nvmeq->cmdid_data);
+ wake_up(&nvmeq->sq_full);
+ return ctx;
+}
+
+static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
+ nvme_completion_fn *fn)
+{
+ void *ctx;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+ if (fn)
+ *fn = info[cmdid].fn;
+ ctx = info[cmdid].ctx;
+ info[cmdid].fn = special_completion;
+ info[cmdid].ctx = CMD_CTX_CANCELLED;
+ return ctx;
+}
+
+static struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
+{
+ return dev->queues[get_cpu() + 1];
+}
+
+static void put_nvmeq(struct nvme_queue *nvmeq)
+{
+ put_cpu();
+}
+
+/**
+ * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
+ * @nvmeq: The queue to use
+ * @cmd: The command to send
+ *
+ * Safe to use from interrupt context
+ */
+static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
+{
+ unsigned long flags;
+ u16 tail;
+ spin_lock_irqsave(&nvmeq->q_lock, flags);
+ tail = nvmeq->sq_tail;
+ memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
+ if (++tail == nvmeq->q_depth)
+ tail = 0;
+ writel(tail, nvmeq->q_db);
+ nvmeq->sq_tail = tail;
+ spin_unlock_irqrestore(&nvmeq->q_lock, flags);
+
+ return 0;
+}
+
+/*
+ * The nvme_iod describes the data in an I/O, including the list of PRP
+ * entries. You can't see it in this data structure because C doesn't let
+ * me express that. Use nvme_alloc_iod to ensure there's enough space
+ * allocated to store the PRP list.
+ */
+struct nvme_iod {
+ void *private; /* For the use of the submitter of the I/O */
+ int npages; /* In the PRP list. 0 means small pool in use */
+ int offset; /* Of PRP list */
+ int nents; /* Used in scatterlist */
+ int length; /* Of data, in bytes */
+ dma_addr_t first_dma;
+ struct scatterlist sg[0];
+};
+
+static __le64 **iod_list(struct nvme_iod *iod)
+{
+ return ((void *)iod) + iod->offset;
+}
+
+/*
+ * Will slightly overestimate the number of pages needed. This is OK
+ * as it only leads to a small amount of wasted memory for the lifetime of
+ * the I/O.
+ */
+static int nvme_npages(unsigned size)
+{
+ unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
+ return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
+}
+
+static struct nvme_iod *
+nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
+{
+ struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
+ sizeof(__le64 *) * nvme_npages(nbytes) +
+ sizeof(struct scatterlist) * nseg, gfp);
+
+ if (iod) {
+ iod->offset = offsetof(struct nvme_iod, sg[nseg]);
+ iod->npages = -1;
+ iod->length = nbytes;
+ }
+
+ return iod;
+}
+
+static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
+{
+ const int last_prp = PAGE_SIZE / 8 - 1;
+ int i;
+ __le64 **list = iod_list(iod);
+ dma_addr_t prp_dma = iod->first_dma;
+
+ if (iod->npages == 0)
+ dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
+ for (i = 0; i < iod->npages; i++) {
+ __le64 *prp_list = list[i];
+ dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
+ dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
+ prp_dma = next_prp_dma;
+ }
+ kfree(iod);
+}
+
+static void requeue_bio(struct nvme_dev *dev, struct bio *bio)
+{
+ struct nvme_queue *nvmeq = get_nvmeq(dev);
+ if (bio_list_empty(&nvmeq->sq_cong))
+ add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+ bio_list_add(&nvmeq->sq_cong, bio);
+ put_nvmeq(nvmeq);
+ wake_up_process(nvme_thread);
+}
+
+static void bio_completion(struct nvme_dev *dev, void *ctx,
+ struct nvme_completion *cqe)
+{
+ struct nvme_iod *iod = ctx;
+ struct bio *bio = iod->private;
+ u16 status = le16_to_cpup(&cqe->status) >> 1;
+
+ dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+ bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ nvme_free_iod(dev, iod);
+ if (status) {
+ bio_endio(bio, -EIO);
+ } else if (bio->bi_vcnt > bio->bi_idx) {
+ requeue_bio(dev, bio);
+ } else {
+ bio_endio(bio, 0);
+ }
+}
+
+/* length is in bytes. gfp flags indicates whether we may sleep. */
+static int nvme_setup_prps(struct nvme_dev *dev,
+ struct nvme_common_command *cmd, struct nvme_iod *iod,
+ int total_len, gfp_t gfp)
+{
+ struct dma_pool *pool;
+ int length = total_len;
+ struct scatterlist *sg = iod->sg;
+ int dma_len = sg_dma_len(sg);
+ u64 dma_addr = sg_dma_address(sg);
+ int offset = offset_in_page(dma_addr);
+ __le64 *prp_list;
+ __le64 **list = iod_list(iod);
+ dma_addr_t prp_dma;
+ int nprps, i;
+
+ cmd->prp1 = cpu_to_le64(dma_addr);
+ length -= (PAGE_SIZE - offset);
+ if (length <= 0)
+ return total_len;
+
+ dma_len -= (PAGE_SIZE - offset);
+ if (dma_len) {
+ dma_addr += (PAGE_SIZE - offset);
+ } else {
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ if (length <= PAGE_SIZE) {
+ cmd->prp2 = cpu_to_le64(dma_addr);
+ return total_len;
+ }
+
+ nprps = DIV_ROUND_UP(length, PAGE_SIZE);
+ if (nprps <= (256 / 8)) {
+ pool = dev->prp_small_pool;
+ iod->npages = 0;
+ } else {
+ pool = dev->prp_page_pool;
+ iod->npages = 1;
+ }
+
+ prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+ if (!prp_list) {
+ cmd->prp2 = cpu_to_le64(dma_addr);
+ iod->npages = -1;
+ return (total_len - length) + PAGE_SIZE;
+ }
+ list[0] = prp_list;
+ iod->first_dma = prp_dma;
+ cmd->prp2 = cpu_to_le64(prp_dma);
+ i = 0;
+ for (;;) {
+ if (i == PAGE_SIZE / 8) {
+ __le64 *old_prp_list = prp_list;
+ prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+ if (!prp_list)
+ return total_len - length;
+ list[iod->npages++] = prp_list;
+ prp_list[0] = old_prp_list[i - 1];
+ old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+ i = 1;
+ }
+ prp_list[i++] = cpu_to_le64(dma_addr);
+ dma_len -= PAGE_SIZE;
+ dma_addr += PAGE_SIZE;
+ length -= PAGE_SIZE;
+ if (length <= 0)
+ break;
+ if (dma_len > 0)
+ continue;
+ BUG_ON(dma_len < 0);
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ return total_len;
+}
+
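+/*
+ * Worked PRP example (illustration only, not part of the original patch):
+ * with 4 KiB pages, a page-aligned 12 KiB transfer fills prp1 with the
+ * first page; since more than one page then remains, prp2 points at a PRP
+ * list whose two entries cover the second and third pages.  When a list
+ * fills up (512 entries per 4 KiB page), its last slot is rewritten to
+ * point at the next list, as done above.
+ */
+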
+/* NVMe scatterlists require no holes in the virtual address */
+#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2) ((vec2)->bv_offset || \
+ (((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))
+
+static int nvme_map_bio(struct device *dev, struct nvme_iod *iod,
+ struct bio *bio, enum dma_data_direction dma_dir, int psegs)
+{
+ struct bio_vec *bvec, *bvprv = NULL;
+ struct scatterlist *sg = NULL;
+ int i, old_idx, length = 0, nsegs = 0;
+
+ sg_init_table(iod->sg, psegs);
+ old_idx = bio->bi_idx;
+ bio_for_each_segment(bvec, bio, i) {
+ if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
+ sg->length += bvec->bv_len;
+ } else {
+ if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
+ break;
+ sg = sg ? sg + 1 : iod->sg;
+ sg_set_page(sg, bvec->bv_page, bvec->bv_len,
+ bvec->bv_offset);
+ nsegs++;
+ }
+ length += bvec->bv_len;
+ bvprv = bvec;
+ }
+ bio->bi_idx = i;
+ iod->nents = nsegs;
+ sg_mark_end(sg);
+ if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) {
+ bio->bi_idx = old_idx;
+ return -ENOMEM;
+ }
+ return length;
+}
+
+static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+ int cmdid)
+{
+ struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+
+ memset(cmnd, 0, sizeof(*cmnd));
+ cmnd->common.opcode = nvme_cmd_flush;
+ cmnd->common.command_id = cmdid;
+ cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+
+ if (++nvmeq->sq_tail == nvmeq->q_depth)
+ nvmeq->sq_tail = 0;
+ writel(nvmeq->sq_tail, nvmeq->q_db);
+
+ return 0;
+}
+
+static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
+{
+ int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
+ special_completion, NVME_IO_TIMEOUT);
+ if (unlikely(cmdid < 0))
+ return cmdid;
+
+ return nvme_submit_flush(nvmeq, ns, cmdid);
+}
+
+/*
+ * Called with local interrupts disabled and the q_lock held. May not sleep.
+ */
+static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+ struct bio *bio)
+{
+ struct nvme_command *cmnd;
+ struct nvme_iod *iod;
+ enum dma_data_direction dma_dir;
+ int cmdid, length, result = -ENOMEM;
+ u16 control;
+ u32 dsmgmt;
+ int psegs = bio_phys_segments(ns->queue, bio);
+
+ if ((bio->bi_rw & REQ_FLUSH) && psegs) {
+ result = nvme_submit_flush_data(nvmeq, ns);
+ if (result)
+ return result;
+ }
+
+ iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
+ if (!iod)
+ goto nomem;
+ iod->private = bio;
+
+ result = -EBUSY;
+ cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
+ if (unlikely(cmdid < 0))
+ goto free_iod;
+
+ if ((bio->bi_rw & REQ_FLUSH) && !psegs)
+ return nvme_submit_flush(nvmeq, ns, cmdid);
+
+ control = 0;
+ if (bio->bi_rw & REQ_FUA)
+ control |= NVME_RW_FUA;
+ if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
+ control |= NVME_RW_LR;
+
+ dsmgmt = 0;
+ if (bio->bi_rw & REQ_RAHEAD)
+ dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
+
+ cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+
+ memset(cmnd, 0, sizeof(*cmnd));
+ if (bio_data_dir(bio)) {
+ cmnd->rw.opcode = nvme_cmd_write;
+ dma_dir = DMA_TO_DEVICE;
+ } else {
+ cmnd->rw.opcode = nvme_cmd_read;
+ dma_dir = DMA_FROM_DEVICE;
+ }
+
+ result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
+ if (result < 0)
+ goto free_iod;
+ length = result;
+
+ cmnd->rw.command_id = cmdid;
+ cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
+ length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
+ GFP_ATOMIC);
+ cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
+ cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
+ cmnd->rw.control = cpu_to_le16(control);
+ cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+
+ bio->bi_sector += length >> 9;
+
+ if (++nvmeq->sq_tail == nvmeq->q_depth)
+ nvmeq->sq_tail = 0;
+ writel(nvmeq->sq_tail, nvmeq->q_db);
+
+ return 0;
+
+ free_iod:
+ nvme_free_iod(nvmeq->dev, iod);
+ nomem:
+ return result;
+}
+
+static void nvme_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct nvme_ns *ns = q->queuedata;
+ struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
+ int result = -EBUSY;
+
+ spin_lock_irq(&nvmeq->q_lock);
+ if (bio_list_empty(&nvmeq->sq_cong))
+ result = nvme_submit_bio_queue(nvmeq, ns, bio);
+ if (unlikely(result)) {
+ if (bio_list_empty(&nvmeq->sq_cong))
+ add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+ bio_list_add(&nvmeq->sq_cong, bio);
+ }
+
+ spin_unlock_irq(&nvmeq->q_lock);
+ put_nvmeq(nvmeq);
+}
+
+static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
+{
+ u16 head, phase;
+
+ head = nvmeq->cq_head;
+ phase = nvmeq->cq_phase;
+
+ for (;;) {
+ void *ctx;
+ nvme_completion_fn fn;
+ struct nvme_completion cqe = nvmeq->cqes[head];
+ if ((le16_to_cpu(cqe.status) & 1) != phase)
+ break;
+ nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
+ if (++head == nvmeq->q_depth) {
+ head = 0;
+ phase = !phase;
+ }
+
+ ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
+ fn(nvmeq->dev, ctx, &cqe);
+ }
+
+ /* If the controller ignores the cq head doorbell and continuously
+ * writes to the queue, it is theoretically possible to wrap around
+ * the queue twice and mistakenly return IRQ_NONE. Linux only
+ * requires that 0.1% of your interrupts are handled, so this isn't
+ * a big problem.
+ */
+ if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
+ return IRQ_NONE;
+
+ writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
+ nvmeq->cq_head = head;
+ nvmeq->cq_phase = phase;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nvme_irq(int irq, void *data)
+{
+ irqreturn_t result;
+ struct nvme_queue *nvmeq = data;
+ spin_lock(&nvmeq->q_lock);
+ result = nvme_process_cq(nvmeq);
+ spin_unlock(&nvmeq->q_lock);
+ return result;
+}
+
+static irqreturn_t nvme_irq_check(int irq, void *data)
+{
+ struct nvme_queue *nvmeq = data;
+ struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
+ if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
+ return IRQ_NONE;
+ return IRQ_WAKE_THREAD;
+}
+
+static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
+{
+ spin_lock_irq(&nvmeq->q_lock);
+ cancel_cmdid(nvmeq, cmdid, NULL);
+ spin_unlock_irq(&nvmeq->q_lock);
+}
+
+struct sync_cmd_info {
+ struct task_struct *task;
+ u32 result;
+ int status;
+};
+
+static void sync_completion(struct nvme_dev *dev, void *ctx,
+ struct nvme_completion *cqe)
+{
+ struct sync_cmd_info *cmdinfo = ctx;
+ cmdinfo->result = le32_to_cpup(&cqe->result);
+ cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
+ wake_up_process(cmdinfo->task);
+}
+
+/*
+ * Returns 0 on success. If the result is negative, it's a Linux error code;
+ * if the result is positive, it's an NVM Express status code
+ */
+static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
+ struct nvme_command *cmd, u32 *result, unsigned timeout)
+{
+ int cmdid;
+ struct sync_cmd_info cmdinfo;
+
+ cmdinfo.task = current;
+ cmdinfo.status = -EINTR;
+
+ cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
+ timeout);
+ if (cmdid < 0)
+ return cmdid;
+ cmd->common.command_id = cmdid;
+
+ set_current_state(TASK_KILLABLE);
+ nvme_submit_cmd(nvmeq, cmd);
+ schedule();
+
+ if (cmdinfo.status == -EINTR) {
+ nvme_abort_command(nvmeq, cmdid);
+ return -EINTR;
+ }
+
+ if (result)
+ *result = cmdinfo.result;
+
+ return cmdinfo.status;
+}
+
+static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
+ u32 *result)
+{
+ return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
+}
+
+static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
+{
+ int status;
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.delete_queue.opcode = opcode;
+ c.delete_queue.qid = cpu_to_le16(id);
+
+ status = nvme_submit_admin_cmd(dev, &c, NULL);
+ if (status)
+ return -EIO;
+ return 0;
+}
+
+static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
+ struct nvme_queue *nvmeq)
+{
+ int status;
+ struct nvme_command c;
+ int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
+
+ memset(&c, 0, sizeof(c));
+ c.create_cq.opcode = nvme_admin_create_cq;
+ c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
+ c.create_cq.cqid = cpu_to_le16(qid);
+ c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
+ c.create_cq.cq_flags = cpu_to_le16(flags);
+ c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
+
+ status = nvme_submit_admin_cmd(dev, &c, NULL);
+ if (status)
+ return -EIO;
+ return 0;
+}
+
+static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
+ struct nvme_queue *nvmeq)
+{
+ int status;
+ struct nvme_command c;
+ int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;
+
+ memset(&c, 0, sizeof(c));
+ c.create_sq.opcode = nvme_admin_create_sq;
+ c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
+ c.create_sq.sqid = cpu_to_le16(qid);
+ c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
+ c.create_sq.sq_flags = cpu_to_le16(flags);
+ c.create_sq.cqid = cpu_to_le16(qid);
+
+ status = nvme_submit_admin_cmd(dev, &c, NULL);
+ if (status)
+ return -EIO;
+ return 0;
+}
+
+static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
+{
+ return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
+}
+
+static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
+{
+ return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
+}
+
+static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
+ dma_addr_t dma_addr)
+{
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.nsid = cpu_to_le32(nsid);
+ c.identify.prp1 = cpu_to_le64(dma_addr);
+ c.identify.cns = cpu_to_le32(cns);
+
+ return nvme_submit_admin_cmd(dev, &c, NULL);
+}
+
+static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
+ unsigned dword11, dma_addr_t dma_addr)
+{
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.features.opcode = nvme_admin_get_features;
+ c.features.prp1 = cpu_to_le64(dma_addr);
+ c.features.fid = cpu_to_le32(fid);
+ c.features.dword11 = cpu_to_le32(dword11);
+
+ return nvme_submit_admin_cmd(dev, &c, NULL);
+}
+
+static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
+ unsigned dword11, dma_addr_t dma_addr, u32 *result)
+{
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.features.opcode = nvme_admin_set_features;
+ c.features.prp1 = cpu_to_le64(dma_addr);
+ c.features.fid = cpu_to_le32(fid);
+ c.features.dword11 = cpu_to_le32(dword11);
+
+ return nvme_submit_admin_cmd(dev, &c, result);
+}
+
+static void nvme_free_queue(struct nvme_dev *dev, int qid)
+{
+ struct nvme_queue *nvmeq = dev->queues[qid];
+ int vector = dev->entry[nvmeq->cq_vector].vector;
+
+ irq_set_affinity_hint(vector, NULL);
+ free_irq(vector, nvmeq);
+
+ /* Don't tell the adapter to delete the admin queue */
+ if (qid) {
+ adapter_delete_sq(dev, qid);
+ adapter_delete_cq(dev, qid);
+ }
+
+ dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
+ (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
+ dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+ nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+ kfree(nvmeq);
+}
+
+static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
+ int depth, int vector)
+{
+ struct device *dmadev = &dev->pci_dev->dev;
+ unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
+ struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
+ if (!nvmeq)
+ return NULL;
+
+ nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
+ &nvmeq->cq_dma_addr, GFP_KERNEL);
+ if (!nvmeq->cqes)
+ goto free_nvmeq;
+ memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));
+
+ nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
+ &nvmeq->sq_dma_addr, GFP_KERNEL);
+ if (!nvmeq->sq_cmds)
+ goto free_cqdma;
+
+ nvmeq->q_dmadev = dmadev;
+ nvmeq->dev = dev;
+ spin_lock_init(&nvmeq->q_lock);
+ nvmeq->cq_head = 0;
+ nvmeq->cq_phase = 1;
+ init_waitqueue_head(&nvmeq->sq_full);
+ init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
+ bio_list_init(&nvmeq->sq_cong);
+ nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+ nvmeq->q_depth = depth;
+ nvmeq->cq_vector = vector;
+
+ return nvmeq;
+
+ free_cqdma:
+ dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
+ nvmeq->cq_dma_addr);
+ free_nvmeq:
+ kfree(nvmeq);
+ return NULL;
+}
+
+static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
+ const char *name)
+{
+ if (use_threaded_interrupts)
+ return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
+ nvme_irq_check, nvme_irq,
+ IRQF_DISABLED | IRQF_SHARED,
+ name, nvmeq);
+ return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
+ IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
+}
+
+static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
+ int qid, int cq_size, int vector)
+{
+ int result;
+ struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);
+
+ if (!nvmeq)
+ return ERR_PTR(-ENOMEM);
+
+ result = adapter_alloc_cq(dev, qid, nvmeq);
+ if (result < 0)
+ goto free_nvmeq;
+
+ result = adapter_alloc_sq(dev, qid, nvmeq);
+ if (result < 0)
+ goto release_cq;
+
+ result = queue_request_irq(dev, nvmeq, "nvme");
+ if (result < 0)
+ goto release_sq;
+
+ return nvmeq;
+
+ release_sq:
+ adapter_delete_sq(dev, qid);
+ release_cq:
+ adapter_delete_cq(dev, qid);
+ free_nvmeq:
+ dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
+ (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
+ dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+ nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+ kfree(nvmeq);
+ return ERR_PTR(result);
+}
+
+static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
+{
+ int result;
+ u32 aqa;
+ u64 cap;
+ unsigned long timeout;
+ struct nvme_queue *nvmeq;
+
+ dev->dbs = ((void __iomem *)dev->bar) + 4096;
+
+ nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
+ if (!nvmeq)
+ return -ENOMEM;
+
+ aqa = nvmeq->q_depth - 1;
+ aqa |= aqa << 16;
+
+ dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
+ dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
+ dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
+ dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
+
+ writel(0, &dev->bar->cc);
+ writel(aqa, &dev->bar->aqa);
+ writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
+ writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
+ writel(dev->ctrl_config, &dev->bar->cc);
+
+ cap = readq(&dev->bar->cap);
+ timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
+ dev->db_stride = NVME_CAP_STRIDE(cap);
+
+ while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
+ msleep(100);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+ if (time_after(jiffies, timeout)) {
+ dev_err(&dev->pci_dev->dev,
+ "Device not ready; aborting initialisation\n");
+ return -ENODEV;
+ }
+ }
+
+ result = queue_request_irq(dev, nvmeq, "nvme admin");
+ dev->queues[0] = nvmeq;
+ return result;
+}
+
+static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
+ unsigned long addr, unsigned length)
+{
+ int i, err, count, nents, offset;
+ struct scatterlist *sg;
+ struct page **pages;
+ struct nvme_iod *iod;
+
+ if (addr & 3)
+ return ERR_PTR(-EINVAL);
+ if (!length)
+ return ERR_PTR(-EINVAL);
+
+ offset = offset_in_page(addr);
+ count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
+ pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
+
+ err = get_user_pages_fast(addr, count, 1, pages);
+ if (err < count) {
+ count = err;
+ err = -EFAULT;
+ goto put_pages;
+ }
+
+ iod = nvme_alloc_iod(count, length, GFP_KERNEL);
+ sg = iod->sg;
+ sg_init_table(sg, count);
+ for (i = 0; i < count; i++) {
+ sg_set_page(&sg[i], pages[i],
+ min_t(int, length, PAGE_SIZE - offset), offset);
+ length -= (PAGE_SIZE - offset);
+ offset = 0;
+ }
+ sg_mark_end(&sg[i - 1]);
+ iod->nents = count;
+
+ err = -ENOMEM;
+ nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
+ write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (!nents)
+ goto free_iod;
+
+ kfree(pages);
+ return iod;
+
+ free_iod:
+ kfree(iod);
+ put_pages:
+ for (i = 0; i < count; i++)
+ put_page(pages[i]);
+ kfree(pages);
+ return ERR_PTR(err);
+}
+
+static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
+ struct nvme_iod *iod)
+{
+ int i;
+
+ dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+ write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ for (i = 0; i < iod->nents; i++)
+ put_page(sg_page(&iod->sg[i]));
+}
+
+static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
+{
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_queue *nvmeq;
+ struct nvme_user_io io;
+ struct nvme_command c;
+ unsigned length;
+ int status;
+ struct nvme_iod *iod;
+
+ if (copy_from_user(&io, uio, sizeof(io)))
+ return -EFAULT;
+ length = (io.nblocks + 1) << ns->lba_shift;
+
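+	/*
+	 * Opcodes with bit 0 set (write, compare) transfer data to the
+	 * device; bit 0 clear (read) means data flows from the device.
+	 */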
+ switch (io.opcode) {
+ case nvme_cmd_write:
+ case nvme_cmd_read:
+ case nvme_cmd_compare:
+ iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (IS_ERR(iod))
+ return PTR_ERR(iod);
+
+ memset(&c, 0, sizeof(c));
+ c.rw.opcode = io.opcode;
+ c.rw.flags = io.flags;
+ c.rw.nsid = cpu_to_le32(ns->ns_id);
+ c.rw.slba = cpu_to_le64(io.slba);
+ c.rw.length = cpu_to_le16(io.nblocks);
+ c.rw.control = cpu_to_le16(io.control);
+ c.rw.dsmgmt = cpu_to_le16(io.dsmgmt);
+ c.rw.reftag = io.reftag;
+ c.rw.apptag = io.apptag;
+ c.rw.appmask = io.appmask;
+ /* XXX: metadata */
+ length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
+
+ nvmeq = get_nvmeq(dev);
+ /*
+ * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
+ * disabled. We may be preempted at any point, and be rescheduled
+ * to a different CPU. That will cause cacheline bouncing, but no
+ * additional races since q_lock already protects against other CPUs.
+ */
+ put_nvmeq(nvmeq);
+ if (length != (io.nblocks + 1) << ns->lba_shift)
+ status = -ENOMEM;
+ else
+ status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+
+ nvme_unmap_user_pages(dev, io.opcode & 1, iod);
+ nvme_free_iod(dev, iod);
+ return status;
+}
+
+static int nvme_user_admin_cmd(struct nvme_ns *ns,
+ struct nvme_admin_cmd __user *ucmd)
+{
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_admin_cmd cmd;
+ struct nvme_command c;
+ int status, length;
+ struct nvme_iod *iod;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
+ return -EFAULT;
+
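+	/*
+	 * Build the admin command directly from the user-supplied fields;
+	 * cdw2/3 and cdw10 through cdw15 are copied straight into the command.
+	 */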
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = cmd.opcode;
+ c.common.flags = cmd.flags;
+ c.common.nsid = cpu_to_le32(cmd.nsid);
+ c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
+ c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
+ c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
+ c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
+ c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
+ c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
+ c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
+ c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
+
+ length = cmd.data_len;
+ if (cmd.data_len) {
+ iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
+ length);
+ if (IS_ERR(iod))
+ return PTR_ERR(iod);
+ length = nvme_setup_prps(dev, &c.common, iod, length,
+ GFP_KERNEL);
+ }
+
+ if (length != cmd.data_len)
+ status = -ENOMEM;
+ else
+ status = nvme_submit_admin_cmd(dev, &c, NULL);
+
+ if (cmd.data_len) {
+ nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
+ nvme_free_iod(dev, iod);
+ }
+ return status;
+}
+
+static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
+ unsigned long arg)
+{
+ struct nvme_ns *ns = bdev->bd_disk->private_data;
+
+ switch (cmd) {
+ case NVME_IOCTL_ID:
+ return ns->ns_id;
+ case NVME_IOCTL_ADMIN_CMD:
+ return nvme_user_admin_cmd(ns, (void __user *)arg);
+ case NVME_IOCTL_SUBMIT_IO:
+ return nvme_submit_io(ns, (void __user *)arg);
+ default:
+ return -ENOTTY;
+ }
+}
+
+static const struct block_device_operations nvme_fops = {
+ .owner = THIS_MODULE,
+ .ioctl = nvme_ioctl,
+ .compat_ioctl = nvme_ioctl,
+};
+
+static void nvme_timeout_ios(struct nvme_queue *nvmeq)
+{
+ int depth = nvmeq->q_depth - 1;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+ unsigned long now = jiffies;
+ int cmdid;
+
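+	/*
+	 * Complete any outstanding command that has exceeded its timeout
+	 * as if the controller had aborted it.
+	 */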
+ for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
+ void *ctx;
+ nvme_completion_fn fn;
+ static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };
+
+ if (!time_after(now, info[cmdid].timeout))
+ continue;
+ dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
+ ctx = cancel_cmdid(nvmeq, cmdid, &fn);
+ fn(nvmeq->dev, ctx, &cqe);
+ }
+}
+
+static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
+{
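+	/* Retry bios that were deferred because the queue was full. */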
+ while (bio_list_peek(&nvmeq->sq_cong)) {
+ struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
+ struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
+ if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
+ bio_list_add_head(&nvmeq->sq_cong, bio);
+ break;
+ }
+ if (bio_list_empty(&nvmeq->sq_cong))
+ remove_wait_queue(&nvmeq->sq_full,
+ &nvmeq->sq_cong_wait);
+ }
+}
+
+static int nvme_kthread(void *data)
+{
+ struct nvme_dev *dev;
+
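+	/*
+	 * Poll every queue of every registered device about once a second:
+	 * reap completions, time out stuck I/Os and resubmit deferred bios.
+	 */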
+ while (!kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
+ spin_lock(&dev_list_lock);
+ list_for_each_entry(dev, &dev_list, node) {
+ int i;
+ for (i = 0; i < dev->queue_count; i++) {
+ struct nvme_queue *nvmeq = dev->queues[i];
+ if (!nvmeq)
+ continue;
+ spin_lock_irq(&nvmeq->q_lock);
+ if (nvme_process_cq(nvmeq))
+ printk("process_cq did something\n");
+ nvme_timeout_ios(nvmeq);
+ nvme_resubmit_bios(nvmeq);
+ spin_unlock_irq(&nvmeq->q_lock);
+ }
+ }
+ spin_unlock(&dev_list_lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ);
+ }
+ return 0;
+}
+
+static DEFINE_IDA(nvme_index_ida);
+
+static int nvme_get_ns_idx(void)
+{
+ int index, error;
+
+ do {
+ if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
+ return -1;
+
+ spin_lock(&dev_list_lock);
+ error = ida_get_new(&nvme_index_ida, &index);
+ spin_unlock(&dev_list_lock);
+ } while (error == -EAGAIN);
+
+ if (error)
+ index = -1;
+ return index;
+}
+
+static void nvme_put_ns_idx(int index)
+{
+ spin_lock(&dev_list_lock);
+ ida_remove(&nvme_index_ida, index);
+ spin_unlock(&dev_list_lock);
+}
+
+static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
+ struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
+{
+ struct nvme_ns *ns;
+ struct gendisk *disk;
+ int lbaf;
+
+ if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
+ return NULL;
+
+ ns = kzalloc(sizeof(*ns), GFP_KERNEL);
+ if (!ns)
+ return NULL;
+ ns->queue = blk_alloc_queue(GFP_KERNEL);
+ if (!ns->queue)
+ goto out_free_ns;
+ ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
+ queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
+/* queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */
+ blk_queue_make_request(ns->queue, nvme_make_request);
+ ns->dev = dev;
+ ns->queue->queuedata = ns;
+
+ disk = alloc_disk(NVME_MINORS);
+ if (!disk)
+ goto out_free_queue;
+ ns->ns_id = nsid;
+ ns->disk = disk;
+ lbaf = id->flbas & 0xf;
+ ns->lba_shift = id->lbaf[lbaf].ds;
+
+ disk->major = nvme_major;
+ disk->minors = NVME_MINORS;
+ disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
+ disk->fops = &nvme_fops;
+ disk->private_data = ns;
+ disk->queue = ns->queue;
+ disk->driverfs_dev = &dev->pci_dev->dev;
+ sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
+ set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
+
+ return ns;
+
+ out_free_queue:
+ blk_cleanup_queue(ns->queue);
+ out_free_ns:
+ kfree(ns);
+ return NULL;
+}
+
+static void nvme_ns_free(struct nvme_ns *ns)
+{
+ int index = ns->disk->first_minor / NVME_MINORS;
+ put_disk(ns->disk);
+ nvme_put_ns_idx(index);
+ blk_cleanup_queue(ns->queue);
+ kfree(ns);
+}
+
+static int set_queue_count(struct nvme_dev *dev, int count)
+{
+ int status;
+ u32 result;
+ u32 q_count = (count - 1) | ((count - 1) << 16);
+
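+	/*
+	 * The queue count is 0's based.  The controller returns the number
+	 * of submission and completion queues actually allocated in the low
+	 * and high 16 bits of the result.
+	 */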
+ status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
+ &result);
+ if (status)
+ return -EIO;
+ return min(result & 0xffff, result >> 16) + 1;
+}
+
+static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
+{
+ int result, cpu, i, nr_io_queues, db_bar_size;
+
+ nr_io_queues = num_online_cpus();
+ result = set_queue_count(dev, nr_io_queues);
+ if (result < 0)
+ return result;
+ if (result < nr_io_queues)
+ nr_io_queues = result;
+
+ /* Deregister the admin queue's interrupt */
+ free_irq(dev->entry[0].vector, dev->queues[0]);
+
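+	/*
+	 * Doorbell registers start at offset 4096.  Remap BAR 0 if the
+	 * default 8K mapping is too small to cover the doorbells of all
+	 * queue pairs at this doorbell stride.
+	 */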
+ db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
+ if (db_bar_size > 8192) {
+ iounmap(dev->bar);
+ dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
+ db_bar_size);
+ dev->dbs = ((void __iomem *)dev->bar) + 4096;
+ dev->queues[0]->q_db = dev->dbs;
+ }
+
+ for (i = 0; i < nr_io_queues; i++)
+ dev->entry[i].entry = i;
+ for (;;) {
+ result = pci_enable_msix(dev->pci_dev, dev->entry,
+ nr_io_queues);
+ if (result == 0) {
+ break;
+ } else if (result > 0) {
+ nr_io_queues = result;
+ continue;
+ } else {
+ nr_io_queues = 1;
+ break;
+ }
+ }
+
+ result = queue_request_irq(dev, dev->queues[0], "nvme admin");
+ /* XXX: handle failure here */
+
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < nr_io_queues; i++) {
+ irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+
+ for (i = 0; i < nr_io_queues; i++) {
+ dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
+ NVME_Q_DEPTH, i);
+ if (IS_ERR(dev->queues[i + 1]))
+ return PTR_ERR(dev->queues[i + 1]);
+ dev->queue_count++;
+ }
+
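+	/*
+	 * Share the already-created I/O queues among any remaining possible
+	 * CPUs so that every CPU index maps to a valid queue.
+	 */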
+ for (; i < num_possible_cpus(); i++) {
+ int target = i % rounddown_pow_of_two(dev->queue_count - 1);
+ dev->queues[i + 1] = dev->queues[target + 1];
+ }
+
+ return 0;
+}
+
+static void nvme_free_queues(struct nvme_dev *dev)
+{
+ int i;
+
+ for (i = dev->queue_count - 1; i >= 0; i--)
+ nvme_free_queue(dev, i);
+}
+
+static int __devinit nvme_dev_add(struct nvme_dev *dev)
+{
+ int res, nn, i;
+ struct nvme_ns *ns, *next;
+ struct nvme_id_ctrl *ctrl;
+ struct nvme_id_ns *id_ns;
+ void *mem;
+ dma_addr_t dma_addr;
+
+ res = nvme_setup_io_queues(dev);
+ if (res)
+ return res;
+
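+	/*
+	 * A single 8K buffer holds the identify data in its first 4K and
+	 * the LBA range type feature data in its second 4K.
+	 */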
+ mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
+ GFP_KERNEL);
+
+ res = nvme_identify(dev, 0, 1, dma_addr);
+ if (res) {
+ res = -EIO;
+ goto out_free;
+ }
+
+ ctrl = mem;
+ nn = le32_to_cpup(&ctrl->nn);
+ memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
+ memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
+ memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
+
+ id_ns = mem;
+ for (i = 1; i <= nn; i++) {
+ res = nvme_identify(dev, i, 0, dma_addr);
+ if (res)
+ continue;
+
+ if (id_ns->ncap == 0)
+ continue;
+
+ res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
+ dma_addr + 4096);
+ if (res)
+ continue;
+
+ ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
+ if (ns)
+ list_add_tail(&ns->list, &dev->namespaces);
+ }
+ list_for_each_entry(ns, &dev->namespaces, list)
+ add_disk(ns->disk);
+
+ goto out;
+
+ out_free:
+ list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
+ list_del(&ns->list);
+ nvme_ns_free(ns);
+ }
+
+ out:
+ dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
+ return res;
+}
+
+static int nvme_dev_remove(struct nvme_dev *dev)
+{
+ struct nvme_ns *ns, *next;
+
+ spin_lock(&dev_list_lock);
+ list_del(&dev->node);
+ spin_unlock(&dev_list_lock);
+
+ /* TODO: wait all I/O finished or cancel them */
+
+ list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
+ list_del(&ns->list);
+ del_gendisk(ns->disk);
+ nvme_ns_free(ns);
+ }
+
+ nvme_free_queues(dev);
+
+ return 0;
+}
+
+static int nvme_setup_prp_pools(struct nvme_dev *dev)
+{
+ struct device *dmadev = &dev->pci_dev->dev;
+ dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
+ PAGE_SIZE, PAGE_SIZE, 0);
+ if (!dev->prp_page_pool)
+ return -ENOMEM;
+
+ /* Optimisation for I/Os between 4k and 128k */
+ dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
+ 256, 256, 0);
+ if (!dev->prp_small_pool) {
+ dma_pool_destroy(dev->prp_page_pool);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void nvme_release_prp_pools(struct nvme_dev *dev)
+{
+ dma_pool_destroy(dev->prp_page_pool);
+ dma_pool_destroy(dev->prp_small_pool);
+}
+
+/* XXX: Use an ida or something to let remove / add work correctly */
+static void nvme_set_instance(struct nvme_dev *dev)
+{
+ static int instance;
+ dev->instance = instance++;
+}
+
+static void nvme_release_instance(struct nvme_dev *dev)
+{
+}
+
+static int __devinit nvme_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int bars, result = -ENOMEM;
+ struct nvme_dev *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+ dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
+ GFP_KERNEL);
+ if (!dev->entry)
+ goto free;
+ dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
+ GFP_KERNEL);
+ if (!dev->queues)
+ goto free;
+
+ if (pci_enable_device_mem(pdev))
+ goto free;
+ pci_set_master(pdev);
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ if (pci_request_selected_regions(pdev, bars, "nvme"))
+ goto disable;
+
+ INIT_LIST_HEAD(&dev->namespaces);
+ dev->pci_dev = pdev;
+ pci_set_drvdata(pdev, dev);
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ nvme_set_instance(dev);
+ dev->entry[0].vector = pdev->irq;
+
+ result = nvme_setup_prp_pools(dev);
+ if (result)
+ goto disable_msix;
+
+ dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
+ if (!dev->bar) {
+ result = -ENOMEM;
+ goto disable_msix;
+ }
+
+ result = nvme_configure_admin_queue(dev);
+ if (result)
+ goto unmap;
+ dev->queue_count++;
+
+ spin_lock(&dev_list_lock);
+ list_add(&dev->node, &dev_list);
+ spin_unlock(&dev_list_lock);
+
+ result = nvme_dev_add(dev);
+ if (result)
+ goto delete;
+
+ return 0;
+
+ delete:
+ spin_lock(&dev_list_lock);
+ list_del(&dev->node);
+ spin_unlock(&dev_list_lock);
+
+ nvme_free_queues(dev);
+ unmap:
+ iounmap(dev->bar);
+ disable_msix:
+ pci_disable_msix(pdev);
+ nvme_release_instance(dev);
+ nvme_release_prp_pools(dev);
+ disable:
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
+ free:
+ kfree(dev->queues);
+ kfree(dev->entry);
+ kfree(dev);
+ return result;
+}
+
+static void __devexit nvme_remove(struct pci_dev *pdev)
+{
+ struct nvme_dev *dev = pci_get_drvdata(pdev);
+ nvme_dev_remove(dev);
+ pci_disable_msix(pdev);
+ iounmap(dev->bar);
+ nvme_release_instance(dev);
+ nvme_release_prp_pools(dev);
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
+ kfree(dev->queues);
+ kfree(dev->entry);
+ kfree(dev);
+}
+
+/* These functions are yet to be implemented */
+#define nvme_error_detected NULL
+#define nvme_dump_registers NULL
+#define nvme_link_reset NULL
+#define nvme_slot_reset NULL
+#define nvme_error_resume NULL
+#define nvme_suspend NULL
+#define nvme_resume NULL
+
+static struct pci_error_handlers nvme_err_handler = {
+ .error_detected = nvme_error_detected,
+ .mmio_enabled = nvme_dump_registers,
+ .link_reset = nvme_link_reset,
+ .slot_reset = nvme_slot_reset,
+ .resume = nvme_error_resume,
+};
+
+/* Move to pci_ids.h later */
+#define PCI_CLASS_STORAGE_EXPRESS 0x010802
+
+static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
+ { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, nvme_id_table);
+
+static struct pci_driver nvme_driver = {
+ .name = "nvme",
+ .id_table = nvme_id_table,
+ .probe = nvme_probe,
+ .remove = __devexit_p(nvme_remove),
+ .suspend = nvme_suspend,
+ .resume = nvme_resume,
+ .err_handler = &nvme_err_handler,
+};
+
+static int __init nvme_init(void)
+{
+ int result = -EBUSY;
+
+ nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
+ if (IS_ERR(nvme_thread))
+ return PTR_ERR(nvme_thread);
+
+ nvme_major = register_blkdev(nvme_major, "nvme");
+ if (nvme_major <= 0)
+ goto kill_kthread;
+
+ result = pci_register_driver(&nvme_driver);
+ if (result)
+ goto unregister_blkdev;
+ return 0;
+
+ unregister_blkdev:
+ unregister_blkdev(nvme_major, "nvme");
+ kill_kthread:
+ kthread_stop(nvme_thread);
+ return result;
+}
+
+static void __exit nvme_exit(void)
+{
+ pci_unregister_driver(&nvme_driver);
+ unregister_blkdev(nvme_major, "nvme");
+ kthread_stop(nvme_thread);
+}
+
+MODULE_AUTHOR("Matthew Wilcox ");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.8");
+module_init(nvme_init);
+module_exit(nvme_exit);
diff --git a/trunk/drivers/char/tpm/tpm.c b/trunk/drivers/char/tpm/tpm.c
index 6a8771f47a55..32362cf35b8d 100644
--- a/trunk/drivers/char/tpm/tpm.c
+++ b/trunk/drivers/char/tpm/tpm.c
@@ -846,6 +846,15 @@ int tpm_do_selftest(struct tpm_chip *chip)
do {
rc = __tpm_pcr_read(chip, 0, digest);
+ if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
+ dev_info(chip->dev,
+ "TPM is disabled/deactivated (0x%X)\n", rc);
+			/* The TPM is disabled or deactivated; the driver can
+			 * still proceed and the TPM correctly handles the
+			 * commands needed for suspend/resume.
+			 */
+ return 0;
+ }
if (rc != TPM_WARN_DOING_SELFTEST)
return rc;
msleep(delay_msec);
diff --git a/trunk/drivers/char/tpm/tpm.h b/trunk/drivers/char/tpm/tpm.h
index 8c1df302fbb6..010547138281 100644
--- a/trunk/drivers/char/tpm/tpm.h
+++ b/trunk/drivers/char/tpm/tpm.h
@@ -39,6 +39,9 @@ enum tpm_addr {
};
#define TPM_WARN_DOING_SELFTEST 0x802
+#define TPM_ERR_DEACTIVATED 0x6
+#define TPM_ERR_DISABLED 0x7
+
#define TPM_HEADER_SIZE 10
extern ssize_t tpm_show_pubek(struct device *, struct device_attribute *attr,
char *);
diff --git a/trunk/drivers/gpio/Kconfig b/trunk/drivers/gpio/Kconfig
index 37c4bd1cacd5..d0c41188d4e5 100644
--- a/trunk/drivers/gpio/Kconfig
+++ b/trunk/drivers/gpio/Kconfig
@@ -87,6 +87,7 @@ config GPIO_GENERIC_PLATFORM
config GPIO_IT8761E
tristate "IT8761E GPIO support"
+ depends on X86 # unconditional access to IO space.
help
Say yes here to support GPIO functionality of IT8761E super I/O chip.
diff --git a/trunk/drivers/gpio/gpio-ml-ioh.c b/trunk/drivers/gpio/gpio-ml-ioh.c
index 461958fc2264..03d6dd5dcb77 100644
--- a/trunk/drivers/gpio/gpio-ml-ioh.c
+++ b/trunk/drivers/gpio/gpio-ml-ioh.c
@@ -248,7 +248,7 @@ static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port)
static int ioh_irq_type(struct irq_data *d, unsigned int type)
{
u32 im;
- u32 *im_reg;
+ void __iomem *im_reg;
u32 ien;
u32 im_pos;
int ch;
@@ -412,7 +412,7 @@ static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
int i, j;
struct ioh_gpio *chip;
void __iomem *base;
- void __iomem *chip_save;
+ void *chip_save;
int irq_base;
ret = pci_enable_device(pdev);
@@ -428,7 +428,7 @@ static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
}
base = pci_iomap(pdev, 1, 0);
- if (base == 0) {
+ if (!base) {
dev_err(&pdev->dev, "%s : pci_iomap failed", __func__);
ret = -ENOMEM;
goto err_iomap;
@@ -521,7 +521,7 @@ static void __devexit ioh_gpio_remove(struct pci_dev *pdev)
int err;
int i;
struct ioh_gpio *chip = pci_get_drvdata(pdev);
- void __iomem *chip_save;
+ void *chip_save;
chip_save = chip;
diff --git a/trunk/drivers/gpio/gpio-pch.c b/trunk/drivers/gpio/gpio-pch.c
index f0603297f829..68fa55e86eb1 100644
--- a/trunk/drivers/gpio/gpio-pch.c
+++ b/trunk/drivers/gpio/gpio-pch.c
@@ -231,7 +231,7 @@ static void pch_gpio_setup(struct pch_gpio *chip)
static int pch_irq_type(struct irq_data *d, unsigned int type)
{
u32 im;
- u32 *im_reg;
+ u32 __iomem *im_reg;
u32 ien;
u32 im_pos;
int ch;
@@ -376,7 +376,7 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
}
chip->base = pci_iomap(pdev, 1, 0);
- if (chip->base == 0) {
+ if (!chip->base) {
dev_err(&pdev->dev, "%s : pci_iomap FAILED", __func__);
ret = -ENOMEM;
goto err_iomap;
diff --git a/trunk/drivers/gpio/gpio-tps65910.c b/trunk/drivers/gpio/gpio-tps65910.c
index b9c1c297669e..91f45b965d1e 100644
--- a/trunk/drivers/gpio/gpio-tps65910.c
+++ b/trunk/drivers/gpio/gpio-tps65910.c
@@ -52,7 +52,7 @@ static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset,
struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
/* Set the initial value */
- tps65910_gpio_set(gc, 0, value);
+ tps65910_gpio_set(gc, offset, value);
return tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
GPIO_CFG_MASK);
diff --git a/trunk/drivers/gpu/drm/gma500/gtt.c b/trunk/drivers/gpu/drm/gma500/gtt.c
index e770bd190a5c..5d5330f667f1 100644
--- a/trunk/drivers/gpu/drm/gma500/gtt.c
+++ b/trunk/drivers/gpu/drm/gma500/gtt.c
@@ -20,6 +20,7 @@
*/
#include <drm/drmP.h>
+#include <linux/shmem_fs.h>
#include "psb_drv.h"
@@ -203,9 +204,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
gt->npage = pages;
for (i = 0; i < pages; i++) {
- /* FIXME: needs updating as per mail from Hugh Dickins */
- p = read_cache_page_gfp(mapping, i,
- __GFP_COLD | GFP_KERNEL);
+ p = shmem_read_mapping_page(mapping, i);
if (IS_ERR(p))
goto err;
gt->pages[i] = p;
diff --git a/trunk/drivers/i2c/busses/Kconfig b/trunk/drivers/i2c/busses/Kconfig
index cbe7a2fb779f..3101dd59e379 100644
--- a/trunk/drivers/i2c/busses/Kconfig
+++ b/trunk/drivers/i2c/busses/Kconfig
@@ -682,19 +682,19 @@ config I2C_XILINX
will be called xilinx_i2c.
config I2C_EG20T
- tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223)"
+ tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) I2C"
depends on PCI
help
This driver is for PCH(Platform controller Hub) I2C of EG20T which
is an IOH(Input/Output Hub) for x86 embedded processor.
This driver can access PCH I2C bus device.
- This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
- Output Hub), ML7213 and ML7223.
- ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
- for MP(Media Phone) use.
- ML7213/ML7223 is companion chip for Intel Atom E6xx series.
- ML7213/ML7223 is completely compatible for Intel EG20T PCH.
+	  This driver can also be used for the LAPIS Semiconductor IOH (Input/
+	  Output Hub) devices ML7213, ML7223 and ML7831.
+	  ML7213 IOH is for IVI (In-Vehicle Infotainment) use, ML7223 IOH is
+	  for MP (Media Phone) use and ML7831 IOH is for general purpose use.
+	  ML7213/ML7223/ML7831 are companion chips for the Intel Atom E6xx series
+	  and are fully compatible with the Intel EG20T PCH.
comment "External I2C/SMBus adapter drivers"
diff --git a/trunk/drivers/i2c/busses/i2c-eg20t.c b/trunk/drivers/i2c/busses/i2c-eg20t.c
index 3ef3557b6e32..ca8877641040 100644
--- a/trunk/drivers/i2c/busses/i2c-eg20t.c
+++ b/trunk/drivers/i2c/busses/i2c-eg20t.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -136,7 +136,8 @@
/*
Set the number of I2C instance max
Intel EG20T PCH : 1ch
-OKI SEMICONDUCTOR ML7213 IOH : 2ch
+LAPIS Semiconductor ML7213 IOH : 2ch
+LAPIS Semiconductor ML7831 IOH : 1ch
*/
#define PCH_I2C_MAX_DEV 2
@@ -180,15 +181,17 @@ static int pch_clk = 50000; /* specifies I2C clock speed in KHz */
static wait_queue_head_t pch_event;
static DEFINE_MUTEX(pch_mutex);
-/* Definition for ML7213 by OKI SEMICONDUCTOR */
+/* Definition for ML7213 by LAPIS Semiconductor */
#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_ML7213_I2C 0x802D
#define PCI_DEVICE_ID_ML7223_I2C 0x8010
+#define PCI_DEVICE_ID_ML7831_I2C 0x8817
static DEFINE_PCI_DEVICE_TABLE(pch_pcidev_id) = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_I2C), 1, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_I2C), 2, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_I2C), 1, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_I2C), 1, },
{0,}
};
@@ -243,7 +246,7 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap)
if (pch_clk > PCH_MAX_CLK)
pch_clk = 62500;
- pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8;
+ pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / (pch_i2c_speed * 8);
/* Set transfer speed in I2CBC */
iowrite32(pch_i2cbc, p + PCH_I2CBC);
@@ -918,7 +921,9 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
pch_adap->dev.parent = &pdev->dev;
pch_i2c_init(&adap_info->pch_data[i]);
- ret = i2c_add_adapter(pch_adap);
+
+ pch_adap->nr = i;
+ ret = i2c_add_numbered_adapter(pch_adap);
if (ret) {
pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
goto err_add_adapter;
@@ -1058,8 +1063,8 @@ static void __exit pch_pci_exit(void)
}
module_exit(pch_pci_exit);
-MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH I2C Driver");
+MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semicon ML7213/ML7223/ML7831 IOH I2C");
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Tomoya MORINAGA. ");
+MODULE_AUTHOR("Tomoya MORINAGA. ");
module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR));
module_param(pch_clk, int, (S_IRUSR | S_IWUSR));
diff --git a/trunk/drivers/i2c/busses/i2c-omap.c b/trunk/drivers/i2c/busses/i2c-omap.c
index fa23faa20f0e..f713eac55047 100644
--- a/trunk/drivers/i2c/busses/i2c-omap.c
+++ b/trunk/drivers/i2c/busses/i2c-omap.c
@@ -37,6 +37,9 @@
#include
#include
#include
+#include <linux/of.h>
+#include <linux/of_i2c.h>
+#include <linux/of_device.h>
#include
#include
#include
@@ -182,7 +185,9 @@ struct omap_i2c_dev {
u32 latency; /* maximum mpu wkup latency */
void (*set_mpu_wkup_lat)(struct device *dev,
long latency);
- u32 speed; /* Speed of bus in Khz */
+ u32 speed; /* Speed of bus in kHz */
+ u32 dtrev; /* extra revision from DT */
+ u32 flags;
u16 cmd_err;
u8 *buf;
u8 *regs;
@@ -235,7 +240,7 @@ static const u8 reg_map_ip_v2[] = {
[OMAP_I2C_BUF_REG] = 0x94,
[OMAP_I2C_CNT_REG] = 0x98,
[OMAP_I2C_DATA_REG] = 0x9c,
- [OMAP_I2C_SYSC_REG] = 0x20,
+ [OMAP_I2C_SYSC_REG] = 0x10,
[OMAP_I2C_CON_REG] = 0xa4,
[OMAP_I2C_OA_REG] = 0xa8,
[OMAP_I2C_SA_REG] = 0xac,
@@ -266,11 +271,7 @@ static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg)
static void omap_i2c_unidle(struct omap_i2c_dev *dev)
{
- struct omap_i2c_bus_platform_data *pdata;
-
- pdata = dev->dev->platform_data;
-
- if (pdata->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
+ if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate);
omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, dev->scllstate);
@@ -291,13 +292,10 @@ static void omap_i2c_unidle(struct omap_i2c_dev *dev)
static void omap_i2c_idle(struct omap_i2c_dev *dev)
{
- struct omap_i2c_bus_platform_data *pdata;
u16 iv;
- pdata = dev->dev->platform_data;
-
dev->iestate = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
- if (pdata->rev == OMAP_I2C_IP_VERSION_2)
+ if (dev->dtrev == OMAP_I2C_IP_VERSION_2)
omap_i2c_write_reg(dev, OMAP_I2C_IP_V2_IRQENABLE_CLR, 1);
else
omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, 0);
@@ -320,9 +318,6 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
unsigned long timeout;
unsigned long internal_clk = 0;
struct clk *fclk;
- struct omap_i2c_bus_platform_data *pdata;
-
- pdata = dev->dev->platform_data;
if (dev->rev >= OMAP_I2C_OMAP1_REV_2) {
/* Disable I2C controller before soft reset */
@@ -373,7 +368,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
}
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
- if (pdata->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
+ if (dev->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
/*
* The I2C functional clock is the armxor_ck, so there's
* no need to get "armxor_ck" separately. Now, if OMAP2420
@@ -397,7 +392,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
psc = fclk_rate / 12000000;
}
- if (!(pdata->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) {
+ if (!(dev->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) {
/*
* HSI2C controller internal clk rate should be 19.2 Mhz for
@@ -406,7 +401,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
* The filter is iclk (fclk for HS) period.
*/
if (dev->speed > 400 ||
- pdata->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK)
+ dev->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK)
internal_clk = 19200;
else if (dev->speed > 100)
internal_clk = 9600;
@@ -475,7 +470,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
dev->errata = 0;
- if (pdata->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207)
+ if (dev->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207)
dev->errata |= I2C_OMAP_ERRATA_I207;
/* Enable interrupts */
@@ -484,7 +479,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
OMAP_I2C_IE_AL) | ((dev->fifo_size) ?
(OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0);
omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
- if (pdata->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
+ if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
dev->pscstate = psc;
dev->scllstate = scll;
dev->sclhstate = sclh;
@@ -804,9 +799,6 @@ omap_i2c_isr(int this_irq, void *dev_id)
u16 bits;
u16 stat, w;
int err, count = 0;
- struct omap_i2c_bus_platform_data *pdata;
-
- pdata = dev->dev->platform_data;
if (pm_runtime_suspended(dev->dev))
return IRQ_NONE;
@@ -830,11 +822,9 @@ omap_i2c_isr(int this_irq, void *dev_id)
~(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
- if (stat & OMAP_I2C_STAT_NACK) {
+ if (stat & OMAP_I2C_STAT_NACK)
err |= OMAP_I2C_STAT_NACK;
- omap_i2c_write_reg(dev, OMAP_I2C_CON_REG,
- OMAP_I2C_CON_STP);
- }
+
if (stat & OMAP_I2C_STAT_AL) {
dev_err(dev->dev, "Arbitration lost\n");
err |= OMAP_I2C_STAT_AL;
@@ -875,7 +865,7 @@ omap_i2c_isr(int this_irq, void *dev_id)
* Data reg in 2430, omap3 and
* omap4 is 8 bit wide
*/
- if (pdata->flags &
+ if (dev->flags &
OMAP_I2C_FLAG_16BIT_DATA_REG) {
if (dev->buf_len) {
*dev->buf++ = w >> 8;
@@ -918,7 +908,7 @@ omap_i2c_isr(int this_irq, void *dev_id)
* Data reg in 2430, omap3 and
* omap4 is 8 bit wide
*/
- if (pdata->flags &
+ if (dev->flags &
OMAP_I2C_FLAG_16BIT_DATA_REG) {
if (dev->buf_len) {
w |= *dev->buf++ << 8;
@@ -965,6 +955,32 @@ static const struct i2c_algorithm omap_i2c_algo = {
.functionality = omap_i2c_func,
};
+#ifdef CONFIG_OF
+static struct omap_i2c_bus_platform_data omap3_pdata = {
+ .rev = OMAP_I2C_IP_VERSION_1,
+ .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 |
+ OMAP_I2C_FLAG_RESET_REGS_POSTIDLE |
+ OMAP_I2C_FLAG_BUS_SHIFT_2,
+};
+
+static struct omap_i2c_bus_platform_data omap4_pdata = {
+ .rev = OMAP_I2C_IP_VERSION_2,
+};
+
+static const struct of_device_id omap_i2c_of_match[] = {
+ {
+ .compatible = "ti,omap4-i2c",
+ .data = &omap4_pdata,
+ },
+ {
+ .compatible = "ti,omap3-i2c",
+ .data = &omap3_pdata,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, omap_i2c_of_match);
+#endif
+
static int __devinit
omap_i2c_probe(struct platform_device *pdev)
{
@@ -972,9 +988,10 @@ omap_i2c_probe(struct platform_device *pdev)
struct i2c_adapter *adap;
struct resource *mem, *irq, *ioarea;
struct omap_i2c_bus_platform_data *pdata = pdev->dev.platform_data;
+ struct device_node *node = pdev->dev.of_node;
+ const struct of_device_id *match;
irq_handler_t isr;
int r;
- u32 speed = 0;
/* NOTE: driver uses the static register mapping */
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1001,15 +1018,24 @@ omap_i2c_probe(struct platform_device *pdev)
goto err_release_region;
}
- if (pdata != NULL) {
- speed = pdata->clkrate;
+ match = of_match_device(omap_i2c_of_match, &pdev->dev);
+ if (match) {
+ u32 freq = 100000; /* default to 100000 Hz */
+
+ pdata = match->data;
+ dev->dtrev = pdata->rev;
+ dev->flags = pdata->flags;
+
+ of_property_read_u32(node, "clock-frequency", &freq);
+ /* convert DT freq value in Hz into kHz for speed */
+ dev->speed = freq / 1000;
+ } else if (pdata != NULL) {
+ dev->speed = pdata->clkrate;
+ dev->flags = pdata->flags;
dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
- } else {
- speed = 100; /* Default speed */
- dev->set_mpu_wkup_lat = NULL;
+ dev->dtrev = pdata->rev;
}
- dev->speed = speed;
dev->dev = &pdev->dev;
dev->irq = irq->start;
dev->base = ioremap(mem->start, resource_size(mem));
@@ -1020,9 +1046,9 @@ omap_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
- dev->reg_shift = (pdata->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
+ dev->reg_shift = (dev->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
- if (pdata->rev == OMAP_I2C_IP_VERSION_2)
+ if (dev->dtrev == OMAP_I2C_IP_VERSION_2)
dev->regs = (u8 *)reg_map_ip_v2;
else
dev->regs = (u8 *)reg_map_ip_v1;
@@ -1035,7 +1061,7 @@ omap_i2c_probe(struct platform_device *pdev)
if (dev->rev <= OMAP_I2C_REV_ON_3430)
dev->errata |= I2C_OMAP3_1P153;
- if (!(pdata->flags & OMAP_I2C_FLAG_NO_FIFO)) {
+ if (!(dev->flags & OMAP_I2C_FLAG_NO_FIFO)) {
u16 s;
/* Set up the fifo size - Get total size */
@@ -1058,7 +1084,7 @@ omap_i2c_probe(struct platform_device *pdev)
/* calculate wakeup latency constraint for MPU */
if (dev->set_mpu_wkup_lat != NULL)
dev->latency = (1000000 * dev->fifo_size) /
- (1000 * speed / 8);
+ (1000 * dev->speed / 8);
}
/* reset ASAP, clearing any IRQs */
@@ -1074,7 +1100,7 @@ omap_i2c_probe(struct platform_device *pdev)
}
dev_info(dev->dev, "bus %d rev%d.%d.%d at %d kHz\n", pdev->id,
- pdata->rev, dev->rev >> 4, dev->rev & 0xf, dev->speed);
+ dev->dtrev, dev->rev >> 4, dev->rev & 0xf, dev->speed);
pm_runtime_put(dev->dev);
@@ -1085,6 +1111,7 @@ omap_i2c_probe(struct platform_device *pdev)
strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
adap->algo = &omap_i2c_algo;
adap->dev.parent = &pdev->dev;
+ adap->dev.of_node = pdev->dev.of_node;
/* i2c device drivers may be active on return from add_adapter() */
adap->nr = pdev->id;
@@ -1094,6 +1121,8 @@ omap_i2c_probe(struct platform_device *pdev)
goto err_free_irq;
}
+ of_i2c_register_devices(adap);
+
return 0;
err_free_irq:
@@ -1166,6 +1195,7 @@ static struct platform_driver omap_i2c_driver = {
.name = "omap_i2c",
.owner = THIS_MODULE,
.pm = OMAP_I2C_PM_OPS,
+ .of_match_table = of_match_ptr(omap_i2c_of_match),
},
};
diff --git a/trunk/drivers/idle/intel_idle.c b/trunk/drivers/idle/intel_idle.c
index 5d2f8e13cf0e..20bce51c2e82 100644
--- a/trunk/drivers/idle/intel_idle.c
+++ b/trunk/drivers/idle/intel_idle.c
@@ -197,7 +197,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
.enter = &intel_idle },
};
-static int get_driver_data(int cstate)
+static long get_driver_data(int cstate)
{
int driver_data;
switch (cstate) {
@@ -232,6 +232,7 @@ static int get_driver_data(int cstate)
* @drv: cpuidle driver
* @index: index of cpuidle state
*
+ * Must be called under local_irq_disable().
*/
static int intel_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
@@ -247,8 +248,6 @@ static int intel_idle(struct cpuidle_device *dev,
cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
- local_irq_disable();
-
/*
* leave_mm() to avoid costly and often unnecessary wakeups
* for flushing the user TLB's associated with the active mm.
@@ -348,7 +347,8 @@ static int intel_idle_probe(void)
cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
- !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
+ !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
+ !mwait_substates)
return -ENODEV;
pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
@@ -394,7 +394,7 @@ static int intel_idle_probe(void)
if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
else {
- smp_call_function(__setup_broadcast_timer, (void *)true, 1);
+ on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
register_cpu_notifier(&setup_broadcast_notifier);
}
@@ -471,71 +471,67 @@ static int intel_idle_cpuidle_driver_init(void)
}
if (auto_demotion_disable_flags)
- smp_call_function(auto_demotion_disable, NULL, 1);
+ on_each_cpu(auto_demotion_disable, NULL, 1);
return 0;
}
/*
- * intel_idle_cpuidle_devices_init()
+ * intel_idle_cpu_init()
* allocate, initialize, register cpuidle_devices
+ * @cpu: cpu/core to initialize
*/
-static int intel_idle_cpuidle_devices_init(void)
+int intel_idle_cpu_init(int cpu)
{
- int i, cstate;
+ int cstate;
struct cpuidle_device *dev;
- intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
- if (intel_idle_cpuidle_devices == NULL)
- return -ENOMEM;
-
- for_each_online_cpu(i) {
- dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
+ dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
- dev->state_count = 1;
+ dev->state_count = 1;
- for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
- int num_substates;
+ for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
+ int num_substates;
- if (cstate > max_cstate) {
- printk(PREFIX "max_cstate %d reached\n",
- max_cstate);
- break;
- }
+ if (cstate > max_cstate) {
+ printk(PREFIX "max_cstate %d reached\n",
+ max_cstate);
+ break;
+ }
- /* does the state exist in CPUID.MWAIT? */
- num_substates = (mwait_substates >> ((cstate) * 4))
- & MWAIT_SUBSTATE_MASK;
- if (num_substates == 0)
- continue;
- /* is the state not enabled? */
- if (cpuidle_state_table[cstate].enter == NULL) {
- continue;
- }
+ /* does the state exist in CPUID.MWAIT? */
+ num_substates = (mwait_substates >> ((cstate) * 4))
+ & MWAIT_SUBSTATE_MASK;
+ if (num_substates == 0)
+ continue;
+ /* is the state not enabled? */
+ if (cpuidle_state_table[cstate].enter == NULL)
+ continue;
- dev->states_usage[dev->state_count].driver_data =
- (void *)get_driver_data(cstate);
+ dev->states_usage[dev->state_count].driver_data =
+ (void *)get_driver_data(cstate);
dev->state_count += 1;
}
+ dev->cpu = cpu;
- dev->cpu = i;
- if (cpuidle_register_device(dev)) {
- pr_debug(PREFIX "cpuidle_register_device %d failed!\n",
- i);
- intel_idle_cpuidle_devices_uninit();
- return -EIO;
- }
+ if (cpuidle_register_device(dev)) {
+ pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
+ intel_idle_cpuidle_devices_uninit();
+ return -EIO;
}
+ if (auto_demotion_disable_flags)
+ smp_call_function_single(cpu, auto_demotion_disable, NULL, 1);
+
return 0;
}
static int __init intel_idle_init(void)
{
- int retval;
+ int retval, i;
/* Do not load intel_idle at all for now if idle= is passed */
if (boot_option_idle_override != IDLE_NO_OVERRIDE)
@@ -553,10 +549,16 @@ static int __init intel_idle_init(void)
return retval;
}
- retval = intel_idle_cpuidle_devices_init();
- if (retval) {
- cpuidle_unregister_driver(&intel_idle_driver);
- return retval;
+ intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
+ if (intel_idle_cpuidle_devices == NULL)
+ return -ENOMEM;
+
+ for_each_online_cpu(i) {
+ retval = intel_idle_cpu_init(i);
+ if (retval) {
+ cpuidle_unregister_driver(&intel_idle_driver);
+ return retval;
+ }
}
return 0;
@@ -568,7 +570,7 @@ static void __exit intel_idle_exit(void)
cpuidle_unregister_driver(&intel_idle_driver);
if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
- smp_call_function(__setup_broadcast_timer, (void *)false, 1);
+ on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
unregister_cpu_notifier(&setup_broadcast_notifier);
}
diff --git a/trunk/drivers/infiniband/Kconfig b/trunk/drivers/infiniband/Kconfig
index 0f9a84c1046a..eb0add311dc8 100644
--- a/trunk/drivers/infiniband/Kconfig
+++ b/trunk/drivers/infiniband/Kconfig
@@ -55,6 +55,7 @@ source "drivers/infiniband/hw/nes/Kconfig"
source "drivers/infiniband/ulp/ipoib/Kconfig"
source "drivers/infiniband/ulp/srp/Kconfig"
+source "drivers/infiniband/ulp/srpt/Kconfig"
source "drivers/infiniband/ulp/iser/Kconfig"
diff --git a/trunk/drivers/infiniband/Makefile b/trunk/drivers/infiniband/Makefile
index 9cc7a47d3e67..a3b2d8eac86e 100644
--- a/trunk/drivers/infiniband/Makefile
+++ b/trunk/drivers/infiniband/Makefile
@@ -10,4 +10,5 @@ obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
obj-$(CONFIG_INFINIBAND_NES) += hw/nes/
obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
+obj-$(CONFIG_INFINIBAND_SRPT) += ulp/srpt/
obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/
diff --git a/trunk/drivers/infiniband/ulp/srpt/Kconfig b/trunk/drivers/infiniband/ulp/srpt/Kconfig
new file mode 100644
index 000000000000..31ee83d528d9
--- /dev/null
+++ b/trunk/drivers/infiniband/ulp/srpt/Kconfig
@@ -0,0 +1,12 @@
+config INFINIBAND_SRPT
+ tristate "InfiniBand SCSI RDMA Protocol target support"
+ depends on INFINIBAND && TARGET_CORE
+ ---help---
+
+	  Support for the SCSI RDMA Protocol (SRP) target driver. SRP
+	  allows an initiator to access a block storage device on another
+	  host (the target) over a network that supports the RDMA protocol.
+	  Currently the RDMA protocol is supported by InfiniBand and by
+	  iWARP network hardware. More information about the SRP protocol
+	  can be found on the website of the INCITS T10 technical committee
+	  (http://www.t10.org/).
diff --git a/trunk/drivers/infiniband/ulp/srpt/Makefile b/trunk/drivers/infiniband/ulp/srpt/Makefile
new file mode 100644
index 000000000000..e3ee4bdfffa5
--- /dev/null
+++ b/trunk/drivers/infiniband/ulp/srpt/Makefile
@@ -0,0 +1,2 @@
+ccflags-y := -Idrivers/target
+obj-$(CONFIG_INFINIBAND_SRPT) += ib_srpt.o
diff --git a/trunk/drivers/infiniband/ulp/srpt/ib_dm_mad.h b/trunk/drivers/infiniband/ulp/srpt/ib_dm_mad.h
new file mode 100644
index 000000000000..fb1de1f6f297
--- /dev/null
+++ b/trunk/drivers/infiniband/ulp/srpt/ib_dm_mad.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef IB_DM_MAD_H
+#define IB_DM_MAD_H
+
+#include <linux/types.h>
+
+#include <rdma/ib_mad.h>
+
+enum {
+ /*
+ * See also section 13.4.7 Status Field, table 115 MAD Common Status
+ * Field Bit Values and also section 16.3.1.1 Status Field in the
+ * InfiniBand Architecture Specification.
+ */
+ DM_MAD_STATUS_UNSUP_METHOD = 0x0008,
+ DM_MAD_STATUS_UNSUP_METHOD_ATTR = 0x000c,
+ DM_MAD_STATUS_INVALID_FIELD = 0x001c,
+ DM_MAD_STATUS_NO_IOC = 0x0100,
+
+ /*
+ * See also the Device Management chapter, section 16.3.3 Attributes,
+ * table 279 Device Management Attributes in the InfiniBand
+ * Architecture Specification.
+ */
+ DM_ATTR_CLASS_PORT_INFO = 0x01,
+ DM_ATTR_IOU_INFO = 0x10,
+ DM_ATTR_IOC_PROFILE = 0x11,
+ DM_ATTR_SVC_ENTRIES = 0x12
+};
+
+struct ib_dm_hdr {
+ u8 reserved[28];
+};
+
+/*
+ * Structure of management datagram sent by the SRP target implementation.
+ * Contains a management datagram header, reliable multi-packet transaction
+ * protocol (RMPP) header and ib_dm_hdr. Notes:
+ * - The SRP target implementation does not use RMPP or ib_dm_hdr when sending
+ * management datagrams.
+ * - The header size must be exactly 64 bytes (IB_MGMT_DEVICE_HDR), since this
+ * is the header size that is passed to ib_create_send_mad() in ib_srpt.c.
+ * - The maximum supported size for a management datagram when not using RMPP
+ * is 256 bytes -- 64 bytes header and 192 (IB_MGMT_DEVICE_DATA) bytes data.
+ */
+struct ib_dm_mad {
+ struct ib_mad_hdr mad_hdr;
+ struct ib_rmpp_hdr rmpp_hdr;
+ struct ib_dm_hdr dm_hdr;
+ u8 data[IB_MGMT_DEVICE_DATA];
+};
+
+/*
+ * IOUnitInfo as defined in section 16.3.3.3 IOUnitInfo of the InfiniBand
+ * Architecture Specification.
+ */
+struct ib_dm_iou_info {
+ __be16 change_id;
+ u8 max_controllers;
+ u8 op_rom;
+ u8 controller_list[128];
+};
+
+/*
+ * IOControllerprofile as defined in section 16.3.3.4 IOControllerProfile of
+ * the InfiniBand Architecture Specification.
+ */
+struct ib_dm_ioc_profile {
+ __be64 guid;
+ __be32 vendor_id;
+ __be32 device_id;
+ __be16 device_version;
+ __be16 reserved1;
+ __be32 subsys_vendor_id;
+ __be32 subsys_device_id;
+ __be16 io_class;
+ __be16 io_subclass;
+ __be16 protocol;
+ __be16 protocol_version;
+ __be16 service_conn;
+ __be16 initiators_supported;
+ __be16 send_queue_depth;
+ u8 reserved2;
+ u8 rdma_read_depth;
+ __be32 send_size;
+ __be32 rdma_size;
+ u8 op_cap_mask;
+ u8 svc_cap_mask;
+ u8 num_svc_entries;
+ u8 reserved3[9];
+ u8 id_string[64];
+};
+
+struct ib_dm_svc_entry {
+ u8 name[40];
+ __be64 id;
+};
+
+/*
+ * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
+ * Specification. See also section B.7, table B.8 in the T10 SRP r16a document.
+ */
+struct ib_dm_svc_entries {
+ struct ib_dm_svc_entry service_entries[4];
+};
+
+#endif
diff --git a/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c b/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c
new file mode 100644
index 000000000000..cd5d05e22a77
--- /dev/null
+++ b/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -0,0 +1,4073 @@
+/*
+ * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
+ * Copyright (C) 2008 - 2011 Bart Van Assche .
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "ib_srpt.h"
+
+/* Name of this kernel module. */
+#define DRV_NAME "ib_srpt"
+#define DRV_VERSION "2.0.0"
+#define DRV_RELDATE "2011-02-14"
+
+#define SRPT_ID_STRING "Linux SRP target"
+
+#undef pr_fmt
+#define pr_fmt(fmt) DRV_NAME " " fmt
+
+MODULE_AUTHOR("Vu Pham and Bart Van Assche");
+MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
+ "v" DRV_VERSION " (" DRV_RELDATE ")");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/*
+ * Global Variables
+ */
+
+static u64 srpt_service_guid;
+static spinlock_t srpt_dev_lock; /* Protects srpt_dev_list. */
+static struct list_head srpt_dev_list; /* List of srpt_device structures. */
+
+static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
+module_param(srp_max_req_size, int, 0444);
+MODULE_PARM_DESC(srp_max_req_size,
+ "Maximum size of SRP request messages in bytes.");
+
+static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
+module_param(srpt_srq_size, int, 0444);
+MODULE_PARM_DESC(srpt_srq_size,
+ "Shared receive queue (SRQ) size.");
+
+static int srpt_get_u64_x(char *buffer, struct kernel_param *kp)
+{
+ return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
+}
+module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
+ 0444);
+MODULE_PARM_DESC(srpt_service_guid,
+ "Using this value for ioc_guid, id_ext, and cm_listen_id"
+ " instead of using the node_guid of the first HCA.");
+
+static struct ib_client srpt_client;
+static struct target_fabric_configfs *srpt_target;
+static void srpt_release_channel(struct srpt_rdma_ch *ch);
+static int srpt_queue_status(struct se_cmd *cmd);
+
+/**
+ * opposite_dma_dir() - Swap DMA_TO_DEVICE and DMA_FROM_DEVICE.
+ */
+static inline
+enum dma_data_direction opposite_dma_dir(enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_TO_DEVICE: return DMA_FROM_DEVICE;
+ case DMA_FROM_DEVICE: return DMA_TO_DEVICE;
+ default: return dir;
+ }
+}
+
+/**
+ * srpt_sdev_name() - Return the name associated with the HCA.
+ *
+ * Examples are ib0, ib1, ...
+ */
+static inline const char *srpt_sdev_name(struct srpt_device *sdev)
+{
+ return sdev->device->name;
+}
+
+static enum rdma_ch_state srpt_get_ch_state(struct srpt_rdma_ch *ch)
+{
+ unsigned long flags;
+ enum rdma_ch_state state;
+
+ spin_lock_irqsave(&ch->spinlock, flags);
+ state = ch->state;
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+ return state;
+}
+
+static enum rdma_ch_state
+srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new_state)
+{
+ unsigned long flags;
+ enum rdma_ch_state prev;
+
+ spin_lock_irqsave(&ch->spinlock, flags);
+ prev = ch->state;
+ ch->state = new_state;
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+ return prev;
+}
+
+/**
+ * srpt_test_and_set_ch_state() - Test and set the channel state.
+ *
+ * Returns true if and only if the channel state has been set to the new state.
+ */
+static bool
+srpt_test_and_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state old,
+ enum rdma_ch_state new)
+{
+ unsigned long flags;
+ enum rdma_ch_state prev;
+
+ spin_lock_irqsave(&ch->spinlock, flags);
+ prev = ch->state;
+ if (prev == old)
+ ch->state = new;
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+ return prev == old;
+}
+
+/**
+ * srpt_event_handler() - Asynchronous IB event callback function.
+ *
+ * Callback function called by the InfiniBand core when an asynchronous IB
+ * event occurs. This callback may occur in interrupt context. See also
+ * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
+ * Architecture Specification.
+ */
+static void srpt_event_handler(struct ib_event_handler *handler,
+ struct ib_event *event)
+{
+ struct srpt_device *sdev;
+ struct srpt_port *sport;
+
+ sdev = ib_get_client_data(event->device, &srpt_client);
+ if (!sdev || sdev->device != event->device)
+ return;
+
+ pr_debug("ASYNC event= %d on device= %s\n", event->event,
+ srpt_sdev_name(sdev));
+
+ switch (event->event) {
+ case IB_EVENT_PORT_ERR:
+ if (event->element.port_num <= sdev->device->phys_port_cnt) {
+ sport = &sdev->port[event->element.port_num - 1];
+ sport->lid = 0;
+ sport->sm_lid = 0;
+ }
+ break;
+ case IB_EVENT_PORT_ACTIVE:
+ case IB_EVENT_LID_CHANGE:
+ case IB_EVENT_PKEY_CHANGE:
+ case IB_EVENT_SM_CHANGE:
+ case IB_EVENT_CLIENT_REREGISTER:
+ /* Refresh port data asynchronously. */
+ if (event->element.port_num <= sdev->device->phys_port_cnt) {
+ sport = &sdev->port[event->element.port_num - 1];
+ if (!sport->lid && !sport->sm_lid)
+ schedule_work(&sport->work);
+ }
+ break;
+ default:
+ printk(KERN_ERR "received unrecognized IB event %d\n",
+ event->event);
+ break;
+ }
+}
+
+/**
+ * srpt_srq_event() - SRQ event callback function.
+ */
+static void srpt_srq_event(struct ib_event *event, void *ctx)
+{
+ printk(KERN_INFO "SRQ event %d\n", event->event);
+}
+
+/**
+ * srpt_qp_event() - QP event callback function.
+ */
+static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
+{
+ pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
+ event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch));
+
+ switch (event->event) {
+ case IB_EVENT_COMM_EST:
+ ib_cm_notify(ch->cm_id, event->event);
+ break;
+ case IB_EVENT_QP_LAST_WQE_REACHED:
+ if (srpt_test_and_set_ch_state(ch, CH_DRAINING,
+ CH_RELEASING))
+ srpt_release_channel(ch);
+ else
+ pr_debug("%s: state %d - ignored LAST_WQE.\n",
+ ch->sess_name, srpt_get_ch_state(ch));
+ break;
+ default:
+ printk(KERN_ERR "received unrecognized IB QP event %d\n",
+ event->event);
+ break;
+ }
+}
+
+/**
+ * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
+ *
+ * @slot: one-based slot number.
+ * @value: four-bit value.
+ *
+ * Copies the lowest four bits of value in element slot of the array of four
+ * bit elements called c_list (controller list). The index slot is one-based.
+ */
+static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
+{
+ u16 id;
+ u8 tmp;
+
+ id = (slot - 1) / 2;
+ if (slot & 0x1) {
+ tmp = c_list[id] & 0xf;
+ c_list[id] = (value << 4) | tmp;
+ } else {
+ tmp = c_list[id] & 0xf0;
+ c_list[id] = (value & 0xf) | tmp;
+ }
+}
+
+/**
+ * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
+ *
+ * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
+ * Specification.
+ */
+static void srpt_get_class_port_info(struct ib_dm_mad *mad)
+{
+ struct ib_class_port_info *cif;
+
+ cif = (struct ib_class_port_info *)mad->data;
+ memset(cif, 0, sizeof *cif);
+ cif->base_version = 1;
+ cif->class_version = 1;
+ cif->resp_time_value = 20;
+
+ mad->mad_hdr.status = 0;
+}
+
+/**
+ * srpt_get_iou() - Write IOUnitInfo to a management datagram.
+ *
+ * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
+ * Specification. See also section B.7, table B.6 in the SRP r16a document.
+ */
+static void srpt_get_iou(struct ib_dm_mad *mad)
+{
+ struct ib_dm_iou_info *ioui;
+ u8 slot;
+ int i;
+
+ ioui = (struct ib_dm_iou_info *)mad->data;
+ ioui->change_id = __constant_cpu_to_be16(1);
+ ioui->max_controllers = 16;
+
+ /* set present for slot 1 and empty for the rest */
+ srpt_set_ioc(ioui->controller_list, 1, 1);
+ for (i = 1, slot = 2; i < 16; i++, slot++)
+ srpt_set_ioc(ioui->controller_list, slot, 0);
+
+ mad->mad_hdr.status = 0;
+}
+
+/**
+ * srpt_get_ioc() - Write IOControllerprofile to a management datagram.
+ *
+ * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
+ * Architecture Specification. See also section B.7, table B.7 in the SRP
+ * r16a document.
+ */
+static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
+ struct ib_dm_mad *mad)
+{
+ struct srpt_device *sdev = sport->sdev;
+ struct ib_dm_ioc_profile *iocp;
+
+ iocp = (struct ib_dm_ioc_profile *)mad->data;
+
+ if (!slot || slot > 16) {
+ mad->mad_hdr.status
+ = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
+ return;
+ }
+
+ if (slot > 2) {
+ mad->mad_hdr.status
+ = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
+ return;
+ }
+
+ memset(iocp, 0, sizeof *iocp);
+ strcpy(iocp->id_string, SRPT_ID_STRING);
+ iocp->guid = cpu_to_be64(srpt_service_guid);
+ iocp->vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
+ iocp->device_id = cpu_to_be32(sdev->dev_attr.vendor_part_id);
+ iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
+ iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
+ iocp->subsys_device_id = 0x0;
+ iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
+ iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS);
+ iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL);
+ iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION);
+ iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
+ iocp->rdma_read_depth = 4;
+ iocp->send_size = cpu_to_be32(srp_max_req_size);
+ iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
+ 1U << 24));
+ iocp->num_svc_entries = 1;
+ iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
+ SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;
+
+ mad->mad_hdr.status = 0;
+}
+
+/**
+ * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
+ *
+ * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
+ * Specification. See also section B.7, table B.8 in the SRP r16a document.
+ */
+static void srpt_get_svc_entries(u64 ioc_guid,
+ u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
+{
+ struct ib_dm_svc_entries *svc_entries;
+
+ WARN_ON(!ioc_guid);
+
+ if (!slot || slot > 16) {
+ mad->mad_hdr.status
+ = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
+ return;
+ }
+
+ if (slot > 2 || lo > hi || hi > 1) {
+ mad->mad_hdr.status
+ = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
+ return;
+ }
+
+ svc_entries = (struct ib_dm_svc_entries *)mad->data;
+ memset(svc_entries, 0, sizeof *svc_entries);
+ svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
+ snprintf(svc_entries->service_entries[0].name,
+ sizeof(svc_entries->service_entries[0].name),
+ "%s%016llx",
+ SRP_SERVICE_NAME_PREFIX,
+ ioc_guid);
+
+ mad->mad_hdr.status = 0;
+}
+
+/**
+ * srpt_mgmt_method_get() - Process a received management datagram.
+ * @sp: source port through which the MAD has been received.
+ * @rq_mad: received MAD.
+ * @rsp_mad: response MAD.
+ */
+static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
+ struct ib_dm_mad *rsp_mad)
+{
+ u16 attr_id;
+ u32 slot;
+ u8 hi, lo;
+
+ attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
+ switch (attr_id) {
+ case DM_ATTR_CLASS_PORT_INFO:
+ srpt_get_class_port_info(rsp_mad);
+ break;
+ case DM_ATTR_IOU_INFO:
+ srpt_get_iou(rsp_mad);
+ break;
+ case DM_ATTR_IOC_PROFILE:
+ slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
+ srpt_get_ioc(sp, slot, rsp_mad);
+ break;
+ case DM_ATTR_SVC_ENTRIES:
+ slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
+ hi = (u8) ((slot >> 8) & 0xff);
+ lo = (u8) (slot & 0xff);
+ slot = (u16) ((slot >> 16) & 0xffff);
+ srpt_get_svc_entries(srpt_service_guid,
+ slot, hi, lo, rsp_mad);
+ break;
+ default:
+ rsp_mad->mad_hdr.status =
+ __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
+ break;
+ }
+}
+
+/**
+ * srpt_mad_send_handler() - Post MAD-send callback function.
+ */
+static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
+ struct ib_mad_send_wc *mad_wc)
+{
+ ib_destroy_ah(mad_wc->send_buf->ah);
+ ib_free_send_mad(mad_wc->send_buf);
+}
+
+/**
+ * srpt_mad_recv_handler() - MAD reception callback function.
+ */
+static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
+ struct ib_mad_recv_wc *mad_wc)
+{
+ struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
+ struct ib_ah *ah;
+ struct ib_mad_send_buf *rsp;
+ struct ib_dm_mad *dm_mad;
+
+ if (!mad_wc || !mad_wc->recv_buf.mad)
+ return;
+
+ ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
+ mad_wc->recv_buf.grh, mad_agent->port_num);
+ if (IS_ERR(ah))
+ goto err;
+
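+	/*
+	 * Compile-time check that the header length passed to
+	 * ib_create_send_mad() below (IB_MGMT_DEVICE_HDR) matches the offset
+	 * of the data area inside struct ib_dm_mad.
+	 */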
+ BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
+
+ rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
+ mad_wc->wc->pkey_index, 0,
+ IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
+ GFP_KERNEL);
+ if (IS_ERR(rsp))
+ goto err_rsp;
+
+ rsp->ah = ah;
+
+ dm_mad = rsp->mad;
+ memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad);
+ dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
+ dm_mad->mad_hdr.status = 0;
+
+ switch (mad_wc->recv_buf.mad->mad_hdr.method) {
+ case IB_MGMT_METHOD_GET:
+ srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
+ break;
+ case IB_MGMT_METHOD_SET:
+ dm_mad->mad_hdr.status =
+ __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
+ break;
+ default:
+ dm_mad->mad_hdr.status =
+ __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
+ break;
+ }
+
+ if (!ib_post_send_mad(rsp, NULL)) {
+ ib_free_recv_mad(mad_wc);
+ /* will destroy_ah & free_send_mad in send completion */
+ return;
+ }
+
+ ib_free_send_mad(rsp);
+
+err_rsp:
+ ib_destroy_ah(ah);
+err:
+ ib_free_recv_mad(mad_wc);
+}
+
+/**
+ * srpt_refresh_port() - Configure a HCA port.
+ *
+ * Enable InfiniBand management datagram processing, update the cached sm_lid,
+ * lid and gid values, and register a callback function for processing MADs
+ * on the specified port.
+ *
+ * Note: It is safe to call this function more than once for the same port.
+ */
+static int srpt_refresh_port(struct srpt_port *sport)
+{
+ struct ib_mad_reg_req reg_req;
+ struct ib_port_modify port_modify;
+ struct ib_port_attr port_attr;
+ int ret;
+
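+	/*
+	 * Setting IB_PORT_DEVICE_MGMT_SUP in the port capability mask
+	 * advertises device management support, which is what allows
+	 * initiators to discover this target through the device management
+	 * MADs handled by the callbacks registered below.
+	 */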
+ memset(&port_modify, 0, sizeof port_modify);
+ port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+ port_modify.clr_port_cap_mask = 0;
+
+ ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
+ if (ret)
+ goto err_mod_port;
+
+ ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
+ if (ret)
+ goto err_query_port;
+
+ sport->sm_lid = port_attr.sm_lid;
+ sport->lid = port_attr.lid;
+
+ ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
+ if (ret)
+ goto err_query_port;
+
+ if (!sport->mad_agent) {
+		memset(&reg_req, 0, sizeof reg_req);
+ reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
+ reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
+ set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
+ set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
+
+ sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
+ sport->port,
+ IB_QPT_GSI,
+							 &reg_req, 0,
+ srpt_mad_send_handler,
+ srpt_mad_recv_handler,
+ sport);
+ if (IS_ERR(sport->mad_agent)) {
+ ret = PTR_ERR(sport->mad_agent);
+ sport->mad_agent = NULL;
+ goto err_query_port;
+ }
+ }
+
+ return 0;
+
+err_query_port:
+
+ port_modify.set_port_cap_mask = 0;
+ port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+ ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
+
+err_mod_port:
+
+ return ret;
+}
+
+/**
+ * srpt_unregister_mad_agent() - Unregister MAD callback functions.
+ *
+ * Note: It is safe to call this function more than once for the same device.
+ */
+static void srpt_unregister_mad_agent(struct srpt_device *sdev)
+{
+ struct ib_port_modify port_modify = {
+ .clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
+ };
+ struct srpt_port *sport;
+ int i;
+
+ for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
+ sport = &sdev->port[i - 1];
+ WARN_ON(sport->port != i);
+ if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
+ printk(KERN_ERR "disabling MAD processing failed.\n");
+ if (sport->mad_agent) {
+ ib_unregister_mad_agent(sport->mad_agent);
+ sport->mad_agent = NULL;
+ }
+ }
+}
+
+/**
+ * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
+ */
+static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
+ int ioctx_size, int dma_size,
+ enum dma_data_direction dir)
+{
+ struct srpt_ioctx *ioctx;
+
+ ioctx = kmalloc(ioctx_size, GFP_KERNEL);
+ if (!ioctx)
+ goto err;
+
+ ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
+ if (!ioctx->buf)
+ goto err_free_ioctx;
+
+ ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
+ if (ib_dma_mapping_error(sdev->device, ioctx->dma))
+ goto err_free_buf;
+
+ return ioctx;
+
+err_free_buf:
+ kfree(ioctx->buf);
+err_free_ioctx:
+ kfree(ioctx);
+err:
+ return NULL;
+}
+
+/**
+ * srpt_free_ioctx() - Free an SRPT I/O context structure.
+ */
+static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
+ int dma_size, enum dma_data_direction dir)
+{
+ if (!ioctx)
+ return;
+
+ ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
+ kfree(ioctx->buf);
+ kfree(ioctx);
+}
+
+/**
+ * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
+ * @sdev: Device to allocate the I/O context ring for.
+ * @ring_size: Number of elements in the I/O context ring.
+ * @ioctx_size: I/O context size.
+ * @dma_size: DMA buffer size.
+ * @dir: DMA data direction.
+ */
+static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
+ int ring_size, int ioctx_size,
+ int dma_size, enum dma_data_direction dir)
+{
+ struct srpt_ioctx **ring;
+ int i;
+
+ WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
+ && ioctx_size != sizeof(struct srpt_send_ioctx));
+
+ ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL);
+ if (!ring)
+ goto out;
+ for (i = 0; i < ring_size; ++i) {
+ ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
+ if (!ring[i])
+ goto err;
+ ring[i]->index = i;
+ }
+ goto out;
+
+err:
+ while (--i >= 0)
+ srpt_free_ioctx(sdev, ring[i], dma_size, dir);
+ kfree(ring);
+out:
+ return ring;
+}
+
+/**
+ * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
+ */
+static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
+ struct srpt_device *sdev, int ring_size,
+ int dma_size, enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < ring_size; ++i)
+ srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
+ kfree(ioctx_ring);
+}
+
+/**
+ * srpt_get_cmd_state() - Get the state of a SCSI command.
+ */
+static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx)
+{
+ enum srpt_command_state state;
+ unsigned long flags;
+
+ BUG_ON(!ioctx);
+
+ spin_lock_irqsave(&ioctx->spinlock, flags);
+ state = ioctx->state;
+ spin_unlock_irqrestore(&ioctx->spinlock, flags);
+ return state;
+}
+
+/**
+ * srpt_set_cmd_state() - Set the state of a SCSI command.
+ *
+ * Does not modify the state of aborted commands. Returns the previous command
+ * state.
+ */
+static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
+ enum srpt_command_state new)
+{
+ enum srpt_command_state previous;
+ unsigned long flags;
+
+ BUG_ON(!ioctx);
+
+ spin_lock_irqsave(&ioctx->spinlock, flags);
+ previous = ioctx->state;
+ if (previous != SRPT_STATE_DONE)
+ ioctx->state = new;
+ spin_unlock_irqrestore(&ioctx->spinlock, flags);
+
+ return previous;
+}
+
+/**
+ * srpt_test_and_set_cmd_state() - Test and set the state of a command.
+ *
+ * Returns true if and only if the previous command state was equal to 'old'.
+ */
+static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
+ enum srpt_command_state old,
+ enum srpt_command_state new)
+{
+ enum srpt_command_state previous;
+ unsigned long flags;
+
+ WARN_ON(!ioctx);
+ WARN_ON(old == SRPT_STATE_DONE);
+ WARN_ON(new == SRPT_STATE_NEW);
+
+ spin_lock_irqsave(&ioctx->spinlock, flags);
+ previous = ioctx->state;
+ if (previous == old)
+ ioctx->state = new;
+ spin_unlock_irqrestore(&ioctx->spinlock, flags);
+ return previous == old;
+}
+
+/**
+ * srpt_post_recv() - Post an IB receive request.
+ */
+static int srpt_post_recv(struct srpt_device *sdev,
+ struct srpt_recv_ioctx *ioctx)
+{
+ struct ib_sge list;
+ struct ib_recv_wr wr, *bad_wr;
+
+ BUG_ON(!sdev);
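+	/*
+	 * The wr_id of a work request encodes both its type (here SRPT_RECV)
+	 * and the ioctx ring index so that the completion handlers can
+	 * recover them later via opcode_from_wr_id() and idx_from_wr_id().
+	 */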
+ wr.wr_id = encode_wr_id(SRPT_RECV, ioctx->ioctx.index);
+
+ list.addr = ioctx->ioctx.dma;
+ list.length = srp_max_req_size;
+ list.lkey = sdev->mr->lkey;
+
+ wr.next = NULL;
+ wr.sg_list = &list;
+ wr.num_sge = 1;
+
+ return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
+}
+
+/**
+ * srpt_post_send() - Post an IB send request.
+ *
+ * Returns zero upon success and a non-zero value upon failure.
+ */
+static int srpt_post_send(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx, int len)
+{
+ struct ib_sge list;
+ struct ib_send_wr wr, *bad_wr;
+ struct srpt_device *sdev = ch->sport->sdev;
+ int ret;
+
+ atomic_inc(&ch->req_lim);
+
+ ret = -ENOMEM;
+ if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) {
+ printk(KERN_WARNING "IB send queue full (needed 1)\n");
+ goto out;
+ }
+
+ ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, len,
+ DMA_TO_DEVICE);
+
+ list.addr = ioctx->ioctx.dma;
+ list.length = len;
+ list.lkey = sdev->mr->lkey;
+
+ wr.next = NULL;
+ wr.wr_id = encode_wr_id(SRPT_SEND, ioctx->ioctx.index);
+ wr.sg_list = &list;
+ wr.num_sge = 1;
+ wr.opcode = IB_WR_SEND;
+ wr.send_flags = IB_SEND_SIGNALED;
+
+ ret = ib_post_send(ch->qp, &wr, &bad_wr);
+
+out:
+ if (ret < 0) {
+ atomic_inc(&ch->sq_wr_avail);
+ atomic_dec(&ch->req_lim);
+ }
+ return ret;
+}
+
+/**
+ * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
+ * @ioctx: Pointer to the I/O context associated with the request.
+ * @srp_cmd: Pointer to the SRP_CMD request data.
+ * @dir: Pointer to the variable to which the transfer direction will be
+ * written.
+ * @data_len: Pointer to the variable to which the total data length of all
+ * descriptors in the SRP_CMD request will be written.
+ *
+ * This function initializes ioctx->n_rbuf and ioctx->rbufs.
+ *
+ * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
+ * -ENOMEM when memory allocation fails and zero upon success.
+ */
+static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
+ struct srp_cmd *srp_cmd,
+ enum dma_data_direction *dir, u64 *data_len)
+{
+ struct srp_indirect_buf *idb;
+ struct srp_direct_buf *db;
+ unsigned add_cdb_offset;
+ int ret;
+
+ /*
+ * The pointer computations below will only be compiled correctly
+ * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
+ * whether srp_cmd::add_data has been declared as a byte pointer.
+ */
+ BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
+ && !__same_type(srp_cmd->add_data[0], (u8)0));
+
+ BUG_ON(!dir);
+ BUG_ON(!data_len);
+
+ ret = 0;
+ *data_len = 0;
+
+ /*
+ * The lower four bits of the buffer format field contain the DATA-IN
+ * buffer descriptor format, and the highest four bits contain the
+ * DATA-OUT buffer descriptor format.
+ */
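+	/*
+	 * Example (assuming the SRP_DATA_DESC_* values from <scsi/srp.h>,
+	 * i.e. DIRECT == 1 and INDIRECT == 2): buf_fmt == 0x01 describes a
+	 * read with a single direct DATA-IN descriptor, while buf_fmt == 0x20
+	 * describes a write with an indirect DATA-OUT descriptor table.
+	 */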
+ *dir = DMA_NONE;
+ if (srp_cmd->buf_fmt & 0xf)
+ /* DATA-IN: transfer data from target to initiator (read). */
+ *dir = DMA_FROM_DEVICE;
+ else if (srp_cmd->buf_fmt >> 4)
+ /* DATA-OUT: transfer data from initiator to target (write). */
+ *dir = DMA_TO_DEVICE;
+
+ /*
+ * According to the SRP spec, the lower two bits of the 'ADDITIONAL
+ * CDB LENGTH' field are reserved and the size in bytes of this field
+ * is four times the value specified in bits 3..7. Hence the "& ~3".
+ */
+ add_cdb_offset = srp_cmd->add_cdb_len & ~3;
+ if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
+ ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
+ ioctx->n_rbuf = 1;
+ ioctx->rbufs = &ioctx->single_rbuf;
+
+ db = (struct srp_direct_buf *)(srp_cmd->add_data
+ + add_cdb_offset);
+ memcpy(ioctx->rbufs, db, sizeof *db);
+ *data_len = be32_to_cpu(db->len);
+ } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
+ ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
+ idb = (struct srp_indirect_buf *)(srp_cmd->add_data
+ + add_cdb_offset);
+
+ ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db;
+
+ if (ioctx->n_rbuf >
+ (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
+ printk(KERN_ERR "received unsupported SRP_CMD request"
+ " type (%u out + %u in != %u / %zu)\n",
+ srp_cmd->data_out_desc_cnt,
+ srp_cmd->data_in_desc_cnt,
+ be32_to_cpu(idb->table_desc.len),
+ sizeof(*db));
+ ioctx->n_rbuf = 0;
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (ioctx->n_rbuf == 1)
+ ioctx->rbufs = &ioctx->single_rbuf;
+ else {
+ ioctx->rbufs =
+ kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC);
+ if (!ioctx->rbufs) {
+ ioctx->n_rbuf = 0;
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ db = idb->desc_list;
+ memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db);
+ *data_len = be32_to_cpu(idb->len);
+ }
+out:
+ return ret;
+}
+
+/**
+ * srpt_init_ch_qp() - Initialize queue pair attributes.
+ *
+ * Initializes the attributes of queue pair 'qp' by allowing local write,
+ * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
+ */
+static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
+{
+ struct ib_qp_attr *attr;
+ int ret;
+
+ attr = kzalloc(sizeof *attr, GFP_KERNEL);
+ if (!attr)
+ return -ENOMEM;
+
+ attr->qp_state = IB_QPS_INIT;
+ attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE;
+ attr->port_num = ch->sport->port;
+ attr->pkey_index = 0;
+
+ ret = ib_modify_qp(qp, attr,
+ IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
+ IB_QP_PKEY_INDEX);
+
+ kfree(attr);
+ return ret;
+}
+
+/**
+ * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
+ * @ch: channel of the queue pair.
+ * @qp: queue pair to change the state of.
+ *
+ * Returns zero upon success and a negative value upon failure.
+ *
+ * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
+ * If this structure ever becomes larger, it might be necessary to allocate
+ * it dynamically instead of on the stack.
+ */
+static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
+{
+ struct ib_qp_attr qp_attr;
+ int attr_mask;
+ int ret;
+
+ qp_attr.qp_state = IB_QPS_RTR;
+ ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
+ if (ret)
+ goto out;
+
+ qp_attr.max_dest_rd_atomic = 4;
+
+ ret = ib_modify_qp(qp, &qp_attr, attr_mask);
+
+out:
+ return ret;
+}
+
+/**
+ * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
+ * @ch: channel of the queue pair.
+ * @qp: queue pair to change the state of.
+ *
+ * Returns zero upon success and a negative value upon failure.
+ *
+ * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
+ * If this structure ever becomes larger, it might be necessary to allocate
+ * it dynamically instead of on the stack.
+ */
+static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
+{
+ struct ib_qp_attr qp_attr;
+ int attr_mask;
+ int ret;
+
+ qp_attr.qp_state = IB_QPS_RTS;
+ ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
+ if (ret)
+ goto out;
+
+ qp_attr.max_rd_atomic = 4;
+
+ ret = ib_modify_qp(qp, &qp_attr, attr_mask);
+
+out:
+ return ret;
+}
+
+/**
+ * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
+ */
+static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
+{
+ struct ib_qp_attr qp_attr;
+
+ qp_attr.qp_state = IB_QPS_ERR;
+ return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
+}
+
+/**
+ * srpt_unmap_sg_to_ib_sge() - Unmap an IB SGE list.
+ */
+static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx)
+{
+ struct scatterlist *sg;
+ enum dma_data_direction dir;
+
+ BUG_ON(!ch);
+ BUG_ON(!ioctx);
+ BUG_ON(ioctx->n_rdma && !ioctx->rdma_ius);
+
+ while (ioctx->n_rdma)
+ kfree(ioctx->rdma_ius[--ioctx->n_rdma].sge);
+
+ kfree(ioctx->rdma_ius);
+ ioctx->rdma_ius = NULL;
+
+ if (ioctx->mapped_sg_count) {
+ sg = ioctx->sg;
+ WARN_ON(!sg);
+ dir = ioctx->cmd.data_direction;
+ BUG_ON(dir == DMA_NONE);
+ ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
+ opposite_dma_dir(dir));
+ ioctx->mapped_sg_count = 0;
+ }
+}
+
+/**
+ * srpt_map_sg_to_ib_sge() - Map an SG list to an IB SGE list.
+ */
+static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx)
+{
+ struct se_cmd *cmd;
+ struct scatterlist *sg, *sg_orig;
+ int sg_cnt;
+ enum dma_data_direction dir;
+ struct rdma_iu *riu;
+ struct srp_direct_buf *db;
+ dma_addr_t dma_addr;
+ struct ib_sge *sge;
+ u64 raddr;
+ u32 rsize;
+ u32 tsize;
+ u32 dma_len;
+ int count, nrdma;
+ int i, j, k;
+
+ BUG_ON(!ch);
+ BUG_ON(!ioctx);
+ cmd = &ioctx->cmd;
+ dir = cmd->data_direction;
+ BUG_ON(dir == DMA_NONE);
+
+ transport_do_task_sg_chain(cmd);
+ ioctx->sg = sg = sg_orig = cmd->t_tasks_sg_chained;
+ ioctx->sg_cnt = sg_cnt = cmd->t_tasks_sg_chained_no;
+
+ count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
+ opposite_dma_dir(dir));
+ if (unlikely(!count))
+ return -EAGAIN;
+
+ ioctx->mapped_sg_count = count;
+
+ if (ioctx->rdma_ius && ioctx->n_rdma_ius)
+ nrdma = ioctx->n_rdma_ius;
+ else {
+ nrdma = (count + SRPT_DEF_SG_PER_WQE - 1) / SRPT_DEF_SG_PER_WQE
+ + ioctx->n_rbuf;
+
+ ioctx->rdma_ius = kzalloc(nrdma * sizeof *riu, GFP_KERNEL);
+ if (!ioctx->rdma_ius)
+ goto free_mem;
+
+ ioctx->n_rdma_ius = nrdma;
+ }
+
+ db = ioctx->rbufs;
+ tsize = cmd->data_length;
+ dma_len = sg_dma_len(&sg[0]);
+ riu = ioctx->rdma_ius;
+
+	/*
+	 * For each remote descriptor, calculate the number of ib_sge entries
+	 * that are needed. If at most SRPT_DEF_SG_PER_WQE ib_sge entries are
+	 * needed per RDMA operation, a single rdma_iu (and hence a single
+	 * RDMA WR) suffices for that descriptor; otherwise extra rdma_iu
+	 * elements are allocated to carry the remaining ib_sge entries in
+	 * additional RDMA WRs.
+	 */
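+	/*
+	 * Worked example (hypothetical sizes, assuming SRPT_DEF_SG_PER_WQE ==
+	 * 16): a remote descriptor that needs 20 ib_sge entries is split over
+	 * two rdma_iu elements and hence two RDMA work requests, the first
+	 * carrying 16 ib_sge entries and the second the remaining 4.
+	 */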
+ for (i = 0, j = 0;
+ j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
+ rsize = be32_to_cpu(db->len);
+ raddr = be64_to_cpu(db->va);
+ riu->raddr = raddr;
+ riu->rkey = be32_to_cpu(db->key);
+ riu->sge_cnt = 0;
+
+ /* calculate how many sge required for this remote_buf */
+ while (rsize > 0 && tsize > 0) {
+
+ if (rsize >= dma_len) {
+ tsize -= dma_len;
+ rsize -= dma_len;
+ raddr += dma_len;
+
+ if (tsize > 0) {
+ ++j;
+ if (j < count) {
+ sg = sg_next(sg);
+ dma_len = sg_dma_len(sg);
+ }
+ }
+ } else {
+ tsize -= rsize;
+ dma_len -= rsize;
+ rsize = 0;
+ }
+
+ ++riu->sge_cnt;
+
+ if (rsize > 0 && riu->sge_cnt == SRPT_DEF_SG_PER_WQE) {
+ ++ioctx->n_rdma;
+ riu->sge =
+ kmalloc(riu->sge_cnt * sizeof *riu->sge,
+ GFP_KERNEL);
+ if (!riu->sge)
+ goto free_mem;
+
+ ++riu;
+ riu->sge_cnt = 0;
+ riu->raddr = raddr;
+ riu->rkey = be32_to_cpu(db->key);
+ }
+ }
+
+ ++ioctx->n_rdma;
+ riu->sge = kmalloc(riu->sge_cnt * sizeof *riu->sge,
+ GFP_KERNEL);
+ if (!riu->sge)
+ goto free_mem;
+ }
+
+ db = ioctx->rbufs;
+ tsize = cmd->data_length;
+ riu = ioctx->rdma_ius;
+ sg = sg_orig;
+ dma_len = sg_dma_len(&sg[0]);
+ dma_addr = sg_dma_address(&sg[0]);
+
+	/* The second loop maps the SG list addresses onto the rdma_iu->ib_sge entries. */
+ for (i = 0, j = 0;
+ j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
+ rsize = be32_to_cpu(db->len);
+ sge = riu->sge;
+ k = 0;
+
+ while (rsize > 0 && tsize > 0) {
+ sge->addr = dma_addr;
+ sge->lkey = ch->sport->sdev->mr->lkey;
+
+ if (rsize >= dma_len) {
+ sge->length =
+ (tsize < dma_len) ? tsize : dma_len;
+ tsize -= dma_len;
+ rsize -= dma_len;
+
+ if (tsize > 0) {
+ ++j;
+ if (j < count) {
+ sg = sg_next(sg);
+ dma_len = sg_dma_len(sg);
+ dma_addr = sg_dma_address(sg);
+ }
+ }
+ } else {
+ sge->length = (tsize < rsize) ? tsize : rsize;
+ tsize -= rsize;
+ dma_len -= rsize;
+ dma_addr += rsize;
+ rsize = 0;
+ }
+
+ ++k;
+ if (k == riu->sge_cnt && rsize > 0 && tsize > 0) {
+ ++riu;
+ sge = riu->sge;
+ k = 0;
+ } else if (rsize > 0 && tsize > 0)
+ ++sge;
+ }
+ }
+
+ return 0;
+
+free_mem:
+ srpt_unmap_sg_to_ib_sge(ch, ioctx);
+
+ return -ENOMEM;
+}
+
+/**
+ * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
+ */
+static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
+{
+ struct srpt_send_ioctx *ioctx;
+ unsigned long flags;
+
+ BUG_ON(!ch);
+
+ ioctx = NULL;
+ spin_lock_irqsave(&ch->spinlock, flags);
+ if (!list_empty(&ch->free_list)) {
+ ioctx = list_first_entry(&ch->free_list,
+ struct srpt_send_ioctx, free_list);
+ list_del(&ioctx->free_list);
+ }
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+
+ if (!ioctx)
+ return ioctx;
+
+ BUG_ON(ioctx->ch != ch);
+ kref_init(&ioctx->kref);
+ spin_lock_init(&ioctx->spinlock);
+ ioctx->state = SRPT_STATE_NEW;
+ ioctx->n_rbuf = 0;
+ ioctx->rbufs = NULL;
+ ioctx->n_rdma = 0;
+ ioctx->n_rdma_ius = 0;
+ ioctx->rdma_ius = NULL;
+ ioctx->mapped_sg_count = 0;
+ init_completion(&ioctx->tx_done);
+ ioctx->queue_status_only = false;
+ /*
+ * transport_init_se_cmd() does not initialize all fields, so do it
+ * here.
+ */
+ memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
+ memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
+
+ return ioctx;
+}
+
+/**
+ * srpt_put_send_ioctx() - Free up resources.
+ */
+static void srpt_put_send_ioctx(struct srpt_send_ioctx *ioctx)
+{
+ struct srpt_rdma_ch *ch;
+ unsigned long flags;
+
+ BUG_ON(!ioctx);
+ ch = ioctx->ch;
+ BUG_ON(!ch);
+
+ WARN_ON(srpt_get_cmd_state(ioctx) != SRPT_STATE_DONE);
+
+ srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
+ transport_generic_free_cmd(&ioctx->cmd, 0);
+
+ if (ioctx->n_rbuf > 1) {
+ kfree(ioctx->rbufs);
+ ioctx->rbufs = NULL;
+ ioctx->n_rbuf = 0;
+ }
+
+ spin_lock_irqsave(&ch->spinlock, flags);
+ list_add(&ioctx->free_list, &ch->free_list);
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+}
+
+static void srpt_put_send_ioctx_kref(struct kref *kref)
+{
+ srpt_put_send_ioctx(container_of(kref, struct srpt_send_ioctx, kref));
+}
+
+/**
+ * srpt_abort_cmd() - Abort a SCSI command.
+ * @ioctx: I/O context associated with the SCSI command.
+ */
+static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
+{
+ enum srpt_command_state state;
+ unsigned long flags;
+
+ BUG_ON(!ioctx);
+
+ /*
+ * If the command is in a state where the target core is waiting for
+ * the ib_srpt driver, change the state to the next state. Changing
+ * the state of the command from SRPT_STATE_NEED_DATA to
+ * SRPT_STATE_DATA_IN ensures that srpt_xmit_response() will call this
+ * function a second time.
+ */
+
+ spin_lock_irqsave(&ioctx->spinlock, flags);
+ state = ioctx->state;
+ switch (state) {
+ case SRPT_STATE_NEED_DATA:
+ ioctx->state = SRPT_STATE_DATA_IN;
+ break;
+ case SRPT_STATE_DATA_IN:
+ case SRPT_STATE_CMD_RSP_SENT:
+ case SRPT_STATE_MGMT_RSP_SENT:
+ ioctx->state = SRPT_STATE_DONE;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&ioctx->spinlock, flags);
+
+ if (state == SRPT_STATE_DONE)
+ goto out;
+
+ pr_debug("Aborting cmd with state %d and tag %lld\n", state,
+ ioctx->tag);
+
+ switch (state) {
+ case SRPT_STATE_NEW:
+ case SRPT_STATE_DATA_IN:
+ case SRPT_STATE_MGMT:
+ /*
+ * Do nothing - defer abort processing until
+ * srpt_queue_response() is invoked.
+ */
+ WARN_ON(!transport_check_aborted_status(&ioctx->cmd, false));
+ break;
+ case SRPT_STATE_NEED_DATA:
+ /* DMA_TO_DEVICE (write) - RDMA read error. */
+ atomic_set(&ioctx->cmd.transport_lun_stop, 1);
+ transport_generic_handle_data(&ioctx->cmd);
+ break;
+ case SRPT_STATE_CMD_RSP_SENT:
+ /*
+ * SRP_RSP sending failed or the SRP_RSP send completion has
+ * not been received in time.
+ */
+ srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
+ atomic_set(&ioctx->cmd.transport_lun_stop, 1);
+ kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+ break;
+ case SRPT_STATE_MGMT_RSP_SENT:
+ srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
+ kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+ break;
+ default:
+ WARN_ON("ERROR: unexpected command state");
+ break;
+ }
+
+out:
+ return state;
+}
+
+/**
+ * srpt_handle_send_err_comp() - Process an IB_WC_SEND error completion.
+ */
+static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id)
+{
+ struct srpt_send_ioctx *ioctx;
+ enum srpt_command_state state;
+ struct se_cmd *cmd;
+ u32 index;
+
+ atomic_inc(&ch->sq_wr_avail);
+
+ index = idx_from_wr_id(wr_id);
+ ioctx = ch->ioctx_ring[index];
+ state = srpt_get_cmd_state(ioctx);
+ cmd = &ioctx->cmd;
+
+ WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
+ && state != SRPT_STATE_MGMT_RSP_SENT
+ && state != SRPT_STATE_NEED_DATA
+ && state != SRPT_STATE_DONE);
+
+ /* If SRP_RSP sending failed, undo the ch->req_lim change. */
+ if (state == SRPT_STATE_CMD_RSP_SENT
+ || state == SRPT_STATE_MGMT_RSP_SENT)
+ atomic_dec(&ch->req_lim);
+
+ srpt_abort_cmd(ioctx);
+}
+
+/**
+ * srpt_handle_send_comp() - Process an IB send completion notification.
+ */
+static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx)
+{
+ enum srpt_command_state state;
+
+ atomic_inc(&ch->sq_wr_avail);
+
+ state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
+
+ if (WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
+ && state != SRPT_STATE_MGMT_RSP_SENT
+ && state != SRPT_STATE_DONE))
+ pr_debug("state = %d\n", state);
+
+ if (state != SRPT_STATE_DONE)
+ kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+ else
+ printk(KERN_ERR "IB completion has been received too late for"
+ " wr_id = %u.\n", ioctx->ioctx.index);
+}
+
+/**
+ * srpt_handle_rdma_comp() - Process an IB RDMA completion notification.
+ *
+ * Note: transport_generic_handle_data() is asynchronous so unmapping the
+ * data that has been transferred via IB RDMA must be postponed until the
+ * check_stop_free() callback.
+ */
+static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx,
+ enum srpt_opcode opcode)
+{
+ WARN_ON(ioctx->n_rdma <= 0);
+ atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
+
+ if (opcode == SRPT_RDMA_READ_LAST) {
+ if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
+ SRPT_STATE_DATA_IN))
+ transport_generic_handle_data(&ioctx->cmd);
+ else
+ printk(KERN_ERR "%s[%d]: wrong state = %d\n", __func__,
+ __LINE__, srpt_get_cmd_state(ioctx));
+ } else if (opcode == SRPT_RDMA_ABORT) {
+ ioctx->rdma_aborted = true;
+ } else {
+ WARN(true, "unexpected opcode %d\n", opcode);
+ }
+}
+
+/**
+ * srpt_handle_rdma_err_comp() - Process an IB RDMA error completion.
+ */
+static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx,
+ enum srpt_opcode opcode)
+{
+ struct se_cmd *cmd;
+ enum srpt_command_state state;
+
+ cmd = &ioctx->cmd;
+ state = srpt_get_cmd_state(ioctx);
+ switch (opcode) {
+ case SRPT_RDMA_READ_LAST:
+ if (ioctx->n_rdma <= 0) {
+ printk(KERN_ERR "Received invalid RDMA read"
+ " error completion with idx %d\n",
+ ioctx->ioctx.index);
+ break;
+ }
+ atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
+ if (state == SRPT_STATE_NEED_DATA)
+ srpt_abort_cmd(ioctx);
+ else
+ printk(KERN_ERR "%s[%d]: wrong state = %d\n",
+ __func__, __LINE__, state);
+ break;
+ case SRPT_RDMA_WRITE_LAST:
+ atomic_set(&ioctx->cmd.transport_lun_stop, 1);
+ break;
+ default:
+ printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__,
+ __LINE__, opcode);
+ break;
+ }
+}
+
+/**
+ * srpt_build_cmd_rsp() - Build an SRP_RSP response.
+ * @ch: RDMA channel through which the request has been received.
+ * @ioctx: I/O context associated with the SRP_CMD request. The response will
+ * be built in the buffer ioctx->buf points at and hence this function will
+ * overwrite the request data.
+ * @tag: tag of the request for which this response is being generated.
+ * @status: value for the STATUS field of the SRP_RSP information unit.
+ *
+ * Returns the size in bytes of the SRP_RSP response.
+ *
+ * An SRP_RSP response contains a SCSI status or service response. See also
+ * section 6.9 in the SRP r16a document for the format of an SRP_RSP
+ * response. See also SPC-2 for more information about sense data.
+ */
+static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx, u64 tag,
+ int status)
+{
+ struct srp_rsp *srp_rsp;
+ const u8 *sense_data;
+ int sense_data_len, max_sense_len;
+
+ /*
+ * The lowest bit of all SAM-3 status codes is zero (see also
+ * paragraph 5.3 in SAM-3).
+ */
+ WARN_ON(status & 1);
+
+ srp_rsp = ioctx->ioctx.buf;
+ BUG_ON(!srp_rsp);
+
+ sense_data = ioctx->sense_data;
+ sense_data_len = ioctx->cmd.scsi_sense_length;
+ WARN_ON(sense_data_len > sizeof(ioctx->sense_data));
+
+ memset(srp_rsp, 0, sizeof *srp_rsp);
+ srp_rsp->opcode = SRP_RSP;
+ srp_rsp->req_lim_delta =
+ __constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
+ srp_rsp->tag = tag;
+ srp_rsp->status = status;
+
+ if (sense_data_len) {
+ BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
+ max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
+ if (sense_data_len > max_sense_len) {
+ printk(KERN_WARNING "truncated sense data from %d to %d"
+ " bytes\n", sense_data_len, max_sense_len);
+ sense_data_len = max_sense_len;
+ }
+
+ srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
+ srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
+ memcpy(srp_rsp + 1, sense_data, sense_data_len);
+ }
+
+ return sizeof(*srp_rsp) + sense_data_len;
+}
+
+/**
+ * srpt_build_tskmgmt_rsp() - Build a task management response.
+ * @ch: RDMA channel through which the request has been received.
+ * @ioctx: I/O context in which the SRP_RSP response will be built.
+ * @rsp_code: RSP_CODE that will be stored in the response.
+ * @tag: Tag of the request for which this response is being generated.
+ *
+ * Returns the size in bytes of the SRP_RSP response.
+ *
+ * An SRP_RSP response contains a SCSI status or service response. See also
+ * section 6.9 in the SRP r16a document for the format of an SRP_RSP
+ * response.
+ */
+static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx,
+ u8 rsp_code, u64 tag)
+{
+ struct srp_rsp *srp_rsp;
+ int resp_data_len;
+ int resp_len;
+
+ resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4;
+ resp_len = sizeof(*srp_rsp) + resp_data_len;
+
+ srp_rsp = ioctx->ioctx.buf;
+ BUG_ON(!srp_rsp);
+ memset(srp_rsp, 0, sizeof *srp_rsp);
+
+ srp_rsp->opcode = SRP_RSP;
+ srp_rsp->req_lim_delta = __constant_cpu_to_be32(1
+ + atomic_xchg(&ch->req_lim_delta, 0));
+ srp_rsp->tag = tag;
+
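+	/*
+	 * For an unsuccessful task management function the response data
+	 * consists of a four-byte field whose last byte carries the RSP_CODE
+	 * (see also the SRP_RSP format in the SRP r16a document).
+	 */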
+ if (rsp_code != SRP_TSK_MGMT_SUCCESS) {
+ srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
+ srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
+ srp_rsp->data[3] = rsp_code;
+ }
+
+ return resp_len;
+}
+
+#define NO_SUCH_LUN ((uint64_t)-1LL)
+
+/*
+ * SCSI LUN addressing method. See also SAM-2 and the section about
+ * eight byte LUNs.
+ */
+enum scsi_lun_addr_method {
+ SCSI_LUN_ADDR_METHOD_PERIPHERAL = 0,
+ SCSI_LUN_ADDR_METHOD_FLAT = 1,
+ SCSI_LUN_ADDR_METHOD_LUN = 2,
+ SCSI_LUN_ADDR_METHOD_EXTENDED_LUN = 3,
+};
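+
+/*
+ * Example (hypothetical value): the two-byte LUN 0x40 0x05 selects the flat
+ * addressing method (the two most significant bits of byte 0 are 01b) and is
+ * decoded by srpt_unpack_lun() below as ((0x40 & 0x3f) << 8) | 0x05 == 5.
+ */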
+
+/*
+ * srpt_unpack_lun() - Convert from network LUN to linear LUN.
+ *
+ * Convert a 2-byte, 4-byte, 6-byte or 8-byte LUN structure in network byte
+ * order (big endian) to a linear LUN. Supports three LUN addressing methods:
+ * peripheral, flat and logical unit. See also SAM-2, section 4.9.4 (page 40).
+ */
+static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
+{
+ uint64_t res = NO_SUCH_LUN;
+ int addressing_method;
+
+ if (unlikely(len < 2)) {
+ printk(KERN_ERR "Illegal LUN length %d, expected 2 bytes or "
+ "more", len);
+ goto out;
+ }
+
+ switch (len) {
+ case 8:
+ if ((*((__be64 *)lun) &
+ __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
+ goto out_err;
+ break;
+ case 4:
+ if (*((__be16 *)&lun[2]) != 0)
+ goto out_err;
+ break;
+ case 6:
+ if (*((__be32 *)&lun[2]) != 0)
+ goto out_err;
+ break;
+ case 2:
+ break;
+ default:
+ goto out_err;
+ }
+
+ addressing_method = (*lun) >> 6; /* highest two bits of byte 0 */
+ switch (addressing_method) {
+ case SCSI_LUN_ADDR_METHOD_PERIPHERAL:
+ case SCSI_LUN_ADDR_METHOD_FLAT:
+ case SCSI_LUN_ADDR_METHOD_LUN:
+ res = *(lun + 1) | (((*lun) & 0x3f) << 8);
+ break;
+
+ case SCSI_LUN_ADDR_METHOD_EXTENDED_LUN:
+ default:
+ printk(KERN_ERR "Unimplemented LUN addressing method %u",
+ addressing_method);
+ break;
+ }
+
+out:
+ return res;
+
+out_err:
+ printk(KERN_ERR "Support for multi-level LUNs has not yet been"
+ " implemented");
+ goto out;
+}
+
+static int srpt_check_stop_free(struct se_cmd *cmd)
+{
+ struct srpt_send_ioctx *ioctx;
+
+ ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
+ return kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+}
+
+/**
+ * srpt_handle_cmd() - Process SRP_CMD.
+ */
+static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
+ struct srpt_recv_ioctx *recv_ioctx,
+ struct srpt_send_ioctx *send_ioctx)
+{
+ struct se_cmd *cmd;
+ struct srp_cmd *srp_cmd;
+ uint64_t unpacked_lun;
+ u64 data_len;
+ enum dma_data_direction dir;
+ int ret;
+
+ BUG_ON(!send_ioctx);
+
+ srp_cmd = recv_ioctx->ioctx.buf;
+ kref_get(&send_ioctx->kref);
+ cmd = &send_ioctx->cmd;
+ send_ioctx->tag = srp_cmd->tag;
+
+ switch (srp_cmd->task_attr) {
+ case SRP_CMD_SIMPLE_Q:
+ cmd->sam_task_attr = MSG_SIMPLE_TAG;
+ break;
+ case SRP_CMD_ORDERED_Q:
+ default:
+ cmd->sam_task_attr = MSG_ORDERED_TAG;
+ break;
+ case SRP_CMD_HEAD_OF_Q:
+ cmd->sam_task_attr = MSG_HEAD_TAG;
+ break;
+ case SRP_CMD_ACA:
+ cmd->sam_task_attr = MSG_ACA_TAG;
+ break;
+ }
+
+ ret = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len);
+ if (ret) {
+ printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n",
+ srp_cmd->tag);
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ goto send_sense;
+ }
+
+ cmd->data_length = data_len;
+ cmd->data_direction = dir;
+ unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
+ sizeof(srp_cmd->lun));
+ if (transport_lookup_cmd_lun(cmd, unpacked_lun) < 0)
+ goto send_sense;
+ ret = transport_generic_allocate_tasks(cmd, srp_cmd->cdb);
+ if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
+ srpt_queue_status(cmd);
+ else if (cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION)
+ goto send_sense;
+ else
+ WARN_ON_ONCE(ret);
+
+ transport_handle_cdb_direct(cmd);
+ return 0;
+
+send_sense:
+ transport_send_check_condition_and_sense(cmd, cmd->scsi_sense_reason,
+ 0);
+ return -1;
+}
+
+/**
+ * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
+ * @ioctx: I/O context of the SRP_TSK_MGMT request.
+ * @tag: Tag of the SRP command that the task management function refers to.
+ *
+ * Returns zero if the target core will process the task management
+ * request asynchronously.
+ *
+ * Note: It is assumed that the initiator serializes tag-based task management
+ * requests.
+ */
+static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
+{
+ struct srpt_device *sdev;
+ struct srpt_rdma_ch *ch;
+ struct srpt_send_ioctx *target;
+ int ret, i;
+
+ ret = -EINVAL;
+ ch = ioctx->ch;
+ BUG_ON(!ch);
+ BUG_ON(!ch->sport);
+ sdev = ch->sport->sdev;
+ BUG_ON(!sdev);
+ spin_lock_irq(&sdev->spinlock);
+ for (i = 0; i < ch->rq_size; ++i) {
+ target = ch->ioctx_ring[i];
+ if (target->cmd.se_lun == ioctx->cmd.se_lun &&
+ target->tag == tag &&
+ srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
+ ret = 0;
+ /* now let the target core abort &target->cmd; */
+ break;
+ }
+ }
+ spin_unlock_irq(&sdev->spinlock);
+ return ret;
+}
+
+static int srp_tmr_to_tcm(int fn)
+{
+ switch (fn) {
+ case SRP_TSK_ABORT_TASK:
+ return TMR_ABORT_TASK;
+ case SRP_TSK_ABORT_TASK_SET:
+ return TMR_ABORT_TASK_SET;
+ case SRP_TSK_CLEAR_TASK_SET:
+ return TMR_CLEAR_TASK_SET;
+ case SRP_TSK_LUN_RESET:
+ return TMR_LUN_RESET;
+ case SRP_TSK_CLEAR_ACA:
+ return TMR_CLEAR_ACA;
+ default:
+ return -1;
+ }
+}
+
+/**
+ * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
+ *
+ * For more information about SRP_TSK_MGMT information units, see also section
+ * 6.7 in the SRP r16a document.
+ */
+static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
+ struct srpt_recv_ioctx *recv_ioctx,
+ struct srpt_send_ioctx *send_ioctx)
+{
+ struct srp_tsk_mgmt *srp_tsk;
+ struct se_cmd *cmd;
+ uint64_t unpacked_lun;
+ int tcm_tmr;
+ int res;
+
+ BUG_ON(!send_ioctx);
+
+ srp_tsk = recv_ioctx->ioctx.buf;
+ cmd = &send_ioctx->cmd;
+
+ pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld"
+ " cm_id %p sess %p\n", srp_tsk->tsk_mgmt_func,
+ srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);
+
+ srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
+ send_ioctx->tag = srp_tsk->tag;
+ tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
+ if (tcm_tmr < 0) {
+ send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ send_ioctx->cmd.se_tmr_req->response =
+ TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
+ goto process_tmr;
+ }
+ cmd->se_tmr_req = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
+ if (!cmd->se_tmr_req) {
+ send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
+ goto process_tmr;
+ }
+
+ unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
+ sizeof(srp_tsk->lun));
+ res = transport_lookup_tmr_lun(&send_ioctx->cmd, unpacked_lun);
+ if (res) {
+ pr_debug("rejecting TMR for LUN %lld\n", unpacked_lun);
+ send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ send_ioctx->cmd.se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
+ goto process_tmr;
+ }
+
+ if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK)
+ srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
+
+process_tmr:
+ kref_get(&send_ioctx->kref);
+ if (!(send_ioctx->cmd.se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
+ transport_generic_handle_tmr(&send_ioctx->cmd);
+ else
+ transport_send_check_condition_and_sense(cmd,
+ cmd->scsi_sense_reason, 0);
+
+}
+
+/**
+ * srpt_handle_new_iu() - Process a newly received information unit.
+ * @ch: RDMA channel through which the information unit has been received.
+ * @recv_ioctx: Receive I/O context associated with the information unit.
+ * @send_ioctx: Send I/O context for the response, or NULL if one still has to
+ * be allocated.
+ */
+static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
+ struct srpt_recv_ioctx *recv_ioctx,
+ struct srpt_send_ioctx *send_ioctx)
+{
+ struct srp_cmd *srp_cmd;
+ enum rdma_ch_state ch_state;
+
+ BUG_ON(!ch);
+ BUG_ON(!recv_ioctx);
+
+ ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
+ recv_ioctx->ioctx.dma, srp_max_req_size,
+ DMA_FROM_DEVICE);
+
+ ch_state = srpt_get_ch_state(ch);
+ if (unlikely(ch_state == CH_CONNECTING)) {
+ list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
+ goto out;
+ }
+
+ if (unlikely(ch_state != CH_LIVE))
+ goto out;
+
+ srp_cmd = recv_ioctx->ioctx.buf;
+ if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
+ if (!send_ioctx)
+ send_ioctx = srpt_get_send_ioctx(ch);
+ if (unlikely(!send_ioctx)) {
+ list_add_tail(&recv_ioctx->wait_list,
+ &ch->cmd_wait_list);
+ goto out;
+ }
+ }
+
+ transport_init_se_cmd(&send_ioctx->cmd, &srpt_target->tf_ops, ch->sess,
+ 0, DMA_NONE, MSG_SIMPLE_TAG,
+ send_ioctx->sense_data);
+
+ switch (srp_cmd->opcode) {
+ case SRP_CMD:
+ srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
+ break;
+ case SRP_TSK_MGMT:
+ srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
+ break;
+ case SRP_I_LOGOUT:
+ printk(KERN_ERR "Not yet implemented: SRP_I_LOGOUT\n");
+ break;
+ case SRP_CRED_RSP:
+ pr_debug("received SRP_CRED_RSP\n");
+ break;
+ case SRP_AER_RSP:
+ pr_debug("received SRP_AER_RSP\n");
+ break;
+ case SRP_RSP:
+ printk(KERN_ERR "Received SRP_RSP\n");
+ break;
+ default:
+ printk(KERN_ERR "received IU with unknown opcode 0x%x\n",
+ srp_cmd->opcode);
+ break;
+ }
+
+ srpt_post_recv(ch->sport->sdev, recv_ioctx);
+out:
+ return;
+}
+
+static void srpt_process_rcv_completion(struct ib_cq *cq,
+ struct srpt_rdma_ch *ch,
+ struct ib_wc *wc)
+{
+ struct srpt_device *sdev = ch->sport->sdev;
+ struct srpt_recv_ioctx *ioctx;
+ u32 index;
+
+ index = idx_from_wr_id(wc->wr_id);
+ if (wc->status == IB_WC_SUCCESS) {
+ int req_lim;
+
+ req_lim = atomic_dec_return(&ch->req_lim);
+ if (unlikely(req_lim < 0))
+ printk(KERN_ERR "req_lim = %d < 0\n", req_lim);
+ ioctx = sdev->ioctx_ring[index];
+ srpt_handle_new_iu(ch, ioctx, NULL);
+ } else {
+ printk(KERN_INFO "receiving failed for idx %u with status %d\n",
+ index, wc->status);
+ }
+}
+
+/**
+ * srpt_process_send_completion() - Process an IB send completion.
+ *
+ * Note: Although this has not yet been observed during tests, at least in
+ * theory it is possible that the srpt_get_send_ioctx() call invoked by
+ * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
+ * value in each response is set to one, and it is possible that this response
+ * makes the initiator send a new request before the send completion for that
+ * response has been processed. This could e.g. happen if the call to
+ * srpt_put_send_ioctx() is delayed because of a higher priority interrupt or
+ * if IB retransmission causes generation of the send completion to be
+ * delayed. Incoming information units for which srpt_get_send_ioctx() fails
+ * are queued on cmd_wait_list. The code below processes these delayed
+ * requests one at a time.
+ */
+static void srpt_process_send_completion(struct ib_cq *cq,
+ struct srpt_rdma_ch *ch,
+ struct ib_wc *wc)
+{
+ struct srpt_send_ioctx *send_ioctx;
+ uint32_t index;
+ enum srpt_opcode opcode;
+
+ index = idx_from_wr_id(wc->wr_id);
+ opcode = opcode_from_wr_id(wc->wr_id);
+ send_ioctx = ch->ioctx_ring[index];
+ if (wc->status == IB_WC_SUCCESS) {
+ if (opcode == SRPT_SEND)
+ srpt_handle_send_comp(ch, send_ioctx);
+ else {
+ WARN_ON(opcode != SRPT_RDMA_ABORT &&
+ wc->opcode != IB_WC_RDMA_READ);
+ srpt_handle_rdma_comp(ch, send_ioctx, opcode);
+ }
+ } else {
+ if (opcode == SRPT_SEND) {
+ printk(KERN_INFO "sending response for idx %u failed"
+ " with status %d\n", index, wc->status);
+ srpt_handle_send_err_comp(ch, wc->wr_id);
+ } else if (opcode != SRPT_RDMA_MID) {
+ printk(KERN_INFO "RDMA t %d for idx %u failed with"
+ " status %d", opcode, index, wc->status);
+ srpt_handle_rdma_err_comp(ch, send_ioctx, opcode);
+ }
+ }
+
+ while (unlikely(opcode == SRPT_SEND
+ && !list_empty(&ch->cmd_wait_list)
+ && srpt_get_ch_state(ch) == CH_LIVE
+ && (send_ioctx = srpt_get_send_ioctx(ch)) != NULL)) {
+ struct srpt_recv_ioctx *recv_ioctx;
+
+ recv_ioctx = list_first_entry(&ch->cmd_wait_list,
+ struct srpt_recv_ioctx,
+ wait_list);
+ list_del(&recv_ioctx->wait_list);
+ srpt_handle_new_iu(ch, recv_ioctx, send_ioctx);
+ }
+}
+
+static void srpt_process_completion(struct ib_cq *cq, struct srpt_rdma_ch *ch)
+{
+ struct ib_wc *const wc = ch->wc;
+ int i, n;
+
+ WARN_ON(cq != ch->cq);
+
+ ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+ while ((n = ib_poll_cq(cq, ARRAY_SIZE(ch->wc), wc)) > 0) {
+ for (i = 0; i < n; i++) {
+ if (opcode_from_wr_id(wc[i].wr_id) == SRPT_RECV)
+ srpt_process_rcv_completion(cq, ch, &wc[i]);
+ else
+ srpt_process_send_completion(cq, ch, &wc[i]);
+ }
+ }
+}
+
+/**
+ * srpt_completion() - IB completion queue callback function.
+ *
+ * Notes:
+ * - It is guaranteed that a completion handler will never be invoked
+ * concurrently on two different CPUs for the same completion queue. See also
+ * Documentation/infiniband/core_locking.txt and the implementation of
+ * handle_edge_irq() in kernel/irq/chip.c.
+ * - When threaded IRQs are enabled, completion handlers are invoked in thread
+ * context instead of interrupt context.
+ */
+static void srpt_completion(struct ib_cq *cq, void *ctx)
+{
+ struct srpt_rdma_ch *ch = ctx;
+
+ wake_up_interruptible(&ch->wait_queue);
+}
+
+static int srpt_compl_thread(void *arg)
+{
+ struct srpt_rdma_ch *ch;
+
+ /* Hibernation / freezing of the SRPT kernel thread is not supported. */
+ current->flags |= PF_NOFREEZE;
+
+ ch = arg;
+ BUG_ON(!ch);
+ printk(KERN_INFO "Session %s: kernel thread %s (PID %d) started\n",
+ ch->sess_name, ch->thread->comm, current->pid);
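+	/*
+	 * The condition expression below first drains the completion queue
+	 * via srpt_process_completion() and only then evaluates
+	 * kthread_should_stop(), so completions are processed on every wakeup
+	 * of this thread.
+	 */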
+ while (!kthread_should_stop()) {
+ wait_event_interruptible(ch->wait_queue,
+ (srpt_process_completion(ch->cq, ch),
+ kthread_should_stop()));
+ }
+ printk(KERN_INFO "Session %s: kernel thread %s (PID %d) stopped\n",
+ ch->sess_name, ch->thread->comm, current->pid);
+ return 0;
+}
+
+/**
+ * srpt_create_ch_ib() - Create receive and send completion queues.
+ */
+static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
+{
+ struct ib_qp_init_attr *qp_init;
+ struct srpt_port *sport = ch->sport;
+ struct srpt_device *sdev = sport->sdev;
+ u32 srp_sq_size = sport->port_attrib.srp_sq_size;
+ int ret;
+
+ WARN_ON(ch->rq_size < 1);
+
+ ret = -ENOMEM;
+ qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL);
+ if (!qp_init)
+ goto out;
+
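+	/*
+	 * Size the completion queue such that it can hold one completion for
+	 * every outstanding receive (ch->rq_size) plus every outstanding send
+	 * work request (srp_sq_size).
+	 */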
+ ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
+ ch->rq_size + srp_sq_size, 0);
+ if (IS_ERR(ch->cq)) {
+ ret = PTR_ERR(ch->cq);
+ printk(KERN_ERR "failed to create CQ cqe= %d ret= %d\n",
+ ch->rq_size + srp_sq_size, ret);
+ goto out;
+ }
+
+ qp_init->qp_context = (void *)ch;
+ qp_init->event_handler
+ = (void(*)(struct ib_event *, void*))srpt_qp_event;
+ qp_init->send_cq = ch->cq;
+ qp_init->recv_cq = ch->cq;
+ qp_init->srq = sdev->srq;
+ qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
+ qp_init->qp_type = IB_QPT_RC;
+ qp_init->cap.max_send_wr = srp_sq_size;
+ qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
+
+ ch->qp = ib_create_qp(sdev->pd, qp_init);
+ if (IS_ERR(ch->qp)) {
+ ret = PTR_ERR(ch->qp);
+ printk(KERN_ERR "failed to create_qp ret= %d\n", ret);
+ goto err_destroy_cq;
+ }
+
+ atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
+
+ pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
+ __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
+ qp_init->cap.max_send_wr, ch->cm_id);
+
+ ret = srpt_init_ch_qp(ch, ch->qp);
+ if (ret)
+ goto err_destroy_qp;
+
+ init_waitqueue_head(&ch->wait_queue);
+
+ pr_debug("creating thread for session %s\n", ch->sess_name);
+
+ ch->thread = kthread_run(srpt_compl_thread, ch, "ib_srpt_compl");
+ if (IS_ERR(ch->thread)) {
+ printk(KERN_ERR "failed to create kernel thread %ld\n",
+ PTR_ERR(ch->thread));
+ ch->thread = NULL;
+ goto err_destroy_qp;
+ }
+
+out:
+ kfree(qp_init);
+ return ret;
+
+err_destroy_qp:
+ ib_destroy_qp(ch->qp);
+err_destroy_cq:
+ ib_destroy_cq(ch->cq);
+ goto out;
+}
+
+static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
+{
+ if (ch->thread)
+ kthread_stop(ch->thread);
+
+ ib_destroy_qp(ch->qp);
+ ib_destroy_cq(ch->cq);
+}
+
+/**
+ * __srpt_close_ch() - Close an RDMA channel by setting the QP error state.
+ *
+ * Reset the QP and make sure all resources associated with the channel will
+ * be deallocated at an appropriate time.
+ *
+ * Note: The caller must hold ch->sport->sdev->spinlock.
+ */
+static void __srpt_close_ch(struct srpt_rdma_ch *ch)
+{
+ struct srpt_device *sdev;
+ enum rdma_ch_state prev_state;
+ unsigned long flags;
+
+ sdev = ch->sport->sdev;
+
+ spin_lock_irqsave(&ch->spinlock, flags);
+ prev_state = ch->state;
+ switch (prev_state) {
+ case CH_CONNECTING:
+ case CH_LIVE:
+ ch->state = CH_DISCONNECTING;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+
+ switch (prev_state) {
+ case CH_CONNECTING:
+ ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0,
+ NULL, 0);
+ /* fall through */
+ case CH_LIVE:
+ if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)
+ printk(KERN_ERR "sending CM DREQ failed.\n");
+ break;
+ case CH_DISCONNECTING:
+ break;
+ case CH_DRAINING:
+ case CH_RELEASING:
+ break;
+ }
+}
+
+/**
+ * srpt_close_ch() - Close an RDMA channel.
+ */
+static void srpt_close_ch(struct srpt_rdma_ch *ch)
+{
+ struct srpt_device *sdev;
+
+ sdev = ch->sport->sdev;
+ spin_lock_irq(&sdev->spinlock);
+ __srpt_close_ch(ch);
+ spin_unlock_irq(&sdev->spinlock);
+}
+
+/**
+ * srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
+ * @cm_id: Pointer to the CM ID of the channel to be drained.
+ *
+ * Note: Must be called from inside srpt_cm_handler to avoid a race between
+ * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one()
+ * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one()
+ * waits until all target sessions for the associated IB device have been
+ * unregistered and target session registration involves a call to
+ * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until
+ * this function has finished).
+ */
+static void srpt_drain_channel(struct ib_cm_id *cm_id)
+{
+ struct srpt_device *sdev;
+ struct srpt_rdma_ch *ch;
+ int ret;
+ bool do_reset = false;
+
+ WARN_ON_ONCE(irqs_disabled());
+
+ sdev = cm_id->context;
+ BUG_ON(!sdev);
+ spin_lock_irq(&sdev->spinlock);
+ list_for_each_entry(ch, &sdev->rch_list, list) {
+ if (ch->cm_id == cm_id) {
+ do_reset = srpt_test_and_set_ch_state(ch,
+ CH_CONNECTING, CH_DRAINING) ||
+ srpt_test_and_set_ch_state(ch,
+ CH_LIVE, CH_DRAINING) ||
+ srpt_test_and_set_ch_state(ch,
+ CH_DISCONNECTING, CH_DRAINING);
+ break;
+ }
+ }
+ spin_unlock_irq(&sdev->spinlock);
+
+ if (do_reset) {
+ ret = srpt_ch_qp_err(ch);
+ if (ret < 0)
+ printk(KERN_ERR "Setting queue pair in error state"
+ " failed: %d\n", ret);
+ }
+}
+
+/**
+ * srpt_find_channel() - Look up an RDMA channel.
+ * @cm_id: Pointer to the CM ID of the channel to be looked up.
+ *
+ * Return NULL if no matching RDMA channel has been found.
+ */
+static struct srpt_rdma_ch *srpt_find_channel(struct srpt_device *sdev,
+ struct ib_cm_id *cm_id)
+{
+ struct srpt_rdma_ch *ch;
+ bool found;
+
+ WARN_ON_ONCE(irqs_disabled());
+ BUG_ON(!sdev);
+
+ found = false;
+ spin_lock_irq(&sdev->spinlock);
+ list_for_each_entry(ch, &sdev->rch_list, list) {
+ if (ch->cm_id == cm_id) {
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irq(&sdev->spinlock);
+
+ return found ? ch : NULL;
+}
+
+/**
+ * srpt_release_channel() - Release channel resources.
+ *
+ * Schedules the actual release because:
+ * - Calling the ib_destroy_cm_id() call from inside an IB CM callback would
+ * trigger a deadlock.
+ * - It is not safe to call TCM transport_* functions from interrupt context.
+ */
+static void srpt_release_channel(struct srpt_rdma_ch *ch)
+{
+ schedule_work(&ch->release_work);
+}
+
+static void srpt_release_channel_work(struct work_struct *w)
+{
+ struct srpt_rdma_ch *ch;
+ struct srpt_device *sdev;
+
+ ch = container_of(w, struct srpt_rdma_ch, release_work);
+ pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
+ ch->release_done);
+
+ sdev = ch->sport->sdev;
+ BUG_ON(!sdev);
+
+ transport_deregister_session_configfs(ch->sess);
+ transport_deregister_session(ch->sess);
+ ch->sess = NULL;
+
+ srpt_destroy_ch_ib(ch);
+
+ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
+ ch->sport->sdev, ch->rq_size,
+ ch->rsp_size, DMA_TO_DEVICE);
+
+ spin_lock_irq(&sdev->spinlock);
+ list_del(&ch->list);
+ spin_unlock_irq(&sdev->spinlock);
+
+ ib_destroy_cm_id(ch->cm_id);
+
+ if (ch->release_done)
+ complete(ch->release_done);
+
+ wake_up(&sdev->ch_releaseQ);
+
+ kfree(ch);
+}
+
+static struct srpt_node_acl *__srpt_lookup_acl(struct srpt_port *sport,
+ u8 i_port_id[16])
+{
+ struct srpt_node_acl *nacl;
+
+ list_for_each_entry(nacl, &sport->port_acl_list, list)
+ if (memcmp(nacl->i_port_id, i_port_id,
+ sizeof(nacl->i_port_id)) == 0)
+ return nacl;
+
+ return NULL;
+}
+
+static struct srpt_node_acl *srpt_lookup_acl(struct srpt_port *sport,
+ u8 i_port_id[16])
+{
+ struct srpt_node_acl *nacl;
+
+ spin_lock_irq(&sport->port_acl_lock);
+ nacl = __srpt_lookup_acl(sport, i_port_id);
+ spin_unlock_irq(&sport->port_acl_lock);
+
+ return nacl;
+}
+
+/**
+ * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
+ *
+ * Ownership of the cm_id is transferred to the target session if this
+ * function returns zero. Otherwise the caller remains the owner of cm_id.
+ */
+static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
+ struct ib_cm_req_event_param *param,
+ void *private_data)
+{
+ struct srpt_device *sdev = cm_id->context;
+ struct srpt_port *sport = &sdev->port[param->port - 1];
+ struct srp_login_req *req;
+ struct srp_login_rsp *rsp;
+ struct srp_login_rej *rej;
+ struct ib_cm_rep_param *rep_param;
+ struct srpt_rdma_ch *ch, *tmp_ch;
+ struct srpt_node_acl *nacl;
+ u32 it_iu_len;
+ int i;
+ int ret = 0;
+
+ WARN_ON_ONCE(irqs_disabled());
+
+ if (WARN_ON(!sdev || !private_data))
+ return -EINVAL;
+
+ req = (struct srp_login_req *)private_data;
+
+ it_iu_len = be32_to_cpu(req->req_it_iu_len);
+
+ printk(KERN_INFO "Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx,"
+ " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d"
+ " (guid=0x%llx:0x%llx)\n",
+ be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
+ be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
+ be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
+ be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
+ it_iu_len,
+ param->port,
+ be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
+ be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
+
+ rsp = kzalloc(sizeof *rsp, GFP_KERNEL);
+ rej = kzalloc(sizeof *rej, GFP_KERNEL);
+ rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL);
+
+ if (!rsp || !rej || !rep_param) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
+ rej->reason = __constant_cpu_to_be32(
+ SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
+ ret = -EINVAL;
+ printk(KERN_ERR "rejected SRP_LOGIN_REQ because its"
+ " length (%d bytes) is out of range (%d .. %d)\n",
+ it_iu_len, 64, srp_max_req_size);
+ goto reject;
+ }
+
+ if (!sport->enabled) {
+ rej->reason = __constant_cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ ret = -EINVAL;
+ printk(KERN_ERR "rejected SRP_LOGIN_REQ because the target port"
+ " has not yet been enabled\n");
+ goto reject;
+ }
+
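+	/*
+	 * A login request with SRP_MULTICHAN_SINGLE asks the target to
+	 * terminate any existing channel between the same initiator and
+	 * target ports before the new channel is established; the loop below
+	 * closes such channels and reports that in the login response flags.
+	 */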
+ if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
+ rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
+
+ spin_lock_irq(&sdev->spinlock);
+
+ list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
+ if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
+ && !memcmp(ch->t_port_id, req->target_port_id, 16)
+ && param->port == ch->sport->port
+ && param->listen_id == ch->sport->sdev->cm_id
+ && ch->cm_id) {
+ enum rdma_ch_state ch_state;
+
+ ch_state = srpt_get_ch_state(ch);
+ if (ch_state != CH_CONNECTING
+ && ch_state != CH_LIVE)
+ continue;
+
+ /* found an existing channel */
+ pr_debug("Found existing channel %s"
+ " cm_id= %p state= %d\n",
+ ch->sess_name, ch->cm_id, ch_state);
+
+ __srpt_close_ch(ch);
+
+ rsp->rsp_flags =
+ SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
+ }
+ }
+
+ spin_unlock_irq(&sdev->spinlock);
+
+ } else
+ rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
+
+ if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
+ || *(__be64 *)(req->target_port_id + 8) !=
+ cpu_to_be64(srpt_service_guid)) {
+ rej->reason = __constant_cpu_to_be32(
+ SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
+ ret = -ENOMEM;
+ printk(KERN_ERR "rejected SRP_LOGIN_REQ because it"
+ " has an invalid target port identifier.\n");
+ goto reject;
+ }
+
+ ch = kzalloc(sizeof *ch, GFP_KERNEL);
+ if (!ch) {
+ rej->reason = __constant_cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ printk(KERN_ERR "rejected SRP_LOGIN_REQ because no memory.\n");
+ ret = -ENOMEM;
+ goto reject;
+ }
+
+ INIT_WORK(&ch->release_work, srpt_release_channel_work);
+ memcpy(ch->i_port_id, req->initiator_port_id, 16);
+ memcpy(ch->t_port_id, req->target_port_id, 16);
+ ch->sport = &sdev->port[param->port - 1];
+ ch->cm_id = cm_id;
+ /*
+ * Avoid QUEUE_FULL conditions by limiting the number of buffers used
+ * for the SRP protocol to the command queue size.
+ */
+ ch->rq_size = SRPT_RQ_SIZE;
+ spin_lock_init(&ch->spinlock);
+ ch->state = CH_CONNECTING;
+ INIT_LIST_HEAD(&ch->cmd_wait_list);
+ ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
+
+ ch->ioctx_ring = (struct srpt_send_ioctx **)
+ srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
+ sizeof(*ch->ioctx_ring[0]),
+ ch->rsp_size, DMA_TO_DEVICE);
+ if (!ch->ioctx_ring)
+ goto free_ch;
+
+ INIT_LIST_HEAD(&ch->free_list);
+ for (i = 0; i < ch->rq_size; i++) {
+ ch->ioctx_ring[i]->ch = ch;
+ list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
+ }
+
+ ret = srpt_create_ch_ib(ch);
+ if (ret) {
+ rej->reason = __constant_cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ printk(KERN_ERR "rejected SRP_LOGIN_REQ because creating"
+ " a new RDMA channel failed.\n");
+ goto free_ring;
+ }
+
+ ret = srpt_ch_qp_rtr(ch, ch->qp);
+ if (ret) {
+ rej->reason = __constant_cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ printk(KERN_ERR "rejected SRP_LOGIN_REQ because enabling"
+ " RTR failed (error code = %d)\n", ret);
+ goto destroy_ib;
+ }
+ /*
+	 * Use the initiator port identifier as the session name.
+ */
+ snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
+ be64_to_cpu(*(__be64 *)ch->i_port_id),
+ be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
+
+ pr_debug("registering session %s\n", ch->sess_name);
+
+ nacl = srpt_lookup_acl(sport, ch->i_port_id);
+ if (!nacl) {
+ printk(KERN_INFO "Rejected login because no ACL has been"
+ " configured yet for initiator %s.\n", ch->sess_name);
+ rej->reason = __constant_cpu_to_be32(
+ SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
+ goto destroy_ib;
+ }
+
+ ch->sess = transport_init_session();
+ if (!ch->sess) {
+ rej->reason = __constant_cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_debug("Failed to create session\n");
+ goto deregister_session;
+ }
+ ch->sess->se_node_acl = &nacl->nacl;
+ transport_register_session(&sport->port_tpg_1, &nacl->nacl, ch->sess, ch);
+
+ pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
+ ch->sess_name, ch->cm_id);
+
+ /* create srp_login_response */
+ rsp->opcode = SRP_LOGIN_RSP;
+ rsp->tag = req->tag;
+ rsp->max_it_iu_len = req->req_it_iu_len;
+ rsp->max_ti_iu_len = req->req_it_iu_len;
+ ch->max_ti_iu_len = it_iu_len;
+ rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
+ | SRP_BUF_FORMAT_INDIRECT);
+ rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
+ atomic_set(&ch->req_lim, ch->rq_size);
+ atomic_set(&ch->req_lim_delta, 0);
+
+ /* create cm reply */
+ rep_param->qp_num = ch->qp->qp_num;
+ rep_param->private_data = (void *)rsp;
+ rep_param->private_data_len = sizeof *rsp;
+ rep_param->rnr_retry_count = 7;
+ rep_param->flow_control = 1;
+ rep_param->failover_accepted = 0;
+ rep_param->srq = 1;
+ rep_param->responder_resources = 4;
+ rep_param->initiator_depth = 4;
+
+ ret = ib_send_cm_rep(cm_id, rep_param);
+ if (ret) {
+ printk(KERN_ERR "sending SRP_LOGIN_REQ response failed"
+ " (error code = %d)\n", ret);
+ goto release_channel;
+ }
+
+ spin_lock_irq(&sdev->spinlock);
+ list_add_tail(&ch->list, &sdev->rch_list);
+ spin_unlock_irq(&sdev->spinlock);
+
+ goto out;
+
+release_channel:
+ srpt_set_ch_state(ch, CH_RELEASING);
+ transport_deregister_session_configfs(ch->sess);
+
+deregister_session:
+ transport_deregister_session(ch->sess);
+ ch->sess = NULL;
+
+destroy_ib:
+ srpt_destroy_ch_ib(ch);
+
+free_ring:
+ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
+ ch->sport->sdev, ch->rq_size,
+ ch->rsp_size, DMA_TO_DEVICE);
+free_ch:
+ kfree(ch);
+
+reject:
+ rej->opcode = SRP_LOGIN_REJ;
+ rej->tag = req->tag;
+ rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
+ | SRP_BUF_FORMAT_INDIRECT);
+
+ ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
+ (void *)rej, sizeof *rej);
+
+out:
+ kfree(rep_param);
+ kfree(rsp);
+ kfree(rej);
+
+ return ret;
+}
+
+static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
+{
+ printk(KERN_INFO "Received IB REJ for cm_id %p.\n", cm_id);
+ srpt_drain_channel(cm_id);
+}
+
+/**
+ * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event.
+ *
+ * An IB_CM_RTU_RECEIVED message indicates that the connection is established
+ * and that the recipient may begin transmitting (RTU = ready to use).
+ */
+static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
+{
+ struct srpt_rdma_ch *ch;
+ int ret;
+
+ ch = srpt_find_channel(cm_id->context, cm_id);
+ BUG_ON(!ch);
+
+ if (srpt_test_and_set_ch_state(ch, CH_CONNECTING, CH_LIVE)) {
+ struct srpt_recv_ioctx *ioctx, *ioctx_tmp;
+
+ ret = srpt_ch_qp_rts(ch, ch->qp);
+
+ list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
+ wait_list) {
+ list_del(&ioctx->wait_list);
+ srpt_handle_new_iu(ch, ioctx, NULL);
+ }
+ if (ret)
+ srpt_close_ch(ch);
+ }
+}
+
+static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
+{
+ printk(KERN_INFO "Received IB TimeWait exit for cm_id %p.\n", cm_id);
+ srpt_drain_channel(cm_id);
+}
+
+static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
+{
+ printk(KERN_INFO "Received IB REP error for cm_id %p.\n", cm_id);
+ srpt_drain_channel(cm_id);
+}
+
+/**
+ * srpt_cm_dreq_recv() - Process reception of a DREQ message.
+ */
+static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
+{
+ struct srpt_rdma_ch *ch;
+ unsigned long flags;
+ bool send_drep = false;
+
+ ch = srpt_find_channel(cm_id->context, cm_id);
+ BUG_ON(!ch);
+
+ pr_debug("cm_id= %p ch->state= %d\n", cm_id, srpt_get_ch_state(ch));
+
+ spin_lock_irqsave(&ch->spinlock, flags);
+ switch (ch->state) {
+ case CH_CONNECTING:
+ case CH_LIVE:
+ send_drep = true;
+ ch->state = CH_DISCONNECTING;
+ break;
+ case CH_DISCONNECTING:
+ case CH_DRAINING:
+ case CH_RELEASING:
+ WARN(true, "unexpected channel state %d\n", ch->state);
+ break;
+ }
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+
+ if (send_drep) {
+ if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0)
+ printk(KERN_ERR "Sending IB DREP failed.\n");
+ printk(KERN_INFO "Received DREQ and sent DREP for session %s.\n",
+ ch->sess_name);
+ }
+}
+
+/**
+ * srpt_cm_drep_recv() - Process reception of a DREP message.
+ */
+static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
+{
+ printk(KERN_INFO "Received InfiniBand DREP message for cm_id %p.\n",
+ cm_id);
+ srpt_drain_channel(cm_id);
+}
+
+/**
+ * srpt_cm_handler() - IB connection manager callback function.
+ *
+ * A non-zero return value will cause the caller to destroy the CM ID.
+ *
+ * Note: srpt_cm_handler() must only return a non-zero value when
+ * srpt_cm_req_recv() failed to transfer ownership of the cm_id to a channel.
+ * Returning a non-zero value in any other case will trigger a race with the
+ * ib_destroy_cm_id() call in srpt_release_channel().
+ */
+static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+{
+ int ret;
+
+ ret = 0;
+ switch (event->event) {
+ case IB_CM_REQ_RECEIVED:
+ ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
+ event->private_data);
+ break;
+ case IB_CM_REJ_RECEIVED:
+ srpt_cm_rej_recv(cm_id);
+ break;
+ case IB_CM_RTU_RECEIVED:
+ case IB_CM_USER_ESTABLISHED:
+ srpt_cm_rtu_recv(cm_id);
+ break;
+ case IB_CM_DREQ_RECEIVED:
+ srpt_cm_dreq_recv(cm_id);
+ break;
+ case IB_CM_DREP_RECEIVED:
+ srpt_cm_drep_recv(cm_id);
+ break;
+ case IB_CM_TIMEWAIT_EXIT:
+ srpt_cm_timewait_exit(cm_id);
+ break;
+ case IB_CM_REP_ERROR:
+ srpt_cm_rep_error(cm_id);
+ break;
+ case IB_CM_DREQ_ERROR:
+ printk(KERN_INFO "Received IB DREQ ERROR event.\n");
+ break;
+ case IB_CM_MRA_RECEIVED:
+ printk(KERN_INFO "Received IB MRA event\n");
+ break;
+ default:
+ printk(KERN_ERR "received unrecognized IB CM event %d\n",
+ event->event);
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * srpt_perform_rdmas() - Perform IB RDMA.
+ *
+ * Returns zero upon success or a negative number upon failure.
+ */
+static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx)
+{
+ struct ib_send_wr wr;
+ struct ib_send_wr *bad_wr;
+ struct rdma_iu *riu;
+ int i;
+ int ret;
+ int sq_wr_avail;
+ enum dma_data_direction dir;
+ const int n_rdma = ioctx->n_rdma;
+
+ dir = ioctx->cmd.data_direction;
+ if (dir == DMA_TO_DEVICE) {
+ /* write */
+ ret = -ENOMEM;
+ sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail);
+ if (sq_wr_avail < 0) {
+ printk(KERN_WARNING "IB send queue full (needed %d)\n",
+ n_rdma);
+ goto out;
+ }
+ }
+
+ ioctx->rdma_aborted = false;
+ ret = 0;
+ riu = ioctx->rdma_ius;
+ memset(&wr, 0, sizeof wr);
+
+ for (i = 0; i < n_rdma; ++i, ++riu) {
+ if (dir == DMA_FROM_DEVICE) {
+ wr.opcode = IB_WR_RDMA_WRITE;
+ wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
+ SRPT_RDMA_WRITE_LAST :
+ SRPT_RDMA_MID,
+ ioctx->ioctx.index);
+ } else {
+ wr.opcode = IB_WR_RDMA_READ;
+ wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
+ SRPT_RDMA_READ_LAST :
+ SRPT_RDMA_MID,
+ ioctx->ioctx.index);
+ }
+ wr.next = NULL;
+ wr.wr.rdma.remote_addr = riu->raddr;
+ wr.wr.rdma.rkey = riu->rkey;
+ wr.num_sge = riu->sge_cnt;
+ wr.sg_list = riu->sge;
+
+ /* only get completion event for the last rdma write */
+ if (i == (n_rdma - 1) && dir == DMA_TO_DEVICE)
+ wr.send_flags = IB_SEND_SIGNALED;
+
+ ret = ib_post_send(ch->qp, &wr, &bad_wr);
+ if (ret)
+ break;
+ }
+
+ if (ret)
+		printk(KERN_ERR "%s[%d]: ib_post_send() returned %d for %d/%d\n",
+ __func__, __LINE__, ret, i, n_rdma);
+ if (ret && i > 0) {
+ wr.num_sge = 0;
+ wr.wr_id = encode_wr_id(SRPT_RDMA_ABORT, ioctx->ioctx.index);
+ wr.send_flags = IB_SEND_SIGNALED;
+ while (ch->state == CH_LIVE &&
+ ib_post_send(ch->qp, &wr, &bad_wr) != 0) {
+			printk(KERN_INFO "Trying to abort failed RDMA transfer [%d]\n",
+ ioctx->ioctx.index);
+ msleep(1000);
+ }
+ while (ch->state != CH_RELEASING && !ioctx->rdma_aborted) {
+			printk(KERN_INFO "Waiting until RDMA abort finished [%d]\n",
+ ioctx->ioctx.index);
+ msleep(1000);
+ }
+ }
+out:
+ if (unlikely(dir == DMA_TO_DEVICE && ret < 0))
+ atomic_add(n_rdma, &ch->sq_wr_avail);
+ return ret;
+}
+
+/**
+ * srpt_xfer_data() - Start data transfer from initiator to target.
+ */
+static int srpt_xfer_data(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx)
+{
+ int ret;
+
+ ret = srpt_map_sg_to_ib_sge(ch, ioctx);
+ if (ret) {
+ printk(KERN_ERR "%s[%d] ret=%d\n", __func__, __LINE__, ret);
+ goto out;
+ }
+
+ ret = srpt_perform_rdmas(ch, ioctx);
+ if (ret) {
+ if (ret == -EAGAIN || ret == -ENOMEM)
+ printk(KERN_INFO "%s[%d] queue full -- ret=%d\n",
+ __func__, __LINE__, ret);
+ else
+ printk(KERN_ERR "%s[%d] fatal error -- ret=%d\n",
+ __func__, __LINE__, ret);
+ goto out_unmap;
+ }
+
+out:
+ return ret;
+out_unmap:
+ srpt_unmap_sg_to_ib_sge(ch, ioctx);
+ goto out;
+}
+
+static int srpt_write_pending_status(struct se_cmd *se_cmd)
+{
+ struct srpt_send_ioctx *ioctx;
+
+ ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
+ return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA;
+}
+
+/**
+ * srpt_write_pending() - Start data transfer from initiator to target (write).
+ */
+static int srpt_write_pending(struct se_cmd *se_cmd)
+{
+ struct srpt_rdma_ch *ch;
+ struct srpt_send_ioctx *ioctx;
+ enum srpt_command_state new_state;
+ enum rdma_ch_state ch_state;
+ int ret;
+
+ ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
+
+ new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
+ WARN_ON(new_state == SRPT_STATE_DONE);
+
+ ch = ioctx->ch;
+ BUG_ON(!ch);
+
+ ch_state = srpt_get_ch_state(ch);
+ switch (ch_state) {
+ case CH_CONNECTING:
+ WARN(true, "unexpected channel state %d\n", ch_state);
+ ret = -EINVAL;
+ goto out;
+ case CH_LIVE:
+ break;
+ case CH_DISCONNECTING:
+ case CH_DRAINING:
+ case CH_RELEASING:
+ pr_debug("cmd with tag %lld: channel disconnecting\n",
+ ioctx->tag);
+ srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = srpt_xfer_data(ch, ioctx);
+
+out:
+ return ret;
+}
+
+static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
+{
+ switch (tcm_mgmt_status) {
+ case TMR_FUNCTION_COMPLETE:
+ return SRP_TSK_MGMT_SUCCESS;
+ case TMR_FUNCTION_REJECTED:
+ return SRP_TSK_MGMT_FUNC_NOT_SUPP;
+ }
+ return SRP_TSK_MGMT_FAILED;
+}
+
+/**
+ * srpt_queue_response() - Transmits the response to a SCSI command.
+ *
+ * Callback function called by the TCM core. Must not block since it can be
+ * invoked in the context of the IB completion handler.
+ */
+static int srpt_queue_response(struct se_cmd *cmd)
+{
+ struct srpt_rdma_ch *ch;
+ struct srpt_send_ioctx *ioctx;
+ enum srpt_command_state state;
+ unsigned long flags;
+ int ret;
+ enum dma_data_direction dir;
+ int resp_len;
+ u8 srp_tm_status;
+
+ ret = 0;
+
+ ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
+ ch = ioctx->ch;
+ BUG_ON(!ch);
+
+ spin_lock_irqsave(&ioctx->spinlock, flags);
+ state = ioctx->state;
+ switch (state) {
+ case SRPT_STATE_NEW:
+ case SRPT_STATE_DATA_IN:
+ ioctx->state = SRPT_STATE_CMD_RSP_SENT;
+ break;
+ case SRPT_STATE_MGMT:
+ ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
+ break;
+ default:
+ WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
+ ch, ioctx->ioctx.index, ioctx->state);
+ break;
+ }
+ spin_unlock_irqrestore(&ioctx->spinlock, flags);
+
+ if (unlikely(transport_check_aborted_status(&ioctx->cmd, false)
+ || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) {
+ atomic_inc(&ch->req_lim_delta);
+ srpt_abort_cmd(ioctx);
+ goto out;
+ }
+
+ dir = ioctx->cmd.data_direction;
+
+ /* For read commands, transfer the data to the initiator. */
+ if (dir == DMA_FROM_DEVICE && ioctx->cmd.data_length &&
+ !ioctx->queue_status_only) {
+ ret = srpt_xfer_data(ch, ioctx);
+ if (ret) {
+ printk(KERN_ERR "xfer_data failed for tag %llu\n",
+ ioctx->tag);
+ goto out;
+ }
+ }
+
+ if (state != SRPT_STATE_MGMT)
+ resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->tag,
+ cmd->scsi_status);
+ else {
+ srp_tm_status
+ = tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
+ resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
+ ioctx->tag);
+ }
+ ret = srpt_post_send(ch, ioctx, resp_len);
+ if (ret) {
+ printk(KERN_ERR "sending cmd response failed for tag %llu\n",
+ ioctx->tag);
+ srpt_unmap_sg_to_ib_sge(ch, ioctx);
+ srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
+ kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+ }
+
+out:
+ return ret;
+}
+
+static int srpt_queue_status(struct se_cmd *cmd)
+{
+ struct srpt_send_ioctx *ioctx;
+
+ ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
+ BUG_ON(ioctx->sense_data != cmd->sense_buffer);
+ if (cmd->se_cmd_flags &
+ (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
+ WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
+ ioctx->queue_status_only = true;
+ return srpt_queue_response(cmd);
+}
+
+static void srpt_refresh_port_work(struct work_struct *work)
+{
+ struct srpt_port *sport = container_of(work, struct srpt_port, work);
+
+ srpt_refresh_port(sport);
+}
+
+static int srpt_ch_list_empty(struct srpt_device *sdev)
+{
+ int res;
+
+ spin_lock_irq(&sdev->spinlock);
+ res = list_empty(&sdev->rch_list);
+ spin_unlock_irq(&sdev->spinlock);
+
+ return res;
+}
+
+/**
+ * srpt_release_sdev() - Free the channel resources associated with a target.
+ */
+static int srpt_release_sdev(struct srpt_device *sdev)
+{
+ struct srpt_rdma_ch *ch, *tmp_ch;
+ int res;
+
+ WARN_ON_ONCE(irqs_disabled());
+
+ BUG_ON(!sdev);
+
+ spin_lock_irq(&sdev->spinlock);
+ list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list)
+ __srpt_close_ch(ch);
+ spin_unlock_irq(&sdev->spinlock);
+
+ res = wait_event_interruptible(sdev->ch_releaseQ,
+ srpt_ch_list_empty(sdev));
+ if (res)
+ printk(KERN_ERR "%s: interrupted.\n", __func__);
+
+ return 0;
+}
+
+static struct srpt_port *__srpt_lookup_port(const char *name)
+{
+ struct ib_device *dev;
+ struct srpt_device *sdev;
+ struct srpt_port *sport;
+ int i;
+
+ list_for_each_entry(sdev, &srpt_dev_list, list) {
+ dev = sdev->device;
+ if (!dev)
+ continue;
+
+ for (i = 0; i < dev->phys_port_cnt; i++) {
+ sport = &sdev->port[i];
+
+ if (!strcmp(sport->port_guid, name))
+ return sport;
+ }
+ }
+
+ return NULL;
+}
+
+static struct srpt_port *srpt_lookup_port(const char *name)
+{
+ struct srpt_port *sport;
+
+ spin_lock(&srpt_dev_lock);
+ sport = __srpt_lookup_port(name);
+ spin_unlock(&srpt_dev_lock);
+
+ return sport;
+}
+
+/**
+ * srpt_add_one() - InfiniBand device addition callback function.
+ */
+static void srpt_add_one(struct ib_device *device)
+{
+ struct srpt_device *sdev;
+ struct srpt_port *sport;
+ struct ib_srq_init_attr srq_attr;
+ int i;
+
+ pr_debug("device = %p, device->dma_ops = %p\n", device,
+ device->dma_ops);
+
+ sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
+ if (!sdev)
+ goto err;
+
+ sdev->device = device;
+ INIT_LIST_HEAD(&sdev->rch_list);
+ init_waitqueue_head(&sdev->ch_releaseQ);
+ spin_lock_init(&sdev->spinlock);
+
+ if (ib_query_device(device, &sdev->dev_attr))
+ goto free_dev;
+
+ sdev->pd = ib_alloc_pd(device);
+ if (IS_ERR(sdev->pd))
+ goto free_dev;
+
+ sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(sdev->mr))
+ goto err_pd;
+
+ sdev->srq_size = min(srpt_srq_size, sdev->dev_attr.max_srq_wr);
+
+ srq_attr.event_handler = srpt_srq_event;
+ srq_attr.srq_context = (void *)sdev;
+ srq_attr.attr.max_wr = sdev->srq_size;
+ srq_attr.attr.max_sge = 1;
+ srq_attr.attr.srq_limit = 0;
+
+ sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
+ if (IS_ERR(sdev->srq))
+ goto err_mr;
+
+ pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n",
+ __func__, sdev->srq_size, sdev->dev_attr.max_srq_wr,
+ device->name);
+
+ if (!srpt_service_guid)
+ srpt_service_guid = be64_to_cpu(device->node_guid);
+
+ sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
+ if (IS_ERR(sdev->cm_id))
+ goto err_srq;
+
+ /* print out target login information */
+ pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
+ "pkey=ffff,service_id=%016llx\n", srpt_service_guid,
+ srpt_service_guid, srpt_service_guid);
+
+ /*
+	 * We do not have a consistent service_id (i.e. also id_ext of target_id)
+	 * to identify this target. We currently use the GUID of the first HCA
+	 * in the system as service_id; therefore, the target_id will change
+	 * if this HCA fails and is replaced by a different HCA.
+ */
+ if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0, NULL))
+ goto err_cm;
+
+ INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
+ srpt_event_handler);
+ if (ib_register_event_handler(&sdev->event_handler))
+ goto err_cm;
+
+ sdev->ioctx_ring = (struct srpt_recv_ioctx **)
+ srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
+ sizeof(*sdev->ioctx_ring[0]),
+ srp_max_req_size, DMA_FROM_DEVICE);
+ if (!sdev->ioctx_ring)
+ goto err_event;
+
+ for (i = 0; i < sdev->srq_size; ++i)
+ srpt_post_recv(sdev, sdev->ioctx_ring[i]);
+
+ WARN_ON(sdev->device->phys_port_cnt
+ > sizeof(sdev->port)/sizeof(sdev->port[0]));
+
+ for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
+ sport = &sdev->port[i - 1];
+ sport->sdev = sdev;
+ sport->port = i;
+ sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
+ sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
+ sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
+ INIT_WORK(&sport->work, srpt_refresh_port_work);
+ INIT_LIST_HEAD(&sport->port_acl_list);
+ spin_lock_init(&sport->port_acl_lock);
+
+ if (srpt_refresh_port(sport)) {
+ printk(KERN_ERR "MAD registration failed for %s-%d.\n",
+ srpt_sdev_name(sdev), i);
+ goto err_ring;
+ }
+ snprintf(sport->port_guid, sizeof(sport->port_guid),
+ "0x%016llx%016llx",
+ be64_to_cpu(sport->gid.global.subnet_prefix),
+ be64_to_cpu(sport->gid.global.interface_id));
+ }
+
+ spin_lock(&srpt_dev_lock);
+ list_add_tail(&sdev->list, &srpt_dev_list);
+ spin_unlock(&srpt_dev_lock);
+
+out:
+ ib_set_client_data(device, &srpt_client, sdev);
+ pr_debug("added %s.\n", device->name);
+ return;
+
+err_ring:
+ srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
+ sdev->srq_size, srp_max_req_size,
+ DMA_FROM_DEVICE);
+err_event:
+ ib_unregister_event_handler(&sdev->event_handler);
+err_cm:
+ ib_destroy_cm_id(sdev->cm_id);
+err_srq:
+ ib_destroy_srq(sdev->srq);
+err_mr:
+ ib_dereg_mr(sdev->mr);
+err_pd:
+ ib_dealloc_pd(sdev->pd);
+free_dev:
+ kfree(sdev);
+err:
+ sdev = NULL;
+ printk(KERN_INFO "%s(%s) failed.\n", __func__, device->name);
+ goto out;
+}
+
+/**
+ * srpt_remove_one() - InfiniBand device removal callback function.
+ */
+static void srpt_remove_one(struct ib_device *device)
+{
+ struct srpt_device *sdev;
+ int i;
+
+ sdev = ib_get_client_data(device, &srpt_client);
+ if (!sdev) {
+ printk(KERN_INFO "%s(%s): nothing to do.\n", __func__,
+ device->name);
+ return;
+ }
+
+ srpt_unregister_mad_agent(sdev);
+
+ ib_unregister_event_handler(&sdev->event_handler);
+
+ /* Cancel any work queued by the just unregistered IB event handler. */
+ for (i = 0; i < sdev->device->phys_port_cnt; i++)
+ cancel_work_sync(&sdev->port[i].work);
+
+ ib_destroy_cm_id(sdev->cm_id);
+
+ /*
+ * Unregistering a target must happen after destroying sdev->cm_id
+ * such that no new SRP_LOGIN_REQ information units can arrive while
+ * destroying the target.
+ */
+ spin_lock(&srpt_dev_lock);
+ list_del(&sdev->list);
+ spin_unlock(&srpt_dev_lock);
+ srpt_release_sdev(sdev);
+
+ ib_destroy_srq(sdev->srq);
+ ib_dereg_mr(sdev->mr);
+ ib_dealloc_pd(sdev->pd);
+
+ srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
+ sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
+ sdev->ioctx_ring = NULL;
+ kfree(sdev);
+}
+
+static struct ib_client srpt_client = {
+ .name = DRV_NAME,
+ .add = srpt_add_one,
+ .remove = srpt_remove_one
+};
+
+static int srpt_check_true(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int srpt_check_false(struct se_portal_group *se_tpg)
+{
+ return 0;
+}
+
+static char *srpt_get_fabric_name(void)
+{
+ return "srpt";
+}
+
+static u8 srpt_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+ return SCSI_TRANSPORTID_PROTOCOLID_SRP;
+}
+
+static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
+{
+ struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
+
+ return sport->port_guid;
+}
+
+static u16 srpt_get_tag(struct se_portal_group *tpg)
+{
+ return 1;
+}
+
+static u32 srpt_get_default_depth(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static u32 srpt_get_pr_transport_id(struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code, unsigned char *buf)
+{
+ struct srpt_node_acl *nacl;
+ struct spc_rdma_transport_id *tr_id;
+
+ nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
+ tr_id = (void *)buf;
+ tr_id->protocol_identifier = SCSI_TRANSPORTID_PROTOCOLID_SRP;
+ memcpy(tr_id->i_port_id, nacl->i_port_id, sizeof(tr_id->i_port_id));
+ return sizeof(*tr_id);
+}
+
+static u32 srpt_get_pr_transport_id_len(struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code)
+{
+ *format_code = 0;
+ return sizeof(struct spc_rdma_transport_id);
+}
+
+static char *srpt_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
+ const char *buf, u32 *out_tid_len,
+ char **port_nexus_ptr)
+{
+ struct spc_rdma_transport_id *tr_id;
+
+ *port_nexus_ptr = NULL;
+ *out_tid_len = sizeof(struct spc_rdma_transport_id);
+ tr_id = (void *)buf;
+ return (char *)tr_id->i_port_id;
+}
+
+static struct se_node_acl *srpt_alloc_fabric_acl(struct se_portal_group *se_tpg)
+{
+ struct srpt_node_acl *nacl;
+
+ nacl = kzalloc(sizeof(struct srpt_node_acl), GFP_KERNEL);
+ if (!nacl) {
+		printk(KERN_ERR "Unable to allocate struct srpt_node_acl\n");
+ return NULL;
+ }
+
+ return &nacl->nacl;
+}
+
+static void srpt_release_fabric_acl(struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl)
+{
+ struct srpt_node_acl *nacl;
+
+ nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
+ kfree(nacl);
+}
+
+static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static void srpt_release_cmd(struct se_cmd *se_cmd)
+{
+}
+
+/**
+ * srpt_shutdown_session() - Whether or not a session may be shut down.
+ */
+static int srpt_shutdown_session(struct se_session *se_sess)
+{
+ return true;
+}
+
+/**
+ * srpt_close_session() - Forcibly close a session.
+ *
+ * Callback function invoked by the TCM core to clean up sessions associated
+ * with a node ACL when the user invokes
+ * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
+ */
+static void srpt_close_session(struct se_session *se_sess)
+{
+ DECLARE_COMPLETION_ONSTACK(release_done);
+ struct srpt_rdma_ch *ch;
+ struct srpt_device *sdev;
+ int res;
+
+ ch = se_sess->fabric_sess_ptr;
+ WARN_ON(ch->sess != se_sess);
+
+ pr_debug("ch %p state %d\n", ch, srpt_get_ch_state(ch));
+
+ sdev = ch->sport->sdev;
+ spin_lock_irq(&sdev->spinlock);
+ BUG_ON(ch->release_done);
+ ch->release_done = &release_done;
+ __srpt_close_ch(ch);
+ spin_unlock_irq(&sdev->spinlock);
+
+ res = wait_for_completion_timeout(&release_done, 60 * HZ);
+ WARN_ON(res <= 0);
+}
+
+/**
+ * To do: Find out whether stop_session() has a meaning for transports
+ * other than iSCSI.
+ */
+static void srpt_stop_session(struct se_session *se_sess, int sess_sleep,
+ int conn_sleep)
+{
+}
+
+static void srpt_reset_nexus(struct se_session *sess)
+{
+ printk(KERN_ERR "This is the SRP protocol, not iSCSI\n");
+}
+
+static int srpt_sess_logged_in(struct se_session *se_sess)
+{
+ return true;
+}
+
+/**
+ * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB).
+ *
+ * A quote from RFC 4455 (SCSI-MIB) about this MIB object:
+ * This object represents an arbitrary integer used to uniquely identify a
+ * particular attached remote initiator port to a particular SCSI target port
+ * within a particular SCSI target device within a particular SCSI instance.
+ */
+static u32 srpt_sess_get_index(struct se_session *se_sess)
+{
+ return 0;
+}
+
+static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
+{
+}
+
+static u32 srpt_get_task_tag(struct se_cmd *se_cmd)
+{
+ struct srpt_send_ioctx *ioctx;
+
+ ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
+ return ioctx->tag;
+}
+
+/* Note: only used from inside debug printk's by the TCM core. */
+static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
+{
+ struct srpt_send_ioctx *ioctx;
+
+ ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
+ return srpt_get_cmd_state(ioctx);
+}
+
+static u16 srpt_set_fabric_sense_len(struct se_cmd *cmd, u32 sense_length)
+{
+ return 0;
+}
+
+static u16 srpt_get_fabric_sense_len(void)
+{
+ return 0;
+}
+
+static int srpt_is_state_remove(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+/**
+ * srpt_parse_i_port_id() - Parse an initiator port ID.
+ * @name: ASCII representation of a 128-bit initiator port ID.
+ * @i_port_id: Binary 128-bit port ID.
+ */
+static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
+{
+ const char *p;
+ unsigned len, count, leading_zero_bytes;
+ int ret, rc;
+
+ p = name;
+ if (strnicmp(p, "0x", 2) == 0)
+ p += 2;
+ ret = -EINVAL;
+ len = strlen(p);
+ if (len % 2)
+ goto out;
+ count = min(len / 2, 16U);
+ leading_zero_bytes = 16 - count;
+ memset(i_port_id, 0, leading_zero_bytes);
+ rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
+ if (rc < 0)
+ pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
+ ret = 0;
+out:
+ return ret;
+}
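+
+/*
+ * Illustrative example (not part of the original patch; the values are made
+ * up): srpt_parse_i_port_id() turns an ACL name such as "0x12345678" into an
+ * i_port_id consisting of twelve leading zero bytes followed by the bytes
+ * 0x12 0x34 0x56 0x78, while a full 32-digit name fills all 16 bytes. Names
+ * with an odd number of hex digits are rejected with -EINVAL.
+ */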
+
+/*
+ * configfs callback function invoked for
+ * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
+ */
+static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg,
+ struct config_group *group,
+ const char *name)
+{
+ struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
+ struct se_node_acl *se_nacl, *se_nacl_new;
+ struct srpt_node_acl *nacl;
+ int ret = 0;
+ u32 nexus_depth = 1;
+ u8 i_port_id[16];
+
+ if (srpt_parse_i_port_id(i_port_id, name) < 0) {
+ printk(KERN_ERR "invalid initiator port ID %s\n", name);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ se_nacl_new = srpt_alloc_fabric_acl(tpg);
+ if (!se_nacl_new) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ /*
+	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
+	 * when converting a node ACL from demo mode to explicit.
+ */
+ se_nacl = core_tpg_add_initiator_node_acl(tpg, se_nacl_new, name,
+ nexus_depth);
+ if (IS_ERR(se_nacl)) {
+ ret = PTR_ERR(se_nacl);
+ goto err;
+ }
+ /* Locate our struct srpt_node_acl and set sdev and i_port_id. */
+ nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
+ memcpy(&nacl->i_port_id[0], &i_port_id[0], 16);
+ nacl->sport = sport;
+
+ spin_lock_irq(&sport->port_acl_lock);
+ list_add_tail(&nacl->list, &sport->port_acl_list);
+ spin_unlock_irq(&sport->port_acl_lock);
+
+ return se_nacl;
+err:
+ return ERR_PTR(ret);
+}
+
+/*
+ * configfs callback function invoked for
+ * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
+ */
+static void srpt_drop_nodeacl(struct se_node_acl *se_nacl)
+{
+ struct srpt_node_acl *nacl;
+ struct srpt_device *sdev;
+ struct srpt_port *sport;
+
+ nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
+ sport = nacl->sport;
+ sdev = sport->sdev;
+ spin_lock_irq(&sport->port_acl_lock);
+ list_del(&nacl->list);
+ spin_unlock_irq(&sport->port_acl_lock);
+ core_tpg_del_initiator_node_acl(&sport->port_tpg_1, se_nacl, 1);
+ srpt_release_fabric_acl(NULL, se_nacl);
+}
+
+static ssize_t srpt_tpg_attrib_show_srp_max_rdma_size(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+
+ return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
+}
+
+static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &val);
+ if (ret < 0) {
+ pr_err("strict_strtoul() failed with ret: %d\n", ret);
+ return -EINVAL;
+ }
+ if (val > MAX_SRPT_RDMA_SIZE) {
+ pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
+ MAX_SRPT_RDMA_SIZE);
+ return -EINVAL;
+ }
+ if (val < DEFAULT_MAX_RDMA_SIZE) {
+ pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
+ val, DEFAULT_MAX_RDMA_SIZE);
+ return -EINVAL;
+ }
+ sport->port_attrib.srp_max_rdma_size = val;
+
+ return count;
+}
+
+TF_TPG_ATTRIB_ATTR(srpt, srp_max_rdma_size, S_IRUGO | S_IWUSR);
+
+static ssize_t srpt_tpg_attrib_show_srp_max_rsp_size(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+
+ return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
+}
+
+static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &val);
+ if (ret < 0) {
+ pr_err("strict_strtoul() failed with ret: %d\n", ret);
+ return -EINVAL;
+ }
+ if (val > MAX_SRPT_RSP_SIZE) {
+ pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
+ MAX_SRPT_RSP_SIZE);
+ return -EINVAL;
+ }
+ if (val < MIN_MAX_RSP_SIZE) {
+ pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
+ MIN_MAX_RSP_SIZE);
+ return -EINVAL;
+ }
+ sport->port_attrib.srp_max_rsp_size = val;
+
+ return count;
+}
+
+TF_TPG_ATTRIB_ATTR(srpt, srp_max_rsp_size, S_IRUGO | S_IWUSR);
+
+static ssize_t srpt_tpg_attrib_show_srp_sq_size(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+
+ return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
+}
+
+static ssize_t srpt_tpg_attrib_store_srp_sq_size(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &val);
+ if (ret < 0) {
+ pr_err("strict_strtoul() failed with ret: %d\n", ret);
+ return -EINVAL;
+ }
+ if (val > MAX_SRPT_SRQ_SIZE) {
+ pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
+ MAX_SRPT_SRQ_SIZE);
+ return -EINVAL;
+ }
+ if (val < MIN_SRPT_SRQ_SIZE) {
+ pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
+ MIN_SRPT_SRQ_SIZE);
+ return -EINVAL;
+ }
+ sport->port_attrib.srp_sq_size = val;
+
+ return count;
+}
+
+TF_TPG_ATTRIB_ATTR(srpt, srp_sq_size, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
+ &srpt_tpg_attrib_srp_max_rdma_size.attr,
+ &srpt_tpg_attrib_srp_max_rsp_size.attr,
+ &srpt_tpg_attrib_srp_sq_size.attr,
+ NULL,
+};
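+
+/*
+ * Editorial sketch of the resulting configfs layout (directory names other
+ * than "attrib" are the placeholders used elsewhere in this file): the three
+ * attributes above appear as
+ *   /sys/kernel/config/target/$driver/$port/$tpg/attrib/srp_max_rdma_size
+ *   /sys/kernel/config/target/$driver/$port/$tpg/attrib/srp_max_rsp_size
+ *   /sys/kernel/config/target/$driver/$port/$tpg/attrib/srp_sq_size
+ * and the "enable" attribute defined below appears directly under $tpg.
+ */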
+
+static ssize_t srpt_tpg_show_enable(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+
+	return snprintf(page, PAGE_SIZE, "%d\n", sport->enabled ? 1 : 0);
+}
+
+static ssize_t srpt_tpg_store_enable(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+ unsigned long tmp;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &tmp);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n");
+ return -EINVAL;
+ }
+
+ if ((tmp != 0) && (tmp != 1)) {
+ printk(KERN_ERR "Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
+ return -EINVAL;
+ }
+ if (tmp == 1)
+ sport->enabled = true;
+ else
+ sport->enabled = false;
+
+ return count;
+}
+
+TF_TPG_BASE_ATTR(srpt, enable, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *srpt_tpg_attrs[] = {
+ &srpt_tpg_enable.attr,
+ NULL,
+};
+
+/**
+ * configfs callback invoked for
+ * mkdir /sys/kernel/config/target/$driver/$port/$tpg
+ */
+static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
+ struct config_group *group,
+ const char *name)
+{
+ struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
+ int res;
+
+ /* Initialize sport->port_wwn and sport->port_tpg_1 */
+ res = core_tpg_register(&srpt_target->tf_ops, &sport->port_wwn,
+ &sport->port_tpg_1, sport, TRANSPORT_TPG_TYPE_NORMAL);
+ if (res)
+ return ERR_PTR(res);
+
+ return &sport->port_tpg_1;
+}
+
+/**
+ * configfs callback invoked for
+ * rmdir /sys/kernel/config/target/$driver/$port/$tpg
+ */
+static void srpt_drop_tpg(struct se_portal_group *tpg)
+{
+ struct srpt_port *sport = container_of(tpg,
+ struct srpt_port, port_tpg_1);
+
+ sport->enabled = false;
+ core_tpg_deregister(&sport->port_tpg_1);
+}
+
+/**
+ * configfs callback invoked for
+ * mkdir /sys/kernel/config/target/$driver/$port
+ */
+static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct srpt_port *sport;
+ int ret;
+
+ sport = srpt_lookup_port(name);
+ pr_debug("make_tport(%s)\n", name);
+ ret = -EINVAL;
+ if (!sport)
+ goto err;
+
+ return &sport->port_wwn;
+
+err:
+ return ERR_PTR(ret);
+}
+
+/**
+ * configfs callback invoked for
+ * rmdir /sys/kernel/config/target/$driver/$port
+ */
+static void srpt_drop_tport(struct se_wwn *wwn)
+{
+ struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
+
+	pr_debug("drop_tport(%s)\n", config_item_name(&sport->port_wwn.wwn_group.cg_item));
+}
+
+static ssize_t srpt_wwn_show_attr_version(struct target_fabric_configfs *tf,
+ char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
+}
+
+TF_WWN_ATTR_RO(srpt, version);
+
+static struct configfs_attribute *srpt_wwn_attrs[] = {
+ &srpt_wwn_version.attr,
+ NULL,
+};
+
+static struct target_core_fabric_ops srpt_template = {
+ .get_fabric_name = srpt_get_fabric_name,
+ .get_fabric_proto_ident = srpt_get_fabric_proto_ident,
+ .tpg_get_wwn = srpt_get_fabric_wwn,
+ .tpg_get_tag = srpt_get_tag,
+ .tpg_get_default_depth = srpt_get_default_depth,
+ .tpg_get_pr_transport_id = srpt_get_pr_transport_id,
+ .tpg_get_pr_transport_id_len = srpt_get_pr_transport_id_len,
+ .tpg_parse_pr_out_transport_id = srpt_parse_pr_out_transport_id,
+ .tpg_check_demo_mode = srpt_check_false,
+ .tpg_check_demo_mode_cache = srpt_check_true,
+ .tpg_check_demo_mode_write_protect = srpt_check_true,
+ .tpg_check_prod_mode_write_protect = srpt_check_false,
+ .tpg_alloc_fabric_acl = srpt_alloc_fabric_acl,
+ .tpg_release_fabric_acl = srpt_release_fabric_acl,
+ .tpg_get_inst_index = srpt_tpg_get_inst_index,
+ .release_cmd = srpt_release_cmd,
+ .check_stop_free = srpt_check_stop_free,
+ .shutdown_session = srpt_shutdown_session,
+ .close_session = srpt_close_session,
+ .stop_session = srpt_stop_session,
+ .fall_back_to_erl0 = srpt_reset_nexus,
+ .sess_logged_in = srpt_sess_logged_in,
+ .sess_get_index = srpt_sess_get_index,
+ .sess_get_initiator_sid = NULL,
+ .write_pending = srpt_write_pending,
+ .write_pending_status = srpt_write_pending_status,
+ .set_default_node_attributes = srpt_set_default_node_attrs,
+ .get_task_tag = srpt_get_task_tag,
+ .get_cmd_state = srpt_get_tcm_cmd_state,
+ .queue_data_in = srpt_queue_response,
+ .queue_status = srpt_queue_status,
+ .queue_tm_rsp = srpt_queue_response,
+ .get_fabric_sense_len = srpt_get_fabric_sense_len,
+ .set_fabric_sense_len = srpt_set_fabric_sense_len,
+ .is_state_remove = srpt_is_state_remove,
+ /*
+ * Setup function pointers for generic logic in
+ * target_core_fabric_configfs.c
+ */
+ .fabric_make_wwn = srpt_make_tport,
+ .fabric_drop_wwn = srpt_drop_tport,
+ .fabric_make_tpg = srpt_make_tpg,
+ .fabric_drop_tpg = srpt_drop_tpg,
+ .fabric_post_link = NULL,
+ .fabric_pre_unlink = NULL,
+ .fabric_make_np = NULL,
+ .fabric_drop_np = NULL,
+ .fabric_make_nodeacl = srpt_make_nodeacl,
+ .fabric_drop_nodeacl = srpt_drop_nodeacl,
+};
+
+/**
+ * srpt_init_module() - Kernel module initialization.
+ *
+ * Note: Since ib_register_client() registers callback functions, and since at
+ * least one of these callback functions (srpt_add_one()) calls target core
+ * functions, this driver must be registered with the target core before
+ * ib_register_client() is called.
+ */
+static int __init srpt_init_module(void)
+{
+ int ret;
+
+ ret = -EINVAL;
+ if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
+ printk(KERN_ERR "invalid value %d for kernel module parameter"
+ " srp_max_req_size -- must be at least %d.\n",
+ srp_max_req_size, MIN_MAX_REQ_SIZE);
+ goto out;
+ }
+
+ if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
+ || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
+ printk(KERN_ERR "invalid value %d for kernel module parameter"
+ " srpt_srq_size -- must be in the range [%d..%d].\n",
+ srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
+ goto out;
+ }
+
+ spin_lock_init(&srpt_dev_lock);
+ INIT_LIST_HEAD(&srpt_dev_list);
+
+ ret = -ENODEV;
+ srpt_target = target_fabric_configfs_init(THIS_MODULE, "srpt");
+ if (!srpt_target) {
+ printk(KERN_ERR "couldn't register\n");
+ goto out;
+ }
+
+ srpt_target->tf_ops = srpt_template;
+
+ /* Enable SG chaining */
+ srpt_target->tf_ops.task_sg_chaining = true;
+
+ /*
+ * Set up default attribute lists.
+ */
+ srpt_target->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = srpt_wwn_attrs;
+ srpt_target->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = srpt_tpg_attrs;
+ srpt_target->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = srpt_tpg_attrib_attrs;
+ srpt_target->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+ srpt_target->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+ srpt_target->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ srpt_target->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ srpt_target->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ srpt_target->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+
+ ret = target_fabric_configfs_register(srpt_target);
+ if (ret < 0) {
+ printk(KERN_ERR "couldn't register\n");
+ goto out_free_target;
+ }
+
+ ret = ib_register_client(&srpt_client);
+ if (ret) {
+ printk(KERN_ERR "couldn't register IB client\n");
+ goto out_unregister_target;
+ }
+
+ return 0;
+
+out_unregister_target:
+ target_fabric_configfs_deregister(srpt_target);
+ srpt_target = NULL;
+out_free_target:
+ if (srpt_target)
+ target_fabric_configfs_free(srpt_target);
+out:
+ return ret;
+}
+
+static void __exit srpt_cleanup_module(void)
+{
+ ib_unregister_client(&srpt_client);
+ target_fabric_configfs_deregister(srpt_target);
+ srpt_target = NULL;
+}
+
+module_init(srpt_init_module);
+module_exit(srpt_cleanup_module);
diff --git a/trunk/drivers/infiniband/ulp/srpt/ib_srpt.h b/trunk/drivers/infiniband/ulp/srpt/ib_srpt.h
new file mode 100644
index 000000000000..b4b4bbcd7f16
--- /dev/null
+++ b/trunk/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -0,0 +1,444 @@
+/*
+ * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
+ * Copyright (C) 2009 - 2010 Bart Van Assche .
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef IB_SRPT_H
+#define IB_SRPT_H
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include
+
+#include "ib_dm_mad.h"
+
+/*
+ * The prefix the ServiceName field must start with in the device management
+ * ServiceEntries attribute pair. See also the SRP specification.
+ */
+#define SRP_SERVICE_NAME_PREFIX "SRP.T10:"
+
+enum {
+ /*
+ * SRP IOControllerProfile attributes for SRP target ports that have
+	 * not been defined in <scsi/srp.h>. Source: section B.7, table B.7
+ * in the SRP specification.
+ */
+ SRP_PROTOCOL = 0x0108,
+ SRP_PROTOCOL_VERSION = 0x0001,
+ SRP_IO_SUBCLASS = 0x609e,
+ SRP_SEND_TO_IOC = 0x01,
+ SRP_SEND_FROM_IOC = 0x02,
+ SRP_RDMA_READ_FROM_IOC = 0x08,
+ SRP_RDMA_WRITE_FROM_IOC = 0x20,
+
+ /*
+ * srp_login_cmd.req_flags bitmasks. See also table 9 in the SRP
+ * specification.
+ */
+ SRP_MTCH_ACTION = 0x03, /* MULTI-CHANNEL ACTION */
+ SRP_LOSOLNT = 0x10, /* logout solicited notification */
+ SRP_CRSOLNT = 0x20, /* credit request solicited notification */
+ SRP_AESOLNT = 0x40, /* asynchronous event solicited notification */
+
+ /*
+ * srp_cmd.sol_nt / srp_tsk_mgmt.sol_not bitmasks. See also tables
+ * 18 and 20 in the SRP specification.
+ */
+ SRP_SCSOLNT = 0x02, /* SCSOLNT = successful solicited notification */
+ SRP_UCSOLNT = 0x04, /* UCSOLNT = unsuccessful solicited notification */
+
+ /*
+ * srp_rsp.sol_not / srp_t_logout.sol_not bitmasks. See also tables
+ * 16 and 22 in the SRP specification.
+ */
+ SRP_SOLNT = 0x01, /* SOLNT = solicited notification */
+
+ /* See also table 24 in the SRP specification. */
+ SRP_TSK_MGMT_SUCCESS = 0x00,
+ SRP_TSK_MGMT_FUNC_NOT_SUPP = 0x04,
+ SRP_TSK_MGMT_FAILED = 0x05,
+
+ /* See also table 21 in the SRP specification. */
+ SRP_CMD_SIMPLE_Q = 0x0,
+ SRP_CMD_HEAD_OF_Q = 0x1,
+ SRP_CMD_ORDERED_Q = 0x2,
+ SRP_CMD_ACA = 0x4,
+
+ SRP_LOGIN_RSP_MULTICHAN_NO_CHAN = 0x0,
+ SRP_LOGIN_RSP_MULTICHAN_TERMINATED = 0x1,
+ SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,
+
+ SRPT_DEF_SG_TABLESIZE = 128,
+ SRPT_DEF_SG_PER_WQE = 16,
+
+ MIN_SRPT_SQ_SIZE = 16,
+ DEF_SRPT_SQ_SIZE = 4096,
+ SRPT_RQ_SIZE = 128,
+ MIN_SRPT_SRQ_SIZE = 4,
+ DEFAULT_SRPT_SRQ_SIZE = 4095,
+ MAX_SRPT_SRQ_SIZE = 65535,
+ MAX_SRPT_RDMA_SIZE = 1U << 24,
+ MAX_SRPT_RSP_SIZE = 1024,
+
+ MIN_MAX_REQ_SIZE = 996,
+ DEFAULT_MAX_REQ_SIZE
+ = sizeof(struct srp_cmd)/*48*/
+ + sizeof(struct srp_indirect_buf)/*20*/
+ + 128 * sizeof(struct srp_direct_buf)/*16*/,
+
+ MIN_MAX_RSP_SIZE = sizeof(struct srp_rsp)/*36*/ + 4,
+ DEFAULT_MAX_RSP_SIZE = 256, /* leaves 220 bytes for sense data */
+
+ DEFAULT_MAX_RDMA_SIZE = 65536,
+};
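+
+/*
+ * Editorial note (arithmetic implied by the sizes quoted in the comments
+ * above, not a new definition): DEFAULT_MAX_REQ_SIZE works out to
+ * 48 + 20 + 128 * 16 = 2116 bytes, MIN_MAX_RSP_SIZE to 36 + 4 = 40 bytes,
+ * and DEFAULT_MAX_RSP_SIZE = 256 indeed leaves 256 - 36 = 220 bytes for
+ * sense data.
+ */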
+
+enum srpt_opcode {
+ SRPT_RECV,
+ SRPT_SEND,
+ SRPT_RDMA_MID,
+ SRPT_RDMA_ABORT,
+ SRPT_RDMA_READ_LAST,
+ SRPT_RDMA_WRITE_LAST,
+};
+
+static inline u64 encode_wr_id(u8 opcode, u32 idx)
+{
+ return ((u64)opcode << 32) | idx;
+}
+static inline enum srpt_opcode opcode_from_wr_id(u64 wr_id)
+{
+ return wr_id >> 32;
+}
+static inline u32 idx_from_wr_id(u64 wr_id)
+{
+ return (u32)wr_id;
+}
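+
+/*
+ * Illustrative helper, not part of the original driver: shows how a work
+ * request ID round-trips through encode_wr_id() and the decoding helpers
+ * above. The index value 5 is an arbitrary example.
+ */
+static inline bool srpt_wr_id_example(void)
+{
+	u64 wr_id = encode_wr_id(SRPT_RECV, 5);
+
+	/* The opcode lives in bits 63..32 and the index in bits 31..0. */
+	return opcode_from_wr_id(wr_id) == SRPT_RECV &&
+	       idx_from_wr_id(wr_id) == 5;
+}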
+
+struct rdma_iu {
+ u64 raddr;
+ u32 rkey;
+ struct ib_sge *sge;
+ u32 sge_cnt;
+ int mem_id;
+};
+
+/**
+ * enum srpt_command_state - SCSI command state managed by SRPT.
+ * @SRPT_STATE_NEW: New command arrived and is being processed.
+ * @SRPT_STATE_NEED_DATA: Processing a write or bidir command and waiting
+ * for data arrival.
+ * @SRPT_STATE_DATA_IN: Data for the write or bidir command arrived and is
+ * being processed.
+ * @SRPT_STATE_CMD_RSP_SENT: SRP_RSP for SRP_CMD has been sent.
+ * @SRPT_STATE_MGMT: Processing a SCSI task management command.
+ * @SRPT_STATE_MGMT_RSP_SENT: SRP_RSP for SRP_TSK_MGMT has been sent.
+ * @SRPT_STATE_DONE: Command processing finished successfully, command
+ * processing has been aborted or command processing
+ * failed.
+ */
+enum srpt_command_state {
+ SRPT_STATE_NEW = 0,
+ SRPT_STATE_NEED_DATA = 1,
+ SRPT_STATE_DATA_IN = 2,
+ SRPT_STATE_CMD_RSP_SENT = 3,
+ SRPT_STATE_MGMT = 4,
+ SRPT_STATE_MGMT_RSP_SENT = 5,
+ SRPT_STATE_DONE = 6,
+};
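+
+/*
+ * Sketch of the state sequences implied by the documentation above (editorial
+ * addition, not exhaustive):
+ *   read command:  SRPT_STATE_NEW -> SRPT_STATE_CMD_RSP_SENT -> SRPT_STATE_DONE
+ *   write command: SRPT_STATE_NEW -> SRPT_STATE_NEED_DATA -> SRPT_STATE_DATA_IN
+ *                  -> SRPT_STATE_CMD_RSP_SENT -> SRPT_STATE_DONE
+ *   task mgmt:     SRPT_STATE_MGMT -> SRPT_STATE_MGMT_RSP_SENT -> SRPT_STATE_DONE
+ */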
+
+/**
+ * struct srpt_ioctx - Shared SRPT I/O context information.
+ * @buf: Pointer to the buffer.
+ * @dma: DMA address of the buffer.
+ * @index: Index of the I/O context in its ioctx_ring array.
+ */
+struct srpt_ioctx {
+ void *buf;
+ dma_addr_t dma;
+ uint32_t index;
+};
+
+/**
+ * struct srpt_recv_ioctx - SRPT receive I/O context.
+ * @ioctx: See above.
+ * @wait_list: Node for insertion in srpt_rdma_ch.cmd_wait_list.
+ */
+struct srpt_recv_ioctx {
+ struct srpt_ioctx ioctx;
+ struct list_head wait_list;
+};
+
+/**
+ * struct srpt_send_ioctx - SRPT send I/O context.
+ * @ioctx: See above.
+ * @ch: Channel pointer.
+ * @free_list: Node in srpt_rdma_ch.free_list.
+ * @n_rbuf: Number of data buffers in the received SRP command.
+ * @rbufs: Pointer to SRP data buffer array.
+ * @single_rbuf: SRP data buffer if the command has only a single buffer.
+ * @sg: Pointer to sg-list associated with this I/O context.
+ * @sg_cnt: SG-list size.
+ * @mapped_sg_count: ib_dma_map_sg() return value.
+ * @n_rdma_ius: Number of elements in the rdma_ius array.
+ * @rdma_ius: Array with information about the RDMA mapping.
+ * @tag: Tag of the received SRP information unit.
+ * @spinlock: Protects 'state'.
+ * @state: I/O context state.
+ * @rdma_aborted: If initiating a multipart RDMA transfer failed, whether
+ * the already initiated transfers have finished.
+ * @cmd: Target core command data structure.
+ * @sense_data: SCSI sense data.
+ */
+struct srpt_send_ioctx {
+ struct srpt_ioctx ioctx;
+ struct srpt_rdma_ch *ch;
+ struct kref kref;
+ struct rdma_iu *rdma_ius;
+ struct srp_direct_buf *rbufs;
+ struct srp_direct_buf single_rbuf;
+ struct scatterlist *sg;
+ struct list_head free_list;
+ spinlock_t spinlock;
+ enum srpt_command_state state;
+ bool rdma_aborted;
+ struct se_cmd cmd;
+ struct completion tx_done;
+ u64 tag;
+ int sg_cnt;
+ int mapped_sg_count;
+ u16 n_rdma_ius;
+ u8 n_rdma;
+ u8 n_rbuf;
+ bool queue_status_only;
+ u8 sense_data[SCSI_SENSE_BUFFERSIZE];
+};
+
+/**
+ * enum rdma_ch_state - SRP channel state.
+ * @CH_CONNECTING: QP is in RTR state; waiting for RTU.
+ * @CH_LIVE: QP is in RTS state.
+ * @CH_DISCONNECTING: DREQ has been received; waiting for DREP
+ *                    or DREQ has been sent and waiting for DREP.
+ * @CH_DRAINING: QP is in ERR state; waiting for last WQE event.
+ * @CH_RELEASING: Last WQE event has been received; releasing resources.
+ */
+enum rdma_ch_state {
+ CH_CONNECTING,
+ CH_LIVE,
+ CH_DISCONNECTING,
+ CH_DRAINING,
+ CH_RELEASING
+};
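+
+/*
+ * Editorial sketch of the usual channel lifetime implied by the states above:
+ * CH_CONNECTING (login accepted, QP in RTR) -> CH_LIVE (RTU received, QP in
+ * RTS) -> CH_DISCONNECTING (DREQ sent or received) -> CH_DRAINING (QP in ERR,
+ * waiting for the last WQE event) -> CH_RELEASING (resources being freed).
+ */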
+
+/**
+ * struct srpt_rdma_ch - RDMA channel.
+ * @wait_queue: Allows the kernel thread to wait for more work.
+ * @thread: Kernel thread that processes the IB queues associated with
+ * the channel.
+ * @cm_id: IB CM ID associated with the channel.
+ * @qp: IB queue pair used for communicating over this channel.
+ * @cq: IB completion queue for this channel.
+ * @rq_size: IB receive queue size.
+ * @rsp_size:      IB response message size in bytes.
+ * @sq_wr_avail: number of work requests available in the send queue.
+ * @sport: pointer to the information of the HCA port used by this
+ * channel.
+ * @i_port_id: 128-bit initiator port identifier copied from SRP_LOGIN_REQ.
+ * @t_port_id: 128-bit target port identifier copied from SRP_LOGIN_REQ.
+ * @max_ti_iu_len: maximum target-to-initiator information unit length.
+ * @req_lim: request limit: maximum number of requests that may be sent
+ * by the initiator without having received a response.
+ * @req_lim_delta: Number of credits not yet sent back to the initiator.
+ * @spinlock: Protects free_list and state.
+ * @free_list: Head of list with free send I/O contexts.
+ * @state: channel state. See also enum rdma_ch_state.
+ * @ioctx_ring: Send ring.
+ * @wc: IB work completion array for srpt_process_completion().
+ * @list: Node for insertion in the srpt_device.rch_list list.
+ * @cmd_wait_list: List of SCSI commands that arrived before the RTU event. This
+ * list contains struct srpt_ioctx elements and is protected
+ * against concurrent modification by the cm_id spinlock.
+ * @sess: Session information associated with this SRP channel.
+ * @sess_name: Session name.
+ * @release_work: Allows scheduling of srpt_release_channel().
+ * @release_done: Enables waiting for srpt_release_channel() completion.
+ */
+struct srpt_rdma_ch {
+ wait_queue_head_t wait_queue;
+ struct task_struct *thread;
+ struct ib_cm_id *cm_id;
+ struct ib_qp *qp;
+ struct ib_cq *cq;
+ int rq_size;
+ u32 rsp_size;
+ atomic_t sq_wr_avail;
+ struct srpt_port *sport;
+ u8 i_port_id[16];
+ u8 t_port_id[16];
+ int max_ti_iu_len;
+ atomic_t req_lim;
+ atomic_t req_lim_delta;
+ spinlock_t spinlock;
+ struct list_head free_list;
+ enum rdma_ch_state state;
+ struct srpt_send_ioctx **ioctx_ring;
+ struct ib_wc wc[16];
+ struct list_head list;
+ struct list_head cmd_wait_list;
+ struct se_session *sess;
+ u8 sess_name[36];
+ struct work_struct release_work;
+ struct completion *release_done;
+};
+
+/**
+ * struct srpt_port_attrib - Attributes for SRPT port
+ * @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections.
+ * @srp_max_rsp_size: Maximum size of SRP response messages in bytes.
+ * @srp_sq_size: Per-channel send queue (SQ) size for new connections.
+ */
+struct srpt_port_attrib {
+ u32 srp_max_rdma_size;
+ u32 srp_max_rsp_size;
+ u32 srp_sq_size;
+};
+
+/**
+ * struct srpt_port - Information associated by SRPT with a single IB port.
+ * @sdev: backpointer to the HCA information.
+ * @mad_agent: per-port management datagram processing information.
+ * @enabled: Whether or not this target port is enabled.
+ * @port_guid: ASCII representation of Port GUID
+ * @port: one-based port number.
+ * @sm_lid: cached value of the port's sm_lid.
+ * @lid: cached value of the port's lid.
+ * @gid: cached value of the port's gid.
+ * @port_acl_lock: Spinlock protecting port_acl_list.
+ * @work: work structure for refreshing the aforementioned cached values.
+ * @port_tpg_1: Target portal group 1 data.
+ * @port_wwn: Target core WWN data.
+ * @port_acl_list: Head of the list with all node ACLs for this port.
+ */
+struct srpt_port {
+ struct srpt_device *sdev;
+ struct ib_mad_agent *mad_agent;
+ bool enabled;
+ u8 port_guid[64];
+ u8 port;
+ u16 sm_lid;
+ u16 lid;
+ union ib_gid gid;
+ spinlock_t port_acl_lock;
+ struct work_struct work;
+ struct se_portal_group port_tpg_1;
+ struct se_wwn port_wwn;
+ struct list_head port_acl_list;
+ struct srpt_port_attrib port_attrib;
+};
+
+/**
+ * struct srpt_device - Information associated by SRPT with a single HCA.
+ * @device: Backpointer to the struct ib_device managed by the IB core.
+ * @pd: IB protection domain.
+ * @mr: L_Key (local key) with write access to all local memory.
+ * @srq: Per-HCA SRQ (shared receive queue).
+ * @cm_id: Connection identifier.
+ * @dev_attr: Attributes of the InfiniBand device as obtained during the
+ * ib_client.add() callback.
+ * @srq_size: SRQ size.
+ * @ioctx_ring: Receive I/O context ring used by the per-HCA SRQ.
+ * @rch_list: Per-device channel list -- see also srpt_rdma_ch.list.
+ * @ch_releaseQ: Enables waiting for removal from rch_list.
+ * @spinlock: Protects rch_list and tpg.
+ * @port: Information about the ports owned by this HCA.
+ * @event_handler: Per-HCA asynchronous IB event handler.
+ * @list: Node in srpt_dev_list.
+ */
+struct srpt_device {
+ struct ib_device *device;
+ struct ib_pd *pd;
+ struct ib_mr *mr;
+ struct ib_srq *srq;
+ struct ib_cm_id *cm_id;
+ struct ib_device_attr dev_attr;
+ int srq_size;
+ struct srpt_recv_ioctx **ioctx_ring;
+ struct list_head rch_list;
+ wait_queue_head_t ch_releaseQ;
+ spinlock_t spinlock;
+ struct srpt_port port[2];
+ struct ib_event_handler event_handler;
+ struct list_head list;
+};
+
+/**
+ * struct srpt_node_acl - Per-initiator ACL data (managed via configfs).
+ * @i_port_id: 128-bit SRP initiator port ID.
+ * @sport: port information.
+ * @nacl: Target core node ACL information.
+ * @list: Element of the per-HCA ACL list.
+ */
+struct srpt_node_acl {
+ u8 i_port_id[16];
+ struct srpt_port *sport;
+ struct se_node_acl nacl;
+ struct list_head list;
+};
+
+/*
+ * SRP-releated SCSI persistent reservation definitions.
+ *
+ * See also SPC4r28, section 7.6.1 (Protocol specific parameters introduction).
+ * See also SPC4r28, section 7.6.4.5 (TransportID for initiator ports using
+ * SCSI over an RDMA interface).
+ */
+
+enum {
+ SCSI_TRANSPORTID_PROTOCOLID_SRP = 4,
+};
+
+struct spc_rdma_transport_id {
+ uint8_t protocol_identifier;
+ uint8_t reserved[7];
+ uint8_t i_port_id[16];
+};
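+
+/*
+ * Editorial note: with this layout the TransportID built by
+ * srpt_get_pr_transport_id() in ib_srpt.c is 1 + 7 + 16 = 24 bytes long
+ * (protocol_identifier = SCSI_TRANSPORTID_PROTOCOLID_SRP, seven reserved
+ * bytes, then the 128-bit initiator port ID), and
+ * srpt_get_pr_transport_id_len() reports this size with format code 0.
+ */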
+
+#endif /* IB_SRPT_H */
diff --git a/trunk/drivers/leds/Kconfig b/trunk/drivers/leds/Kconfig
index c957c344233f..9ca28fced2b9 100644
--- a/trunk/drivers/leds/Kconfig
+++ b/trunk/drivers/leds/Kconfig
@@ -403,6 +403,13 @@ config LEDS_MAX8997
This option enables support for on-chip LED drivers on
MAXIM MAX8997 PMIC.
+config LEDS_OT200
+ tristate "LED support for the Bachmann OT200"
+ depends on LEDS_CLASS && HAS_IOMEM
+ help
+ This option enables support for the LEDs on the Bachmann OT200.
+ Say Y to enable LEDs on the Bachmann OT200.
+
config LEDS_TRIGGERS
bool "LED Trigger support"
depends on LEDS_CLASS
diff --git a/trunk/drivers/leds/Makefile b/trunk/drivers/leds/Makefile
index b8a9723477f0..1fc6875a8b20 100644
--- a/trunk/drivers/leds/Makefile
+++ b/trunk/drivers/leds/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o
obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o
obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
+obj-$(CONFIG_LEDS_OT200) += leds-ot200.o
obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
diff --git a/trunk/drivers/leds/leds-ot200.c b/trunk/drivers/leds/leds-ot200.c
new file mode 100644
index 000000000000..c4646825a620
--- /dev/null
+++ b/trunk/drivers/leds/leds-ot200.c
@@ -0,0 +1,171 @@
+/*
+ * Bachmann ot200 LED driver.
+ *
+ * Author: Sebastian Andrzej Siewior
+ * Christian Gmeiner
+ *
+ * License: GPL as published by the FSF.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+struct ot200_led {
+ struct led_classdev cdev;
+ const char *name;
+ unsigned long port;
+ u8 mask;
+};
+
+/*
+ * The device has three LEDs on the back panel (led_err, led_init and led_run)
+ * and can handle up to seven LEDs on the front panel.
+ */
+
+static struct ot200_led leds[] = {
+ {
+ .name = "led_run",
+ .port = 0x5a,
+ .mask = BIT(0),
+ },
+ {
+ .name = "led_init",
+ .port = 0x5a,
+ .mask = BIT(1),
+ },
+ {
+ .name = "led_err",
+ .port = 0x5a,
+ .mask = BIT(2),
+ },
+ {
+ .name = "led_1",
+ .port = 0x49,
+ .mask = BIT(7),
+ },
+ {
+ .name = "led_2",
+ .port = 0x49,
+ .mask = BIT(6),
+ },
+ {
+ .name = "led_3",
+ .port = 0x49,
+ .mask = BIT(5),
+ },
+ {
+ .name = "led_4",
+ .port = 0x49,
+ .mask = BIT(4),
+ },
+ {
+ .name = "led_5",
+ .port = 0x49,
+ .mask = BIT(3),
+ },
+ {
+ .name = "led_6",
+ .port = 0x49,
+ .mask = BIT(2),
+ },
+ {
+ .name = "led_7",
+ .port = 0x49,
+ .mask = BIT(1),
+ }
+};
+
+static DEFINE_SPINLOCK(value_lock);
+
+/*
+ * We need to store the current LED states, as it is not
+ * possible to read the current LED state via inb().
+ */
+static u8 leds_back;
+static u8 leds_front;
+
+static void ot200_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct ot200_led *led = container_of(led_cdev, struct ot200_led, cdev);
+ u8 *val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&value_lock, flags);
+
+ if (led->port == 0x49)
+ val = &leds_front;
+ else if (led->port == 0x5a)
+ val = &leds_back;
+ else
+ BUG();
+
+ if (value == LED_OFF)
+ *val &= ~led->mask;
+ else
+ *val |= led->mask;
+
+ outb(*val, led->port);
+ spin_unlock_irqrestore(&value_lock, flags);
+}
+
+static int __devinit ot200_led_probe(struct platform_device *pdev)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(leds); i++) {
+
+ leds[i].cdev.name = leds[i].name;
+ leds[i].cdev.brightness_set = ot200_led_brightness_set;
+
+ ret = led_classdev_register(&pdev->dev, &leds[i].cdev);
+ if (ret < 0)
+ goto err;
+ }
+
+ leds_front = 0; /* turn off all front leds */
+ leds_back = BIT(1); /* turn on init led */
+ outb(leds_front, 0x49);
+ outb(leds_back, 0x5a);
+
+ return 0;
+
+err:
+ for (i = i - 1; i >= 0; i--)
+ led_classdev_unregister(&leds[i].cdev);
+
+ return ret;
+}
+
+static int __devexit ot200_led_remove(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(leds); i++)
+ led_classdev_unregister(&leds[i].cdev);
+
+ return 0;
+}
+
+static struct platform_driver ot200_led_driver = {
+ .probe = ot200_led_probe,
+ .remove = __devexit_p(ot200_led_remove),
+ .driver = {
+ .name = "leds-ot200",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(ot200_led_driver);
+
+MODULE_AUTHOR("Sebastian A. Siewior ");
+MODULE_DESCRIPTION("ot200 LED driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-ot200");
diff --git a/trunk/drivers/media/common/tuners/tuner-xc2028.c b/trunk/drivers/media/common/tuners/tuner-xc2028.c
index 27555995f7e4..b5ee3ebfcfca 100644
--- a/trunk/drivers/media/common/tuners/tuner-xc2028.c
+++ b/trunk/drivers/media/common/tuners/tuner-xc2028.c
@@ -24,6 +24,21 @@
#include
#include "dvb_frontend.h"
+/* Registers (Write-only) */
+#define XREG_INIT 0x00
+#define XREG_RF_FREQ 0x02
+#define XREG_POWER_DOWN 0x08
+
+/* Registers (Read-only) */
+#define XREG_FREQ_ERROR 0x01
+#define XREG_LOCK 0x02
+#define XREG_VERSION 0x04
+#define XREG_PRODUCT_ID 0x08
+#define XREG_HSYNC_FREQ 0x10
+#define XREG_FRAME_LINES 0x20
+#define XREG_SNR 0x40
+
+#define XREG_ADC_ENV 0x0100
static int debug;
module_param(debug, int, 0644);
@@ -885,7 +900,7 @@ static int xc2028_signal(struct dvb_frontend *fe, u16 *strength)
mutex_lock(&priv->lock);
/* Sync Lock Indicator */
- rc = xc2028_get_reg(priv, 0x0002, &frq_lock);
+ rc = xc2028_get_reg(priv, XREG_LOCK, &frq_lock);
if (rc < 0)
goto ret;
@@ -894,7 +909,7 @@ static int xc2028_signal(struct dvb_frontend *fe, u16 *strength)
signal = 1 << 11;
/* Get SNR of the video signal */
- rc = xc2028_get_reg(priv, 0x0040, &signal);
+ rc = xc2028_get_reg(priv, XREG_SNR, &signal);
if (rc < 0)
goto ret;
@@ -1019,9 +1034,9 @@ static int generic_set_freq(struct dvb_frontend *fe, u32 freq /* in HZ */,
/* CMD= Set frequency */
if (priv->firm_version < 0x0202)
- rc = send_seq(priv, {0x00, 0x02, 0x00, 0x00});
+ rc = send_seq(priv, {0x00, XREG_RF_FREQ, 0x00, 0x00});
else
- rc = send_seq(priv, {0x80, 0x02, 0x00, 0x00});
+ rc = send_seq(priv, {0x80, XREG_RF_FREQ, 0x00, 0x00});
if (rc < 0)
goto ret;
@@ -1201,9 +1216,9 @@ static int xc2028_sleep(struct dvb_frontend *fe)
mutex_lock(&priv->lock);
if (priv->firm_version < 0x0202)
- rc = send_seq(priv, {0x00, 0x08, 0x00, 0x00});
+ rc = send_seq(priv, {0x00, XREG_POWER_DOWN, 0x00, 0x00});
else
- rc = send_seq(priv, {0x80, 0x08, 0x00, 0x00});
+ rc = send_seq(priv, {0x80, XREG_POWER_DOWN, 0x00, 0x00});
priv->cur_fw.type = 0; /* need firmware reload */
diff --git a/trunk/drivers/media/common/tuners/xc4000.c b/trunk/drivers/media/common/tuners/xc4000.c
index d218c1d68c33..68397110b7d9 100644
--- a/trunk/drivers/media/common/tuners/xc4000.c
+++ b/trunk/drivers/media/common/tuners/xc4000.c
@@ -154,6 +154,8 @@ struct xc4000_priv {
#define XREG_SNR 0x06
#define XREG_VERSION 0x07
#define XREG_PRODUCT_ID 0x08
+#define XREG_SIGNAL_LEVEL 0x0A
+#define XREG_NOISE_LEVEL 0x0B
/*
Basic firmware description. This will remain with
@@ -486,6 +488,16 @@ static int xc_get_quality(struct xc4000_priv *priv, u16 *quality)
return xc4000_readreg(priv, XREG_QUALITY, quality);
}
+static int xc_get_signal_level(struct xc4000_priv *priv, u16 *signal)
+{
+ return xc4000_readreg(priv, XREG_SIGNAL_LEVEL, signal);
+}
+
+static int xc_get_noise_level(struct xc4000_priv *priv, u16 *noise)
+{
+ return xc4000_readreg(priv, XREG_NOISE_LEVEL, noise);
+}
+
static u16 xc_wait_for_lock(struct xc4000_priv *priv)
{
u16 lock_state = 0;
@@ -1089,6 +1101,8 @@ static void xc_debug_dump(struct xc4000_priv *priv)
u32 hsync_freq_hz = 0;
u16 frame_lines;
u16 quality;
+ u16 signal = 0;
+ u16 noise = 0;
u8 hw_majorversion = 0, hw_minorversion = 0;
u8 fw_majorversion = 0, fw_minorversion = 0;
@@ -1119,6 +1133,12 @@ static void xc_debug_dump(struct xc4000_priv *priv)
xc_get_quality(priv, &quality);
dprintk(1, "*** Quality (0:<8dB, 7:>56dB) = %d\n", quality);
+
+ xc_get_signal_level(priv, &signal);
+ dprintk(1, "*** Signal level = -%ddB (%d)\n", signal >> 8, signal);
+
+ xc_get_noise_level(priv, &noise);
+ dprintk(1, "*** Noise level = %ddB (%d)\n", noise >> 8, noise);
}
static int xc4000_set_params(struct dvb_frontend *fe)
@@ -1432,6 +1452,71 @@ static int xc4000_set_analog_params(struct dvb_frontend *fe,
return ret;
}
+static int xc4000_get_signal(struct dvb_frontend *fe, u16 *strength)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ u16 value = 0;
+ int rc;
+
+ mutex_lock(&priv->lock);
+ rc = xc4000_readreg(priv, XREG_SIGNAL_LEVEL, &value);
+ mutex_unlock(&priv->lock);
+
+ if (rc < 0)
+ goto ret;
+
+ /* Information from real testing of the DVB-T and radio parts;
+ the coefficient for one dB is 0xff.
+ */
+ tuner_dbg("Signal strength: -%ddB (%05d)\n", value >> 8, value);
+
+ /* all known digital modes */
+ if ((priv->video_standard == XC4000_DTV6) ||
+ (priv->video_standard == XC4000_DTV7) ||
+ (priv->video_standard == XC4000_DTV7_8) ||
+ (priv->video_standard == XC4000_DTV8))
+ goto digital;
+
+ /* In analog mode the NOISE LEVEL is what matters: the signal
+ level depends only on the gain of the antenna and amplifiers
+ and says nothing about the real quality
+ of reception.
+ */
+ mutex_lock(&priv->lock);
+ rc = xc4000_readreg(priv, XREG_NOISE_LEVEL, &value);
+ mutex_unlock(&priv->lock);
+
+ tuner_dbg("Noise level: %ddB (%05d)\n", value >> 8, value);
+
+ /* highest noise level: 32dB */
+ if (value >= 0x2000) {
+ value = 0;
+ } else {
+ value = ~value << 3;
+ }
+
+ goto ret;
+
+ /* In digital mode the SIGNAL LEVEL is what matters; the real
+ noise level is stored in the demodulator registers.
+ */
+digital:
+ /* best signal: -50dB */
+ if (value <= 0x3200) {
+ value = 0xffff;
+ /* minimum: -114dB - should be 0x7200 but real zero is 0x713A */
+ } else if (value >= 0x713A) {
+ value = 0;
+ } else {
+ value = ~(value - 0x3200) << 2;
+ }
+
+ret:
+ *strength = value;
+
+ return rc;
+}
+
static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
{
struct xc4000_priv *priv = fe->tuner_priv;
@@ -1559,6 +1644,7 @@ static const struct dvb_tuner_ops xc4000_tuner_ops = {
.set_params = xc4000_set_params,
.set_analog_params = xc4000_set_analog_params,
.get_frequency = xc4000_get_frequency,
+ .get_rf_strength = xc4000_get_signal,
.get_bandwidth = xc4000_get_bandwidth,
.get_status = xc4000_get_status
};
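
xc4000_get_signal() above converts the raw XREG_SIGNAL_LEVEL reading (roughly 1/256 dB per unit) into the 0..0xffff scale reported through get_rf_strength. A standalone rework of the digital-mode branch, using unsigned arithmetic so the shift stays well defined; the cut-off constants are the ones from the hunk above:

#include <stdio.h>
#include <stdint.h>

/* 0x3200 (~ -50 dB) maps to full scale, 0x713A (~ -114 dB) maps to zero,
 * everything in between is scaled linearly. */
static uint16_t digital_strength(uint16_t raw)
{
	if (raw <= 0x3200)
		return 0xffff;
	if (raw >= 0x713A)
		return 0;
	return (uint16_t)(~(raw - 0x3200u) << 2);
}

int main(void)
{
	uint16_t samples[] = { 0x3200, 0x4000, 0x6000, 0x7139 };
	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("raw 0x%04x (about -%u dB) -> strength 0x%04x\n",
		       samples[i], samples[i] >> 8, digital_strength(samples[i]));
	return 0;
}
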
diff --git a/trunk/drivers/media/dvb/dvb-core/dvb_frontend.c b/trunk/drivers/media/dvb/dvb-core/dvb_frontend.c
index b15db4fe347b..fbbe545a74cb 100644
--- a/trunk/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/trunk/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -904,8 +904,11 @@ static int dvb_frontend_clear_cache(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int i;
+ u32 delsys;
+ delsys = c->delivery_system;
memset(c, 0, sizeof(struct dtv_frontend_properties));
+ c->delivery_system = delsys;
c->state = DTV_CLEAR;
@@ -1009,25 +1012,6 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
_DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 1, 0),
_DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 1, 0),
- _DTV_CMD(DTV_ISDBT_PARTIAL_RECEPTION, 0, 0),
- _DTV_CMD(DTV_ISDBT_SOUND_BROADCASTING, 0, 0),
- _DTV_CMD(DTV_ISDBT_SB_SUBCHANNEL_ID, 0, 0),
- _DTV_CMD(DTV_ISDBT_SB_SEGMENT_IDX, 0, 0),
- _DTV_CMD(DTV_ISDBT_SB_SEGMENT_COUNT, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYER_ENABLED, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERA_FEC, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERA_MODULATION, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERA_SEGMENT_COUNT, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERA_TIME_INTERLEAVING, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERB_FEC, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERB_MODULATION, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERB_SEGMENT_COUNT, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERB_TIME_INTERLEAVING, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERC_FEC, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERC_MODULATION, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 0, 0),
- _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 0, 0),
-
_DTV_CMD(DTV_ISDBS_TS_ID, 1, 0),
_DTV_CMD(DTV_DVBT2_PLP_ID, 1, 0),
@@ -1413,6 +1397,15 @@ static int set_delivery_system(struct dvb_frontend *fe, u32 desired_system)
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
enum dvbv3_emulation_type type;
+ /*
+ * It was reported that some old DVBv5 applications were
+ * filling delivery_system with SYS_UNDEFINED. If this happens,
+ * assume that the application wants to use the first supported
+ * delivery system.
+ */
+ if (c->delivery_system == SYS_UNDEFINED)
+ c->delivery_system = fe->ops.delsys[0];
+
if (desired_system == SYS_UNDEFINED) {
/*
* A DVBv3 call doesn't know what's the desired system.
@@ -1732,6 +1725,7 @@ static int dvb_frontend_ioctl_properties(struct file *file,
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_frontend *fe = dvbdev->priv;
+ struct dvb_frontend_private *fepriv = fe->frontend_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int err = 0;
@@ -1798,9 +1792,14 @@ static int dvb_frontend_ioctl_properties(struct file *file,
/*
* Fills the cache out struct with the cache contents, plus
- * the data retrieved from get_frontend.
+ * the data retrieved from get_frontend, if the frontend
+ * is not idle. Otherwise, returns the cached content
*/
- dtv_get_frontend(fe, NULL);
+ if (fepriv->state != FESTATE_IDLE) {
+ err = dtv_get_frontend(fe, NULL);
+ if (err < 0)
+ goto out;
+ }
for (i = 0; i < tvps->num; i++) {
err = dtv_property_process_get(fe, c, tvp + i, file);
if (err < 0)
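
The dvb_frontend changes above make DTV_CLEAR preserve the delivery system, fall back to the first supported system when an application leaves SYS_UNDEFINED in the cache, and skip get_frontend while the frontend is idle. For reference, a minimal DVBv5 tuning request that sets the delivery system explicitly might look like this (device path assumed):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dvb/frontend.h>

int main(void)
{
	struct dtv_property props[3];
	struct dtv_properties cmd = { .num = 3, .props = props };
	int fd = open("/dev/dvb/adapter0/frontend0", O_RDWR);

	if (fd < 0) {
		perror("open frontend");
		return 1;
	}

	memset(props, 0, sizeof(props));
	props[0].cmd = DTV_DELIVERY_SYSTEM;
	props[0].u.data = SYS_DVBT;             /* stated explicitly, not SYS_UNDEFINED */
	props[1].cmd = DTV_FREQUENCY;
	props[1].u.data = 474000000;            /* Hz for terrestrial systems */
	props[2].cmd = DTV_TUNE;

	if (ioctl(fd, FE_SET_PROPERTY, &cmd) < 0)
		perror("FE_SET_PROPERTY");

	close(fd);
	return 0;
}
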
diff --git a/trunk/drivers/media/dvb/dvb-usb/anysee.c b/trunk/drivers/media/dvb/dvb-usb/anysee.c
index d66192974d68..1455e2644ab5 100644
--- a/trunk/drivers/media/dvb/dvb-usb/anysee.c
+++ b/trunk/drivers/media/dvb/dvb-usb/anysee.c
@@ -877,24 +877,18 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
case ANYSEE_HW_508T2C: /* 20 */
/* E7 T2C */
+ if (state->fe_id)
+ break;
+
/* enable DVB-T/T2/C demod on IOE[5] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 5), 0x20);
if (ret)
goto error;
- if (state->fe_id == 0) {
- /* DVB-T/T2 */
- adap->fe_adap[state->fe_id].fe =
- dvb_attach(cxd2820r_attach,
- &anysee_cxd2820r_config,
- &adap->dev->i2c_adap, NULL);
- } else {
- /* DVB-C */
- adap->fe_adap[state->fe_id].fe =
- dvb_attach(cxd2820r_attach,
- &anysee_cxd2820r_config,
- &adap->dev->i2c_adap, adap->fe_adap[0].fe);
- }
+ /* attach demod */
+ adap->fe_adap[state->fe_id].fe = dvb_attach(cxd2820r_attach,
+ &anysee_cxd2820r_config, &adap->dev->i2c_adap,
+ NULL);
state->has_ci = true;
diff --git a/trunk/drivers/media/dvb/dvb-usb/dib0700.h b/trunk/drivers/media/dvb/dvb-usb/dib0700.h
index 9bd6d51b3b93..7de125c0b36f 100644
--- a/trunk/drivers/media/dvb/dvb-usb/dib0700.h
+++ b/trunk/drivers/media/dvb/dvb-usb/dib0700.h
@@ -48,6 +48,8 @@ struct dib0700_state {
u8 disable_streaming_master_mode;
u32 fw_version;
u32 nb_packet_buffer_size;
+ int (*read_status)(struct dvb_frontend *, fe_status_t *);
+ int (*sleep)(struct dvb_frontend* fe);
u8 buf[255];
};
diff --git a/trunk/drivers/media/dvb/dvb-usb/dib0700_core.c b/trunk/drivers/media/dvb/dvb-usb/dib0700_core.c
index 206999476f02..070e82aa53f5 100644
--- a/trunk/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/trunk/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -834,6 +834,7 @@ static struct usb_driver dib0700_driver = {
module_usb_driver(dib0700_driver);
+MODULE_FIRMWARE("dvb-usb-dib0700-1.20.fw");
MODULE_AUTHOR("Patrick Boettcher ");
MODULE_DESCRIPTION("Driver for devices based on DiBcom DiB0700 - USB bridge");
MODULE_VERSION("1.0");
diff --git a/trunk/drivers/media/dvb/dvb-usb/dib0700_devices.c b/trunk/drivers/media/dvb/dvb-usb/dib0700_devices.c
index 81ef4b46f790..f9e966aa26e7 100644
--- a/trunk/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/trunk/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -3066,19 +3066,25 @@ static struct dib7000p_config stk7070pd_dib7000p_config[2] = {
}
};
-static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap)
+static void stk7070pd_init(struct dvb_usb_device *dev)
{
- dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
+ dib0700_set_gpio(dev, GPIO6, GPIO_OUT, 1);
msleep(10);
- dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1);
- dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1);
- dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1);
- dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
+ dib0700_set_gpio(dev, GPIO9, GPIO_OUT, 1);
+ dib0700_set_gpio(dev, GPIO4, GPIO_OUT, 1);
+ dib0700_set_gpio(dev, GPIO7, GPIO_OUT, 1);
+ dib0700_set_gpio(dev, GPIO10, GPIO_OUT, 0);
- dib0700_ctrl_clock(adap->dev, 72, 1);
+ dib0700_ctrl_clock(dev, 72, 1);
msleep(10);
- dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
+ dib0700_set_gpio(dev, GPIO10, GPIO_OUT, 1);
+}
+
+static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap)
+{
+ stk7070pd_init(adap->dev);
+
msleep(10);
dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
@@ -3099,6 +3105,77 @@ static int stk7070pd_frontend_attach1(struct dvb_usb_adapter *adap)
return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
+static int novatd_read_status_override(struct dvb_frontend *fe,
+ fe_status_t *stat)
+{
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+ struct dvb_usb_device *dev = adap->dev;
+ struct dib0700_state *state = dev->priv;
+ int ret;
+
+ ret = state->read_status(fe, stat);
+
+ if (!ret)
+ dib0700_set_gpio(dev, adap->id == 0 ? GPIO1 : GPIO0, GPIO_OUT,
+ !!(*stat & FE_HAS_LOCK));
+
+ return ret;
+}
+
+static int novatd_sleep_override(struct dvb_frontend* fe)
+{
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+ struct dvb_usb_device *dev = adap->dev;
+ struct dib0700_state *state = dev->priv;
+
+ /* turn off LED */
+ dib0700_set_gpio(dev, adap->id == 0 ? GPIO1 : GPIO0, GPIO_OUT, 0);
+
+ return state->sleep(fe);
+}
+
+/**
+ * novatd_frontend_attach - Nova-TD specific attach
+ *
+ * Nova-TD has GPIO0, 1 and 2 for LEDs. So do not fiddle with them except for
+ * information purposes.
+ */
+static int novatd_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct dvb_usb_device *dev = adap->dev;
+ struct dib0700_state *st = dev->priv;
+
+ if (adap->id == 0) {
+ stk7070pd_init(dev);
+
+ /* turn the power LED on, the other two off (just in case) */
+ dib0700_set_gpio(dev, GPIO0, GPIO_OUT, 0);
+ dib0700_set_gpio(dev, GPIO1, GPIO_OUT, 0);
+ dib0700_set_gpio(dev, GPIO2, GPIO_OUT, 1);
+
+ if (dib7000p_i2c_enumeration(&dev->i2c_adap, 2, 18,
+ stk7070pd_dib7000p_config) != 0) {
+ err("%s: dib7000p_i2c_enumeration failed. Cannot continue\n",
+ __func__);
+ return -ENODEV;
+ }
+ }
+
+ adap->fe_adap[0].fe = dvb_attach(dib7000p_attach, &dev->i2c_adap,
+ adap->id == 0 ? 0x80 : 0x82,
+ &stk7070pd_dib7000p_config[adap->id]);
+
+ if (adap->fe_adap[0].fe == NULL)
+ return -ENODEV;
+
+ st->read_status = adap->fe_adap[0].fe->ops.read_status;
+ adap->fe_adap[0].fe->ops.read_status = novatd_read_status_override;
+ st->sleep = adap->fe_adap[0].fe->ops.sleep;
+ adap->fe_adap[0].fe->ops.sleep = novatd_sleep_override;
+
+ return 0;
+}
+
/* S5H1411 */
static struct s5h1411_config pinnacle_801e_config = {
.output_mode = S5H1411_PARALLEL_OUTPUT,
@@ -3861,6 +3938,57 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
+ .num_adapters = 2,
+ .adapter = {
+ {
+ .num_frontends = 1,
+ .fe = {{
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+ .pid_filter_count = 32,
+ .pid_filter = stk70x0p_pid_filter,
+ .pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
+ .frontend_attach = novatd_frontend_attach,
+ .tuner_attach = dib7070p_tuner_attach,
+
+ DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+ }},
+ .size_of_priv = sizeof(struct dib0700_adapter_state),
+ }, {
+ .num_frontends = 1,
+ .fe = {{
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+ .pid_filter_count = 32,
+ .pid_filter = stk70x0p_pid_filter,
+ .pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
+ .frontend_attach = novatd_frontend_attach,
+ .tuner_attach = dib7070p_tuner_attach,
+
+ DIB0700_DEFAULT_STREAMING_CONFIG(0x03),
+ }},
+ .size_of_priv = sizeof(struct dib0700_adapter_state),
+ }
+ },
+
+ .num_device_descs = 1,
+ .devices = {
+ { "Hauppauge Nova-TD Stick (52009)",
+ { &dib0700_usb_id_table[35], NULL },
+ { NULL },
+ },
+ },
+
+ .rc.core = {
+ .rc_interval = DEFAULT_RC_INTERVAL,
+ .rc_codes = RC_MAP_DIB0700_RC5_TABLE,
+ .module_name = "dib0700",
+ .rc_query = dib0700_rc_query_old_firmware,
+ .allowed_protos = RC_TYPE_RC5 |
+ RC_TYPE_RC6 |
+ RC_TYPE_NEC,
+ .change_protocol = dib0700_change_protocol,
+ },
+ }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
+
.num_adapters = 2,
.adapter = {
{
@@ -3892,7 +4020,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
}
},
- .num_device_descs = 6,
+ .num_device_descs = 5,
.devices = {
{ "DiBcom STK7070PD reference design",
{ &dib0700_usb_id_table[17], NULL },
@@ -3902,10 +4030,6 @@ struct dvb_usb_device_properties dib0700_devices[] = {
{ &dib0700_usb_id_table[18], NULL },
{ NULL },
},
- { "Hauppauge Nova-TD Stick (52009)",
- { &dib0700_usb_id_table[35], NULL },
- { NULL },
- },
{ "Hauppauge Nova-TD-500 (84xxx)",
{ &dib0700_usb_id_table[36], NULL },
{ NULL },
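
novatd_frontend_attach() above saves the frontend's original read_status and sleep callbacks in dib0700_state and installs wrappers that chain to them while driving the per-adapter lock LED. The general save-and-wrap pattern in a self-contained sketch; the structures and names here are illustrative, not the real dvb_frontend types:

#include <stdio.h>

struct frontend {
	int (*read_status)(struct frontend *fe, int *locked);
};

struct driver_state {
	int (*orig_read_status)(struct frontend *fe, int *locked);
};

static struct driver_state state;

static int hw_read_status(struct frontend *fe, int *locked)
{
	*locked = 1;                            /* pretend the tuner locked */
	return 0;
}

static int read_status_override(struct frontend *fe, int *locked)
{
	int ret = state.orig_read_status(fe, locked);   /* chain to the original */

	if (!ret)
		printf("lock LED %s\n", *locked ? "on" : "off");
	return ret;
}

int main(void)
{
	struct frontend fe = { .read_status = hw_read_status };
	int locked;

	/* install the wrapper, keeping the original for chaining */
	state.orig_read_status = fe.read_status;
	fe.read_status = read_status_override;

	fe.read_status(&fe, &locked);
	return 0;
}
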
diff --git a/trunk/drivers/media/dvb/frontends/cxd2820r_core.c b/trunk/drivers/media/dvb/frontends/cxd2820r_core.c
index 93e1b12e7907..caae7f79c837 100644
--- a/trunk/drivers/media/dvb/frontends/cxd2820r_core.c
+++ b/trunk/drivers/media/dvb/frontends/cxd2820r_core.c
@@ -309,9 +309,14 @@ static int cxd2820r_read_status(struct dvb_frontend *fe, fe_status_t *status)
static int cxd2820r_get_frontend(struct dvb_frontend *fe)
{
+ struct cxd2820r_priv *priv = fe->demodulator_priv;
int ret;
dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
+
+ if (priv->delivery_system == SYS_UNDEFINED)
+ return 0;
+
switch (fe->dtv_property_cache.delivery_system) {
case SYS_DVBT:
ret = cxd2820r_get_frontend_t(fe);
@@ -476,10 +481,10 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
/* switch between DVB-T and DVB-T2 when tune fails */
- if (priv->last_tune_failed && (priv->delivery_system != SYS_DVBC_ANNEX_A)) {
+ if (priv->last_tune_failed) {
if (priv->delivery_system == SYS_DVBT)
c->delivery_system = SYS_DVBT2;
- else
+ else if (priv->delivery_system == SYS_DVBT2)
c->delivery_system = SYS_DVBT;
}
@@ -492,6 +497,7 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
/* frontend lock wait loop count */
switch (priv->delivery_system) {
case SYS_DVBT:
+ case SYS_DVBC_ANNEX_A:
i = 20;
break;
case SYS_DVBT2:
diff --git a/trunk/drivers/media/dvb/frontends/ds3000.c b/trunk/drivers/media/dvb/frontends/ds3000.c
index 938777065de6..af65d013db11 100644
--- a/trunk/drivers/media/dvb/frontends/ds3000.c
+++ b/trunk/drivers/media/dvb/frontends/ds3000.c
@@ -1195,7 +1195,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe)
for (i = 0; i < 30 ; i++) {
ds3000_read_status(fe, &status);
- if (status && FE_HAS_LOCK)
+ if (status & FE_HAS_LOCK)
break;
msleep(10);
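
The ds3000 hunk above fixes a logical-vs-bitwise mix-up: status && FE_HAS_LOCK is true whenever status is non-zero, so the lock-wait loop could exit before the demodulator actually locked. A two-printf demonstration:

#include <stdio.h>

#define FE_HAS_LOCK 0x10        /* same value as in linux/dvb/frontend.h */

int main(void)
{
	int status = 0x01;      /* has signal, but no lock */

	/* '&&' only checks that both operands are non-zero... */
	printf("status && FE_HAS_LOCK -> %d (claims lock)\n", status && FE_HAS_LOCK);
	/* ...'&' actually tests the flag bit. */
	printf("status &  FE_HAS_LOCK -> %d (no lock)\n", (status & FE_HAS_LOCK) != 0);
	return 0;
}
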
diff --git a/trunk/drivers/media/dvb/frontends/mb86a20s.c b/trunk/drivers/media/dvb/frontends/mb86a20s.c
index 7fa3e472cdca..fade566927c3 100644
--- a/trunk/drivers/media/dvb/frontends/mb86a20s.c
+++ b/trunk/drivers/media/dvb/frontends/mb86a20s.c
@@ -402,7 +402,7 @@ static int mb86a20s_get_modulation(struct mb86a20s_state *state,
[2] = 0x8e, /* Layer C */
};
- if (layer > ARRAY_SIZE(reg))
+ if (layer >= ARRAY_SIZE(reg))
return -EINVAL;
rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
if (rc < 0)
@@ -435,7 +435,7 @@ static int mb86a20s_get_fec(struct mb86a20s_state *state,
[2] = 0x8f, /* Layer C */
};
- if (layer > ARRAY_SIZE(reg))
+ if (layer >= ARRAY_SIZE(reg))
return -EINVAL;
rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
if (rc < 0)
@@ -470,7 +470,7 @@ static int mb86a20s_get_interleaving(struct mb86a20s_state *state,
[2] = 0x90, /* Layer C */
};
- if (layer > ARRAY_SIZE(reg))
+ if (layer >= ARRAY_SIZE(reg))
return -EINVAL;
rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
if (rc < 0)
@@ -494,7 +494,7 @@ static int mb86a20s_get_segment_count(struct mb86a20s_state *state,
[2] = 0x91, /* Layer C */
};
- if (layer > ARRAY_SIZE(reg))
+ if (layer >= ARRAY_SIZE(reg))
return -EINVAL;
rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
if (rc < 0)
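
The mb86a20s hunks above are the classic ARRAY_SIZE off-by-one: valid indices run from 0 to ARRAY_SIZE()-1, so the guard must use >= rather than >. A small sketch of the difference:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	unsigned char reg[] = { 0x8d, 0x8e, 0x8f };     /* layers A, B, C */
	unsigned int layer = 3;                         /* one past the end */

	if (!(layer > ARRAY_SIZE(reg)))
		printf("'>' lets layer %u through; reg[%u] would be out of bounds\n",
		       layer, layer);
	if (layer >= ARRAY_SIZE(reg))
		printf("'>=' rejects layer %u\n", layer);
	return 0;
}
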
diff --git a/trunk/drivers/media/dvb/frontends/tda18271c2dd.c b/trunk/drivers/media/dvb/frontends/tda18271c2dd.c
index 86da3d816498..ad7c72e8f517 100644
--- a/trunk/drivers/media/dvb/frontends/tda18271c2dd.c
+++ b/trunk/drivers/media/dvb/frontends/tda18271c2dd.c
@@ -29,7 +29,6 @@
#include
#include
#include
-#include
#include
#include "dvb_frontend.h"
diff --git a/trunk/drivers/media/video/as3645a.c b/trunk/drivers/media/video/as3645a.c
index ec859a580651..f241702a0f36 100644
--- a/trunk/drivers/media/video/as3645a.c
+++ b/trunk/drivers/media/video/as3645a.c
@@ -29,6 +29,7 @@
#include
#include
#include
+#include
#include
#include
diff --git a/trunk/drivers/media/video/cx18/cx18-fileops.c b/trunk/drivers/media/video/cx18/cx18-fileops.c
index 14cb961c22bd..4bfd865a4106 100644
--- a/trunk/drivers/media/video/cx18/cx18-fileops.c
+++ b/trunk/drivers/media/video/cx18/cx18-fileops.c
@@ -751,20 +751,10 @@ int cx18_v4l2_close(struct file *filp)
CX18_DEBUG_IOCTL("close() of %s\n", s->name);
- v4l2_fh_del(fh);
- v4l2_fh_exit(fh);
-
- /* Easy case first: this stream was never claimed by us */
- if (s->id != id->open_id) {
- kfree(id);
- return 0;
- }
-
- /* 'Unclaim' this stream */
-
- /* Stop radio */
mutex_lock(&cx->serialize_lock);
- if (id->type == CX18_ENC_STREAM_TYPE_RAD) {
+ /* Stop radio */
+ if (id->type == CX18_ENC_STREAM_TYPE_RAD &&
+ v4l2_fh_is_singular_file(filp)) {
/* Closing radio device, return to TV mode */
cx18_mute(cx);
/* Mark that the radio is no longer in use */
@@ -781,10 +771,14 @@ int cx18_v4l2_close(struct file *filp)
}
/* Done! Unmute and continue. */
cx18_unmute(cx);
- cx18_release_stream(s);
- } else {
- cx18_stop_capture(id, 0);
}
+
+ v4l2_fh_del(fh);
+ v4l2_fh_exit(fh);
+
+ /* 'Unclaim' this stream */
+ if (s->id == id->open_id)
+ cx18_stop_capture(id, 0);
kfree(id);
mutex_unlock(&cx->serialize_lock);
return 0;
@@ -810,21 +804,15 @@ static int cx18_serialized_open(struct cx18_stream *s, struct file *filp)
item->open_id = cx->open_id++;
filp->private_data = &item->fh;
+ v4l2_fh_add(&item->fh);
- if (item->type == CX18_ENC_STREAM_TYPE_RAD) {
- /* Try to claim this stream */
- if (cx18_claim_stream(item, item->type)) {
- /* No, it's already in use */
- v4l2_fh_exit(&item->fh);
- kfree(item);
- return -EBUSY;
- }
-
+ if (item->type == CX18_ENC_STREAM_TYPE_RAD &&
+ v4l2_fh_is_singular_file(filp)) {
if (!test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) {
if (atomic_read(&cx->ana_capturing) > 0) {
/* switching to radio while capture is
in progress is not polite */
- cx18_release_stream(s);
+ v4l2_fh_del(&item->fh);
v4l2_fh_exit(&item->fh);
kfree(item);
return -EBUSY;
@@ -842,7 +830,6 @@ static int cx18_serialized_open(struct cx18_stream *s, struct file *filp)
/* Done! Unmute and continue. */
cx18_unmute(cx);
}
- v4l2_fh_add(&item->fh);
return 0;
}
diff --git a/trunk/drivers/media/video/cx231xx/cx231xx-cards.c b/trunk/drivers/media/video/cx231xx/cx231xx-cards.c
index 919ed77b32f2..875a7ce94736 100644
--- a/trunk/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/trunk/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -1052,7 +1052,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
cx231xx_err(DRIVER_NAME ": out of memory!\n");
- clear_bit(dev->devno, &cx231xx_devused);
+ clear_bit(nr, &cx231xx_devused);
return -ENOMEM;
}
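
The cx231xx hunk above fixes an error path that cleared the device-used bit through the very pointer whose allocation had just failed; the fix uses the already-known slot index nr instead. The same shape in miniature:

#include <stdio.h>

struct device {
	int devno;
};

int main(void)
{
	int nr = 3;                     /* device slot reserved before allocating */
	struct device *dev = NULL;      /* pretend kzalloc() failed */

	if (dev == NULL) {
		/* Release the slot by the index we already have;
		 * dev->devno here would dereference the NULL pointer. */
		printf("clearing slot %d and bailing out\n", nr);
		return 1;
	}
	printf("devno = %d\n", dev->devno);
	return 0;
}
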
diff --git a/trunk/drivers/media/video/cx23885/cx23885-cards.c b/trunk/drivers/media/video/cx23885/cx23885-cards.c
index 3c01be999e35..19b5499d2624 100644
--- a/trunk/drivers/media/video/cx23885/cx23885-cards.c
+++ b/trunk/drivers/media/video/cx23885/cx23885-cards.c
@@ -213,8 +213,8 @@ struct cx23885_board cx23885_boards[] = {
.portc = CX23885_MPEG_DVB,
.tuner_type = TUNER_XC4000,
.tuner_addr = 0x61,
- .radio_type = TUNER_XC4000,
- .radio_addr = 0x61,
+ .radio_type = UNSET,
+ .radio_addr = ADDR_UNSET,
.input = {{
.type = CX23885_VMUX_TELEVISION,
.vmux = CX25840_VIN2_CH1 |
diff --git a/trunk/drivers/media/video/cx23885/cx23885-dvb.c b/trunk/drivers/media/video/cx23885/cx23885-dvb.c
index af8a225763d3..6835eb1fc093 100644
--- a/trunk/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/trunk/drivers/media/video/cx23885/cx23885-dvb.c
@@ -943,6 +943,11 @@ static int dvb_register(struct cx23885_tsport *port)
fe = dvb_attach(xc4000_attach, fe0->dvb.frontend,
&dev->i2c_bus[1].i2c_adap, &cfg);
+ if (!fe) {
+ printk(KERN_ERR "%s/2: xc4000 attach failed\n",
+ dev->name);
+ goto frontend_detach;
+ }
}
break;
case CX23885_BOARD_TBS_6920:
diff --git a/trunk/drivers/media/video/cx23885/cx23885-video.c b/trunk/drivers/media/video/cx23885/cx23885-video.c
index 4bbf9bb97bde..c654bdc7ccb2 100644
--- a/trunk/drivers/media/video/cx23885/cx23885-video.c
+++ b/trunk/drivers/media/video/cx23885/cx23885-video.c
@@ -1550,7 +1550,6 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
struct v4l2_control ctrl;
struct videobuf_dvb_frontend *vfe;
struct dvb_frontend *fe;
- int err = 0;
struct analog_parameters params = {
.mode = V4L2_TUNER_ANALOG_TV,
@@ -1572,8 +1571,10 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
params.frequency, f->tuner, params.std);
vfe = videobuf_dvb_get_frontend(&dev->ts2.frontends, 1);
- if (!vfe)
- err = -EINVAL;
+ if (!vfe) {
+ mutex_unlock(&dev->lock);
+ return -EINVAL;
+ }
fe = vfe->dvb.frontend;
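
The cx23885-video hunk above replaces a "record the error and fall through" path (which went on to dereference the NULL vfe) with an early return, and that early return has to drop dev->lock first. A pthread sketch of the unlock-before-return requirement, with illustrative names (build with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

static int set_freq(int have_frontend)
{
	pthread_mutex_lock(&dev_lock);

	if (!have_frontend) {
		pthread_mutex_unlock(&dev_lock);   /* without this, the next caller deadlocks */
		return -1;
	}

	/* ... tune ... */
	pthread_mutex_unlock(&dev_lock);
	return 0;
}

int main(void)
{
	printf("first call:  %d\n", set_freq(0));
	printf("second call: %d\n", set_freq(1));  /* would hang if the error path kept the lock */
	return 0;
}
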
diff --git a/trunk/drivers/media/video/cx88/cx88-cards.c b/trunk/drivers/media/video/cx88/cx88-cards.c
index 62c7ad050f9b..cbd5d119a2c6 100644
--- a/trunk/drivers/media/video/cx88/cx88-cards.c
+++ b/trunk/drivers/media/video/cx88/cx88-cards.c
@@ -1573,8 +1573,8 @@ static const struct cx88_board cx88_boards[] = {
.name = "Pinnacle Hybrid PCTV",
.tuner_type = TUNER_XC2028,
.tuner_addr = 0x61,
- .radio_type = TUNER_XC2028,
- .radio_addr = 0x61,
+ .radio_type = UNSET,
+ .radio_addr = ADDR_UNSET,
.input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
@@ -1611,8 +1611,8 @@ static const struct cx88_board cx88_boards[] = {
.name = "Leadtek TV2000 XP Global",
.tuner_type = TUNER_XC2028,
.tuner_addr = 0x61,
- .radio_type = TUNER_XC2028,
- .radio_addr = 0x61,
+ .radio_type = UNSET,
+ .radio_addr = ADDR_UNSET,
.input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
@@ -2115,8 +2115,8 @@ static const struct cx88_board cx88_boards[] = {
.name = "Terratec Cinergy HT PCI MKII",
.tuner_type = TUNER_XC2028,
.tuner_addr = 0x61,
- .radio_type = TUNER_XC2028,
- .radio_addr = 0x61,
+ .radio_type = UNSET,
+ .radio_addr = ADDR_UNSET,
.input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
@@ -2154,9 +2154,9 @@ static const struct cx88_board cx88_boards[] = {
[CX88_BOARD_WINFAST_DTV1800H] = {
.name = "Leadtek WinFast DTV1800 Hybrid",
.tuner_type = TUNER_XC2028,
- .radio_type = TUNER_XC2028,
+ .radio_type = UNSET,
.tuner_addr = 0x61,
- .radio_addr = 0x61,
+ .radio_addr = ADDR_UNSET,
/*
* GPIO setting
*
@@ -2195,9 +2195,9 @@ static const struct cx88_board cx88_boards[] = {
[CX88_BOARD_WINFAST_DTV1800H_XC4000] = {
.name = "Leadtek WinFast DTV1800 H (XC4000)",
.tuner_type = TUNER_XC4000,
- .radio_type = TUNER_XC4000,
+ .radio_type = UNSET,
.tuner_addr = 0x61,
- .radio_addr = 0x61,
+ .radio_addr = ADDR_UNSET,
/*
* GPIO setting
*
@@ -2236,9 +2236,9 @@ static const struct cx88_board cx88_boards[] = {
[CX88_BOARD_WINFAST_DTV2000H_PLUS] = {
.name = "Leadtek WinFast DTV2000 H PLUS",
.tuner_type = TUNER_XC4000,
- .radio_type = TUNER_XC4000,
+ .radio_type = UNSET,
.tuner_addr = 0x61,
- .radio_addr = 0x61,
+ .radio_addr = ADDR_UNSET,
/*
* GPIO
* 2: 1: mute audio
diff --git a/trunk/drivers/media/video/ivtv/ivtv-driver.c b/trunk/drivers/media/video/ivtv/ivtv-driver.c
index 544af91cbdc1..3949b7dc2368 100644
--- a/trunk/drivers/media/video/ivtv/ivtv-driver.c
+++ b/trunk/drivers/media/video/ivtv/ivtv-driver.c
@@ -731,9 +731,6 @@ static int __devinit ivtv_init_struct1(struct ivtv *itv)
init_kthread_work(&itv->irq_work, ivtv_irq_work_handler);
- /* start counting open_id at 1 */
- itv->open_id = 1;
-
/* Initial settings */
itv->cxhdl.port = CX2341X_PORT_MEMORY;
itv->cxhdl.capabilities = CX2341X_CAP_HAS_SLICED_VBI;
diff --git a/trunk/drivers/media/video/ivtv/ivtv-driver.h b/trunk/drivers/media/video/ivtv/ivtv-driver.h
index 8f9cc17b518e..06f3d78389bf 100644
--- a/trunk/drivers/media/video/ivtv/ivtv-driver.h
+++ b/trunk/drivers/media/video/ivtv/ivtv-driver.h
@@ -332,7 +332,7 @@ struct ivtv_stream {
const char *name; /* name of the stream */
int type; /* stream type */
- u32 id;
+ struct v4l2_fh *fh; /* pointer to the streaming filehandle */
spinlock_t qlock; /* locks access to the queues */
unsigned long s_flags; /* status flags, see above */
int dma; /* can be PCI_DMA_TODEVICE, PCI_DMA_FROMDEVICE or PCI_DMA_NONE */
@@ -379,7 +379,6 @@ struct ivtv_stream {
struct ivtv_open_id {
struct v4l2_fh fh;
- u32 open_id; /* unique ID for this file descriptor */
int type; /* stream type */
int yuv_frames; /* 1: started OUT_UDMA_YUV output mode */
struct ivtv *itv;
diff --git a/trunk/drivers/media/video/ivtv/ivtv-fileops.c b/trunk/drivers/media/video/ivtv/ivtv-fileops.c
index 38f052257f46..2cd6c89b7d91 100644
--- a/trunk/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/trunk/drivers/media/video/ivtv/ivtv-fileops.c
@@ -50,16 +50,16 @@ static int ivtv_claim_stream(struct ivtv_open_id *id, int type)
if (test_and_set_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
/* someone already claimed this stream */
- if (s->id == id->open_id) {
+ if (s->fh == &id->fh) {
/* yes, this file descriptor did. So that's OK. */
return 0;
}
- if (s->id == -1 && (type == IVTV_DEC_STREAM_TYPE_VBI ||
+ if (s->fh == NULL && (type == IVTV_DEC_STREAM_TYPE_VBI ||
type == IVTV_ENC_STREAM_TYPE_VBI)) {
/* VBI is handled already internally, now also assign
the file descriptor to this stream for external
reading of the stream. */
- s->id = id->open_id;
+ s->fh = &id->fh;
IVTV_DEBUG_INFO("Start Read VBI\n");
return 0;
}
@@ -67,7 +67,7 @@ static int ivtv_claim_stream(struct ivtv_open_id *id, int type)
IVTV_DEBUG_INFO("Stream %d is busy\n", type);
return -EBUSY;
}
- s->id = id->open_id;
+ s->fh = &id->fh;
if (type == IVTV_DEC_STREAM_TYPE_VBI) {
/* Enable reinsertion interrupt */
ivtv_clear_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT);
@@ -104,7 +104,7 @@ void ivtv_release_stream(struct ivtv_stream *s)
struct ivtv *itv = s->itv;
struct ivtv_stream *s_vbi;
- s->id = -1;
+ s->fh = NULL;
if ((s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type == IVTV_ENC_STREAM_TYPE_VBI) &&
test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
/* this stream is still in use internally */
@@ -136,7 +136,7 @@ void ivtv_release_stream(struct ivtv_stream *s)
/* was already cleared */
return;
}
- if (s_vbi->id != -1) {
+ if (s_vbi->fh) {
/* VBI stream still claimed by a file descriptor */
return;
}
@@ -268,11 +268,13 @@ static struct ivtv_buffer *ivtv_get_buffer(struct ivtv_stream *s, int non_block,
}
/* wait for more data to arrive */
+ mutex_unlock(&itv->serialize_lock);
prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
/* New buffers might have become available before we were added to the waitqueue */
if (!s->q_full.buffers)
schedule();
finish_wait(&s->waitq, &wait);
+ mutex_lock(&itv->serialize_lock);
if (signal_pending(current)) {
/* return if a signal was received */
IVTV_DEBUG_INFO("User stopped %s\n", s->name);
@@ -357,7 +359,7 @@ static ssize_t ivtv_read(struct ivtv_stream *s, char __user *ubuf, size_t tot_co
size_t tot_written = 0;
int single_frame = 0;
- if (atomic_read(&itv->capturing) == 0 && s->id == -1) {
+ if (atomic_read(&itv->capturing) == 0 && s->fh == NULL) {
/* shouldn't happen */
IVTV_DEBUG_WARN("Stream %s not initialized before read\n", s->name);
return -EIO;
@@ -507,9 +509,7 @@ ssize_t ivtv_v4l2_read(struct file * filp, char __user *buf, size_t count, loff_
IVTV_DEBUG_HI_FILE("read %zd bytes from %s\n", count, s->name);
- mutex_lock(&itv->serialize_lock);
rc = ivtv_start_capture(id);
- mutex_unlock(&itv->serialize_lock);
if (rc)
return rc;
return ivtv_read_pos(s, buf, count, pos, filp->f_flags & O_NONBLOCK);
@@ -584,9 +584,7 @@ ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t c
set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
/* Start decoder (returns 0 if already started) */
- mutex_lock(&itv->serialize_lock);
rc = ivtv_start_decoding(id, itv->speed);
- mutex_unlock(&itv->serialize_lock);
if (rc) {
IVTV_DEBUG_WARN("Failed start decode stream %s\n", s->name);
@@ -627,11 +625,13 @@ ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t c
break;
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
+ mutex_unlock(&itv->serialize_lock);
prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
/* New buffers might have become free before we were added to the waitqueue */
if (!s->q_free.buffers)
schedule();
finish_wait(&s->waitq, &wait);
+ mutex_lock(&itv->serialize_lock);
if (signal_pending(current)) {
IVTV_DEBUG_INFO("User stopped %s\n", s->name);
return -EINTR;
@@ -686,12 +686,14 @@ ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t c
if (mode == OUT_YUV)
ivtv_yuv_setup_stream_frame(itv);
+ mutex_unlock(&itv->serialize_lock);
prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
while (!(got_sig = signal_pending(current)) &&
test_bit(IVTV_F_S_DMA_PENDING, &s->s_flags)) {
schedule();
}
finish_wait(&itv->dma_waitq, &wait);
+ mutex_lock(&itv->serialize_lock);
if (got_sig) {
IVTV_DEBUG_INFO("User interrupted %s\n", s->name);
return -EINTR;
@@ -756,9 +758,7 @@ unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait)
if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
int rc;
- mutex_lock(&itv->serialize_lock);
rc = ivtv_start_capture(id);
- mutex_unlock(&itv->serialize_lock);
if (rc) {
IVTV_DEBUG_INFO("Could not start capture for %s (%d)\n",
s->name, rc);
@@ -808,7 +808,7 @@ void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end)
id->type == IVTV_ENC_STREAM_TYPE_VBI) &&
test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
/* Also used internally, don't stop capturing */
- s->id = -1;
+ s->fh = NULL;
}
else {
ivtv_stop_v4l2_encode_stream(s, gop_end);
@@ -861,20 +861,9 @@ int ivtv_v4l2_close(struct file *filp)
IVTV_DEBUG_FILE("close %s\n", s->name);
- v4l2_fh_del(fh);
- v4l2_fh_exit(fh);
-
- /* Easy case first: this stream was never claimed by us */
- if (s->id != id->open_id) {
- kfree(id);
- return 0;
- }
-
- /* 'Unclaim' this stream */
-
/* Stop radio */
- mutex_lock(&itv->serialize_lock);
- if (id->type == IVTV_ENC_STREAM_TYPE_RAD) {
+ if (id->type == IVTV_ENC_STREAM_TYPE_RAD &&
+ v4l2_fh_is_singular_file(filp)) {
/* Closing radio device, return to TV mode */
ivtv_mute(itv);
/* Mark that the radio is no longer in use */
@@ -890,13 +879,25 @@ int ivtv_v4l2_close(struct file *filp)
if (atomic_read(&itv->capturing) > 0) {
/* Undo video mute */
ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1,
- v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute) |
- (v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute_yuv) << 8));
+ v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute) |
+ (v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute_yuv) << 8));
}
/* Done! Unmute and continue. */
ivtv_unmute(itv);
- ivtv_release_stream(s);
- } else if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
+ }
+
+ v4l2_fh_del(fh);
+ v4l2_fh_exit(fh);
+
+ /* Easy case first: this stream was never claimed by us */
+ if (s->fh != &id->fh) {
+ kfree(id);
+ return 0;
+ }
+
+ /* 'Unclaim' this stream */
+
+ if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
struct ivtv_stream *s_vout = &itv->streams[IVTV_DEC_STREAM_TYPE_VOUT];
ivtv_stop_decoding(id, VIDEO_CMD_STOP_TO_BLACK | VIDEO_CMD_STOP_IMMEDIATELY, 0);
@@ -911,21 +912,25 @@ int ivtv_v4l2_close(struct file *filp)
ivtv_stop_capture(id, 0);
}
kfree(id);
- mutex_unlock(&itv->serialize_lock);
return 0;
}
-static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
+int ivtv_v4l2_open(struct file *filp)
{
-#ifdef CONFIG_VIDEO_ADV_DEBUG
struct video_device *vdev = video_devdata(filp);
-#endif
+ struct ivtv_stream *s = video_get_drvdata(vdev);
struct ivtv *itv = s->itv;
struct ivtv_open_id *item;
int res = 0;
IVTV_DEBUG_FILE("open %s\n", s->name);
+ if (ivtv_init_on_first_open(itv)) {
+ IVTV_ERR("Failed to initialize on device %s\n",
+ video_device_node_name(vdev));
+ return -ENXIO;
+ }
+
#ifdef CONFIG_VIDEO_ADV_DEBUG
/* Unless ivtv_fw_debug is set, error out if firmware dead. */
if (ivtv_fw_debug) {
@@ -966,31 +971,19 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
return -ENOMEM;
}
v4l2_fh_init(&item->fh, s->vdev);
- if (res < 0) {
- v4l2_fh_exit(&item->fh);
- kfree(item);
- return res;
- }
item->itv = itv;
item->type = s->type;
- item->open_id = itv->open_id++;
filp->private_data = &item->fh;
+ v4l2_fh_add(&item->fh);
- if (item->type == IVTV_ENC_STREAM_TYPE_RAD) {
- /* Try to claim this stream */
- if (ivtv_claim_stream(item, item->type)) {
- /* No, it's already in use */
- v4l2_fh_exit(&item->fh);
- kfree(item);
- return -EBUSY;
- }
-
+ if (item->type == IVTV_ENC_STREAM_TYPE_RAD &&
+ v4l2_fh_is_singular_file(filp)) {
if (!test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) {
if (atomic_read(&itv->capturing) > 0) {
/* switching to radio while capture is
in progress is not polite */
- ivtv_release_stream(s);
+ v4l2_fh_del(&item->fh);
v4l2_fh_exit(&item->fh);
kfree(item);
return -EBUSY;
@@ -1022,32 +1015,9 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
itv->yuv_info.stream_size = 0;
}
- v4l2_fh_add(&item->fh);
return 0;
}
-int ivtv_v4l2_open(struct file *filp)
-{
- int res;
- struct ivtv *itv = NULL;
- struct ivtv_stream *s = NULL;
- struct video_device *vdev = video_devdata(filp);
-
- s = video_get_drvdata(vdev);
- itv = s->itv;
-
- mutex_lock(&itv->serialize_lock);
- if (ivtv_init_on_first_open(itv)) {
- IVTV_ERR("Failed to initialize on device %s\n",
- video_device_node_name(vdev));
- mutex_unlock(&itv->serialize_lock);
- return -ENXIO;
- }
- res = ivtv_serialized_open(s, filp);
- mutex_unlock(&itv->serialize_lock);
- return res;
-}
-
void ivtv_mute(struct ivtv *itv)
{
if (atomic_read(&itv->capturing))
diff --git a/trunk/drivers/media/video/ivtv/ivtv-ioctl.c b/trunk/drivers/media/video/ivtv/ivtv-ioctl.c
index ecafa697326e..c4bc48143098 100644
--- a/trunk/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/trunk/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -179,6 +179,7 @@ int ivtv_set_speed(struct ivtv *itv, int speed)
ivtv_vapi(itv, CX2341X_DEC_PAUSE_PLAYBACK, 1, 0);
/* Wait for any DMA to finish */
+ mutex_unlock(&itv->serialize_lock);
prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
while (test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
got_sig = signal_pending(current);
@@ -188,6 +189,7 @@ int ivtv_set_speed(struct ivtv *itv, int speed)
schedule();
}
finish_wait(&itv->dma_waitq, &wait);
+ mutex_lock(&itv->serialize_lock);
if (got_sig)
return -EINTR;
@@ -1107,6 +1109,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std)
* happens within the first 100 lines of the top field.
* Make 4 attempts to sync to the decoder before giving up.
*/
+ mutex_unlock(&itv->serialize_lock);
for (f = 0; f < 4; f++) {
prepare_to_wait(&itv->vsync_waitq, &wait,
TASK_UNINTERRUPTIBLE);
@@ -1115,6 +1118,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std)
schedule_timeout(msecs_to_jiffies(25));
}
finish_wait(&itv->vsync_waitq, &wait);
+ mutex_lock(&itv->serialize_lock);
if (f == 4)
IVTV_WARN("Mode change failed to sync to decoder\n");
@@ -1842,8 +1846,7 @@ static long ivtv_default(struct file *file, void *fh, bool valid_prio,
return 0;
}
-static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
- unsigned int cmd, unsigned long arg)
+long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct video_device *vfd = video_devdata(filp);
long ret;
@@ -1855,21 +1858,6 @@ static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
return ret;
}
-long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- struct ivtv_open_id *id = fh2id(filp->private_data);
- struct ivtv *itv = id->itv;
- long res;
-
- /* DQEVENT can block, so this should not run with the serialize lock */
- if (cmd == VIDIOC_DQEVENT)
- return ivtv_serialized_ioctl(itv, filp, cmd, arg);
- mutex_lock(&itv->serialize_lock);
- res = ivtv_serialized_ioctl(itv, filp, cmd, arg);
- mutex_unlock(&itv->serialize_lock);
- return res;
-}
-
static const struct v4l2_ioctl_ops ivtv_ioctl_ops = {
.vidioc_querycap = ivtv_querycap,
.vidioc_s_audio = ivtv_s_audio,
diff --git a/trunk/drivers/media/video/ivtv/ivtv-irq.c b/trunk/drivers/media/video/ivtv/ivtv-irq.c
index 9c29e964d400..1b3b9578bf47 100644
--- a/trunk/drivers/media/video/ivtv/ivtv-irq.c
+++ b/trunk/drivers/media/video/ivtv/ivtv-irq.c
@@ -288,13 +288,13 @@ static void dma_post(struct ivtv_stream *s)
ivtv_process_vbi_data(itv, buf, 0, s->type);
s->q_dma.bytesused += buf->bytesused;
}
- if (s->id == -1) {
+ if (s->fh == NULL) {
ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
return;
}
}
ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
- if (s->id != -1)
+ if (s->fh)
wake_up(&s->waitq);
}
diff --git a/trunk/drivers/media/video/ivtv/ivtv-streams.c b/trunk/drivers/media/video/ivtv/ivtv-streams.c
index e7794dc1330e..c6e28b4ebbed 100644
--- a/trunk/drivers/media/video/ivtv/ivtv-streams.c
+++ b/trunk/drivers/media/video/ivtv/ivtv-streams.c
@@ -159,7 +159,6 @@ static void ivtv_stream_init(struct ivtv *itv, int type)
s->buffers = (itv->options.kilobytes[type] * 1024 + s->buf_size - 1) / s->buf_size;
spin_lock_init(&s->qlock);
init_waitqueue_head(&s->waitq);
- s->id = -1;
s->sg_handle = IVTV_DMA_UNMAPPED;
ivtv_queue_init(&s->q_free);
ivtv_queue_init(&s->q_full);
@@ -214,6 +213,7 @@ static int ivtv_prep_dev(struct ivtv *itv, int type)
s->vdev->fops = ivtv_stream_info[type].fops;
s->vdev->release = video_device_release;
s->vdev->tvnorms = V4L2_STD_ALL;
+ s->vdev->lock = &itv->serialize_lock;
set_bit(V4L2_FL_USE_FH_PRIO, &s->vdev->flags);
ivtv_set_funcs(s->vdev);
return 0;
diff --git a/trunk/drivers/media/video/ivtv/ivtv-yuv.c b/trunk/drivers/media/video/ivtv/ivtv-yuv.c
index dcbab6ad4c26..2ad65eb29832 100644
--- a/trunk/drivers/media/video/ivtv/ivtv-yuv.c
+++ b/trunk/drivers/media/video/ivtv/ivtv-yuv.c
@@ -1149,23 +1149,37 @@ int ivtv_yuv_udma_stream_frame(struct ivtv *itv, void __user *src)
{
struct yuv_playback_info *yi = &itv->yuv_info;
struct ivtv_dma_frame dma_args;
+ int res;
ivtv_yuv_setup_stream_frame(itv);
/* We only need to supply source addresses for this */
dma_args.y_source = src;
dma_args.uv_source = src + 720 * ((yi->v4l2_src_h + 31) & ~31);
- return ivtv_yuv_udma_frame(itv, &dma_args);
+ /* Wait for frame DMA. Note that serialize_lock is held, so to
+ allow other processes to access the driver while we are waiting,
+ unlock it first and lock it again afterwards. */
+ mutex_unlock(&itv->serialize_lock);
+ res = ivtv_yuv_udma_frame(itv, &dma_args);
+ mutex_lock(&itv->serialize_lock);
+ return res;
}
/* IVTV_IOC_DMA_FRAME ioctl handler */
int ivtv_yuv_prep_frame(struct ivtv *itv, struct ivtv_dma_frame *args)
{
-/* IVTV_DEBUG_INFO("yuv_prep_frame\n"); */
+ int res;
+/* IVTV_DEBUG_INFO("yuv_prep_frame\n"); */
ivtv_yuv_next_free(itv);
ivtv_yuv_setup_frame(itv, args);
- return ivtv_yuv_udma_frame(itv, args);
+ /* Wait for frame DMA. Note that serialize_lock is held, so to
+ allow other processes to access the driver while we are waiting,
+ unlock it first and lock it again afterwards. */
+ mutex_unlock(&itv->serialize_lock);
+ res = ivtv_yuv_udma_frame(itv, args);
+ mutex_lock(&itv->serialize_lock);
+ return res;
}
void ivtv_yuv_close(struct ivtv *itv)
@@ -1174,7 +1188,9 @@ void ivtv_yuv_close(struct ivtv *itv)
int h_filter, v_filter_1, v_filter_2;
IVTV_DEBUG_YUV("ivtv_yuv_close\n");
+ mutex_unlock(&itv->serialize_lock);
ivtv_waitq(&itv->vsync_waitq);
+ mutex_lock(&itv->serialize_lock);
yi->running = 0;
atomic_set(&yi->next_dma_frame, -1);
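
The ivtv changes in the files above move long waits - free buffers, DMA completion, vsync - outside serialize_lock, which now also serves as the video_device lock, so a sleeping reader no longer stalls every other file handle. The same release-the-lock-while-waiting idea in a runnable pthread sketch (pthread_cond_wait performs the unlock/relock implicitly; build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t serialize_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  dma_done = PTHREAD_COND_INITIALIZER;
static int dma_pending = 1;

static void *irq_thread(void *arg)
{
	sleep(1);                               /* pretend DMA takes a while */
	pthread_mutex_lock(&serialize_lock);
	dma_pending = 0;
	pthread_cond_signal(&dma_done);
	pthread_mutex_unlock(&serialize_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, irq_thread, NULL);

	pthread_mutex_lock(&serialize_lock);
	/* ... serialized driver work ... */

	/* The wait drops the lock while sleeping and retakes it before
	 * returning - the same unlock/relock the driver now does by hand
	 * around its wait queues. */
	while (dma_pending)
		pthread_cond_wait(&dma_done, &serialize_lock);

	pthread_mutex_unlock(&serialize_lock);
	pthread_join(t, NULL);
	printf("DMA finished\n");
	return 0;
}
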
diff --git a/trunk/drivers/media/video/omap/omap_vout.c b/trunk/drivers/media/video/omap/omap_vout.c
index a277f95091ef..1fb7d5bd5ec2 100644
--- a/trunk/drivers/media/video/omap/omap_vout.c
+++ b/trunk/drivers/media/video/omap/omap_vout.c
@@ -1042,7 +1042,8 @@ static int vidioc_querycap(struct file *file, void *fh,
strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver));
strlcpy(cap->card, vout->vfd->name, sizeof(cap->card));
cap->bus_info[0] = '\0';
- cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT;
+ cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
return 0;
}
@@ -1825,7 +1826,9 @@ static int vidioc_g_fbuf(struct file *file, void *fh,
ovid = &vout->vid_info;
ovl = ovid->overlays[0];
- a->flags = 0x0;
+ /* The video overlay must stay within the framebuffer and can't be
+ positioned independently. */
+ a->flags = V4L2_FBUF_FLAG_OVERLAY;
a->capability = V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_CHROMAKEY
| V4L2_FBUF_CAP_SRC_CHROMAKEY;
diff --git a/trunk/drivers/media/video/pwc/pwc-ctrl.c b/trunk/drivers/media/video/pwc/pwc-ctrl.c
index 905d41d90c6a..1f506fde97d0 100644
--- a/trunk/drivers/media/video/pwc/pwc-ctrl.c
+++ b/trunk/drivers/media/video/pwc/pwc-ctrl.c
@@ -104,47 +104,16 @@ static struct Nala_table_entry Nala_table[PSZ_MAX][PWC_FPS_MAX_NALA] =
/****************************************************************************/
-static int _send_control_msg(struct pwc_device *pdev,
- u8 request, u16 value, int index, void *buf, int buflen)
-{
- int rc;
- void *kbuf = NULL;
-
- if (buflen) {
- kbuf = kmemdup(buf, buflen, GFP_KERNEL); /* not allowed on stack */
- if (kbuf == NULL)
- return -ENOMEM;
- }
-
- rc = usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
- request,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value,
- index,
- kbuf, buflen, USB_CTRL_SET_TIMEOUT);
-
- kfree(kbuf);
- return rc;
-}
-
static int recv_control_msg(struct pwc_device *pdev,
- u8 request, u16 value, void *buf, int buflen)
+ u8 request, u16 value, int recv_count)
{
int rc;
- void *kbuf = kmalloc(buflen, GFP_KERNEL); /* not allowed on stack */
-
- if (kbuf == NULL)
- return -ENOMEM;
rc = usb_control_msg(pdev->udev, usb_rcvctrlpipe(pdev->udev, 0),
request,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value,
- pdev->vcinterface,
- kbuf, buflen, USB_CTRL_GET_TIMEOUT);
- memcpy(buf, kbuf, buflen);
- kfree(kbuf);
-
+ value, pdev->vcinterface,
+ pdev->ctrl_buf, recv_count, USB_CTRL_GET_TIMEOUT);
if (rc < 0)
PWC_ERROR("recv_control_msg error %d req %02x val %04x\n",
rc, request, value);
@@ -152,27 +121,39 @@ static int recv_control_msg(struct pwc_device *pdev,
}
static inline int send_video_command(struct pwc_device *pdev,
- int index, void *buf, int buflen)
+ int index, const unsigned char *buf, int buflen)
{
- return _send_control_msg(pdev,
- SET_EP_STREAM_CTL,
- VIDEO_OUTPUT_CONTROL_FORMATTER,
- index,
- buf, buflen);
+ int rc;
+
+ memcpy(pdev->ctrl_buf, buf, buflen);
+
+ rc = usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
+ SET_EP_STREAM_CTL,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ VIDEO_OUTPUT_CONTROL_FORMATTER, index,
+ pdev->ctrl_buf, buflen, USB_CTRL_SET_TIMEOUT);
+ if (rc >= 0)
+ memcpy(pdev->cmd_buf, buf, buflen);
+ else
+ PWC_ERROR("send_video_command error %d\n", rc);
+
+ return rc;
}
int send_control_msg(struct pwc_device *pdev,
u8 request, u16 value, void *buf, int buflen)
{
- return _send_control_msg(pdev,
- request, value, pdev->vcinterface, buf, buflen);
+ return usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
+ request,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, pdev->vcinterface,
+ buf, buflen, USB_CTRL_SET_TIMEOUT);
}
-static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames,
- int *compression)
+static int set_video_mode_Nala(struct pwc_device *pdev, int size, int pixfmt,
+ int frames, int *compression, int send_to_cam)
{
- unsigned char buf[3];
- int ret, fps;
+ int fps, ret = 0;
struct Nala_table_entry *pEntry;
int frames2frames[31] =
{ /* closest match of framerate */
@@ -194,30 +175,29 @@ static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames,
7 /* 30 */
};
- if (size < 0 || size > PSZ_CIF || frames < 4 || frames > 25)
+ if (size < 0 || size > PSZ_CIF)
return -EINVAL;
+ if (frames < 4)
+ frames = 4;
+ else if (frames > 25)
+ frames = 25;
frames = frames2frames[frames];
fps = frames2table[frames];
pEntry = &Nala_table[size][fps];
if (pEntry->alternate == 0)
return -EINVAL;
- memcpy(buf, pEntry->mode, 3);
- ret = send_video_command(pdev, pdev->vendpoint, buf, 3);
- if (ret < 0) {
- PWC_DEBUG_MODULE("Failed to send video command... %d\n", ret);
+ if (send_to_cam)
+ ret = send_video_command(pdev, pdev->vendpoint,
+ pEntry->mode, 3);
+ if (ret < 0)
return ret;
- }
- if (pEntry->compressed && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
- ret = pwc_dec1_init(pdev, pdev->type, pdev->release, buf);
- if (ret < 0)
- return ret;
- }
- pdev->cmd_len = 3;
- memcpy(pdev->cmd_buf, buf, 3);
+ if (pEntry->compressed && pixfmt == V4L2_PIX_FMT_YUV420)
+ pwc_dec1_init(pdev, pEntry->mode);
/* Set various parameters */
+ pdev->pixfmt = pixfmt;
pdev->vframes = frames;
pdev->valternate = pEntry->alternate;
pdev->width = pwc_image_sizes[size][0];
@@ -243,18 +223,20 @@ static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames,
}
-static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames,
- int *compression)
+static int set_video_mode_Timon(struct pwc_device *pdev, int size, int pixfmt,
+ int frames, int *compression, int send_to_cam)
{
- unsigned char buf[13];
const struct Timon_table_entry *pChoose;
- int ret, fps;
+ int fps, ret = 0;
- if (size >= PSZ_MAX || frames < 5 || frames > 30 ||
- *compression < 0 || *compression > 3)
- return -EINVAL;
- if (size == PSZ_VGA && frames > 15)
+ if (size >= PSZ_MAX || *compression < 0 || *compression > 3)
return -EINVAL;
+ if (frames < 5)
+ frames = 5;
+ else if (size == PSZ_VGA && frames > 15)
+ frames = 15;
+ else if (frames > 30)
+ frames = 30;
fps = (frames / 5) - 1;
/* Find a supported framerate with progressively higher compression */
@@ -268,22 +250,18 @@ static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames,
if (pChoose == NULL || pChoose->alternate == 0)
return -ENOENT; /* Not supported. */
- memcpy(buf, pChoose->mode, 13);
- ret = send_video_command(pdev, pdev->vendpoint, buf, 13);
+ if (send_to_cam)
+ ret = send_video_command(pdev, pdev->vendpoint,
+ pChoose->mode, 13);
if (ret < 0)
return ret;
- if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
- ret = pwc_dec23_init(pdev, pdev->type, buf);
- if (ret < 0)
- return ret;
- }
-
- pdev->cmd_len = 13;
- memcpy(pdev->cmd_buf, buf, 13);
+ if (pChoose->bandlength > 0 && pixfmt == V4L2_PIX_FMT_YUV420)
+ pwc_dec23_init(pdev, pChoose->mode);
/* Set various parameters */
- pdev->vframes = frames;
+ pdev->pixfmt = pixfmt;
+ pdev->vframes = (fps + 1) * 5;
pdev->valternate = pChoose->alternate;
pdev->width = pwc_image_sizes[size][0];
pdev->height = pwc_image_sizes[size][1];
@@ -296,18 +274,20 @@ static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames,
}
-static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames,
- int *compression)
+static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int pixfmt,
+ int frames, int *compression, int send_to_cam)
{
const struct Kiara_table_entry *pChoose = NULL;
- int fps, ret;
- unsigned char buf[12];
+ int fps, ret = 0;
- if (size >= PSZ_MAX || frames < 5 || frames > 30 ||
- *compression < 0 || *compression > 3)
- return -EINVAL;
- if (size == PSZ_VGA && frames > 15)
+ if (size >= PSZ_MAX || *compression < 0 || *compression > 3)
return -EINVAL;
+ if (frames < 5)
+ frames = 5;
+ else if (size == PSZ_VGA && frames > 15)
+ frames = 15;
+ else if (frames > 30)
+ frames = 30;
fps = (frames / 5) - 1;
/* Find a supported framerate with progressively higher compression */
@@ -320,26 +300,18 @@ static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames,
if (pChoose == NULL || pChoose->alternate == 0)
return -ENOENT; /* Not supported. */
- PWC_TRACE("Using alternate setting %d.\n", pChoose->alternate);
-
- /* usb_control_msg won't take staticly allocated arrays as argument?? */
- memcpy(buf, pChoose->mode, 12);
-
/* Firmware bug: video endpoint is 5, but commands are sent to endpoint 4 */
- ret = send_video_command(pdev, 4 /* pdev->vendpoint */, buf, 12);
+ if (send_to_cam)
+ ret = send_video_command(pdev, 4, pChoose->mode, 12);
if (ret < 0)
return ret;
- if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
- ret = pwc_dec23_init(pdev, pdev->type, buf);
- if (ret < 0)
- return ret;
- }
+ if (pChoose->bandlength > 0 && pixfmt == V4L2_PIX_FMT_YUV420)
+ pwc_dec23_init(pdev, pChoose->mode);
- pdev->cmd_len = 12;
- memcpy(pdev->cmd_buf, buf, 12);
/* All set and go */
- pdev->vframes = frames;
+ pdev->pixfmt = pixfmt;
+ pdev->vframes = (fps + 1) * 5;
pdev->valternate = pChoose->alternate;
pdev->width = pwc_image_sizes[size][0];
pdev->height = pwc_image_sizes[size][1];
@@ -354,22 +326,24 @@ static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames,
}
int pwc_set_video_mode(struct pwc_device *pdev, int width, int height,
- int frames, int *compression)
+ int pixfmt, int frames, int *compression, int send_to_cam)
{
int ret, size;
- PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, pixfmt %08x).\n", width, height, frames, pdev->pixfmt);
+ PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, pixfmt %08x).\n",
+ width, height, frames, pixfmt);
size = pwc_get_size(pdev, width, height);
PWC_TRACE("decode_size = %d.\n", size);
if (DEVICE_USE_CODEC1(pdev->type)) {
- ret = set_video_mode_Nala(pdev, size, frames, compression);
-
+ ret = set_video_mode_Nala(pdev, size, pixfmt, frames,
+ compression, send_to_cam);
} else if (DEVICE_USE_CODEC3(pdev->type)) {
- ret = set_video_mode_Kiara(pdev, size, frames, compression);
-
+ ret = set_video_mode_Kiara(pdev, size, pixfmt, frames,
+ compression, send_to_cam);
} else {
- ret = set_video_mode_Timon(pdev, size, frames, compression);
+ ret = set_video_mode_Timon(pdev, size, pixfmt, frames,
+ compression, send_to_cam);
}
if (ret < 0) {
PWC_ERROR("Failed to set video mode %s@%d fps; return code = %d\n", size2name[size], frames, ret);
@@ -436,13 +410,12 @@ unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned i
int pwc_get_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
{
int ret;
- u8 buf;
- ret = recv_control_msg(pdev, request, value, &buf, sizeof(buf));
+ ret = recv_control_msg(pdev, request, value, 1);
if (ret < 0)
return ret;
- *data = buf;
+ *data = pdev->ctrl_buf[0];
return 0;
}
@@ -450,7 +423,8 @@ int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data)
{
int ret;
- ret = send_control_msg(pdev, request, value, &data, sizeof(data));
+ pdev->ctrl_buf[0] = data;
+ ret = send_control_msg(pdev, request, value, pdev->ctrl_buf, 1);
if (ret < 0)
return ret;
@@ -460,37 +434,34 @@ int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data)
int pwc_get_s8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
{
int ret;
- s8 buf;
- ret = recv_control_msg(pdev, request, value, &buf, sizeof(buf));
+ ret = recv_control_msg(pdev, request, value, 1);
if (ret < 0)
return ret;
- *data = buf;
+ *data = ((s8 *)pdev->ctrl_buf)[0];
return 0;
}
int pwc_get_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
{
int ret;
- u8 buf[2];
- ret = recv_control_msg(pdev, request, value, buf, sizeof(buf));
+ ret = recv_control_msg(pdev, request, value, 2);
if (ret < 0)
return ret;
- *data = (buf[1] << 8) | buf[0];
+ *data = (pdev->ctrl_buf[1] << 8) | pdev->ctrl_buf[0];
return 0;
}
int pwc_set_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, u16 data)
{
int ret;
- u8 buf[2];
- buf[0] = data & 0xff;
- buf[1] = data >> 8;
- ret = send_control_msg(pdev, request, value, buf, sizeof(buf));
+ pdev->ctrl_buf[0] = data & 0xff;
+ pdev->ctrl_buf[1] = data >> 8;
+ ret = send_control_msg(pdev, request, value, pdev->ctrl_buf, 2);
if (ret < 0)
return ret;
@@ -511,7 +482,6 @@ int pwc_button_ctrl(struct pwc_device *pdev, u16 value)
/* POWER */
void pwc_camera_power(struct pwc_device *pdev, int power)
{
- char buf;
int r;
if (!pdev->power_save)
@@ -521,13 +491,11 @@ void pwc_camera_power(struct pwc_device *pdev, int power)
return; /* Not supported by Nala or Timon < release 6 */
if (power)
- buf = 0x00; /* active */
+ pdev->ctrl_buf[0] = 0x00; /* active */
else
- buf = 0xFF; /* power save */
- r = send_control_msg(pdev,
- SET_STATUS_CTL, SET_POWER_SAVE_MODE_FORMATTER,
- &buf, sizeof(buf));
-
+ pdev->ctrl_buf[0] = 0xFF; /* power save */
+ r = send_control_msg(pdev, SET_STATUS_CTL,
+ SET_POWER_SAVE_MODE_FORMATTER, pdev->ctrl_buf, 1);
if (r < 0)
PWC_ERROR("Failed to power %s camera (%d)\n",
power ? "on" : "off", r);
@@ -535,7 +503,6 @@ void pwc_camera_power(struct pwc_device *pdev, int power)
int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
{
- unsigned char buf[2];
int r;
if (pdev->type < 730)
@@ -551,11 +518,11 @@ int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
if (off_value > 0xff)
off_value = 0xff;
- buf[0] = on_value;
- buf[1] = off_value;
+ pdev->ctrl_buf[0] = on_value;
+ pdev->ctrl_buf[1] = off_value;
r = send_control_msg(pdev,
- SET_STATUS_CTL, LED_FORMATTER, &buf, sizeof(buf));
+ SET_STATUS_CTL, LED_FORMATTER, pdev->ctrl_buf, 2);
if (r < 0)
PWC_ERROR("Failed to set LED on/off time (%d)\n", r);
@@ -565,7 +532,6 @@ int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
#ifdef CONFIG_USB_PWC_DEBUG
int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
{
- unsigned char buf;
int ret = -1, request;
if (pdev->type < 675)
@@ -575,14 +541,13 @@ int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
else
request = SENSOR_TYPE_FORMATTER2;
- ret = recv_control_msg(pdev,
- GET_STATUS_CTL, request, &buf, sizeof(buf));
+ ret = recv_control_msg(pdev, GET_STATUS_CTL, request, 1);
if (ret < 0)
return ret;
if (pdev->type < 675)
- *sensor = buf | 0x100;
+ *sensor = pdev->ctrl_buf[0] | 0x100;
else
- *sensor = buf;
+ *sensor = pdev->ctrl_buf[0];
return 0;
}
#endif
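
The u8/u16 control helpers above now stage their bytes in pdev->ctrl_buf, and the 16-bit ones transfer the low byte first, exactly as the old stack buffers did. A minimal standalone sketch of that byte order (not part of the patch; names are invented):

	#include <stdio.h>

	static void pack_le16(unsigned char buf[2], unsigned short val)
	{
		buf[0] = val & 0xff;            /* low byte first, as pwc_set_u16_ctrl() does */
		buf[1] = val >> 8;              /* high byte second */
	}

	static unsigned short unpack_le16(const unsigned char buf[2])
	{
		return (buf[1] << 8) | buf[0];  /* mirrors pwc_get_u16_ctrl() */
	}

	int main(void)
	{
		unsigned char buf[2];

		pack_le16(buf, 0x1234);
		printf("%02x %02x -> 0x%04x\n", buf[0], buf[1], unpack_le16(buf));
		return 0;
	}
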
diff --git a/trunk/drivers/media/video/pwc/pwc-dec1.c b/trunk/drivers/media/video/pwc/pwc-dec1.c
index be0e02cb487f..e899036aadf4 100644
--- a/trunk/drivers/media/video/pwc/pwc-dec1.c
+++ b/trunk/drivers/media/video/pwc/pwc-dec1.c
@@ -22,19 +22,11 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include "pwc-dec1.h"
+#include "pwc.h"
-int pwc_dec1_init(struct pwc_device *pwc, int type, int release, void *buffer)
+void pwc_dec1_init(struct pwc_device *pdev, const unsigned char *cmd)
{
- struct pwc_dec1_private *pdec;
+ struct pwc_dec1_private *pdec = &pdev->dec1;
- if (pwc->decompress_data == NULL) {
- pdec = kmalloc(sizeof(struct pwc_dec1_private), GFP_KERNEL);
- if (pdec == NULL)
- return -ENOMEM;
- pwc->decompress_data = pdec;
- }
- pdec = pwc->decompress_data;
-
- return 0;
+ pdec->version = pdev->release;
}
diff --git a/trunk/drivers/media/video/pwc/pwc-dec1.h b/trunk/drivers/media/video/pwc/pwc-dec1.h
index a57d8601080b..c565ef8f52fb 100644
--- a/trunk/drivers/media/video/pwc/pwc-dec1.h
+++ b/trunk/drivers/media/video/pwc/pwc-dec1.h
@@ -25,13 +25,15 @@
#ifndef PWC_DEC1_H
#define PWC_DEC1_H
-#include "pwc.h"
+#include
+
+struct pwc_device;
struct pwc_dec1_private
{
int version;
};
-int pwc_dec1_init(struct pwc_device *pwc, int type, int release, void *buffer);
+void pwc_dec1_init(struct pwc_device *pdev, const unsigned char *cmd);
#endif
diff --git a/trunk/drivers/media/video/pwc/pwc-dec23.c b/trunk/drivers/media/video/pwc/pwc-dec23.c
index 2c6709112b2f..3792fedff951 100644
--- a/trunk/drivers/media/video/pwc/pwc-dec23.c
+++ b/trunk/drivers/media/video/pwc/pwc-dec23.c
@@ -294,22 +294,17 @@ static unsigned char pwc_crop_table[256 + 2*MAX_OUTER_CROP_VALUE];
/* If the type or the command change, we rebuild the lookup table */
-int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd)
+void pwc_dec23_init(struct pwc_device *pdev, const unsigned char *cmd)
{
int flags, version, shift, i;
- struct pwc_dec23_private *pdec;
-
- if (pwc->decompress_data == NULL) {
- pdec = kmalloc(sizeof(struct pwc_dec23_private), GFP_KERNEL);
- if (pdec == NULL)
- return -ENOMEM;
- pwc->decompress_data = pdec;
- }
- pdec = pwc->decompress_data;
+ struct pwc_dec23_private *pdec = &pdev->dec23;
mutex_init(&pdec->lock);
- if (DEVICE_USE_CODEC3(type)) {
+ if (pdec->last_cmd_valid && pdec->last_cmd == cmd[2])
+ return;
+
+ if (DEVICE_USE_CODEC3(pdev->type)) {
flags = cmd[2] & 0x18;
if (flags == 8)
pdec->nbits = 7; /* More bits mean more bits to encode the stream, but better quality */
@@ -356,7 +351,8 @@ int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd)
pwc_crop_table[MAX_OUTER_CROP_VALUE+256+i] = 255;
#endif
- return 0;
+ pdec->last_cmd = cmd[2];
+ pdec->last_cmd_valid = 1;
}
/*
@@ -659,12 +655,12 @@ static void DecompressBand23(struct pwc_dec23_private *pdec,
* src: raw data
* dst: image output
*/
-void pwc_dec23_decompress(const struct pwc_device *pwc,
+void pwc_dec23_decompress(struct pwc_device *pdev,
const void *src,
void *dst)
{
int bandlines_left, bytes_per_block;
- struct pwc_dec23_private *pdec = pwc->decompress_data;
+ struct pwc_dec23_private *pdec = &pdev->dec23;
/* YUV420P image format */
unsigned char *pout_planar_y;
@@ -674,23 +670,22 @@ void pwc_dec23_decompress(const struct pwc_device *pwc,
mutex_lock(&pdec->lock);
- bandlines_left = pwc->height / 4;
- bytes_per_block = pwc->width * 4;
- plane_size = pwc->height * pwc->width;
+ bandlines_left = pdev->height / 4;
+ bytes_per_block = pdev->width * 4;
+ plane_size = pdev->height * pdev->width;
pout_planar_y = dst;
pout_planar_u = dst + plane_size;
pout_planar_v = dst + plane_size + plane_size / 4;
while (bandlines_left--) {
- DecompressBand23(pwc->decompress_data,
- src,
+ DecompressBand23(pdec, src,
pout_planar_y, pout_planar_u, pout_planar_v,
- pwc->width, pwc->width);
- src += pwc->vbandlength;
+ pdev->width, pdev->width);
+ src += pdev->vbandlength;
pout_planar_y += bytes_per_block;
- pout_planar_u += pwc->width;
- pout_planar_v += pwc->width;
+ pout_planar_u += pdev->width;
+ pout_planar_v += pdev->width;
}
mutex_unlock(&pdec->lock);
}
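
The early return added to pwc_dec23_init() above caches the command byte that determines the lookup tables, so repeated mode changes with the same compression parameters skip the expensive rebuild. A stripped-down sketch of the same pattern (hypothetical names, not the driver code):

	struct table_cache {
		unsigned char last_cmd;
		unsigned char last_cmd_valid;
	};

	/* stand-in for the expensive table construction done in pwc-dec23.c */
	static void rebuild_tables(unsigned char cmd)
	{
	}

	static void cached_init(struct table_cache *c, unsigned char cmd)
	{
		if (c->last_cmd_valid && c->last_cmd == cmd)
			return;                 /* same parameters: keep the existing tables */

		rebuild_tables(cmd);
		c->last_cmd = cmd;
		c->last_cmd_valid = 1;
	}
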
diff --git a/trunk/drivers/media/video/pwc/pwc-dec23.h b/trunk/drivers/media/video/pwc/pwc-dec23.h
index d64a3c281af6..c655b1c1e6a9 100644
--- a/trunk/drivers/media/video/pwc/pwc-dec23.h
+++ b/trunk/drivers/media/video/pwc/pwc-dec23.h
@@ -25,17 +25,20 @@
#ifndef PWC_DEC23_H
#define PWC_DEC23_H
-#include "pwc.h"
+struct pwc_device;
struct pwc_dec23_private
{
struct mutex lock;
+ unsigned char last_cmd, last_cmd_valid;
+
unsigned int scalebits;
unsigned int nbitsmask, nbits; /* Number of bits of a color in the compressed stream */
unsigned int reservoir;
unsigned int nbits_in_reservoir;
+
const unsigned char *stream;
int temp_colors[16];
@@ -51,8 +54,8 @@ struct pwc_dec23_private
};
-int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd);
-void pwc_dec23_decompress(const struct pwc_device *pwc,
+void pwc_dec23_init(struct pwc_device *pdev, const unsigned char *cmd);
+void pwc_dec23_decompress(struct pwc_device *pdev,
const void *src,
void *dst);
#endif
diff --git a/trunk/drivers/media/video/pwc/pwc-if.c b/trunk/drivers/media/video/pwc/pwc-if.c
index 943d37ad0d33..122fbd0081eb 100644
--- a/trunk/drivers/media/video/pwc/pwc-if.c
+++ b/trunk/drivers/media/video/pwc/pwc-if.c
@@ -128,18 +128,11 @@ static struct usb_driver pwc_driver = {
#define MAX_DEV_HINTS 20
#define MAX_ISOC_ERRORS 20
-static int default_fps = 10;
#ifdef CONFIG_USB_PWC_DEBUG
int pwc_trace = PWC_DEBUG_LEVEL;
#endif
static int power_save = -1;
-static int led_on = 100, led_off; /* defaults to LED that is on while in use */
-static struct {
- int type;
- char serial_number[30];
- int device_node;
- struct pwc_device *pdev;
-} device_hint[MAX_DEV_HINTS];
+static int leds[2] = { 100, 0 };
/***/
@@ -386,8 +379,8 @@ static int pwc_isoc_init(struct pwc_device *pdev)
retry:
/* We first try with low compression and then retry with a higher
compression setting if there is not enough bandwidth. */
- ret = pwc_set_video_mode(pdev, pdev->width, pdev->height,
- pdev->vframes, &compression);
+ ret = pwc_set_video_mode(pdev, pdev->width, pdev->height, pdev->pixfmt,
+ pdev->vframes, &compression, 1);
/* Get the current alternate interface, adjust packet size */
intf = usb_ifnum_to_if(udev, 0);
@@ -597,23 +590,9 @@ int pwc_test_n_set_capt_file(struct pwc_device *pdev, struct file *file)
static void pwc_video_release(struct v4l2_device *v)
{
struct pwc_device *pdev = container_of(v, struct pwc_device, v4l2_dev);
- int hint;
-
- /* search device_hint[] table if we occupy a slot, by any chance */
- for (hint = 0; hint < MAX_DEV_HINTS; hint++)
- if (device_hint[hint].pdev == pdev)
- device_hint[hint].pdev = NULL;
-
- /* Free intermediate decompression buffer & tables */
- if (pdev->decompress_data != NULL) {
- PWC_DEBUG_MEMORY("Freeing decompression buffer at %p.\n",
- pdev->decompress_data);
- kfree(pdev->decompress_data);
- pdev->decompress_data = NULL;
- }
v4l2_ctrl_handler_free(&pdev->ctrl_handler);
-
+ kfree(pdev->ctrl_buf);
kfree(pdev);
}
@@ -758,7 +737,7 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
/* Turn on camera and set LEDS on */
pwc_camera_power(pdev, 1);
- pwc_set_leds(pdev, led_on, led_off);
+ pwc_set_leds(pdev, leds[0], leds[1]);
r = pwc_isoc_init(pdev);
if (r) {
@@ -813,10 +792,9 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
struct usb_device *udev = interface_to_usbdev(intf);
struct pwc_device *pdev = NULL;
int vendor_id, product_id, type_id;
- int hint, rc;
+ int rc;
int features = 0;
int compression = 0;
- int video_nr = -1; /* default: use next available device */
int my_power_save = power_save;
char serial_number[30], *name;
@@ -1076,7 +1054,6 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
return -ENOMEM;
}
pdev->type = type_id;
- pdev->vframes = default_fps;
pdev->features = features;
pwc_construct(pdev); /* set min/max sizes correct */
@@ -1107,24 +1084,14 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
pdev->release = le16_to_cpu(udev->descriptor.bcdDevice);
PWC_DEBUG_PROBE("Release: %04x\n", pdev->release);
- /* Now search device_hint[] table for a match, so we can hint a node number. */
- for (hint = 0; hint < MAX_DEV_HINTS; hint++) {
- if (((device_hint[hint].type == -1) || (device_hint[hint].type == pdev->type)) &&
- (device_hint[hint].pdev == NULL)) {
- /* so far, so good... try serial number */
- if ((device_hint[hint].serial_number[0] == '*') || !strcmp(device_hint[hint].serial_number, serial_number)) {
- /* match! */
- video_nr = device_hint[hint].device_node;
- PWC_DEBUG_PROBE("Found hint, will try to register as /dev/video%d\n", video_nr);
- break;
- }
- }
+ /* Allocate USB command buffers */
+ pdev->ctrl_buf = kmalloc(sizeof(pdev->cmd_buf), GFP_KERNEL);
+ if (!pdev->ctrl_buf) {
+ PWC_ERROR("Oops, could not allocate memory for pwc_device.\n");
+ rc = -ENOMEM;
+ goto err_free_mem;
}
- /* occupy slot */
- if (hint < MAX_DEV_HINTS)
- device_hint[hint].pdev = pdev;
-
#ifdef CONFIG_USB_PWC_DEBUG
/* Query sensor type */
if (pwc_get_cmos_sensor(pdev, &rc) >= 0) {
@@ -1138,8 +1105,8 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
pwc_set_leds(pdev, 0, 0);
/* Setup intial videomode */
- rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT, pdev->vframes,
- &compression);
+ rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT,
+ V4L2_PIX_FMT_YUV420, 30, &compression, 1);
if (rc)
goto err_free_mem;
@@ -1164,7 +1131,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
pdev->v4l2_dev.ctrl_handler = &pdev->ctrl_handler;
pdev->vdev.v4l2_dev = &pdev->v4l2_dev;
- rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, video_nr);
+ rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, -1);
if (rc < 0) {
PWC_ERROR("Failed to register as video device (%d).\n", rc);
goto err_unregister_v4l2_dev;
@@ -1207,8 +1174,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
err_free_controls:
v4l2_ctrl_handler_free(&pdev->ctrl_handler);
err_free_mem:
- if (hint < MAX_DEV_HINTS)
- device_hint[hint].pdev = NULL;
+ kfree(pdev->ctrl_buf);
kfree(pdev);
return rc;
}
@@ -1243,27 +1209,19 @@ static void usb_pwc_disconnect(struct usb_interface *intf)
* Initialization code & module stuff
*/
-static int fps;
-static int leds[2] = { -1, -1 };
static unsigned int leds_nargs;
-static char *dev_hint[MAX_DEV_HINTS];
-static unsigned int dev_hint_nargs;
-module_param(fps, int, 0444);
#ifdef CONFIG_USB_PWC_DEBUG
module_param_named(trace, pwc_trace, int, 0644);
#endif
module_param(power_save, int, 0644);
module_param_array(leds, int, &leds_nargs, 0444);
-module_param_array(dev_hint, charp, &dev_hint_nargs, 0444);
-MODULE_PARM_DESC(fps, "Initial frames per second. Varies with model, useful range 5-30");
#ifdef CONFIG_USB_PWC_DEBUG
MODULE_PARM_DESC(trace, "For debugging purposes");
#endif
MODULE_PARM_DESC(power_save, "Turn power saving for new cameras on or off");
MODULE_PARM_DESC(leds, "LED on,off time in milliseconds");
-MODULE_PARM_DESC(dev_hint, "Device node hints");
MODULE_DESCRIPTION("Philips & OEM USB webcam driver");
MODULE_AUTHOR("Luc Saillard ");
@@ -1273,114 +1231,13 @@ MODULE_VERSION( PWC_VERSION );
static int __init usb_pwc_init(void)
{
- int i;
-
-#ifdef CONFIG_USB_PWC_DEBUG
- PWC_INFO("Philips webcam module version " PWC_VERSION " loaded.\n");
- PWC_INFO("Supports Philips PCA645/646, PCVC675/680/690, PCVC720[40]/730/740/750 & PCVC830/840.\n");
- PWC_INFO("Also supports the Askey VC010, various Logitech Quickcams, Samsung MPC-C10 and MPC-C30,\n");
- PWC_INFO("the Creative WebCam 5 & Pro Ex, SOTEC Afina Eye and Visionite VCS-UC300 and VCS-UM100.\n");
-
- if (pwc_trace >= 0) {
- PWC_DEBUG_MODULE("Trace options: 0x%04x\n", pwc_trace);
- }
-#endif
-
- if (fps) {
- if (fps < 4 || fps > 30) {
- PWC_ERROR("Framerate out of bounds (4-30).\n");
- return -EINVAL;
- }
- default_fps = fps;
- PWC_DEBUG_MODULE("Default framerate set to %d.\n", default_fps);
- }
-
- if (leds[0] >= 0)
- led_on = leds[0];
- if (leds[1] >= 0)
- led_off = leds[1];
-
- /* Big device node whoopla. Basically, it allows you to assign a
- device node (/dev/videoX) to a camera, based on its type
- & serial number. The format is [type[.serialnumber]:]node.
-
- Any camera that isn't matched by these rules gets the next
- available free device node.
- */
- for (i = 0; i < MAX_DEV_HINTS; i++) {
- char *s, *colon, *dot;
-
- /* This loop also initializes the array */
- device_hint[i].pdev = NULL;
- s = dev_hint[i];
- if (s != NULL && *s != '\0') {
- device_hint[i].type = -1; /* wildcard */
- strcpy(device_hint[i].serial_number, "*");
-
- /* parse string: chop at ':' & '/' */
- colon = dot = s;
- while (*colon != '\0' && *colon != ':')
- colon++;
- while (*dot != '\0' && *dot != '.')
- dot++;
- /* Few sanity checks */
- if (*dot != '\0' && dot > colon) {
- PWC_ERROR("Malformed camera hint: the colon must be after the dot.\n");
- return -EINVAL;
- }
-
- if (*colon == '\0') {
- /* No colon */
- if (*dot != '\0') {
- PWC_ERROR("Malformed camera hint: no colon + device node given.\n");
- return -EINVAL;
- }
- else {
- /* No type or serial number specified, just a number. */
- device_hint[i].device_node =
- simple_strtol(s, NULL, 10);
- }
- }
- else {
- /* There's a colon, so we have at least a type and a device node */
- device_hint[i].type =
- simple_strtol(s, NULL, 10);
- device_hint[i].device_node =
- simple_strtol(colon + 1, NULL, 10);
- if (*dot != '\0') {
- /* There's a serial number as well */
- int k;
-
- dot++;
- k = 0;
- while (*dot != ':' && k < 29) {
- device_hint[i].serial_number[k++] = *dot;
- dot++;
- }
- device_hint[i].serial_number[k] = '\0';
- }
- }
- PWC_TRACE("device_hint[%d]:\n", i);
- PWC_TRACE(" type : %d\n", device_hint[i].type);
- PWC_TRACE(" serial# : %s\n", device_hint[i].serial_number);
- PWC_TRACE(" node : %d\n", device_hint[i].device_node);
- }
- else
- device_hint[i].type = 0; /* not filled */
- } /* ..for MAX_DEV_HINTS */
-
- PWC_DEBUG_PROBE("Registering driver at address 0x%p.\n", &pwc_driver);
return usb_register(&pwc_driver);
}
static void __exit usb_pwc_exit(void)
{
- PWC_DEBUG_MODULE("Deregistering driver.\n");
usb_deregister(&pwc_driver);
- PWC_INFO("Philips webcam module removed.\n");
}
module_init(usb_pwc_init);
module_exit(usb_pwc_exit);
-
-/* vim: set cino= formatoptions=croql cindent shiftwidth=8 tabstop=8: */
diff --git a/trunk/drivers/media/video/pwc/pwc-misc.c b/trunk/drivers/media/video/pwc/pwc-misc.c
index 23a55b5814fc..9be5adffa874 100644
--- a/trunk/drivers/media/video/pwc/pwc-misc.c
+++ b/trunk/drivers/media/video/pwc/pwc-misc.c
@@ -90,5 +90,4 @@ void pwc_construct(struct pwc_device *pdev)
pdev->frame_header_size = 0;
pdev->frame_trailer_size = 0;
}
- pdev->pixfmt = V4L2_PIX_FMT_YUV420; /* default */
}
diff --git a/trunk/drivers/media/video/pwc/pwc-v4l.c b/trunk/drivers/media/video/pwc/pwc-v4l.c
index 80e25842e84a..f495eeb5403a 100644
--- a/trunk/drivers/media/video/pwc/pwc-v4l.c
+++ b/trunk/drivers/media/video/pwc/pwc-v4l.c
@@ -493,16 +493,11 @@ static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
(pixelformat>>24)&255);
ret = pwc_set_video_mode(pdev, f->fmt.pix.width, f->fmt.pix.height,
- pdev->vframes, &compression);
+ pixelformat, 30, &compression, 0);
PWC_DEBUG_IOCTL("pwc_set_video_mode(), return=%d\n", ret);
- if (ret == 0) {
- pdev->pixfmt = pixelformat;
- pwc_vidioc_fill_fmt(f, pdev->width, pdev->height,
- pdev->pixfmt);
- }
-
+ pwc_vidioc_fill_fmt(f, pdev->width, pdev->height, pdev->pixfmt);
leave:
mutex_unlock(&pdev->udevlock);
return ret;
@@ -777,33 +772,33 @@ static int pwc_set_autogain_expo(struct pwc_device *pdev)
static int pwc_set_motor(struct pwc_device *pdev)
{
int ret;
- u8 buf[4];
- buf[0] = 0;
+ pdev->ctrl_buf[0] = 0;
if (pdev->motor_pan_reset->is_new)
- buf[0] |= 0x01;
+ pdev->ctrl_buf[0] |= 0x01;
if (pdev->motor_tilt_reset->is_new)
- buf[0] |= 0x02;
+ pdev->ctrl_buf[0] |= 0x02;
if (pdev->motor_pan_reset->is_new || pdev->motor_tilt_reset->is_new) {
ret = send_control_msg(pdev, SET_MPT_CTL,
- PT_RESET_CONTROL_FORMATTER, buf, 1);
+ PT_RESET_CONTROL_FORMATTER,
+ pdev->ctrl_buf, 1);
if (ret < 0)
return ret;
}
- memset(buf, 0, sizeof(buf));
+ memset(pdev->ctrl_buf, 0, 4);
if (pdev->motor_pan->is_new) {
- buf[0] = pdev->motor_pan->val & 0xFF;
- buf[1] = (pdev->motor_pan->val >> 8);
+ pdev->ctrl_buf[0] = pdev->motor_pan->val & 0xFF;
+ pdev->ctrl_buf[1] = (pdev->motor_pan->val >> 8);
}
if (pdev->motor_tilt->is_new) {
- buf[2] = pdev->motor_tilt->val & 0xFF;
- buf[3] = (pdev->motor_tilt->val >> 8);
+ pdev->ctrl_buf[2] = pdev->motor_tilt->val & 0xFF;
+ pdev->ctrl_buf[3] = (pdev->motor_tilt->val >> 8);
}
if (pdev->motor_pan->is_new || pdev->motor_tilt->is_new) {
ret = send_control_msg(pdev, SET_MPT_CTL,
PT_RELATIVE_CONTROL_FORMATTER,
- buf, sizeof(buf));
+ pdev->ctrl_buf, 4);
if (ret < 0)
return ret;
}
@@ -1094,6 +1089,63 @@ static int pwc_enum_frameintervals(struct file *file, void *fh,
return 0;
}
+static int pwc_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *parm)
+{
+ struct pwc_device *pdev = video_drvdata(file);
+
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ memset(parm, 0, sizeof(*parm));
+
+ parm->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ parm->parm.capture.readbuffers = MIN_FRAMES;
+ parm->parm.capture.capability |= V4L2_CAP_TIMEPERFRAME;
+ parm->parm.capture.timeperframe.denominator = pdev->vframes;
+ parm->parm.capture.timeperframe.numerator = 1;
+
+ return 0;
+}
+
+static int pwc_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *parm)
+{
+ struct pwc_device *pdev = video_drvdata(file);
+ int compression = 0;
+ int ret, fps;
+
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ parm->parm.capture.timeperframe.numerator == 0)
+ return -EINVAL;
+
+ if (pwc_test_n_set_capt_file(pdev, file))
+ return -EBUSY;
+
+ fps = parm->parm.capture.timeperframe.denominator /
+ parm->parm.capture.timeperframe.numerator;
+
+ mutex_lock(&pdev->udevlock);
+ if (!pdev->udev) {
+ ret = -ENODEV;
+ goto leave;
+ }
+
+ if (pdev->iso_init) {
+ ret = -EBUSY;
+ goto leave;
+ }
+
+ ret = pwc_set_video_mode(pdev, pdev->width, pdev->height, pdev->pixfmt,
+ fps, &compression, 0);
+
+ pwc_g_parm(file, fh, parm);
+
+leave:
+ mutex_unlock(&pdev->udevlock);
+ return ret;
+}
+
static int pwc_log_status(struct file *file, void *priv)
{
struct pwc_device *pdev = video_drvdata(file);
@@ -1120,4 +1172,6 @@ const struct v4l2_ioctl_ops pwc_ioctl_ops = {
.vidioc_log_status = pwc_log_status,
.vidioc_enum_framesizes = pwc_enum_framesizes,
.vidioc_enum_frameintervals = pwc_enum_frameintervals,
+ .vidioc_g_parm = pwc_g_parm,
+ .vidioc_s_parm = pwc_s_parm,
};
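
pwc_s_parm() above derives a frame rate from the V4L2 frame interval (a fraction in seconds per frame) by integer division, and pwc_g_parm() reports the current rate back as 1/vframes. A small sketch of that conversion (illustrative only):

	/* V4L2 timeperframe is numerator/denominator seconds per frame. */
	static unsigned int fract_to_fps(unsigned int numerator,
					 unsigned int denominator)
	{
		if (numerator == 0)
			return 0;       /* pwc_s_parm() rejects this case with -EINVAL */
		return denominator / numerator;
	}

	/* fract_to_fps(1, 30) == 30; fract_to_fps(2, 15) == 7 (integer division) */
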
diff --git a/trunk/drivers/media/video/pwc/pwc.h b/trunk/drivers/media/video/pwc/pwc.h
index 47c518fef179..e4d4d711dd1f 100644
--- a/trunk/drivers/media/video/pwc/pwc.h
+++ b/trunk/drivers/media/video/pwc/pwc.h
@@ -44,6 +44,8 @@
#ifdef CONFIG_USB_PWC_INPUT_EVDEV
#include
#endif
+#include "pwc-dec1.h"
+#include "pwc-dec23.h"
/* Version block */
#define PWC_VERSION "10.0.15"
@@ -132,9 +134,6 @@
#define DEVICE_USE_CODEC3(x) ((x)>=700)
#define DEVICE_USE_CODEC23(x) ((x)>=675)
-/* from pwc-dec.h */
-#define PWCX_FLAG_PLANAR 0x0001
-
/* Request types: video */
#define SET_LUM_CTL 0x01
#define GET_LUM_CTL 0x02
@@ -248,8 +247,8 @@ struct pwc_device
char vmirror; /* for ToUCaM series */
char power_save; /* Do powersaving for this cam */
- int cmd_len;
unsigned char cmd_buf[13];
+ unsigned char *ctrl_buf;
struct urb *urbs[MAX_ISO_BUFS];
char iso_init;
@@ -272,7 +271,10 @@ struct pwc_device
int frame_total_size; /* including header & trailer */
int drop_frames;
- void *decompress_data; /* private data for decompression engine */
+ union { /* private data for decompression engine */
+ struct pwc_dec1_private dec1;
+ struct pwc_dec23_private dec23;
+ };
/*
* We have an 'image' and a 'view', where 'image' is the fixed-size img
@@ -364,7 +366,7 @@ void pwc_construct(struct pwc_device *pdev);
/** Functions in pwc-ctrl.c */
/* Request a certain video mode. Returns < 0 if not possible */
extern int pwc_set_video_mode(struct pwc_device *pdev, int width, int height,
- int frames, int *compression);
+ int pixfmt, int frames, int *compression, int send_to_cam);
extern unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned int size);
extern int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value);
extern int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor);
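
The anonymous union added to struct pwc_device above embeds whichever decompressor state the device needs directly in the structure, replacing the old kmalloc()'d decompress_data pointer; since a camera uses either the dec1 or the dec23 engine, the union only costs the size of the larger member. A reduced sketch of the idea (hypothetical structure names):

	struct engine_a_state { int version; };
	struct engine_b_state { int nbits, scalebits; };

	struct example_dev {
		union {                         /* mutually exclusive private data */
			struct engine_a_state a;
			struct engine_b_state b;
		};                              /* C11/GNU anonymous union, as used in the kernel */
	};

	static void use_engine_a(struct example_dev *dev)
	{
		dev->a.version = 1;             /* no separate allocation or void * cast needed */
	}
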
diff --git a/trunk/drivers/media/video/s5p-fimc/fimc-capture.c b/trunk/drivers/media/video/s5p-fimc/fimc-capture.c
index 510cfab477ff..a9e9653beeb4 100644
--- a/trunk/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/trunk/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -693,7 +693,7 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
mf->code = 0;
continue;
}
- if (mf->width != tfmt->width || mf->width != tfmt->width) {
+ if (mf->width != tfmt->width || mf->height != tfmt->height) {
u32 fcc = ffmt->fourcc;
tfmt->width = mf->width;
tfmt->height = mf->height;
@@ -702,7 +702,8 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
NULL, &fcc, FIMC_SD_PAD_SOURCE);
if (ffmt && ffmt->mbus_code)
mf->code = ffmt->mbus_code;
- if (mf->width != tfmt->width || mf->width != tfmt->width)
+ if (mf->width != tfmt->width ||
+ mf->height != tfmt->height)
continue;
tfmt->code = mf->code;
}
@@ -710,7 +711,7 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
ret = v4l2_subdev_call(csis, pad, set_fmt, NULL, &sfmt);
if (mf->code == tfmt->code &&
- mf->width == tfmt->width && mf->width == tfmt->width)
+ mf->width == tfmt->width && mf->height == tfmt->height)
break;
}
diff --git a/trunk/drivers/media/video/s5p-fimc/fimc-core.c b/trunk/drivers/media/video/s5p-fimc/fimc-core.c
index f5cbb8a4c540..81bcbb9492ea 100644
--- a/trunk/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/trunk/drivers/media/video/s5p-fimc/fimc-core.c
@@ -848,11 +848,11 @@ int fimc_ctrls_create(struct fimc_ctx *ctx)
v4l2_ctrl_handler_init(&ctx->ctrl_handler, 4);
ctx->ctrl_rotate = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
- V4L2_CID_HFLIP, 0, 1, 1, 0);
+ V4L2_CID_ROTATE, 0, 270, 90, 0);
ctx->ctrl_hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
- V4L2_CID_VFLIP, 0, 1, 1, 0);
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
ctx->ctrl_vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
- V4L2_CID_ROTATE, 0, 270, 90, 0);
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
if (variant->has_alpha)
ctx->ctrl_alpha = v4l2_ctrl_new_std(&ctx->ctrl_handler,
&fimc_ctrl_ops, V4L2_CID_ALPHA_COMPONENT,
diff --git a/trunk/drivers/media/video/s5p-fimc/fimc-mdevice.c b/trunk/drivers/media/video/s5p-fimc/fimc-mdevice.c
index 615c862f0360..8ea4ee116e46 100644
--- a/trunk/drivers/media/video/s5p-fimc/fimc-mdevice.c
+++ b/trunk/drivers/media/video/s5p-fimc/fimc-mdevice.c
@@ -21,7 +21,6 @@
#include
#include
#include
-#include
#include
#include
diff --git a/trunk/drivers/media/video/s5p-g2d/g2d.c b/trunk/drivers/media/video/s5p-g2d/g2d.c
index c40b0dde1883..febaa673d363 100644
--- a/trunk/drivers/media/video/s5p-g2d/g2d.c
+++ b/trunk/drivers/media/video/s5p-g2d/g2d.c
@@ -184,6 +184,7 @@ static int g2d_s_ctrl(struct v4l2_ctrl *ctrl)
ctx->rop = ROP4_INVERT;
else
ctx->rop = ROP4_COPY;
+ break;
default:
v4l2_err(&ctx->dev->v4l2_dev, "unknown control\n");
return -EINVAL;
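
The break inserted above matters because, without it, a successfully handled control falls through into the default: label, logs "unknown control" and fails. A self-contained illustration of the fallthrough (made-up names, not the driver code):

	#include <stdio.h>

	static int set_ctrl(int id, int have_break)
	{
		switch (id) {
		case 1:
			/* control handled here */
			if (have_break)
				break;          /* with the break we leave the switch and return 0 */
			/* without it, execution continues into default below */
		default:
			printf("unknown control\n");
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		printf("with break: %d\n", set_ctrl(1, 1));     /* 0  */
		printf("fallthrough: %d\n", set_ctrl(1, 0));    /* -1 */
		return 0;
	}
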
diff --git a/trunk/drivers/media/video/s5p-jpeg/jpeg-core.c b/trunk/drivers/media/video/s5p-jpeg/jpeg-core.c
index f841a3e9845c..1105a8749c8b 100644
--- a/trunk/drivers/media/video/s5p-jpeg/jpeg-core.c
+++ b/trunk/drivers/media/video/s5p-jpeg/jpeg-core.c
@@ -989,9 +989,10 @@ static struct v4l2_m2m_ops s5p_jpeg_m2m_ops = {
* ============================================================================
*/
-static int s5p_jpeg_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
- unsigned int *nplanes, unsigned int sizes[],
- void *alloc_ctxs[])
+static int s5p_jpeg_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);
struct s5p_jpeg_q_data *q_data = NULL;
diff --git a/trunk/drivers/media/video/s5p-mfc/s5p_mfc.c b/trunk/drivers/media/video/s5p-mfc/s5p_mfc.c
index e43e128baf5f..83fe461af263 100644
--- a/trunk/drivers/media/video/s5p-mfc/s5p_mfc.c
+++ b/trunk/drivers/media/video/s5p-mfc/s5p_mfc.c
@@ -18,7 +18,6 @@
#include
#include
#include
-#include
#include
#include
#include
@@ -475,7 +474,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
ctx->mv_size = 0;
}
ctx->dpb_count = s5p_mfc_get_dpb_count();
- if (ctx->img_width == 0 || ctx->img_width == 0)
+ if (ctx->img_width == 0 || ctx->img_height == 0)
ctx->state = MFCINST_ERROR;
else
ctx->state = MFCINST_HEAD_PARSED;
diff --git a/trunk/drivers/media/video/s5p-mfc/s5p_mfc_dec.c b/trunk/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
index 844a4d7797bc..c25ec022d267 100644
--- a/trunk/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
+++ b/trunk/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
@@ -165,7 +165,7 @@ static struct mfc_control controls[] = {
.maximum = 32,
.step = 1,
.default_value = 1,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
+ .is_volatile = 1,
},
};
diff --git a/trunk/drivers/media/video/saa7164/saa7164-cards.c b/trunk/drivers/media/video/saa7164/saa7164-cards.c
index 971591d6450f..5b72da5ce418 100644
--- a/trunk/drivers/media/video/saa7164/saa7164-cards.c
+++ b/trunk/drivers/media/video/saa7164/saa7164-cards.c
@@ -269,8 +269,6 @@ struct saa7164_board saa7164_boards[] = {
.portb = SAA7164_MPEG_DVB,
.portc = SAA7164_MPEG_ENCODER,
.portd = SAA7164_MPEG_ENCODER,
- .portc = SAA7164_MPEG_ENCODER,
- .portd = SAA7164_MPEG_ENCODER,
.porte = SAA7164_MPEG_VBI,
.portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV3,
@@ -333,8 +331,6 @@ struct saa7164_board saa7164_boards[] = {
.portd = SAA7164_MPEG_ENCODER,
.porte = SAA7164_MPEG_VBI,
.portf = SAA7164_MPEG_VBI,
- .porte = SAA7164_MPEG_VBI,
- .portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV3,
.unit = {{
.id = 0x28,
diff --git a/trunk/drivers/media/video/tlg2300/pd-main.c b/trunk/drivers/media/video/tlg2300/pd-main.c
index 129f135d5a5f..c096b3f74200 100644
--- a/trunk/drivers/media/video/tlg2300/pd-main.c
+++ b/trunk/drivers/media/video/tlg2300/pd-main.c
@@ -374,7 +374,7 @@ static inline void set_map_flags(struct poseidon *pd, struct usb_device *udev)
}
#endif
-static bool check_firmware(struct usb_device *udev, int *down_firmware)
+static int check_firmware(struct usb_device *udev, int *down_firmware)
{
void *buf;
int ret;
@@ -398,7 +398,7 @@ static bool check_firmware(struct usb_device *udev, int *down_firmware)
*down_firmware = 1;
return firmware_download(udev);
}
- return ret;
+ return 0;
}
static int poseidon_probe(struct usb_interface *interface,
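
check_firmware() above was declared bool yet propagated negative errno values (and the result of firmware_download()), which a bool return type silently collapses to true; changing the return type to int and returning 0 on success keeps the error codes intact. A small demonstration of the truncation (illustrative only; ENOMEM chosen arbitrarily):

	#include <stdbool.h>
	#include <stdio.h>

	#define ENOMEM 12

	static bool as_bool(void) { return -ENOMEM; }   /* converts to true (1): the code is lost */
	static int  as_int(void)  { return -ENOMEM; }   /* the caller still sees -12 */

	int main(void)
	{
		printf("bool: %d, int: %d\n", as_bool(), as_int());     /* prints "bool: 1, int: -12" */
		return 0;
	}
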
diff --git a/trunk/drivers/media/video/v4l2-ctrls.c b/trunk/drivers/media/video/v4l2-ctrls.c
index da1f4c2d2d4b..cccd42be718a 100644
--- a/trunk/drivers/media/video/v4l2-ctrls.c
+++ b/trunk/drivers/media/video/v4l2-ctrls.c
@@ -465,8 +465,8 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_CHROMA_GAIN: return "Chroma Gain";
case V4L2_CID_ILLUMINATORS_1: return "Illuminator 1";
case V4L2_CID_ILLUMINATORS_2: return "Illuminator 2";
- case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Minimum Number of Capture Buffers";
- case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Minimum Number of Output Buffers";
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Min Number of Capture Buffers";
+ case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Min Number of Output Buffers";
case V4L2_CID_ALPHA_COMPONENT: return "Alpha Component";
/* MPEG controls */
@@ -506,25 +506,25 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV";
case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: return "Decoder Slice Interface";
case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: return "MPEG4 Loop Filter Enable";
- case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "The Number of Intra Refresh MBs";
+ case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "Number of Intra Refresh MBs";
case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: return "Frame Level Rate Control Enable";
case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: return "H264 MB Level Rate Control";
case V4L2_CID_MPEG_VIDEO_HEADER_MODE: return "Sequence Header Mode";
- case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "The Max Number of Reference Picture";
+ case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "Max Number of Reference Pics";
case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP: return "H263 I-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P frame QP Value";
- case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_H263_MIN_QP: return "H263 Minimum QP Value";
case V4L2_CID_MPEG_VIDEO_H263_MAX_QP: return "H263 Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: return "H264 I-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P frame QP Value";
- case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: return "H264 Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: return "H264 Minimum QP Value";
case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: return "H264 8x8 Transform Enable";
case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE: return "H264 CPB Buffer Size";
- case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entorpy Mode";
- case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I Period";
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entropy Mode";
+ case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I-Frame Period";
case V4L2_CID_MPEG_VIDEO_H264_LEVEL: return "H264 Level";
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: return "H264 Loop Filter Alpha Offset";
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: return "H264 Loop Filter Beta Offset";
@@ -535,16 +535,16 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: return "Aspect Ratio VUI Enable";
case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return "VUI Aspect Ratio IDC";
case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P frame QP Value";
- case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP: return "MPEG4 Minimum QP Value";
case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP: return "MPEG4 Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: return "MPEG4 Level";
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: return "MPEG4 Profile";
case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: return "Quarter Pixel Search Enable";
- case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "The Maximum Bytes Per Slice";
- case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "The Number of MB in a Slice";
- case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "The Slice Partitioning Method";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "Maximum Bytes in a Slice";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "Number of MBs in a Slice";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "Slice Partitioning Method";
case V4L2_CID_MPEG_VIDEO_VBV_SIZE: return "VBV Buffer Size";
/* CAMERA controls */
@@ -580,7 +580,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_AUDIO_LIMITER_ENABLED: return "Audio Limiter Feature Enabled";
case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time";
case V4L2_CID_AUDIO_LIMITER_DEVIATION: return "Audio Limiter Deviation";
- case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Feature Enabled";
+ case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Enabled";
case V4L2_CID_AUDIO_COMPRESSION_GAIN: return "Audio Compression Gain";
case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold";
case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time";
@@ -588,24 +588,24 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_PILOT_TONE_ENABLED: return "Pilot Tone Feature Enabled";
case V4L2_CID_PILOT_TONE_DEVIATION: return "Pilot Tone Deviation";
case V4L2_CID_PILOT_TONE_FREQUENCY: return "Pilot Tone Frequency";
- case V4L2_CID_TUNE_PREEMPHASIS: return "Pre-emphasis settings";
+ case V4L2_CID_TUNE_PREEMPHASIS: return "Pre-Emphasis";
case V4L2_CID_TUNE_POWER_LEVEL: return "Tune Power Level";
case V4L2_CID_TUNE_ANTENNA_CAPACITOR: return "Tune Antenna Capacitor";
/* Flash controls */
- case V4L2_CID_FLASH_CLASS: return "Flash controls";
- case V4L2_CID_FLASH_LED_MODE: return "LED mode";
- case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe source";
+ case V4L2_CID_FLASH_CLASS: return "Flash Controls";
+ case V4L2_CID_FLASH_LED_MODE: return "LED Mode";
+ case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe Source";
case V4L2_CID_FLASH_STROBE: return "Strobe";
- case V4L2_CID_FLASH_STROBE_STOP: return "Stop strobe";
- case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe status";
- case V4L2_CID_FLASH_TIMEOUT: return "Strobe timeout";
- case V4L2_CID_FLASH_INTENSITY: return "Intensity, flash mode";
- case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, torch mode";
- case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, indicator";
+ case V4L2_CID_FLASH_STROBE_STOP: return "Stop Strobe";
+ case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe Status";
+ case V4L2_CID_FLASH_TIMEOUT: return "Strobe Timeout";
+ case V4L2_CID_FLASH_INTENSITY: return "Intensity, Flash Mode";
+ case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, Torch Mode";
+ case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, Indicator";
case V4L2_CID_FLASH_FAULT: return "Faults";
case V4L2_CID_FLASH_CHARGE: return "Charge";
- case V4L2_CID_FLASH_READY: return "Ready to strobe";
+ case V4L2_CID_FLASH_READY: return "Ready to Strobe";
default:
return NULL;
diff --git a/trunk/drivers/media/video/v4l2-ioctl.c b/trunk/drivers/media/video/v4l2-ioctl.c
index 77feeb67e2db..3f623859a337 100644
--- a/trunk/drivers/media/video/v4l2-ioctl.c
+++ b/trunk/drivers/media/video/v4l2-ioctl.c
@@ -1871,6 +1871,7 @@ static long __video_do_ioctl(struct file *file,
case VIDIOC_S_FREQUENCY:
{
struct v4l2_frequency *p = arg;
+ enum v4l2_tuner_type type;
if (!ops->vidioc_s_frequency)
break;
@@ -1878,9 +1879,14 @@ static long __video_do_ioctl(struct file *file,
ret = ret_prio;
break;
}
+ type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
dbgarg(cmd, "tuner=%d, type=%d, frequency=%d\n",
p->tuner, p->type, p->frequency);
- ret = ops->vidioc_s_frequency(file, fh, p);
+ if (p->type != type)
+ ret = -EINVAL;
+ else
+ ret = ops->vidioc_s_frequency(file, fh, p);
break;
}
case VIDIOC_G_SLICED_VBI_CAP:
diff --git a/trunk/drivers/media/video/zoran/zoran_driver.c b/trunk/drivers/media/video/zoran/zoran_driver.c
index f6d26419445e..4c09ab781ec3 100644
--- a/trunk/drivers/media/video/zoran/zoran_driver.c
+++ b/trunk/drivers/media/video/zoran/zoran_driver.c
@@ -1958,7 +1958,6 @@ static int zoran_g_fbuf(struct file *file, void *__fh,
mutex_unlock(&zr->resource_lock);
fb->fmt.colorspace = V4L2_COLORSPACE_SRGB;
fb->fmt.field = V4L2_FIELD_INTERLACED;
- fb->flags = V4L2_FBUF_FLAG_OVERLAY;
fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING;
return 0;
diff --git a/trunk/drivers/pci/pci.c b/trunk/drivers/pci/pci.c
index 97fff785e97e..af295bb21d62 100644
--- a/trunk/drivers/pci/pci.c
+++ b/trunk/drivers/pci/pci.c
@@ -2802,7 +2802,7 @@ pci_intx(struct pci_dev *pdev, int enable)
/**
* pci_intx_mask_supported - probe for INTx masking support
- * @pdev: the PCI device to operate on
+ * @dev: the PCI device to operate on
*
* Check if the device dev supports INTx masking via the config space
* command word.
@@ -2884,7 +2884,7 @@ static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
/**
* pci_check_and_mask_intx - mask INTx on pending interrupt
- * @pdev: the PCI device to operate on
+ * @dev: the PCI device to operate on
*
* Check if the device dev has its INTx line asserted, mask it and
* return true in that case. False is returned if no interrupt was
@@ -2898,7 +2898,7 @@ EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
/**
* pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
- * @pdev: the PCI device to operate on
+ * @dev: the PCI device to operate on
*
* Check if the device dev has its INTx line asserted, unmask it if not
* and return true. False is returned and the mask remains active if
diff --git a/trunk/drivers/regulator/core.c b/trunk/drivers/regulator/core.c
index ca86f39a0fdc..e9a83f84adaf 100644
--- a/trunk/drivers/regulator/core.c
+++ b/trunk/drivers/regulator/core.c
@@ -2731,6 +2731,8 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
* @dev: struct device for the regulator
* @init_data: platform provided init data, passed through by driver
* @driver_data: private regulator data
+ * @of_node: OpenFirmware node to parse for device tree bindings (may be
+ * NULL).
*
* Called by regulator drivers to register a regulator.
* Returns 0 on success.
diff --git a/trunk/drivers/scsi/Kconfig b/trunk/drivers/scsi/Kconfig
index 06ea3bcfdd2a..16570aa84aac 100644
--- a/trunk/drivers/scsi/Kconfig
+++ b/trunk/drivers/scsi/Kconfig
@@ -830,16 +830,11 @@ config SCSI_ISCI
tristate "Intel(R) C600 Series Chipset SAS Controller"
depends on PCI && SCSI
depends on X86
- # (temporary): known alpha quality driver
- depends on EXPERIMENTAL
select SCSI_SAS_LIBSAS
- select SCSI_SAS_HOST_SMP
---help---
This driver supports the 6Gb/s SAS capabilities of the storage
control unit found in the Intel(R) C600 series chipset.
- The experimental tag will be removed after the driver exits alpha
-
config SCSI_GENERIC_NCR5380
tristate "Generic NCR5380/53c400 SCSI PIO support"
depends on ISA && SCSI
diff --git a/trunk/drivers/scsi/bfa/bfa_defs_svc.h b/trunk/drivers/scsi/bfa/bfa_defs_svc.h
index 78963be2c4fb..cb07c628b2f1 100644
--- a/trunk/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/trunk/drivers/scsi/bfa/bfa_defs_svc.h
@@ -673,12 +673,7 @@ struct bfa_itnim_iostats_s {
u32 tm_iocdowns; /* TM cleaned-up due to IOC down */
u32 tm_cleanups; /* TM cleanup requests */
u32 tm_cleanup_comps; /* TM cleanup completions */
- u32 lm_lun_across_sg; /* LM lun is across sg data buf */
- u32 lm_lun_not_sup; /* LM lun not supported */
- u32 lm_rpl_data_changed; /* LM report-lun data changed */
- u32 lm_wire_residue_changed; /* LM report-lun rsp residue changed */
- u32 lm_small_buf_addresidue; /* LM buf smaller than reported cnt */
- u32 lm_lun_not_rdy; /* LM lun not ready */
+ u32 rsvd[6];
};
/* Modify char* port_stt[] in bfal_port.c if a new state was added */
diff --git a/trunk/drivers/scsi/bfa/bfa_fc.h b/trunk/drivers/scsi/bfa/bfa_fc.h
index 50b6a1c86195..8d0b88f67a38 100644
--- a/trunk/drivers/scsi/bfa/bfa_fc.h
+++ b/trunk/drivers/scsi/bfa/bfa_fc.h
@@ -56,161 +56,6 @@ struct scsi_cdb_s {
#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocation length */
-#define SCSI_SENSE_CUR_ERR 0x70
-#define SCSI_SENSE_DEF_ERR 0x71
-
-/*
- * SCSI additional sense codes
- */
-#define SCSI_ASC_LUN_NOT_READY 0x04
-#define SCSI_ASC_LUN_NOT_SUPPORTED 0x25
-#define SCSI_ASC_TOCC 0x3F
-
-/*
- * SCSI additional sense code qualifiers
- */
-#define SCSI_ASCQ_MAN_INTR_REQ 0x03 /* manual intervention req */
-#define SCSI_ASCQ_RL_DATA_CHANGED 0x0E /* report luns data changed */
-
-/*
- * Methods of reporting informational exceptions
- */
-#define SCSI_MP_IEC_UNIT_ATTN 0x2 /* generate unit attention */
-
-struct scsi_report_luns_data_s {
- u32 lun_list_length; /* length of LUN list length */
- u32 reserved;
- struct scsi_lun lun[1]; /* first LUN in lun list */
-};
-
-struct scsi_inquiry_vendor_s {
- u8 vendor_id[8];
-};
-
-struct scsi_inquiry_prodid_s {
- u8 product_id[16];
-};
-
-struct scsi_inquiry_prodrev_s {
- u8 product_rev[4];
-};
-
-struct scsi_inquiry_data_s {
-#ifdef __BIG_ENDIAN
- u8 peripheral_qual:3; /* peripheral qualifier */
- u8 device_type:5; /* peripheral device type */
- u8 rmb:1; /* removable medium bit */
- u8 device_type_mod:7; /* device type modifier */
- u8 version;
- u8 aenc:1; /* async evt notification capability */
- u8 trm_iop:1; /* terminate I/O process */
- u8 norm_aca:1; /* normal ACA supported */
- u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
- u8 rsp_data_format:4;
- u8 additional_len;
- u8 sccs:1;
- u8 reserved1:7;
- u8 reserved2:1;
- u8 enc_serv:1; /* enclosure service component */
- u8 reserved3:1;
- u8 multi_port:1; /* multi-port device */
- u8 m_chngr:1; /* device in medium transport element */
- u8 ack_req_q:1; /* SIP specific bit */
- u8 addr32:1; /* SIP specific bit */
- u8 addr16:1; /* SIP specific bit */
- u8 rel_adr:1; /* relative address */
- u8 w_bus32:1;
- u8 w_bus16:1;
- u8 synchronous:1;
- u8 linked_commands:1;
- u8 trans_dis:1;
- u8 cmd_queue:1; /* command queueing supported */
- u8 soft_reset:1; /* soft reset alternative (VS) */
-#else
- u8 device_type:5; /* peripheral device type */
- u8 peripheral_qual:3; /* peripheral qualifier */
- u8 device_type_mod:7; /* device type modifier */
- u8 rmb:1; /* removable medium bit */
- u8 version;
- u8 rsp_data_format:4;
- u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
- u8 norm_aca:1; /* normal ACA supported */
- u8 terminate_iop:1;/* terminate I/O process */
- u8 aenc:1; /* async evt notification capability */
- u8 additional_len;
- u8 reserved1:7;
- u8 sccs:1;
- u8 addr16:1; /* SIP specific bit */
- u8 addr32:1; /* SIP specific bit */
- u8 ack_req_q:1; /* SIP specific bit */
- u8 m_chngr:1; /* device in medium transport element */
- u8 multi_port:1; /* multi-port device */
- u8 reserved3:1; /* TBD - Vendor Specific */
- u8 enc_serv:1; /* enclosure service component */
- u8 reserved2:1;
- u8 soft_seset:1; /* soft reset alternative (VS) */
- u8 cmd_queue:1; /* command queueing supported */
- u8 trans_dis:1;
- u8 linked_commands:1;
- u8 synchronous:1;
- u8 w_bus16:1;
- u8 w_bus32:1;
- u8 rel_adr:1; /* relative address */
-#endif
- struct scsi_inquiry_vendor_s vendor_id;
- struct scsi_inquiry_prodid_s product_id;
- struct scsi_inquiry_prodrev_s product_rev;
- u8 vendor_specific[20];
- u8 reserved4[40];
-};
-
-/*
- * SCSI sense data format
- */
-struct scsi_sense_s {
-#ifdef __BIG_ENDIAN
- u8 valid:1;
- u8 rsp_code:7;
-#else
- u8 rsp_code:7;
- u8 valid:1;
-#endif
- u8 seg_num;
-#ifdef __BIG_ENDIAN
- u8 file_mark:1;
- u8 eom:1; /* end of media */
- u8 ili:1; /* incorrect length indicator */
- u8 reserved:1;
- u8 sense_key:4;
-#else
- u8 sense_key:4;
- u8 reserved:1;
- u8 ili:1; /* incorrect length indicator */
- u8 eom:1; /* end of media */
- u8 file_mark:1;
-#endif
- u8 information[4]; /* device-type or cmd specific info */
- u8 add_sense_length; /* additional sense length */
- u8 command_info[4];/* command specific information */
- u8 asc; /* additional sense code */
- u8 ascq; /* additional sense code qualifier */
- u8 fru_code; /* field replaceable unit code */
-#ifdef __BIG_ENDIAN
- u8 sksv:1; /* sense key specific valid */
- u8 c_d:1; /* command/data bit */
- u8 res1:2;
- u8 bpv:1; /* bit pointer valid */
- u8 bpointer:3; /* bit pointer */
-#else
- u8 bpointer:3; /* bit pointer */
- u8 bpv:1; /* bit pointer valid */
- u8 res1:2;
- u8 c_d:1; /* command/data bit */
- u8 sksv:1; /* sense key specific valid */
-#endif
- u8 fpointer[2]; /* field pointer */
-};
-
/*
* Fibre Channel Header Structure (FCHS) definition
*/
diff --git a/trunk/drivers/scsi/bfa/bfa_fcpim.c b/trunk/drivers/scsi/bfa/bfa_fcpim.c
index e07bd4745d8b..f0f80e282e39 100644
--- a/trunk/drivers/scsi/bfa/bfa_fcpim.c
+++ b/trunk/drivers/scsi/bfa/bfa_fcpim.c
@@ -24,8 +24,6 @@ BFA_TRC_FILE(HAL, FCPIM);
* BFA ITNIM Related definitions
*/
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
-static bfa_boolean_t bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim);
-static bfa_boolean_t bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim);
static void bfa_ioim_lm_init(struct bfa_s *bfa);
#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
@@ -60,14 +58,6 @@ static void bfa_ioim_lm_init(struct bfa_s *bfa);
} \
} while (0)
-#define bfa_ioim_rp_wwn(__ioim) \
- (((struct bfa_fcs_rport_s *) \
- (__ioim)->itnim->rport->rport_drv)->pwwn)
-
-#define bfa_ioim_lp_wwn(__ioim) \
- ((BFA_LPS_FROM_TAG(BFA_LPS_MOD((__ioim)->bfa), \
- (__ioim)->itnim->rport->rport_info.lp_tag))->pwwn) \
-
#define bfa_itnim_sler_cb(__itnim) do { \
if ((__itnim)->bfa->fcs) \
bfa_cb_itnim_sler((__itnim)->ditn); \
@@ -77,13 +67,6 @@ static void bfa_ioim_lm_init(struct bfa_s *bfa);
} \
} while (0)
-enum bfa_ioim_lm_status {
- BFA_IOIM_LM_PRESENT = 1,
- BFA_IOIM_LM_LUN_NOT_SUP = 2,
- BFA_IOIM_LM_RPL_DATA_CHANGED = 3,
- BFA_IOIM_LM_LUN_NOT_RDY = 4,
-};
-
enum bfa_ioim_lm_ua_status {
BFA_IOIM_LM_UA_RESET = 0,
BFA_IOIM_LM_UA_SET = 1,
@@ -145,9 +128,6 @@ enum bfa_ioim_event {
BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
- BFA_IOIM_SM_LM_LUN_NOT_SUP = 19,/* lunmask lun not supported */
- BFA_IOIM_SM_LM_RPL_DC = 20, /* lunmask report-lun data changed */
- BFA_IOIM_SM_LM_LUN_NOT_RDY = 21,/* lunmask lun not ready */
};
@@ -245,9 +225,6 @@ static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
-static void __bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete);
-static void __bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete);
-static void __bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete);
/*
* forward declaration of BFA IO state machine
@@ -445,12 +422,6 @@ bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
- bfa_fcpim_add_iostats(lstats, rstats, lm_lun_across_sg);
- bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_sup);
- bfa_fcpim_add_iostats(lstats, rstats, lm_rpl_data_changed);
- bfa_fcpim_add_iostats(lstats, rstats, lm_wire_residue_changed);
- bfa_fcpim_add_iostats(lstats, rstats, lm_small_buf_addresidue);
- bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_rdy);
}
bfa_status_t
@@ -1580,27 +1551,6 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
__bfa_cb_ioim_abort, ioim);
break;
- case BFA_IOIM_SM_LM_LUN_NOT_SUP:
- bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
- bfa_ioim_move_to_comp_q(ioim);
- bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
- __bfa_cb_ioim_lm_lun_not_sup, ioim);
- break;
-
- case BFA_IOIM_SM_LM_RPL_DC:
- bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
- bfa_ioim_move_to_comp_q(ioim);
- bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
- __bfa_cb_ioim_lm_rpl_dc, ioim);
- break;
-
- case BFA_IOIM_SM_LM_LUN_NOT_RDY:
- bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
- bfa_ioim_move_to_comp_q(ioim);
- bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
- __bfa_cb_ioim_lm_lun_not_rdy, ioim);
- break;
-
default:
bfa_sm_fault(ioim->bfa, event);
}
@@ -2160,243 +2110,6 @@ bfa_ioim_lm_init(struct bfa_s *bfa)
}
}
-/*
- * Validate LUN for LUN masking
- */
-static enum bfa_ioim_lm_status
-bfa_ioim_lm_check(struct bfa_ioim_s *ioim, struct bfa_lps_s *lps,
- struct bfa_rport_s *rp, struct scsi_lun lun)
-{
- u8 i;
- struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
- struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
- struct scsi_cdb_s *cdb = (struct scsi_cdb_s *)cmnd->cmnd;
-
- if ((cdb->scsi_cdb[0] == REPORT_LUNS) &&
- (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
- return BFA_IOIM_LM_PRESENT;
- }
-
- for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
-
- if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
- continue;
-
- if ((scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) ==
- scsilun_to_int((struct scsi_lun *)&lun))
- && (rp->rport_tag == lun_list[i].rp_tag)
- && ((u8)ioim->itnim->rport->rport_info.lp_tag ==
- lun_list[i].lp_tag)) {
- bfa_trc(ioim->bfa, lun_list[i].rp_tag);
- bfa_trc(ioim->bfa, lun_list[i].lp_tag);
- bfa_trc(ioim->bfa, scsilun_to_int(
- (struct scsi_lun *)&lun_list[i].lun));
-
- if ((lun_list[i].ua == BFA_IOIM_LM_UA_SET) &&
- ((cdb->scsi_cdb[0] != INQUIRY) ||
- (cdb->scsi_cdb[0] != REPORT_LUNS))) {
- lun_list[i].ua = BFA_IOIM_LM_UA_RESET;
- return BFA_IOIM_LM_RPL_DATA_CHANGED;
- }
-
- if (cdb->scsi_cdb[0] == REPORT_LUNS)
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
-
- return BFA_IOIM_LM_PRESENT;
- }
- }
-
- if ((cdb->scsi_cdb[0] == INQUIRY) &&
- (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
- ioim->proc_rsp_data = bfa_ioim_lm_proc_inq_data;
- return BFA_IOIM_LM_PRESENT;
- }
-
- if (cdb->scsi_cdb[0] == TEST_UNIT_READY)
- return BFA_IOIM_LM_LUN_NOT_RDY;
-
- return BFA_IOIM_LM_LUN_NOT_SUP;
-}
-
-static bfa_boolean_t
-bfa_ioim_lm_proc_rsp_data_dummy(struct bfa_ioim_s *ioim)
-{
- return BFA_TRUE;
-}
-
-static void
-bfa_ioim_lm_fetch_lun(struct bfa_ioim_s *ioim, u8 *rl_data, int offset,
- int buf_lun_cnt)
-{
- struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
- struct scsi_lun *lun_data = (struct scsi_lun *)(rl_data + offset);
- struct scsi_lun lun;
- int i, j;
-
- bfa_trc(ioim->bfa, buf_lun_cnt);
- for (j = 0; j < buf_lun_cnt; j++) {
- lun = *((struct scsi_lun *)(lun_data + j));
- for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
- if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
- continue;
- if ((lun_list[i].rp_wwn == bfa_ioim_rp_wwn(ioim)) &&
- (lun_list[i].lp_wwn == bfa_ioim_lp_wwn(ioim)) &&
- (scsilun_to_int((struct scsi_lun *)&lun_list[i].lun)
- == scsilun_to_int((struct scsi_lun *)&lun))) {
- lun_list[i].state = BFA_IOIM_LUN_MASK_FETCHED;
- break;
- }
- } /* next lun in mask DB */
- } /* next lun in buf */
-}
-
-static int
-bfa_ioim_lm_update_lun_sg(struct bfa_ioim_s *ioim, u32 *pgdlen,
- struct scsi_report_luns_data_s *rl)
-{
- struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
- struct scatterlist *sg = scsi_sglist(cmnd);
- struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
- struct scsi_lun *prev_rl_data = NULL, *base_rl_data;
- int i, j, sgeid, lun_fetched_cnt = 0, prev_sg_len = 0, base_count;
- int lun_across_sg_bytes, bytes_from_next_buf;
- u64 last_lun, temp_last_lun;
-
- /* fetch luns from the first sg element */
- bfa_ioim_lm_fetch_lun(ioim, (u8 *)(rl->lun), 0,
- (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1);
-
- /* fetch luns from multiple sg elements */
- scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), sgeid) {
- if (sgeid == 0) {
- prev_sg_len = sg_dma_len(sg);
- prev_rl_data = (struct scsi_lun *)
- phys_to_virt(sg_dma_address(sg));
- continue;
- }
-
- /* if the buf is having more data */
- lun_across_sg_bytes = prev_sg_len % sizeof(struct scsi_lun);
- if (lun_across_sg_bytes) {
- bfa_trc(ioim->bfa, lun_across_sg_bytes);
- bfa_stats(ioim->itnim, lm_lun_across_sg);
- bytes_from_next_buf = sizeof(struct scsi_lun) -
- lun_across_sg_bytes;
-
- /* from next buf take higher bytes */
- temp_last_lun = *((u64 *)
- phys_to_virt(sg_dma_address(sg)));
- last_lun |= temp_last_lun >>
- (lun_across_sg_bytes * BITS_PER_BYTE);
-
- /* from prev buf take higher bytes */
- temp_last_lun = *((u64 *)(prev_rl_data +
- (prev_sg_len - lun_across_sg_bytes)));
- temp_last_lun >>= bytes_from_next_buf * BITS_PER_BYTE;
- last_lun = last_lun | (temp_last_lun <<
- (bytes_from_next_buf * BITS_PER_BYTE));
-
- bfa_ioim_lm_fetch_lun(ioim, (u8 *)&last_lun, 0, 1);
- } else
- bytes_from_next_buf = 0;
-
- *pgdlen += sg_dma_len(sg);
- prev_sg_len = sg_dma_len(sg);
- prev_rl_data = (struct scsi_lun *)
- phys_to_virt(sg_dma_address(sg));
- bfa_ioim_lm_fetch_lun(ioim, (u8 *)prev_rl_data,
- bytes_from_next_buf,
- sg_dma_len(sg) / sizeof(struct scsi_lun));
- }
-
- /* update the report luns data - based on fetched luns */
- sg = scsi_sglist(cmnd);
- base_rl_data = (struct scsi_lun *)rl->lun;
- base_count = (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1;
- for (i = 0, j = 0; i < MAX_LUN_MASK_CFG; i++) {
- if (lun_list[i].state == BFA_IOIM_LUN_MASK_FETCHED) {
- base_rl_data[j] = lun_list[i].lun;
- lun_list[i].state = BFA_IOIM_LUN_MASK_ACTIVE;
- j++;
- lun_fetched_cnt++;
- }
-
- if (j > base_count) {
- j = 0;
- sg = sg_next(sg);
- base_rl_data = (struct scsi_lun *)
- phys_to_virt(sg_dma_address(sg));
- base_count = sg_dma_len(sg) / sizeof(struct scsi_lun);
- }
- }
-
- bfa_trc(ioim->bfa, lun_fetched_cnt);
- return lun_fetched_cnt;
-}
-
-static bfa_boolean_t
-bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim)
-{
- struct scsi_inquiry_data_s *inq;
- struct scatterlist *sg = scsi_sglist((struct scsi_cmnd *)ioim->dio);
-
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
- inq = (struct scsi_inquiry_data_s *)phys_to_virt(sg_dma_address(sg));
-
- bfa_trc(ioim->bfa, inq->device_type);
- inq->peripheral_qual = SCSI_INQ_PQ_NOT_CON;
- return 0;
-}
-
-static bfa_boolean_t
-bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim)
-{
- struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
- struct scatterlist *sg = scsi_sglist(cmnd);
- struct bfi_ioim_rsp_s *m;
- struct scsi_report_luns_data_s *rl = NULL;
- int lun_count = 0, lun_fetched_cnt = 0;
- u32 residue, pgdlen = 0;
-
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
- if (bfa_get_lun_mask_status(ioim->bfa) != BFA_LUNMASK_ENABLED)
- return BFA_TRUE;
-
- m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
- if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION)
- return BFA_TRUE;
-
- pgdlen = sg_dma_len(sg);
- bfa_trc(ioim->bfa, pgdlen);
- rl = (struct scsi_report_luns_data_s *)phys_to_virt(sg_dma_address(sg));
- lun_count = cpu_to_be32(rl->lun_list_length) / sizeof(struct scsi_lun);
- lun_fetched_cnt = bfa_ioim_lm_update_lun_sg(ioim, &pgdlen, rl);
-
- if (lun_count == lun_fetched_cnt)
- return BFA_TRUE;
-
- bfa_trc(ioim->bfa, lun_count);
- bfa_trc(ioim->bfa, lun_fetched_cnt);
- bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
-
- if (be32_to_cpu(rl->lun_list_length) <= pgdlen)
- rl->lun_list_length = be32_to_cpu(lun_fetched_cnt) *
- sizeof(struct scsi_lun);
- else
- bfa_stats(ioim->itnim, lm_small_buf_addresidue);
-
- bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
- bfa_trc(ioim->bfa, be32_to_cpu(m->residue));
-
- residue = be32_to_cpu(m->residue);
- residue += (lun_count - lun_fetched_cnt) * sizeof(struct scsi_lun);
- bfa_stats(ioim->itnim, lm_wire_residue_changed);
- m->residue = be32_to_cpu(residue);
- bfa_trc(ioim->bfa, ioim->nsges);
- return BFA_FALSE;
-}
-
static void
__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
{
@@ -2454,83 +2167,6 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
m->scsi_status, sns_len, snsinfo, residue);
}
-static void
-__bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete)
-{
- struct bfa_ioim_s *ioim = cbarg;
- int sns_len = 0xD;
- u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
- struct scsi_sense_s *snsinfo;
-
- if (!complete) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
- return;
- }
-
- snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
- ioim->fcpim->fcp, ioim->iotag);
- snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
- snsinfo->add_sense_length = 0xa;
- snsinfo->asc = SCSI_ASC_LUN_NOT_SUPPORTED;
- snsinfo->sense_key = ILLEGAL_REQUEST;
- bfa_trc(ioim->bfa, residue);
- bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
- SCSI_STATUS_CHECK_CONDITION, sns_len,
- (u8 *)snsinfo, residue);
-}
-
-static void
-__bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete)
-{
- struct bfa_ioim_s *ioim = cbarg;
- int sns_len = 0xD;
- u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
- struct scsi_sense_s *snsinfo;
-
- if (!complete) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
- return;
- }
-
- snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
- ioim->iotag);
- snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
- snsinfo->sense_key = SCSI_MP_IEC_UNIT_ATTN;
- snsinfo->asc = SCSI_ASC_TOCC;
- snsinfo->add_sense_length = 0x6;
- snsinfo->ascq = SCSI_ASCQ_RL_DATA_CHANGED;
- bfa_trc(ioim->bfa, residue);
- bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
- SCSI_STATUS_CHECK_CONDITION, sns_len,
- (u8 *)snsinfo, residue);
-}
-
-static void
-__bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete)
-{
- struct bfa_ioim_s *ioim = cbarg;
- int sns_len = 0xD;
- u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
- struct scsi_sense_s *snsinfo;
-
- if (!complete) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
- return;
- }
-
- snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
- ioim->fcpim->fcp, ioim->iotag);
- snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
- snsinfo->add_sense_length = 0xa;
- snsinfo->sense_key = NOT_READY;
- snsinfo->asc = SCSI_ASC_LUN_NOT_READY;
- snsinfo->ascq = SCSI_ASCQ_MAN_INTR_REQ;
- bfa_trc(ioim->bfa, residue);
- bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
- SCSI_STATUS_CHECK_CONDITION, sns_len,
- (u8 *)snsinfo, residue);
-}
-
void
bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
u16 rp_tag, u8 lp_tag)
@@ -2647,7 +2283,8 @@ bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
if (port) {
*pwwn = port->port_cfg.pwwn;
rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
- rp = rp_fcs->bfa_rport;
+ if (rp_fcs)
+ rp = rp_fcs->bfa_rport;
}
lunm_list = bfa_get_lun_mask_list(bfa);
@@ -2715,7 +2352,8 @@ bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
if (port) {
*pwwn = port->port_cfg.pwwn;
rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
- rp = rp_fcs->bfa_rport;
+ if (rp_fcs)
+ rp = rp_fcs->bfa_rport;
}
}
@@ -2757,7 +2395,6 @@ __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
return;
}
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
0, 0, NULL, 0);
}
@@ -2773,7 +2410,6 @@ __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
return;
}
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
0, 0, NULL, 0);
}
@@ -2788,7 +2424,6 @@ __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
return;
}
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
}
@@ -3132,7 +2767,6 @@ bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
ioim->bfa = fcpim->bfa;
ioim->fcpim = fcpim;
ioim->iosp = iosp;
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
INIT_LIST_HEAD(&ioim->sgpg_q);
bfa_reqq_winit(&ioim->iosp->reqq_wait,
bfa_ioim_qresume, ioim);
@@ -3170,7 +2804,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
evt = BFA_IOIM_SM_DONE;
else
evt = BFA_IOIM_SM_COMP;
- ioim->proc_rsp_data(ioim);
break;
case BFI_IOIM_STS_TIMEDOUT:
@@ -3206,7 +2839,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
if (rsp->abort_tag != ioim->abort_tag) {
bfa_trc(ioim->bfa, rsp->abort_tag);
bfa_trc(ioim->bfa, ioim->abort_tag);
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
return;
}
@@ -3225,7 +2857,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
WARN_ON(1);
}
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_sm_send_event(ioim, evt);
}
@@ -3244,15 +2875,7 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
bfa_ioim_cb_profile_comp(fcpim, ioim);
- if (bfa_get_lun_mask_status(bfa) != BFA_LUNMASK_ENABLED) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
- return;
- }
-
- if (ioim->proc_rsp_data(ioim) == BFA_TRUE)
- bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
- else
- bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP);
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
}
/*
@@ -3364,35 +2987,6 @@ bfa_ioim_free(struct bfa_ioim_s *ioim)
void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
- struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
- struct bfa_lps_s *lps;
- enum bfa_ioim_lm_status status;
- struct scsi_lun scsilun;
-
- if (bfa_get_lun_mask_status(ioim->bfa) == BFA_LUNMASK_ENABLED) {
- lps = BFA_IOIM_TO_LPS(ioim);
- int_to_scsilun(cmnd->device->lun, &scsilun);
- status = bfa_ioim_lm_check(ioim, lps,
- ioim->itnim->rport, scsilun);
- if (status == BFA_IOIM_LM_LUN_NOT_RDY) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_RDY);
- bfa_stats(ioim->itnim, lm_lun_not_rdy);
- return;
- }
-
- if (status == BFA_IOIM_LM_LUN_NOT_SUP) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_SUP);
- bfa_stats(ioim->itnim, lm_lun_not_sup);
- return;
- }
-
- if (status == BFA_IOIM_LM_RPL_DATA_CHANGED) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_RPL_DC);
- bfa_stats(ioim->itnim, lm_rpl_data_changed);
- return;
- }
- }
-
bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
/*
diff --git a/trunk/drivers/scsi/bfa/bfa_fcpim.h b/trunk/drivers/scsi/bfa/bfa_fcpim.h
index 1080bcb81cb7..36f26da80f76 100644
--- a/trunk/drivers/scsi/bfa/bfa_fcpim.h
+++ b/trunk/drivers/scsi/bfa/bfa_fcpim.h
@@ -110,7 +110,6 @@ struct bfad_ioim_s;
struct bfad_tskim_s;
typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
-typedef bfa_boolean_t (*bfa_ioim_lm_proc_rsp_data_t) (struct bfa_ioim_s *ioim);
struct bfa_fcpim_s {
struct bfa_s *bfa;
@@ -124,7 +123,6 @@ struct bfa_fcpim_s {
u32 path_tov;
u16 q_depth;
u8 reqq; /* Request queue to be used */
- u8 lun_masking_pending;
struct list_head itnim_q; /* queue of active itnim */
struct list_head ioim_resfree_q; /* IOs waiting for f/w */
struct list_head ioim_comp_q; /* IO global comp Q */
@@ -181,7 +179,6 @@ struct bfa_ioim_s {
u8 reqq; /* Request queue for I/O */
u8 mode; /* IO is passthrough or not */
u64 start_time; /* IO's Profile start val */
- bfa_ioim_lm_proc_rsp_data_t proc_rsp_data; /* RSP data adjust */
};
struct bfa_ioim_sp_s {
@@ -261,10 +258,6 @@ struct bfa_itnim_s {
(__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET; \
} while (0)
-#define BFA_IOIM_TO_LPS(__ioim) \
- BFA_LPS_FROM_TAG(BFA_LPS_MOD(__ioim->bfa), \
- __ioim->itnim->rport->rport_info.lp_tag)
-
static inline bfa_boolean_t
bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
{
diff --git a/trunk/drivers/scsi/bfa/bfa_svc.h b/trunk/drivers/scsi/bfa/bfa_svc.h
index 95adb86d3769..b52cbb6bcd5a 100644
--- a/trunk/drivers/scsi/bfa/bfa_svc.h
+++ b/trunk/drivers/scsi/bfa/bfa_svc.h
@@ -582,11 +582,6 @@ void bfa_cb_rport_qos_scn_prio(void *rport,
#define BFA_LP_TAG_INVALID 0xff
void bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
void bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
-bfa_boolean_t bfa_rport_lunmask_active(struct bfa_rport_s *rp);
-wwn_t bfa_rport_get_pwwn(struct bfa_s *bfa, struct bfa_rport_s *rp);
-struct bfa_rport_s *bfa_rport_get_by_wwn(struct bfa_s *bfa, u16 vf_id,
- wwn_t *lpwwn, wwn_t rpwwn);
-void *bfa_cb_get_rp_by_wwn(void *arg, u16 vf_id, wwn_t *lpwwn, wwn_t rpwwn);
/*
* bfa fcxp API functions
diff --git a/trunk/drivers/scsi/bfa/bfad.c b/trunk/drivers/scsi/bfa/bfad.c
index 66fb72531b34..404fd10ddb21 100644
--- a/trunk/drivers/scsi/bfa/bfad.c
+++ b/trunk/drivers/scsi/bfa/bfad.c
@@ -674,6 +674,7 @@ bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfa_fcs_vport_start(&vport->fcs_vport);
+ list_add_tail(&vport->list_entry, &bfad->vport_list);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
return BFA_STATUS_OK;
@@ -1404,6 +1405,7 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
bfad->ref_count = 0;
bfad->pport.bfad = bfad;
INIT_LIST_HEAD(&bfad->pbc_vport_list);
+ INIT_LIST_HEAD(&bfad->vport_list);
/* Setup the debugfs node for this bfad */
if (bfa_debugfs_enable)
diff --git a/trunk/drivers/scsi/bfa/bfad_attr.c b/trunk/drivers/scsi/bfa/bfad_attr.c
index 9d95844ab463..1938fe0473e9 100644
--- a/trunk/drivers/scsi/bfa/bfad_attr.c
+++ b/trunk/drivers/scsi/bfa/bfad_attr.c
@@ -491,7 +491,7 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
free_scsi_host:
bfad_scsi_host_free(bfad, im_port);
-
+ list_del(&vport->list_entry);
kfree(vport);
return 0;
diff --git a/trunk/drivers/scsi/bfa/bfad_bsg.c b/trunk/drivers/scsi/bfa/bfad_bsg.c
index 06fc00caeb41..530de2b1200a 100644
--- a/trunk/drivers/scsi/bfa/bfad_bsg.c
+++ b/trunk/drivers/scsi/bfa/bfad_bsg.c
@@ -2394,6 +2394,21 @@ bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
return 0;
}
+/* Function to reset the LUN SCAN mode */
+static void
+bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
+{
+ struct bfad_im_port_s *pport_im = bfad->pport.im_port;
+ struct bfad_vport_s *vport = NULL;
+
+ /* Set the scsi device LUN SCAN flags for base port */
+ bfad_reset_sdev_bflags(pport_im, lunmask_cfg);
+
+ /* Set the scsi device LUN SCAN flags for the vports */
+ list_for_each_entry(vport, &bfad->vport_list, list_entry)
+ bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
+}
+
int
bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
{
@@ -2401,11 +2416,17 @@ bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
unsigned long flags;
spin_lock_irqsave(&bfad->bfad_lock, flags);
- if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE)
+ if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
- else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE)
+ /* Set the LUN Scanning mode to be Sequential scan */
+ if (iocmd->status == BFA_STATUS_OK)
+ bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
+ } else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
- else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
+ /* Set the LUN Scanning mode to default REPORT_LUNS scan */
+ if (iocmd->status == BFA_STATUS_OK)
+ bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
+ } else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
return 0;
diff --git a/trunk/drivers/scsi/bfa/bfad_drv.h b/trunk/drivers/scsi/bfa/bfad_drv.h
index 5e19a5f820ec..dc5b9d99c450 100644
--- a/trunk/drivers/scsi/bfa/bfad_drv.h
+++ b/trunk/drivers/scsi/bfa/bfad_drv.h
@@ -43,6 +43,7 @@
#include
#include
#include
+#include
#include "bfa_modules.h"
#include "bfa_fcs.h"
@@ -227,6 +228,7 @@ struct bfad_s {
struct list_head active_aen_q;
struct bfa_aen_entry_s aen_list[BFA_AEN_MAX_ENTRY];
spinlock_t bfad_aen_spinlock;
+ struct list_head vport_list;
};
/* BFAD state machine events */
diff --git a/trunk/drivers/scsi/bfa/bfad_im.c b/trunk/drivers/scsi/bfa/bfad_im.c
index e5db649e8eb7..3153923f5b60 100644
--- a/trunk/drivers/scsi/bfa/bfad_im.c
+++ b/trunk/drivers/scsi/bfa/bfad_im.c
@@ -917,6 +917,37 @@ bfad_get_itnim(struct bfad_im_port_s *im_port, int id)
return NULL;
}
+/*
+ * This function is invoked from the SCSI host template slave_alloc() entry point.
+ * It queries the LUN Mask database to check whether this LUN needs to
+ * be made visible to the SCSI mid-layer or not.
+ *
+ * Returns BFA_STATUS_OK if this LUN needs to be added to the OS stack.
+ * Returns -ENXIO to notify SCSI mid-layer to not add this LUN to the OS stack.
+ */
+static int
+bfad_im_check_if_make_lun_visible(struct scsi_device *sdev,
+ struct fc_rport *rport)
+{
+ struct bfad_itnim_data_s *itnim_data =
+ (struct bfad_itnim_data_s *) rport->dd_data;
+ struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
+ struct bfa_rport_s *bfa_rport = itnim_data->itnim->bfa_itnim->rport;
+ struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(bfa);
+ int i = 0, ret = -ENXIO;
+
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE &&
+ scsilun_to_int(&lun_list[i].lun) == sdev->lun &&
+ lun_list[i].rp_tag == bfa_rport->rport_tag &&
+ lun_list[i].lp_tag == (u8)bfa_rport->rport_info.lp_tag) {
+ ret = BFA_STATUS_OK;
+ break;
+ }
+ }
+ return ret;
+}
+
/*
* Scsi_Host template entry slave_alloc
*/
@@ -924,10 +955,33 @@ static int
bfad_im_slave_alloc(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ struct bfad_itnim_data_s *itnim_data =
+ (struct bfad_itnim_data_s *) rport->dd_data;
+ struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED) {
+ /*
+ * We should not mask LUN 0 - since this would translate
+ * to no LUN / TARGET for the SCSI mid-layer, resulting in no scan.
+ */
+ if (sdev->lun == 0) {
+ sdev->sdev_bflags |= BLIST_NOREPORTLUN |
+ BLIST_SPARSELUN;
+ goto done;
+ }
+
+ /*
+ * Query LUN Mask configuration - to expose this LUN
+ * to the SCSI mid-layer or to mask it.
+ */
+ if (bfad_im_check_if_make_lun_visible(sdev, rport) !=
+ BFA_STATUS_OK)
+ return -ENXIO;
+ }
+done:
sdev->hostdata = rport->dd_data;
return 0;
@@ -1037,6 +1091,8 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
&& (fc_rport->scsi_target_id < MAX_FCP_TARGET))
itnim->scsi_tgt_id = fc_rport->scsi_target_id;
+ itnim->channel = fc_rport->channel;
+
return;
}
diff --git a/trunk/drivers/scsi/bfa/bfad_im.h b/trunk/drivers/scsi/bfa/bfad_im.h
index 004b6cf848d9..0814367ef101 100644
--- a/trunk/drivers/scsi/bfa/bfad_im.h
+++ b/trunk/drivers/scsi/bfa/bfad_im.h
@@ -91,6 +91,7 @@ struct bfad_itnim_s {
struct fc_rport *fc_rport;
struct bfa_itnim_s *bfa_itnim;
u16 scsi_tgt_id;
+ u16 channel;
u16 queue_work;
unsigned long last_ramp_up_time;
unsigned long last_queue_full_time;
@@ -166,4 +167,30 @@ irqreturn_t bfad_intx(int irq, void *dev_id);
int bfad_im_bsg_request(struct fc_bsg_job *job);
int bfad_im_bsg_timeout(struct fc_bsg_job *job);
+/*
+ * Macro to set the SCSI device sdev_bflags - sdev_bflags are used by the
+ * SCSI mid-layer to choose LUN Scanning mode REPORT_LUNS vs. Sequential Scan
+ *
+ * Internally iterates over all the ITNIMs of the im_port and sets the
+ * sdev_bflags for the scsi_device associated with LUN #0.
+ */
+#define bfad_reset_sdev_bflags(__im_port, __lunmask_cfg) do { \
+ struct scsi_device *__sdev = NULL; \
+ struct bfad_itnim_s *__itnim = NULL; \
+ u32 scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN; \
+ list_for_each_entry(__itnim, &((__im_port)->itnim_mapped_list), \
+ list_entry) { \
+ __sdev = scsi_device_lookup((__im_port)->shost, \
+ __itnim->channel, \
+ __itnim->scsi_tgt_id, 0); \
+ if (__sdev) { \
+ if ((__lunmask_cfg) == BFA_TRUE) \
+ __sdev->sdev_bflags |= scan_flags; \
+ else \
+ __sdev->sdev_bflags &= ~scan_flags; \
+ scsi_device_put(__sdev); \
+ } \
+ } \
+} while (0)
+
#endif
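
For reference, the sdev_bflags toggle above can be exercised in isolation. The following standalone sketch uses stand-in bit values (not the kernel's BLIST_* definitions from include/scsi/scsi_devinfo.h) and mirrors the per-device decision bfad_reset_sdev_bflags() makes: lunmask enabled forces a sparse, sequential LUN scan; lunmask disabled restores the default REPORT LUNS based scan.

#include <stdio.h>

/* Stand-in bit values for illustration only; the real BLIST_* flags
 * live in include/scsi/scsi_devinfo.h. */
#define BLIST_NOREPORTLUN	(1u << 0)
#define BLIST_SPARSELUN		(1u << 1)

/* Mirrors the per-device toggle done by bfad_reset_sdev_bflags():
 * enabled  -> sequential LUN scan (REPORT LUNS suppressed),
 * disabled -> default REPORT LUNS based scan. */
static unsigned int reset_scan_flags(unsigned int bflags, int lunmask_enabled)
{
	const unsigned int scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN;

	return lunmask_enabled ? (bflags | scan_flags) : (bflags & ~scan_flags);
}

int main(void)
{
	unsigned int bflags = 0;

	bflags = reset_scan_flags(bflags, 1);
	printf("lunmask enabled:  0x%x\n", bflags);
	bflags = reset_scan_flags(bflags, 0);
	printf("lunmask disabled: 0x%x\n", bflags);
	return 0;
}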
diff --git a/trunk/drivers/scsi/cxgbi/libcxgbi.c b/trunk/drivers/scsi/cxgbi/libcxgbi.c
index c5360ffb4bed..d3ff9cd40234 100644
--- a/trunk/drivers/scsi/cxgbi/libcxgbi.c
+++ b/trunk/drivers/scsi/cxgbi/libcxgbi.c
@@ -1868,8 +1868,9 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
if (!tdata->skb) {
- pr_warn("alloc skb %u+%u, opcode 0x%x failed.\n",
- cdev->skb_tx_rsvd, headroom, opcode);
+ struct cxgbi_sock *csk = cconn->cep->csk;
+ struct net_device *ndev = cdev->ports[csk->port_id];
+ ndev->stats.tx_dropped++;
return -ENOMEM;
}
diff --git a/trunk/drivers/scsi/device_handler/scsi_dh_alua.c b/trunk/drivers/scsi/device_handler/scsi_dh_alua.c
index 4ef021291a4d..04c5cea47a22 100644
--- a/trunk/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/trunk/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -466,6 +466,11 @@ static int alua_check_sense(struct scsi_device *sdev,
* Power On, Reset, or Bus Device Reset, just retry.
*/
return ADD_TO_MLQUEUE;
+ if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01)
+ /*
+ * Mode Parameters Changed
+ */
+ return ADD_TO_MLQUEUE;
if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06)
/*
* ALUA state changed
diff --git a/trunk/drivers/scsi/device_handler/scsi_dh_rdac.c b/trunk/drivers/scsi/device_handler/scsi_dh_rdac.c
index 841ebf4a6788..53a31c753cb1 100644
--- a/trunk/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/trunk/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -953,6 +953,8 @@ static int __init rdac_init(void)
if (!kmpath_rdacd) {
scsi_unregister_device_handler(&rdac_dh);
printk(KERN_ERR "kmpath_rdacd creation failed.\n");
+
+ r = -EINVAL;
}
done:
return r;
diff --git a/trunk/drivers/scsi/fcoe/fcoe.c b/trunk/drivers/scsi/fcoe/fcoe.c
index 8d67467dd9ce..e9599600aa23 100644
--- a/trunk/drivers/scsi/fcoe/fcoe.c
+++ b/trunk/drivers/scsi/fcoe/fcoe.c
@@ -58,7 +58,11 @@ module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \
"Direct Data Placement (DDP).");
-DEFINE_MUTEX(fcoe_config_mutex);
+unsigned int fcoe_debug_logging;
+module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+
+static DEFINE_MUTEX(fcoe_config_mutex);
static struct workqueue_struct *fcoe_wq;
@@ -67,8 +71,8 @@ static DECLARE_COMPLETION(fcoe_flush_completion);
/* fcoe host list */
/* must only be accessed under the RTNL mutex */
-LIST_HEAD(fcoe_hostlist);
-DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
+static LIST_HEAD(fcoe_hostlist);
+static DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
/* Function Prototypes */
static int fcoe_reset(struct Scsi_Host *);
@@ -157,7 +161,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = {
.lport_set_port_id = fcoe_set_port_id,
};
-struct fc_function_template fcoe_nport_fc_functions = {
+static struct fc_function_template fcoe_nport_fc_functions = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
@@ -197,7 +201,7 @@ struct fc_function_template fcoe_nport_fc_functions = {
.bsg_request = fc_lport_bsg_request,
};
-struct fc_function_template fcoe_vport_fc_functions = {
+static struct fc_function_template fcoe_vport_fc_functions = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
@@ -433,7 +437,7 @@ static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
*
* Caller must be holding the RTNL mutex
*/
-void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
+static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
{
struct net_device *netdev = fcoe->netdev;
struct fcoe_ctlr *fip = &fcoe->ctlr;
@@ -748,7 +752,7 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
*
* Returns: True for read types I/O, otherwise returns false.
*/
-bool fcoe_oem_match(struct fc_frame *fp)
+static bool fcoe_oem_match(struct fc_frame *fp)
{
struct fc_frame_header *fh = fc_frame_header_get(fp);
struct fcp_cmnd *fcp;
@@ -756,11 +760,12 @@ bool fcoe_oem_match(struct fc_frame *fp)
if (fc_fcp_is_read(fr_fsp(fp)) &&
(fr_fsp(fp)->data_len > fcoe_ddp_min))
return true;
- else if (!(ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)) {
+ else if ((fr_fsp(fp) == NULL) &&
+ (fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) &&
+ (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)) {
fcp = fc_frame_payload_get(fp, sizeof(*fcp));
- if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN &&
- fcp && (ntohl(fcp->fc_dl) > fcoe_ddp_min) &&
- (fcp->fc_flags & FCP_CFL_WRDATA))
+ if ((fcp->fc_flags & FCP_CFL_WRDATA) &&
+ (ntohl(fcp->fc_dl) > fcoe_ddp_min))
return true;
}
return false;
@@ -1106,7 +1111,7 @@ static int __init fcoe_if_init(void)
*
* Returns: 0 on success
*/
-int __exit fcoe_if_exit(void)
+static int __exit fcoe_if_exit(void)
{
fc_release_transport(fcoe_nport_scsi_transport);
fc_release_transport(fcoe_vport_scsi_transport);
@@ -1295,7 +1300,7 @@ static inline unsigned int fcoe_select_cpu(void)
*
* Returns: 0 for success
*/
-int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
+static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
struct packet_type *ptype, struct net_device *olddev)
{
struct fc_lport *lport;
@@ -1451,7 +1456,7 @@ static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
*
* Return: 0 for success
*/
-int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
+static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
{
int wlen;
u32 crc;
@@ -1671,8 +1676,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
skb->dev ? skb->dev->name : "");
port = lport_priv(lport);
- if (skb_is_nonlinear(skb))
- skb_linearize(skb); /* not ideal */
+ skb_linearize(skb); /* the skb_is_nonlinear() check is done inside skb_linearize() */
/*
* Frame length checks and setting up the header pointers
@@ -1728,7 +1732,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
*
* Return: 0 for success
*/
-int fcoe_percpu_receive_thread(void *arg)
+static int fcoe_percpu_receive_thread(void *arg)
{
struct fcoe_percpu_s *p = arg;
struct sk_buff *skb;
@@ -2146,7 +2150,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
* Returns: 0 if the ethtool query was successful
* -1 if the ethtool query failed
*/
-int fcoe_link_speed_update(struct fc_lport *lport)
+static int fcoe_link_speed_update(struct fc_lport *lport)
{
struct net_device *netdev = fcoe_netdev(lport);
struct ethtool_cmd ecmd;
@@ -2180,7 +2184,7 @@ int fcoe_link_speed_update(struct fc_lport *lport)
* Returns: 0 if link is UP and OK, -1 if not
*
*/
-int fcoe_link_ok(struct fc_lport *lport)
+static int fcoe_link_ok(struct fc_lport *lport)
{
struct net_device *netdev = fcoe_netdev(lport);
@@ -2200,7 +2204,7 @@ int fcoe_link_ok(struct fc_lport *lport)
* there no packets that will be handled by the lport, but also that any
* threads already handling packet have returned.
*/
-void fcoe_percpu_clean(struct fc_lport *lport)
+static void fcoe_percpu_clean(struct fc_lport *lport)
{
struct fcoe_percpu_s *pp;
struct fcoe_rcv_info *fr;
@@ -2251,7 +2255,7 @@ void fcoe_percpu_clean(struct fc_lport *lport)
*
* Returns: Always 0 (return value required by FC transport template)
*/
-int fcoe_reset(struct Scsi_Host *shost)
+static int fcoe_reset(struct Scsi_Host *shost)
{
struct fc_lport *lport = shost_priv(shost);
struct fcoe_port *port = lport_priv(lport);
diff --git a/trunk/drivers/scsi/fcoe/fcoe.h b/trunk/drivers/scsi/fcoe/fcoe.h
index 6c6884bcf840..bcc89e639495 100644
--- a/trunk/drivers/scsi/fcoe/fcoe.h
+++ b/trunk/drivers/scsi/fcoe/fcoe.h
@@ -40,9 +40,7 @@
#define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */
#define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */
-unsigned int fcoe_debug_logging;
-module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+extern unsigned int fcoe_debug_logging;
#define FCOE_LOGGING 0x01 /* General logging, not categorized */
#define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */
diff --git a/trunk/drivers/scsi/hpsa.c b/trunk/drivers/scsi/hpsa.c
index 5140f5d0fd6b..b96962c39449 100644
--- a/trunk/drivers/scsi/hpsa.c
+++ b/trunk/drivers/scsi/hpsa.c
@@ -4271,7 +4271,9 @@ static void stop_controller_lockup_detector(struct ctlr_info *h)
remove_ctlr_from_lockup_detector_list(h);
/* If the list of ctlr's to monitor is empty, stop the thread */
if (list_empty(&hpsa_ctlr_list)) {
+ spin_unlock_irqrestore(&lockup_detector_lock, flags);
kthread_stop(hpsa_lockup_detector);
+ spin_lock_irqsave(&lockup_detector_lock, flags);
hpsa_lockup_detector = NULL;
}
spin_unlock_irqrestore(&lockup_detector_lock, flags);
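
The hpsa change above drops lockup_detector_lock across kthread_stop() because kthread_stop() waits for the detector thread to exit and may sleep, which must not happen while holding a spinlock with interrupts disabled; if the detector thread also needs the same lock to finish its last pass, stopping it while holding the lock would deadlock. A minimal userspace analogue of that pattern, with purely illustrative names (this is not hpsa code), is sketched below: release the lock before waiting for the worker that still needs it.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative names only; this is not hpsa code. */
static pthread_mutex_t detector_lock = PTHREAD_MUTEX_INITIALIZER;
static bool stop_requested;

static void *detector(void *arg)
{
	bool stop = false;

	while (!stop) {
		/* The worker takes the same lock on every pass. */
		pthread_mutex_lock(&detector_lock);
		stop = stop_requested;
		pthread_mutex_unlock(&detector_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, detector, NULL);

	pthread_mutex_lock(&detector_lock);
	stop_requested = true;
	/* Drop the lock before waiting for the worker; joining while
	 * holding it would block the worker's next iteration forever. */
	pthread_mutex_unlock(&detector_lock);
	pthread_join(tid, NULL);
	printf("detector stopped\n");
	return 0;
}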
diff --git a/trunk/drivers/scsi/isci/firmware/Makefile b/trunk/drivers/scsi/isci/firmware/Makefile
deleted file mode 100644
index 5f54461cabc5..000000000000
--- a/trunk/drivers/scsi/isci/firmware/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-# Makefile for create_fw
-#
-CC=gcc
-CFLAGS=-c -Wall -O2 -g
-LDFLAGS=
-SOURCES=create_fw.c
-OBJECTS=$(SOURCES:.cpp=.o)
-EXECUTABLE=create_fw
-
-all: $(SOURCES) $(EXECUTABLE)
-
-$(EXECUTABLE): $(OBJECTS)
- $(CC) $(LDFLAGS) $(OBJECTS) -o $@
-
-.c.o:
- $(CC) $(CFLAGS) $< -O $@
-
-clean:
- rm -f *.o $(EXECUTABLE)
diff --git a/trunk/drivers/scsi/isci/firmware/README b/trunk/drivers/scsi/isci/firmware/README
deleted file mode 100644
index 8056d2bd233b..000000000000
--- a/trunk/drivers/scsi/isci/firmware/README
+++ /dev/null
@@ -1,36 +0,0 @@
-This defines the temporary binary blow we are to pass to the SCU
-driver to emulate the binary firmware that we will eventually be
-able to access via NVRAM on the SCU controller.
-
-The current size of the binary blob is expected to be 149 bytes or larger
-
-Header Types:
-0x1: Phy Masks
-0x2: Phy Gens
-0x3: SAS Addrs
-0xff: End of Data
-
-ID string - u8[12]: "#SCU MAGIC#\0"
-Version - u8: 1
-SubVersion - u8: 0
-
-Header Type - u8: 0x1
-Size - u8: 8
-Phy Mask - u32[8]
-
-Header Type - u8: 0x2
-Size - u8: 8
-Phy Gen - u32[8]
-
-Header Type - u8: 0x3
-Size - u8: 8
-Sas Addr - u64[8]
-
-Header Type - u8: 0xf
-
-
-==============================================================================
-
-Place isci_firmware.bin in /lib/firmware
-Be sure to recreate the initramfs image to include the firmware.
-
diff --git a/trunk/drivers/scsi/isci/firmware/create_fw.c b/trunk/drivers/scsi/isci/firmware/create_fw.c
deleted file mode 100644
index c7a2887a7e95..000000000000
--- a/trunk/drivers/scsi/isci/firmware/create_fw.c
+++ /dev/null
@@ -1,99 +0,0 @@
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "create_fw.h"
-#include "../probe_roms.h"
-
-int write_blob(struct isci_orom *isci_orom)
-{
- FILE *fd;
- int err;
- size_t count;
-
- fd = fopen(blob_name, "w+");
- if (!fd) {
- perror("Open file for write failed");
- fclose(fd);
- return -EIO;
- }
-
- count = fwrite(isci_orom, sizeof(struct isci_orom), 1, fd);
- if (count != 1) {
- perror("Write data failed");
- fclose(fd);
- return -EIO;
- }
-
- fclose(fd);
-
- return 0;
-}
-
-void set_binary_values(struct isci_orom *isci_orom)
-{
- int ctrl_idx, phy_idx, port_idx;
-
- /* setting OROM signature */
- strncpy(isci_orom->hdr.signature, sig, strlen(sig));
- isci_orom->hdr.version = version;
- isci_orom->hdr.total_block_length = sizeof(struct isci_orom);
- isci_orom->hdr.hdr_length = sizeof(struct sci_bios_oem_param_block_hdr);
- isci_orom->hdr.num_elements = num_elements;
-
- for (ctrl_idx = 0; ctrl_idx < 2; ctrl_idx++) {
- isci_orom->ctrl[ctrl_idx].controller.mode_type = mode_type;
- isci_orom->ctrl[ctrl_idx].controller.max_concurrent_dev_spin_up =
- max_num_concurrent_dev_spin_up;
- isci_orom->ctrl[ctrl_idx].controller.do_enable_ssc =
- enable_ssc;
-
- for (port_idx = 0; port_idx < 4; port_idx++)
- isci_orom->ctrl[ctrl_idx].ports[port_idx].phy_mask =
- phy_mask[ctrl_idx][port_idx];
-
- for (phy_idx = 0; phy_idx < 4; phy_idx++) {
- isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.high =
- (__u32)(sas_addr[ctrl_idx][phy_idx] >> 32);
- isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.low =
- (__u32)(sas_addr[ctrl_idx][phy_idx]);
-
- isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control0 =
- afe_tx_amp_control0;
- isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control1 =
- afe_tx_amp_control1;
- isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control2 =
- afe_tx_amp_control2;
- isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control3 =
- afe_tx_amp_control3;
- }
- }
-}
-
-int main(void)
-{
- int err;
- struct isci_orom *isci_orom;
-
- isci_orom = malloc(sizeof(struct isci_orom));
- memset(isci_orom, 0, sizeof(struct isci_orom));
-
- set_binary_values(isci_orom);
-
- err = write_blob(isci_orom);
- if (err < 0) {
- free(isci_orom);
- return err;
- }
-
- free(isci_orom);
- return 0;
-}
diff --git a/trunk/drivers/scsi/isci/firmware/create_fw.h b/trunk/drivers/scsi/isci/firmware/create_fw.h
deleted file mode 100644
index 5f298828d22e..000000000000
--- a/trunk/drivers/scsi/isci/firmware/create_fw.h
+++ /dev/null
@@ -1,77 +0,0 @@
-#ifndef _CREATE_FW_H_
-#define _CREATE_FW_H_
-#include "../probe_roms.h"
-
-
-/* we are configuring for 2 SCUs */
-static const int num_elements = 2;
-
-/*
- * For all defined arrays:
- * elements 0-3 are for SCU0, ports 0-3
- * elements 4-7 are for SCU1, ports 0-3
- *
- * valid configurations for one SCU are:
- * P0 P1 P2 P3
- * ----------------
- * 0xF,0x0,0x0,0x0 # 1 x4 port
- * 0x3,0x0,0x4,0x8 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are each x1
- * # ports
- * 0x1,0x2,0xC,0x0 # Phys 0 and 1 are each x1 ports, phy 2 and phy 3 are a x2
- * # port
- * 0x3,0x0,0xC,0x0 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are a x2 port
- * 0x1,0x2,0x4,0x8 # Each phy is a x1 port (this is the default configuration)
- *
- * if there is a port/phy on which you do not wish to override the default
- * values, use the value assigned to UNINIT_PARAM (255).
- */
-
-/* discovery mode type (port auto config mode by default ) */
-
-/*
- * if there is a port/phy on which you do not wish to override the default
- * values, use the value "0000000000000000". SAS address of zero's is
- * considered invalid and will not be used.
- */
-#ifdef MPC
-static const int mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
-static const __u8 phy_mask[2][4] = { {1, 2, 4, 8},
- {1, 2, 4, 8} };
-static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFFF0000001ULL,
- 0x5FCFFFFFF0000002ULL,
- 0x5FCFFFFFF0000003ULL,
- 0x5FCFFFFFF0000004ULL },
- { 0x5FCFFFFFF0000005ULL,
- 0x5FCFFFFFF0000006ULL,
- 0x5FCFFFFFF0000007ULL,
- 0x5FCFFFFFF0000008ULL } };
-#else /* APC (default) */
-static const int mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
-static const __u8 phy_mask[2][4];
-static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFF00000001ULL,
- 0x5FCFFFFF00000001ULL,
- 0x5FCFFFFF00000001ULL,
- 0x5FCFFFFF00000001ULL },
- { 0x5FCFFFFF00000002ULL,
- 0x5FCFFFFF00000002ULL,
- 0x5FCFFFFF00000002ULL,
- 0x5FCFFFFF00000002ULL } };
-#endif
-
-/* Maximum number of concurrent device spin up */
-static const int max_num_concurrent_dev_spin_up = 1;
-
-/* enable of ssc operation */
-static const int enable_ssc;
-
-/* AFE_TX_AMP_CONTROL */
-static const unsigned int afe_tx_amp_control0 = 0x000bdd08;
-static const unsigned int afe_tx_amp_control1 = 0x000ffc00;
-static const unsigned int afe_tx_amp_control2 = 0x000b7c09;
-static const unsigned int afe_tx_amp_control3 = 0x000afc6e;
-
-static const char blob_name[] = "isci_firmware.bin";
-static const char sig[] = "ISCUOEMB";
-static const unsigned char version = 0x10;
-
-#endif
diff --git a/trunk/drivers/scsi/isci/host.c b/trunk/drivers/scsi/isci/host.c
index e7fe9c4c85b8..1a65d6514237 100644
--- a/trunk/drivers/scsi/isci/host.c
+++ b/trunk/drivers/scsi/isci/host.c
@@ -899,7 +899,8 @@ static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
*/
if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
(iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
- (iphy->is_in_link_training == true && is_phy_starting(iphy))) {
+ (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
+ (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask)) {
is_controller_start_complete = false;
break;
}
@@ -1666,6 +1667,9 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
/* Default to no SSC operation. */
ihost->oem_parameters.controller.do_enable_ssc = false;
+ /* Default to short cables on all phys. */
+ ihost->oem_parameters.controller.cable_selection_mask = 0;
+
/* Initialize all of the port parameter information to narrow ports. */
for (index = 0; index < SCI_MAX_PORTS; index++) {
ihost->oem_parameters.ports[index].phy_mask = 0;
@@ -1673,8 +1677,9 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
/* Initialize all of the phy parameter information. */
for (index = 0; index < SCI_MAX_PHYS; index++) {
- /* Default to 6G (i.e. Gen 3) for now. */
- ihost->user_parameters.phys[index].max_speed_generation = 3;
+ /* Default to 3G (i.e. Gen 2). */
+ ihost->user_parameters.phys[index].max_speed_generation =
+ SCIC_SDS_PARM_GEN2_SPEED;
/* the frequencies cannot be 0 */
ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
@@ -1694,7 +1699,7 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
ihost->user_parameters.ssp_inactivity_timeout = 5;
ihost->user_parameters.stp_max_occupancy_timeout = 5;
ihost->user_parameters.ssp_max_occupancy_timeout = 20;
- ihost->user_parameters.no_outbound_task_timeout = 20;
+ ihost->user_parameters.no_outbound_task_timeout = 2;
}
static void controller_timeout(unsigned long data)
@@ -1759,7 +1764,7 @@ static enum sci_status sci_controller_construct(struct isci_host *ihost,
return sci_controller_reset(ihost);
}
-int sci_oem_parameters_validate(struct sci_oem_params *oem)
+int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
{
int i;
@@ -1791,18 +1796,61 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem)
oem->controller.max_concurr_spin_up < 1)
return -EINVAL;
+ if (oem->controller.do_enable_ssc) {
+ if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1)
+ return -EINVAL;
+
+ if (version >= ISCI_ROM_VER_1_1) {
+ u8 test = oem->controller.ssc_sata_tx_spread_level;
+
+ switch (test) {
+ case 0:
+ case 2:
+ case 3:
+ case 6:
+ case 7:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ test = oem->controller.ssc_sas_tx_spread_level;
+ if (oem->controller.ssc_sas_tx_type == 0) {
+ switch (test) {
+ case 0:
+ case 2:
+ case 3:
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (oem->controller.ssc_sas_tx_type == 1) {
+ switch (test) {
+ case 0:
+ case 3:
+ case 6:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ }
+ }
+
return 0;
}
static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
{
u32 state = ihost->sm.current_state_id;
+ struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
if (state == SCIC_RESET ||
state == SCIC_INITIALIZING ||
state == SCIC_INITIALIZED) {
- if (sci_oem_parameters_validate(&ihost->oem_parameters))
+ if (sci_oem_parameters_validate(&ihost->oem_parameters,
+ pci_info->orom->hdr.version))
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
return SCI_SUCCESS;
@@ -1857,6 +1905,31 @@ static void power_control_timeout(unsigned long data)
ihost->power_control.phys_waiting--;
ihost->power_control.phys_granted_power++;
sci_phy_consume_power_handler(iphy);
+
+ if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+ u8 j;
+
+ for (j = 0; j < SCI_MAX_PHYS; j++) {
+ struct isci_phy *requester = ihost->power_control.requesters[j];
+
+ /*
+ * Search the power_control queue to see if there are other phys
+ * attached to the same remote device. If found, take all of
+ * them out of await_sas_power state.
+ */
+ if (requester != NULL && requester != iphy) {
+ u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr,
+ iphy->frame_rcvd.iaf.sas_addr,
+ sizeof(requester->frame_rcvd.iaf.sas_addr));
+
+ if (other == 0) {
+ ihost->power_control.requesters[j] = NULL;
+ ihost->power_control.phys_waiting--;
+ sci_phy_consume_power_handler(requester);
+ }
+ }
+ }
+ }
}
/*
@@ -1891,9 +1964,34 @@ void sci_controller_power_control_queue_insert(struct isci_host *ihost,
ihost->power_control.timer_started = true;
} else {
- /* Add the phy in the waiting list */
- ihost->power_control.requesters[iphy->phy_index] = iphy;
- ihost->power_control.phys_waiting++;
+ /*
+ * If other phys attached to the same SAS address as this phy are
+ * already in the READY state, this phy does not need to wait.
+ */
+ u8 i;
+ struct isci_phy *current_phy;
+
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ u8 other;
+ current_phy = &ihost->phys[i];
+
+ other = memcmp(current_phy->frame_rcvd.iaf.sas_addr,
+ iphy->frame_rcvd.iaf.sas_addr,
+ sizeof(current_phy->frame_rcvd.iaf.sas_addr));
+
+ if (current_phy->sm.current_state_id == SCI_PHY_READY &&
+ current_phy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS &&
+ other == 0) {
+ sci_phy_consume_power_handler(iphy);
+ break;
+ }
+ }
+
+ if (i == SCI_MAX_PHYS) {
+ /* Add the phy in the waiting list */
+ ihost->power_control.requesters[iphy->phy_index] = iphy;
+ ihost->power_control.phys_waiting++;
+ }
}
}
@@ -1908,162 +2006,250 @@ void sci_controller_power_control_queue_remove(struct isci_host *ihost,
ihost->power_control.requesters[iphy->phy_index] = NULL;
}
+static int is_long_cable(int phy, unsigned char selection_byte)
+{
+ return !!(selection_byte & (1 << phy));
+}
+
+static int is_medium_cable(int phy, unsigned char selection_byte)
+{
+ return !!(selection_byte & (1 << (phy + 4)));
+}
+
+static enum cable_selections decode_selection_byte(
+ int phy,
+ unsigned char selection_byte)
+{
+ return ((selection_byte & (1 << phy)) ? 1 : 0)
+ + (selection_byte & (1 << (phy + 4)) ? 2 : 0);
+}
+
+static unsigned char *to_cable_select(struct isci_host *ihost)
+{
+ if (is_cable_select_overridden())
+ return ((unsigned char *)&cable_selection_override)
+ + ihost->id;
+ else
+ return &ihost->oem_parameters.controller.cable_selection_mask;
+}
+
+enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy)
+{
+ return decode_selection_byte(phy, *to_cable_select(ihost));
+}
+
+char *lookup_cable_names(enum cable_selections selection)
+{
+ static char *cable_names[] = {
+ [short_cable] = "short",
+ [long_cable] = "long",
+ [medium_cable] = "medium",
+ [undefined_cable] = "" /* bit 0==1 */
+ };
+ return (selection <= undefined_cable) ? cable_names[selection]
+ : cable_names[undefined_cable];
+}
+
#define AFE_REGISTER_WRITE_DELAY 10
-/* Initialize the AFE for this phy index. We need to read the AFE setup from
- * the OEM parameters
- */
static void sci_controller_afe_initialization(struct isci_host *ihost)
{
+ struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
const struct sci_oem_params *oem = &ihost->oem_parameters;
struct pci_dev *pdev = ihost->pdev;
u32 afe_status;
u32 phy_id;
+ unsigned char cable_selection_mask = *to_cable_select(ihost);
/* Clear DFX Status registers */
- writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0);
+ writel(0x0081000f, &afe->afe_dfx_master_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
- if (is_b0(pdev)) {
+ if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) {
/* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
- * Timer, PM Stagger Timer */
- writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2);
+ * Timer, PM Stagger Timer
+ */
+ writel(0x0007FFFF, &afe->afe_pmsn_master_control2);
udelay(AFE_REGISTER_WRITE_DELAY);
}
/* Configure bias currents to normal */
if (is_a2(pdev))
- writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control);
+ writel(0x00005A00, &afe->afe_bias_control);
else if (is_b0(pdev) || is_c0(pdev))
- writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control);
+ writel(0x00005F00, &afe->afe_bias_control);
+ else if (is_c1(pdev))
+ writel(0x00005500, &afe->afe_bias_control);
udelay(AFE_REGISTER_WRITE_DELAY);
/* Enable PLL */
- if (is_b0(pdev) || is_c0(pdev))
- writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0);
- else
- writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0);
+ if (is_a2(pdev))
+ writel(0x80040908, &afe->afe_pll_control0);
+ else if (is_b0(pdev) || is_c0(pdev))
+ writel(0x80040A08, &afe->afe_pll_control0);
+ else if (is_c1(pdev)) {
+ writel(0x80000B08, &afe->afe_pll_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ writel(0x00000B08, &afe->afe_pll_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ writel(0x80000B08, &afe->afe_pll_control0);
+ }
udelay(AFE_REGISTER_WRITE_DELAY);
/* Wait for the PLL to lock */
do {
- afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status);
+ afe_status = readl(&afe->afe_common_block_status);
udelay(AFE_REGISTER_WRITE_DELAY);
} while ((afe_status & 0x00001000) == 0);
if (is_a2(pdev)) {
- /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
- writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0);
+ /* Shorten SAS SNW lock time (RxLock timer value from 76
+ * us to 50 us)
+ */
+ writel(0x7bcc96ad, &afe->afe_pmsn_master_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
}
for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
+ struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_id];
const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
+ int cable_length_long =
+ is_long_cable(phy_id, cable_selection_mask);
+ int cable_length_medium =
+ is_medium_cable(phy_id, cable_selection_mask);
- if (is_b0(pdev)) {
- /* Configure transmitter SSC parameters */
- writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+ if (is_a2(pdev)) {
+ /* All defaults, except the Receive Word
+ * Alignment/Comma Detect Enable....(0xe800)
+ */
+ writel(0x00004512, &xcvr->afe_xcvr_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(0x0050100F, &xcvr->afe_xcvr_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ } else if (is_b0(pdev)) {
+ /* Configure transmitter SSC parameters */
+ writel(0x00030000, &xcvr->afe_tx_ssc_control);
udelay(AFE_REGISTER_WRITE_DELAY);
} else if (is_c0(pdev)) {
- /* Configure transmitter SSC parameters */
- writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+ /* Configure transmitter SSC parameters */
+ writel(0x00010202, &xcvr->afe_tx_ssc_control);
udelay(AFE_REGISTER_WRITE_DELAY);
- /*
- * All defaults, except the Receive Word Alignament/Comma Detect
- * Enable....(0xe800) */
- writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+ /* All defaults, except the Receive Word
+ * Alignment/Comma Detect Enable....(0xe800)
+ */
+ writel(0x00014500, &xcvr->afe_xcvr_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
- } else {
- /*
- * All defaults, except the Receive Word Alignament/Comma Detect
- * Enable....(0xe800) */
- writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+ } else if (is_c1(pdev)) {
+ /* Configure transmitter SSC parameters */
+ writel(0x00010202, &xcvr->afe_tx_ssc_control);
udelay(AFE_REGISTER_WRITE_DELAY);
- writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
+ /* All defaults, except the Receive Word
+ * Alignment/Comma Detect Enable....(0xe800)
+ */
+ writel(0x0001C500, &xcvr->afe_xcvr_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
}
- /*
- * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
- * & increase TX int & ext bias 20%....(0xe85c) */
+ /* Power up TX and RX out from power down (PWRDNTX and
+ * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c)
+ */
if (is_a2(pdev))
- writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ writel(0x000003F0, &xcvr->afe_channel_control);
else if (is_b0(pdev)) {
- /* Power down TX and RX (PWRDNTX and PWRDNRX) */
- writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ writel(0x000003D7, &xcvr->afe_channel_control);
udelay(AFE_REGISTER_WRITE_DELAY);
- /*
- * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
- * & increase TX int & ext bias 20%....(0xe85c) */
- writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
- } else {
- writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ writel(0x000003D4, &xcvr->afe_channel_control);
+ } else if (is_c0(pdev)) {
+ writel(0x000001E7, &xcvr->afe_channel_control);
udelay(AFE_REGISTER_WRITE_DELAY);
- /*
- * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
- * & increase TX int & ext bias 20%....(0xe85c) */
- writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ writel(0x000001E4, &xcvr->afe_channel_control);
+ } else if (is_c1(pdev)) {
+ writel(cable_length_long ? 0x000002F7 : 0x000001F7,
+ &xcvr->afe_channel_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(cable_length_long ? 0x000002F4 : 0x000001F4,
+ &xcvr->afe_channel_control);
}
udelay(AFE_REGISTER_WRITE_DELAY);
if (is_a2(pdev)) {
/* Enable TX equalization (0xe824) */
- writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+ writel(0x00040000, &xcvr->afe_tx_control);
udelay(AFE_REGISTER_WRITE_DELAY);
}
- /*
- * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
- * RDD=0x0(RX Detect Enabled) ....(0xe800) */
- writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+ if (is_a2(pdev) || is_b0(pdev))
+ /* RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0,
+ * TPD=0x0(TX Power On), RDD=0x0(RX Detect
+ * Enabled) ....(0xe800)
+ */
+ writel(0x00004100, &xcvr->afe_xcvr_control0);
+ else if (is_c0(pdev))
+ writel(0x00014100, &xcvr->afe_xcvr_control0);
+ else if (is_c1(pdev))
+ writel(0x0001C100, &xcvr->afe_xcvr_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
/* Leave DFE/FFE on */
if (is_a2(pdev))
- writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+ writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
else if (is_b0(pdev)) {
- writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+ writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
/* Enable TX equalization (0xe824) */
- writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
- } else {
- writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1);
+ writel(0x00040000, &xcvr->afe_tx_control);
+ } else if (is_c0(pdev)) {
+ writel(0x01400C0F, &xcvr->afe_rx_ssc_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /* Enable TX equalization (0xe824) */
+ writel(0x00040000, &xcvr->afe_tx_control);
+ } else if (is_c1(pdev)) {
+ writel(cable_length_long ? 0x01500C0C :
+ cable_length_medium ? 0x01400C0D : 0x02400C0D,
+ &xcvr->afe_xcvr_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(0x000003E0, &xcvr->afe_dfx_rx_control1);
udelay(AFE_REGISTER_WRITE_DELAY);
- writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+ writel(cable_length_long ? 0x33091C1F :
+ cable_length_medium ? 0x3315181F : 0x2B17161F,
+ &xcvr->afe_rx_ssc_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
/* Enable TX equalization (0xe824) */
- writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+ writel(0x00040000, &xcvr->afe_tx_control);
}
udelay(AFE_REGISTER_WRITE_DELAY);
- writel(oem_phy->afe_tx_amp_control0,
- &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
+ writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
- writel(oem_phy->afe_tx_amp_control1,
- &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
+ writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1);
udelay(AFE_REGISTER_WRITE_DELAY);
- writel(oem_phy->afe_tx_amp_control2,
- &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
+ writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2);
udelay(AFE_REGISTER_WRITE_DELAY);
- writel(oem_phy->afe_tx_amp_control3,
- &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
+ writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3);
udelay(AFE_REGISTER_WRITE_DELAY);
}
/* Transfer control to the PEs */
- writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0);
+ writel(0x00010f00, &afe->afe_dfx_master_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
}
diff --git a/trunk/drivers/scsi/isci/host.h b/trunk/drivers/scsi/isci/host.h
index 646051afd3cb..5477f0fa8233 100644
--- a/trunk/drivers/scsi/isci/host.h
+++ b/trunk/drivers/scsi/isci/host.h
@@ -435,11 +435,36 @@ static inline bool is_b0(struct pci_dev *pdev)
static inline bool is_c0(struct pci_dev *pdev)
{
- if (pdev->revision >= 5)
+ if (pdev->revision == 5)
return true;
return false;
}
+static inline bool is_c1(struct pci_dev *pdev)
+{
+ if (pdev->revision >= 6)
+ return true;
+ return false;
+}
+
+enum cable_selections {
+ short_cable = 0,
+ long_cable = 1,
+ medium_cable = 2,
+ undefined_cable = 3
+};
+
+#define CABLE_OVERRIDE_DISABLED (0x10000)
+
+static inline int is_cable_select_overridden(void)
+{
+ return cable_selection_override < CABLE_OVERRIDE_DISABLED;
+}
+
+enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy);
+void validate_cable_selections(struct isci_host *ihost);
+char *lookup_cable_names(enum cable_selections);
+
/* set hw control for 'activity', even though active enclosures seem to drive
* the activity led on their own. Skip setting FSENG control on 'status' due
* to unexpected operation and 'error' due to not being a supported automatic
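
The cable_selections encoding introduced above packs two bits per phy into one selection byte: bit 'phy' marks a long cable, bit 'phy + 4' marks a medium one, and neither bit set means short. The standalone sketch below (hypothetical example values, not from the patch) shows how a selection byte decodes per phy.

#include <stdio.h>

enum cable_selections { short_cable = 0, long_cable = 1,
			medium_cable = 2, undefined_cable = 3 };

/* Same bit layout as decode_selection_byte(): bit 'phy' => long,
 * bit 'phy + 4' => medium, both set => undefined. */
static enum cable_selections decode(int phy, unsigned char sel)
{
	return ((sel >> phy) & 1) + (((sel >> (phy + 4)) & 1) ? 2 : 0);
}

int main(void)
{
	unsigned char sel = 0x21;	/* example: bit 0 (phy 0 long) + bit 5 (phy 1 medium) */
	int phy;

	for (phy = 0; phy < 4; phy++)
		printf("phy %d -> %d\n", phy, decode(phy, sel));
	/* prints: phy 0 -> 1 (long), phy 1 -> 2 (medium), phys 2-3 -> 0 (short) */
	return 0;
}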
diff --git a/trunk/drivers/scsi/isci/init.c b/trunk/drivers/scsi/isci/init.c
index a97edabcb85a..17c4c2c89c2e 100644
--- a/trunk/drivers/scsi/isci/init.c
+++ b/trunk/drivers/scsi/isci/init.c
@@ -65,7 +65,7 @@
#include "probe_roms.h"
#define MAJ 1
-#define MIN 0
+#define MIN 1
#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD)
@@ -94,7 +94,7 @@ MODULE_DEVICE_TABLE(pci, isci_id_table);
/* linux isci specific settings */
-unsigned char no_outbound_task_to = 20;
+unsigned char no_outbound_task_to = 2;
module_param(no_outbound_task_to, byte, 0);
MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
@@ -114,7 +114,7 @@ u16 stp_inactive_to = 5;
module_param(stp_inactive_to, ushort, 0);
MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
-unsigned char phy_gen = 3;
+unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED;
module_param(phy_gen, byte, 0);
MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
@@ -122,6 +122,14 @@ unsigned char max_concurr_spinup;
module_param(max_concurr_spinup, byte, 0);
MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
+uint cable_selection_override = CABLE_OVERRIDE_DISABLED;
+module_param(cable_selection_override, uint, 0);
+
+MODULE_PARM_DESC(cable_selection_override,
+ "This field indicates length of the SAS/SATA cable between "
+ "host and device. If any bits > 15 are set (default) "
+ "indicates \"use platform defaults\"");
+
static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
@@ -412,6 +420,14 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
return NULL;
isci_host->shost = shost;
+ dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: "
+ "{%s, %s, %s, %s}\n",
+ (is_cable_select_overridden() ? "* " : ""), isci_host->id,
+ lookup_cable_names(decode_cable_selection(isci_host, 3)),
+ lookup_cable_names(decode_cable_selection(isci_host, 2)),
+ lookup_cable_names(decode_cable_selection(isci_host, 1)),
+ lookup_cable_names(decode_cable_selection(isci_host, 0)));
+
err = isci_host_init(isci_host);
if (err)
goto err_shost;
@@ -466,7 +482,8 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
orom = isci_request_oprom(pdev);
for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
- if (sci_oem_parameters_validate(&orom->ctrl[i])) {
+ if (sci_oem_parameters_validate(&orom->ctrl[i],
+ orom->hdr.version)) {
dev_warn(&pdev->dev,
"[%d]: invalid oem parameters detected, falling back to firmware\n", i);
devm_kfree(&pdev->dev, orom);
diff --git a/trunk/drivers/scsi/isci/isci.h b/trunk/drivers/scsi/isci/isci.h
index 8efeb6b08321..234ab46fce33 100644
--- a/trunk/drivers/scsi/isci/isci.h
+++ b/trunk/drivers/scsi/isci/isci.h
@@ -480,6 +480,7 @@ extern u16 ssp_inactive_to;
extern u16 stp_inactive_to;
extern unsigned char phy_gen;
extern unsigned char max_concurr_spinup;
+extern uint cable_selection_override;
irqreturn_t isci_msix_isr(int vec, void *data);
irqreturn_t isci_intx_isr(int vec, void *data);
diff --git a/trunk/drivers/scsi/isci/phy.c b/trunk/drivers/scsi/isci/phy.c
index 35f50c2183e1..fe18acfd6eb3 100644
--- a/trunk/drivers/scsi/isci/phy.c
+++ b/trunk/drivers/scsi/isci/phy.c
@@ -91,22 +91,23 @@ sci_phy_transport_layer_initialization(struct isci_phy *iphy,
static enum sci_status
sci_phy_link_layer_initialization(struct isci_phy *iphy,
- struct scu_link_layer_registers __iomem *reg)
+ struct scu_link_layer_registers __iomem *llr)
{
struct isci_host *ihost = iphy->owning_port->owning_controller;
+ struct sci_phy_user_params *phy_user;
+ struct sci_phy_oem_params *phy_oem;
int phy_idx = iphy->phy_index;
- struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx];
- struct sci_phy_oem_params *phy_oem =
- &ihost->oem_parameters.phys[phy_idx];
- u32 phy_configuration;
struct sci_phy_cap phy_cap;
+ u32 phy_configuration;
u32 parity_check = 0;
u32 parity_count = 0;
u32 llctl, link_rate;
u32 clksm_value = 0;
u32 sp_timeouts = 0;
- iphy->link_layer_registers = reg;
+ phy_user = &ihost->user_parameters.phys[phy_idx];
+ phy_oem = &ihost->oem_parameters.phys[phy_idx];
+ iphy->link_layer_registers = llr;
/* Set our IDENTIFY frame data */
#define SCI_END_DEVICE 0x01
@@ -116,32 +117,26 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) |
SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) |
SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE),
- &iphy->link_layer_registers->transmit_identification);
+ &llr->transmit_identification);
/* Write the device SAS Address */
- writel(0xFEDCBA98,
- &iphy->link_layer_registers->sas_device_name_high);
- writel(phy_idx, &iphy->link_layer_registers->sas_device_name_low);
+ writel(0xFEDCBA98, &llr->sas_device_name_high);
+ writel(phy_idx, &llr->sas_device_name_low);
/* Write the source SAS Address */
- writel(phy_oem->sas_address.high,
- &iphy->link_layer_registers->source_sas_address_high);
- writel(phy_oem->sas_address.low,
- &iphy->link_layer_registers->source_sas_address_low);
+ writel(phy_oem->sas_address.high, &llr->source_sas_address_high);
+ writel(phy_oem->sas_address.low, &llr->source_sas_address_low);
/* Clear and Set the PHY Identifier */
- writel(0, &iphy->link_layer_registers->identify_frame_phy_id);
- writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx),
- &iphy->link_layer_registers->identify_frame_phy_id);
+ writel(0, &llr->identify_frame_phy_id);
+ writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx), &llr->identify_frame_phy_id);
/* Change the initial state of the phy configuration register */
- phy_configuration =
- readl(&iphy->link_layer_registers->phy_configuration);
+ phy_configuration = readl(&llr->phy_configuration);
/* Hold OOB state machine in reset */
phy_configuration |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
- writel(phy_configuration,
- &iphy->link_layer_registers->phy_configuration);
+ writel(phy_configuration, &llr->phy_configuration);
/* Configure the SNW capabilities */
phy_cap.all = 0;
@@ -149,15 +144,64 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
phy_cap.gen3_no_ssc = 1;
phy_cap.gen2_no_ssc = 1;
phy_cap.gen1_no_ssc = 1;
- if (ihost->oem_parameters.controller.do_enable_ssc == true) {
+ if (ihost->oem_parameters.controller.do_enable_ssc) {
+ struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
+ struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_idx];
+ struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
+ bool en_sas = false;
+ bool en_sata = false;
+ u32 sas_type = 0;
+ u32 sata_spread = 0x2;
+ u32 sas_spread = 0x2;
+
phy_cap.gen3_ssc = 1;
phy_cap.gen2_ssc = 1;
phy_cap.gen1_ssc = 1;
+
+ if (pci_info->orom->hdr.version < ISCI_ROM_VER_1_1)
+ en_sas = en_sata = true;
+ else {
+ sata_spread = ihost->oem_parameters.controller.ssc_sata_tx_spread_level;
+ sas_spread = ihost->oem_parameters.controller.ssc_sas_tx_spread_level;
+
+ if (sata_spread)
+ en_sata = true;
+
+ if (sas_spread) {
+ en_sas = true;
+ sas_type = ihost->oem_parameters.controller.ssc_sas_tx_type;
+ }
+
+ }
+
+ if (en_sas) {
+ u32 reg;
+
+ reg = readl(&xcvr->afe_xcvr_control0);
+ reg |= (0x00100000 | (sas_type << 19));
+ writel(reg, &xcvr->afe_xcvr_control0);
+
+ reg = readl(&xcvr->afe_tx_ssc_control);
+ reg |= sas_spread << 8;
+ writel(reg, &xcvr->afe_tx_ssc_control);
+ }
+
+ if (en_sata) {
+ u32 reg;
+
+ reg = readl(&xcvr->afe_tx_ssc_control);
+ reg |= sata_spread;
+ writel(reg, &xcvr->afe_tx_ssc_control);
+
+ reg = readl(&llr->stp_control);
+ reg |= 1 << 12;
+ writel(reg, &llr->stp_control);
+ }
}
- /*
- * The SAS specification indicates that the phy_capabilities that
- * are transmitted shall have an even parity. Calculate the parity. */
+ /* The SAS specification indicates that the phy_capabilities that
+ * are transmitted shall have an even parity. Calculate the parity.
+ */
parity_check = phy_cap.all;
while (parity_check != 0) {
if (parity_check & 0x1)
@@ -165,20 +209,20 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
parity_check >>= 1;
}
- /*
- * If parity indicates there are an odd number of bits set, then
- * set the parity bit to 1 in the phy capabilities. */
+ /* If parity indicates there are an odd number of bits set, then
+ * set the parity bit to 1 in the phy capabilities.
+ */
if ((parity_count % 2) != 0)
phy_cap.parity = 1;
- writel(phy_cap.all, &iphy->link_layer_registers->phy_capabilities);
+ writel(phy_cap.all, &llr->phy_capabilities);
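The loop above determines whether an odd number of capability bits are set so that the transmitted word ends up with even parity. The same rule, written with a population count, could look like the following sketch (a hypothetical helper using the GCC/Clang __builtin_popcount builtin; it is not part of this patch):

    /* True when the capabilities word needs the parity bit set to
     * reach even parity, i.e. an odd number of bits are currently set.
     */
    static inline bool phy_cap_needs_parity(u32 cap)
    {
            return __builtin_popcount(cap) & 1;
    }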
/* Set the enable spinup period but disable the ability to send
* notify enable spinup
*/
writel(SCU_ENSPINUP_GEN_VAL(COUNT,
phy_user->notify_enable_spin_up_insertion_frequency),
- &iphy->link_layer_registers->notify_enable_spinup_control);
+ &llr->notify_enable_spinup_control);
/* Write the ALIGN Insertion Frequency for connected phy and
* independent of connected state
@@ -189,11 +233,13 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL,
phy_user->align_insertion_frequency);
- writel(clksm_value, &iphy->link_layer_registers->clock_skew_management);
+ writel(clksm_value, &llr->clock_skew_management);
- /* @todo Provide a way to write this register correctly */
- writel(0x02108421,
- &iphy->link_layer_registers->afe_lookup_table_control);
+ if (is_c0(ihost->pdev) || is_c1(ihost->pdev)) {
+ writel(0x04210400, &llr->afe_lookup_table_control);
+ writel(0x020A7C05, &llr->sas_primitive_timeout);
+ } else
+ writel(0x02108421, &llr->afe_lookup_table_control);
llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT,
(u8)ihost->user_parameters.no_outbound_task_timeout);
@@ -210,9 +256,9 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
break;
}
llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
- writel(llctl, &iphy->link_layer_registers->link_layer_control);
+ writel(llctl, &llr->link_layer_control);
- sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts);
+ sp_timeouts = readl(&llr->sas_phy_timeouts);
/* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */
sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF);
@@ -222,20 +268,23 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
*/
sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B);
- writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts);
+ writel(sp_timeouts, &llr->sas_phy_timeouts);
if (is_a2(ihost->pdev)) {
- /* Program the max ARB time for the PHY to 700us so we inter-operate with
- * the PMC expander which shuts down PHYs if the expander PHY generates too
- * many breaks. This time value will guarantee that the initiator PHY will
- * generate the break.
+ /* Program the max ARB time for the PHY to 700us so we
+ * inter-operate with the PMC expander which shuts down
+ * PHYs if the expander PHY generates too many breaks.
+ * This time value will guarantee that the initiator PHY
+ * will generate the break.
*/
writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME,
- &iphy->link_layer_registers->maximum_arbitration_wait_timer_timeout);
+ &llr->maximum_arbitration_wait_timer_timeout);
}
- /* Disable link layer hang detection, rely on the OS timeout for I/O timeouts. */
- writel(0, &iphy->link_layer_registers->link_layer_hang_detection_timeout);
+ /* Disable link layer hang detection, rely on the OS timeout for
+ * I/O timeouts.
+ */
+ writel(0, &llr->link_layer_hang_detection_timeout);
/* We can exit the initial state to the stopped state */
sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
@@ -1049,24 +1098,25 @@ static void scu_link_layer_stop_protocol_engine(
writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control);
}
-/**
- *
- *
- * This method will start the OOB/SN state machine for this struct isci_phy object.
- */
-static void scu_link_layer_start_oob(
- struct isci_phy *iphy)
+static void scu_link_layer_start_oob(struct isci_phy *iphy)
{
- u32 scu_sas_pcfg_value;
-
- scu_sas_pcfg_value =
- readl(&iphy->link_layer_registers->phy_configuration);
- scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
- scu_sas_pcfg_value &=
- ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
- SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
- writel(scu_sas_pcfg_value,
- &iphy->link_layer_registers->phy_configuration);
+ struct scu_link_layer_registers __iomem *ll = iphy->link_layer_registers;
+ u32 val;
+
+ /* Reset OOB sequence - start */
+ val = readl(&ll->phy_configuration);
+ val &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
+ SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
+ writel(val, &ll->phy_configuration);
+ readl(&ll->phy_configuration); /* flush */
+ /* Reset OOB sequence - end */
+
+ /* Start OOB sequence - start */
+ val = readl(&ll->phy_configuration);
+ val |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+ writel(val, &ll->phy_configuration);
+ readl(&ll->phy_configuration); /* flush */
+ /* Start OOB sequence - end */
}
/**
diff --git a/trunk/drivers/scsi/isci/port.c b/trunk/drivers/scsi/isci/port.c
index ac7f27749f97..7c6ac58a5c4c 100644
--- a/trunk/drivers/scsi/isci/port.c
+++ b/trunk/drivers/scsi/isci/port.c
@@ -114,7 +114,7 @@ static u32 sci_port_get_phys(struct isci_port *iport)
* value is returned if the specified port is not valid. When this value is
* returned, no data is copied to the properties output parameter.
*/
-static enum sci_status sci_port_get_properties(struct isci_port *iport,
+enum sci_status sci_port_get_properties(struct isci_port *iport,
struct sci_port_properties *prop)
{
if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
@@ -647,19 +647,26 @@ void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
}
}
-static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy,
- bool do_notify_user)
+static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy)
+{
+ sci_phy_resume(iphy);
+ iport->enabled_phy_mask |= 1 << iphy->phy_index;
+}
+
+static void sci_port_activate_phy(struct isci_port *iport,
+ struct isci_phy *iphy,
+ u8 flags)
{
struct isci_host *ihost = iport->owning_controller;
- if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA)
+ if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA && (flags & PF_RESUME))
sci_phy_resume(iphy);
iport->active_phy_mask |= 1 << iphy->phy_index;
sci_controller_clear_invalid_phy(ihost, iphy);
- if (do_notify_user == true)
+ if (flags & PF_NOTIFY)
isci_port_link_up(ihost, iport, iphy);
}
@@ -669,14 +676,19 @@ void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
struct isci_host *ihost = iport->owning_controller;
iport->active_phy_mask &= ~(1 << iphy->phy_index);
+ iport->enabled_phy_mask &= ~(1 << iphy->phy_index);
if (!iport->active_phy_mask)
iport->last_active_phy = iphy->phy_index;
iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
- /* Re-assign the phy back to the LP as if it were a narrow port */
- writel(iphy->phy_index,
- &iport->port_pe_configuration_register[iphy->phy_index]);
+ /* Re-assign the phy back to the LP as if it were a narrow port for APC
+ * mode. For MPC mode, the phy will remain in the port.
+ */
+ if (iport->owning_controller->oem_parameters.controller.mode_type ==
+ SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE)
+ writel(iphy->phy_index,
+ &iport->port_pe_configuration_register[iphy->phy_index]);
if (do_notify_user == true)
isci_port_link_down(ihost, iphy, iport);
@@ -701,18 +713,16 @@ static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *i
* sci_port_general_link_up_handler - phy can be assigned to port?
* @sci_port: the sci_port object which has a phy that has gone link up.
* @sci_phy: This is the struct isci_phy object that has gone link up.
- * @do_notify_user: This parameter specifies whether to inform the user (via
- * sci_port_link_up()) as to the fact that a new phy as become ready.
+ * @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy
*
- * Determine if this phy can be assigned to this
- * port . If the phy is not a valid PHY for
- * this port then the function will notify the user. A PHY can only be
- * part of a port if it's attached SAS ADDRESS is the same as all other PHYs in
- * the same port. none
+ * Determine if this phy can be assigned to this port. If the phy is
+ * not a valid PHY for this port then the function will notify the user.
+ * A PHY can only be part of a port if its attached SAS ADDRESS is the
+ * same as all other PHYs in the same port.
*/
static void sci_port_general_link_up_handler(struct isci_port *iport,
- struct isci_phy *iphy,
- bool do_notify_user)
+ struct isci_phy *iphy,
+ u8 flags)
{
struct sci_sas_address port_sas_address;
struct sci_sas_address phy_sas_address;
@@ -730,7 +740,7 @@ static void sci_port_general_link_up_handler(struct isci_port *iport,
iport->active_phy_mask == 0) {
struct sci_base_state_machine *sm = &iport->sm;
- sci_port_activate_phy(iport, iphy, do_notify_user);
+ sci_port_activate_phy(iport, iphy, flags);
if (sm->current_state_id == SCI_PORT_RESETTING)
port_state_machine_change(iport, SCI_PORT_READY);
} else
@@ -781,11 +791,16 @@ bool sci_port_link_detected(
struct isci_phy *iphy)
{
if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
- (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) &&
- sci_port_is_wide(iport)) {
- sci_port_invalid_link_up(iport, iphy);
-
- return false;
+ (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA)) {
+ if (sci_port_is_wide(iport)) {
+ sci_port_invalid_link_up(iport, iphy);
+ return false;
+ } else {
+ struct isci_host *ihost = iport->owning_controller;
+ struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]);
+ writel(iphy->phy_index,
+ &dst_port->port_pe_configuration_register[iphy->phy_index]);
+ }
}
return true;
@@ -975,6 +990,13 @@ static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine
}
}
+static void scic_sds_port_ready_substate_waiting_exit(
+ struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+ sci_port_resume_port_task_scheduler(iport);
+}
+
static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
{
u32 index;
@@ -988,13 +1010,13 @@ static void sci_port_ready_substate_operational_enter(struct sci_base_state_mach
writel(iport->physical_port_index,
&iport->port_pe_configuration_register[
iport->phy_table[index]->phy_index]);
+ if (((iport->active_phy_mask^iport->enabled_phy_mask) & (1 << index)) != 0)
+ sci_port_resume_phy(iport, iport->phy_table[index]);
}
}
sci_port_update_viit_entry(iport);
- sci_port_resume_port_task_scheduler(iport);
-
/*
* Post the dummy task for the port so the hardware can schedule
* io correctly
@@ -1061,20 +1083,9 @@ static void sci_port_ready_substate_configuring_enter(struct sci_base_state_mach
if (iport->active_phy_mask == 0) {
isci_port_not_ready(ihost, iport);
- port_state_machine_change(iport,
- SCI_PORT_SUB_WAITING);
- } else if (iport->started_request_count == 0)
- port_state_machine_change(iport,
- SCI_PORT_SUB_OPERATIONAL);
-}
-
-static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm)
-{
- struct isci_port *iport = container_of(sm, typeof(*iport), sm);
-
- sci_port_suspend_port_task_scheduler(iport);
- if (iport->ready_exit)
- sci_port_invalidate_dummy_remote_node(iport);
+ port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
+ } else
+ port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL);
}
enum sci_status sci_port_start(struct isci_port *iport)
@@ -1252,7 +1263,7 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
if (status != SCI_SUCCESS)
return status;
- sci_port_general_link_up_handler(iport, iphy, true);
+ sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);
@@ -1262,7 +1273,7 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
if (status != SCI_SUCCESS)
return status;
- sci_port_general_link_up_handler(iport, iphy, true);
+ sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY);
/* Re-enter the configuring state since this may be the last phy in
* the port.
@@ -1338,13 +1349,13 @@ enum sci_status sci_port_link_up(struct isci_port *iport,
/* Since this is the first phy going link up for the port we
* can just enable it and continue
*/
- sci_port_activate_phy(iport, iphy, true);
+ sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME);
port_state_machine_change(iport,
SCI_PORT_SUB_OPERATIONAL);
return SCI_SUCCESS;
case SCI_PORT_SUB_OPERATIONAL:
- sci_port_general_link_up_handler(iport, iphy, true);
+ sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
return SCI_SUCCESS;
case SCI_PORT_RESETTING:
/* TODO We should make sure that the phy that has gone
@@ -1361,7 +1372,7 @@ enum sci_status sci_port_link_up(struct isci_port *iport,
/* In the resetting state we don't notify the user regarding
* link up and link down notifications.
*/
- sci_port_general_link_up_handler(iport, iphy, false);
+ sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
return SCI_SUCCESS;
default:
dev_warn(sciport_to_dev(iport),
@@ -1584,14 +1595,14 @@ static const struct sci_base_state sci_port_state_table[] = {
},
[SCI_PORT_SUB_WAITING] = {
.enter_state = sci_port_ready_substate_waiting_enter,
+ .exit_state = scic_sds_port_ready_substate_waiting_exit,
},
[SCI_PORT_SUB_OPERATIONAL] = {
.enter_state = sci_port_ready_substate_operational_enter,
.exit_state = sci_port_ready_substate_operational_exit
},
[SCI_PORT_SUB_CONFIGURING] = {
- .enter_state = sci_port_ready_substate_configuring_enter,
- .exit_state = sci_port_ready_substate_configuring_exit
+ .enter_state = sci_port_ready_substate_configuring_enter
},
[SCI_PORT_RESETTING] = {
.exit_state = sci_port_resetting_state_exit
@@ -1609,6 +1620,7 @@ void sci_port_construct(struct isci_port *iport, u8 index,
iport->logical_port_index = SCIC_SDS_DUMMY_PORT;
iport->physical_port_index = index;
iport->active_phy_mask = 0;
+ iport->enabled_phy_mask = 0;
iport->last_active_phy = 0;
iport->ready_exit = false;
diff --git a/trunk/drivers/scsi/isci/port.h b/trunk/drivers/scsi/isci/port.h
index cb5ffbc38603..08116090eb70 100644
--- a/trunk/drivers/scsi/isci/port.h
+++ b/trunk/drivers/scsi/isci/port.h
@@ -63,6 +63,9 @@
#define SCIC_SDS_DUMMY_PORT 0xFF
+#define PF_NOTIFY (1 << 0)
+#define PF_RESUME (1 << 1)
+
struct isci_phy;
struct isci_host;
@@ -83,6 +86,8 @@ enum isci_status {
* @logical_port_index: software port index
* @physical_port_index: hardware port index
* @active_phy_mask: identifies phy members
+ * @enabled_phy_mask: mask of phys that have been enabled and are
+ * already part of the port
* @reserved_tag:
* @reserved_rni: reserved RNI for the port task scheduler workaround
* @started_request_count: reference count for outstanding commands
@@ -104,6 +109,7 @@ struct isci_port {
u8 logical_port_index;
u8 physical_port_index;
u8 active_phy_mask;
+ u8 enabled_phy_mask;
u8 last_active_phy;
u16 reserved_rni;
u16 reserved_tag;
@@ -250,6 +256,10 @@ bool sci_port_link_detected(
struct isci_port *iport,
struct isci_phy *iphy);
+enum sci_status sci_port_get_properties(
+ struct isci_port *iport,
+ struct sci_port_properties *prop);
+
enum sci_status sci_port_link_up(struct isci_port *iport,
struct isci_phy *iphy);
enum sci_status sci_port_link_down(struct isci_port *iport,
diff --git a/trunk/drivers/scsi/isci/port_config.c b/trunk/drivers/scsi/isci/port_config.c
index 38a99d281141..6d1e9544cbe5 100644
--- a/trunk/drivers/scsi/isci/port_config.c
+++ b/trunk/drivers/scsi/isci/port_config.c
@@ -57,7 +57,7 @@
#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10)
#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10)
-#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (100)
+#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (250)
enum SCIC_SDS_APC_ACTIVITY {
SCIC_SDS_APC_SKIP_PHY,
@@ -466,6 +466,23 @@ sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
return sci_port_configuration_agent_validate_ports(ihost, port_agent);
}
+/*
+ * This routine will restart the automatic port configuration timeout
+ * timer for the next time period. This could be caused by either a link
+ * down event or a link up event where we cannot yet tell to which
+ * port a phy belongs.
+ */
+static void sci_apc_agent_start_timer(
+ struct sci_port_configuration_agent *port_agent,
+ u32 timeout)
+{
+ if (port_agent->timer_pending)
+ sci_del_timer(&port_agent->timer);
+
+ port_agent->timer_pending = true;
+ sci_mod_timer(&port_agent->timer, timeout);
+}
+
static void sci_apc_agent_configure_ports(struct isci_host *ihost,
struct sci_port_configuration_agent *port_agent,
struct isci_phy *iphy,
@@ -565,17 +582,8 @@ static void sci_apc_agent_configure_ports(struct isci_host *ihost,
break;
case SCIC_SDS_APC_START_TIMER:
- /*
- * This can occur for either a link down event, or a link
- * up event where we cannot yet tell the port to which a
- * phy belongs.
- */
- if (port_agent->timer_pending)
- sci_del_timer(&port_agent->timer);
-
- port_agent->timer_pending = true;
- sci_mod_timer(&port_agent->timer,
- SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
+ sci_apc_agent_start_timer(port_agent,
+ SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
break;
case SCIC_SDS_APC_SKIP_PHY:
@@ -607,7 +615,8 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
if (!iport) {
/* the phy is not part of this port */
port_agent->phy_ready_mask |= 1 << phy_index;
- sci_apc_agent_configure_ports(ihost, port_agent, iphy, true);
+ sci_apc_agent_start_timer(port_agent,
+ SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
} else {
/* the phy is already part of the port */
u32 port_state = iport->sm.current_state_id;
diff --git a/trunk/drivers/scsi/isci/probe_roms.c b/trunk/drivers/scsi/isci/probe_roms.c
index b5f4341de243..9b8117b9d756 100644
--- a/trunk/drivers/scsi/isci/probe_roms.c
+++ b/trunk/drivers/scsi/isci/probe_roms.c
@@ -147,7 +147,7 @@ struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmw
memcpy(orom, fw->data, fw->size);
- if (is_c0(pdev))
+ if (is_c0(pdev) || is_c1(pdev))
goto out;
/*
diff --git a/trunk/drivers/scsi/isci/probe_roms.h b/trunk/drivers/scsi/isci/probe_roms.h
index 2c75248ca326..bb0e9d4d97c9 100644
--- a/trunk/drivers/scsi/isci/probe_roms.h
+++ b/trunk/drivers/scsi/isci/probe_roms.h
@@ -152,7 +152,7 @@ struct sci_user_parameters {
#define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4
struct sci_oem_params;
-int sci_oem_parameters_validate(struct sci_oem_params *oem);
+int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version);
struct isci_orom;
struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
@@ -191,6 +191,11 @@ struct isci_oem_hdr {
0x1a, 0x04, 0xc6)
#define ISCI_EFI_VAR_NAME "RstScuO"
+#define ISCI_ROM_VER_1_0 0x10
+#define ISCI_ROM_VER_1_1 0x11
+#define ISCI_ROM_VER_1_3 0x13
+#define ISCI_ROM_VER_LATEST ISCI_ROM_VER_1_3
+
/* Allowed PORT configuration modes APC Automatic PORT configuration mode is
* defined by the OEM configuration parameters providing no PHY_MASK parameters
* for any PORT. i.e. There are no phys assigned to any of the ports at start.
@@ -220,8 +225,86 @@ struct sci_oem_params {
struct {
uint8_t mode_type;
uint8_t max_concurr_spin_up;
- uint8_t do_enable_ssc;
- uint8_t reserved;
+ /*
+ * This bitfield indicates the OEM's desired default Tx
+ * Spread Spectrum Clocking (SSC) settings for SATA and SAS.
+ * NOTE: Default SSC Modulation Frequency is 31.5KHz.
+ */
+ union {
+ struct {
+ /*
+ * NOTE: Max spread for SATA is +0 / -5000 PPM.
+ * Down-spreading SSC (only method allowed for SATA):
+ * SATA SSC Tx Disabled = 0x0
+ * SATA SSC Tx at +0 / -1419 PPM Spread = 0x2
+ * SATA SSC Tx at +0 / -2129 PPM Spread = 0x3
+ * SATA SSC Tx at +0 / -4257 PPM Spread = 0x6
+ * SATA SSC Tx at +0 / -4967 PPM Spread = 0x7
+ */
+ uint8_t ssc_sata_tx_spread_level:4;
+ /*
+ * SAS SSC Tx Disabled = 0x0
+ *
+ * NOTE: Max spread for SAS down-spreading +0 /
+ * -2300 PPM
+ * Down-spreading SSC:
+ * SAS SSC Tx at +0 / -1419 PPM Spread = 0x2
+ * SAS SSC Tx at +0 / -2129 PPM Spread = 0x3
+ *
+ * NOTE: Max spread for SAS center-spreading +2300 /
+ * -2300 PPM
+ * Center-spreading SSC:
+ * SAS SSC Tx at +1064 / -1064 PPM Spread = 0x3
+ * SAS SSC Tx at +2129 / -2129 PPM Spread = 0x6
+ */
+ uint8_t ssc_sas_tx_spread_level:3;
+ /*
+ * NOTE: Refer to the SSC section of the SAS 2.x
+ * Specification for proper setting of this field.
+ * For standard SAS Initiator SAS PHY operation it
+ * should be 0 for Down-spreading.
+ * SAS SSC Tx spread type:
+ * Down-spreading SSC = 0
+ * Center-spreading SSC = 1
+ */
+ uint8_t ssc_sas_tx_type:1;
+ };
+ uint8_t do_enable_ssc;
+ };
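As a reading aid only: assuming bit-fields are allocated from the least-significant bit upward (typical for the platforms this driver targets), the three new fields overlay the legacy do_enable_ssc byte, so e.g. SATA spread 0x2, SAS spread 0x2 and down-spreading (type 0) correspond to:

    /* Illustrative overlay of the union above (the layout is an assumption). */
    u8 do_enable_ssc = (0x0 << 7) |   /* ssc_sas_tx_type: down-spreading */
                       (0x2 << 4) |   /* ssc_sas_tx_spread_level */
                        0x2;          /* ssc_sata_tx_spread_level -> 0x22 */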
+ /*
+ * This field indicates length of the SAS/SATA cable between
+ * host and device.
+ * This field is used to relate the analog parameters of the phy in
+ * the silicon to the length of the cable.
+ * Supported cable attenuation levels:
+ * "short" - up to 3m, "medium" - 3m to 6m, and "long" - more than
+ * 6m.
+ *
+ * This is a bit mask field:
+ *
+ * BIT:        (MSB) 7 6 5 4
+ * ASSIGNMENT:  medium cable length assignment
+ * BIT:         3 2 1 0 (LSB)
+ * ASSIGNMENT:  long cable length assignment
+ *
+ * BITS 7-4 are set when the cable length is assigned to medium
+ * BITS 3-0 are set when the cable length is assigned to long
+ *
+ * The BIT positions are clear when the cable length is
+ * assigned to short.
+ *
+ * Setting the bits for both long and medium cable length is
+ * undefined.
+ *
+ * A value of 0x84 would assign
+ * phy3 - medium
+ * phy2 - long
+ * phy1 - short
+ * phy0 - short
+ */
+ uint8_t cable_selection_mask;
} controller;
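A sketch of how the documented cable_selection_mask layout could be decoded for a single phy (hypothetical helper, not part of the patch; it simply follows the bit assignments described above):

    enum cable_len { CABLE_SHORT, CABLE_MEDIUM, CABLE_LONG };

    static enum cable_len decode_cable_len(u8 mask, unsigned int phy)
    {
            if (mask & (1 << (phy + 4)))    /* bits 7-4: medium */
                    return CABLE_MEDIUM;
            if (mask & (1 << phy))          /* bits 3-0: long */
                    return CABLE_LONG;
            return CABLE_SHORT;             /* both bits clear: short */
    }

With the documented example value of 0x84 this yields medium for phy3, long for phy2, and short for phy1 and phy0.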
struct {
diff --git a/trunk/drivers/scsi/isci/remote_device.c b/trunk/drivers/scsi/isci/remote_device.c
index b207cd3b15a0..dd74b6ceeb82 100644
--- a/trunk/drivers/scsi/isci/remote_device.c
+++ b/trunk/drivers/scsi/isci/remote_device.c
@@ -53,6 +53,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include
+#include
#include "isci.h"
#include "port.h"
#include "remote_device.h"
@@ -1101,6 +1102,7 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
struct isci_remote_device *idev)
{
enum sci_status status;
+ struct sci_port_properties properties;
struct domain_device *dev = idev->domain_dev;
sci_remote_device_construct(iport, idev);
@@ -1110,6 +1112,11 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
* entries will be needed to store the remote node.
*/
idev->is_direct_attached = true;
+
+ sci_port_get_properties(iport, &properties);
+ /* Get accurate port width from port's phy mask for a DA device. */
+ idev->device_port_width = hweight32(properties.phy_mask);
+
status = sci_controller_allocate_remote_node_context(iport->owning_controller,
idev,
&idev->rnc.remote_node_index);
@@ -1125,9 +1132,6 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
idev->connection_rate = sci_port_get_max_allowed_speed(iport);
- /* / @todo Should I assign the port width by reading all of the phys on the port? */
- idev->device_port_width = 1;
-
return SCI_SUCCESS;
}
diff --git a/trunk/drivers/scsi/isci/task.c b/trunk/drivers/scsi/isci/task.c
index 66ad3dc89498..f5a3f7d2bdab 100644
--- a/trunk/drivers/scsi/isci/task.c
+++ b/trunk/drivers/scsi/isci/task.c
@@ -496,7 +496,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
}
}
- isci_print_tmf(tmf);
+ isci_print_tmf(ihost, tmf);
if (tmf->status == SCI_SUCCESS)
ret = TMF_RESP_FUNC_COMPLETE;
diff --git a/trunk/drivers/scsi/isci/task.h b/trunk/drivers/scsi/isci/task.h
index bc78c0a41d5c..1b27b3797c6c 100644
--- a/trunk/drivers/scsi/isci/task.h
+++ b/trunk/drivers/scsi/isci/task.h
@@ -106,7 +106,6 @@ struct isci_tmf {
} resp;
unsigned char lun[8];
u16 io_tag;
- struct isci_remote_device *device;
enum isci_tmf_function_codes tmf_code;
int status;
@@ -120,10 +119,10 @@ struct isci_tmf {
};
-static inline void isci_print_tmf(struct isci_tmf *tmf)
+static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)
{
if (SAS_PROTOCOL_SATA == tmf->proto)
- dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+ dev_dbg(&ihost->pdev->dev,
"%s: status = %x\n"
"tmf->resp.d2h_fis.status = %x\n"
"tmf->resp.d2h_fis.error = %x\n",
@@ -132,7 +131,7 @@ static inline void isci_print_tmf(struct isci_tmf *tmf)
tmf->resp.d2h_fis.status,
tmf->resp.d2h_fis.error);
else
- dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+ dev_dbg(&ihost->pdev->dev,
"%s: status = %x\n"
"tmf->resp.resp_iu.data_present = %x\n"
"tmf->resp.resp_iu.status = %x\n"
diff --git a/trunk/drivers/scsi/libfc/fc_disc.c b/trunk/drivers/scsi/libfc/fc_disc.c
index 7269e928824a..1d1b0c9da29b 100644
--- a/trunk/drivers/scsi/libfc/fc_disc.c
+++ b/trunk/drivers/scsi/libfc/fc_disc.c
@@ -61,7 +61,7 @@ static void fc_disc_restart(struct fc_disc *);
* Locking Note: This function expects that the lport mutex is locked before
* calling it.
*/
-void fc_disc_stop_rports(struct fc_disc *disc)
+static void fc_disc_stop_rports(struct fc_disc *disc)
{
struct fc_lport *lport;
struct fc_rport_priv *rdata;
@@ -682,7 +682,7 @@ static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
* fc_disc_stop() - Stop discovery for a given lport
* @lport: The local port that discovery should stop on
*/
-void fc_disc_stop(struct fc_lport *lport)
+static void fc_disc_stop(struct fc_lport *lport)
{
struct fc_disc *disc = &lport->disc;
@@ -698,7 +698,7 @@ void fc_disc_stop(struct fc_lport *lport)
* This function will block until discovery has been
* completely stopped and all rports have been deleted.
*/
-void fc_disc_stop_final(struct fc_lport *lport)
+static void fc_disc_stop_final(struct fc_lport *lport)
{
fc_disc_stop(lport);
lport->tt.rport_flush_queue();
diff --git a/trunk/drivers/scsi/libfc/fc_elsct.c b/trunk/drivers/scsi/libfc/fc_elsct.c
index fb9161dc4ca6..e17a28d324d0 100644
--- a/trunk/drivers/scsi/libfc/fc_elsct.c
+++ b/trunk/drivers/scsi/libfc/fc_elsct.c
@@ -28,6 +28,7 @@
#include
#include
#include
+#include "fc_libfc.h"
/**
* fc_elsct_send() - Send an ELS or CT frame
diff --git a/trunk/drivers/scsi/libfc/fc_exch.c b/trunk/drivers/scsi/libfc/fc_exch.c
index 9de9db27e874..4d70d96fa5dc 100644
--- a/trunk/drivers/scsi/libfc/fc_exch.c
+++ b/trunk/drivers/scsi/libfc/fc_exch.c
@@ -91,7 +91,7 @@ struct fc_exch_pool {
* It manages the allocation of exchange IDs.
*/
struct fc_exch_mgr {
- struct fc_exch_pool *pool;
+ struct fc_exch_pool __percpu *pool;
mempool_t *ep_pool;
enum fc_class class;
struct kref kref;
diff --git a/trunk/drivers/scsi/libfc/fc_fcp.c b/trunk/drivers/scsi/libfc/fc_fcp.c
index 221875ec3d7c..f607314810ac 100644
--- a/trunk/drivers/scsi/libfc/fc_fcp.c
+++ b/trunk/drivers/scsi/libfc/fc_fcp.c
@@ -155,6 +155,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
fsp->xfer_ddp = FC_XID_UNKNOWN;
atomic_set(&fsp->ref_cnt, 1);
init_timer(&fsp->timer);
+ fsp->timer.data = (unsigned long)fsp;
INIT_LIST_HEAD(&fsp->list);
spin_lock_init(&fsp->scsi_pkt_lock);
}
@@ -1850,9 +1851,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
}
put_cpu();
- init_timer(&fsp->timer);
- fsp->timer.data = (unsigned long)fsp;
-
/*
* send it to the lower layer
* if we get -1 return then put the request in the pending
diff --git a/trunk/drivers/scsi/libfc/fc_lport.c b/trunk/drivers/scsi/libfc/fc_lport.c
index e77094a587ed..83750ebb527f 100644
--- a/trunk/drivers/scsi/libfc/fc_lport.c
+++ b/trunk/drivers/scsi/libfc/fc_lport.c
@@ -677,7 +677,8 @@ EXPORT_SYMBOL(fc_set_mfs);
* @lport: The local port receiving the event
* @event: The discovery event
*/
-void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
+static void fc_lport_disc_callback(struct fc_lport *lport,
+ enum fc_disc_event event)
{
switch (event) {
case DISC_EV_SUCCESS:
@@ -1568,7 +1569,7 @@ EXPORT_SYMBOL(fc_lport_flogi_resp);
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
-void fc_lport_enter_flogi(struct fc_lport *lport)
+static void fc_lport_enter_flogi(struct fc_lport *lport)
{
struct fc_frame *fp;
diff --git a/trunk/drivers/scsi/libfc/fc_rport.c b/trunk/drivers/scsi/libfc/fc_rport.c
index b9e434844a69..83aa1efec875 100644
--- a/trunk/drivers/scsi/libfc/fc_rport.c
+++ b/trunk/drivers/scsi/libfc/fc_rport.c
@@ -391,7 +391,7 @@ static void fc_rport_work(struct work_struct *work)
* If it appears we are already logged in, ADISC is used to verify
* the setup.
*/
-int fc_rport_login(struct fc_rport_priv *rdata)
+static int fc_rport_login(struct fc_rport_priv *rdata)
{
mutex_lock(&rdata->rp_mutex);
@@ -451,7 +451,7 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
* function will hold the rport lock, call an _enter_*
* function and then unlock the rport.
*/
-int fc_rport_logoff(struct fc_rport_priv *rdata)
+static int fc_rport_logoff(struct fc_rport_priv *rdata)
{
mutex_lock(&rdata->rp_mutex);
@@ -653,8 +653,8 @@ static int fc_rport_login_complete(struct fc_rport_priv *rdata,
* @fp: The FLOGI response frame
* @rp_arg: The remote port that received the FLOGI response
*/
-void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
- void *rp_arg)
+static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rp_arg)
{
struct fc_rport_priv *rdata = rp_arg;
struct fc_lport *lport = rdata->local_port;
@@ -1520,7 +1520,7 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
*
* Locking Note: Called with the lport lock held.
*/
-void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
+static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_seq_els_data els_data;
diff --git a/trunk/drivers/scsi/megaraid.c b/trunk/drivers/scsi/megaraid.c
index 5c1776406c96..15eefa1d61fd 100644
--- a/trunk/drivers/scsi/megaraid.c
+++ b/trunk/drivers/scsi/megaraid.c
@@ -306,19 +306,22 @@ mega_query_adapter(adapter_t *adapter)
adapter->host->sg_tablesize = adapter->sglen;
- /* use HP firmware and bios version encoding */
+ /* use HP firmware and bios version encoding
+ * Note: fw_version[0|1] and bios_version[0|1] were originally shifted
+ * right 8 bits making them zero. This 0 value was hardcoded to fix
+ * sparse warnings.
+ */
if (adapter->product_info.subsysvid == HP_SUBSYS_VID) {
sprintf (adapter->fw_version, "%c%d%d.%d%d",
adapter->product_info.fw_version[2],
- adapter->product_info.fw_version[1] >> 8,
+ 0,
adapter->product_info.fw_version[1] & 0x0f,
- adapter->product_info.fw_version[0] >> 8,
+ 0,
adapter->product_info.fw_version[0] & 0x0f);
sprintf (adapter->bios_version, "%c%d%d.%d%d",
adapter->product_info.bios_version[2],
- adapter->product_info.bios_version[1] >> 8,
+ 0,
adapter->product_info.bios_version[1] & 0x0f,
- adapter->product_info.bios_version[0] >> 8,
+ 0,
adapter->product_info.bios_version[0] & 0x0f);
} else {
memcpy(adapter->fw_version,
diff --git a/trunk/drivers/scsi/megaraid/megaraid_sas.h b/trunk/drivers/scsi/megaraid/megaraid_sas.h
index dd94c7d574fb..e5f416f8042d 100644
--- a/trunk/drivers/scsi/megaraid/megaraid_sas.h
+++ b/trunk/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "00.00.06.12-rc1"
-#define MEGASAS_RELDATE "Oct. 5, 2011"
-#define MEGASAS_EXT_VERSION "Wed. Oct. 5 17:00:00 PDT 2011"
+#define MEGASAS_VERSION "00.00.06.14-rc1"
+#define MEGASAS_RELDATE "Jan. 6, 2012"
+#define MEGASAS_EXT_VERSION "Fri. Jan. 6 17:00:00 PDT 2012"
/*
* Device IDs
@@ -773,7 +773,6 @@ struct megasas_ctrl_info {
#define MFI_OB_INTR_STATUS_MASK 0x00000002
#define MFI_POLL_TIMEOUT_SECS 60
-#define MEGASAS_COMPLETION_TIMER_INTERVAL (HZ/10)
#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001
@@ -1353,7 +1352,6 @@ struct megasas_instance {
u32 mfiStatus;
u32 last_seq_num;
- struct timer_list io_completion_timer;
struct list_head internal_reset_pending_q;
/* Ptr to hba specific information */
diff --git a/trunk/drivers/scsi/megaraid/megaraid_sas_base.c b/trunk/drivers/scsi/megaraid/megaraid_sas_base.c
index 29a994f9c4f1..8b300be44284 100644
--- a/trunk/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/trunk/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : v00.00.06.12-rc1
+ * Version : v00.00.06.14-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
@@ -58,14 +58,6 @@
#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"
-/*
- * poll_mode_io:1- schedule complete completion from q cmd
- */
-static unsigned int poll_mode_io;
-module_param_named(poll_mode_io, poll_mode_io, int, 0);
-MODULE_PARM_DESC(poll_mode_io,
- "Complete cmds from IO path, (default=0)");
-
/*
* Number of sectors per IO command
* Will be set in megasas_init_mfi if user does not provide
@@ -1439,11 +1431,6 @@ megasas_build_and_issue_cmd(struct megasas_instance *instance,
instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
cmd->frame_count-1, instance->reg_set);
- /*
- * Check if we have pend cmds to be completed
- */
- if (poll_mode_io && atomic_read(&instance->fw_outstanding))
- tasklet_schedule(&instance->isr_tasklet);
return 0;
out_return_cmd:
@@ -3370,47 +3357,6 @@ megasas_issue_init_mfi(struct megasas_instance *instance)
return -EINVAL;
}
-/**
- * megasas_start_timer - Initializes a timer object
- * @instance: Adapter soft state
- * @timer: timer object to be initialized
- * @fn: timer function
- * @interval: time interval between timer function call
- */
-static inline void
-megasas_start_timer(struct megasas_instance *instance,
- struct timer_list *timer,
- void *fn, unsigned long interval)
-{
- init_timer(timer);
- timer->expires = jiffies + interval;
- timer->data = (unsigned long)instance;
- timer->function = fn;
- add_timer(timer);
-}
-
-/**
- * megasas_io_completion_timer - Timer fn
- * @instance_addr: Address of adapter soft state
- *
- * Schedules tasklet for cmd completion
- * if poll_mode_io is set
- */
-static void
-megasas_io_completion_timer(unsigned long instance_addr)
-{
- struct megasas_instance *instance =
- (struct megasas_instance *)instance_addr;
-
- if (atomic_read(&instance->fw_outstanding))
- tasklet_schedule(&instance->isr_tasklet);
-
- /* Restart timer */
- if (poll_mode_io)
- mod_timer(&instance->io_completion_timer,
- jiffies + MEGASAS_COMPLETION_TIMER_INTERVAL);
-}
-
static u32
megasas_init_adapter_mfi(struct megasas_instance *instance)
{
@@ -3638,11 +3584,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
- /* Initialize the cmd completion timer */
- if (poll_mode_io)
- megasas_start_timer(instance, &instance->io_completion_timer,
- megasas_io_completion_timer,
- MEGASAS_COMPLETION_TIMER_INTERVAL);
return 0;
fail_init_adapter:
@@ -4369,9 +4310,6 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
host = instance->host;
instance->unload = 1;
- if (poll_mode_io)
- del_timer_sync(&instance->io_completion_timer);
-
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
@@ -4511,12 +4449,6 @@ megasas_resume(struct pci_dev *pdev)
}
instance->instancet->enable_intr(instance->reg_set);
-
- /* Initialize the cmd completion timer */
- if (poll_mode_io)
- megasas_start_timer(instance, &instance->io_completion_timer,
- megasas_io_completion_timer,
- MEGASAS_COMPLETION_TIMER_INTERVAL);
instance->unload = 0;
/*
@@ -4570,9 +4502,6 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
host = instance->host;
fusion = instance->ctrl_context;
- if (poll_mode_io)
- del_timer_sync(&instance->io_completion_timer);
-
scsi_remove_host(instance->host);
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
@@ -4773,6 +4702,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
cmd->frame->hdr.context = cmd->index;
cmd->frame->hdr.pad_0 = 0;
+ cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
+ MFI_FRAME_SENSE64);
/*
* The management interface between applications and the fw uses
@@ -5219,60 +5150,6 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun
static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
megasas_sysfs_set_dbg_lvl);
-static ssize_t
-megasas_sysfs_show_poll_mode_io(struct device_driver *dd, char *buf)
-{
- return sprintf(buf, "%u\n", poll_mode_io);
-}
-
-static ssize_t
-megasas_sysfs_set_poll_mode_io(struct device_driver *dd,
- const char *buf, size_t count)
-{
- int retval = count;
- int tmp = poll_mode_io;
- int i;
- struct megasas_instance *instance;
-
- if (sscanf(buf, "%u", &poll_mode_io) < 1) {
- printk(KERN_ERR "megasas: could not set poll_mode_io\n");
- retval = -EINVAL;
- }
-
- /*
- * Check if poll_mode_io is already set or is same as previous value
- */
- if ((tmp && poll_mode_io) || (tmp == poll_mode_io))
- goto out;
-
- if (poll_mode_io) {
- /*
- * Start timers for all adapters
- */
- for (i = 0; i < megasas_mgmt_info.max_index; i++) {
- instance = megasas_mgmt_info.instance[i];
- if (instance) {
- megasas_start_timer(instance,
- &instance->io_completion_timer,
- megasas_io_completion_timer,
- MEGASAS_COMPLETION_TIMER_INTERVAL);
- }
- }
- } else {
- /*
- * Delete timers for all adapters
- */
- for (i = 0; i < megasas_mgmt_info.max_index; i++) {
- instance = megasas_mgmt_info.instance[i];
- if (instance)
- del_timer_sync(&instance->io_completion_timer);
- }
- }
-
-out:
- return retval;
-}
-
static void
megasas_aen_polling(struct work_struct *work)
{
@@ -5502,11 +5379,6 @@ megasas_aen_polling(struct work_struct *work)
kfree(ev);
}
-
-static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR,
- megasas_sysfs_show_poll_mode_io,
- megasas_sysfs_set_poll_mode_io);
-
/**
* megasas_init - Driver load entry point
*/
@@ -5565,11 +5437,6 @@ static int __init megasas_init(void)
&driver_attr_dbg_lvl);
if (rval)
goto err_dcf_dbg_lvl;
- rval = driver_create_file(&megasas_pci_driver.driver,
- &driver_attr_poll_mode_io);
- if (rval)
- goto err_dcf_poll_mode_io;
-
rval = driver_create_file(&megasas_pci_driver.driver,
&driver_attr_support_device_change);
if (rval)
@@ -5578,10 +5445,6 @@ static int __init megasas_init(void)
return rval;
err_dcf_support_device_change:
- driver_remove_file(&megasas_pci_driver.driver,
- &driver_attr_poll_mode_io);
-
-err_dcf_poll_mode_io:
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_dbg_lvl);
err_dcf_dbg_lvl:
@@ -5606,8 +5469,6 @@ static int __init megasas_init(void)
*/
static void __exit megasas_exit(void)
{
- driver_remove_file(&megasas_pci_driver.driver,
- &driver_attr_poll_mode_io);
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_dbg_lvl);
driver_remove_file(&megasas_pci_driver.driver,
diff --git a/trunk/drivers/scsi/megaraid/megaraid_sas_fp.c b/trunk/drivers/scsi/megaraid/megaraid_sas_fp.c
index 5255dd688aca..294abb0defa6 100644
--- a/trunk/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/trunk/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -282,7 +282,9 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
else {
*pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
if ((raid->level >= 5) &&
- (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER))
+ ((instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER &&
+ raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
else if (raid->level == 1) {
/* Get alternate Pd. */
diff --git a/trunk/drivers/scsi/qla4xxx/ql4_def.h b/trunk/drivers/scsi/qla4xxx/ql4_def.h
index 22a3ff02e48a..bfe68545203f 100644
--- a/trunk/drivers/scsi/qla4xxx/ql4_def.h
+++ b/trunk/drivers/scsi/qla4xxx/ql4_def.h
@@ -150,6 +150,8 @@
#define QL4_SESS_RECOVERY_TMO 120 /* iSCSI session */
/* recovery timeout */
+#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
+#define LSW(x) ((uint16_t)(x))
#define LSDW(x) ((u32)((u64)(x)))
#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
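For illustration, with a hypothetical halt-status value of 0x00006700 (the firmware-abort code reported by the watchdog change later in this patch), the new macros extract:

    uint32_t halt_status = 0x00006700;      /* illustrative value only */

    uint16_t low_word = LSW(halt_status);   /* 0x6700 */
    uint8_t  hi_byte  = MSB(halt_status);   /* 0x67: bits 15:8 of the low word */
    /* hence LSW(MSB(halt_status)) == 0x67, the test used in qla4_8xxx_watchdog() */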
@@ -671,6 +673,7 @@ struct scsi_qla_host {
uint16_t pri_ddb_idx;
uint16_t sec_ddb_idx;
int is_reset;
+ uint16_t temperature;
};
struct ql4_task_data {
diff --git a/trunk/drivers/scsi/qla4xxx/ql4_init.c b/trunk/drivers/scsi/qla4xxx/ql4_init.c
index 1bdfa8120ac8..90614f38b55d 100644
--- a/trunk/drivers/scsi/qla4xxx/ql4_init.c
+++ b/trunk/drivers/scsi/qla4xxx/ql4_init.c
@@ -697,6 +697,9 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
&ha->reg->ctrl_status);
readl(&ha->reg->ctrl_status);
+ writel(set_rmask(CSR_SCSI_COMPLETION_INTR),
+ &ha->reg->ctrl_status);
+ readl(&ha->reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: Get firmware "
diff --git a/trunk/drivers/scsi/qla4xxx/ql4_mbx.c b/trunk/drivers/scsi/qla4xxx/ql4_mbx.c
index c2593782fbbe..e1e66a45e4d0 100644
--- a/trunk/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/trunk/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -219,6 +219,13 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
ha->mailbox_timeout_count++;
mbx_sts[0] = (-1);
set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ if (is_qla8022(ha)) {
+ ql4_printk(KERN_INFO, ha,
+ "disabling pause transmit on port 0 & 1.\n");
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0 |
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ }
goto mbox_exit;
}
diff --git a/trunk/drivers/scsi/qla4xxx/ql4_nx.c b/trunk/drivers/scsi/qla4xxx/ql4_nx.c
index 8d6bc1b2ff17..78f1111158d7 100644
--- a/trunk/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/trunk/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1875,6 +1875,11 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
{
int retval;
+
+ /* clear the interrupt */
+ writel(0, &ha->qla4_8xxx_reg->host_int);
+ readl(&ha->qla4_8xxx_reg->host_int);
+
retval = qla4_8xxx_device_state_handler(ha);
if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags))
diff --git a/trunk/drivers/scsi/qla4xxx/ql4_nx.h b/trunk/drivers/scsi/qla4xxx/ql4_nx.h
index 35376a1c3f1b..dc45ac923691 100644
--- a/trunk/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/trunk/drivers/scsi/qla4xxx/ql4_nx.h
@@ -19,12 +19,28 @@
#define PHAN_PEG_RCV_INITIALIZED 0xff01
/*CRB_RELATED*/
-#define QLA82XX_CRB_BASE QLA82XX_CAM_RAM(0x200)
-#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X))
-
+#define QLA82XX_CRB_BASE (QLA82XX_CAM_RAM(0x200))
+#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X))
#define CRB_CMDPEG_STATE QLA82XX_REG(0x50)
#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
+#define CRB_TEMP_STATE QLA82XX_REG(0x1b4)
+
+#define qla82xx_get_temp_val(x) ((x) >> 16)
+#define qla82xx_get_temp_state(x) ((x) & 0xffff)
+#define qla82xx_encode_temp(val, state) (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+ QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */
+ QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */
+ QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. */
+};
+
+#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
+#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
#define QLA82XX_HW_H1_CH_HUB_ADR 0x0E
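A short sketch of the packed temperature word using the macros and states defined above (the numbers are illustrative only):

    /* 75 degrees C in the warning state, packed and unpacked: */
    uint32_t temp  = qla82xx_encode_temp(75, QLA82XX_TEMP_WARN); /* 0x004b0002 */
    uint32_t state = qla82xx_get_temp_state(temp);               /* QLA82XX_TEMP_WARN */
    uint32_t val   = qla82xx_get_temp_val(temp);                 /* 75 */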
diff --git a/trunk/drivers/scsi/qla4xxx/ql4_os.c b/trunk/drivers/scsi/qla4xxx/ql4_os.c
index ec393a00c038..ce6d3b7f0c61 100644
--- a/trunk/drivers/scsi/qla4xxx/ql4_os.c
+++ b/trunk/drivers/scsi/qla4xxx/ql4_os.c
@@ -35,43 +35,44 @@ static struct kmem_cache *srb_cachep;
int ql4xdisablesysfsboot = 1;
module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdisablesysfsboot,
- "Set to disable exporting boot targets to sysfs\n"
- " 0 - Export boot targets\n"
- " 1 - Do not export boot targets (Default)");
+ " Set to disable exporting boot targets to sysfs.\n"
+ "\t\t 0 - Export boot targets\n"
+ "\t\t 1 - Do not export boot targets (Default)");
int ql4xdontresethba = 0;
module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdontresethba,
- "Don't reset the HBA for driver recovery \n"
- " 0 - It will reset HBA (Default)\n"
- " 1 - It will NOT reset HBA");
+ " Don't reset the HBA for driver recovery.\n"
+ "\t\t 0 - It will reset HBA (Default)\n"
+ "\t\t 1 - It will NOT reset HBA");
-int ql4xextended_error_logging = 0; /* 0 = off, 1 = log errors */
+int ql4xextended_error_logging;
module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xextended_error_logging,
- "Option to enable extended error logging, "
- "Default is 0 - no logging, 1 - debug logging");
+ " Option to enable extended error logging.\n"
+ "\t\t 0 - no logging (Default)\n"
+ "\t\t 2 - debug logging");
int ql4xenablemsix = 1;
module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql4xenablemsix,
- "Set to enable MSI or MSI-X interrupt mechanism.\n"
- " 0 = enable INTx interrupt mechanism.\n"
- " 1 = enable MSI-X interrupt mechanism (Default).\n"
- " 2 = enable MSI interrupt mechanism.");
+ " Set to enable MSI or MSI-X interrupt mechanism.\n"
+ "\t\t 0 = enable INTx interrupt mechanism.\n"
+ "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n"
+ "\t\t 2 = enable MSI interrupt mechanism.");
#define QL4_DEF_QDEPTH 32
static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xmaxqdepth,
- "Maximum queue depth to report for target devices.\n"
- " Default: 32.");
+ " Maximum queue depth to report for target devices.\n"
+ "\t\t Default: 32.");
static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
MODULE_PARM_DESC(ql4xsess_recovery_tmo,
"Target Session Recovery Timeout.\n"
- " Default: 120 sec.");
+ "\t\t Default: 120 sec.");
static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
/*
@@ -1630,7 +1631,9 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
/* Update timers after login */
ddb_entry->default_relogin_timeout =
- le16_to_cpu(fw_ddb_entry->def_timeout);
+ (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
+ (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
+ le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
ddb_entry->default_time2wait =
le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
@@ -1969,6 +1972,42 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
return QLA_ERROR;
}
+/**
+ * qla4_8xxx_check_temp - Check the ISP82XX temperature.
+ * @ha: adapter block pointer.
+ *
+ * Note: The caller should not hold the idc lock.
+ **/
+static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
+{
+ uint32_t temp, temp_state, temp_val;
+ int status = QLA_SUCCESS;
+
+ temp = qla4_8xxx_rd_32(ha, CRB_TEMP_STATE);
+
+ temp_state = qla82xx_get_temp_state(temp);
+ temp_val = qla82xx_get_temp_val(temp);
+
+ if (temp_state == QLA82XX_TEMP_PANIC) {
+ ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
+ " exceeds maximum allowed. Hardware has been shut"
+ " down.\n", temp_val);
+ status = QLA_ERROR;
+ } else if (temp_state == QLA82XX_TEMP_WARN) {
+ if (ha->temperature == QLA82XX_TEMP_NORMAL)
+ ql4_printk(KERN_WARNING, ha, "Device temperature %d"
+ " degrees C exceeds operating range."
+ " Immediate action needed.\n", temp_val);
+ } else {
+ if (ha->temperature == QLA82XX_TEMP_WARN)
+ ql4_printk(KERN_INFO, ha, "Device temperature is"
+ " now %d degrees C in normal range.\n",
+ temp_val);
+ }
+ ha->temperature = temp_state;
+ return status;
+}
+
/**
* qla4_8xxx_check_fw_alive - Check firmware health
* @ha: Pointer to host adapter structure.
@@ -2040,7 +2079,16 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- if (dev_state == QLA82XX_DEV_NEED_RESET &&
+
+ if (qla4_8xxx_check_temp(ha)) {
+ ql4_printk(KERN_INFO, ha, "disabling pause"
+ " transmit on port 0 & 1.\n");
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0 |
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
+ qla4xxx_wake_dpc(ha);
+ } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
if (!ql4xdontresethba) {
ql4_printk(KERN_INFO, ha, "%s: HW State: "
@@ -2057,9 +2105,21 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
} else {
/* Check firmware health */
if (qla4_8xxx_check_fw_alive(ha)) {
+ ql4_printk(KERN_INFO, ha, "disabling pause"
+ " transmit on port 0 & 1.\n");
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0 |
+ CRB_NIU_XG_PAUSE_CTL_P1);
halt_status = qla4_8xxx_rd_32(ha,
QLA82XX_PEG_HALT_STATUS1);
+ if (LSW(MSB(halt_status)) == 0x67)
+ ql4_printk(KERN_ERR, ha, "%s:"
+ " Firmware aborted with"
+ " error code 0x00006700."
+ " Device is being reset\n",
+ __func__);
+
/* Since we cannot change dev_state in interrupt
* context, set appropriate DPC flag then wakeup
* DPC */
@@ -2078,7 +2138,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
}
}
-void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
{
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
@@ -3826,16 +3886,14 @@ static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
return ret;
}
-static void qla4xxx_free_nt_list(struct list_head *list_nt)
+static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
{
- struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
+ struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
- /* Free up the normaltargets list */
- list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
- list_del_init(&nt_ddb_idx->list);
- vfree(nt_ddb_idx);
+ list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
+ list_del_init(&ddb_idx->list);
+ vfree(ddb_idx);
}
-
}
static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
@@ -3884,6 +3942,8 @@ static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry)
{
+ uint16_t def_timeout;
+
ddb_entry->ddb_type = FLASH_DDB;
ddb_entry->fw_ddb_index = INVALID_ENTRY;
ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
@@ -3894,9 +3954,10 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
atomic_set(&ddb_entry->relogin_timer, 0);
atomic_set(&ddb_entry->relogin_retry_count, 0);
-
+ def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
ddb_entry->default_relogin_timeout =
- le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
+ (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
+ def_timeout : LOGIN_TOV;
ddb_entry->default_time2wait =
le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
}
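This hunk and the earlier qla4xxx_update_session_conn_param() change apply the same sanity rule to the firmware-supplied default timeout; written as a standalone sketch (hypothetical helper, not part of the patch):

    static uint16_t sane_relogin_timeout(uint16_t def_timeout)
    {
            /* Trust the firmware value only when it lies strictly between
             * LOGIN_TOV and LOGIN_TOV * 10; otherwise fall back to LOGIN_TOV.
             */
            if (def_timeout > LOGIN_TOV && def_timeout < LOGIN_TOV * 10)
                    return def_timeout;
            return LOGIN_TOV;
    }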
@@ -3934,7 +3995,6 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
ip_state == IP_ADDRSTATE_DEPRICATED ||
ip_state == IP_ADDRSTATE_DISABLING)
ip_idx[idx] = -1;
-
}
/* Break if all IP states checked */
@@ -3947,58 +4007,37 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
} while (time_after(wtime, jiffies));
}
-void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
+ struct list_head *list_st)
{
+ struct qla_ddb_index *st_ddb_idx;
int max_ddbs;
+ int fw_idx_size;
+ struct dev_db_entry *fw_ddb_entry;
+ dma_addr_t fw_ddb_dma;
int ret;
uint32_t idx = 0, next_idx = 0;
uint32_t state = 0, conn_err = 0;
- uint16_t conn_id;
- struct dev_db_entry *fw_ddb_entry;
- struct ddb_entry *ddb_entry = NULL;
- dma_addr_t fw_ddb_dma;
- struct iscsi_cls_session *cls_sess;
- struct iscsi_session *sess;
- struct iscsi_cls_conn *cls_conn;
- struct iscsi_endpoint *ep;
- uint16_t cmds_max = 32, tmo = 0;
- uint32_t initial_cmdsn = 0;
- struct list_head list_st, list_nt; /* List of sendtargets */
- struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
- int fw_idx_size;
- unsigned long wtime;
- struct qla_ddb_index *nt_ddb_idx;
-
- if (!test_bit(AF_LINK_UP, &ha->flags)) {
- set_bit(AF_BUILD_DDB_LIST, &ha->flags);
- ha->is_reset = is_reset;
- return;
- }
- max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
- MAX_DEV_DB_ENTRIES;
+ uint16_t conn_id = 0;
fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
&fw_ddb_dma);
if (fw_ddb_entry == NULL) {
DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
- goto exit_ddb_list;
+ goto exit_st_list;
}
- INIT_LIST_HEAD(&list_st);
- INIT_LIST_HEAD(&list_nt);
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
fw_idx_size = sizeof(struct qla_ddb_index);
for (idx = 0; idx < max_ddbs; idx = next_idx) {
- ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
- fw_ddb_dma, NULL,
- &next_idx, &state, &conn_err,
- NULL, &conn_id);
+ ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
+ NULL, &next_idx, &state,
+ &conn_err, NULL, &conn_id);
if (ret == QLA_ERROR)
break;
- if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
- goto continue_next_st;
-
/* Check if ST, add to the list_st */
if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
goto continue_next_st;
@@ -4009,59 +4048,155 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
st_ddb_idx->fw_ddb_idx = idx;
- list_add_tail(&st_ddb_idx->list, &list_st);
+ list_add_tail(&st_ddb_idx->list, list_st);
continue_next_st:
if (next_idx == 0)
break;
}
- /* Before issuing conn open mbox, ensure all IPs states are configured
- * Note, conn open fails if IPs are not configured
+exit_st_list:
+ if (fw_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
+/**
+ * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
+ * @ha: pointer to adapter structure
+ * @list_ddb: List from which failed ddb to be removed
+ *
+ * Iterate over the list of DDBs and find and remove DDBs that are either in
+ * no connection active state or failed state
+ **/
+static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
+ struct list_head *list_ddb)
+{
+ struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
+ uint32_t next_idx = 0;
+ uint32_t state = 0, conn_err = 0;
+ int ret;
+
+ list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
+ ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
+ NULL, 0, NULL, &next_idx, &state,
+ &conn_err, NULL, NULL);
+ if (ret == QLA_ERROR)
+ continue;
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+ state == DDB_DS_SESSION_FAILED) {
+ list_del_init(&ddb_idx->list);
+ vfree(ddb_idx);
+ }
+ }
+}
+
+static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ int is_reset)
+{
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_session *sess;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_endpoint *ep;
+ uint16_t cmds_max = 32;
+ uint16_t conn_id = 0;
+ uint32_t initial_cmdsn = 0;
+ int ret = QLA_SUCCESS;
+
+ struct ddb_entry *ddb_entry = NULL;
+
+ /* Create session object, with INVALID_ENTRY,
+ * the target_id would get set when we issue the login
*/
- qla4xxx_wait_for_ip_configuration(ha);
+ cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
+ cmds_max, sizeof(struct ddb_entry),
+ sizeof(struct ql4_task_data),
+ initial_cmdsn, INVALID_ENTRY);
+ if (!cls_sess) {
+ ret = QLA_ERROR;
+ goto exit_setup;
+ }
- /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
- list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
- qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
+ /* iscsi_session_setup() takes a reference on the transport module,
+ * so call module_put() here to drop that reference and allow the
+ * driver to be unloaded.
+ */
+ module_put(qla4xxx_iscsi_transport.owner);
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ddb_entry->sess = cls_sess;
+
+ cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
+ memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+
+ qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
+
+ cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
+
+ if (!cls_conn) {
+ ret = QLA_ERROR;
+ goto exit_setup;
}
- /* Wait to ensure all sendtargets are done for min 12 sec wait */
- tmo = ((ha->def_timeout < LOGIN_TOV) ? LOGIN_TOV : ha->def_timeout);
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Default time to wait for build ddb %d\n", tmo));
+ ddb_entry->conn = cls_conn;
- wtime = jiffies + (HZ * tmo);
- do {
- list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st,
- list) {
- ret = qla4xxx_get_fwddb_entry(ha,
- st_ddb_idx->fw_ddb_idx,
- NULL, 0, NULL, &next_idx,
- &state, &conn_err, NULL,
- NULL);
- if (ret == QLA_ERROR)
- continue;
+ /* Setup ep, for displaying attributes in sysfs */
+ ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
+ if (ep) {
+ ep->conn = cls_conn;
+ cls_conn->ep = ep;
+ } else {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
+ ret = QLA_ERROR;
+ goto exit_setup;
+ }
- if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
- state == DDB_DS_SESSION_FAILED) {
- list_del_init(&st_ddb_idx->list);
- vfree(st_ddb_idx);
- }
- }
- schedule_timeout_uninterruptible(HZ / 10);
- } while (time_after(wtime, jiffies));
+ /* Update sess/conn params */
+ qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
- /* Free up the sendtargets list */
- list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
- list_del_init(&st_ddb_idx->list);
- vfree(st_ddb_idx);
+ if (is_reset == RESET_ADAPTER) {
+ iscsi_block_session(cls_sess);
+ /* Use the relogin path to discover new devices
+ * by short-circuting the logic of setting
+ * timer to relogin - instead set the flags
+ * to initiate login right away.
+ */
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ set_bit(DF_RELOGIN, &ddb_entry->flags);
}
+exit_setup:
+ return ret;
+}
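
The comment above notes that iscsi_session_setup() takes a reference on the transport owner module, which the driver immediately drops with module_put() because this session is created internally rather than from user space. The general try_module_get()/module_put() pairing, with a hypothetical callback, roughly looks like:

#include <linux/module.h>
#include <linux/errno.h>

/* Hypothetical helper: pin a module around a critical section;
 * try_module_get() fails if the module is already being unloaded,
 * and module_put() releases the reference afterwards.
 */
static int do_with_module_pinned(struct module *owner, void (*fn)(void))
{
	if (!try_module_get(owner))
		return -ENODEV;
	fn();
	module_put(owner);
	return 0;
}
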
+
+static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
+ struct list_head *list_nt, int is_reset)
+{
+ struct dev_db_entry *fw_ddb_entry;
+ dma_addr_t fw_ddb_dma;
+ int max_ddbs;
+ int fw_idx_size;
+ int ret;
+ uint32_t idx = 0, next_idx = 0;
+ uint32_t state = 0, conn_err = 0;
+ uint16_t conn_id = 0;
+ struct qla_ddb_index *nt_ddb_idx;
+
+ fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &fw_ddb_dma);
+ if (fw_ddb_entry == NULL) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+ goto exit_nt_list;
+ }
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+ fw_idx_size = sizeof(struct qla_ddb_index);
+
for (idx = 0; idx < max_ddbs; idx = next_idx) {
- ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
- fw_ddb_dma, NULL,
- &next_idx, &state, &conn_err,
- NULL, &conn_id);
+ ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
+ NULL, &next_idx, &state,
+ &conn_err, NULL, &conn_id);
if (ret == QLA_ERROR)
break;
@@ -4072,107 +4207,113 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
goto continue_next_nt;
- if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
- state == DDB_DS_SESSION_FAILED) {
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Adding DDB to session = 0x%x\n",
- idx));
- if (is_reset == INIT_ADAPTER) {
- nt_ddb_idx = vmalloc(fw_idx_size);
- if (!nt_ddb_idx)
- break;
-
- nt_ddb_idx->fw_ddb_idx = idx;
-
- memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
- sizeof(struct dev_db_entry));
-
- if (qla4xxx_is_flash_ddb_exists(ha, &list_nt,
- fw_ddb_entry) == QLA_SUCCESS) {
- vfree(nt_ddb_idx);
- goto continue_next_nt;
- }
- list_add_tail(&nt_ddb_idx->list, &list_nt);
- } else if (is_reset == RESET_ADAPTER) {
- if (qla4xxx_is_session_exists(ha,
- fw_ddb_entry) == QLA_SUCCESS)
- goto continue_next_nt;
- }
+ if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
+ state == DDB_DS_SESSION_FAILED))
+ goto continue_next_nt;
- /* Create session object, with INVALID_ENTRY,
- * the targer_id would get set when we issue the login
- */
- cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport,
- ha->host, cmds_max,
- sizeof(struct ddb_entry),
- sizeof(struct ql4_task_data),
- initial_cmdsn, INVALID_ENTRY);
- if (!cls_sess)
- goto exit_ddb_list;
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Adding DDB to session = 0x%x\n", idx));
+ if (is_reset == INIT_ADAPTER) {
+ nt_ddb_idx = vmalloc(fw_idx_size);
+ if (!nt_ddb_idx)
+ break;
- /*
- * iscsi_session_setup increments the driver reference
- * count which wouldn't let the driver to be unloaded.
- * so calling module_put function to decrement the
- * reference count.
- **/
- module_put(qla4xxx_iscsi_transport.owner);
- sess = cls_sess->dd_data;
- ddb_entry = sess->dd_data;
- ddb_entry->sess = cls_sess;
+ nt_ddb_idx->fw_ddb_idx = idx;
- cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
- memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
+ memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
sizeof(struct dev_db_entry));
- qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
-
- cls_conn = iscsi_conn_setup(cls_sess,
- sizeof(struct qla_conn),
- conn_id);
- if (!cls_conn)
- goto exit_ddb_list;
-
- ddb_entry->conn = cls_conn;
-
- /* Setup ep, for displaying attributes in sysfs */
- ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
- if (ep) {
- ep->conn = cls_conn;
- cls_conn->ep = ep;
- } else {
- DEBUG2(ql4_printk(KERN_ERR, ha,
- "Unable to get ep\n"));
- }
-
- /* Update sess/conn params */
- qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess,
- cls_conn);
-
- if (is_reset == RESET_ADAPTER) {
- iscsi_block_session(cls_sess);
- /* Use the relogin path to discover new devices
- * by short-circuting the logic of setting
- * timer to relogin - instead set the flags
- * to initiate login right away.
- */
- set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
- set_bit(DF_RELOGIN, &ddb_entry->flags);
+ if (qla4xxx_is_flash_ddb_exists(ha, list_nt,
+ fw_ddb_entry) == QLA_SUCCESS) {
+ vfree(nt_ddb_idx);
+ goto continue_next_nt;
}
+ list_add_tail(&nt_ddb_idx->list, list_nt);
+ } else if (is_reset == RESET_ADAPTER) {
+ if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
+ QLA_SUCCESS)
+ goto continue_next_nt;
}
+
+ ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset);
+ if (ret == QLA_ERROR)
+ goto exit_nt_list;
+
continue_next_nt:
if (next_idx == 0)
break;
}
-exit_ddb_list:
- qla4xxx_free_nt_list(&list_nt);
+
+exit_nt_list:
if (fw_ddb_entry)
dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
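
qla4xxx_build_nt_list() above pulls its scratch dev_db_entry from a DMA pool, so each firmware lookup reuses a coherent buffer instead of doing a fresh coherent allocation. A minimal sketch of the dma_pool lifecycle, with hypothetical names and a generic struct device:

#include <linux/dmapool.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/gfp.h>

struct fw_entry {			/* hypothetical firmware record */
	u8 raw[512];
};

/* Create a pool of small coherent buffers, take one for a firmware
 * query, then return it; the pool amortises coherent allocations of
 * the same size and alignment.
 */
static int query_one_entry(struct device *dev)
{
	struct dma_pool *pool;
	struct fw_entry *buf;
	dma_addr_t buf_dma;

	pool = dma_pool_create("fw_entry_pool", dev,
			       sizeof(struct fw_entry), 4, 0);
	if (!pool)
		return -ENOMEM;

	buf = dma_pool_alloc(pool, GFP_KERNEL, &buf_dma);
	if (!buf) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand buf_dma to the hardware, read results from buf ... */

	dma_pool_free(pool, buf, buf_dma);
	dma_pool_destroy(pool);
	return 0;
}
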
+
+/**
+ * qla4xxx_build_ddb_list - Build ddb list and setup sessions
+ * @ha: pointer to adapter structure
+ * @is_reset: Is this init path or reset path
+ *
+ * Create a list of sendtargets (st) from firmware DDBs, issue send targets
+ * using connection open, then create the list of normal targets (nt)
+ * from firmware DDBs. Based on the nt list, set up session and connection
+ * objects.
+ **/
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+{
+ uint16_t tmo = 0;
+ struct list_head list_st, list_nt;
+ struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
+ unsigned long wtime;
+
+ if (!test_bit(AF_LINK_UP, &ha->flags)) {
+ set_bit(AF_BUILD_DDB_LIST, &ha->flags);
+ ha->is_reset = is_reset;
+ return;
+ }
+
+ INIT_LIST_HEAD(&list_st);
+ INIT_LIST_HEAD(&list_nt);
+
+ qla4xxx_build_st_list(ha, &list_st);
+
+ /* Before issuing conn open mbox, ensure all IP states are configured.
+ * Note, conn open fails if IPs are not configured
+ */
+ qla4xxx_wait_for_ip_configuration(ha);
+
+ /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
+ list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+ qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
+ }
+
+ /* Wait to ensure all sendtargets are done; wait a minimum of 12 sec */
+ tmo = ((ha->def_timeout > LOGIN_TOV) &&
+ (ha->def_timeout < LOGIN_TOV * 10) ?
+ ha->def_timeout : LOGIN_TOV);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Default time to wait for build ddb %d\n", tmo));
+
+ wtime = jiffies + (HZ * tmo);
+ do {
+ if (list_empty(&list_st))
+ break;
+
+ qla4xxx_remove_failed_ddb(ha, &list_st);
+ schedule_timeout_uninterruptible(HZ / 10);
+ } while (time_after(wtime, jiffies));
+
+ /* Free up the sendtargets list */
+ qla4xxx_free_ddb_list(&list_st);
+
+ qla4xxx_build_nt_list(ha, &list_nt, is_reset);
+
+ qla4xxx_free_ddb_list(&list_nt);
qla4xxx_free_ddb_index(ha);
}
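
qla4xxx_build_ddb_list() above accepts the firmware default timeout only when it falls between LOGIN_TOV and LOGIN_TOV * 10, otherwise falling back to LOGIN_TOV, and then polls the sendtarget list every 100 ms until it drains or the deadline passes. A generic sketch of that clamp-then-poll idiom, with hypothetical names:

#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/types.h>

#define MY_LOGIN_TOV 12		/* hypothetical floor, seconds */

/* Clamp a configured timeout to a sane window, then poll a condition
 * roughly every 100 ms until it holds or the deadline expires.
 */
static void wait_with_clamped_timeout(unsigned int def_timeout,
				      bool (*done)(void *), void *arg)
{
	unsigned int tmo;
	unsigned long deadline;

	/* Use def_timeout only if it lies inside (floor, 10 * floor). */
	tmo = (def_timeout > MY_LOGIN_TOV &&
	       def_timeout < MY_LOGIN_TOV * 10) ? def_timeout : MY_LOGIN_TOV;

	deadline = jiffies + HZ * tmo;
	do {
		if (done(arg))
			break;
		schedule_timeout_uninterruptible(HZ / 10);
	} while (time_after(deadline, jiffies));
}
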
-
/**
* qla4xxx_probe_adapter - callback function to probe HBA
* @pdev: pointer to pci_dev structure
diff --git a/trunk/drivers/scsi/qla4xxx/ql4_version.h b/trunk/drivers/scsi/qla4xxx/ql4_version.h
index 26a3fa34a33c..133989b3a9f4 100644
--- a/trunk/drivers/scsi/qla4xxx/ql4_version.h
+++ b/trunk/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k10"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k12"
diff --git a/trunk/drivers/scsi/scsi_lib.c b/trunk/drivers/scsi/scsi_lib.c
index f85cfa6c47b5..b2c95dbe9d65 100644
--- a/trunk/drivers/scsi/scsi_lib.c
+++ b/trunk/drivers/scsi/scsi_lib.c
@@ -1316,15 +1316,10 @@ static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
}
if (scsi_target_is_busy(starget)) {
- if (list_empty(&sdev->starved_entry))
- list_add_tail(&sdev->starved_entry,
- &shost->starved_list);
+ list_move_tail(&sdev->starved_entry, &shost->starved_list);
return 0;
}
- /* We're OK to process the command, so we can't be starved */
- if (!list_empty(&sdev->starved_entry))
- list_del_init(&sdev->starved_entry);
return 1;
}
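
The scsi_lib.c hunk above folds an "add only if not queued" / "delete if queued" pair into a single list_move_tail(), which unlinks the node (a no-op on a self-linked, i.e. list_del_init'd, node) and appends it at the tail; note this also moves an already-queued device to the back of the starved list. A minimal sketch of the two forms, with a hypothetical struct:

#include <linux/list.h>

struct waiter {
	struct list_head entry;
};

/* Old-style: only add if the node is not already on a list. */
static void queue_waiter_old(struct waiter *w, struct list_head *head)
{
	if (list_empty(&w->entry))
		list_add_tail(&w->entry, head);
}

/* list_move_tail() unlinks the node (harmless when it is self-linked
 * after INIT_LIST_HEAD or list_del_init) and re-adds it at the tail,
 * so one call covers both the "not queued" and the "already queued,
 * move to the back" cases.
 */
static void queue_waiter_new(struct waiter *w, struct list_head *head)
{
	list_move_tail(&w->entry, head);
}
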
diff --git a/trunk/drivers/scsi/scsi_transport_fc.c b/trunk/drivers/scsi/scsi_transport_fc.c
index 1b214910b714..f59d4a05ecd7 100644
--- a/trunk/drivers/scsi/scsi_transport_fc.c
+++ b/trunk/drivers/scsi/scsi_transport_fc.c
@@ -3048,7 +3048,8 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
spin_lock_irqsave(shost->host_lock, flags);
rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
- FC_RPORT_DEVLOSS_PENDING);
+ FC_RPORT_DEVLOSS_PENDING |
+ FC_RPORT_DEVLOSS_CALLBK_DONE);
spin_unlock_irqrestore(shost->host_lock, flags);
/* ensure any stgt delete functions are done */
diff --git a/trunk/drivers/scsi/sg.c b/trunk/drivers/scsi/sg.c
index 02d99982a74d..eacd46bb36b9 100644
--- a/trunk/drivers/scsi/sg.c
+++ b/trunk/drivers/scsi/sg.c
@@ -2368,16 +2368,15 @@ static ssize_t
sg_proc_write_adio(struct file *filp, const char __user *buffer,
size_t count, loff_t *off)
{
- int num;
- char buff[11];
+ int err;
+ unsigned long num;
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
- num = (count < 10) ? count : 10;
- if (copy_from_user(buff, buffer, num))
- return -EFAULT;
- buff[num] = '\0';
- sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0;
+ err = kstrtoul_from_user(buffer, count, 0, &num);
+ if (err)
+ return err;
+ sg_allow_dio = num ? 1 : 0;
return count;
}
@@ -2390,17 +2389,15 @@ static ssize_t
sg_proc_write_dressz(struct file *filp, const char __user *buffer,
size_t count, loff_t *off)
{
- int num;
+ int err;
unsigned long k = ULONG_MAX;
- char buff[11];
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
- num = (count < 10) ? count : 10;
- if (copy_from_user(buff, buffer, num))
- return -EFAULT;
- buff[num] = '\0';
- k = simple_strtoul(buff, NULL, 10);
+
+ err = kstrtoul_from_user(buffer, count, 0, &k);
+ if (err)
+ return err;
if (k <= 1048576) { /* limit "big buff" to 1 MB */
sg_big_buff = k;
return count;
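
Both sg proc write handlers above replace a bounded copy_from_user() plus simple_strtoul() with kstrtoul_from_user(), which copies and parses in one step and returns -EINVAL or -ERANGE on malformed or overflowing input instead of silently truncating at ten characters. A sketch of the same pattern in a hypothetical proc write handler:

#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/fs.h>

static unsigned long my_tunable;	/* hypothetical module knob */

static ssize_t my_proc_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *off)
{
	unsigned long val;
	int err;

	/* Copies at most count bytes from user space and parses them with
	 * base 0 (auto-detects 0x.../0...), failing cleanly on bad input.
	 */
	err = kstrtoul_from_user(buf, count, 0, &val);
	if (err)
		return err;

	my_tunable = val;
	return count;
}
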
diff --git a/trunk/drivers/scsi/sym53c8xx_2/sym_glue.c b/trunk/drivers/scsi/sym53c8xx_2/sym_glue.c
index b4543f575f46..36d1ed7817eb 100644
--- a/trunk/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/trunk/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -839,6 +839,10 @@ static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
struct sym_lcb *lp = sym_lp(tp, sdev->lun);
unsigned long flags;
+ /* if slave_alloc returned before allocating a sym_lcb, return */
+ if (!lp)
+ return;
+
spin_lock_irqsave(np->s.host->host_lock, flags);
if (lp->busy_itlq || lp->busy_itl) {
diff --git a/trunk/drivers/target/iscsi/iscsi_target.c b/trunk/drivers/target/iscsi/iscsi_target.c
index 8599545cdf9e..ac44af165b27 100644
--- a/trunk/drivers/target/iscsi/iscsi_target.c
+++ b/trunk/drivers/target/iscsi/iscsi_target.c
@@ -27,8 +27,7 @@
#include
#include
#include
-#include
-#include
+#include
#include "iscsi_target_core.h"
#include "iscsi_target_parameters.h"
@@ -284,8 +283,8 @@ static struct iscsi_np *iscsit_get_np(
sock_in6 = (struct sockaddr_in6 *)sockaddr;
sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
- if (!memcmp((void *)&sock_in6->sin6_addr.in6_u,
- (void *)&sock_in6_e->sin6_addr.in6_u,
+ if (!memcmp(&sock_in6->sin6_addr.in6_u,
+ &sock_in6_e->sin6_addr.in6_u,
sizeof(struct in6_addr)))
ip_match = 1;
@@ -1225,7 +1224,7 @@ static void iscsit_do_crypto_hash_buf(
crypto_hash_init(hash);
- sg_init_one(&sg, (u8 *)buf, payload_length);
+ sg_init_one(&sg, buf, payload_length);
crypto_hash_update(hash, &sg, payload_length);
if (padding) {
@@ -1603,7 +1602,7 @@ static int iscsit_handle_nop_out(
/*
* Attach ping data to struct iscsi_cmd->buf_ptr.
*/
- cmd->buf_ptr = (void *)ping_data;
+ cmd->buf_ptr = ping_data;
cmd->buf_ptr_size = payload_length;
pr_debug("Got %u bytes of NOPOUT ping"
@@ -3197,7 +3196,7 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
end_of_buf = 1;
goto eob;
}
- memcpy((void *)payload + payload_len, buf, len);
+ memcpy(payload + payload_len, buf, len);
payload_len += len;
spin_lock(&tiqn->tiqn_tpg_lock);
@@ -3229,7 +3228,7 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
end_of_buf = 1;
goto eob;
}
- memcpy((void *)payload + payload_len, buf, len);
+ memcpy(payload + payload_len, buf, len);
payload_len += len;
}
spin_unlock(&tpg->tpg_np_lock);
@@ -3486,7 +3485,7 @@ int iscsi_target_tx_thread(void *arg)
struct iscsi_conn *conn;
struct iscsi_queue_req *qr = NULL;
struct se_cmd *se_cmd;
- struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg;
+ struct iscsi_thread_set *ts = arg;
/*
* Allow ourselves to be interrupted by SIGINT so that a
* connection recovery / failure event can be triggered externally.
@@ -3775,7 +3774,7 @@ int iscsi_target_rx_thread(void *arg)
u8 buffer[ISCSI_HDR_LEN], opcode;
u32 checksum = 0, digest = 0;
struct iscsi_conn *conn = NULL;
- struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg;
+ struct iscsi_thread_set *ts = arg;
struct kvec iov;
/*
* Allow ourselves to be interrupted by SIGINT so that a
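
Several of the iscsi_target.c hunks above simply drop casts on void pointer assignments and arguments (buf_ptr, the thread-set arg, the payload copies); in C these conversions are implicit, so the casts were noise. A small sketch of the same point, with hypothetical types:

#include <linux/slab.h>

struct payload {
	int len;
};

/* Conversions to and from void * are implicit in C, so casting the
 * result of an allocator or a generic "context" pointer is redundant;
 * assignments in both directions compile cleanly without it.
 */
static struct payload *make_payload(gfp_t flags)
{
	void *p = kzalloc(sizeof(struct payload), flags);

	return p;			/* void * -> struct payload *, no cast */
}

static void use_context(void *ctx)
{
	struct payload *pl = ctx;	/* same in the other direction */

	pl->len = 0;
}
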
diff --git a/trunk/drivers/target/iscsi/iscsi_target_auth.c b/trunk/drivers/target/iscsi/iscsi_target_auth.c
index 1cd6ce373b83..db0cf7c8adde 100644
--- a/trunk/drivers/target/iscsi/iscsi_target_auth.c
+++ b/trunk/drivers/target/iscsi/iscsi_target_auth.c
@@ -82,7 +82,7 @@ static void chap_gen_challenge(
unsigned int *c_len)
{
unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
- struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+ struct iscsi_chap *chap = conn->auth_protocol;
memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
@@ -120,7 +120,7 @@ static struct iscsi_chap *chap_server_open(
if (!conn->auth_protocol)
return NULL;
- chap = (struct iscsi_chap *) conn->auth_protocol;
+ chap = conn->auth_protocol;
/*
* We only support MD5 MDA presently.
*/
@@ -165,14 +165,15 @@ static int chap_server_compute_md5(
unsigned int *nr_out_len)
{
char *endptr;
- unsigned char id, digest[MD5_SIGNATURE_SIZE];
+ unsigned long id;
+ unsigned char digest[MD5_SIGNATURE_SIZE];
unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
unsigned char identifier[10], *challenge = NULL;
unsigned char *challenge_binhex = NULL;
unsigned char client_digest[MD5_SIGNATURE_SIZE];
unsigned char server_digest[MD5_SIGNATURE_SIZE];
unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
- struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+ struct iscsi_chap *chap = conn->auth_protocol;
struct crypto_hash *tfm;
struct hash_desc desc;
struct scatterlist sg;
@@ -246,7 +247,7 @@ static int chap_server_compute_md5(
goto out;
}
- sg_init_one(&sg, (void *)&chap->id, 1);
+ sg_init_one(&sg, &chap->id, 1);
ret = crypto_hash_update(&desc, &sg, 1);
if (ret < 0) {
pr_err("crypto_hash_update() failed for id\n");
@@ -254,7 +255,7 @@ static int chap_server_compute_md5(
goto out;
}
- sg_init_one(&sg, (void *)&auth->password, strlen(auth->password));
+ sg_init_one(&sg, &auth->password, strlen(auth->password));
ret = crypto_hash_update(&desc, &sg, strlen(auth->password));
if (ret < 0) {
pr_err("crypto_hash_update() failed for password\n");
@@ -262,7 +263,7 @@ static int chap_server_compute_md5(
goto out;
}
- sg_init_one(&sg, (void *)chap->challenge, CHAP_CHALLENGE_LENGTH);
+ sg_init_one(&sg, chap->challenge, CHAP_CHALLENGE_LENGTH);
ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH);
if (ret < 0) {
pr_err("crypto_hash_update() failed for challenge\n");
@@ -305,14 +306,17 @@ static int chap_server_compute_md5(
}
if (type == HEX)
- id = (unsigned char)simple_strtoul((char *)&identifier[2],
- &endptr, 0);
+ id = simple_strtoul(&identifier[2], &endptr, 0);
else
- id = (unsigned char)simple_strtoul(identifier, &endptr, 0);
+ id = simple_strtoul(identifier, &endptr, 0);
+ if (id > 255) {
+ pr_err("chap identifier: %lu greater than 255\n", id);
+ goto out;
+ }
/*
* RFC 1994 says Identifier is no more than octet (8 bits).
*/
- pr_debug("[server] Got CHAP_I=%d\n", id);
+ pr_debug("[server] Got CHAP_I=%lu\n", id);
/*
* Get CHAP_C.
*/
@@ -351,7 +355,7 @@ static int chap_server_compute_md5(
goto out;
}
- sg_init_one(&sg, (void *)&id, 1);
+ sg_init_one(&sg, &id, 1);
ret = crypto_hash_update(&desc, &sg, 1);
if (ret < 0) {
pr_err("crypto_hash_update() failed for id\n");
@@ -359,7 +363,7 @@ static int chap_server_compute_md5(
goto out;
}
- sg_init_one(&sg, (void *)auth->password_mutual,
+ sg_init_one(&sg, auth->password_mutual,
strlen(auth->password_mutual));
ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual));
if (ret < 0) {
@@ -371,7 +375,7 @@ static int chap_server_compute_md5(
/*
* Convert received challenge to binary hex.
*/
- sg_init_one(&sg, (void *)challenge_binhex, challenge_len);
+ sg_init_one(&sg, challenge_binhex, challenge_len);
ret = crypto_hash_update(&desc, &sg, challenge_len);
if (ret < 0) {
pr_err("crypto_hash_update() failed for ma challenge\n");
@@ -414,7 +418,7 @@ static int chap_got_response(
char *nr_out_ptr,
unsigned int *nr_out_len)
{
- struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+ struct iscsi_chap *chap = conn->auth_protocol;
switch (chap->digest_type) {
case CHAP_DIGEST_MD5:
@@ -437,7 +441,7 @@ u32 chap_main_loop(
int *in_len,
int *out_len)
{
- struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+ struct iscsi_chap *chap = conn->auth_protocol;
if (!chap) {
chap = chap_server_open(conn, auth, in_text, out_text, out_len);
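
The CHAP hunks above keep the legacy synchronous crypto_hash API (crypto_alloc_hash / hash_desc / sg_init_one / crypto_hash_update), merely dropping redundant casts and widening the identifier before range-checking it. A condensed sketch of that API hashing a single buffer with MD5, using a hypothetical helper (newer kernels would use crypto_shash instead):

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* MD5 over one buffer with the legacy crypto_hash API used in this
 * driver; buf/len/out are hypothetical, out must hold 16 bytes.
 */
static int md5_digest_buf(const void *buf, unsigned int len, u8 *out)
{
	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_one(&sg, buf, len);

	ret = crypto_hash_init(&desc);
	if (!ret)
		ret = crypto_hash_update(&desc, &sg, len);
	if (!ret)
		ret = crypto_hash_final(&desc, out);	/* 16-byte MD5 digest */

	crypto_free_hash(tfm);
	return ret;
}
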
diff --git a/trunk/drivers/target/iscsi/iscsi_target_configfs.c b/trunk/drivers/target/iscsi/iscsi_target_configfs.c
index db327845e46b..3468caab47a2 100644
--- a/trunk/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/trunk/drivers/target/iscsi/iscsi_target_configfs.c
@@ -22,12 +22,8 @@
#include
#include
#include
-#include
-#include
+#include
#include
-#include
-#include
-#include
#include